[
  {
    "path": ".github/.codecov.yml",
    "content": "coverage:\n  status:\n    project:\n      default:\n        target: 80%    # the required coverage value\n        threshold: 1%  # allows a 1% drop from the previous base commit coverage"
  },
  {
    "path": ".github/pull_request_template.md",
    "content": "## What is the change? Why is it being made?\n\n<!-- MANDATORY: Describe the change -->\n\n\n## SCR Information\n\n<!-- MANDATORY: uncomment one-and-only-one of these -->\n<!-- Change Type: features -->\n<!-- Change Type: fixes -->\n<!-- Change Type: trivial -->\n<!-- Change Type: docs -->\n\n<!-- MANDATORY: Describe why this change is needed, in one sentence -->\nOne-Sentence Rationale: TBD\n\n<!-- MANDATORY: Describe any impact on the requirements, all on one line -->\nOne-line Impact on Requirements: NA\n\n\n---\n\n## Checklist\n\n<!--\n    The pull request author should check the box if the condition is met OR if it does not apply.\n-->\n\n- [ ] This PR has only [one purpose or idea](https://terrapower.github.io/armi/developer/tooling.html#one-idea-one-pr).\n- [ ] [Tests](https://terrapower.github.io/armi/developer/tooling.html#test-it) have been added/updated to verify any new/changed code.\n- [ ] The [documentation](https://terrapower.github.io/armi/developer/tooling.html#document-it) is still up-to-date in the `doc` folder.\n- [ ] The code style follows [good practices](https://terrapower.github.io/armi/developer/standards_and_practices.html).\n- [ ] The dependencies are still up-to-date in `pyproject.toml`.\n"
  },
  {
    "path": ".github/workflows/coverage.yaml",
    "content": "name: Coverage\n\npermissions:\n  contents: read\n\non:\n  push:\n    branches:\n      - main\n    paths-ignore:\n      - 'doc/**'\n  pull_request:\n    paths-ignore:\n      - 'doc/**'\n\nconcurrency:\n  group: ${{ github.workflow }}-${{ github.ref }}\n  cancel-in-progress: true\n\njobs:\n  build:\n    # Deploying coverage to codecov.io should not happen on forks\n    if: github.repository == 'terrapower/armi'\n    runs-on: ubuntu-24.04\n    env:\n      GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}\n      CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}\n    steps:\n      - uses: actions/checkout@v2\n      - name: Setup Python\n        uses: actions/setup-python@v2\n        with:\n          python-version: '3.13'\n      - name: Install ARMI and MPI\n        run: |\n          sudo apt-get -y install libopenmpi-dev\n          pip install -e .[memprof,mpi,test]\n          pip install codecov\n      - name: Run Coverage\n        run: |\n          set -x\n          coverage run --rcfile=pyproject.toml -m pytest -n 4 --cov=armi --cov-config=pyproject.toml --cov-report=xml --ignore=venv armi\n          mpiexec -n 2 --use-hwthread-cpus coverage run --rcfile=pyproject.toml -m pytest --cov=armi --cov-config=pyproject.toml --cov-report=xml --cov-append --ignore=venv armi/tests/test_mpiFeatures.py || true\n          mpiexec -n 2 --use-hwthread-cpus coverage run --rcfile=pyproject.toml -m pytest --cov=armi --cov-config=pyproject.toml --cov-report=xml --cov-append --ignore=venv armi/tests/test_mpiParameters.py || true\n          mpiexec -n 2 --use-hwthread-cpus coverage run --rcfile=pyproject.toml -m pytest --cov=armi --cov-config=pyproject.toml --cov-report=xml --cov-append --ignore=venv armi/tests/test_mpiDirectoryChangers.py || true\n          coverage combine --rcfile=pyproject.toml --keep -a\n          coverage report --rcfile=pyproject.toml -i --skip-empty --skip-covered --sort=cover --fail-under=90\n      - name: Publish to codecov.io\n        continue-on-error: 
true\n        if: github.ref == 'refs/heads/main'\n        uses: codecov/codecov-action@v5\n        with:\n          fail_ci_if_error: false\n          token: ${{ secrets.CODECOV_TOKEN }}\n"
  },
  {
    "path": ".github/workflows/docs.yaml",
    "content": "name: Documentation\n\non:\n  push:\n    branches:\n      - main\n  pull_request:\n\nconcurrency:\n  group: ${{ github.workflow }}-${{ github.ref }}\n  cancel-in-progress: true\n\njobs:\n  build:\n    # Building and deploying docs is broken on forked repos\n    if: github.repository == 'terrapower/armi'\n    runs-on: ubuntu-22.04\n\n    steps:\n      - uses: actions/checkout@v4\n      - name: Setup Python\n        uses: actions/setup-python@v5\n        with:\n          python-version: 3.13\n      - name: Update package index\n        run: sudo apt-get update\n      - name: Install apt-get libs\n        run: sudo apt-get -y install texlive-xetex=2021.20220204-1 texlive-latex-base=2021.20220204-1 texlive-fonts-recommended=2021.20220204-1 texlive-latex-extra=2021.20220204-1 texlive-full=2021.20220204-1 pandoc libopenmpi-dev\n      - name: Setup Graphviz\n        uses: ts-graphviz/setup-graphviz@v2.0.2\n      - name: Make html/pdf Docs\n        continue-on-error: true\n        env:\n          GH_TOKEN: ${{ github.token }}\n          PR_NUMBER: ${{ github.ref == 'refs/heads/main' && -1 || github.event.number }}\n          GIT_COMMIT: ${{ github.sha }}\n        run: |\n          echo \"Installing ARMI...\"\n          set -x\n          pip install -U pip\n          pip install -e .[memprof,mpi,test,docs]\n\n          echo \"Run unit tests...\"\n          pytest --junit-xml=test_results.xml -v -n 4 armi > pytest_verbose.log\n          mpiexec -n 2 --use-hwthread-cpus pytest --junit-xml=test_results_mpi1.xml armi/tests/test_mpiFeatures.py > pytest_verbose_mpi1.log\n          mpiexec -n 2 --use-hwthread-cpus pytest --junit-xml=test_results_mpi2.xml armi/tests/test_mpiParameters.py > pytest_verbose_mpi2.log\n          mpiexec -n 2 --use-hwthread-cpus pytest --junit-xml=test_results_mpi3.xml armi/utils/tests/test_directoryChangersMpi.py > pytest_verbose_mpi3.log\n          python doc/.static/cleanup_test_results.py test_results.xml\n\n          echo \"Git magic 
so the SCR will build on GitHub Actions...\"\n          git fetch --depth=2000\n\n          echo \"Build HTML docs...\"\n          cd doc\n          git submodule init\n          git submodule update\n          make html\n\n          echo \"Build PDF docs...\"\n          make latex\n          cd _build/latex/\n          latexmk -pdf -f -interaction=nonstopmode ARMI.tex\n      - name: Deploy\n        if: github.ref == 'refs/heads/main'\n        uses: JamesIves/github-pages-deploy-action@v4.6.1\n        with:\n          token: ${{ secrets.ACCESS_TOKEN }}\n          repository-name: ${{ github.repository_owner }}/terrapower.github.io\n          branch: main\n          folder: doc/_build/html\n          target-folder: armi\n      - name: Archive HTML Docs\n        if: github.ref != 'refs/heads/main'\n        uses: actions/upload-artifact@v4\n        with:\n          name: html-docs\n          path: doc/_build/html\n          retention-days: 5\n      - name: Archive PDF Docs\n        uses: actions/upload-artifact@v4\n        with:\n          name: pdf-docs\n          path: doc/_build/latex/ARMI.pdf\n          retention-days: 5\n"
  },
  {
    "path": ".github/workflows/find_test_crumbs.py",
    "content": "# Copyright 2024 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This script exists so we can determine if new tests in CI are leaving crumbs.\"\"\"\n\nimport subprocess\n\n# A list of objects we expect during a run, and don't mind (like pycache dirs).\nIGNORED_OBJECTS = [\n    \".pytest_cache\",\n    \".tox\",\n    \"__pycache__\",\n    \"armi.egg-info\",\n    \"logs/\",\n]\n\n\ndef main():\n    # use \"git clean\" to find all non-tracked files\n    proc = subprocess.Popen([\"git\", \"clean\", \"-xnd\"], stdout=subprocess.PIPE)\n    lines = proc.communicate()[0].decode(\"utf-8\").split(\"\\n\")\n\n    # clean up the whitespace\n    lines = [ln.strip() for ln in lines if len(ln.strip())]\n\n    # ignore certain untracked object, like __pycache__ dirs\n    for ignore in IGNORED_OBJECTS:\n        lines = [ln for ln in lines if ignore not in ln]\n\n    # fail hard if there are still untracked files\n    if len(lines):\n        for line in lines:\n            print(line)\n\n        raise ValueError(\"The workspace is dirty; the tests are leaving crumbs!\")\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": ".github/workflows/licensechecker.yaml",
    "content": "name: Check License Lines\n\npermissions:\n  contents: read\n\non: [push]\n\njobs:\n  check-license-lines:\n    runs-on: ubuntu-24.04\n    steps:\n    - uses: actions/checkout@master\n    - name: Check License Lines\n      uses: kt3k/license_checker@v1.0.6\n"
  },
  {
    "path": ".github/workflows/linting.yaml",
    "content": "name: Linting\n\npermissions:\n  contents: read\n\non: [push]\n\njobs:\n  build:\n\n    runs-on: ubuntu-24.04\n\n    steps:\n      - uses: actions/checkout@v4\n      - name: Setup Python\n        uses: actions/setup-python@v5\n        with:\n          python-version: '3.13'\n      - name: Run Linter\n        run:  |\n          set -x\n          pip install -e .[test]\n          ruff format --check .\n          ruff check .\n"
  },
  {
    "path": ".github/workflows/mac_tests.yaml",
    "content": "name: ARMI MacOS Tests\n\npermissions:\n  contents: read\n\non:\n  push:\n    branches:\n      - main\n    paths-ignore:\n      - 'doc/**'\n  pull_request:\n    paths-ignore:\n      - 'doc/**'\n\nconcurrency:\n  group: ${{ github.workflow }}-${{ github.ref }}\n  cancel-in-progress: true\n\njobs:\n  build:\n    if: github.repository == 'terrapower/armi'\n    runs-on: macos-14\n\n    steps:\n      - uses: actions/checkout@v2\n      - name: Setup Python\n        uses: actions/setup-python@v2\n        with:\n          python-version: '3.11'\n      - name: Upgrade PIP\n        run: python -m pip install --upgrade pip\n      - name: Run Unit Tests on MacOS\n        run: |\n          brew install openmpi\n          pip install -e .[memprof,mpi,test]\n          pytest -n 4 armi\n"
  },
  {
    "path": ".github/workflows/stale.yaml",
    "content": "# This workflow warns and then closes PRs that have had no activity for a specified amount of time.\n#\n# You can adjust the behavior by modifying this file.\n# For more information, see: https://github.com/actions/stale\nname: Mark Stale PRs\n\non:\n  schedule:\n  # once a day at 3:14 AM\n  - cron: '14 3 * * *'\n\npermissions:\n  pull-requests: write\n\njobs:\n  stale:\n    # This workflow is not designed to make sense on forks\n    if: github.repository == 'terrapower/armi'\n    runs-on: ubuntu-24.04\n    steps:\n      - uses: actions/stale@v8\n        with:\n          repo-token: ${{ secrets.GITHUB_TOKEN }}\n          stale-pr-message: \"This pull request has been automatically marked as stale because it has not had any activity in the last 100 days. It will be closed in 7 days if no further activity occurs. Thank you for your contributions.\"\n          stale-pr-label: \"stale\"\n          days-before-pr-stale: 100\n          days-before-pr-close: 7\n          days-before-issue-stale: -1\n          operations-per-run: 100"
  },
  {
    "path": ".github/workflows/unittests.yaml",
    "content": "name: ARMI unit tests\n\npermissions:\n  contents: read\n\non:\n  push:\n    paths-ignore:\n      - 'doc/**'\n\nconcurrency:\n  group: ${{ github.workflow }}-${{ github.ref }}\n  cancel-in-progress: true\n\njobs:\n  build:\n\n    runs-on: ubuntu-24.04\n    strategy:\n      matrix:\n        python: [3.9, '3.10', '3.11', '3.12', '3.13', '3.14']\n\n    steps:\n      - uses: actions/checkout@v4\n      - name: Setup Python\n        uses: actions/setup-python@v5\n        with:\n          python-version: ${{ matrix.python }}\n      - name: Install mpi libs\n        run: sudo apt-get -y install libopenmpi-dev\n      - name: Run Tests\n        run: |\n          set -x\n          pip install -e .[memprof,mpi,test]\n          pytest -n 4 armi\n          mpiexec -n 2 --use-hwthread-cpus pytest armi/tests/test_mpiFeatures.py\n          mpiexec -n 2 --use-hwthread-cpus pytest armi/tests/test_mpiParameters.py\n          mpiexec -n 2 --use-hwthread-cpus pytest armi/utils/tests/test_directoryChangersMpi.py\n"
  },
  {
    "path": ".github/workflows/validatemanifest.py",
    "content": "# Copyright 2022 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nValidating the package-data in the pyproject.toml.\n\nValidate that we aren't trying to include files that don't exist.\n\"\"\"\n\nimport os\nfrom glob import glob\n\nimport toml\n\n# CONSTANTS\nARMI_DIR = \"armi/\"\nPRPROJECT = \"pyproject.toml\"\n\n\ndef main():\n    # parse the data files out of the pyproject.toml\n    txt = open(PRPROJECT, \"r\").read()\n    data = toml.loads(txt)\n    fileChunks = data[\"tool\"][\"setuptools\"][\"package-data\"][\"armi\"]\n\n    # loop through each line in the package-data and find all the file paths\n    errors = []\n    for i, line in enumerate(fileChunks):\n        # make sure the file exists\n        path = ARMI_DIR + line.strip()\n        if \"*\" in path:\n            paths = [f for f in glob(path) if len(f) > 3]\n            if not len(paths):\n                errors.append((i, path))\n        else:\n            if not os.path.exists(path):\n                errors.append((i, path))\n\n    # If there were any missing files, raise an Error.\n    if errors:\n        for i, line in errors:\n            print(\"Nonexistant file on line {}: {}\".format(i, line))\n        raise ValueError(\"Package-data file is incorrect: includes non-existant files.\")\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": ".github/workflows/validatemanifest.yaml",
    "content": "name: Validate Manifest\n\npermissions:\n  contents: read\n\non: [push]\n\njobs:\n  build:\n\n    runs-on: ubuntu-24.04\n\n    steps:\n      - uses: actions/checkout@v2\n      - name: Setup Python\n        uses: actions/setup-python@v2\n        with:\n          python-version: '3.11'\n      - name: Validate Manifest\n        run: |\n          pip install toml\n          python .github/workflows/validatemanifest.py\n"
  },
  {
    "path": ".github/workflows/wheels.yaml",
    "content": "name: Build Wheel\n\npermissions:\n  contents: read\n\non:\n  push:\n    branches:\n      - main\n\njobs:\n  build:\n    if: github.repository == 'terrapower/armi'\n\n    runs-on: ubuntu-24.04\n\n    steps:\n      - uses: actions/checkout@v4\n      - name: Setup Python\n        uses: actions/setup-python@v5\n        with:\n          python-version: \"3.13\"\n      - name: Install PIP Packages\n        run: |\n          pip install -U pip\n          pip install -e .\n          pip install -U wheel\n      - name: Build Wheels\n        run: |\n          mkdir dist\n          pip wheel . -w dist/\n          chmod 664 dist/armi*.whl\n      - name: Archive PIP wheel artifacts\n        uses: actions/upload-artifact@v4\n        with:\n          name: armi-wheels\n          path: |\n            dist/armi*.whl\n          retention-days: 7"
  },
  {
    "path": ".github/workflows/wintests.yaml",
    "content": "name: ARMI Windows tests\n\npermissions:\n  contents: read\n\non:\n  push:\n    branches:\n      - main\n    paths-ignore:\n      - 'doc/**'\n  pull_request:\n    paths-ignore:\n      - 'doc/**'\n\nconcurrency:\n  group: ${{ github.workflow }}-${{ github.ref }}\n  cancel-in-progress: true\n\njobs:\n  build:\n\n    runs-on: windows-2022\n\n    steps:\n      - uses: actions/checkout@v2\n      - name: Setup Python\n        uses: actions/setup-python@v2\n        with:\n          python-version: '3.11'\n      - name: Upgrade PIP\n        run: python -m pip install --upgrade pip\n      - name: Run Unit Tests on Windows\n        run: |\n          pip install mpi4py==3.1.6\n          pip install -e .[memprof,mpi,test]\n          pytest -n 4 armi\n      - name: Find Test Crumbs\n        run: python .github/workflows/find_test_crumbs.py\n"
  },
  {
    "path": ".gitignore",
    "content": "# No non-source python resources\n*.pyc\n*.pyd\n*.pyo\n*.pyx\n\n# No build artifacts\n*.aux\n*.dll\n*.fdb_latexmk\n*.fls\n*.lib\narmi/tests/tutorials/case-suite\nbdist*/\nbin\nbuild\ncoverage.lcov\ncoverage.xml\ncoverage_results.*\ndist*/\ndoc/.apidocs\ndoc/_build\ndoc/anl-afci-177\ndoc/gallery\ndoc/gallery-src/framework/*.yaml\ndoc/tutorials/anl-afci-177*\ndoc/tutorials/case-suite\ndoc/user/tutorials\nhtmlcov/\nmonkeytype.*\ntest_results.*\nwheelhouse\n\n# No workspace crumbs\n**/.coverage*\n**/__pycache__\n**/logs/*\n*.ascii\n*.egg-info/\n*.sublime-project\n*.sublime-workspace\n*.temp\n*~\n.*.swp\n.cache/\n.coverage\n.DS_Store\n.externalToolBuilders/\n.hypothesis/\n.idea/\n.ipynb_checkpoints\n.metadata\n.mypy_cache/\n.project\n.pydevproject\n.pytest_cache/\n.ruff_cache/\n.settings\n.tox\n.vim-bookmarks\n.vscode\narmi-venv/*\ndump-temp-*\ndump-tests*\nphabricator-lint.txt\npytest_verbose.log\npytestdebug.log\npython_details.log\nreportsOutputFiles/\nsystem_info.log\ntags\ntemp-*\nvenv*/\n\n# Ignore certain data files\n*.avi\n*.diff\n*.flux_bg\n*.flux_ufg\n*.h5\n*.html\n*.mp4\n*.nucdata\n*.out\n*.ppm\n*.sum\n*.txt\n*.vtd\n*.vtu\n*.xdmf\n*dlayxs*\n"
  },
  {
    "path": ".gitmodules",
    "content": "[submodule \"doc/tutorials/armi-example-app\"]\n\tpath = doc/tutorials/armi-example-app\n\turl = https://github.com/terrapower/armi-example-app.git\n"
  },
  {
    "path": ".licenserc.json",
    "content": "{\n  \"**/*.py\": \"# Copyright \"\n}\n"
  },
  {
    "path": "AUTHORS",
    "content": "# This is the list of ARMI's contributors.\n#\n# This may not list everyone who has ever contributed code, important ideas, or discussions to ARMI. But this is a good\n# faith attempt to give credit where it is due.\nTerraPower, LLC\nAaron Reynolds (aaronjamesreynolds)\nAidan McDonald (AidanMcDonald)\nAlex James (alexhjames)\nAntoine Margeride (amargeride)\nArrielle Opotowsky (opotowsky)\nAshley Thompson (Ashlita6)\nBharat Medasani (mbk-tp)\nBrandon LaFleur (bdlafleur)\nBrian Sculac (bsculac)\nCasey Stocking (clstocking)\nChris Keckler (keckler)\nChris Wong (crswong888)\nChristen McKenzie (chris10mckenz)\nDavid Pham (dpham-materials)\nDrew Johnson (drewejohnson, drewj-tp, drewj-usnctech)\nDustin Langewisch (dlangewisch)\nEvan Albright\nGraham Malmgren\nHunter Smith (HunterPSmith)\nJacob Hader (jakehader)\nJames Marshall\nJason Meng (jasonbmeng)\nJeff Baylor (jeffbaylor)\nJinan Yang (jyang-TP)\nJohn Stilley (john-science)\nJonathon Shimwell (shimwell)\nJoshua Chen (joshuavictorchen)\nKayla Clements (clemekay)\nLim Swee Kiat (greentfrapp)\nMark Onufer (onufer)\nMichael Castillo (kasticrunch, mcastillo10)\nMichael Huang (LMikeH)\nMichael Jarrett (mgjarrett)\nMichael Johnson (mikepjohnson)\nMitch Young (youngmit)\nNick Touran (ntouran, partofthething)\nNicole Powell (nipowell)\nPaul Romano (paulromano)\nPeter McNabb\nSamual Miller (sammiller11235)\nScott Yak (scottyak)\nTian Jing (TianJingwd)\nTommy Cisneros (sombrereau)\nTony Alberti (albeanth)\nVirinder Sandhu (Nebbychadnezzar)\nWyatt Scherer (wcscherer)\nZachary Prince (zachmprince)\n"
  },
  {
    "path": "CONTRIBUTING.md",
    "content": "# Contribution License Agreement\n\nFor information on how to contribute to ARMI, see [our official documentation](https://terrapower.github.io/armi/developer/first_time_contributors.html).\n\nThis Contribution License Agreement (**\"Agreement\"**) is agreed to by the party signing below (**\"You\"**), and conveys certain license rights to TerraPower, LLC and its affiliates (**\"TerraPower\"**) for Your contributions to TerraPower open source projects. This Agreement is effective as of the latest signature date below.\n\n## 1. Definitions.\n\n**\"Code\"** means the computer software code, whether in human-readable or machine-executable form, that is delivered by You to TerraPower under this Agreement.\n\n**\"Project\"** means any of the projects owned or managed by TerraPower in which software is offered under a license approved by the Open Source Initiative (OSI) ([www.opensource.org](http://www.opensource.org)) and documentation offered under an OSI or a Creative Commons license (https://creativecommons.org/licenses).\n\n**\"Submit\"** is the act of uploading, submitting, transmitting, or distributing code or other content to any Project, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Project for the purpose of discussing and improving that Project, but excluding communication that is conspicuously marked or otherwise designated in writing by You as \"Not a Submission.\"\n\n**\"Submission\"** means the Code and any other copyrightable material Submitted by You, including any associated comments and documentation.\n\n## 2. Your Submission.\n\nYou must agree to the terms of this Agreement before making a Submission to any Project. This Agreement covers any and all Submissions that You, now or in the future (except as described in Section 4 below), Submit to any Project.\n\n## 3. 
Originality of Work.\n\nYou represent that each of Your Submissions is entirely Your original work. Should You wish to Submit materials that are not Your original work, You may Submit them separately to the Project if You (a) retain all copyright and license information that was in the materials as You received them, (b) in the description accompanying Your Submission, include the phrase \"Submission containing materials of a third party:\" followed by the names of the third party and any licenses or other restrictions of which You are aware, and (c) follow any other instructions in the Project’s written guidelines concerning Submissions.\n\n## 4. Your Employer.\n\nReferences to \"employer\" in this Agreement include Your employer or anyone else for whom You are acting in making Your Submission, e.g. as a contractor, vendor, or agent. If Your Submission is made in the course of Your work for an employer or Your employer has intellectual property rights in Your Submission by contract or applicable law, You must secure permission from Your employer to make the Submission before signing this Agreement. In that case, the term \"You\" in this Agreement will refer to You and the employer collectively. If You change employers in the future and desire to Submit additional Submissions for the new employer, then You agree to sign a new Agreement and secure permission from the new employer before Submitting those Submissions.\n\n## 5. Licenses.\n\n### a. Copyright License.\n\nYou grant TerraPower, and those who receive the Submission directly or indirectly from TerraPower, a perpetual, worldwide, non-exclusive, royalty-free, irrevocable license in the Submission to reproduce, prepare derivative works of, publicly display, publicly perform, and distribute the Submission and such derivative works, and to sublicense any or all of the foregoing rights to third parties.\n\n### b. 
Patent License.\n\nYou grant TerraPower, and those who receive the Submission directly or indirectly from TerraPower, a perpetual, worldwide, non-exclusive, royalty-free, irrevocable license under Your patent claims that are necessarily infringed by the Submission or the combination of the Submission with the Project to which it was Submitted to make, have made, use, offer to sell, sell and import or otherwise dispose of the Submission alone or with the Project.\n\n### c. Other Rights Reserved.\n\nEach party reserves all rights not expressly granted in this Agreement. No additional licenses or rights whatsoever (including, without limitation, any implied licenses) are granted by implication, exhaustion, estoppel or otherwise.\n\n## 6. Representations and Warranties.\n\nYou represent that You are legally entitled to grant the above licenses. You represent that each of Your Submissions is entirely Your original work (except as You may have disclosed under Section 3). You represent that You have secured permission from Your employer to make the Submission in cases where Your Submission is made in the course of Your work for Your employer or Your employer has intellectual property rights in Your Submission by contract or applicable law. If You are signing this Agreement on behalf of Your employer, You represent and warrant that You have the necessary authority to bind the listed employer to the obligations contained in this Agreement. You are not expected to provide support for Your Submission, unless You choose to do so. UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING, AND EXCEPT FOR THE WARRANTIES EXPRESSLY STATED IN SECTIONS 3, 4, AND 6, THE SUBMISSION PROVIDED UNDER THIS AGREEMENT IS PROVIDED WITHOUT WARRANTY OF ANY KIND, INCLUDING, BUT NOT LIMITED TO, ANY WARRANTY OF NONINFRINGEMENT, MERCHANTABILITY, OR FITNESS FOR A PARTICULAR PURPOSE.\n\n## 7. 
Notice to TerraPower.\n\nYou agree to notify TerraPower in writing of any facts or circumstances of which You later become aware that would make Your representations in this Agreement inaccurate in any respect.\n\n## 8. Information about Submissions.\n\nYou agree that contributions to Projects and information about contributions may be maintained indefinitely and disclosed publicly, including Your name and other information that You submit with Your Submission.\n\n## 9. Governing Law/Jurisdiction.\n\nThis Agreement is governed by the laws of the State of Washington, USA and the parties consent to exclusive jurisdiction and venue in the federal courts located in King County, Washington, USA unless no federal subject matter jurisdiction exists, in which case the parties consent to exclusive jurisdiction and venue in the Superior Court of King County, Washington, USA. The parties waive all defenses of lack of personal jurisdiction and forum non-conveniens.\n\n## 10. Entire Agreement/Assignment.\n\nThis Agreement is the entire agreement between the parties, and supersedes any and all prior agreements, understandings or communications, written or oral, between the parties relating to the subject matter hereof. This Agreement may be assigned by TerraPower.\n\nPlease select one of the options below and sign as indicated. By signing, You accept and agree to the terms of this Contribution License Agreement for Your present and future Submissions to TerraPower.\n"
  },
  {
    "path": "LICENSE.md",
    "content": "                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      
form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. 
Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. 
You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. 
You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. 
(Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright 2020 TerraPower, LLC\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "README.rst",
    "content": "\n|Build Status| |Code Coverage| |Commit Activity| |Good First Issues|\n\n#################\nARMI Introduction\n#################\n\nThe Advanced Reactor Modeling Interface (ARMI\\ :sup:`®`) is an open-source tool that streamlines your nuclear reactor\ndesign/analysis needs by providing a software *reactor at your fingertips* and a rich ecosystem of utilities working in\nconcert. It is made for and by professional reactor analysis teams and is maintained by\n`TerraPower LLC <http://terrapower.com/>`_, a nuclear technology development company.\n\nARMI:\n\n* Provides a hub-and-spoke mechanism to standardize communication and coupling between physics kernels and the\n  specialist analysts who use them,\n\n* Facilitates the creation and execution of detailed models and complex analysis methodologies,\n\n* Provides an ecosystem within which to rapidly and collaboratively build new analysis and physics simulation\n  capabilities, and\n\n* Provides useful utilities to assist in reactor development.\n\nA few demos of ARMI can be seen in the `ARMI example gallery <https://terrapower.github.io/armi/gallery/index.html>`_.\n\nUsing ARMI plus a collection of ARMI-aware physics plugins, an engineering team can perform a full analysis of a reactor\nsystem and then repeat the same level of analysis with some changed input parameters for almost no additional cost. Even\nbetter, thousands of perturbed cases can be executed in parallel on large clusters, helping conceptual design teams home\nin on an optimal design. Or design teams can analyze sensitivities all the way from, for example, an impurity in a\ncontrol material to the peak structural temperature in a design-basis transient.\n\n.. note:: ARMI does not come with a full selection of physics kernels. They will need to be acquired or developed for\n   your specific project in order to make full use of this tool. 
Many of the example use-cases discussed in this manual\n   require functionality that is not included in the open-source ARMI Framework.\n\nIn general, ARMI aims to enhance the quality, ease, and rigor of computational nuclear reactor design and analysis.\nAdditional high-level overview about this system can be found in [#touranarmi]_.\n\n\n.. list-table:: Quick links\n   :widths: 30 70\n\n   * - Source code\n     - https://github.com/terrapower/armi\n   * - Documentation\n     - https://terrapower.github.io/armi\n   * - First time contributor's guide\n     - https://terrapower.github.io/armi/developer/first_time_contributors.html\n   * - Bug tracker\n     - https://github.com/terrapower/armi/issues\n   * - Plugin directory\n     - https://github.com/terrapower/armi-plugin-directory\n   * - Contact\n     - armi-devs@terrapower.com\n\nQuick start\n***********\nBefore starting, you need to have `Python <https://www.python.org/downloads/>`_ 3.9+.\n\nGet the ARMI code, install the prerequisites, and fire up the launcher with the following commands. You probably want to\ndo this in a virtual environment as described in the\n`Installation documentation <https://terrapower.github.io/armi/installation.html>`_. 
Otherwise, the dependencies could\nconflict with your system dependencies.\n\nFirst, upgrade your version of pip::\n\n    $ pip install -U pip>=22.1\n\nNow clone and install ARMI::\n\n    $ git clone https://github.com/terrapower/armi\n    $ cd armi\n    $ pip install -e .\n    $ armi --help\n\nThe ARMI tests are meant to be run using `pytest <https://docs.pytest.org/en/8.0.x/>`_ locally::\n\n    $ pip install -e \".[test]\"\n    $ pytest -n 4 armi\n\nFrom here, we recommend going through a few of our\n`gallery examples <https://terrapower.github.io/armi/gallery/index.html>`_ and\n`tutorials <https://terrapower.github.io/armi/tutorials/index.html>`_ to start touring the features and capabilities and\nthen move on to the `User Manual <https://terrapower.github.io/armi/user/index.html>`_.\n\n\nBackground\n**********\nNuclear reactor design requires, among other things, answers to the following questions:\n\n* Where are the neutrons? How fast are they moving? In which direction?\n\n* How quickly are atomic nuclei splitting? How long until the fuel runs out? How many atoms in the structure are being\n  energetically displaced?\n\n* How much heat do these reactions produce? How quickly must coolant flow past the fuel to maintain appropriate\n  temperatures? What are the temperatures of the fuel, coolant, and structure?\n\n* Can the structural arrangement support itself given the temperatures and pressures induced by the flowing coolant? For\n  how long?\n\n* If a pump loses power or a control rod accidentally withdraws, how quickly will the chain reaction stop while keeping\n  radiation contained?\n\n* How much used nuclear fuel is generated per useful energy produced? 
How long until it decays to stability?\n\n* Where and when should we move the fuel to most economically maintain the chain reaction?\n\n* What's the dose and activation above the head and in the secondary loop?\n\n* How does containment handle various postulated accidents?\n\n* How does the building handle earthquakes?\n\nDigital computers have assisted in nuclear technology development since the days of the ENIAC in the 1940s. We now\nunderstand reactor physics well enough to build detailed simulations, which can answer many of these design questions in\na cost-effective, and flexible manner. This allows us to simulate all kinds of different reactors with different fuels,\ncoolants, moderators, power levels, safety systems, and power cycles. We can run our virtual reactors through the\ndecades, tossing various off-normal conditions at them now and then, to see how they perform in terms of capability,\neconomics, and safety.\n\nPerhaps surprisingly, some nuclear software written in the 1960s is still in use today. These codes are validated\nagainst physical experiments that no longer exist. Meanwhile, new cutting-edge nuclear software is being developed for\ntodays powerful computers. Both old and new, these tools are often challenging to operate and coordinate to produce a\nfull reactor analysis.\n\nThe ARMI approach was born out of this situation: how can we best leverage an eclectic mix of legacy and modern tools\nwith a small team to do full-scope analysis? We built a framework that lets us automate the tedious, uncoupled, and\nerror-prone parts of reactor engineering/analysis work. We can turn around a very meaningful and detailed core analysis\ngiven a major change (e.g. change power by 50%) in just a few weeks. We can dispatch hundreds of parameter sweeps to\nmultiple machines and then perform multi-objective optimization on the resulting design space.\n\nThe ARMI system is largely written in the Python programming language. 
Its high-level nature allows nuclear and\nmechanical engineers to rapidly automate their analysis tasks from their sub-specialties. This helps eliminate the\ntranslation step between computer-scientists and power plant design engineers. This allows good division of labor: the\ncomputer scientists can focus on the overall performance and maintainability of the framework, while the power plant\nengineers focus on power plant engineering.\n\nWe have spent over 10 years developing this system. Because of ARMI's high-level nature, we believe we can collaborate\neffectively with all ongoing reactor software developments.\n\nCommunication and coupling\n**************************\nARMI provides a central place for all physics kernels to interact: the Reactor Model. All modules read *state*\ninformation from this Reactor and write their output to it. This common interface allows seamless communication and\ncoupling between different physics sub-specialties. If you plug one new physics kernel into ARMI, it becomes coupled to\nN other kernels. The ARMI Framework, depicted in green below, is the majority of the open source package. Several\nskeletal analysis routines are included as well to perform basic data management and to help align efforts on external\nphysics kernels.\n\n.. figure:: https://terrapower.github.io/armi/_static/armiSchematicView.png\n   :figclass: align-center\n\n   **Figure 1.** The schematic representation of the ARMI data model.\n\n\nAutomation\n**********\nARMI can quickly and easily produce complex input files with high levels of detail in various approximations. This\nenables users to perform rapid high-fidelity analyses to make sure all important physics are captured. It also enables\nsensitivity studies of different modeling approximations (e.g. symmetries, transport vs. diffusion vs. Monte Carlo,\nsubchannel vs. CFD, etc.).\n\n\n.. 
figure:: https://terrapower.github.io/armi/_static/armiGeometries.png\n   :figclass: align-center\n\n   **Figure 2.** A variety of approximations in hexagonal geometry (1/3-core, full core, pin detailed, etc.) are shown,\n   all derived from one consistent input file. ARMI supports Cartesian, Hex, RZ, and RZTheta geometric grids and\n   includes many geometric components. Additionally, users can provide custom geometric elements.\n\n\nNew analysis and physics capabilities\n*************************************\nThe ARMI reactor model is fully accessible via a Python-based API, meaning that power-users and developers have full\naccess to the details of the plant at all times. Developers adding new physics features can take advantage of the ARMI\ndata management structure by simply reading and writing to the Reactor state. Leveraging the infrastructure of ARMI,\nprogress can be made rapidly.\n\nPower-user analysts can modify the plant in many ways. For instance, removing all sodium coolant is a one-liner::\n\n    core.setNumberDensity('NA23',0.0)\n\nand finding the peak power density is easy::\n\n    core.getMaxParam('pdens')\n\nAny ARMI state can be written out to whichever format the user desires, meaning that nominally identical cases can be\nproduced for multiple similar codes in sensitivity studies. To read power densities, simply read them off the assembly\nobjects. Instead of producing spreadsheets and making plots manually, analysts may write scripts to generate output\nreports that run automatically.\n\nWriting a module within ARMI automatically features access to the ARMI API, including:\n\n* Cross section processing\n* Material properties\n* Thermal expansion\n* Database persistence\n* Data visualization\n* A code testing, documentation, and version control system\n\n\nUse cases\n*********\nGiven an input describing a reactor, a typical ARMI run loops over a set of plugins in a certain sequence. 
Some plugins\ntrigger third-party simulation codes, producing input files for them, executing them, and translating the output back\nonto the reactor model. Other plugins perform physics simulations directly.\n\nFor example, one ARMI sequence may involve the calculation of:\n\n* nuclear cross sections\n* global flux and power\n* subchannel temperatures\n* duct wall pressures\n* cladding strain and wastage\n* fission gas pressure\n* reactivity feedbacks\n* flow orificing\n* the equilibrium fuel cycle\n* control rod worth\n* shutdown margin\n* frequency stability margins\n* peak cladding temperature\n* transient analysis\n* total levelized cost of electricity for the run\n\nAnother ARMI sequence may simply compute the cost of feed uranium and enrichment in an initial core and quit.\n\nLarger siumulations may also run through the multi-objective design optimization system, which runs many cases with\ninput perturbations to help find the best overall system, considering all important physics at the same time.\n\nOther interest may come from the following:\n\nThe Research Scientist\n======================\nA nuclear reactor research scientist, at a national lab or university, may benefit from ARMI. An ARMI workflow can\nreduce the time spent on data management. 
ARMI can handle the tedium so that researchers can better focus on designing\nand testing their research.\n\nFor example, if an ARMI input file describing the FFTF reactor is provided, the researcher can start running benchmark\ncases with their new code method very rapidly, rather than spending the time building their own FFTF model.\n\nIf someone wants to try varying nuclear cross sections by a percent here and there to compute sensitivities, ARMI is a\nperfect platform upon which to operate.\n\nIf a reactor designer wants to try out a new Machine Learning algorithm for fuel management, plugging it into ARMI and\nhaving it run on all the physics kernels of the ARMI ecosystem will be a great way to prove its true value (note that\nthis requires a rich ARMI physics ecosystem).\n\nThe Nuclear Startup Engineer\n============================\nAs various companies evaluate their ideas, they need tools for analysis. They can pick up ARMI and save 10 years of\ndevelopment and hit the ground running by plugging in their design-specific physics kernels and proprietary design\ninputs. ARMI's parameter sweep features, reactor model, and parallel utilities will all come in handy immediately.\n\n\nOperating and Vendor Engineers\n==============================\nPeople at well-established utilities or vendors can hook ARMI into their legacy systems and increase their overall\nproductivity.\n\nThe Enthusiast\n==============\nIf an enthusiast wants to try out a reactor idea they have, they can use ARMI (plus some physics kernels) to quickly get\nsome performance metrics. They can see if their idea has wings, and if it does, they can then find a way to bring it to\nengineering and commercial reality.\n\n\nHistory of ARMI\n***************\nARMI was originally created by TerraPower, LLC near Seattle WA starting in 2009. 
Its founding mission was to determine\nthe optimal fuel management operations required to transition a fresh Traveling Wave Reactor core from startup into an\nequilibrium state. It started out automating the Argonne National Lab (ANL) fast reactor neutronics codes, MC2 and\nREBUS. The reactor model design was made with the intention of adding other physics capabilities later. Soon, simple\nthermal hydraulics were added and it's grown ever since. It has continuously evolved towards a general reactor analysis\nframework.\n\nFollowing requests by outside parties to use ARMI, we started working on a more modular architecture for ARMI, allowing\nsome of the intertwined physics capabilities to be separated out as plugins from the standalone framework.\n\nThe nuclear industry is small, and it faces many challenges. It also has a tradition of secrecy. As a result, there is\nrisk of overlapping work being done by other entities.\n\nWe hypothesize that collaborating on software systems can help align some efforts worldwide, increasing quality and\nefficiency. In reactor development, the idea is generally cheap. It's the shakedown, technology and supply chain\ndevelopment, engineering demo, and commercial demo that are the hard parts.\n\nThus, ARMI was released under an open-source license in 2019 to facilitate mutually beneficial collaboration across the\nnuclear industry, where many teams are independently developing similar reactor analysis/automation frameworks.\n\nWe also hope that if more people can rapidly analyze the performance of their reactor ideas, limited available funding\ncan be spent more effectively.\n\n\nSystem Requirements\n*******************\nBeing largely written in the Python programming language, the ARMI system works on most platforms.  It can perform\nmeaningful analysis on a single laptop, but the full value of design optimization and large problems is realized with\nparallel runs over large clusters (using the optional ``mpi4py`` library).\n\n.. 
_getting-help:\n\nGetting Help\n************\nYou can get help with ARMI by either making issues on our `github page <https://github.com/terrapower/armi/issues>`_ or\nby e-mailing armi-devs@terrapower.com.\n\nDisclaimers\n***********\nDue to TerraPower goals and priorities, many ARMI modules were developed with the sodium-cooled fast reactors as a\ntarget, and are not necessarily yet optimized for other plants. This is a known issue with code organization and we are\nworking on it. On the other hand, the framework is sufficiently general that people have modeled other reactor types\nwith ARMI, including thermal reactors.\n\nARMI was developed within a rapidly changing R&D environment. It evolved accordingly, and naturally carries some legacy.\nWe continuously attempt to identify and update problematic parts of the code. Users should understand that ARMI is not a\npolished consumer software product, but rather a powerful and flexible engineering tool. It has the potential to\naccelerate work on many kinds of reactors.\n\nARMI has been written to support specific engineering/design tasks. As such, polish in the GUIs and output is somewhat\nlacking.\n\nThe ARMI framework uses the ``camelCase`` style, which is not the standard style for Python. As this is an issue of\nstyle, it is not considered worth the API-breaking cost to our downstream users to change it.\n\n\nLicense\n*******\nTerraPower and ARMI are registered trademarks of TerraPower, LLC. Other trademarks and registered trademarks used in\nthis Manual are the property of the respective trademark holders.\n\nThe ARMI system is licensed as follows:\n\n.. 
code-block:: none\n\n\tCopyright 2009 TerraPower, LLC\n\n\tLicensed under the Apache License, Version 2.0 (the \"License\");\n\tyou may not use this file except in compliance with the License.\n\tYou may obtain a copy of the License at\n\n\t    http://www.apache.org/licenses/LICENSE-2.0\n\n\tUnless required by applicable law or agreed to in writing, software\n\tdistributed under the License is distributed on an \"AS IS\" BASIS,\n\tWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n\tSee the License for the specific language governing permissions and\n\tlimitations under the License.\n\nBe careful when including any dependency in ARMI (say in the ``pyproject.toml`` file) not to include anything with a\nlicense that supersedes our Apache license. For instance, any third-party Python library included in ARMI with a GPL\nlicense will make the whole project fall under the GPL license. But a lot of potential users of ARMI will want to keep\nsome of their work private, so we can't allow any GPL dependencies.\n\nFor that reason, it is generally considered best-practice in the ARMI ecosystem to only use third-party Python libraries\nthat have MIT or BSD licenses.\n\n.. [#touranarmi] Touran, Nicholas W., et al. \"Computational tools for the integrated design of advanced nuclear reactors.\"\n   Engineering 3.4 (2017): 518-526. https://doi.org/10.1016/J.ENG.2017.04.016\n\n.. |Build Status| image:: https://github.com/terrapower/armi/actions/workflows/unittests.yaml/badge.svg?branch=main\n    :target: https://github.com/terrapower/armi/actions/workflows/unittests.yaml\n\n.. |Code Coverage| image:: https://codecov.io/gh/terrapower/armi/branch/main/graph/badge.svg\n    :target: https://app.codecov.io/gh/terrapower/armi/tree/main\n\n.. |Commit Activity| image:: https://img.shields.io/github/commit-activity/m/terrapower/armi\n    :target: https://github.com/terrapower/armi/pulse\n\n.. 
|Good First Issues| image:: https://img.shields.io/github/issues/terrapower/armi/good%20first%20issue\n    :target: https://github.com/terrapower/armi/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22\n"
  },
  {
    "path": "armi/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nWelcome to the Advanced Reactor Modeling Interface (ARMI).\n\nThis module initializes the ARMI platform. The bootstrap process is broken into several phases:\n\n* Import fundamental dependencies in Python library and some third-party libs\n* Investigate environment: Check Python version, code version, MPI situation, and TTY/GUI/interactivity,\n* Set up temp dirs\n* Set up printout table formats (in preparation of logging info)\n* Initialize all possible nuclide objects in the nuclide directory\n* Discover and register available built-in :py:mod:`plugins <armi.plugins>` (custom ones are registered after inputs)\n* Discover and define all potential configuration settings from available plugins\n* Read input files\n* Update :py:mod:`nuclide directory <armi.nucDirectory>` with depletion info based on config\n* Discover and define all state :py:mod:`Parameters <armi.reactor.parameters>` on data model (maybe dependent on config)\n* Discover :py:mod:`Entry points <armi.cli>` from plugins\n* Choose entry point based on user command\n\nIf using the ``run`` entry point, additional work is done:\n\n* Build :py:mod:`reactor model <armi.reactor>` based on input\n* Build :py:mod:`operator object <armi.operators>` with specific calculation loop\n* Build ordered interface stack based on configuration settings\n* Begin looping over interface stack, operating upon data model 
according to operator design\n* Loop until complete\n* Wrap up\n* Quit\n\"\"\"\n\n# ruff: noqa: F401\nimport atexit\nimport datetime\nimport importlib\nimport os\nimport signal\nimport subprocess\nimport sys\nimport traceback\nimport warnings\nfrom typing import List, Optional, Type\n\nimport __main__ as main\n\n# The _bootstrap module performs operations that may need to occur before it is necessarily safe to import the rest of\n# the ARMI system. Things like:\n# - configure the MPI environment\n# - detect the nature of interaction with the user (terminal UI, GUI, unsupervized, etc)\n# - Initialize the nuclide database\nimport armi._bootstrap\nfrom armi import apps, cli, context, pluginManager, plugins, runLog\nfrom armi.context import (\n    APP_DATA,\n    CURRENT_MODE,\n    DOC,\n    MPI_COMM,\n    MPI_DISTRIBUTABLE,\n    MPI_NODENAME,\n    MPI_NODENAMES,\n    MPI_RANK,\n    MPI_SIZE,\n    RES,\n    ROOT,\n    START_TIME,\n    USER,\n    Mode,\n)\nfrom armi.meta import __version__\nfrom armi.nucDirectory import nuclideBases\nfrom armi.reactor import flags, parameters\n\n# ARMI does not configure its own application by default. This is mostly to catch issues involving calling code that\n# requires the framework to be configured before that has explicitly taken place. 
An application should call\n# `configure()` with its App class in order for ARMI to work properly\n_app: Optional[apps.App] = None\n\n_ARMI_CONFIGURE_CONTEXT: Optional[str] = None\n\n# Advanced flag used in documentation builds to avoid isConfigured guards.\n_ignoreConfigures = False\n\n\ndef disableFutureConfigures():\n    \"\"\"Exposed function to ensure armi.configure() isn't called more than once.\"\"\"\n    global _ignoreConfigures\n    _ignoreConfigures = True\n\n\ndef isStableReleaseVersion(version=None):\n    \"\"\"Determine if the version should be considered a stable release.\"\"\"\n    version = version or __version__\n    return \"-\" not in version\n\n\ndef init(fName=None, cs=None, skipInspection=False, choice=None):\n    \"\"\"\n    Scan a directory for armi inputs and load one to interact with.\n\n    .. impl:: Settings are used to define an ARMI run.\n        :id: I_ARMI_SETTING1\n        :implements: R_ARMI_SETTING\n\n        This method initializes an ARMI run, and if successful returns an Operator. That operator is designed to drive\n        the reactor simulation through time steps to simulate its operation. This method takes in a settings file or\n        object to initialize the operator. Whether a settings file or object is supplied, the operator will be built\n        based on the those settings. Because the total collection of settings can be modified by developers of ARMI\n        applications, providing these settings allow ARMI end-users to granularly define their simulations.\n\n    Parameters\n    ----------\n    fName : str, optional\n        The path to a settings file to load: my_case.yaml\n    cs : Settings, optional\n        If supplied, this CS object will supersede the other case input methods and use the object directly.\n    skipInspection : bool, optional\n        Whether or not the inputs should be checked for valid settings. 
Default is False.\n    choice : int, optional\n        Automatically run with this item out of the menu that would be produced by the existing YAML files.\n\n    Examples\n    --------\n    >>> o = armi.init()\n    \"\"\"\n    from armi import cases, settings\n\n    if cs is None:\n        if fName is None:\n            fName = settings.promptForSettingsFile(choice)\n        cs = settings.Settings(fName)\n\n    armiCase = cases.Case(cs=cs)\n    if not skipInspection:\n        armiCase.checkInputs()\n\n    try:\n        return armiCase.initializeOperator()\n    except:  # Catch any and all errors. Naked exception on purpose.\n        # Concatenate errors to the primary log file.\n        runLog.close()\n        raise\n\n\ndef getDefaultPlugins() -> List[Type[plugins.ArmiPlugin]]:\n    \"\"\"\n    Return a list containing the default set of ARMI Framework plugins.\n\n    This is useful for an application to fold all of the ARMI Framework's capabilities into its own set of plugins.\n    \"\"\"\n    from armi import bookkeeping, cli, reactor\n    from armi.physics import fuelCycle, neutronics, safety\n\n    defaultPlugins = [\n        cli.EntryPointsPlugin,\n        bookkeeping.BookkeepingPlugin,\n        fuelCycle.FuelHandlerPlugin,\n        neutronics.NeutronicsPlugin,\n        safety.SafetyPlugin,\n        reactor.ReactorPlugin,\n    ]\n\n    return defaultPlugins\n\n\ndef getDefaultPluginManager() -> pluginManager.ArmiPluginManager:\n    \"\"\"\n    Return a plugin manager containing the default set of ARMI Framework plugins.\n\n    This is useful when using standalone facilities of ARMI without a specific application.\n    \"\"\"\n    pm = plugins.getNewPluginManager()\n    for plugin in getDefaultPlugins():\n        pm.register(plugin)\n\n    return pm\n\n\ndef isConfigured():\n    \"\"\"Returns whether ARMI has been configured with an App.\"\"\"\n    return _app is not None\n\n\ndef getPluginManager() -> Optional[pluginManager.ArmiPluginManager]:\n    
\"\"\"Return the plugin manager, if there is one.\"\"\"\n    global _app\n    if _app is None:\n        return None\n    return _app.pluginManager\n\n\ndef getPluginManagerOrFail() -> pluginManager.ArmiPluginManager:\n    \"\"\"Return the plugin manager. Raise an error if there is none.\"\"\"\n    global _app\n    assert _app is not None, (\n        \"The ARMI plugin manager was requested, no App has been configured. Ensure that `armi.configure()` has been \"\n        \"called before attempting to interact with the plugin manager.\"\n    )\n\n    return _app.pluginManager\n\n\ndef getApp() -> Optional[apps.App]:\n    global _app\n    return _app\n\n\ndef _cleanupOnCancel(signum, _frame):\n    \"\"\"Helper function to clean up upon cancellation.\"\"\"\n    print(f\"Caught Cancel signal ({signum}); cleaning temporary files and exiting...\", file=sys.stderr)\n    context.cleanFastPathAfterSimulation()\n    sys.stdout.flush()\n    sys.stderr.flush()\n    sys.exit(1)  # since we're handling the signal we have to cancel\n\n\ndef _liveInterpreter():\n    \"\"\"Return whether we are running within a live/interactive python interpreter.\"\"\"\n    return not hasattr(main, \"__file__\")\n\n\ndef configure(app: Optional[apps.App] = None, permissive=False):\n    \"\"\"\n    Set the plugin manager for the Framework and configure internals to those plugins.\n\n    Parameters\n    ----------\n    app :\n        An :py:class:`armi.apps.App` instance with which the framework is to be configured. If it is not provided, then\n        the default ARMI App will be used.\n    permissive :\n        Whether or not an error should be produced if ``configure`` is called more than once. 
This should only be set to\n        ``True`` under testing or demonstration purposes, where the contents of otherwise independent scripts need to be\n        run under the same python instance.\n\n    Important\n    ---------\n    Since this affects the behavior of several modules at their import time, it is generally not safe to re-configure\n    the ARMI framework once it has been configured. Therefore this will raise an ``RuntimeError`` if such a\n    re-configuration is attempted, unless ``permissive`` is set to ``True``.\n\n    Notes\n    -----\n    We are planning on encapsulating much of the global ARMI state that gets configured with an App into the App object\n    itself (with some other things going into the Case object). This will provide a number of benefits, the main one\n    being that it will become trivial to re-configure the framework, which is currently not possible.\n    \"\"\"\n    global _app\n    global _ARMI_CONFIGURE_CONTEXT\n\n    if _ignoreConfigures:\n        return\n\n    app = app or apps.App()\n\n    if _app is not None:\n        if permissive and isinstance(app, apps.App):\n            return\n        else:\n            raise RuntimeError(\n                f\"Multiple calls to armi.configure() are not allowed. Previous call from:\\n{_ARMI_CONFIGURE_CONTEXT}\"\n            )\n\n    assert not context.BLUEPRINTS_IMPORTED, (\n        \"ARMI can no longer be configured after blueprints have been imported. 
Blueprints were imported from\"\n        f\":\\n{context.BLUEPRINTS_IMPORT_CONTEXT}\"\n    )\n\n    _ARMI_CONFIGURE_CONTEXT = \"\".join(traceback.format_stack())\n\n    _app = app\n    context.APP_NAME = app.name\n\n    if _liveInterpreter():\n        runLog.LOG.startLog(name=f\"interactive-{app.name}\")\n        cli.splash()\n\n    pm = app.pluginManager\n    parameters.collectPluginParameters(pm)\n    parameters.applyAllParameters()\n    _app.registerPluginFlags()\n\n\ndef applyAsyncioWindowsWorkaround() -> None:\n    \"\"\"\n    Apply Asyncio workaround for Windows and Python 3.8.\n\n    This prevents a NotImplementedError on Windows with Python 3.8 his error showed up during jupyter notebook built-\n    tests and documentation. See https://bugs.python.org/issue37373\n    \"\"\"\n    import asyncio\n\n    if sys.version_info[0] == 3 and sys.version_info[1] >= 8 and sys.platform.startswith(\"win\"):\n        asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())\n\n\napplyAsyncioWindowsWorkaround()\n\n# The ``atexit`` handler is like putting it in a finally after everything.\natexit.register(context.cleanFastPathAfterSimulation)\n\n# register cleanups upon HPC cancellations. Linux clusters will send a different signal. SIGBREAK doesn't exist on\n# non-windows This actually doesn't work in mpi runs because MSMPI's mpiexec does not pass signals.\nif os.name == \"nt\":\n    signal.signal(signal.SIGBREAK, _cleanupOnCancel)\nsignal.signal(signal.SIGINT, _cleanupOnCancel)\n"
  },
  {
    "path": "armi/__main__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nPrimary entry point into ARMI.\n\nThere are a variety of entry points in the ``cli`` package that define the various run options.\nThis invokes them according to command-line user input.\n\"\"\"\n\nimport sys\nimport traceback\n\nfrom armi import apps, configure, context, isConfigured, runLog\nfrom armi.cli import ArmiCLI\n\n\ndef main():\n    # Main entry point into ARMI\n    try:\n        if not isConfigured():\n            configure(apps.App())\n        code = ArmiCLI().run()\n        # sys exit interprets None as 0\n        sys.exit(code)\n    except Exception:\n        # Make sure not to catch all BaseExceptions, lest we catch the expected SystemExit exception\n        runLog.error(\n            f\"Unhandled exception in __main__, rank {context.MPI_RANK} on {context.MPI_NODENAME}.\",\n            file=sys.__stderr__,\n        )\n        runLog.error(traceback.format_exc(), file=sys.__stderr__)\n        if context.MPI_SIZE > 1:\n            runLog.error(\n                f\"Killing all MPI tasks from __main__, rank {context.MPI_RANK}.\",\n                file=sys.__stderr__,\n            )\n            # cleanFastPathAfterSimulation has @atexit.register so it should be called at the end, but mpi. 
Abort\n            # in main will not allow for @atexit.register or except/finally code to be called so\n            # calling here as well\n            context.cleanFastPathAfterSimulation()\n            # .Abort will not allow for @atexit.register or except/finally code to be called\n            context.MPI_COMM.Abort(errorcode=-1)\n\n        raise SystemExit(1)\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "armi/_bootstrap.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Code that needs to be executed before most ARMI components are safe to import.\"\"\"\n\nfrom armi.nucDirectory import nuclideBases  # noqa: E402\n\n# Nuclide bases get built explicitly here to have better determinism\n# about when they get instantiated. The burn chain is not applied\n# at this point, but only after input is read. Nuclides need to be built super early\n# because some import-time code needs them to function. Namely, Block parameter\n# collection uses them to create number density params.\nnuclideBases.factory()\n"
  },
  {
    "path": "armi/apps.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThe base ARMI App class.\n\nThis module defines the :py:class:`App` class, which is used to configure the ARMI\nFramework for a specific application. An ``App`` implements a simple interface for\ncustomizing much of the Framework's behavior.\n\"\"\"\n\n# ruff: noqa: E402\nimport collections\nimport importlib\nimport sys\nfrom typing import Dict, List, Optional, Tuple\n\nfrom armi import context, meta, pluginManager, plugins, settings\nfrom armi.reactor import parameters\nfrom armi.reactor.flags import Flags\nfrom armi.settings import Setting, fwSettings\n\n\nclass App:\n    \"\"\"\n    The highest-level of abstraction for defining what happens during an ARMI run.\n\n    .. impl:: An App has a plugin manager.\n        :id: I_ARMI_APP_PLUGINS\n        :implements: R_ARMI_APP_PLUGINS\n\n        The App class is intended to be subclassed in order to customize the functionality\n        and look-and-feel of the ARMI Framework for a specific use case. 
An App contains a\n        plugin manager, which should be populated in ``__init__()`` with a collection of\n        plugins that are deemed suitable for a given application, as well as other methods\n        which provide further customization.\n\n        The base App class is also a good place to expose some more convenient ways to get\n        data out of the Plugin API; calling the ``pluggy`` hooks directly can sometimes be a\n        pain, as the results returned by the individual plugins may need to be merged and/or\n        checked for errors. Adding that logic here reduces boilerplate throughout the rest\n        of the code.\n    \"\"\"\n\n    name = \"armi\"\n    \"\"\"\n    The program name of the app. This should be the actual name of the python entry\n    point that loads the app, or the name of the module that contains the appropriate\n    __main__ function. For example, if the app is expected to be invoked with ``python\n    -m myapp``, ``name`` should be ``\"myapp\"``\n    \"\"\"\n\n    def __init__(self):\n        \"\"\"\n        This mostly initializes the default plugin manager. 
Subclasses are free to adopt\n        this plugin manager and register more plugins of their own, or to throw it away\n        and start from scratch if they do not wish to use the default Framework plugins.\n\n        For a description of the things that an ARMI plugin can do, see the\n        :py:mod:`armi.plugins` module.\n        \"\"\"\n        self._pluginFlagsRegistered: bool = False\n        self._pm: Optional[pluginManager.ArmiPluginManager] = None\n        self._paramRenames: Optional[Tuple[Dict[str, str], int]] = None\n        self.__initNewPlugins()\n\n    def __initNewPlugins(self):\n        from armi import bookkeeping, cli, reactor\n        from armi.physics import (\n            fuelCycle,\n            fuelPerformance,\n            neutronics,\n            safety,\n            thermalHydraulics,\n        )\n\n        self._pm = plugins.getNewPluginManager()\n        for plugin in (\n            cli.EntryPointsPlugin,\n            bookkeeping.BookkeepingPlugin,\n            fuelCycle.FuelHandlerPlugin,\n            fuelPerformance.FuelPerformancePlugin,\n            neutronics.NeutronicsPlugin,\n            safety.SafetyPlugin,\n            thermalHydraulics.ThermalHydraulicsPlugin,\n            reactor.ReactorPlugin,\n        ):\n            self._pm.register(plugin)\n\n        self._paramRenames = None\n\n    @property\n    def version(self) -> str:\n        \"\"\"Grab the version of this app (defaults to ARMI version).\n\n        Notes\n        -----\n        This is designed to be over-ridable by Application developers.\n        \"\"\"\n        return meta.__version__\n\n    @property\n    def pluginManager(self) -> pluginManager.ArmiPluginManager:\n        \"\"\"Return the App's PluginManager.\"\"\"\n        return self._pm\n\n    def getSettings(self) -> Dict[str, Setting]:\n        \"\"\"Return a dictionary containing all Settings defined by the framework and all plugins.\"\"\"\n        # Start with framework settings\n        settingDefs = 
{setting.name: setting for setting in fwSettings.getFrameworkSettings()}\n\n        # The optionsCache stores options that may have come from a plugin before the setting to\n        # which they apply. Whenever a new setting is added, we check to see if there are any\n        # options in the cache, popping them out and adding them to the setting. If all plugins'\n        # settings have been processed and the cache is not empty, that's an error, because a plugin\n        # must have provided options to a setting that doesn't exist.\n        optionsCache: Dict[str, List[settings.Option]] = collections.defaultdict(list)\n        defaultsCache: Dict[str, settings.Default] = {}\n\n        for pluginSettings in self._pm.hook.defineSettings():\n            for pluginSetting in pluginSettings:\n                if isinstance(pluginSetting, settings.Setting):\n                    name = pluginSetting.name\n                    if name in settingDefs:\n                        raise ValueError(f\"The setting {pluginSetting.name} already exists and cannot be redefined.\")\n                    settingDefs[name] = pluginSetting\n                    # handle when new setting has modifier in the cache (modifier loaded first)\n                    if name in optionsCache:\n                        settingDefs[name].addOptions(optionsCache.pop(name))\n                    if name in defaultsCache:\n                        settingDefs[name].changeDefault(defaultsCache.pop(name))\n                elif isinstance(pluginSetting, settings.Option):\n                    if pluginSetting.settingName in settingDefs:\n                        # modifier loaded after setting, so just apply it (no cache needed)\n                        settingDefs[pluginSetting.settingName].addOption(pluginSetting)\n                    else:\n                        # no setting yet, cache it and apply when it arrives\n                        optionsCache[pluginSetting.settingName].append(pluginSetting)\n           
     elif isinstance(pluginSetting, settings.Default):\n                    if pluginSetting.settingName in settingDefs:\n                        # modifier loaded after setting, so just apply it (no cache needed)\n                        settingDefs[pluginSetting.settingName].changeDefault(pluginSetting)\n                    else:\n                        # no setting yet, cache it and apply when it arrives\n                        defaultsCache[pluginSetting.settingName] = pluginSetting\n                else:\n                    raise TypeError(\n                        \"Invalid setting definition found: {} ({})\".format(pluginSetting, type(pluginSetting))\n                    )\n\n        if optionsCache:\n            raise ValueError(\n                \"The following options were provided for settings that do \"\n                \"not exist. Make sure that the set of active plugins is \"\n                \"consistent.\\n{}\".format(optionsCache)\n            )\n\n        if defaultsCache:\n            raise ValueError(\n                \"The following defaults were provided for settings that do \"\n                \"not exist. Make sure that the set of active plugins is \"\n                \"consistent.\\n{}\".format(defaultsCache)\n            )\n\n        return settingDefs\n\n    def getParamRenames(self) -> Dict[str, str]:\n        \"\"\"\n        Return the parameter renames from all registered plugins.\n\n        This renders a merged dictionary containing all parameter renames from all of the registered\n        plugins. It also performs simple error checking. The result of this operation is cached,\n        since it is somewhat expensive to perform. 
If the App detects that its plugin manager's set\n        of registered plugins has changed, the cache will be invalidated and recomputed.\n        \"\"\"\n        cacheInvalid = False\n        if self._paramRenames is not None:\n            renames, counter = self._paramRenames\n            if counter != self._pm.counter:\n                cacheInvalid = True\n        else:\n            cacheInvalid = True\n\n        if cacheInvalid:\n            currentNames = {pd.name for pd in parameters.ALL_DEFINITIONS}\n\n            renames = dict()\n            for pluginRenames in self._pm.hook.defineParameterRenames():\n                collisions = currentNames & pluginRenames.keys()\n                if collisions:\n                    raise plugins.PluginError(\n                        \"The following parameter renames from a plugin collide with \"\n                        \"currently-defined parameters:\\n{}\".format(collisions)\n                    )\n                pluginCollisions = renames.keys() & pluginRenames.keys()\n                if pluginCollisions:\n                    raise plugins.PluginError(\n                        \"The following parameter renames are already defined by another plugin:\\n{}\".format(\n                            pluginCollisions\n                        )\n                    )\n                renames.update(pluginRenames)\n            self._paramRenames = renames, self._pm.counter\n        return renames\n\n    def registerPluginFlags(self):\n        \"\"\"\n        Apply flags specified in the passed ``PluginManager`` to the ``Flags`` class.\n\n        See Also\n        --------\n        armi.plugins.ArmiPlugin.defineFlags\n        \"\"\"\n        if self._pluginFlagsRegistered:\n            raise RuntimeError(\"Plugin flags have already been registered. 
Cannot do it twice!\")\n\n        for pluginFlags in self._pm.hook.defineFlags():\n            Flags.extend(pluginFlags)\n\n        self._pluginFlagsRegistered = True\n\n    def registerUserPlugins(self, pluginPaths):\n        r\"\"\"\n        Register additional plugins passed in by importable paths.\n        These plugins may be provided e.g. by an application during startup\n        based on user input.\n\n        Format expected to be a list of full namespaces to plugin classes.\n        There should be a comma between individual plugins and dots representing\n        the file path or importable python namespace.\n\n        Examples\n        --------\n        importable namespace:\n        ``armi.stuff.plugindir.pluginMod.pluginCls,armi.whatever.plugMod2.plugCls2``\n\n        or on Linux/Unix:\n        ``/path/to/pluginMod.py:pluginCls,/path/to/plugMod2.py:plugCls2``\n\n        or on Windows:\n        ``C:\\\\path\\\\to\\\\pluginMod.py:pluginCls,C:\\\\\\\\path\\\\to\\\\plugMod2.py:plugCls2``\n\n        Notes\n        -----\n        These paths are meant to be taken from a settings file, though this method\n        is public. The idea is that these \"user plugins\" differ from regular plugins\n        because they are defined during run time, not import time. 
As such, we\n        restrict their flexibility and power as compared to the usual ArmiPlugins.\n        \"\"\"\n        for pluginPath in pluginPaths:\n            if self._isPluginRegistered(pluginPath):\n                continue\n            if \".py:\" in pluginPath:\n                # The path is of the form: /path/to/why.py:MyPlugin\n                self.__registerUserPluginsAbsPath(pluginPath)\n            else:\n                # The path is of the form: armi.thing.what.MyPlugin\n                self.__registerUserPluginsInternalImport(pluginPath)\n\n    def _isPluginRegistered(self, pluginPath: str):\n        r\"\"\"\n        Check if the plugin at the provided path is already registered.\n\n        The expected path formats are:\n        ------------------------------\n        importable namespace:\n        ``armi.stuff.plugindir.pluginMod.pluginCls``\n\n        or on Linux/Unix:\n        ``/path/to/pluginMod.py:pluginCls``\n\n        or on Windows:\n        ``C:\\\\path\\\\to\\\\pluginMod.py:pluginCls``\n\n        Parameters\n        ----------\n        pluginPath : str\n            String path to a userPlugin.\n\n        Returns\n        -------\n        bool\n            Whether or not the plugin name is already registered with the manager.\n        \"\"\"\n        if \":\" in pluginPath:\n            pluginName = pluginPath.strip().split(\":\")[-1]\n        else:\n            pluginName = pluginPath.strip().split(\".\")[-1]\n\n        return self._pm.has_plugin(pluginName)\n\n    def __registerUserPluginsAbsPath(self, pluginPath):\n        \"\"\"Helper method to register a single UserPlugin via absolute path.\n\n        Here the given path is of the form: /path/to/why.py:MyPlugin\n        \"\"\"\n        assert pluginPath.count(\".py:\") == 1, f\"Invalid plugin path: {pluginPath}\"\n\n        # split the settings string into file path and class name\n        filePath, className = pluginPath.split(\".py:\")\n        filePath += \".py\"\n\n        spec 
= importlib.util.spec_from_file_location(className, filePath)\n        mod = importlib.util.module_from_spec(spec)\n        sys.modules[spec.name] = mod\n        spec.loader.exec_module(mod)\n        plugin = getattr(mod, className)\n        assert issubclass(plugin, plugins.UserPlugin)\n        self._pm.register(plugin)\n\n        # ensure UserPlugin flags are loaded\n        newFlags = plugin.defineFlags()\n        if newFlags:\n            Flags.extend(newFlags)\n\n    def __registerUserPluginsInternalImport(self, pluginPath):\n        \"\"\"Helper method to register a single UserPlugin via internal import.\n\n        Here the given path is of the form: armi.thing.what.MyPlugin\n        \"\"\"\n        names = pluginPath.strip().split(\".\")\n        modPath = \".\".join(names[:-1])\n        clsName = names[-1]\n        mod = importlib.import_module(modPath)\n        plugin = getattr(mod, clsName)\n        assert issubclass(plugin, plugins.UserPlugin)\n        self._pm.register(plugin)\n\n        # ensure UserPlugin flags are loaded\n        newFlags = plugin.defineFlags()\n        if newFlags:\n            Flags.extend(newFlags)\n\n    @property\n    def splashText(self):\n        \"\"\"\n        Return a textual splash screen.\n\n        Specific applications will want to customize this, but by default the ARMI one\n        is produced, with extra data on the App name and version, if available.\n        \"\"\"\n        # typical ARMI splash text\n        splash = r\"\"\"\n+===================================================+\n|            _      ____     __  __    ___          |\n|           / \\    |  _ \\   |  \\/  |  |_ _|         |\n|          / _ \\   | |_) |  | |\\/| |   | |          |\n|         / ___ \\  |  _ <   | |  | |   | |          |\n|        /_/   \\_\\ |_| \\_\\  |_|  |_|  |___|         |\n|        Advanced  Reactor  Modeling Interface      |\n|                                                   |\n|                    version {0:10s}            
 |\n|                                                   |\"\"\".format(meta.__version__)\n\n        # add the name/version of the current App, if it's not the default\n        if context.APP_NAME != \"armi\":\n            from armi import getApp\n\n            splash += r\"\"\"\n|---------------------------------------------------|\n|   {0:>17s} app version {1:10s}        |\"\"\".format(context.APP_NAME, getApp().version)\n\n        # bottom border of the splash\n        splash += r\"\"\"\n+===================================================+\n\"\"\"\n\n        return splash\n"
  },
  {
    "path": "armi/bookkeeping/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"The bookkeeping package handles data persistence, reporting, and some debugging.\"\"\"\n\nfrom armi import plugins\n\n\nclass BookkeepingPlugin(plugins.ArmiPlugin):\n    @staticmethod\n    @plugins.HOOKIMPL\n    def exposeInterfaces(cs):\n        from armi.bookkeeping import (\n            historyTracker,\n            mainInterface,\n            memoryProfiler,\n            snapshotInterface,\n        )\n        from armi.bookkeeping.db import databaseInterface\n        from armi.bookkeeping.report import reportInterface\n\n        interfaceInfo = []\n        interfaceInfo += plugins.collectInterfaceDescriptions(mainInterface, cs)\n        interfaceInfo += plugins.collectInterfaceDescriptions(databaseInterface, cs)\n        interfaceInfo += plugins.collectInterfaceDescriptions(historyTracker, cs)\n        interfaceInfo += plugins.collectInterfaceDescriptions(memoryProfiler, cs)\n        interfaceInfo += plugins.collectInterfaceDescriptions(reportInterface, cs)\n        interfaceInfo += plugins.collectInterfaceDescriptions(snapshotInterface, cs)\n\n        return interfaceInfo\n\n    @staticmethod\n    @plugins.HOOKIMPL\n    def defineEntryPoints():\n        from armi.bookkeeping import visualization\n        from armi.cli import database\n\n        entryPoints = []\n        entryPoints.append(database.ExtractInputs)\n        
entryPoints.append(database.InjectInputs)\n        entryPoints.append(visualization.VisFileEntryPoint)\n\n        return entryPoints\n\n    @staticmethod\n    @plugins.HOOKIMPL\n    def defineCaseDependencies(case, suite):\n        if case.cs[\"loadStyle\"] == \"fromDB\":\n            # the ([^\\/]) capture basically gets the file name portion and excludes any\n            # directory separator\n            return case.getPotentialParentFromSettingValue(\n                case.cs[\"reloadDBName\"],\n                r\"^(?P<dirName>.*[\\/\\\\])?(?P<title>[^\\/\\\\]+?)(\\.[hH]5)?$\",\n            )\n        return None\n\n    @staticmethod\n    @plugins.HOOKIMPL\n    def mpiActionRequiresReset(cmd) -> bool:\n        \"\"\"\n        Prevent reactor resets after certain mpi actions.\n\n        * Memory profiling is small enough that we don't want to reset\n        * distributing state would be undone by this so we don't want that.\n\n        See Also\n        --------\n        armi.operators.operatorMPI.OperatorMPI.workerOperate\n        \"\"\"\n        from armi import mpiActions\n        from armi.bookkeeping import memoryProfiler\n\n        if isinstance(cmd, mpiActions.MpiAction):\n            for donotReset in (\n                mpiActions.DistributeStateAction,\n                mpiActions.DistributionAction,\n                memoryProfiler.PrintSystemMemoryUsageAction,\n                memoryProfiler.ProfileMemoryUsageAction,\n            ):\n                if isinstance(cmd, donotReset):\n                    return False\n\n        return True\n"
  },
  {
    "path": "armi/bookkeeping/db/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThe db package is responsible for reading and writing the state of the reactor to/from disk.\n\nAs an ARMI run progresses, this is periodically updated as the primary output file.\nIt can also be an input file for follow-on analysis or restart runs.\n\nThis module contains factories for selecting and building DB-related objects.\n\nWhen updating a db version\n--------------------------\nThe code associated with reading and writing database files may not benefit from Don't\nRepeat Yourself (DRY) practices in the same way as other code. Therefore, do not share\ncode between different major versions of the databases. Create a new module if you are\ncreating a new major database version.\n\nDatabase revision changelog\n---------------------------\n - 1: Originally, calculation results were stored in a SQL database.\n\n - 2: The storage format was changed to HDF5. This required less external\n   infrastructure than SQL. However, the implementation did not store a complete\n   model of a reactor, but a ghost of assembly, block, and reactor parameters that\n   could be applied to an existing reactor model (so long as the dimensions were\n   consistent). This was inconvenient and error prone.\n\n - 3: The HDF5 format was kept, but the schema was made more flexible to permit\n   storing the entire reactor model. 
All objects in the ARMI Composite Model are\n   written to the database, and the model can be completely recovered from just the\n   HDF5 file.\n\n     - 3.1: Improved the handling of reading/writing grids.\n\n     - 3.2: Changed the strategy for storing large attributes to using a special\n       string starting with an \"@\" symbol (e.g., \"@/c00n00/attrs/5_linkedDims\"). This\n       was done to support copying time node datasets from one file to another without\n       invalidating the references. Support was maintained for reading previous\n       versions, by performing a ``mergeHistory()`` and converting to the new naming\n       strategy, but the old version cannot be written.\n\n     - 3.3: Compressed the way locations are stored in the database and allow\n       MultiIndex locations to be read and written.\n\n     - 3.4: Modified the way locations are stored in the database to include complete\n       indices for indices that can be composed from multiple grids. Having complete\n       indices allows for more efficient means of extracting information based on\n       location, without having to compose the full model.\n\"\"\"\n\nimport os\n\nfrom armi import runLog\nfrom armi.bookkeeping.db.compareDB3 import compareDatabases\n\n# re-export package components for easier import\nfrom armi.bookkeeping.db.database import Database\nfrom armi.bookkeeping.db.databaseInterface import DatabaseInterface\nfrom armi.bookkeeping.db.factory import databaseFactory\n\n__all__ = [\n    \"Database\",\n    \"DatabaseInterface\",\n    \"compareDatabases\",\n    \"databaseFactory\",\n]\n\n\ndef loadOperator(\n    pathToDb,\n    loadCycle,\n    loadNode,\n    statePointName=None,\n    allowMissing=False,\n    handleInvalids=True,\n    callReactorConstructionHook=False,\n):\n    \"\"\"\n    Return an operator given the path to a database.\n\n    Parameters\n    ----------\n    pathToDb : str\n        The path of the database to load from.\n    loadCycle : int\n        The cycle 
to load the reactor state from.\n    loadNode : int\n        The time node to load the reactor from.\n    statePointName: str\n        State point name at the end, E.G. `EOC` or `EOL`.\n        Full name would be C0N2EOC, see database.getH5GroupName\n    allowMissing : bool\n        Whether to emit a warning, rather than crash if reading a database\n        with undefined parameters. Default False.\n    handleInvalids : bool\n        Whether to check for invalid settings. Default True.\n    callReactorConstructionHook : bool\n        Flag for whether the beforeReactorConstruction plugin hook should be executed. Default is False.\n\n    See Also\n    --------\n    armi.operator.Operator.loadState:\n        A method for loading reactor state that is useful if you already have an\n        operator and a reactor object. loadOperator varies in that it supplies these\n        given only a database file. loadState should be used if you are in the\n        middle of an ARMI calculation and need load a different time step.\n\n    Notes\n    -----\n    The operator will have a reactor attached that is loaded to the specified cycle\n    and node. The operator will not be in the same state that it was at that cycle and\n    node, only the reactor.\n\n    Examples\n    --------\n    >>> o = db.loadOperator(r\"pathToDatabase\", 0, 1)\n    >>> r = o.r\n    >>> cs = o.cs\n    >>> r.p.timeNode\n    1\n    >>> r.getFPMass()  # Note since it is loaded from step 1 there are fission products.\n    12345.67\n    \"\"\"\n    # `import armi` doesn't work if imported at top\n    from armi import cases\n\n    if not os.path.exists(pathToDb):\n        raise ValueError(\n            f\"Specified database at path {pathToDb} does not exist. 
\\n\\n\"\n            \"Double check that escape characters were correctly processed.\\n\"\n            \"Consider sending the full path, or change directory to be the directory \"\n            \"of the database.\"\n        )\n\n    db = Database(pathToDb, \"r\")\n    with db:\n        # init Case here as it keeps track of execution time and assigns a reactor\n        # attribute. This attribute includes the time it takes to initialize the reactor\n        # so creating a reactor from the database should be included.\n        cs = db.loadCS(handleInvalids=handleInvalids)\n        thisCase = cases.Case(cs)\n        r = db.load(\n            loadCycle,\n            loadNode,\n            cs=cs,\n            statePointName=statePointName,\n            allowMissing=allowMissing,\n            handleInvalids=handleInvalids,\n            callReactorConstructionHook=callReactorConstructionHook,\n        )\n\n    o = thisCase.initializeOperator(r=r)\n    runLog.important(\n        \"The operator will not be in the same state that it was at that cycle and \"\n        \"node, only the reactor.\\n\"\n        \"The operator should have access to the same interface stack, but the \"\n        \"interfaces will not be in the same state (they will be fresh instances \"\n        \"of each interface as if __init__ was just called rather than the state \"\n        \"during the run at this time node.)\\n\"\n        \"ARMI does not support loading operator states, as they are not stored.\"\n    )\n    return o\n\n\ndef _getH5File(db):\n    \"\"\"Return the underlying h5py File that provides the backing storage for a database.\n\n    This is done here because HDF5 isn't an official aspect of the base Database\n    abstraction, and thus making this part of the base Database class interface wouldn't\n    be ideal. **However**, we violate this assumption when working with \"auxiliary\"\n    data, which use HDF5 features directly. 
To be able to convert, we need to be able to\n    access and copy these groups, so we need access to the HDF5 file under the hood. To\n    avoid this, we would need to come up with our own formalization of what a\n    storage-agnostic aux data concept looks like. We can tackle that if/when we decide\n    that we want to start using protobufs or whatever.\n\n    All this being said, we are probably violating this already with genAuxiliaryData,\n    but we have to start somewhere.\n    \"\"\"\n    if isinstance(db, Database):\n        return db.h5db\n    else:\n        raise TypeError(\"Unsupported Database type ({})!\".format(type(db)))\n"
  },
  {
    "path": "armi/bookkeeping/db/compareDB3.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nUse the generic database class to compare two ARMI databases.\n\nThis assumes some intimate knowledge about how the database is structured internally.\nFor instance, it knows that the database is composed of HDF5 data (the attrs of a\ndataset are used, and h5py Groups are indexed), and it knows how special data is\nstructured within the HDF5 dataset and what the corresponding attributes are used for.\nSome of this could be easily pulled up to the public interfaces of the Database class,\nwhich may allow for cross-version database checking, but there is probably little value\nin doing so if one is able to convert between versions.\n\nSpeaking of conversions, there are some common issues that may arise from comparing\nagainst databases that were converted from an old version. The process of reading in the\nold database values can sometimes lead to more parameters being written out to the new\ndatabase than were in the original database (set to the parameter's default value). That\nmeans that one generally should not be worried about a converted database having more\nparameters in it that the one produced directly may not, assuming that the extra\nconverted parameters are the default. Also, especially at the Component level, some of\nthe parameters are expected to be different. 
Specifically the following:\n\n* temperatures: The old database format simply did not store these on the component\n  level, so when converting a database, the components in a block will uniformly get\n  whatever the Block temperature was.\n* serial numbers: At all levels, we cannot really expect the serial numbers to line\n  up from object to object. These are not really supposed to be the same.\n* volume: Component volumes also are not stored on the database, and come from\n  temperatures\n* memory usage: Relatively self-evident. Resource usage will vary from run to run,\n  even if the code hasn't changed.\n\n\"\"\"\n\nimport collections\nimport os\nimport re\nimport traceback\nfrom typing import Optional, Pattern, Sequence, Tuple\n\nimport h5py\nimport numpy as np\n\nfrom armi import runLog\nfrom armi.bookkeeping.db import database\nfrom armi.bookkeeping.db.database import Database\nfrom armi.bookkeeping.db.factory import databaseFactory\nfrom armi.bookkeeping.db.permissions import Permissions\nfrom armi.reactor.composites import ArmiObject\nfrom armi.utils.tabulate import tabulate\n\n\nclass OutputWriter:\n    \"\"\"Basically a tee to writeln to runLog and the output file.\"\"\"\n\n    def __init__(self, fname):\n        self.fname = fname\n        self._stream = None\n\n    def __enter__(self):\n        self._stream = open(self.fname, \"w\")\n        return self\n\n    def __exit__(self, *args):\n        self._stream.close()\n\n    def writeln(self, msg: str) -> None:\n        runLog.info(msg)\n        self._stream.write(msg)\n        self._stream.write(\"\\n\")\n\n\nclass DiffResults:\n    \"\"\"Utility class for storing differences between database data.\n\n    This class is used to store the differences between reference data and other\n    (\"source\") data. It is configured with a tolerance, below which differences are\n    ignored. 
Differences that exceed the tolerance are stored in a collection of\n    differences, organized by time step to be outputted later. It also keeps track of\n    the number of issues that may have been encountered in attempting to compare two\n    databases. For instance, missing datasets on one database or the other, or datasets\n    with incompatible dimensions and the like.\n\n    All differences are based on a weird type of relative difference, which uses the\n    mean of the reference and source data elements as the normalization value:\n    2*(C-E)/(C+E). This is somewhat strange, in that if the two are very different, the\n    reported relative difference will be smaller than expected. It does have the useful\n    property that if the reference value is zero and the source value is non-zero, the\n    diff will not be infinite. We do not typically report these in any rigorous manner,\n    so this should be fine, though we may wish to revisit this in the future.\n    \"\"\"\n\n    def __init__(self, tolerance):\n        self._columns = []\n        self._structureDiffs = []\n        self.tolerance = tolerance\n        # diffs is a dictionary, keyed on strings describing the object to which the\n        # diffs apply, and the different diff metrics that we use (e.g. mean(abs(diff)),\n        # max(abs(diff))), with the values being a list of diffs by time step. 
If the\n        # diff doesn't exceed the tolerance, a None is inserted instead.\n        self.diffs = collections.defaultdict(self._getDefault)\n\n    def addDiff(self, compType: str, paramName: str, absMean: float, mean: float, absMax: float) -> None:\n        \"\"\"Add a collection of diffs to the diff dictionary if they exceed the tolerance.\"\"\"\n        absMean = absMean if absMean > self.tolerance else None\n        self.diffs[\"{}/{} mean(abs(diff))\".format(compType, paramName)].append(absMean)\n\n        mean = mean if abs(mean) > self.tolerance else None\n        self.diffs[\"{}/{} mean(diff)\".format(compType, paramName)].append(mean)\n\n        absMax = absMax if absMax > self.tolerance else None\n        self.diffs[\"{}/{} max(abs(diff))\".format(compType, paramName)].append(absMax)\n\n    def addStructureDiffs(self, nDiffs: int) -> None:\n        if not self._structureDiffs:\n            self._structureDiffs = [0]\n\n        self._structureDiffs[-1] += nDiffs\n\n    def addTimeStep(self, tsName: str) -> None:\n        self._structureDiffs.append(0)\n        self._columns.append(tsName)\n\n    def _getDefault(self) -> list:\n        return [None] * (len(self._columns) - 1)\n\n    def reportDiffs(self, stream: OutputWriter) -> None:\n        \"\"\"Print out a well-formatted table of the non-zero diffs.\"\"\"\n        # filter out empty rows\n        diffsToPrint = {key: value for key, value in self.diffs.items() if not all(v is None for v in value)}\n        stream.writeln(\n            tabulate(\n                [k.split() + val for k, val in sorted(diffsToPrint.items())],\n                headers=self._columns,\n            )\n        )\n\n    def nDiffs(self) -> int:\n        \"\"\"Return the number of differences that exceeded the tolerance.\"\"\"\n        return sum(1 for _, value in self.diffs.items() if any(v is not None for v in value)) + sum(\n            self._structureDiffs\n        )\n\n\ndef compareDatabases(\n    refFileName: str,\n    
srcFileName: str,\n    exclusions: Optional[Sequence[str]] = None,\n    tolerance: float = 0.0,\n    timestepCompare: Optional[Sequence[Tuple[int, int]]] = None,\n) -> Optional[DiffResults]:\n    \"\"\"High-level method to compare two ARMI H5 files, given file paths.\"\"\"\n    compiledExclusions = None\n    if exclusions is not None:\n        compiledExclusions = [re.compile(ex) for ex in exclusions]\n\n    outputName = os.path.basename(refFileName) + \"_vs_\" + os.path.basename(srcFileName) + \".txt\"\n\n    diffResults = DiffResults(tolerance)\n    with OutputWriter(outputName) as out:\n        ref = databaseFactory(refFileName, Permissions.READ_ONLY_FME)\n        src = databaseFactory(srcFileName, Permissions.READ_ONLY_FME)\n        if not isinstance(ref, Database) or not isinstance(src, Database):\n            raise TypeError(\n                \"This database comparer only knows how to deal with database version 3; received {} and {}\".format(\n                    type(ref), type(src)\n                )\n            )\n\n        with ref, src:\n            if not timestepCompare:\n                _, nDiff = _compareH5Groups(out, ref, src, \"timesteps\")\n\n                if nDiff > 0:\n                    runLog.warning(\n                        \"{} and {} have differing timestep groups, and are \"\n                        \"probably not safe to compare. 
This is likely due to one of \"\n                        \"the cases having failed to complete.\".format(ref, src)\n                    )\n                    return None\n\n            for refGroup, srcGroup in zip(\n                ref.genTimeStepGroups(timeSteps=timestepCompare),\n                src.genTimeStepGroups(timeSteps=timestepCompare),\n            ):\n                runLog.info(\n                    f\"Comparing ref time step {refGroup.name.split('/')[1]} to src time \"\n                    f\"step {srcGroup.name.split('/')[1]}\"\n                )\n                diffResults.addTimeStep(refGroup.name)\n                _compareTimeStep(out, refGroup, srcGroup, diffResults, exclusions=compiledExclusions)\n\n        diffResults.reportDiffs(out)\n\n    return diffResults\n\n\ndef _compareH5Groups(out: OutputWriter, ref: h5py.Group, src: h5py.Group, name: str) -> Tuple[Sequence[str], int]:\n    refGroups = set(ref.keys())\n    srcGroups = set(src.keys())\n\n    n = _compareSets(srcGroups, refGroups, out, name)\n\n    return sorted(refGroups & srcGroups), n\n\n\ndef _compareTimeStep(\n    out: OutputWriter,\n    refGroup: h5py.Group,\n    srcGroup: h5py.Group,\n    diffResults: DiffResults,\n    exclusions: Optional[Sequence[Pattern]] = None,\n):\n    groupNames, structDiffs = _compareH5Groups(out, refGroup, srcGroup, \"composite objects/auxiliary data\")\n    diffResults.addStructureDiffs(structDiffs)\n\n    componentTypes = {gn for gn in groupNames if gn in ArmiObject.TYPES}\n    auxData = set(groupNames) - componentTypes\n    auxData.discard(\"layout\")\n\n    for componentType in componentTypes:\n        refTypeGroup = refGroup[componentType]\n        srcTypeGroup = srcGroup[componentType]\n\n        _compareComponentData(out, refTypeGroup, srcTypeGroup, diffResults, exclusions=exclusions)\n\n    for aux in auxData:\n        _compareAuxData(out, refGroup[aux], srcGroup[aux], diffResults)\n\n\ndef _compareAuxData(\n    out: OutputWriter,\n    
refGroup: h5py.Group,\n    srcGroup: h5py.Group,\n    diffResults: DiffResults,\n):\n    \"\"\"\n    Compare auxiliary datasets, which aren't stored as Parameters on the Composite model.\n\n    Some parts of ARMI directly create HDF5 groups under the time step group to store\n    arbitrary data. These still need to be compared. Missing datasets will be treated as\n    structure differences and reported.\n    \"\"\"\n    data = dict()\n\n    def visitor(name, obj):\n        if isinstance(obj, h5py.Dataset):\n            data[name] = obj\n\n    refGroup.visititems(visitor)\n    refData = data\n\n    data = dict()\n    srcGroup.visititems(visitor)\n    srcData = data\n\n    n = _compareSets(set(srcData.keys()), set(refData.keys()), out, name=\"auxiliary dataset\")\n    diffResults.addStructureDiffs(n)\n    matchedSets = set(srcData.keys()) & set(refData.keys())\n    for name in matchedSets:\n        _diffSimpleData(refData[name], srcData[name], diffResults)\n\n\ndef _compareSets(src: set, ref: set, out: OutputWriter, name: Optional[str] = None) -> int:\n    nDiffs = 0\n    printName = \"\" if name is None else name + \" \"\n    if ref - src:\n        nDiffs += len(ref - src)\n        out.writeln(\"ref has {}not in src: {}\".format(printName, list(ref - src)))\n\n    if src - ref:\n        nDiffs += len(src - ref)\n        out.writeln(\"src has {}not in ref: {}\".format(printName, list(src - ref)))\n\n    return nDiffs\n\n\ndef _diffSpecialData(\n    refData: h5py.Dataset,\n    srcData: h5py.Dataset,\n    out: OutputWriter,\n    diffResults: DiffResults,\n):\n    \"\"\"\n    Compare specially-formatted datasets.\n\n    This employs the pack/unpackSpecialData functions to reconstitute complicated\n    datasets for comparison. 
These usually don't behave well as giant numpy arrays, so\n    we go element-by-element to calculate the diffs, then concatenate them.\n    \"\"\"\n    name = refData.name\n    paramName = refData.name.split(\"/\")[-1]\n    compName = refData.name.split(\"/\")[-2]\n\n    nDiffs = _compareSets(set(srcData.attrs.keys()), set(refData.attrs.keys()), out, \"formatting data\")\n    keysMatch = nDiffs == 0\n    diffResults.addStructureDiffs(nDiffs)\n\n    if not keysMatch:\n        diffResults.addDiff(name, name, np.inf, np.inf, np.inf)\n        return\n\n    if srcData.attrs.get(\"dict\", False):\n        out.writeln(f\"Not comparing {name} as it is a dictionary.\")\n        return\n\n    attrsMatch = True\n    for k, srcAttr in srcData.attrs.items():\n        refAttr = refData.attrs[k]\n\n        if isinstance(srcAttr, np.ndarray) and isinstance(refAttr, np.ndarray):\n            srcFlat = srcAttr.flatten()\n            refFlat = refAttr.flatten()\n            if len(srcFlat) != len(refFlat):\n                same = False\n            else:\n                same = all(srcFlat == refFlat)\n        else:\n            same = srcAttr == refAttr\n\n        if not same:\n            attrsMatch = False\n            out.writeln(\n                \"Special formatting parameters for {} do not match for {}. Src: {} Ref: {}\".format(\n                    name, k, srcData.attrs[k], refData.attrs[k]\n                )\n            )\n            break\n\n    if not attrsMatch:\n        diffResults.addDiff(compName, paramName, np.inf, np.inf, np.inf)\n        return\n\n    try:\n        src = database.unpackSpecialData(srcData[()], srcData.attrs, paramName)\n        ref = database.unpackSpecialData(refData[()], refData.attrs, paramName)\n    except Exception:\n        runLog.error(\n            f\"Unable to unpack special data for paramName {paramName}. 
{traceback.format_exc()}\",\n        )\n        return\n\n    diff = []\n    for dSrc, dRef in zip(src.tolist(), ref.tolist()):\n        if isinstance(dSrc, np.ndarray) and isinstance(dRef, np.ndarray):\n            if dSrc.shape != dRef.shape:\n                out.writeln(\"Shapes did not match for {}\".format(refData))\n                diffResults.addDiff(compName, paramName, np.inf, np.inf, np.inf)\n                return\n\n            if dSrc.dtype.type == np.bytes_ or dRef.dtype.type == np.bytes_:\n                # data is byte strings; can't be diffed like numbers\n                if np.array_equal(dSrc, dRef):\n                    diffResults.addDiff(name, name, 0.0, 0.0, 0.0)\n                else:\n                    diffResults.addDiff(name, name, np.inf, np.inf, np.inf)\n                return\n\n            # Make sure not to try to compare empty arrays. Numpy is mediocre at these;\n            # they are super degenerate and cannot participate in concatenation.\n            if 0 not in dSrc.shape:\n                # Use the mean of the two to calc relative error. 
This is more robust to\n                # changes that cause one of the values to be zero, while the other is\n                # non-zero, leading to infinite relative error\n                dMean = (dSrc + dRef) / 2\n                diff.append((dSrc - dRef) / dMean)\n            continue\n\n        if (dSrc is None) ^ (dRef is None):\n            out.writeln(\"Mismatched Nones for {} in {}\".format(paramName, compName))\n            diff.append([np.inf])\n            continue\n\n        if dSrc is None:\n            diff.append([0.0])\n            continue\n\n        try:\n            # Use mean to avoid some infinities; see above\n            dMean = (dSrc + dRef) / 2\n            diff.append([(dSrc - dRef) / dMean])\n        except ZeroDivisionError:\n            if dSrc == dRef:\n                diff.append([0.0])\n            else:\n                diff.append([np.inf])\n\n    if diff:\n        try:\n            diff = [np.array(d).flatten() for d in diff]\n            diff = np.concatenate(diff)\n        except ValueError as e:\n            out.writeln(\"Failed to concatenate diff data for {} in {}: {}\".format(paramName, compName, diff))\n            out.writeln(\"Because: {}\".format(e))\n            return\n        absDiff = np.abs(diff)\n        mean = np.nanmean(diff)\n        absMax = np.nanmax(absDiff)\n        absMean = np.nanmean(absDiff)\n\n        diffResults.addDiff(compName, paramName, absMean, mean, absMax)\n\n\ndef _diffSimpleData(ref: h5py.Dataset, src: h5py.Dataset, diffResults: DiffResults):\n    paramName = ref.name.split(\"/\")[-1]\n    compName = ref.name.split(\"/\")[-2]\n\n    try:\n        # use mean to avoid some unnecessary infinities\n        mean = (src[()] + ref[()]) / 2.0\n        diff = (src[()] - ref[()]) / mean\n    except TypeError:\n        # Strings are persnickety\n        if src.dtype.kind == ref.dtype.kind and src.dtype.kind in {\"U\", \"S\"}:\n            return\n        else:\n            runLog.error(\"Failed to 
compare {} in {}\".format(paramName, compName))\n            runLog.error(\"source: {}\".format(src))\n            runLog.error(\"reference: {}\".format(ref))\n            diff = np.array([np.inf])\n    except ValueError:\n        runLog.error(\"Failed to compare {} in {}\".format(paramName, compName))\n        runLog.error(\"source: {}\".format(src))\n        runLog.error(\"reference: {}\".format(ref))\n        diff = np.array([np.inf])\n\n    if 0 in diff.shape:\n        # Empty list, no diff\n        return\n\n    absDiff = np.abs(diff)\n    mean = np.nanmean(diff)\n    absMax = np.nanmax(absDiff)\n    absMean = np.nanmean(absDiff)\n\n    diffResults.addDiff(compName, paramName, absMean, mean, absMax)\n\n\ndef _compareComponentData(\n    out: OutputWriter,\n    refGroup: h5py.Group,\n    srcGroup: h5py.Group,\n    diffResults: DiffResults,\n    exclusions: Optional[Sequence[Pattern]] = None,\n):\n    exclusions = exclusions or []\n    compName = refGroup.name\n    paramNames, nDiff = _compareH5Groups(out, refGroup, srcGroup, \"{} parameters\".format(compName))\n    diffResults.addStructureDiffs(nDiff)\n\n    for paramName in paramNames:\n        fullName = \"/\".join((refGroup.name, paramName))\n        if any(pattern.match(fullName) for pattern in exclusions):\n            runLog.debug(\"Skipping comparison of {} since it is being ignored.\".format(fullName))\n            continue\n        refDataset = refGroup[paramName]\n        srcDataset = srcGroup[paramName]\n\n        srcSpecial = srcDataset.attrs.get(\"specialFormatting\", False)\n        refSpecial = refDataset.attrs.get(\"specialFormatting\", False)\n\n        if srcSpecial ^ refSpecial:\n            out.writeln(\n                \"Could not compare data for parameter {} because one uses special \"\n                \"formatting, and the other does not. 
Ref: {} Src: {}\".format(paramName, refSpecial, srcSpecial)\n            )\n            diffResults.addDiff(refGroup.name, paramName, np.inf, np.inf, np.inf)\n            continue\n\n        if srcSpecial or refSpecial:\n            _diffSpecialData(refDataset, srcDataset, out, diffResults)\n        else:\n            _diffSimpleData(refDataset, srcDataset, diffResults)\n"
  },
  {
    "path": "armi/bookkeeping/db/database.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nARMI Database implementation, version 3.4.\n\nA reactor model should be fully recoverable from the database; all the way down to the component level. As a result, the\nstructure of the underlying data is bound to the hierarchical Composite Reactor Model. Furthermore, this database format\nis intended to be more dynamic, permitting as-yet undeveloped levels and classes in the Composite Reactor Model to be\nsupported as they are added. More high-level discussion is contained in :ref:`database-file`.\n\nThe :py:class:`Database` class contains most of the functionality for interacting with the underlying data. This\nincludes things like dumping a Reactor state to the database and loading it back again, as well as extracting historical\ndata for a given object or collection of object from the database file. 
However, for the nitty-gritty details of how the\nhierarchical Composite Reactor Model is translated to the flat file database, please refer to\n:py:mod:`armi.bookkeeping.db.layout`.\n\nRefer to :py:mod:`armi.bookkeeping.db` for information about versioning.\n\"\"\"\n\nimport collections\nimport copy\nimport gc\nimport io\nimport itertools\nimport os\nimport pathlib\nimport re\nimport shutil\nimport subprocess\nimport sys\nfrom platform import uname\nfrom typing import (\n    Any,\n    Dict,\n    Generator,\n    List,\n    Optional,\n    Sequence,\n    Tuple,\n    Type,\n    Union,\n)\n\nimport h5py\nimport numpy as np\n\nfrom armi import context, getApp, getPluginManagerOrFail, meta, runLog, settings\nfrom armi.bookkeeping.db.jaggedArray import JaggedArray\nfrom armi.bookkeeping.db.layout import (\n    DB_VERSION,\n    LOC_COORD,\n    Layout,\n    replaceNonesWithNonsense,\n    replaceNonsenseWithNones,\n)\nfrom armi.bookkeeping.db.typedefs import Histories, History\nfrom armi.physics.neutronics.settings import CONF_LOADING_FILE\nfrom armi.reactor import grids, parameters\nfrom armi.reactor.assemblies import Assembly\nfrom armi.reactor.blocks import Block\nfrom armi.reactor.components import Component\nfrom armi.reactor.composites import ArmiObject\nfrom armi.reactor.parameters import parameterCollections\nfrom armi.reactor.reactorParameters import makeParametersReadOnly\nfrom armi.reactor.reactors import Core, Reactor\nfrom armi.settings.fwSettings.globalSettings import (\n    CONF_GROW_TO_FULL_CORE_AFTER_LOAD,\n    CONF_SORT_REACTOR,\n)\nfrom armi.utils import getNodesPerCycle, safeCopy, safeMove\nfrom armi.utils.textProcessors import resolveMarkupInclusions\n\n# CONSTANTS\n_SERIALIZER_NAME = \"serializerName\"\n_SERIALIZER_VERSION = \"serializerVersion\"\n\n\ndef getH5GroupName(cycle: int, timeNode: int, statePointName: str = None) -> str:\n    \"\"\"\n    Naming convention specifier.\n\n    ARMI defines the naming convention cXXnYY for groups of simulation 
data. That is, data is grouped by cycle and time\n    node information during a simulated run.\n    \"\"\"\n    return \"c{:0>2}n{:0>2}{}\".format(cycle, timeNode, statePointName or \"\")\n\n\nclass Database:\n    \"\"\"\n    ARMI Database, handling serialization and loading of Reactor states.\n\n    This implementation of the database pushes all objects in the Composite Reactor Model into the database. This\n    process is aided by the ``Layout`` class, which handles the packing and unpacking of the structure of the objects,\n    their relationships, and their non-parameter attributes.\n\n    .. impl:: The database files are H5, and thus language agnostic.\n        :id: I_ARMI_DB_H51\n        :implements: R_ARMI_DB_H5\n\n        This class implements a light wrapper around H5 files, so they can be used to store ARMI outputs. H5 files are\n        commonly used in scientific applications in Fortran and C++. As such, they are entirely language agnostic binary\n        files. The implementation here is that ARMI wraps the ``h5py`` library, and uses its extensive tooling, instead\n        of re-inventing the wheel.\n\n    See Also\n    --------\n    `doc/user/outputs/database` for more details.\n    \"\"\"\n\n    # Allows matching for, e.g., c01n02EOL\n    timeNodeGroupPattern = re.compile(r\"^c(\\d\\d)n(\\d\\d).*$\")\n\n    def __init__(self, fileName: os.PathLike, permission: str = \"r\"):\n        \"\"\"\n        Create a new Database object.\n\n        Parameters\n        ----------\n        fileName:\n            name of the file\n        permission:\n            file permissions, write (\"w\") or read (\"r\")\n        \"\"\"\n        self._fileName = fileName\n        # No full path yet; we will determine this based on FAST_PATH and permissions\n        self._fullPath: Optional[str] = None\n        self._permission = permission\n        self.h5db: Optional[h5py.File] = None\n\n        # Allows context management on open files. 
If context management is used on a file that is already open, it will\n        # not reopen and it will also not close after leaving that context. This allows the treatment of all databases\n        # the same whether they are open or closed.\n        self._openCount: int = 0\n\n        if permission == \"w\":\n            self.version = DB_VERSION\n        else:\n            # will be set upon read\n            self._version = None\n            self._versionMajor = None\n            self._versionMinor = None\n\n    @property\n    def version(self) -> str:\n        return self._version\n\n    @version.setter\n    def version(self, value: str):\n        self._version = value\n        self._versionMajor, self._versionMinor = (int(v) for v in value.split(\".\"))\n        if self.versionMajor != 3:\n            raise ValueError(f\"This version of ARMI only supports version 3 of the ARMI DB, found {self.versionMajor}.\")\n\n    @property\n    def versionMajor(self):\n        return self._versionMajor\n\n    @property\n    def versionMinor(self):\n        return self._versionMinor\n\n    def __repr__(self):\n        return \"<{} {}>\".format(self.__class__.__name__, repr(self.h5db).replace(\"<\", \"\").replace(\">\", \"\"))\n\n    def open(self):\n        if self.h5db is not None:\n            raise ValueError(\"This database is already open; make sure to close it before trying to open it again.\")\n\n        filePath = self._fileName\n        self._openCount += 1\n\n        if self._permission in {\"r\", \"a\"}:\n            self._fullPath = os.path.abspath(filePath)\n            self.h5db = h5py.File(filePath, self._permission)\n            self.version = self.h5db.attrs[\"databaseVersion\"]\n            return\n\n        if self._permission == \"w\":\n            # assume fast path!\n            filePath = os.path.join(context.getFastPath(), filePath)\n            self._fullPath = os.path.abspath(filePath)\n\n        else:\n            runLog.error(f\"Unrecognized 
file permissions `{self._permission}`\")\n            raise ValueError(f\"Cannot open database with permission `{self._permission}`\")\n\n        # open the database, and write a bunch of metadata to it\n        runLog.info(\"Opening database file at {}\".format(os.path.abspath(filePath)))\n        self.h5db = h5py.File(filePath, self._permission)\n        self.h5db.attrs[\"successfulCompletion\"] = False\n        self.h5db.attrs[\"version\"] = meta.__version__\n        self.h5db.attrs[\"databaseVersion\"] = self.version\n        self.writeSystemAttributes(self.h5db)\n\n        # store app and plugin data\n        app = getApp()\n        self.h5db.attrs[\"appName\"] = app.name\n        plugins = app.pluginManager.list_name_plugin()\n        ps = [(os.path.abspath(sys.modules[p[1].__module__].__file__), p[1].__name__) for p in plugins]\n        ps = np.array([str(p[0]) + \":\" + str(p[1]) for p in ps]).astype(\"S\")\n        self.h5db.attrs[\"pluginPaths\"] = ps\n        self.h5db.attrs[\"localCommitHash\"] = Database.grabLocalCommitHash()\n\n    def isOpen(self):\n        return self.h5db is not None\n\n    @staticmethod\n    def writeSystemAttributes(h5db):\n        \"\"\"Write system attributes to the database.\n\n        .. impl:: Add system attributes to the database.\n            :id: I_ARMI_DB_QA\n            :implements: R_ARMI_DB_QA\n\n            This method writes some basic system information to the H5 file. This is designed as a starting point, so\n            users can see information about the system their simulations were run on. As ARMI is used on Windows and\n            Linux, the tooling here has to be platform independent. 
The two major sources of information are the ARMI\n            :py:mod:`context <armi.context>` module and the Python standard library ``platform``.\n        \"\"\"\n        h5db.attrs[\"user\"] = context.USER\n        h5db.attrs[\"python\"] = sys.version\n        h5db.attrs[\"armiLocation\"] = os.path.dirname(context.ROOT)\n        h5db.attrs[\"startTime\"] = context.START_TIME\n        h5db.attrs[\"machines\"] = np.array(context.MPI_NODENAMES).astype(\"S\")\n\n        # store platform data\n        platform_data = uname()\n        h5db.attrs[\"platform\"] = platform_data.system\n        h5db.attrs[\"hostname\"] = platform_data.node\n        h5db.attrs[\"platformRelease\"] = platform_data.release\n        h5db.attrs[\"platformVersion\"] = platform_data.version\n        h5db.attrs[\"platformArch\"] = platform_data.processor\n\n    @staticmethod\n    def grabLocalCommitHash():\n        \"\"\"\n        Try to determine the local Git commit.\n\n        We have to be sure to handle the errors where the code is run on a system that doesn't have Git installed. Or if\n        the code is simply not run from inside a repo.\n\n        Returns\n        -------\n        str\n            The commit hash if it exists, otherwise \"unknown\".\n        \"\"\"\n        unknown = \"unknown\"\n        if not shutil.which(\"git\"):\n            # no git available. 
cannot check git info\n            return unknown\n        repo_exists = (\n            subprocess.run(\n                \"git rev-parse --git-dir\".split(),\n                stdout=subprocess.DEVNULL,\n                stderr=subprocess.DEVNULL,\n            ).returncode\n            == 0\n            and subprocess.run(\n                [\"git\", \"describe\"],\n                stdout=subprocess.DEVNULL,\n                stderr=subprocess.DEVNULL,\n            ).returncode\n            == 0\n        )\n        if repo_exists:\n            try:\n                commit_hash = subprocess.check_output([\"git\", \"describe\"])\n                return commit_hash.decode(\"utf-8\").strip()\n            except Exception:\n                return unknown\n        else:\n            return unknown\n\n    def close(self, completedSuccessfully=False):\n        \"\"\"Close the DB and perform cleanups and auto-conversions.\"\"\"\n        self._openCount = 0\n        if self.h5db is None:\n            return\n\n        if self._permission == \"w\":\n            self.h5db.attrs[\"successfulCompletion\"] = completedSuccessfully\n            # a bit redundant to call flush, but with unreliable IO issues, why not?\n            self.h5db.flush()\n\n        self.h5db.close()\n        self.h5db = None\n\n        if self._permission == \"w\":\n            # move out of the FAST_PATH and into the working directory\n            newPath = safeMove(self._fullPath, self._fileName)\n            self._fullPath = os.path.abspath(newPath)\n\n    def splitDatabase(self, keepTimeSteps: Sequence[Tuple[int, int]], label: str) -> str:\n        \"\"\"\n        Discard all data except for specific time steps, retaining old data in a separate file.\n\n        This is useful when performing more exotic analyses, where each \"time step\" may not represent a specific point\n        in time, but something more nuanced. 
For example, equilibrium cases store a new \"cycle\" for each iteration as it\n        attempts to converge the equilibrium cycle. At the end of the run, the last \"cycle\" is the converged equilibrium\n        cycle, whereas the previous cycles constitute the path to convergence, which we typically wish to discard before\n        further analysis.\n\n        Parameters\n        ----------\n        keepTimeSteps\n            A collection of the time steps to retain\n        label\n            An informative label for the backed-up database. Usually something like \"-all-iterations\". Will be\n            interposed between the source name and the \".h5\" extension.\n\n        Returns\n        -------\n        str\n            The name of the new, backed-up database file.\n        \"\"\"\n        if self.h5db is None:\n            raise ValueError(\"There is no open database to split.\")\n\n        self.h5db.close()\n\n        backupDBPath = os.path.abspath(label.join(os.path.splitext(self._fileName)))\n        runLog.info(f\"Retaining full database history in {backupDBPath}\")\n        if self._fullPath is not None:\n            safeMove(self._fullPath, backupDBPath)\n\n        self.h5db = h5py.File(self._fullPath, self._permission)\n        dbOut = self.h5db\n\n        with h5py.File(backupDBPath, \"r\") as dbIn:\n            dbOut.attrs.update(dbIn.attrs)\n\n            # Copy everything except time node data\n            timeSteps = set()\n            for groupName, _ in dbIn.items():\n                m = self.timeNodeGroupPattern.match(groupName)\n                if m:\n                    timeSteps.add((int(m.group(1)), int(m.group(2))))\n                else:\n                    dbIn.copy(groupName, dbOut)\n\n            if not set(keepTimeSteps).issubset(timeSteps):\n                raise ValueError(f\"Not all desired time steps ({keepTimeSteps}) are even present in the database\")\n\n            minCycle = next(iter(sorted(keepTimeSteps)))[0]\n            
for cycle, node in keepTimeSteps:\n                offsetCycle = cycle - minCycle\n                offsetGroupName = getH5GroupName(offsetCycle, node)\n                dbIn.copy(getH5GroupName(cycle, node), dbOut, name=offsetGroupName)\n                dbOut[offsetGroupName + \"/Reactor/cycle\"][()] = offsetCycle\n\n        return backupDBPath\n\n    @property\n    def fileName(self):\n        return self._fileName\n\n    @fileName.setter\n    def fileName(self, fName):\n        if self.h5db is not None:\n            raise RuntimeError(\"Cannot change Database file name while it's opened!\")\n        self._fileName = fName\n\n    def loadCS(self, handleInvalids=True):\n        \"\"\"Attempt to load settings from the database file.\n\n        Parameters\n        ----------\n        handleInvalids : bool\n            Whether to check for invalid settings. Default True.\n\n        Notes\n        -----\n        There are no guarantees here. If the database was written from a different version of ARMI than you are using,\n        these results may not be usable. Or if the database was written using a custom Application you do not have\n        access to, the DB may not be usable.\n        \"\"\"\n        cs = settings.Settings()\n        cs.path = self.fileName\n        cs.loadFromString(self.h5db[\"inputs/settings\"].asstr()[()], handleInvalids=handleInvalids)\n\n        return cs\n\n    def loadBlueprints(self, cs=None):\n        \"\"\"Attempt to load reactor blueprints from the database file.\n\n        Notes\n        -----\n        There are no guarantees here. If the database was written from a different version of ARMI than you are using,\n        these results may not be usable. Or if the database was written using a custom Application you do not have\n        access to, the DB may not be usable.\n        \"\"\"\n        # Blueprints use the yamlize package, which uses class attributes to define much of the class's behavior through\n        # metaclassing. 
Therefore, we need to be able to import all plugins before importing blueprints.\n        from armi.reactor.blueprints import Blueprints\n\n        bpString = None\n\n        try:\n            bpString = self.h5db[\"inputs/blueprints\"].asstr()[()]\n            # Need to update the blueprint file to be the database so that its not pointing at a source that doesn't\n            # exist anymore (the original blueprints yaml).\n            if cs:\n                # Update the settings to point at where the file was actually read from\n                cs[CONF_LOADING_FILE] = os.path.basename(self.fileName)\n        except KeyError:\n            # not all reactors need to be created from blueprints, so they may not exist\n            pass\n\n        if not bpString:\n            # looks like no blueprints contents\n            return None\n\n        stream = io.StringIO(bpString)\n        stream = Blueprints.migrate(stream)\n        return Blueprints.load(stream)\n\n    def writeInputsToDB(self, cs, csString=None, bpString=None):\n        \"\"\"\n        Write inputs into the database based the Settings.\n\n        This is not DRY on purpose. The goal is that any particular Database implementation should be very stable, so we\n        dont want it to be easy to change one Database implementation's behavior when trying to change another's.\n\n        .. impl:: The run settings are saved the settings file.\n            :id: I_ARMI_DB_CS\n            :implements: R_ARMI_DB_CS\n\n            A ``Settings`` object is passed into this method, and then the settings are converted into a YAML string\n            stream. That stream is then written to the H5 file. Optionally, this method can take a pre-build settings\n            string to be written directly to the file.\n\n        .. 
impl:: The reactor blueprints are saved the settings file.\n            :id: I_ARMI_DB_BP\n            :implements: R_ARMI_DB_BP\n\n            A ``Blueprints`` string is optionally passed into this method, and then written to the H5 file. If it is not\n            passed in, this method will attempt to find the blueprints input file in the settings, and read the contents\n            of that file into a stream to be written to the H5 file.\n\n        Notes\n        -----\n        This is hard-coded to read the entire file contents into memory and write that directly into the database. We\n        could have the cs/blueprints/geom write to a string, however the ARMI log file contains a hash of each files'\n        contents. In the future, we should be able to reproduce a calculation with confidence that the inputs are\n        identical.\n        \"\"\"\n        caseTitle = cs.caseTitle if cs is not None else os.path.splitext(self.fileName)[0]\n        self.h5db.attrs[\"caseTitle\"] = caseTitle\n        if csString is None:\n            # Don't read file; use what's in the cs now. 
Sometimes settings are modified in tests.\n            stream = io.StringIO()\n            cs.writeToYamlStream(stream)\n            stream.seek(0)\n            csString = stream.read()\n\n        if bpString is None:\n            bpPath = pathlib.Path(cs.inputDirectory) / cs[CONF_LOADING_FILE]\n            if bpPath.suffix.lower() in (\".h5\", \".hdf5\"):\n                # The blueprints are in a database file, they need to be read\n                try:\n                    db = h5py.File(bpPath, \"r\")\n                    bpString = db[\"inputs/blueprints\"].asstr()[()]\n                except KeyError:\n                    # not all reactors need to be created from blueprints, so they may not exist\n                    bpString = \"\"\n            else:\n                # The blueprints are a standard blueprints yaml that can be read.\n                if bpPath.exists() and bpPath.is_file():\n                    # Only store blueprints if we actually loaded from them. Ensure that the input as stored in the DB\n                    # is complete\n                    bpString = resolveMarkupInclusions(pathlib.Path(cs.inputDirectory) / cs[CONF_LOADING_FILE]).read()\n                else:\n                    bpString = \"\"\n\n        self.h5db[\"inputs/settings\"] = csString\n        self.h5db[\"inputs/blueprints\"] = bpString\n\n    def readInputsFromDB(self):\n        return (\n            self.h5db[\"inputs/settings\"].asstr()[()],\n            self.h5db[\"inputs/blueprints\"].asstr()[()],\n        )\n\n    def mergeHistory(self, inputDB, startCycle, startNode):\n        \"\"\"\n        Copy time step data up to, but not including the passed cycle and node.\n\n        Notes\n        -----\n        This is used for restart runs with the standard operator for example. 
The current time step (being loaded from)\n        should not be copied, as that time steps data will be written at the end of the time step.\n        \"\"\"\n        if self.versionMajor != 3:\n            raise ValueError(f\"Only version 3 of the ARMI DB is supported, found {self.versionMajor}.\")\n        elif inputDB.versionMajor != 3:\n            raise ValueError(f\"Only version 3 of the ARMI DB is supported, found {inputDB.versionMajor}.\")\n\n        # iterate over the top level H5Groups and copy\n        for time, h5ts in zip(inputDB.genTimeSteps(), inputDB.genTimeStepGroups()):\n            cyc, tn = time\n            if cyc == startCycle and tn == startNode:\n                # all data up to current state are merged\n                return\n            self.h5db.copy(h5ts, h5ts.name)\n\n    def __enter__(self):\n        \"\"\"Context management support.\"\"\"\n        if self._openCount == 0:\n            # open also increments _openCount\n            self.open()\n        else:\n            self._openCount += 1\n\n        return self\n\n    def __exit__(self, type, value, traceback):\n        \"\"\"Typically we don't care why it broke but we want the DB to close.\"\"\"\n        self._openCount -= 1\n        # always close if there is a traceback.\n        if self._openCount == 0 or traceback:\n            self.close(all(i is None for i in (type, value, traceback)))\n\n    def __del__(self):\n        if self.h5db is not None:\n            self.close(False)\n\n    def __delitem__(self, tn: Tuple[int, int, Optional[str]]):\n        cycle, timeNode, statePointName = tn\n        name = getH5GroupName(cycle, timeNode, statePointName)\n        if self.h5db is not None:\n            del self.h5db[name]\n\n    def genTimeStepGroups(\n        self, timeSteps: Sequence[Tuple[int, int]] = None\n    ) -> Generator[h5py._hl.group.Group, None, None]:\n        \"\"\"Returns a generator of HDF5 Groups for all time nodes, or for the passed selection.\"\"\"\n        assert 
self.h5db is not None, \"Must open the database before calling genTimeStepGroups\"\n        if timeSteps is None:\n            for groupName, h5TimeNodeGroup in sorted(self.h5db.items()):\n                match = self.timeNodeGroupPattern.match(groupName)\n                if match:\n                    yield h5TimeNodeGroup\n        else:\n            for step in timeSteps:\n                yield self.h5db[getH5GroupName(*step)]\n\n    def getLayout(self, cycle, node):\n        \"\"\"Return a Layout object representing the requested cycle and time node.\"\"\"\n        version = (self._versionMajor, self._versionMinor)\n        timeGroupName = getH5GroupName(cycle, node)\n\n        return Layout(version, self.h5db[timeGroupName])\n\n    def genTimeSteps(self) -> Generator[Tuple[int, int], None, None]:\n        \"\"\"Returns a generator of (cycle, node) tuples that are present in the DB.\"\"\"\n        assert self.h5db is not None, \"Must open the database before calling genTimeSteps\"\n        for groupName in sorted(self.h5db.keys()):\n            match = self.timeNodeGroupPattern.match(groupName)\n            if match:\n                cycle = int(match.groups()[0])\n                node = int(match.groups()[1])\n                yield (cycle, node)\n\n    def genAuxiliaryData(self, ts: Tuple[int, int]) -> Generator[str, None, None]:\n        \"\"\"Returns a generator of names of auxiliary data on the requested time point.\"\"\"\n        assert self.h5db is not None, \"Must open the database before calling genAuxiliaryData\"\n        cycle, node = ts\n        groupName = getH5GroupName(cycle, node)\n        timeGroup = self.h5db[groupName]\n        exclude = set(ArmiObject.TYPES.keys())\n        exclude.add(\"layout\")\n        return (groupName + \"/\" + key for key in timeGroup.keys() if key not in exclude)\n\n    @staticmethod\n    def getAuxiliaryDataPath(ts: Tuple[int, int], name: str) -> str:\n        return getH5GroupName(*ts) + \"/\" + name\n\n    def 
keys(self):\n        return (g.name for g in self.genTimeStepGroups())\n\n    def getH5Group(self, r, statePointName=None):\n        \"\"\"\n        Get the H5Group for the current ARMI timestep.\n\n        This method can be used to allow other interfaces to place data into the database at the correct timestep.\n        \"\"\"\n        groupName = getH5GroupName(r.p.cycle, r.p.timeNode, statePointName)\n        if groupName in self.h5db:\n            return self.h5db[groupName]\n        else:\n            group = self.h5db.create_group(groupName, track_order=True)\n            group.attrs[\"cycle\"] = r.p.cycle\n            group.attrs[\"timeNode\"] = r.p.timeNode\n            return group\n\n    def hasTimeStep(self, cycle, timeNode, statePointName=\"\"):\n        \"\"\"Returns True if (cycle, timeNode, statePointName) is contained in the database.\"\"\"\n        return getH5GroupName(cycle, timeNode, statePointName) in self.h5db\n\n    def writeToDB(self, reactor, statePointName=None):\n        assert self.h5db is not None, \"Database must be open before writing.\"\n        # _createLayout is recursive\n        h5group = self.getH5Group(reactor, statePointName)\n        runLog.info(\"Writing to database for statepoint: {}\".format(h5group.name))\n        layout = Layout((self.versionMajor, self.versionMinor), comp=reactor)\n        layout.writeToDB(h5group)\n        groupedComps = layout.groupedComps\n\n        for comps in groupedComps.values():\n            self._writeParams(h5group, comps)\n\n    def syncToSharedFolder(self):\n        \"\"\"\n        Copy DB to run working directory.\n\n        Needed when multiple MPI processes need to read the same db, for example when a history is needed from\n        independent runs (e.g. 
for fuel performance on a variety of assemblies).\n\n        Notes\n        -----\n        At some future point, we may implement a client-server like DB system which would render this kind of operation\n        unnecessary.\n        \"\"\"\n        runLog.extra(\"Copying DB to shared working directory.\")\n        self.h5db.flush()\n\n        # Close the h5 file so it can be copied\n        self.h5db.close()\n        self.h5db = None\n        safeCopy(self._fullPath, self._fileName)\n\n        # Garbage collect so we don't have multiple databases hanging around in memory\n        gc.collect()\n\n        # Reload the file in append mode and continue on our merry way\n        self.h5db = h5py.File(self._fullPath, \"r+\")\n\n    def load(\n        self,\n        cycle,\n        node,\n        cs=None,\n        bp=None,\n        statePointName=None,\n        allowMissing=False,\n        handleInvalids=True,\n        callReactorConstructionHook=False,\n    ):\n        \"\"\"Load a new reactor from a DB at (cycle, node).\n\n        Case settings and blueprints can be provided, or read from the database. Providing  can be useful for snapshot\n        runs or when you want to change settings mid-simulation. Geometry is read from the database.\n\n        .. impl:: Users can load a reactor from a DB.\n            :id: I_ARMI_DB_TIME1\n            :implements: R_ARMI_DB_TIME\n\n            This method creates a ``Reactor`` object by reading the reactor state out of an ARMI database file. This is\n            done by passing in mandatory arguments that specify the exact place in time you want to load the reactor\n            from. (That is, the cycle and node numbers.) Users can either pass the settings and blueprints directly into\n            this method, or it will attempt to read them from the database file. 
The primary work done here is to read\n            the hierarchy of reactor objects from the data file, then reconstruct them in the correct order.\n\n        Parameters\n        ----------\n        cycle : int\n            Cycle number\n        node : int\n            Time node. If value is negative, will be indexed from EOC backwards like a list.\n        cs : armi.settings.Settings, optional\n            If not provided one is read from the database\n        bp : armi.reactor.Blueprints, optional\n            If not provided one is read from the database\n        statePointName : str, optional\n            Statepoint name (e.g., \"special\" for \"c00n00-special/\")\n        allowMissing : bool, optional\n            Whether to emit a warning, rather than crash if reading a database\n            with undefined parameters. Default False.\n        handleInvalids : bool\n            Whether to check for invalid settings. Default True.\n        callReactorConstructionHook : bool\n            Flag for whether the beforeReactorConstruction plugin hook should be executed. 
Default is False.\n\n        Returns\n        -------\n        root : Reactor\n            The top-level object stored in the database; a Reactor.\n        \"\"\"\n        runLog.info(f\"Loading reactor state for time node ({cycle}, {node})\")\n\n        if cs is None:\n            cs = self.loadCS(handleInvalids=handleInvalids)\n        if bp is None:\n            bp = self.loadBlueprints(cs)\n\n        if callReactorConstructionHook:\n            getPluginManagerOrFail().hook.beforeReactorConstruction(cs=cs)\n\n        if node < 0:\n            numNodes = getNodesPerCycle(cs)[cycle]\n            if (node + numNodes) < 0:\n                raise ValueError(f\"Node {node} specified does not exist for cycle {cycle}\")\n            node = numNodes + node\n\n        h5group = self.h5db[getH5GroupName(cycle, node, statePointName)]\n\n        layout = Layout((self.versionMajor, self.versionMinor), h5group=h5group)\n        comps, groupedComps = layout._initComps(cs.caseTitle, bp)\n\n        # populate data onto initialized components\n        for compType, compTypeList in groupedComps.items():\n            self._readParams(h5group, compType, compTypeList, allowMissing=allowMissing)\n\n        # assign params from blueprints\n        if bp is not None:\n            self._assignBlueprintsParams(bp, groupedComps)\n\n        # stitch together\n        self._compose(iter(comps), cs)\n\n        # also, make sure to update the global serial number so we don't reuse a number\n        parameterCollections.GLOBAL_SERIAL_NUM = max(parameterCollections.GLOBAL_SERIAL_NUM, layout.serialNum.max())\n        root = comps[0][0]\n\n        # return a Reactor object\n        if cs[CONF_SORT_REACTOR]:\n            root.sort()\n        else:\n            runLog.warning(\n                \"DeprecationWarning: This Reactor is not being sorted on DB load. Due to the setting \"\n                f\"{CONF_SORT_REACTOR}, this Reactor is unsorted. 
But this feature is temporary and will be removed by \"\n                \"2024.\"\n            )\n\n        if cs[CONF_GROW_TO_FULL_CORE_AFTER_LOAD] and not root.core.isFullCore:\n            root.core.growToFullCore(cs)\n\n        return root\n\n    def loadReadOnly(self, cycle, node, statePointName=None):\n        \"\"\"Load a new reactor, in read-only mode from a DB at (cycle, node).\n\n        Parameters\n        ----------\n        cycle : int\n            Cycle number\n        node : int\n            Time node. If value is negative, will be indexed from EOC backwards like a list.\n        statePointName : str, optional\n            Statepoint name (e.g., \"special\" for \"c00n00-special/\")\n\n        Returns\n        -------\n        Reactor\n            The top-level object stored in the database; a Reactor.\n        \"\"\"\n        r = self.load(cycle, node, statePointName=statePointName, allowMissing=True)\n        self._setParamsBeforeFreezing(r)\n        makeParametersReadOnly(r)\n        return r\n\n    @staticmethod\n    def _setParamsBeforeFreezing(r: Reactor):\n        \"\"\"Set some special case parameters before they are made read-only.\"\"\"\n        for child in r.iterChildren(deep=True, predicate=lambda c: isinstance(c, Component)):\n            # calling Component.getVolume() sets the volume parameter\n            child.getVolume()\n\n    @staticmethod\n    def _assignBlueprintsParams(blueprints, groupedComps):\n        for compType, designs in (\n            (Block, blueprints.blockDesigns),\n            (Assembly, blueprints.assemDesigns),\n        ):\n            paramsToSet = {pDef.name for pDef in compType.pDefs.inCategory(parameters.Category.assignInBlueprints)}\n\n            for comp in groupedComps[compType]:\n                design = designs[comp.p.type]\n                for pName in paramsToSet:\n                    val = getattr(design, pName)\n                    if val is not None:\n                        comp.p[pName] = 
val\n\n    def _compose(self, comps, cs, parent=None):\n        \"\"\"Given a flat collection of all of the ArmiObjects in the model, reconstitute the hierarchy.\"\"\"\n        comp, _, numChildren, location, locationType = next(comps)\n\n        # attach the parent early, if provided; some cases need the parent attached for the rest of _compose to work\n        # properly.\n        comp.parent = parent\n\n        # The Reactor adds a Core child by default, this is not ideal\n        for spontaneousChild in list(comp):\n            comp.remove(spontaneousChild)\n\n        if isinstance(comp, Core):\n            pass\n        elif isinstance(comp, Assembly):\n            # Assemblies force their name to be something based on assemNum. When the assembly is created it gets a new\n            # assemNum, and throws out the correct name read from the DB.\n            comp.name = comp.makeNameFromAssemNum(comp.p.assemNum)\n            comp.lastLocationLabel = Assembly.DATABASE\n\n        # set the spatialLocators on each component\n        if location is not None:\n            if parent is not None and parent.spatialGrid is not None:\n                if locationType != LOC_COORD:\n                    # We can directly index into the spatial grid for IndexLocation and MultiIndexLocators to get\n                    # equivalent spatial locators\n                    comp.spatialLocator = parent.spatialGrid[location]\n                else:\n                    comp.spatialLocator = grids.CoordinateLocation(\n                        location[0], location[1], location[2], parent.spatialGrid\n                    )\n            else:\n                comp.spatialLocator = grids.CoordinateLocation(location[0], location[1], location[2], None)\n\n        # Need to keep a collection of Component instances for linked dimension resolution, before they can be add()ed\n        # to their parents. 
Not just filtering out of `children`, since resolveLinkedDims() needs a dict\n        childComponents = collections.OrderedDict()\n        children = []\n\n        for _ in range(numChildren):\n            child = self._compose(comps, cs, parent=comp)\n            children.append(child)\n            if isinstance(child, Component):\n                childComponents[child.name] = child\n\n        for _childName, child in childComponents.items():\n            child.resolveLinkedDims(childComponents)\n\n        for child in children:\n            comp.add(child)\n\n        if isinstance(comp, Core):\n            comp.processLoading(cs, dbLoad=True)\n        elif isinstance(comp, Assembly):\n            comp.calculateZCoords()\n        elif isinstance(comp, Component):\n            comp.finalizeLoadingFromDB()\n\n        return comp\n\n    @staticmethod\n    def _getArrayShape(arr: Union[np.ndarray, List, Tuple]):\n        \"\"\"Get the shape of a np.ndarray, list, or tuple.\"\"\"\n        if isinstance(arr, np.ndarray):\n            return arr.shape\n        elif isinstance(arr, (list, tuple)):\n            return (len(arr),)\n        else:\n            # not a list, tuple, or array (likely int, float, or None)\n            return 1\n\n    def _writeParams(self, h5group, comps) -> tuple:\n        c = comps[0]\n        groupName = c.__class__.__name__\n        if groupName not in h5group:\n            # Only create the group if it doesn't already exist. This happens when re-writing params in the same time\n            # node (e.g. 
something changed between EveryNode and EOC).\n            g = h5group.create_group(groupName, track_order=True)\n        else:\n            g = h5group[groupName]\n\n        for paramDef in c.p.paramDefs.toWriteToDB():\n            attrs = {}\n\n            if hasattr(c, \"DIMENSION_NAMES\") and paramDef.name in c.DIMENSION_NAMES:\n                linkedDims = []\n                data = []\n\n                for _, c in enumerate(comps):\n                    val = c.p[paramDef.name]\n                    if isinstance(val, tuple):\n                        linkedDims.append(\"{}.{}\".format(val[0].name, val[1]))\n                        data.append(val[0].getDimension(val[1]))\n                    else:\n                        linkedDims.append(\"\")\n                        data.append(val)\n\n                data = np.array(data)\n                if any(linkedDims):\n                    attrs[\"linkedDims\"] = np.array(linkedDims).astype(\"S\")\n            else:\n                # NOTE: after loading, the previously unset values will be defaulted\n                temp = [c.p.get(paramDef.name, paramDef.default) for c in comps]\n                if paramDef.serializer is not None:\n                    data, sAttrs = paramDef.serializer.pack(temp)\n                    assert data.dtype.kind != \"O\", \"{} failed to convert {} to a numpy-supported type.\".format(\n                        paramDef.serializer.__name__, paramDef.name\n                    )\n                    attrs.update(sAttrs)\n                    attrs[_SERIALIZER_NAME] = paramDef.serializer.__name__\n                    attrs[_SERIALIZER_VERSION] = paramDef.serializer.version\n                else:\n                    # check if temp is a jagged array\n                    if any(isinstance(x, (np.ndarray, list)) for x in temp):\n                        jagged = len(set([self._getArrayShape(x) for x in temp])) != 1\n                    else:\n                        jagged = False\n               
     data = JaggedArray(temp, paramDef.name) if jagged else np.array(temp)\n                    del temp\n\n            # - Check to see if the array is jagged. If so, flatten, store the data offsets and array shapes, and None\n            #   locations as attrs.\n            # - If not jagged, all top-level ndarrays are the same shape, so it is easier to replace Nones with ndarrays\n            #   filled with special values.\n            if isinstance(data, JaggedArray):\n                data, specialAttrs = packSpecialData(data, paramDef.name)\n                attrs.update(specialAttrs)\n\n            else:  # np.ndarray\n                # Convert Unicode to byte-string\n                if data.dtype.kind == \"U\":\n                    data = data.astype(\"S\")\n\n                if data.dtype.kind == \"O\":\n                    # Something was added to the data array that caused np to want to treat it as a general-purpose\n                    # Object array. This usually happens because:\n                    # - the data contain NoDefaults\n                    # - the data contain one or more Nones,\n                    # - the data contain special types like tuples, dicts, etc\n                    # - there is some sort of honest-to-goodness weird object\n                    # We want to support the first two cases with minimal intrusion, since these should be pretty easy\n                    # to faithfully represent in the db. The last case isn't really worth supporting.\n                    if parameters.NoDefault in data:\n                        data = None\n                    else:\n                        data, specialAttrs = packSpecialData(data, paramDef.name)\n                        attrs.update(specialAttrs)\n\n            if data is None:\n                continue\n\n            try:\n                if paramDef.name in g:\n                    raise ValueError(f\"`{paramDef.name}` was already in `{g}`. 
This time node should have been empty\")\n\n                dataset = g.create_dataset(paramDef.name, data=data, compression=\"gzip\", track_order=True)\n                if any(attrs):\n                    Database._writeAttrs(dataset, h5group, attrs)\n            except Exception:\n                runLog.error(f\"Failed to write {paramDef.name} to database. Data: {data}\")\n                raise\n\n        if isinstance(c, Block):\n            self._addHomogenizedNumberDensityParams(comps, g)\n\n    @staticmethod\n    def _addHomogenizedNumberDensityParams(blocks, h5group):\n        \"\"\"\n        Create on-the-fly block homog. number density params for XTVIEW viewing.\n\n        See Also\n        --------\n        collectBlockNumberDensities\n        \"\"\"\n        nDens = collectBlockNumberDensities(blocks)\n\n        for nucName, numDens in nDens.items():\n            h5group.create_dataset(nucName, data=numDens, compression=\"gzip\", track_order=True)\n\n    @staticmethod\n    def _readParams(h5group, compTypeName, comps, allowMissing=False):\n        g = h5group[compTypeName]\n\n        renames = getApp().getParamRenames()\n\n        pDefs = comps[0].pDefs\n\n        # this can also be made faster by specializing the method by type\n        for paramName, dataSet in g.items():\n            # Honor historical databases where the parameters may have changed names since.\n            while paramName in renames:\n                paramName = renames[paramName]\n\n            try:\n                pDef = pDefs[paramName]\n            except KeyError:\n                if re.match(r\"^n[A-Z][a-z]?\\d*\", paramName):\n                    # This is a temporary viz param (number density) made by _addHomogenizedNumberDensityParams ignore\n                    # it safely\n                    continue\n                else:\n                    # If a parameter exists in the database but not in the application reading it, we can technically\n                    # keep 
going. Since this may lead to potential correctness issues, raise a warning\n                    if allowMissing:\n                        runLog.warning(\n                            \"Found `{}` parameter `{}` in the database, which is not defined. Ignoring it.\".format(\n                                compTypeName, paramName\n                            )\n                        )\n                        continue\n                    else:\n                        raise\n\n            data = dataSet[:]\n            attrs = Database._resolveAttrs(dataSet.attrs, h5group)\n\n            if pDef.serializer is not None:\n                assert _SERIALIZER_NAME in dataSet.attrs\n                assert dataSet.attrs[_SERIALIZER_NAME] == pDef.serializer.__name__\n                assert _SERIALIZER_VERSION in dataSet.attrs\n\n                data = np.array(pDef.serializer.unpack(data, dataSet.attrs[_SERIALIZER_VERSION], attrs))\n\n            # nuclides are a special case where we want to keep in np.bytes_ format\n            if data.dtype.type is np.bytes_ and \"nuclides\" not in paramName.lower():\n                data = np.char.decode(data)\n\n            if attrs.get(\"specialFormatting\", False):\n                data = unpackSpecialData(data, attrs, paramName)\n\n            linkedDims = []\n            if \"linkedDims\" in attrs:\n                linkedDims = np.char.decode(attrs[\"linkedDims\"])\n\n            unpackedData = data.tolist()\n            if len(comps) != len(unpackedData):\n                msg = (\n                    \"While unpacking special data for {}, encountered composites and parameter \"\n                    \"data with unmatched sizes.\\nLength of composites list = {}\\nLength of data \"\n                    \"list = {}\\nThis could indicate an error in data unpacking, which could \"\n                    \"result in faulty data on the resulting reactor model.\".format(\n                        paramName, len(comps), len(unpackedData)\n  
                  )\n                )\n                runLog.error(msg)\n                raise ValueError(msg)\n\n            if paramName == \"numberDensities\" and attrs.get(\"dict\", False):\n                Database._applyComponentNumberDensitiesMigration(comps, unpackedData)\n            else:\n                # iterating of np is not fast...\n                for c, val, linkedDim in itertools.zip_longest(comps, unpackedData, linkedDims, fillvalue=\"\"):\n                    try:\n                        if linkedDim != \"\":\n                            c.p[paramName] = linkedDim\n                        else:\n                            c.p[paramName] = val\n                    except AssertionError as ae:\n                        # happens when a param was deprecated but being loaded from old DB\n                        runLog.warning(\n                            f\"{str(ae)}\\nSkipping load of invalid param `{paramName}` (possibly loading from old DB)\\n\"\n                        )\n\n    def getHistoryByLocation(\n        self,\n        comp: ArmiObject,\n        params: Optional[List[str]] = None,\n        timeSteps: Optional[Sequence[Tuple[int, int]]] = None,\n    ) -> History:\n        \"\"\"Get the parameter histories at a specific location.\"\"\"\n        return self.getHistoriesByLocation([comp], params=params, timeSteps=timeSteps)[comp]\n\n    def getHistoriesByLocation(\n        self,\n        comps: Sequence[ArmiObject],\n        params: Optional[List[str]] = None,\n        timeSteps: Optional[Sequence[Tuple[int, int]]] = None,\n    ) -> Histories:\n        \"\"\"\n        Get the parameter histories at specific locations.\n\n        This has a number of limitations, which should in practice not be too limiting:\n         - The passed objects must have IndexLocations. 
This type of operation doesn't make much sense otherwise.\n         - The passed objects must exist in a hierarchy that leads to a Core object, which serves as an anchor that can\n           fully define all index locations. This could possibly be made more general by extending grids, but that gets\n           a little more complicated.\n         - All requested objects must exist under the **same** anchor object, and at the same depth below it.\n         - All requested objects must have the same type.\n\n        Parameters\n        ----------\n        comps : list of ArmiObject\n            The components/composites that currently occupy the location that you want histories at. ArmiObjects are\n            passed, rather than locations, because this makes it easier to figure out things related to layout.\n        params : List of str, optional\n            The parameter names for the parameters that we want the history of. If None, all parameter history is given.\n        timeSteps : List of (cycle, node) tuples, optional\n            The time nodes that you want history for. If None, all available time nodes will be returned.\n        \"\"\"\n        if self.versionMajor != 3:\n            raise ValueError(f\"This version of ARMI only supports version 3 of the ARMI DB, found {self.versionMajor}.\")\n        elif self.versionMinor < 4:\n            raise ValueError(\n                \"Location-based histories are only supported for db version 3.4 and greater. 
This database is version \"\n                f\"{self.versionMajor}, {self.versionMinor}.\"\n            )\n\n        locations = [c.spatialLocator.getCompleteIndices() for c in comps]\n\n        histData: Histories = {c: collections.defaultdict(collections.OrderedDict) for c in comps}\n\n        # Check our assumptions about the passed locations: All locations must have the same parent and bear the same\n        # relationship to the anchor object.\n        anchors = {obj.getAncestorAndDistance(lambda a: isinstance(a, Core)) for obj in comps}\n\n        if len(anchors) != 1:\n            raise ValueError(\n                \"The passed objects do not have the same anchor or distance to that anchor; encountered the following: \"\n                f\"{anchors}\"\n            )\n\n        anchorInfo = anchors.pop()\n        if anchorInfo is not None:\n            anchor, anchorDistance = anchorInfo\n        else:\n            raise ValueError(\"Could not determine an anchor object for the passed components\")\n\n        anchorSerialNum = anchor.p.serialNum\n\n        # All objects of the same type\n        objectTypes = {type(obj) for obj in comps}\n        if len(objectTypes) != 1:\n            raise TypeError(f\"The passed objects must be the same type; got objects of types `{objectTypes}`\")\n\n        compType = objectTypes.pop()\n        objClassName = compType.__name__\n\n        locToComp = {c.spatialLocator.getCompleteIndices(): c for c in comps}\n\n        for h5TimeNodeGroup in self.genTimeStepGroups(timeSteps):\n            if \"layout\" not in h5TimeNodeGroup:\n                # Layout hasn't been written for this time step, so we can't get anything useful here. 
Perhaps the\n                # current value is of use, in which case the DatabaseInterface should be used.\n                continue\n\n            cycle = h5TimeNodeGroup.attrs[\"cycle\"]\n            timeNode = h5TimeNodeGroup.attrs[\"timeNode\"]\n            layout = Layout((self.versionMajor, self.versionMinor), h5group=h5TimeNodeGroup)\n            ancestors = layout.computeAncestors(layout.serialNum, layout.numChildren, depth=anchorDistance)\n\n            lLocation = layout.location\n            # filter for objects that live under the desired ancestor and at a desired location\n            objectIndicesInLayout = np.array(\n                [\n                    i\n                    for i, (ancestor, loc) in enumerate(zip(ancestors, lLocation))\n                    if ancestor == anchorSerialNum and loc in locations\n                ]\n            )\n\n            # This could also be way more efficient if lLocation were a numpy array\n            objectLocationsInLayout = [lLocation[i] for i in objectIndicesInLayout]\n            objectIndicesInData = np.array(layout.indexInData)[objectIndicesInLayout].tolist()\n\n            try:\n                h5GroupForType = h5TimeNodeGroup[objClassName]\n            except KeyError as ee:\n                runLog.error(f\"{objClassName} not found in {h5TimeNodeGroup} of {self}\")\n                raise ee\n\n            for paramName in params or h5GroupForType.keys():\n                if paramName == \"location\":\n                    # location is special, since it is stored in layout/\n                    data = np.array(layout.location)[objectIndicesInLayout]\n                elif paramName in h5GroupForType:\n                    dataSet = h5GroupForType[paramName]\n                    try:\n                        data = dataSet[objectIndicesInData]\n                    except:\n                        runLog.error(f\"Failed to load index {objectIndicesInData} from {dataSet}@{(cycle, timeNode)}\")\n           
             raise\n\n                    if data.dtype.type is np.bytes_:\n                        data = np.char.decode(data)\n\n                    if dataSet.attrs.get(\"specialFormatting\", False):\n                        if dataSet.attrs.get(\"nones\", False):\n                            data = replaceNonsenseWithNones(data, paramName)\n                        else:\n                            raise ValueError(\n                                \"History tracking for non-None, special-formatted parameters is not supported: \"\n                                \"{}, {}\".format(paramName, {k: v for k, v in dataSet.attrs.items()})\n                            )\n                else:\n                    # Nothing in the database for this param, so use the default value\n                    data = np.repeat(\n                        parameters.byNameAndType(paramName, compType).default,\n                        len(comps),\n                    )\n\n                # store data to the appropriate comps. 
This is where taking components as the argument (rather than\n                # locations) is a little bit peculiar.\n                #\n                # At this point, `data` are arranged by the order of elements in `objectIndicesInData`, which\n                # corresponds to the order of `objectIndicesInLayout`\n                for loc, val in zip(objectLocationsInLayout, data.tolist()):\n                    comp = locToComp[loc]\n                    histData[comp][paramName][cycle, timeNode] = val\n\n        return histData\n\n    def getHistory(\n        self,\n        comp: ArmiObject,\n        params: Optional[Sequence[str]] = None,\n        timeSteps: Optional[Sequence[Tuple[int, int]]] = None,\n    ) -> History:\n        \"\"\"\n        Get parameter history for a single ARMI Object.\n\n        Parameters\n        ----------\n        comps\n            An individual ArmiObject\n        params\n            parameters to gather\n\n        Returns\n        -------\n        dict\n            Dictionary of str/list pairs.\n        \"\"\"\n        return self.getHistories([comp], params, timeSteps)[comp]\n\n    def getHistories(\n        self,\n        comps: Sequence[ArmiObject],\n        params: Optional[Sequence[str]] = None,\n        timeSteps: Optional[Sequence[Tuple[int, int]]] = None,\n    ) -> Histories:\n        \"\"\"\n        Get the parameter histories for a sequence of ARMI Objects.\n\n        This implementation is unaware of the state of the reactor outside of the database itself, and is therefore not\n        usually what client code should be calling directly during normal ARMI operation. It only knows about historical\n        data that have actually been written to the database. 
Usually one wants to be able to get historical, plus\n        current data, for which the similar method on the DatabaseInterface may be more useful.\n\n        Parameters\n        ----------\n        comps\n            Something that is iterable multiple times\n        params\n            parameters to gather.\n        timeSteps\n            Selection of time nodes to get data for. If omitted, return full history\n\n        Returns\n        -------\n        dict\n            Dictionary ArmiObject (input): dict of str/list pairs containing ((cycle, node), value).\n        \"\"\"\n        histData: Histories = {c: collections.defaultdict(collections.OrderedDict) for c in comps}\n        types = {c.__class__ for c in comps}\n        compsByTypeThenSerialNum: Dict[Type[ArmiObject], Dict[int, ArmiObject]] = {t: dict() for t in types}\n\n        for c in comps:\n            compsByTypeThenSerialNum[c.__class__][c.p.serialNum] = c\n\n        for h5TimeNodeGroup in self.genTimeStepGroups(timeSteps):\n            if \"layout\" not in h5TimeNodeGroup:\n                # Layout hasn't been written for this time step, so whatever is in there didn't come from the\n                # DatabaseInterface. 
Probably because it's the current time step and something has created the group to\n                # store aux data\n                continue\n\n            # might save as int or np.int64, so forcing int keeps things predictable\n            cycle = int(h5TimeNodeGroup.attrs[\"cycle\"])\n            timeNode = int(h5TimeNodeGroup.attrs[\"timeNode\"])\n            layout = Layout((self.versionMajor, self.versionMinor), h5group=h5TimeNodeGroup)\n\n            for compType, compsBySerialNum in compsByTypeThenSerialNum.items():\n                compTypeName = compType.__name__\n                try:\n                    h5GroupForType = h5TimeNodeGroup[compTypeName]\n                except KeyError as ee:\n                    runLog.error(\"{} not found in {} of {}\".format(compTypeName, h5TimeNodeGroup, self))\n                    raise ee\n                layoutIndicesForType = np.where(layout.type == compTypeName)[0]\n                serialNumsForType = layout.serialNum[layoutIndicesForType].tolist()\n                layoutIndexInData = layout.indexInData[layoutIndicesForType].tolist()\n\n                indexInData = []\n                reorderedComps = []\n\n                for ii, sn in zip(layoutIndexInData, serialNumsForType):\n                    d = compsBySerialNum.get(sn, None)\n                    if d is not None:\n                        indexInData.append(ii)\n                        reorderedComps.append(d)\n\n                if not indexInData:\n                    continue\n\n                # note this is very similar to _readParams but there are some important differences.\n                # 1) we are not assigning to p[paramName]\n                # 2) not using linkedDims at all\n                # 3) not performing parameter renaming. 
This may become necessary\n                for paramName in params or h5GroupForType.keys():\n                    if paramName == \"location\":\n                        locs = []\n                        for id in indexInData:\n                            locs.append((layout.location[layoutIndicesForType[id]]))\n                        data = np.array(locs)\n                    elif paramName in h5GroupForType:\n                        dataSet = h5GroupForType[paramName]\n                        try:\n                            data = dataSet[indexInData]\n                        except:\n                            runLog.error(\n                                \"Failed to load index {} from {}@{}\".format(indexInData, dataSet, (cycle, timeNode))\n                            )\n                            raise\n\n                        if data.dtype.type is np.bytes_:\n                            data = np.char.decode(data)\n\n                        if dataSet.attrs.get(\"specialFormatting\", False):\n                            if dataSet.attrs.get(\"nones\", False):\n                                data = replaceNonsenseWithNones(data, paramName)\n                            else:\n                                raise ValueError(\n                                    \"History tracking for non-none special formatting not supported: {}, {}\".format(\n                                        paramName,\n                                        {k: v for k, v in dataSet.attrs.items()},\n                                    )\n                                )\n                    else:\n                        # Nothing in the database, so use the default value\n                        data = np.repeat(\n                            parameters.byNameAndType(paramName, compType).default,\n                            len(reorderedComps),\n                        )\n\n                    # iterating of np is not fast..\n                    for c, val in 
zip(reorderedComps, data.tolist()):\n                        if paramName == \"location\":\n                            val = tuple(val)\n                        elif isinstance(val, list):\n                            val = np.array(val)\n\n                        histData[c][paramName][cycle, timeNode] = val\n\n        r = comps[0].getAncestor(lambda c: isinstance(c, Reactor))\n        cycleNode = r.p.cycle, r.p.timeNode\n        for c, paramHistories in histData.items():\n            for paramName, hist in paramHistories.items():\n                if cycleNode not in hist:\n                    try:\n                        hist[cycleNode] = c.p[paramName]\n                    except Exception:\n                        if paramName == \"location\":\n                            hist[cycleNode] = tuple(c.spatialLocator.indices)\n\n        return histData\n\n    @staticmethod\n    def _writeAttrs(obj, group, attrs):\n        \"\"\"\n        Handle safely writing attributes to a dataset, handling large data if necessary.\n\n        This will attempt to store attributes directly onto an HDF5 object if possible, falling back to proper datasets\n        and reference attributes if necessary. This is needed because HDF5 tries to fit attributes into the object\n        header, which has limited space. If an attribute is too large, h5py raises a RuntimeError. 
In such cases, this\n        will store the attribute data in a proper dataset and place a reference to that dataset in the attribute\n        instead.\n\n        In practice, this takes ``linkedDims`` attrs from a particular component type (like ``c00n00/Circle/id``) and\n        stores them in new datasets (like ``c00n00/attrs/1_linkedDims``, ``c00n00/attrs/2_linkedDims``) and then sets\n        the object's attrs to links to those datasets.\n        \"\"\"\n        for key, value in attrs.items():\n            try:\n                obj.attrs[key] = value\n            except RuntimeError as err:\n                if \"object header message is too large\" not in err.args[0]:\n                    raise\n\n                runLog.info(f\"Storing attribute `{key}` for `{obj}` into it's own dataset within `{group}/attrs`\")\n\n                if \"attrs\" not in group:\n                    attrGroup = group.create_group(\"attrs\")\n                else:\n                    attrGroup = group[\"attrs\"]\n                dataName = str(len(attrGroup)) + \"_\" + key\n                attrGroup[dataName] = value\n\n                # using a soft link here allows us to cheaply copy time nodes without needing to crawl through and\n                # update object references.\n                linkName = attrGroup[dataName].name\n                obj.attrs[key] = \"@{}\".format(linkName)\n\n    @staticmethod\n    def _resolveAttrs(attrs, group):\n        \"\"\"\n        Reverse the action of _writeAttrs.\n\n        This reads actual attrs and looks for the real data in the datasets that the attrs were pointing to.\n        \"\"\"\n        attr_link = re.compile(\"^@(.*)$\")\n\n        resolved = {}\n        for key, val in attrs.items():\n            try:\n                if isinstance(val, h5py.h5r.Reference):\n                    # Old style object reference. 
If this cannot be dereferenced, it is likely because mergeHistory was\n                    # used to get the current database, which does not preserve references.\n                    resolved[key] = group[val]\n                elif isinstance(val, str):\n                    m = attr_link.match(val)\n                    if m:\n                        # dereference the path to get the data out of the dataset.\n                        resolved[key] = group[m.group(1)][()]\n                    else:\n                        resolved[key] = val\n                else:\n                    resolved[key] = val\n            except ValueError:\n                runLog.error(f\"HDF error loading {key} : {val}\\nGroup: {group}\")\n                raise\n\n        return resolved\n\n    @staticmethod\n    def _applyComponentNumberDensitiesMigration(comps, unpackedData):\n        \"\"\"\n        Special migration from <= v0.5.1 component numberDensities parameter data type.\n\n        old format: dict[str: float]\n        new format: two numpy arrays\n        - nuclides = np.array(dtype=\"S6\")\n        - numberDensities = np.array(dtype=np.float64)\n        \"\"\"\n        for c, ndensDict in zip(comps, unpackedData):\n            nuclides = np.array(list(ndensDict.keys()), dtype=\"S6\")\n            numberDensities = np.array(list(ndensDict.values()), dtype=np.float64)\n            c.p.nuclides = nuclides\n            c.p.numberDensities = numberDensities\n\n    @staticmethod\n    def getCycleNodeAtTime(dbPath, startTime, endTime, errorIfNotExactlyOne=True):\n        \"\"\"Given the path to an ARMI database file and a start and end time (in years), return the full set of all time\n        nodes that correspond to that time period in the database.\n\n        Parameters\n        ----------\n        dbPath : str\n            File path to an ARMI database.\n        startTime : int\n            In years, start of the desired interval.\n        endTime : int\n            In years, 
end of the desired interval.\n        errorIfNotExactlyOne : boolean\n            Raise an error if more than one cycle/node combination is returned. Default is True.\n\n        Returns\n        -------\n        list of strings\n            A list of strings to the desired time interval, e.g.: [\"c01n08\", \"c14n18EOL\"]\n        \"\"\"\n        # basic sanity checks\n        assert startTime >= 0.0, f\"The start time cannot be negative: {startTime}.\"\n        assert endTime >= startTime, f\"The end time ({endTime}) is not greater than the start time ({startTime}).\"\n\n        # open the H5 file directly\n        with h5py.File(dbPath, \"r\") as h5:\n            # read time steps in H5 file\n            thisTime = 0.0\n            cycleNodes = []\n            for h5Key in h5.keys():\n                if h5Key == \"inputs\":\n                    continue\n\n                thisTime = h5[h5Key][\"Reactor\"][\"time\"][0]\n                if thisTime >= endTime:\n                    cycleNodes.append(h5Key)\n                    break\n                elif thisTime >= startTime:\n                    cycleNodes.append(h5Key)\n\n        # more validation\n        if not cycleNodes:\n            raise ValueError(f\"Provided start time ({startTime}) was greater than the modeled period: {thisTime}.\")\n        elif errorIfNotExactlyOne and len(cycleNodes) != 1:\n            raise ValueError(f\"Did not find exactly one cycle/node pair: {cycleNodes}\")\n\n        return cycleNodes\n\n\ndef packSpecialData(\n    arrayData: [np.ndarray, JaggedArray], paramName: str\n) -> Tuple[Optional[np.ndarray], Dict[str, Any]]:\n    \"\"\"\n    Reduce data that wouldn't otherwise play nicely with HDF5/numpy arrays to a format that will.\n\n    This is the main entry point for conforming \"strange\" data into something that will both fit\n    into a numpy array/HDF5 dataset, and be recoverable to its original-ish state when reading it\n    back in. 
This is accomplished by detecting a handful of known offenders and using various HDF5\n    attributes to store necessary auxiliary data. It is important to keep in mind that the data that\n    is passed in has already been converted to a numpy array, so the top dimension is always\n    representing the collection of composites that are storing the parameters. For instance, if we\n    are dealing with a Block parameter, the first index in the numpy array of data is the block\n    index; so if each block has a parameter that is a dictionary, ``data`` would be a ndarray,\n    where each element is a dictionary. This routine supports a number of different things:\n\n    * Dict[str, float]: These are stored by finding the set of all keys for all instances, and\n      storing those keys as a list in an attribute. The data themselves are stored as arrays indexed\n      by object, then key index. Dictionaries lacking data for a key store a nan in it's place. This\n      will work well in instances where most objects have data for most keys.\n    * Jagged arrays: These are stored by concatenating all of the data into a single, one-\n      dimensional array, and storing attributes to describe the shapes of each object's data, and an\n      offset into the beginning of each object's data.\n    * Arrays with ``None`` in them: These are stored by replacing each instance of ``None`` with a\n      magical value that shouldn't be encountered in realistic scenarios.\n\n    Parameters\n    ----------\n    arrayData\n        An ndarray or JaggedArray object storing the data that we want to stuff into the database.\n        If the data is jagged, a special JaggedArray instance is passed in, which contains a 1D\n        array with offsets and shapes.\n    paramName\n        The parameter name that we are trying to store. 
This is mostly used for diagnostics.\n\n    See Also\n    --------\n    unpackSpecialData\n    \"\"\"\n    if isinstance(arrayData, JaggedArray):\n        data = arrayData.flattenedArray\n    else:\n        # Check to make sure that we even need to do this. If the numpy data type is not \"O\",\n        # chances are we have nice, clean data.\n        if arrayData.dtype != \"O\":\n            return arrayData, {}\n        else:\n            data = arrayData\n\n    attrs: Dict[str, Any] = {\"specialFormatting\": True}\n\n    # make a copy of the data, so that the original is unchanged\n    data = copy.copy(data)\n\n    # Find locations of Nones.\n    nones = np.where([d is None for d in data])[0]\n    if len(nones) == data.shape[0]:\n        # Everything is None, so why bother?\n        return None, attrs\n\n    if len(nones) > 0:\n        attrs[\"nones\"] = True\n\n    # Pack different types of data\n    if any(isinstance(d, dict) for d in data):\n        # We're assuming that a dict is {str: float}.\n        attrs[\"dict\"] = True\n        keys = sorted({k for d in data for k in d})\n        data = np.array([[d.get(k, np.nan) for k in keys] for d in data])\n        if data.dtype == \"O\":\n            raise TypeError(f\"Unable to coerce dictionary data into usable numpy array for {paramName}\")\n        # We store the union of all of the keys for all of the objects as a special \"keys\"\n        # attribute, and store a value for all of those keys for all objects, whether or not there\n        # is actually data associated with that key\n        attrs[\"keys\"] = np.array(keys).astype(\"S\")\n\n        return data, attrs\n    elif isinstance(arrayData, JaggedArray):\n        attrs[\"jagged\"] = True\n        attrs[\"offsets\"] = arrayData.offsets\n        attrs[\"shapes\"] = arrayData.shapes\n        attrs[\"noneLocations\"] = arrayData.nones\n        return data, attrs\n\n    # conform non-numpy arrays to numpy\n    for i, val in enumerate(data):\n        if 
isinstance(val, (list, tuple)):\n            data[i] = np.array(val)\n\n    if not any(isinstance(d, np.ndarray) for d in data):\n        # looks like 1-D plain-old-data\n        data = replaceNonesWithNonsense(data, paramName, nones)\n        return data, attrs\n    elif any(isinstance(d, (tuple, list, np.ndarray)) for d in data):\n        data = replaceNonesWithNonsense(data, paramName, nones)\n        return data, attrs\n\n    if len(nones) == 0:\n        raise TypeError(f\"Cannot write {paramName} to the database, it did not resolve to a numpy/HDF5 type.\")\n\n    runLog.error(f\"Data unable to find special none value: {data}\")\n    raise TypeError(f\"Failed to process special data for {paramName}\")\n\n\ndef unpackSpecialData(data: np.ndarray, attrs, paramName: str) -> np.ndarray:\n    \"\"\"\n    Extract data from a specially-formatted HDF5 dataset into a numpy array.\n\n    This should invert the operations performed by :py:func:`packSpecialData`.\n\n    Parameters\n    ----------\n    data\n        Specially-formatted data array straight from the database.\n    attrs\n        The attributes associated with the dataset that contained the data.\n    paramName\n        The name of the parameter that is being unpacked. 
Only used for diagnostics.\n\n    Returns\n    -------\n    np.ndarray\n        An ndarray containing the closest possible representation of the data that was originally written to the\n        database.\n\n    See Also\n    --------\n    packSpecialData\n    \"\"\"\n    if not attrs.get(\"specialFormatting\", False):\n        # The data were not subjected to any special formatting; short circuit.\n        assert data.dtype != \"O\"\n        return data\n\n    unpackedData: List[Any]\n    if attrs.get(\"nones\", False) and not attrs.get(\"jagged\", False):\n        data = replaceNonsenseWithNones(data, paramName)\n        return data\n    if attrs.get(\"jagged\", False):\n        offsets = attrs[\"offsets\"]\n        shapes = attrs[\"shapes\"]\n        nones = attrs[\"noneLocations\"]\n        data = JaggedArray.fromH5(data, offsets, shapes, nones, data.dtype, paramName)\n        return data\n    if attrs.get(\"dict\", False):\n        keys = np.char.decode(attrs[\"keys\"])\n        unpackedData = []\n        assert data.ndim == 2\n        for d in data:\n            unpackedData.append({key: value for key, value in zip(keys, d) if not np.isnan(value)})\n        return np.array(unpackedData)\n\n    raise ValueError(\n        \"Do not recognize the type of special formatting that was applied to {}. Attrs: {}\".format(\n            paramName, {k: v for k, v in attrs.items()}\n        )\n    )\n\n\ndef collectBlockNumberDensities(blocks) -> Dict[str, np.ndarray]:\n    \"\"\"\n    Collect block-by-block homogenized number densities for each nuclide.\n\n    Homogenize the component-level to the block level. 
These are written to the database and useful for visualization.\n    \"\"\"\n    # find the NuclidesBases object on the Reactor\n    nuclideBases = None\n    for b in blocks:\n        if b.nuclideBases is not None:\n            nuclideBases = b.nuclideBases\n            break\n\n    if not nuclideBases:\n        return {}\n\n    nucNames = sorted(list(set(nucName for b in blocks for nucName in b.getNuclides())))\n    nucBases = [nuclideBases.byName[nn] for nn in nucNames]\n    # It's faster to loop over blocks first and get all number densities from each than it is to get one nuclide at a\n    # time from each block because of area fraction calculations. So we use some RAM here instead.\n    nucDensityMatrix = []\n    for block in blocks:\n        nucDensityMatrix.append(block.getNuclideNumberDensities(nucNames))\n    nucDensityMatrix = np.array(nucDensityMatrix)\n\n    dataDict = dict()\n    for ni, nb in enumerate(nucBases):\n        # the nth column is a vector of nuclide densities for this nuclide across all blocks\n        dataDict[nb.getDatabaseName()] = nucDensityMatrix[:, ni]\n\n    return dataDict\n"
  },
  {
    "path": "armi/bookkeeping/db/databaseInterface.py",
    "content": "# Copyright 2022 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThe database interface provides a way to save the reactor state to a file, throughout\na simulation.\n\"\"\"\n\nimport copy\nimport os\nimport pathlib\nimport time\nfrom typing import (\n    MutableSequence,\n    Optional,\n    Sequence,\n    Tuple,\n)\n\nfrom armi import context, interfaces, runLog\nfrom armi.bookkeeping.db.database import Database, getH5GroupName\nfrom armi.bookkeeping.db.typedefs import Histories, History\nfrom armi.reactor.composites import ArmiObject\nfrom armi.reactor.parameters import parameterDefinitions\nfrom armi.settings.fwSettings.databaseSettings import (\n    CONF_FORCE_DB_PARAMS,\n    CONF_SYNC_AFTER_WRITE,\n)\nfrom armi.utils import getPreviousTimeNode, getStepLengths\n\nORDER = interfaces.STACK_ORDER.BOOKKEEPING\n\n\ndef describeInterfaces(cs):\n    \"\"\"Function for exposing interface(s) to other code.\"\"\"\n    return (DatabaseInterface, {\"enabled\": cs[\"db\"]})\n\n\nclass DatabaseInterface(interfaces.Interface):\n    \"\"\"\n    Handles interactions between the ARMI data model and the persistent data storage\n    system.\n\n    This reads/writes the ARMI state to/from the database and helps derive state\n    information that can be derived.\n    \"\"\"\n\n    name = \"database\"\n\n    def __init__(self, r, cs):\n        interfaces.Interface.__init__(self, r, cs)\n        self._db = None\n        self._dbPath: 
Optional[pathlib.Path] = None\n\n        if cs[CONF_FORCE_DB_PARAMS]:\n            toSet = {paramName: set() for paramName in cs[CONF_FORCE_DB_PARAMS]}\n            for (name, _), pDef in parameterDefinitions.ALL_DEFINITIONS.items():\n                if name in toSet.keys():\n                    toSet[name].add(pDef)\n\n            for name, pDefs in toSet.items():\n                runLog.info(\"Forcing parameter {} to be written to the database, per user input\".format(name))\n                for pDef in pDefs:\n                    pDef.saveToDB = True\n\n    def __repr__(self):\n        return \"<{} '{}' {} >\".format(self.__class__.__name__, self.name, repr(self._db))\n\n    @property\n    def database(self):\n        \"\"\"Presents the internal database object, if it exists.\"\"\"\n        if self._db is not None:\n            return self._db\n        else:\n            raise RuntimeError(\n                \"The Database interface has not yet created a database \"\n                \"object. InteractBOL or loadState must be called first.\"\n            )\n\n    def interactBOL(self):\n        \"\"\"Initialize the database if the main interface was not available. 
(Beginning of Life).\"\"\"\n        if not self._db:\n            self.initDB()\n\n    def initDB(self, fName: Optional[os.PathLike] = None):\n        \"\"\"\n        Open the underlying database to be written to, and write input files to DB.\n\n        Notes\n        -----\n        Main Interface calls this so that the database is available as early as possible in the run.\n        The database interface interacts near the end of the interface stack (so that all the\n        parameters have been updated) while the Main Interface interacts first.\n        \"\"\"\n        if fName is None:\n            self._dbPath = pathlib.Path(self.cs.caseTitle + \".h5\")\n        else:\n            self._dbPath = pathlib.Path(fName)\n\n        if self.cs[\"reloadDBName\"].lower() == str(self._dbPath).lower():\n            raise ValueError(\n                \"It appears that reloadDBName is the same as the case title. \"\n                \"This could lead to data loss! Rename the reload DB or the case.\"\n            )\n        self._db = Database(self._dbPath, \"w\")\n        self._db.open()\n        self._db.writeInputsToDB(self.cs)\n\n    def interactEveryNode(self, cycle, node):\n        \"\"\"\n        Write to database.\n\n        DBs should receive the state information of the run at each node.\n\n        Notes\n        -----\n        - If tight coupling is enabled, the DB will be written in ``Operator::_timeNodeLoop`` via\n          writeDBEveryNode.\n        \"\"\"\n        if self.o.cs[\"tightCoupling\"]:\n            # h5 can't handle overwriting so we skip here and write once the tight coupling loop has completed\n            return\n        self.writeDBEveryNode()\n\n    def writeDBEveryNode(self):\n        \"\"\"Write the database at the end of the time node.\"\"\"\n        self.r.core.p.minutesSinceStart = (time.time() - self.r.core.timeOfStart) / 60.0\n        self._db.writeToDB(self.r)\n        if self.cs[CONF_SYNC_AFTER_WRITE]:\n            
self._db.syncToSharedFolder()\n\n    def interactEOC(self, cycle=None):\n        \"\"\"\n        Do not write; this state doesn't tend to be important since its decay only step.\n\n        Notes\n        -----\n        The same time is available at start of next cycle.\n        \"\"\"\n        return\n\n    def interactEOL(self):\n        \"\"\"DB's should be closed at run's end. (End of Life).\"\"\"\n        # minutesSinceStarts should include as much of the ARMI run as possible so EOL is necessary, too.\n        self.r.core.p.minutesSinceStart = (time.time() - self.r.core.timeOfStart) / 60.0\n        self._db.writeToDB(self.r, \"EOL\")\n        self.closeDB()\n\n    def closeDB(self):\n        \"\"\"Close the DB, writing to file.\"\"\"\n        self._db.close(True)\n\n    def interactError(self):\n        \"\"\"Get shutdown state information even if the run encounters an error.\"\"\"\n        try:\n            self.r.core.p.minutesSinceStart = (time.time() - self.r.core.timeOfStart) / 60.0\n\n            # this can result in a double-error if the error occurred in the database\n            # writing\n            self._db.writeToDB(self.r, \"error\")\n            self._db.close(False)\n        except Exception:  # we're already responding to an error\n            pass\n\n    def interactDistributeState(self) -> None:\n        \"\"\"\n        Reconnect to pre-existing database.\n\n        DB is created and managed by the primary node only but we can still connect to it\n        from workers to enable things like history tracking.\n        \"\"\"\n        if context.MPI_RANK > 0:\n            # DB may not exist if distribute state is called early.\n            if self._dbPath is not None and os.path.exists(self._dbPath):\n                self._db = Database(self._dbPath, \"r\")\n                self._db.open()\n\n    def distributable(self):\n        return self.Distribute.SKIP\n\n    def prepRestartRun(self):\n        \"\"\"\n        Load the data history from the 
database requested in the case setting\n        `reloadDBName`.\n\n        Reactor state is put at the cycle/node requested in the case settings\n        `startCycle` and `startNode`, having loaded the state from all cycles prior\n        to that in the requested database.\n\n        .. impl:: Runs at a particular timenode can be re-instantiated for a snapshot.\n            :id: I_ARMI_SNAPSHOT_RESTART\n            :implements: R_ARMI_SNAPSHOT_RESTART\n\n            This method loads the state of a reactor from a particular point in time\n            from a standard ARMI\n            :py:class:`Database <armi.bookkeeping.db.database.Database>`. This is a\n            major use-case for having ARMI databases in the first case. And restarting\n            from such a database is easy, you just need to set a few settings::\n\n            * reloadDBName - Path to existing H5 file to reload from.\n            * startCycle - Operational cycle to restart from.\n            * startNode - Time node to start from.\n\n        Notes\n        -----\n        Mixing the use of simple vs detailed cycles settings is allowed, provided\n        that the cycle histories prior to `startCycle`/`startNode` are equivalent.\n\n        ARMI expects the reload DB to have been made in the same version of ARMI as you\n        are running. 
ARMI does not guarantee that a DB from a decade ago will be easily\n        used to restart a run.\n        \"\"\"\n        reloadDBName = self.cs[\"reloadDBName\"]\n        runLog.info(f\"Merging database history from {reloadDBName} for restart analysis.\")\n        startCycle = self.cs[\"startCycle\"]\n        startNode = self.cs[\"startNode\"]\n\n        with Database(reloadDBName, \"r\") as inputDB:\n            loadDbCs = inputDB.loadCS()\n\n            # pull the history up to the cycle/node prior to `startCycle`/`startNode`\n            dbCycle, dbNode = getPreviousTimeNode(\n                startCycle,\n                startNode,\n                self.cs,\n            )\n\n            self._checkThatCyclesHistoriesAreEquivalentUpToRestartTime(loadDbCs, dbCycle, dbNode)\n\n            self._db.mergeHistory(inputDB, startCycle, startNode)\n        self.loadState(dbCycle, dbNode)\n\n    def _checkThatCyclesHistoriesAreEquivalentUpToRestartTime(self, loadDbCs, dbCycle, dbNode):\n        \"\"\"Check that cycle histories are equivalent up to this point.\"\"\"\n        dbStepLengths = getStepLengths(loadDbCs)\n        currentCaseStepLengths = getStepLengths(self.cs)\n        dbStepHistory = []\n        currentCaseStepHistory = []\n        try:\n            for cycleIdx in range(dbCycle + 1):\n                if cycleIdx == dbCycle:\n                    # truncate it at dbNode\n                    dbStepHistory.append(dbStepLengths[cycleIdx][:dbNode])\n                    currentCaseStepHistory.append(currentCaseStepLengths[cycleIdx][:dbNode])\n                else:\n                    dbStepHistory.append(dbStepLengths[cycleIdx])\n                    currentCaseStepHistory.append(currentCaseStepLengths[cycleIdx])\n        except IndexError:\n            runLog.error(f\"DB cannot be loaded to this time: cycle={dbCycle}, node={dbNode}\")\n            raise\n\n        if dbStepHistory != currentCaseStepHistory:\n            raise ValueError(\"The cycle history up to 
the restart cycle/node must be equivalent.\")\n\n    def _getLoadDB(self, fileName):\n        \"\"\"\n        Return the database to load from in order of preference.\n\n        Notes\n        -----\n        If filename is present only returns one database since specifically instructed to load from that database.\n        \"\"\"\n        if fileName is not None:\n            # only yield 1 database if the file name is specified\n            if self._db is not None and fileName == self._db._fileName:\n                yield self._db\n            elif os.path.exists(fileName):\n                yield Database(fileName, \"r\")\n        else:\n            if self._db is not None:\n                yield self._db\n            if os.path.exists(self.cs[\"reloadDBName\"]):\n                yield Database(self.cs[\"reloadDBName\"], \"r\")\n\n    def loadState(self, cycle, timeNode, timeStepName=\"\", fileName=None):\n        \"\"\"\n        Loads a fresh reactor and applies it to the Operator.\n\n        Notes\n        -----\n        Will load preferentially from the ``fileName`` if passed. 
Otherwise will load from existing database in memory\n        or ``cs[\"reloadDBName\"]`` in that order.\n\n        Raises\n        ------\n        RuntimeError\n            If fileName is specified and that  file does not have the time step.\n            If fileName is not specified and neither the database in memory, nor the\n            ``cs[\"reloadDBName\"]`` have the time step specified.\n        \"\"\"\n        for potentialDatabase in self._getLoadDB(fileName):\n            with potentialDatabase as loadDB:\n                if loadDB.hasTimeStep(cycle, timeNode, statePointName=timeStepName):\n                    newR = loadDB.load(\n                        cycle,\n                        timeNode,\n                        statePointName=timeStepName,\n                        cs=self.cs,\n                        allowMissing=True,\n                    )\n                    self.o.reattach(newR, self.cs)\n                    break\n        else:\n            # reactor was never set so fail\n            if fileName:\n                raise RuntimeError(\n                    \"Cannot load state from specified file {} @ {}\".format(\n                        fileName, getH5GroupName(cycle, timeNode, timeStepName)\n                    )\n                )\n            raise RuntimeError(\n                \"Cannot load state from <unspecified file> @ {}\".format(getH5GroupName(cycle, timeNode, timeStepName))\n            )\n\n    def getHistory(\n        self,\n        comp: ArmiObject,\n        params: Optional[Sequence[str]] = None,\n        timeSteps: Optional[MutableSequence[Tuple[int, int]]] = None,\n        byLocation: bool = False,\n    ) -> History:\n        \"\"\"\n        Get historical parameter values for a single object.\n\n        This is mostly a wrapper around the same function on the ``Database`` class,\n        but knows how to return the current value as well.\n\n        See Also\n        --------\n        Database.getHistory\n        \"\"\"\n    
    # make a copy so that we can potentially remove timesteps without affecting the caller\n        timeSteps = copy.copy(timeSteps)\n        now = (self.r.p.cycle, self.r.p.timeNode)\n        nowRequested = timeSteps is None\n        if timeSteps is not None and now in timeSteps:\n            nowRequested = True\n            timeSteps.remove(now)\n\n        if byLocation:\n            history = self.database.getHistoryByLocation(comp, params, timeSteps)\n        else:\n            history = self.database.getHistory(comp, params, timeSteps)\n\n        if nowRequested:\n            for param in params or history.keys():\n                if param == \"location\":\n                    # might save as int or np.int64, so forcing int keeps things predictable\n                    history[param][now] = tuple(int(i) for i in comp.spatialLocator.indices)\n                else:\n                    history[param][now] = comp.p[param]\n\n        return history\n\n    def getHistories(\n        self,\n        comps: Sequence[ArmiObject],\n        params: Optional[Sequence[str]] = None,\n        timeSteps: Optional[MutableSequence[Tuple[int, int]]] = None,\n        byLocation: bool = False,\n    ) -> Histories:\n        \"\"\"\n        Get historical parameter values for one or more objects.\n\n        This is mostly a wrapper around the same function on the ``Database`` class,\n        but knows how to return the current value as well.\n\n        See Also\n        --------\n        Database.getHistories\n        \"\"\"\n        now = (self.r.p.cycle, self.r.p.timeNode)\n        nowRequested = timeSteps is None\n        if timeSteps is not None:\n            # make a copy so that we can potentially remove timesteps without affecting\n            # the caller\n            timeSteps = copy.copy(timeSteps)\n        if timeSteps is not None and now in timeSteps:\n            nowRequested = True\n            timeSteps.remove(now)\n\n        if byLocation:\n            histories = 
self.database.getHistoriesByLocation(comps, params, timeSteps)\n        else:\n            histories = self.database.getHistories(comps, params, timeSteps)\n\n        if nowRequested:\n            for c in comps:\n                for param in params or histories[c].keys():\n                    if param == \"location\":\n                        histories[c][param][now] = tuple(int(i) for i in c.spatialLocator.indices)\n                    else:\n                        histories[c][param][now] = c.p[param]\n\n        return histories\n"
  },
  {
    "path": "armi/bookkeeping/db/factory.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pathlib\nfrom typing import Optional\n\nimport h5py\n\nfrom armi.bookkeeping.db import permissions\nfrom armi.bookkeeping.db.database import Database\n\n\ndef databaseFactory(dbName: str, permission: str, version: Optional[str] = None):\n    \"\"\"\n    Return an appropriate object for interacting with a database file.\n\n    Parameters\n    ----------\n    dbName: str\n        Path to db file, e.g. `baseCase.h5`\n    permission: str\n        String defining permission, `r` for read only. See armi.bookkeeping.db.permissions\n    version: str, optional\n        Version of database you want to read or write. In most cases ARMI will\n        auto-detect. For advanced users.\n\n    Notes\n    -----\n    This is not a proper factory, as the different database versions do not present a\n    common interface. However, this is useful code, since it at least creates an object\n    based on some knowledge of how to probe around. 
This allows client code to just\n    interrogate the type of the returned object to figure out to do based on whatever it\n    needs.\n    \"\"\"\n    dbPath = pathlib.Path(dbName)\n\n    # if it's not an hdf5 file, we dont even know where to start...\n    if dbPath.suffix != \".h5\":\n        raise RuntimeError(\"Unknown database format for {}\".format(dbName))\n\n    if permission in permissions.Permissions.read:\n        if version is not None:\n            raise ValueError(\"Cannot specify version when reading a database.\")\n\n        if not dbPath.exists() or not dbPath.is_file():\n            raise ValueError(\"Database file `{}` does not appear to be a file.\".format(dbName))\n\n        # probe for the database version. We started adding these with \"database 3\", so if\n        # databaseVersion is not present, assume it's the \"old\" version\n        version = \"2\"\n        tempDb = h5py.File(dbPath, \"r\")\n        if \"databaseVersion\" in tempDb.attrs:\n            version = tempDb.attrs[\"databaseVersion\"]\n        del tempDb\n\n        majorversion = version.split(\".\")[0] if version else \"2\"\n        if majorversion == \"2\":\n            raise ValueError(\n                'Database version 2 (\"XTView database\") is no longer '\n                \"supported. To migrate to a newer version, use version 0.1.5.\"\n            )\n\n        if majorversion == \"3\":\n            return Database(dbPath, permission)\n\n        raise ValueError(\"Unable to determine Database version for {}\".format(dbName))\n    elif permission in permissions.Permissions.write:\n        majorversion = version.split(\".\")[0] if version else \"3\"\n        if majorversion == \"2\":\n            raise ValueError(\n                'Database version 2 (\"XTView database\") is no longer '\n                \"supported. 
To migrate to a newer version, use version 0.1.5 to migrate.\"\n            )\n        if majorversion == \"3\":\n            return Database(dbPath, permission)\n\n    return None\n"
  },
  {
    "path": "armi/bookkeeping/db/jaggedArray.py",
    "content": "# Copyright 2024 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nTooling to help flatten jagged (non rectangular) data into rectangular arrays.\n\nThe goal here is to support jagged data for NumPy arrays to be written into the ARMI databases.\n\"\"\"\n\nfrom typing import List, Optional\n\nimport numpy as np\n\nfrom armi import runLog\n\n\nclass JaggedArray:\n    \"\"\"\n    Take a list of numpy arrays or lists and flatten them into a single 1D array.\n\n    This implementation can preserve the structure of a multi-dimensional numpy array\n    by storing the dimensions in self.shapes and then re-populating a numpy array of\n    that shape from the flattened 1D array. However, it can only preserve one layer of\n    jaggedness in a list of lists (or other iterables). For example, a list of tuples\n    with varying lengths can be flattened and reconstituted exactly. But, if a list of\n    lists of tuples is passed in, the tuples in that final layer of nesting will all be\n    flattened to a single 1D numpy array after a round trip. 
No structure is retained\n    from nested lists of jagged lists or tuples.\n    \"\"\"\n\n    def __init__(self, jaggedData, paramName):\n        \"\"\"\n        JaggedArray constructor.\n\n        Parameters\n        ----------\n        jaggedData: list of np.ndarray\n            A list of numpy arrays (or lists or tuples) to be flattened into a single array\n        paramName: str\n            The name of the parameter represented by this data\n        \"\"\"\n        offset = 0\n        flattenedArray = []\n        offsets = []\n        shapes = []\n        nones = []\n        for i, arr in enumerate(jaggedData):\n            if isinstance(arr, (np.ndarray, list, tuple)):\n                if len(arr) == 0:\n                    nones.append(i)\n                else:\n                    offsets.append(offset)\n                    try:\n                        numpyArray = np.array(arr)\n                        shapes.append(numpyArray.shape)\n                        offset += numpyArray.size\n                        flattenedArray.extend(numpyArray.flatten())\n                    except:  # noqa: E722\n                        # numpy might fail if it's jagged\n                        flattenedList = self.flatten(arr)\n                        shapes.append(\n                            len(flattenedList),\n                        )\n                        offset += len(flattenedList)\n                        flattenedArray.extend(flattenedList)\n            elif isinstance(arr, (int, float)):\n                offsets.append(offset)\n                shapes.append((1,))\n                offset += 1\n                flattenedArray.append(arr)\n            elif arr is None:\n                nones.append(i)\n\n        self.flattenedArray = np.array(flattenedArray)\n        self.offsets = np.array(offsets)\n        try:\n            self.shapes = np.array(shapes)\n        except ValueError as ee:\n            runLog.error(\n                \"Error! 
It seems like ARMI may have tried to flatten a jagged array \"\n                \"where the elements have different numbers of dimensions. `shapes` \"\n                \"attribute of the JaggedArray for {} cannot be made into a numpy \"\n                \"array; it might be jagged.\".format(paramName)\n            )\n            runLog.error(shapes)\n            raise ValueError(ee)\n        self.nones = np.array(nones)\n        self.dtype = self.flattenedArray.dtype\n        self.paramName = paramName\n\n    def __iter__(self):\n        \"\"\"Iterate over the unpacked list.\"\"\"\n        return iter(self.unpack())\n\n    def __contains__(self, other):\n        return other in self.flattenedArray\n\n    @staticmethod\n    def flatten(x):\n        \"\"\"\n        Recursively flatten an iterable (list, tuple, or numpy.ndarray).\n\n        x : list, tuple, np.ndarray\n            An iterable. Can be a nested iterable in which the elements\n            themselves are also iterable.\n        \"\"\"\n        if isinstance(x, (list, tuple, np.ndarray)):\n            if len(x) == 0:\n                return []\n            first, rest = x[0], x[1:]\n            return JaggedArray.flatten(first) + JaggedArray.flatten(rest)\n        else:\n            return [x]\n\n    @classmethod\n    def fromH5(cls, data, offsets, shapes, nones, dtype, paramName):\n        \"\"\"\n        Create a JaggedArray instance from an HDF5 dataset.\n\n        The JaggedArray is stored in HDF5 as a flat 1D array with accompanying\n        attributes of \"offsets\" and \"shapes\" to define how to reconstitute the\n        original data.\n\n        Parameters\n        ----------\n        data: np.ndarray\n            A flattened 1D numpy array read in from an HDF5 file\n        offsets: np.ndarray\n            Offset indices for the zeroth element of each constituent array\n        shapes: np.ndarray\n            The shape of each constituent array\n        nones: np.ndarray\n            The location 
of Nones\n        dtype: np.dtype\n            The data type for the array\n        paramName: str\n            The name of the parameter represented by this data\n\n        Returns\n        -------\n        obj: JaggedArray An instance of JaggedArray populated with the input data\n        \"\"\"\n        obj = cls([], paramName)\n        obj.flattenedArray = np.array(data)\n        obj.offsets = np.array(offsets)\n        obj.shapes = np.array(shapes)\n        obj.nones = np.array(nones)\n        obj.dtype = dtype\n        obj.paramName = paramName\n        return obj\n\n    def tolist(self):\n        \"\"\"Alias for unpack() to make this class respond like a np.ndarray.\"\"\"\n        return self.unpack()\n\n    def unpack(self):\n        \"\"\"\n        Unpack a JaggedArray object into a list of arrays.\n\n        Returns\n        -------\n        unpackedJaggedData: list of np.ndarray\n            List of numpy arrays with varying dimensions (i.e., jagged arrays)\n        \"\"\"\n        unpackedJaggedData: List[Optional[np.ndarray]] = []\n        shapeIndices = [i for i, x in enumerate(self.shapes) if sum(x) != 0]\n        numElements = len(shapeIndices) + len(self.nones)\n        j = 0  # non-None element counter\n        for i in range(numElements):\n            if i in self.nones:\n                unpackedJaggedData.append(None)\n            else:\n                k = shapeIndices[j]\n                unpackedJaggedData.append(\n                    np.ndarray(\n                        self.shapes[k],\n                        dtype=self.dtype,\n                        buffer=self.flattenedArray[self.offsets[k] :],\n                    )\n                )\n                j += 1\n\n        return unpackedJaggedData\n"
  },
  {
    "path": "armi/bookkeeping/db/layout.py",
    "content": "# Copyright 2022 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nGroundwork for ARMI Database, version 3.4.\n\nWhen interacting with the database file, the :py:class:`Layout` class is used to help\nmap the hierarchical Composite Reactor Model to the flat representation in\n:py:class:`Database <armi.bookkeeping.db.database.Database>`.\n\nThis module also stores packing/packing tools to support\n:py:class:`Database <armi.bookkeeping.db.database.Database>`, as well as database\nversioning information.\n\"\"\"\n\nimport collections\nfrom typing import (\n    Any,\n    Dict,\n    List,\n    Optional,\n    Tuple,\n    Type,\n)\n\nimport numpy as np\n\nfrom armi import runLog\nfrom armi.reactor import grids\nfrom armi.reactor.components import Component\nfrom armi.reactor.composites import ArmiObject\nfrom armi.reactor.excoreStructure import ExcoreStructure\nfrom armi.reactor.reactors import Core, Reactor\n\n# Here we store the Database version information.\nDB_MAJOR = 3\nDB_MINOR = 4\nDB_VERSION = f\"{DB_MAJOR}.{DB_MINOR}\"\n\n# CONSTANTS USED TO PACK AND UNPACK DATA\nLOC_NONE = \"N\"\nLOC_COORD = \"C\"\nLOC_INDEX = \"I\"\nLOC_MULTI = \"M:\"\n\nLOCATION_TYPE_LABELS = {\n    type(None): LOC_NONE,\n    grids.CoordinateLocation: LOC_COORD,\n    grids.IndexLocation: LOC_INDEX,\n    grids.MultiIndexLocation: LOC_MULTI,\n}\n\n# NOTE: Here we assume no one assigns min(int)+2 as a meaningful value\nNONE_MAP = {float: float(\"nan\"), 
str: \"<!None!>\"}\nNONE_MAP.update(\n    {\n        intType: np.iinfo(intType).min + 2\n        for intType in (\n            int,\n            np.int8,\n            np.int16,\n            np.int32,\n            np.int64,\n        )\n    }\n)\nNONE_MAP.update(\n    {\n        intType: np.iinfo(intType).max - 2\n        for intType in (\n            np.uint,\n            np.uint8,\n            np.uint16,\n            np.uint32,\n            np.uint64,\n        )\n    }\n)\nNONE_MAP.update({floatType: floatType(\"nan\") for floatType in (float, np.float64)})\n\n\nclass Layout:\n    \"\"\"\n    The Layout class describes the hierarchical layout of the Composite Reactor model\n    in a flat representation for\n    :py:class:`Database <armi.bookkeeping.db.database.Database>`.\n\n    A Layout is built by starting at the root of a composite tree and recursively\n    appending each node in the tree to a list of data. So the data will be ordered by\n    depth-first search: [r, c, a1, a1b1, a1b1c1, a1b1c2, a1b2, a1b2c1, ..., a2, ...].\n\n    The layout is also responsible for storing Component attributes, like location,\n    material, and temperatures, which aren't stored as Parameters. Temperatures,\n    specifically, are rather complicated in ARMI.\n\n    Notes\n    -----\n     * Elements in Layout are stored in depth-first order. This permits use of\n       algorithms such as Pre-Order Tree Traversal for efficient traversal of regions\n       of the model.\n\n     * ``indexInData`` increases monotonically within each object ``type``. For\n       example, the data for all ``HexBlock`` children of a given parent are stored\n       contiguously within the ``HexBlock`` group, and will not be interleaved with\n       data from the ``HexBlock`` children of any of the parent's siblings.\n\n     * Aside from the hierarchy, there is no guarantee what order objects are stored\n       in the layout.  
The ``Core`` is not necessarily the first child of the\n       ``Reactor``, and is not guaranteed to use the zeroth grid.\n    \"\"\"\n\n    def __init__(self, version: Tuple[int, int], h5group=None, comp=None):\n        self.type: List[str] = []\n        self.name: List[str] = []\n        self.serialNum: List[int] = []\n        # The index into the parameter datasets corresponding to each object's class.\n        # E.g., the 5th HexBlock object in the tree would get 5; to look up its\n        # \"someParameter\" value, you would extract cXXnYY/HexBlock/someParameter[5].\n        self.indexInData: List[int] = []\n        # The number of direct children this object has.\n        self.numChildren: List[int] = []\n        # The type of location that specifies the object's physical location; see the\n        # associated pack/unpackLocation functions for more information about how\n        # locations are handled.\n        self.locationType: List[str] = []\n        # There is a minor asymmetry here in that before writing to the DB, this is\n        # truly a flat list of tuples. However when reading, this may contain lists of\n        # tuples, which represent MI locations. This comes from the fact that we map the\n        # tuples to Location objects in Database._compose, but map from Locations to\n        # tuples in Layout._createLayout. Ideally we would handle both directions in the\n        # same place so this can be less surprising. 
Resolving this would require\n        # changing the interface of the various pack/unpack functions, which have\n        # multiple versions, so the update would need to be done with care.\n        self.location: List[Tuple[int, int, int]] = []\n        # Which grid, as stored in the database, this object uses to arrange its\n        # children\n        self.gridIndex: List[int] = []\n        self.temperatures: List[float] = []\n        self.material: List[str] = []\n        # Used to cache all of the spatial locators so that we can pack them all at\n        # once. The benefit here is that the version checking can happen up front and\n        # less branching down below\n        self._spatialLocators: List[grids.LocationBase] = []\n        # set of grid parameters that have been seen in _createLayout. For efficient\n        # checks for uniqueness\n        self._seenGridParams: Dict[Any, Any] = dict()\n        # actual list of grid parameters, with stable order for safe indexing\n        self.gridParams: List[Any] = []\n        self.version = version\n\n        self.groupedComps: Dict[Type[ArmiObject], List[ArmiObject]] = collections.defaultdict(list)\n\n        # it should be noted, one of the two inputs must be non-None: comp/h5group\n        if comp is not None:\n            self._createLayout(comp)\n            self.locationType, self.location = _packLocations(self._spatialLocators)\n        else:\n            self._readLayout(h5group)\n\n        self._snToLayoutIndex = {sn: i for i, sn in enumerate(self.serialNum)}\n\n        # find all subclasses of Grid\n        self.gridClasses = {c.__name__: c for c in Layout.allSubclasses(grids.Grid)}\n        self.gridClasses[\"Grid\"] = grids.Grid\n\n    def __getitem__(self, sn):\n        layoutIndex = self._snToLayoutIndex[sn]\n        return (\n            self.type[layoutIndex],\n            self.name[layoutIndex],\n            self.serialNum[layoutIndex],\n            self.indexInData[layoutIndex],\n            
self.numChildren[layoutIndex],\n            self.locationType[layoutIndex],\n            self.location[layoutIndex],\n            self.temperatures[layoutIndex],\n            self.material[layoutIndex],\n        )\n\n    def _createLayout(self, comp):\n        \"\"\"\n        Populate a hierarchical representation and group the reactor model items by type.\n\n        This is used when writing a reactor model to the database.\n\n        Notes\n        -----\n        This is recursive.\n\n        See Also\n        --------\n        _readLayout : does the opposite\n        \"\"\"\n        compList = self.groupedComps[type(comp)]\n        compList.append(comp)\n\n        self.type.append(comp.__class__.__name__)\n        self.name.append(comp.name)\n        self.serialNum.append(comp.p.serialNum)\n        self.indexInData.append(len(compList) - 1)\n        self.numChildren.append(len(comp))\n\n        # determine how many components have been read in, to set the grid index\n        if comp.spatialGrid is not None:\n            gridType = type(comp.spatialGrid).__name__\n            gridParams = (gridType, comp.spatialGrid.reduce())\n            if gridParams not in self._seenGridParams:\n                self._seenGridParams[gridParams] = len(self.gridParams)\n                self.gridParams.append(gridParams)\n            self.gridIndex.append(self._seenGridParams[gridParams])\n        else:\n            self.gridIndex.append(None)\n\n        self._spatialLocators.append(comp.spatialLocator)\n\n        # set the materials and temperatures\n        try:\n            self.temperatures.append((comp.inputTemperatureInC, comp.temperatureInC))\n            self.material.append(comp.material.__class__.__name__)\n        except Exception:\n            self.temperatures.append((-900, -900))  # an impossible temperature\n            self.material.append(\"\")\n\n        try:\n            comps = sorted(list(comp))\n        except ValueError:\n            runLog.error(\n          
      \"Failed to sort some collection of ArmiObjects for database output: {} value {}\".format(\n                    type(comp), list(comp)\n                )\n            )\n            raise\n\n        # depth-first search recursion of all components\n        for c in comps:\n            self._createLayout(c)\n\n    def _readLayout(self, h5group):\n        \"\"\"\n        Populate a hierarchical representation and group the reactor model items by type.\n\n        This is used when reading a reactor model from a database.\n\n        See Also\n        --------\n        _createLayout : does the opposite\n        \"\"\"\n        try:\n            # location is either an index, or a point\n            # iter over list is faster\n            locations = h5group[\"layout/location\"][:].tolist()\n            self.locationType = np.char.decode(h5group[\"layout/locationType\"][:]).tolist()\n            self.location = _unpackLocations(self.locationType, locations, self.version[1])\n            self.type = np.char.decode(h5group[\"layout/type\"][:])\n            self.name = np.char.decode(h5group[\"layout/name\"][:])\n            self.serialNum = h5group[\"layout/serialNum\"][:]\n            self.indexInData = h5group[\"layout/indexInData\"][:]\n            self.numChildren = h5group[\"layout/numChildren\"][:]\n            self.material = np.char.decode(h5group[\"layout/material\"][:])\n            self.temperatures = h5group[\"layout/temperatures\"][:]\n            self.gridIndex = replaceNonsenseWithNones(h5group[\"layout/gridIndex\"][:], \"layout/gridIndex\")\n\n            gridGroup = h5group[\"layout/grids\"]\n            gridTypes = [t.decode() for t in gridGroup[\"type\"][:]]\n\n            self.gridParams = []\n            for iGrid, gridType in enumerate(gridTypes):\n                thisGroup = gridGroup[str(iGrid)]\n\n                unitSteps = thisGroup[\"unitSteps\"][:]\n                bounds = []\n                for ibound in range(3):\n                    
boundName = \"bounds_{}\".format(ibound)\n                    if boundName in thisGroup:\n                        bounds.append(thisGroup[boundName][:])\n                    else:\n                        bounds.append(None)\n                unitStepLimits = thisGroup[\"unitStepLimits\"][:]\n                offset = thisGroup[\"offset\"][:] if thisGroup.attrs[\"offset\"] else None\n                geomType = thisGroup[\"geomType\"].asstr()[()] if \"geomType\" in thisGroup else None\n                symmetry = thisGroup[\"symmetry\"].asstr()[()] if \"symmetry\" in thisGroup else None\n\n                self.gridParams.append(\n                    (\n                        gridType,\n                        grids.GridParameters(\n                            unitSteps,\n                            bounds,\n                            unitStepLimits,\n                            offset,\n                            geomType,\n                            symmetry,\n                        ),\n                    )\n                )\n\n        except KeyError as e:\n            runLog.error(\"Failed to get layout information from group: {}\".format(h5group.name))\n            raise e\n\n    def _initComps(self, caseTitle, bp):\n        comps = []\n        groupedComps = collections.defaultdict(list)\n\n        for (\n            compType,\n            name,\n            serialNum,\n            numChildren,\n            location,\n            locationType,\n            material,\n            temperatures,\n            gridIndex,\n        ) in zip(\n            self.type,\n            self.name,\n            self.serialNum,\n            self.numChildren,\n            self.location,\n            self.locationType,\n            self.material,\n            self.temperatures,\n            self.gridIndex,\n        ):\n            Klass = ArmiObject.TYPES[compType]\n\n            if issubclass(Klass, Reactor):\n                comp = Klass(caseTitle, bp)\n            elif 
issubclass(Klass, Core):\n                comp = Klass(name)\n            elif issubclass(Klass, ExcoreStructure):\n                comp = Klass(name)\n            elif issubclass(Klass, Component):\n                # init all dimensions to 0, they will be loaded and assigned after load\n                kwargs = dict.fromkeys(Klass.DIMENSION_NAMES, 0)\n                kwargs[\"modArea\"] = None\n                kwargs[\"material\"] = material\n                kwargs[\"name\"] = name\n                kwargs[\"Tinput\"] = temperatures[0]\n                kwargs[\"Thot\"] = temperatures[1]\n                comp = Klass(**kwargs)\n            else:\n                comp = Klass(name)\n\n            if gridIndex is not None:\n                gridParams = self.gridParams[gridIndex]\n                comp.spatialGrid = self.gridClasses[gridParams[0]](*gridParams[1], armiObject=comp)\n\n            comps.append((comp, serialNum, numChildren, location, locationType))\n            groupedComps[compType].append(comp)\n\n        return comps, groupedComps\n\n    def writeToDB(self, h5group):\n        \"\"\"Write a chunk of data to the database.\n\n        .. impl:: Write data to the DB for a given time step.\n            :id: I_ARMI_DB_TIME0\n            :implements: R_ARMI_DB_TIME\n\n            This method writes a snapshot of the current state of the reactor to the\n            database. It takes a pointer to an existing HDF5 file as input, and it\n            writes the reactor data model to the file in depth-first search order.\n            Other than this search order, there are no guarantees as to what order the\n            objects are written to the file. Though, this turns out to still be very\n            powerful. 
For instance, the data for all ``HexBlock`` children of a given\n            parent are stored contiguously within the ``HexBlock`` group, and will not\n            be interleaved with data from the ``HexBlock`` children of any of the parent's siblings.\n        \"\"\"\n        if \"layout/type\" in h5group:\n            # It looks like we have already written the layout to DB, skip for now\n            return\n        try:\n            h5group.create_dataset(\n                \"layout/type\",\n                data=np.array(self.type).astype(\"S\"),\n                compression=\"gzip\",\n            )\n            h5group.create_dataset(\n                \"layout/name\",\n                data=np.array(self.name).astype(\"S\"),\n                compression=\"gzip\",\n            )\n            h5group.create_dataset(\"layout/serialNum\", data=self.serialNum, compression=\"gzip\")\n            h5group.create_dataset(\"layout/indexInData\", data=self.indexInData, compression=\"gzip\")\n            h5group.create_dataset(\n                \"layout/numChildren\",\n                data=self.numChildren,\n                compression=\"gzip\",\n                track_order=True,\n            )\n            h5group.create_dataset(\n                \"layout/location\",\n                data=self.location,\n                compression=\"gzip\",\n                track_order=True,\n            )\n            h5group.create_dataset(\n                \"layout/locationType\",\n                data=np.array(self.locationType).astype(\"S\"),\n                compression=\"gzip\",\n                track_order=True,\n            )\n            h5group.create_dataset(\n                \"layout/material\",\n                data=np.array(self.material).astype(\"S\"),\n                compression=\"gzip\",\n                track_order=True,\n            )\n            h5group.create_dataset(\n                \"layout/temperatures\",\n                data=self.temperatures,\n               
 compression=\"gzip\",\n                track_order=True,\n            )\n\n            h5group.create_dataset(\n                \"layout/gridIndex\",\n                data=replaceNonesWithNonsense(np.array(self.gridIndex), \"layout/gridIndex\"),\n                compression=\"gzip\",\n            )\n\n            gridsGroup = h5group.create_group(\"layout/grids\", track_order=True)\n            gridsGroup.attrs[\"nGrids\"] = len(self.gridParams)\n            gridsGroup.create_dataset(\n                \"type\",\n                data=np.array([gp[0] for gp in self.gridParams]).astype(\"S\"),\n                track_order=True,\n            )\n\n            for igrid, gridParams in enumerate(gp[1] for gp in self.gridParams):\n                thisGroup = gridsGroup.create_group(str(igrid), track_order=True)\n                thisGroup.create_dataset(\"unitSteps\", data=gridParams.unitSteps, track_order=True)\n\n                for ibound, bound in enumerate(gridParams.bounds):\n                    if bound is not None:\n                        bound = np.array(bound)\n                        thisGroup.create_dataset(\"bounds_{}\".format(ibound), data=bound, track_order=True)\n\n                thisGroup.create_dataset(\"unitStepLimits\", data=gridParams.unitStepLimits, track_order=True)\n\n                offset = gridParams.offset\n                thisGroup.attrs[\"offset\"] = offset is not None\n                if offset is not None:\n                    thisGroup.create_dataset(\"offset\", data=offset, track_order=True)\n                thisGroup.create_dataset(\"geomType\", data=gridParams.geomType, track_order=True)\n                thisGroup.create_dataset(\"symmetry\", data=gridParams.symmetry, track_order=True)\n        except RuntimeError:\n            runLog.error(\"Failed to create datasets in: {}\".format(h5group))\n            raise\n\n    @staticmethod\n    def computeAncestors(serialNum, numChildren, depth=1) -> List[Optional[int]]:\n        \"\"\"\n     
   Return a list containing the serial number of the parent corresponding to each\n        object at the given depth.\n\n        Depth in this case means how many layers to reach up to find the desired\n        ancestor. A depth of 1 will yield the direct parent of each element, depth of 2\n        would yield the element's parent's parent, and so on.\n\n        The zero-th element will always be None, as the first object is the root element\n        and so has no parent. Subsequent depths will result in more Nones.\n\n        This function is useful for forming a lightweight sense of how the database\n        contents stitch together, without having to go to the trouble of fully unpacking\n        the Reactor model.\n\n        Parameters\n        ----------\n        serialNum : List of int\n            List of serial numbers for each object/element, as laid out in Layout\n        numChildren : List of int\n            List of numbers of children for each object/element, as laid out in Layout\n\n        Note\n        ----\n        This is not using a recursive approach for a couple of reasons. First, the\n        iterative form isn't so bad; we just need two stacks. Second, the interface of\n        the recursive function would be pretty unwieldy. 
We are progressively\n        consuming two lists, of which we would need to keep passing down with an\n        index/cursor, or progressively slice them as we go, which would be pretty\n        inefficient.\n        \"\"\"\n        ancestors: List[Optional[int]] = [None]\n\n        snStack = [serialNum[0]]\n        ncStack = [numChildren[0]]\n\n        for sn, nc in zip(serialNum[1:], numChildren[1:]):\n            ncStack[-1] -= 1\n            if nc > 0:\n                ancestors.append(snStack[-1])\n                snStack.append(sn)\n                ncStack.append(nc)\n            else:\n                ancestors.append(snStack[-1])\n\n            while ncStack and ncStack[-1] == 0:\n                snStack.pop()\n                ncStack.pop()\n\n        if depth > 1:\n            # handle deeper scenarios. This is a bit tricky. Store the original\n            # ancestors for the first generation, since that ultimately contains all of\n            # the information that we need. Then in a loop, keep hopping one more layer\n            # of indirection, and indexing into the corresponding location in the\n            # original ancestor array\n            indexMap = {sn: i for i, sn in enumerate(serialNum)}\n            origAncestors = ancestors\n            for _ in range(depth - 1):\n                ancestors = [origAncestors[indexMap[ia]] if ia is not None else None for ia in ancestors]\n\n        return ancestors\n\n    @staticmethod\n    def allSubclasses(cls) -> set:\n        \"\"\"Find all subclasses of the given class, in any namespace.\"\"\"\n        return set(cls.__subclasses__()).union([s for c in cls.__subclasses__() for s in Layout.allSubclasses(c)])\n\n\ndef _packLocations(\n    locations: List[grids.LocationBase], minorVersion: int = DB_MINOR\n) -> Tuple[List[str], List[Tuple[int, int, int]]]:\n    \"\"\"\n    Extract information from a location needed to write it to this DB.\n\n    Each locator has one locationType and up to N location-defining 
datums,\n    where N is the number of entries in a possible multiindex, or just 1\n    for everything else.\n\n    Shrink grid locator names for storage efficiency.\n\n    Notes\n    -----\n    Contains some conditionals to still load databases made before\n    db version 3.3 which can be removed once no users care about\n    those DBs anymore.\n    \"\"\"\n    if minorVersion <= 2:\n        locationTypes, locationData = _packLocationsV1(locations)\n    elif minorVersion == 3:\n        locationTypes, locationData = _packLocationsV2(locations)\n    elif minorVersion > 3:\n        locationTypes, locationData = _packLocationsV3(locations)\n    else:\n        raise ValueError(\"Unsupported minor version: {}\".format(minorVersion))\n    return locationTypes, locationData\n\n\ndef _packLocationsV1(\n    locations: List[grids.LocationBase],\n) -> Tuple[List[str], List[Tuple[int, int, int]]]:\n    \"\"\"Delete when reading v <=3.2 DB's no longer wanted.\"\"\"\n    locTypes = []\n    locData: List[Tuple[int, int, int]] = []\n    for loc in locations:\n        locationType = loc.__class__.__name__\n        if loc is None:\n            locationType = \"None\"\n            locDatum = [(0.0, 0.0, 0.0)]\n        elif isinstance(loc, grids.IndexLocation):\n            locDatum = [loc.indices]\n        else:\n            raise ValueError(f\"Invalid location type: {loc}\")\n\n        locTypes.append(locationType)\n        locData.extend(locDatum)\n\n    return locTypes, locData\n\n\ndef _packLocationsV2(\n    locations: List[grids.LocationBase],\n) -> Tuple[List[str], List[Tuple[int, int, int]]]:\n    \"\"\"Location packing implementation for minor version 3. 
See module docstring above.\"\"\"\n    locTypes = []\n    locData: List[Tuple[int, int, int]] = []\n    for loc in locations:\n        locationType = LOCATION_TYPE_LABELS[type(loc)]\n        if loc is None:\n            locDatum = [(0.0, 0.0, 0.0)]\n        elif loc.__class__ is grids.CoordinateLocation:\n            locDatum = [loc.indices]\n        elif loc.__class__ is grids.IndexLocation:\n            locDatum = [loc.indices]\n        elif loc.__class__ is grids.MultiIndexLocation:\n            # encode number of sub-locations to allow in-line unpacking.\n            locationType += f\"{len(loc)}\"\n            locDatum = [subloc.indices for subloc in loc]\n        else:\n            raise ValueError(f\"Invalid location type: {loc}\")\n\n        locTypes.append(locationType)\n        locData.extend(locDatum)\n\n    return locTypes, locData\n\n\ndef _packLocationsV3(\n    locations: List[grids.LocationBase],\n) -> Tuple[List[str], List[Tuple[int, int, int]]]:\n    \"\"\"Location packing implementation for minor version 4. 
See module docstring above.\"\"\"\n    locTypes = []\n    locData: List[Tuple[int, int, int]] = []\n\n    for loc in locations:\n        locationType = LOCATION_TYPE_LABELS[type(loc)]\n        if loc is None:\n            locDatum = [(0.0, 0.0, 0.0)]\n        elif type(loc) is grids.IndexLocation:\n            locDatum = [loc.getCompleteIndices()]\n        elif type(loc) is grids.CoordinateLocation:\n            # CoordinateLocations do not implement getCompleteIndices properly, and we\n            # do not really have a motivation to store them as we do with index\n            # locations.\n            locDatum = [loc.indices]\n        elif type(loc) is grids.MultiIndexLocation:\n            locationType += f\"{len(loc)}\"\n            locDatum = [subloc.indices for subloc in loc]\n        else:\n            raise ValueError(f\"Invalid location type: {loc}\")\n\n        locTypes.append(locationType)\n        locData.extend(locDatum)\n\n    return locTypes, locData\n\n\ndef _unpackLocations(locationTypes, locData, minorVersion: int = DB_MINOR):\n    \"\"\"\n    Convert location data as read from DB back into data structure for building reactor model.\n\n    location and locationType will only have different lengths when multiindex locations\n    are used.\n    \"\"\"\n    if minorVersion < 3:\n        return _unpackLocationsV1(locationTypes, locData)\n    else:\n        return _unpackLocationsV2(locationTypes, locData)\n\n\ndef _unpackLocationsV1(locationTypes, locData):\n    \"\"\"Delete when reading v <=3.2 DB's no longer wanted.\"\"\"\n    locsIter = iter(locData)\n    unpackedLocs = []\n    for lt in locationTypes:\n        if lt == \"None\":\n            loc = next(locsIter)\n            unpackedLocs.append(None)\n        elif lt == \"IndexLocation\":\n            loc = next(locsIter)\n            # the data is stored as float, so cast back to int\n            unpackedLocs.append(tuple(int(i) for i in loc))\n        else:\n            loc = next(locsIter)\n    
        unpackedLocs.append(tuple(loc))\n    return unpackedLocs\n\n\ndef _unpackLocationsV2(locationTypes, locData):\n    \"\"\"Location unpacking implementation for minor version 3+. See module docstring above.\"\"\"\n    locsIter = iter(locData)\n    unpackedLocs = []\n    for lt in locationTypes:\n        if lt == LOC_NONE:\n            loc = next(locsIter)\n            unpackedLocs.append(None)\n        elif lt == LOC_INDEX:\n            loc = next(locsIter)\n            # the data is stored as float, so cast back to int\n            unpackedLocs.append(tuple(int(i) for i in loc))\n        elif lt == LOC_COORD:\n            loc = next(locsIter)\n            unpackedLocs.append(tuple(loc))\n        elif lt.startswith(LOC_MULTI):\n            # extract number of sublocations from e.g. \"M:345\" string.\n            numSubLocs = int(lt.split(\":\")[1])\n            multiLocs = []\n            for _ in range(numSubLocs):\n                subLoc = next(locsIter)\n                # All multiindexes sublocs are index locs\n                multiLocs.append(tuple(int(i) for i in subLoc))\n            unpackedLocs.append(multiLocs)\n        else:\n            raise ValueError(f\"Read unknown location type {lt}. Invalid DB.\")\n\n    return unpackedLocs\n\n\ndef replaceNonesWithNonsense(data: np.ndarray, paramName: str, nones: np.ndarray = None) -> np.ndarray:\n    \"\"\"\n    Replace instances of ``None`` with nonsense values that can be detected/recovered\n    when reading.\n\n    Parameters\n    ----------\n    data\n        The numpy array containing ``None`` values that need to be replaced.\n\n    paramName\n        The name of the parameter who's data we are treating. Only used for diagnostics.\n\n    nones\n        An array containing the index locations on the ``None`` elements. 
It is a little\n        strange to pass these in, but we find these indices to determine whether we need\n        to call this function in the first place, so might as well pass it in, so that\n        we don't need to perform the operation again.\n\n    Notes\n    -----\n    This only supports situations where the data is a straight-up ``None``, or a valid,\n    database-storable numpy array (or easily convertible to one (e.g. tuples/lists with\n    numerical values)). This does not support, for instance, a numpy ndarray with some\n    Nones in it.\n\n    For example, the following is supported::\n\n        [[1, 2, 3], None, [7, 8, 9]]\n\n    However, the following is not::\n\n        [[1, 2, 3], [4, None, 6], [7, 8, 9]]\n\n    See Also\n    --------\n    replaceNonsenseWithNones\n        Reverses this operation.\n    \"\"\"\n    if nones is None:\n        nones = np.where([d is None for d in data])[0]\n\n    try:\n        # loop to find what the default value should be. This is the first non-None\n        # value that we can find.\n        defaultValue = None\n        realType = None\n        val = None\n\n        for val in data:\n            if isinstance(val, np.ndarray):\n                # if multi-dimensional, val[0] could still be an array, val.flat is\n                # a flattened iterator, so next(val.flat) gives the first value in\n                # an n-dimensional array\n                realType = type(next(val.flat))\n\n                if realType is type(None):\n                    continue\n\n                defaultValue = np.reshape(np.repeat(NONE_MAP[realType], val.size), val.shape)\n                break\n            else:\n                realType = type(val)\n\n                if realType is type(None):\n                    continue\n\n                defaultValue = NONE_MAP[realType]\n                break\n        else:\n            # Couldn't find any non-None entries, so it really doesn't matter what type we\n            # use. 
Using float, because NaN is nice.\n            realType = float\n            defaultValue = NONE_MAP[realType]\n\n        if isinstance(val, np.ndarray):\n            data = np.array([d if d is not None else defaultValue for d in data])\n        else:\n            data[nones] = defaultValue\n\n    except Exception as ee:\n        runLog.error(\n            \"Error while attempting to determine default for {}.\\nvalue: {}\\nError: {}\".format(paramName, val, ee)\n        )\n        raise TypeError(\n            \"Could not determine None replacement for {} with type {}, val {}, default {}\".format(\n                paramName, realType, val, defaultValue\n            )\n        )\n\n    try:\n        data = data.astype(realType)\n    except Exception:\n        raise ValueError(\"Could not coerce data for {} to {}, data:\\n{}\".format(paramName, realType, data))\n\n    if data.dtype.kind == \"O\":\n        raise TypeError(\"Failed to convert data to valid HDF5 type {}, data:{}\".format(paramName, data))\n\n    return data\n\n\ndef replaceNonsenseWithNones(data: np.ndarray, paramName: str) -> np.ndarray:\n    \"\"\"\n    Replace special nonsense values with ``None``.\n\n    This essentially reverses the operations performed by\n    :py:func:`replaceNonesWithNonsense`.\n\n    Parameters\n    ----------\n    data\n        The array from the database that contains special ``None`` nonsense values.\n\n    paramName\n        The param name whose data we are dealing with. 
Only used for diagnostics.\n\n    See Also\n    --------\n    replaceNonesWithNonsense\n    \"\"\"\n    # NOTE: This is closely-related to the NONE_MAP.\n    if np.issubdtype(data.dtype, np.floating):\n        isNone = np.isnan(data)\n    elif np.issubdtype(data.dtype, np.integer):\n        isNone = data == np.iinfo(data.dtype).min + 2\n    elif np.issubdtype(data.dtype, np.str_):\n        isNone = data == \"<!None!>\"\n    else:\n        raise TypeError(\"Unable to resolve values that should be None for `{}`\".format(paramName))\n\n    if data.ndim > 1:\n        result = np.ndarray(data.shape[0], dtype=np.dtype(\"O\"))\n        for i in range(data.shape[0]):\n            if isNone[i].all():\n                result[i] = None\n            elif isNone[i].any():\n                # This is the meat of the logic to replace \"nonsense\" with None.\n                result[i] = np.array(data[i], dtype=np.dtype(\"O\"))\n                result[i][isNone[i]] = None\n            else:\n                result[i] = data[i]\n    else:\n        result = np.ndarray(data.shape, dtype=np.dtype(\"O\"))\n        result[:] = data\n        result[isNone] = None\n\n    return result\n"
  },
  {
    "path": "armi/bookkeeping/db/passiveDBLoadPlugin.py",
    "content": "# Copyright 2024 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nProvides the ability to ignore parameters sections of blueprint files.\n\nThis plugin can allow you to more easily open a database, because you can ignore sections of the\nblueprint files, and ignore any parameters as you want.\n\nThis was designed to allow loading an ARMI database without the application that created it.\n\"\"\"\n\nimport yamlize\n\nfrom armi import plugins\nfrom armi.reactor import parameters\nfrom armi.reactor.parameters import ParamLocation\nfrom armi.utils import units\n\n\nclass PassThroughYamlize(yamlize.Object):\n    \"\"\"Just a helper for PassiveDBLoadPlugin, to allow for ignoring unknown blueprints sections.\"\"\"\n\n    @classmethod\n    def from_yaml(cls, loader, node, round_trip_data=None):\n        node.value = []\n        return yamlize.Object.from_yaml.__func__(PassThroughYamlize, loader, node, round_trip_data)\n\n\nclass PassiveDBLoadPlugin(plugins.ArmiPlugin):\n    \"\"\"Provides the ability to passively load a reactor data model from an ARMI DB even if there are\n    unknown parameters and blueprint sections.\n\n    This plugin allows you to define two things:\n\n    1. Sections of blueprint files to ignore entirely.\n    2. 
A collection of unknown parameters that will be loaded without units or underlying metadata.\n\n    To use this plugin, you need to set two class variables before instantiating the ARMI App:\n\n    1. Set ``SKIP_BP_SECTIONS`` to a list of BP section names (strings).\n    2. Set ``UNKNOWN_PARAMS`` to a mapping from param class to name: ``{Core: [\"a\", \"b\", \"c\"]}``\n\n    Notes\n    -----\n    Obviously, if you are loading huge numbers of unknown parameters and ignoring whole sections of\n    blueprints, you are losing information. There is no way to use this plugin and still claim full\n    fidelity of your understanding of the reactor. ARMI does not support any such claims.\n    \"\"\"\n\n    SKIP_BP_SECTIONS = []\n    UNKNOWN_PARAMS = {}\n\n    @staticmethod\n    @plugins.HOOKIMPL\n    def defineBlueprintsSections():\n        \"\"\"Ignore a pre-determined set of blueprint sections.\"\"\"\n        skips = []\n        for skippedBp in PassiveDBLoadPlugin.SKIP_BP_SECTIONS:\n            skips.append(\n                (\n                    skippedBp.replace(\" \", \"\"),\n                    yamlize.Attribute(key=skippedBp, type=PassThroughYamlize, default=None),\n                    PassThroughYamlize,\n                )\n            )\n\n        return skips\n\n    @staticmethod\n    @plugins.HOOKIMPL\n    def defineParameters():\n        \"\"\"Define parameters for the plugin.\"\"\"\n        # build all the parameters we are missing in default ARMI\n        params = {}\n        for dataClass, paramNames in PassiveDBLoadPlugin.UNKNOWN_PARAMS.items():\n            if len(paramNames):\n                params[dataClass] = PassiveDBLoadPlugin.buildParamColl(paramNames)\n\n        return params\n\n    @staticmethod\n    def buildParamColl(names):\n        \"\"\"Try replacing any missing parameters with unitless nonsense.\"\"\"\n        # build a collection of defaulted parameters to passively ignore\n        desc = \"This is just a placeholder Parameter; it's 
meaning is unknown.\"\n        pDefs = parameters.ParameterDefinitionCollection()\n        with pDefs.createBuilder(location=ParamLocation.AVERAGE) as pb:\n            for param in names:\n                pb.defParam(param, units=units.UNITLESS, description=desc, saveToDB=False)\n\n        return pDefs\n"
  },
  {
    "path": "armi/bookkeeping/db/permissions.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nclass Permissions:\n    \"\"\"Mappings to HDF5 permissions flags.\"\"\"\n\n    READ_ONLY_FME = \"r\"  # File Must Exist\n    READ_WRITE_FME = \"r+\"  # File Must Exist\n    CREATE_FILE_TIE = \"w\"  # Truncate If Exists\n    CREATE_FILE_FIE = \"w-\"  # Fail If Exists\n    CREATE_FILE_FIE2 = \"x\"  # Fail If Exists, Alternate option\n    READ_WRITE_CREATE = \"a\"\n\n    DEFAULT = READ_WRITE_CREATE\n\n    # Strictly reading, not writing or creating a file if it doesn't exist\n    read = {READ_ONLY_FME, READ_WRITE_FME}\n\n    write = {\n        READ_WRITE_FME,\n        CREATE_FILE_TIE,\n        CREATE_FILE_FIE,\n        CREATE_FILE_FIE2,\n        READ_WRITE_CREATE,\n    }\n\n    create = {CREATE_FILE_TIE, CREATE_FILE_FIE, CREATE_FILE_FIE2, READ_WRITE_CREATE}\n\n    all = {\n        READ_ONLY_FME,\n        READ_WRITE_FME,\n        CREATE_FILE_TIE,\n        CREATE_FILE_FIE,\n        CREATE_FILE_FIE2,\n        READ_WRITE_CREATE,\n    }\n"
  },
  {
    "path": "armi/bookkeeping/db/tests/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Database tests.\"\"\"\n"
  },
  {
    "path": "armi/bookkeeping/db/tests/test_comparedb3.py",
    "content": "# Copyright 2021 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for the compareDB3 module.\"\"\"\n\nimport unittest\nimport warnings\nfrom unittest.mock import patch\n\nimport h5py\nimport numpy as np\n\nfrom armi.bookkeeping.db.compareDB3 import (\n    DiffResults,\n    OutputWriter,\n    _compareAuxData,\n    _compareSets,\n    _diffSimpleData,\n    _diffSpecialData,\n    compareDatabases,\n)\nfrom armi.bookkeeping.db.databaseInterface import DatabaseInterface\nfrom armi.reactor.tests import test_reactors\nfrom armi.tests import TEST_ROOT, mockRunLogs\nfrom armi.utils.directoryChangers import TemporaryDirectoryChanger\n\n\nclass TestCompareDB3(unittest.TestCase):\n    \"\"\"Tests for the compareDB3 module.\"\"\"\n\n    def setUp(self):\n        self.td = TemporaryDirectoryChanger()\n        self.td.__enter__()\n\n    def tearDown(self):\n        self.td.__exit__(None, None, None)\n\n    def test_outputWriter(self):\n        fileName = \"test_outputWriter.txt\"\n        with OutputWriter(fileName) as out:\n            out.writeln(\"Rubber Baby Buggy Bumpers\")\n\n        txt = open(fileName, \"r\").read()\n        self.assertIn(\"Rubber\", txt)\n\n    def test_compareSets(self):\n        shorter = set({1, 2, 3})\n        longer = set({1, 2, 3, 4})\n        fileName = \"fakeOutWriter.txt\"\n        with OutputWriter(fileName) as out:\n            nDiffs = _compareSets(shorter, longer, out, name=\"number\")\n       
     self.assertEqual(nDiffs, 1)\n            nDiffs = _compareSets(longer, shorter, out, name=\"number\")\n            self.assertEqual(nDiffs, 1)\n\n    def test_diffResultsBasic(self):\n        # init an instance of the class\n        dr = DiffResults(0.01)\n        self.assertEqual(len(dr._columns), 0)\n        self.assertEqual(len(dr._structureDiffs), 0)\n        self.assertEqual(len(dr.diffs), 0)\n\n        # simple test of addDiff\n        dr.addDiff(\"thing\", \"what\", 123.4, 122.2345, 555)\n        self.assertEqual(len(dr._columns), 0)\n        self.assertEqual(len(dr._structureDiffs), 0)\n        self.assertEqual(len(dr.diffs), 3)\n        self.assertEqual(dr.diffs[\"thing/what mean(abs(diff))\"][0], 123.4)\n        self.assertEqual(dr.diffs[\"thing/what mean(diff)\"][0], 122.2345)\n        self.assertEqual(dr.diffs[\"thing/what max(abs(diff))\"][0], 555)\n\n        # simple test of addTimeStep\n        dr.addTimeStep(\"timeStep\")\n        self.assertEqual(dr._structureDiffs[0], 0)\n        self.assertEqual(dr._columns[0], \"timeStep\")\n\n        # simple test of addStructureDiffs\n        dr.addStructureDiffs(7)\n        self.assertEqual(len(dr._structureDiffs), 1)\n        self.assertEqual(dr._structureDiffs[0], 7)\n\n        # simple test of _getDefault\n        self.assertEqual(len(dr._getDefault()), 0)\n\n        # simple test of nDiffs\n        self.assertEqual(dr.nDiffs(), 10)\n\n    def test_compareDatabaseDuplicate(self):\n        \"\"\"End-to-end test of compareDatabases() on a photocopy database.\"\"\"\n        # build two super-simple H5 files for testing\n        o, r = test_reactors.loadTestReactor(\n            TEST_ROOT,\n            customSettings={\"reloadDBName\": \"reloadingDB.h5\"},\n            inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\",\n        )\n\n        # create two DBs, identical but for file names\n        dbs = []\n        for i in range(2):\n            # create the tests DB\n            dbi = 
DatabaseInterface(r, o.cs)\n            dbi.initDB(fName=self._testMethodName + str(i) + \".h5\")\n            db = dbi.database\n\n            # validate the file exists, and force it to be readable again\n            b = h5py.File(db._fullPath, \"r\")\n            self.assertEqual(list(b.keys()), [\"inputs\"])\n            self.assertEqual(sorted(b[\"inputs\"].keys()), [\"blueprints\", \"settings\"])\n            b.close()\n\n            # append to lists\n            dbs.append(db)\n\n        # end-to-end validation that comparing a photocopy database works\n        diffs = compareDatabases(dbs[0]._fullPath, dbs[1]._fullPath)\n        self.assertEqual(len(diffs.diffs), 0)\n        self.assertEqual(diffs.nDiffs(), 0)\n\n    def test_compareDatabaseSim(self):\n        \"\"\"End-to-end test of compareDatabases() on very similar databases.\"\"\"\n        # build two super-simple H5 files for testing\n        o, r = test_reactors.loadTestReactor(\n            TEST_ROOT,\n            customSettings={\"reloadDBName\": \"reloadingDB.h5\"},\n            inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\",\n        )\n\n        # create two DBs, identical but for file names and cycle lengths\n        dbs = []\n        for lenCycle in range(1, 3):\n            # build some test data\n            days = 100\n            cs = o.cs.modified(\n                newSettings={\n                    \"cycles\": [{\"step days\": [days, days], \"power fractions\": [1, 0.5]}],\n                    \"reloadDBName\": \"something_fake.h5\",\n                }\n            )\n\n            # create the tests DB\n            dbi = DatabaseInterface(r, cs)\n            dbi.initDB(fName=self._testMethodName + str(lenCycle) + \".h5\")\n            db = dbi.database\n\n            # populate the db with something\n            r.p.cycle = 0\n            for node in range(2):\n                r.p.timeNode = node\n                r.p.cycleLength = days * lenCycle\n                
db.writeToDB(r)\n\n            # validate the file exists, and force it to be readable again\n            b = h5py.File(db._fullPath, \"r\")\n            dbKeys = sorted(b.keys())\n            self.assertEqual(len(dbKeys), 3)\n            self.assertIn(\"inputs\", dbKeys)\n            self.assertIn(\"c00n00\", dbKeys)\n            self.assertEqual(sorted(b[\"inputs\"].keys()), [\"blueprints\", \"settings\"])\n            b.close()\n\n            # append to lists\n            dbs.append(db)\n\n        # end-to-end validation that comparing a photocopy database works\n        with warnings.catch_warnings():\n            warnings.filterwarnings(\"ignore\")\n            diffs = compareDatabases(\n                dbs[0]._fullPath,\n                dbs[1]._fullPath,\n                timestepCompare=[(0, 0), (0, 1)],\n            )\n\n        # spot check the diffs\n        self.assertGreater(len(diffs.diffs), 200)\n        self.assertLess(len(diffs.diffs), 800)\n        self.assertIn(\"/c00n00\", diffs._columns)\n        self.assertIn(\"/c00n01\", diffs._columns)\n        self.assertIn(0, diffs._structureDiffs)\n        self.assertEqual(sum(diffs._structureDiffs), 0)\n        self.assertEqual(diffs.tolerance, 0)\n        self.assertIn(\"SpentFuelPool/flags max(abs(diff))\", diffs.diffs)\n        self.assertIn(\"Circle/volume mean(diff)\", diffs.diffs)\n        self.assertIn(\"Reactor/flags mean(diff)\", diffs.diffs)\n        self.assertEqual(diffs.nDiffs(), 3)\n\n    def test_diffSpecialData(self):\n        dr = DiffResults(0.01)\n\n        fileName = \"test_diffSpecialData.txt\"\n        with OutputWriter(fileName) as out:\n            # spin up one example H5 Dataset\n            f1 = h5py.File(\"test_diffSpecialData1.hdf5\", \"w\")\n            a1 = np.arange(100, dtype=\"<f8\")\n            refData = f1.create_dataset(\"numberDensities\", data=a1)\n            refData.attrs[\"1\"] = 1\n            refData.attrs[\"2\"] = 22\n            refData.attrs[\"numDens\"] = 
a1\n\n            # spin up an identical example H5 Dataset\n            f2 = h5py.File(\"test_diffSpecialData2.hdf5\", \"w\")\n            srcData = f2.create_dataset(\"numberDensities\", data=a1)\n            srcData.attrs[\"1\"] = 1\n            srcData.attrs[\"2\"] = 22\n            srcData.attrs[\"numDens\"] = a1\n\n            # there should be no difference\n            _diffSpecialData(refData, srcData, out, dr)\n            self.assertEqual(dr.nDiffs(), 0)\n\n            # spin up a different size example H5 Dataset\n            f3 = h5py.File(\"test_diffSpecialData3.hdf5\", \"w\")\n            a2 = np.arange(90, dtype=\"<f8\")\n            srcData3 = f3.create_dataset(\"numberDensities\", data=a2)\n            srcData3.attrs[\"1\"] = 1\n            srcData3.attrs[\"2\"] = 22\n            srcData3.attrs[\"numDens\"] = a2\n\n            # there should a logged error\n            with mockRunLogs.BufferLog() as mock:\n                _diffSpecialData(refData, srcData3, out, dr)\n                self.assertIn(\"Special formatting parameters for\", mock.getStdout())\n\n            # make an H5 datasets that will cause unpackSpecialData to fail\n            f4 = h5py.File(\"test_diffSpecialData4.hdf5\", \"w\")\n            refData4 = f4.create_dataset(\"numberDensities\", data=a2)\n            refData4.attrs[\"shapes\"] = \"2\"\n            refData4.attrs[\"numDens\"] = a2\n            refData4.attrs[\"specialFormatting\"] = True\n            f5 = h5py.File(\"test_diffSpecialData5.hdf5\", \"w\")\n            srcData5 = f5.create_dataset(\"numberDensities\", data=a2)\n            srcData5.attrs[\"shapes\"] = \"2\"\n            srcData5.attrs[\"numDens\"] = a2\n            srcData5.attrs[\"specialFormatting\"] = True\n\n            # there should a log message\n            with mockRunLogs.BufferLog() as mock:\n                _diffSpecialData(refData4, srcData5, out, dr)\n                self.assertIn(\"Unable to unpack special data for\", mock.getStdout())\n\n  
          # make an H5 datasets that will add a np.inf diff because keys don't match\n            f6 = h5py.File(\"test_diffSpecialData6.hdf5\", \"w\")\n            refData6 = f6.create_dataset(\"numberDensities\", data=a2)\n            refData6.attrs[\"shapes\"] = \"2\"\n            refData6.attrs[\"numDens\"] = a2\n            f7 = h5py.File(\"test_diffSpecialData7.hdf5\", \"w\")\n            srcData7 = f7.create_dataset(\"densities\", data=a2)\n            srcData7.attrs[\"colors\"] = \"2\"\n            srcData7.attrs[\"numberDens\"] = a2\n            _diffSpecialData(refData6, srcData7, out, dr)\n\n    def test_diffSimpleData(self):\n        dr = DiffResults(0.01)\n\n        # spin up one example H5 Dataset\n        f1 = h5py.File(\"test_diffSimpleData1.hdf5\", \"w\")\n        a1 = np.arange(1, 101, dtype=\"<f8\")\n        refData = f1.create_dataset(\"numberDensities\", data=a1)\n        refData.attrs[\"1\"] = 1\n        refData.attrs[\"2\"] = 22\n        refData.attrs[\"numDens\"] = a1\n\n        # spin up an identical example H5 Dataset\n        f2 = h5py.File(\"test_diffSimpleData2.hdf5\", \"w\")\n        srcData = f2.create_dataset(\"numberDensities\", data=a1)\n        srcData.attrs[\"1\"] = 1\n        srcData.attrs[\"2\"] = 22\n        srcData.attrs[\"numDens\"] = a1\n\n        # there should be no difference\n        _diffSimpleData(refData, srcData, dr)\n        self.assertEqual(dr.nDiffs(), 0)\n\n        # spin up a different size example H5 Dataset\n        f3 = h5py.File(\"test_diffSimpleData3.hdf5\", \"w\")\n        a2 = np.arange(1, 91, dtype=\"<f8\")\n        srcData3 = f3.create_dataset(\"numberDensities\", data=a2)\n        srcData3.attrs[\"1\"] = 1\n        srcData3.attrs[\"2\"] = 22\n        srcData3.attrs[\"numDens\"] = a2\n\n        # there should be a small difference\n        _diffSimpleData(refData, srcData3, dr)\n        self.assertEqual(dr.nDiffs(), 3)\n\n    def test_compareAuxData(self):\n        dr = DiffResults(0.01)\n\n        
fileName = \"test_diffSpecialData.txt\"\n        with OutputWriter(fileName) as out:\n            # spin up one example H5 Dataset\n            f1 = h5py.File(\"test_compareAuxData1.hdf5\", \"w\")\n            a1 = np.arange(100, dtype=\"<f8\")\n            refData = f1.create_group(\"numberDensities\")\n            refData.attrs[\"1\"] = 1\n            refData.attrs[\"2\"] = 22\n            refData.attrs[\"numDens\"] = a1\n\n            # spin up an identical example H5 Dataset\n            f2 = h5py.File(\"test_compareAuxData2.hdf5\", \"w\")\n            srcData = f2.create_group(\"numberDensities\")\n            srcData.attrs[\"1\"] = 1\n            srcData.attrs[\"2\"] = 22\n            srcData.attrs[\"numDens\"] = a1\n\n            # there should be no difference\n            _compareAuxData(out, refData, srcData, dr)\n            self.assertEqual(dr.nDiffs(), 0)\n\n    def test_differentlySizedSpecialData(self):\n        \"\"\"Ensure that special formatting data that are differently sized report a diff.\"\"\"\n        differ = DiffResults(0.0)\n        with h5py.File(self._testMethodName + \".h5\", \"w\") as f, OutputWriter(self._testMethodName + \".txt\") as out:\n            # Create two datasets with no data, but with different attributes\n            # The attributes are used in the special data checks\n            short = f.create_dataset(\"short\", dtype=float)\n            short.attrs[\"offsets\"] = np.arange(10)\n            long = f.create_dataset(\"long\", dtype=float)\n            long.attrs[\"offsets\"] = np.arange(100)\n            with patch.object(out, \"writeln\") as writeln:\n                _diffSpecialData(short, long, out, differ)\n        # Ensure the user is alerted the datasets have different parameters\n        writeln.assert_called_once()\n        # Ensure this is treated as a diff\n        self.assertGreater(differ.nDiffs(), 0)\n\n    def test_nothingForDictionaries(self):\n        \"\"\"Ensure we alert the user we do not perform 
diffs on dictionaries.\"\"\"\n        differ = DiffResults(0.0)\n        with h5py.File(self._testMethodName + \".h5\", \"w\") as f, OutputWriter(self._testMethodName + \".txt\") as out:\n            first = f.create_dataset(\"first_dictionary\", dtype=float)\n            first.attrs[\"dict\"] = True\n            second = f.create_dataset(\"second_dictionary\", dtype=float)\n            second.attrs[\"dict\"] = True\n            with patch.object(out, \"writeln\") as writeln:\n                _diffSpecialData(first, second, out, differ)\n            # Not considered a diff\n            self.assertEqual(differ.nDiffs(), 0)\n            # But we've let the user know\n            writeln.assert_called_once()\n            # And the parameter is in the printed message\n            msg = writeln.call_args.args[0]\n            # NOTE If you try to grab first.name on the closed DB, you get None which is not helpful\n            self.assertIn(first.name, msg)\n"
  },
  {
    "path": "armi/bookkeeping/db/tests/test_database.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for the Database class.\"\"\"\n\nimport io\nimport os\nimport shutil\nimport subprocess\nimport unittest\nfrom glob import glob\nfrom unittest.mock import Mock, patch\n\nimport h5py\nimport numpy as np\n\nfrom armi.bookkeeping.db import _getH5File, database, loadOperator\nfrom armi.bookkeeping.db.database import Database\nfrom armi.bookkeeping.db.databaseInterface import DatabaseInterface\nfrom armi.bookkeeping.db.jaggedArray import JaggedArray\nfrom armi.reactor import parameters\nfrom armi.reactor.excoreStructure import ExcoreCollection, ExcoreStructure\nfrom armi.reactor.grids import CoordinateLocation, MultiIndexLocation\nfrom armi.reactor.reactors import Core, Reactor\nfrom armi.reactor.spentFuelPool import SpentFuelPool\nfrom armi.reactor.tests.test_blocks import loadTestBlock\nfrom armi.settings.fwSettings.globalSettings import (\n    CONF_GROW_TO_FULL_CORE_AFTER_LOAD,\n    CONF_SORT_REACTOR,\n)\nfrom armi.testing import TESTING_ROOT, loadTestReactor\nfrom armi.tests import TEST_ROOT, mockRunLogs\nfrom armi.utils import getPreviousTimeNode, safeCopy\nfrom armi.utils.directoryChangers import TemporaryDirectoryChanger\n\n# determine if this is a parallel run, and git is installed\nGIT_EXE = None\nif shutil.which(\"git\") is not None:\n    GIT_EXE = \"git\"\nelif shutil.which(\"git.exe\") is not None:\n    GIT_EXE = \"git.exe\"\n\n\nclass 
TestDatabase(unittest.TestCase):\n    \"\"\"Tests for the Database class that require a large, complicated reactor.\"\"\"\n\n    def setUp(self):\n        self.td = TemporaryDirectoryChanger()\n        self.td.__enter__()\n        self.o, self.r = loadTestReactor(\n            inputFilePath=TESTING_ROOT,\n            inputFileName=\"reactors/thirdSmallHexReactor/thirdSmallHexReactor.yaml\",\n            customSettings={\"reloadDBName\": \"reloadingDB.h5\"},\n        )\n\n        self.dbi = DatabaseInterface(self.r, self.o.cs)\n        self.dbi.initDB(fName=self._testMethodName + \".h5\")\n        self.db: Database = self.dbi.database\n        self.stateRetainer = self.r.retainState().__enter__()\n\n        # used to test location-based history. see details below\n        self.centralAssemSerialNums = []\n        self.centralTopBlockSerialNums = []\n\n    def tearDown(self):\n        self.db.close()\n        self.stateRetainer.__exit__()\n        self.td.__exit__(None, None, None)\n\n    def makeShuffleHistory(self):\n        \"\"\"Walk the reactor through a few time steps with some shuffling.\"\"\"\n        # Serial numbers *are not stable* (i.e., they can be different between test runs due to parallelism and test run\n        # order). However, they are the simplest way to check correctness of location-based history tracking. 
So we\n        # stash the serial numbers at the location of interest so we can use them later to check our work.\n        self.centralAssemSerialNums = []\n        self.centralTopBlockSerialNums = []\n\n        grid = self.r.core.spatialGrid\n\n        t = 0\n        for cycle in range(2):\n            a1 = self.r.core.childrenByLocator[grid[cycle, 0, 0]]\n            a2 = self.r.core.childrenByLocator[grid[0, 0, 0]]\n            olda1Loc = a1.spatialLocator\n            a1.moveTo(a2.spatialLocator)\n            a2.moveTo(olda1Loc)\n            c = self.r.core.childrenByLocator[grid[0, 0, 0]]\n            self.centralAssemSerialNums.append(c.p.serialNum)\n            self.centralTopBlockSerialNums.append(c[-1].p.serialNum)\n\n            for node in range(2):\n                # something that splitDatabase won't change, so that we can make sure that the right data went to the\n                # right new groups/cycles\n                self.r.p.cycleLength = cycle\n                self.r.p.cycle = cycle\n                self.r.p.timeNode = node\n                t += 1.0\n                self.r.p.time = t\n                self.db.writeToDB(self.r)\n\n        # Add some more data that isn't written to the database to test the DatabaseInterface API.\n        self.r.p.cycle = 2\n        self.r.p.timeNode = 0\n        self.r.p.cycleLength = cycle\n        self.r.core[0].p.chargeTime = 2\n\n        # add some fake missing parameter data to test allowMissing\n        self.db.h5db[\"c00n00/Reactor/missingParam\"] = \"i don't exist\"\n\n    def test_load(self):\n        \"\"\"Load a reactor at different time steps, from the database.\n\n        .. 
test:: Load the reactor from the database.\n            :id: T_ARMI_DB_TIME1\n            :tests: R_ARMI_DB_TIME\n        \"\"\"\n        self.makeShuffleHistory()\n        with self.assertRaises(KeyError):\n            _r = self.db.load(0, 0)\n\n        # Default load, should pass without error\n        _r = self.db.load(0, 0, allowMissing=True)\n\n        # Show that we can use negative indices to load\n        r = self.db.load(0, -2, allowMissing=True)\n        self.assertEqual(r.p.timeNode, 1)\n\n        with self.assertRaises(ValueError):\n            # makeShuffleHistory only populates 2 nodes, but the case settings defines 3, so we must check -4 before\n            # getting an error\n            self.db.load(0, -4, allowMissing=True)\n\n        # show we can delete a specify H5 key.\n        del self.db.h5db[\"c00n00/Reactor/missingParam\"]\n        _r = self.db.load(0, 0, allowMissing=False)\n\n        # show we can delete an entire time now from the DB.\n        del self.db[0, 0, \"\"]\n        with self.assertRaises(KeyError):\n            self.db.load(0, 0, allowMissing=False)\n\n        # We should not be able to set the fileName if a file is open.\n        with self.assertRaises(RuntimeError):\n            self.db.fileName = \"whatever.h5\"\n\n    def test_loadSortSetting(self):\n        self.makeShuffleHistory()\n\n        # default load, should pass without error\n        r0 = self.db.load(0, 0, allowMissing=True)\n\n        # test that the reactor loads differently, dependent on the setting\n        cs = self.db.loadCS()\n        cs = cs.modified(newSettings={CONF_SORT_REACTOR: False})\n        r1 = self.db.load(0, 0, cs=cs, allowMissing=True)\n\n        # the reactor / core should be the same size\n        self.assertEqual(len(r0), len(r1))\n        self.assertEqual(len(r0.core), len(r1.core))\n\n    def test_history(self):\n        self.makeShuffleHistory()\n\n        grid = self.r.core.spatialGrid\n        testAssem = 
self.r.core.childrenByLocator[grid[0, 0, 0]]\n        testBlock = testAssem[-1]\n\n        # Test assem\n        hist = self.db.getHistoryByLocation(testAssem, params=[\"chargeTime\", \"serialNum\"])\n        expectedSn = {(c, n): self.centralAssemSerialNums[c] for c in range(2) for n in range(2)}\n        self.assertEqual(expectedSn, hist[\"serialNum\"])\n\n        # test block\n        hists = self.db.getHistoriesByLocation([testBlock], params=[\"serialNum\"], timeSteps=[(0, 0), (1, 0)])\n        expectedSn = {(c, 0): self.centralTopBlockSerialNums[c] for c in range(2)}\n        self.assertEqual(expectedSn, hists[testBlock][\"serialNum\"])\n\n        # can't mix blocks and assems, since they are different distance from core\n        with self.assertRaises(ValueError):\n            self.db.getHistoriesByLocation([testAssem, testBlock], params=[\"serialNum\"])\n\n        # if requested time step isn't written, return no content\n        hist = self.dbi.getHistory(self.r.core[0], params=[\"chargeTime\", \"serialNum\"], byLocation=True)\n        self.assertIn((2, 0), hist[\"chargeTime\"].keys())\n        self.assertEqual(hist[\"chargeTime\"][(2, 0)], 2)\n\n        # test edge case: ancient DB file\n        with patch.object(self.db, \"_versionMinor\", 3), self.assertRaises(ValueError):\n            self.db.getHistoriesByLocation([testBlock], params=[\"serialNum\"], timeSteps=[(0, 0), (1, 0)])\n\n        # test edge case: DB is not version 3\n        with patch.object(self.db, \"_versionMajor\", 2), self.assertRaises(ValueError):\n            self.db.getHistoryByLocation(testAssem, params=[\"chargeTime\", \"serialNum\"])\n\n        with patch.object(self.db, \"_versionMajor\", 4), self.assertRaises(ValueError):\n            self.db.getHistoryByLocation(testAssem, params=[\"chargeTime\", \"serialNum\"])\n\n    def test_fullCoreOnDbLoad(self):\n        \"\"\"Test we can expand a reactor to full core when loading from DB via settings.\"\"\"\n        
self.assertFalse(self.r.core.isFullCore)\n        self.db.writeToDB(self.r)\n        cs = self.db.loadCS()\n        cs = cs.modified(newSettings={CONF_GROW_TO_FULL_CORE_AFTER_LOAD: True})\n        r: Reactor = self.db.load(0, 0, cs=cs)\n        self.assertTrue(r.core.isFullCore)\n\n    def test_dontExpandIfFullCoreInDB(self):\n        \"\"\"Test that a full core reactor in the database is not expanded further.\"\"\"\n        self.assertFalse(self.r.core.isFullCore)\n        self.db.writeToDB(self.r)\n        cs = self.db.loadCS()\n        cs = cs.modified(newSettings={CONF_GROW_TO_FULL_CORE_AFTER_LOAD: True})\n        mockGrow = Mock()\n        with (\n            patch(\"armi.reactor.cores.Core.isFullCore\", Mock(return_value=True)),\n            patch(\"armi.reactor.cores.Core.growToFullCore\", mockGrow),\n        ):\n            self.db.load(0, 0, cs=cs)\n        mockGrow.assert_not_called()\n\n    def test_getCycleNodeAtTime(self):\n        self.makeShuffleHistory()\n        self.db.close()\n\n        # test that the math works correctly\n        cycleNodes = Database.getCycleNodeAtTime(self.db.fileName, 0, 0.87, False)\n        self.assertEqual(cycleNodes, [\"c00n00\"])\n\n        cycleNodes = Database.getCycleNodeAtTime(self.db.fileName, 0.23, 1.2, False)\n        self.assertEqual(cycleNodes, [\"c00n00\", \"c00n01\"])\n\n        cycleNodes = Database.getCycleNodeAtTime(self.db.fileName, 0.001, 2.345, False)\n        self.assertEqual(cycleNodes, [\"c00n00\", \"c00n01\", \"c01n00\"])\n\n        cycleNodes = Database.getCycleNodeAtTime(self.db.fileName, 0, 3.123, False)\n        self.assertEqual(cycleNodes, [\"c00n00\", \"c00n01\", \"c01n00\", \"c01n01\"])\n\n        cycleNodes = Database.getCycleNodeAtTime(self.db.fileName, 0.123, 4.0, False)\n        self.assertEqual(cycleNodes, [\"c00n00\", \"c00n01\", \"c01n00\", \"c01n01\"])\n\n        # test some exceptions are correctly raised\n        with self.assertRaises(AssertionError):\n            
Database.getCycleNodeAtTime(self.db.fileName, -1, 1, False)\n\n        with self.assertRaises(AssertionError):\n            Database.getCycleNodeAtTime(self.db.fileName, 3, 1, False)\n\n        with self.assertRaises(ValueError):\n            Database.getCycleNodeAtTime(self.db.fileName, 5, 6, False)\n\n        with self.assertRaises(ValueError):\n            Database.getCycleNodeAtTime(self.db.fileName, 1, 140, True)\n\n\nclass TestDatabaseSmaller(unittest.TestCase):\n    \"\"\"Tests for the Database class, that can use a smaller test reactor.\"\"\"\n\n    def setUp(self):\n        self.td = TemporaryDirectoryChanger()\n        self.td.__enter__()\n        self.o, self.r = loadTestReactor(\n            TEST_ROOT,\n            customSettings={\"reloadDBName\": \"reloadingDB.h5\"},\n            inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\",\n        )\n\n        self.dbi = DatabaseInterface(self.r, self.o.cs)\n        self.dbi.initDB(fName=self._testMethodName + \".h5\")\n        self.db: Database = self.dbi.database\n        self.stateRetainer = self.r.retainState().__enter__()\n\n    def tearDown(self):\n        self.db.close()\n        self.stateRetainer.__exit__()\n        self.td.__exit__(None, None, None)\n\n    def makeHistory(self):\n        \"\"\"Walk the reactor through a few time steps and write them to the db.\"\"\"\n        for cycle, node in ((cycle, node) for cycle in range(2) for node in range(2)):\n            self.r.p.cycle = cycle\n            self.r.p.timeNode = node\n            # something that splitDatabase won't change, so that we can make sure that\n            # the right data went to the right new groups/cycles\n            self.r.p.cycleLength = cycle\n\n            self.db.writeToDB(self.r)\n\n    def test_loadOperator(self):\n        self.makeHistory()\n        self.db.close()\n        # Write a bad setting to the DB\n        with h5py.File(self.db.fileName, \"r+\") as hf:\n            settingz = 
hf[\"inputs/settings\"].asstr()[()]\n            settingz += \"  fakeTerminator: I'll be back\"\n            stream = io.StringIO(settingz)\n            csString = stream.read()\n            del hf[\"inputs/settings\"]\n            hf[\"inputs/settings\"] = csString\n\n        # Test with no complaints\n        with mockRunLogs.BufferLog() as mock:\n            _o = loadOperator(\n                self._testMethodName + \".h5\",\n                0,\n                0,\n                allowMissing=True,\n                handleInvalids=False,\n            )\n            self.assertNotIn(\"fakeTerminator\", mock.getStdout())\n\n        # Test with complaints\n        with mockRunLogs.BufferLog() as mock:\n            _o = loadOperator(\n                self._testMethodName + \".h5\",\n                0,\n                0,\n                allowMissing=True,\n                handleInvalids=True,\n            )\n            self.assertIn(\"Ignoring invalid settings\", mock.getStdout())\n            self.assertIn(\"fakeTerminator\", mock.getStdout())\n\n    def _compareArrays(self, ref, src):\n        \"\"\"\n        Compare two numpy arrays.\n\n        Comparing numpy arrays that may have unsavory data (NaNs, Nones, jagged data, etc.) is really difficult. 
For\n        now, convert to a list and compare element-by-element.\n        \"\"\"\n        self.assertEqual(type(ref), type(src))\n        if isinstance(ref, np.ndarray):\n            ref = ref.tolist()\n            src = src.tolist()\n\n        for v1, v2 in zip(ref, src):\n            # Entries may be None\n            if isinstance(v1, np.ndarray):\n                v1 = v1.tolist()\n            if isinstance(v2, np.ndarray):\n                v2 = v2.tolist()\n            self.assertEqual(v1, v2)\n\n    def _compareRoundTrip(self, data):\n        \"\"\"Make sure that data is unchanged by packing/unpacking.\"\"\"\n        packed, attrs = database.packSpecialData(data, \"testing\")\n        roundTrip = database.unpackSpecialData(packed, attrs, \"testing\")\n        self._compareArrays(data, roundTrip)\n\n    def test_getArrayShape(self):\n        \"\"\"Tests a helper method for ``_writeParams``.\"\"\"\n        base = [1, 2, 3, 4]\n        self.assertEqual(Database._getArrayShape(base), (4,))\n        self.assertEqual(Database._getArrayShape(tuple(base)), (4,))\n        arr = np.array(base)\n        self.assertEqual(Database._getArrayShape(arr), (4,))\n        arr = np.array([base])\n        self.assertEqual(Database._getArrayShape(arr), (1, 4))\n        # not array type\n        self.assertEqual(Database._getArrayShape(1), 1)\n        self.assertEqual(Database._getArrayShape(None), 1)\n\n    def test_writeToDB(self):\n        \"\"\"Test writing to the database.\n\n        .. 
test:: Write a single time step of data to the database.\n            :id: T_ARMI_DB_TIME0\n            :tests: R_ARMI_DB_TIME\n        \"\"\"\n        self.r.p.cycle = 0\n        self.r.p.cycleLength = 1\n        self.r.p.time = 0\n        self.r.p.timeNode = 0\n\n        # Adding some nonsense in, to test NoDefault params\n        self.r.p.availabilityFactor = parameters.NoDefault\n\n        # validate that the H5 file gets bigger after the write\n        self.assertEqual(list(self.db.h5db.keys()), [\"inputs\"])\n        self.db.writeToDB(self.r)\n        self.assertEqual(sorted(self.db.h5db.keys()), [\"c00n00\", \"inputs\"])\n\n        # check the keys for a single time step\n        keys = [\n            \"Circle\",\n            \"Core\",\n            \"DerivedShape\",\n            \"Helix\",\n            \"HexAssembly\",\n            \"HexBlock\",\n            \"Hexagon\",\n            \"Reactor\",\n            \"SpentFuelPool\",\n            \"layout\",\n        ]\n        self.assertEqual(sorted(self.db.h5db[\"c00n00\"].keys()), sorted(keys))\n\n        # validate availabilityFactor did not make it into the H5 file, but the time parameters did\n        rKeys = [\n            \"cycle\",\n            \"cycleLength\",\n            \"time\",\n            \"timeNode\",\n        ]\n        h5Keys = sorted(self.db.h5db[\"c00n00\"][\"Reactor\"].keys())\n        for rKey in rKeys:\n            self.assertIn(rKey, h5Keys)\n\n    def test_getH5File(self):\n        \"\"\"\n        Get the h5 file for the database, because that file format is language-agnostic.\n\n        .. 
test:: Show the database is H5-formatted.\n            :id: T_ARMI_DB_H5\n            :tests: R_ARMI_DB_H5\n        \"\"\"\n        with self.assertRaises(TypeError):\n            _getH5File(None)\n\n        h5 = _getH5File(self.db)\n        self.assertEqual(type(h5), h5py.File)\n\n    def test_auxData(self):\n        path = self.db.getAuxiliaryDataPath((2, 0), \"test_stuff\")\n        self.assertEqual(path, \"c02n00/test_stuff\")\n\n        with self.assertRaises(KeyError):\n            self.db.genAuxiliaryData((-1, -1))\n\n    def test_replaceNones(self):\n        \"\"\"Super basic test that we handle Nones correctly in database read/writes.\"\"\"\n        data3 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n        data1 = np.array([1, 2, 3, 4, 5, 6, 7, 8])\n        data1iNones = np.array([1, 2, None, 5, 6])\n        data1fNones = np.array([None, 2.0, None, 5.0, 6.0])\n        data2fNones = np.array([None, [[1.0, 2.0, 6.0], [2.0, 3.0, 4.0]]], dtype=object)\n        twoByTwo = np.array([[1, 2], [3, 4]])\n        twoByOne = np.array([[1], [None]])\n        threeByThree = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n        dataJag = JaggedArray([twoByTwo, threeByThree], \"testParam\")\n        dataJagNones = JaggedArray([twoByTwo, twoByOne, threeByThree], \"testParam\")\n        dataDict = np.array([{\"bar\": 2, \"baz\": 3}, {\"foo\": 4, \"baz\": 6}, {\"foo\": 7, \"bar\": 8}])\n        self._compareRoundTrip(data3)\n        self._compareRoundTrip(data1)\n        self._compareRoundTrip(data1iNones)\n        self._compareRoundTrip(data1fNones)\n        self._compareRoundTrip(data2fNones)\n        self._compareRoundTrip(dataJag)\n        self._compareRoundTrip(dataJagNones)\n        self._compareRoundTrip(dataDict)\n\n    def test_mergeHistory(self):\n        self.makeHistory()\n\n        # put some big data in an HDF5 attribute. 
This will exercise the code that pulls such attributes into a formal\n        # dataset and a reference.\n        self.r.p.cycle = 1\n        self.r.p.timeNode = 0\n        tnGroup = self.db.getH5Group(self.r)\n        randomText = \"this isn't a reference to another dataset\"\n        Database._writeAttrs(\n            tnGroup[\"layout/serialNum\"],\n            tnGroup,\n            {\n                \"fakeBigData\": np.eye(8),\n                \"someString\": randomText,\n            },\n        )\n\n        dbPath = \"restartDB.h5\"\n        db2 = Database(dbPath, \"w\")\n        with db2:\n            db2.mergeHistory(self.db, 2, 2)\n            self.r.p.cycle = 1\n            self.r.p.timeNode = 0\n            tnGroup = db2.getH5Group(self.r)\n\n            # this test is a little bit implementation-specific, but nice to be explicit\n            self.assertEqual(tnGroup[\"layout/serialNum\"].attrs[\"someString\"], randomText)\n\n            # exercise the _resolveAttrs function\n            attrs = Database._resolveAttrs(tnGroup[\"layout/serialNum\"].attrs, tnGroup)\n            self.assertTrue(np.array_equal(attrs[\"fakeBigData\"], np.eye(8)))\n\n            keys = sorted(db2.keys())\n            self.assertEqual(len(keys), 4)\n            self.assertEqual(keys[:3], [\"/c00n00\", \"/c00n01\", \"/c01n00\"])\n\n        # check edge case: major vesion is not 3\n        db3 = Database(\"restartDBedgeCase1.h5\", \"w\")\n        with patch.object(db3, \"_versionMajor\", 2), self.assertRaises(ValueError):\n            with db3:\n                db3.mergeHistory(self.db, 2, 2)\n\n    def test_splitDatabase(self):\n        self.makeHistory()\n\n        self.db.splitDatabase([(c, n) for c in (0, 1) for n in range(2)], \"-all-iterations\")\n\n        # Closing to copy back from fast path\n        self.db.close()\n\n        with h5py.File(\"test_splitDatabase.h5\", \"r\") as newDb:\n            self.assertEqual(newDb[\"c00n00/Reactor/cycle\"][()], 0)\n            
self.assertEqual(newDb[\"c00n00/Reactor/cycleLength\"][()][0], 0)\n            self.assertNotIn(\"c03n00\", newDb)\n            self.assertEqual(newDb.attrs[\"databaseVersion\"], database.DB_VERSION)\n\n            # validate that the min set of meta data keys exists\n            meta_data_keys = [\n                \"appName\",\n                \"armiLocation\",\n                \"databaseVersion\",\n                \"hostname\",\n                \"localCommitHash\",\n                \"machines\",\n                \"platform\",\n                \"platformArch\",\n                \"platformRelease\",\n                \"platformVersion\",\n                \"pluginPaths\",\n                \"python\",\n                \"startTime\",\n                \"successfulCompletion\",\n                \"user\",\n                \"version\",\n            ]\n            for meta_key in meta_data_keys:\n                self.assertIn(meta_key, newDb.attrs)\n                self.assertIsNotNone(newDb.attrs[meta_key])\n\n        # test an edge case - no DB to split\n        with self.assertRaises(ValueError):\n            self.db.h5db = None\n            self.db.splitDatabase([(c, n) for c in (0, 1) for n in range(2)], \"-all-iterations\")\n\n    @unittest.skipIf(GIT_EXE is None, \"This test needs Git.\")\n    def test_grabLocalCommitHash(self):\n        \"\"\"Test of static method to grab a local commit hash with ARMI version.\"\"\"\n        # 1. test outside a Git repo\n        localHash = Database.grabLocalCommitHash()\n        self.assertEqual(localHash, \"unknown\")\n\n        # 2. 
test inside an empty git repo\n        try:\n            code = subprocess.run(\n                [\"git\", \"init\", \".\"],\n                stdout=subprocess.DEVNULL,\n                stderr=subprocess.DEVNULL,\n            ).returncode\n        except FileNotFoundError:\n            print(\"Skipping this test because it is being run outside a git repo.\")\n            return\n\n        self.assertEqual(code, 0)\n        localHash = Database.grabLocalCommitHash()\n        self.assertEqual(localHash, \"unknown\")\n\n        # 3. test inside a git repo with one tag\n        # commit the empty repo\n        code = subprocess.run(\n            [\"git\", \"commit\", \"--allow-empty\", \"-m\", '\"init\"', \"--author\", '\"sam <>\"'],\n            stdout=subprocess.DEVNULL,\n            stderr=subprocess.DEVNULL,\n        ).returncode\n        if code == 128:\n            # GitHub Actions blocks certain kinds of Git commands\n            return\n\n        # create a tag off our new commit\n        code = subprocess.run(\n            [\"git\", \"tag\", \"thanks\", \"-m\", '\"you_rock\"'],\n            stdout=subprocess.DEVNULL,\n            stderr=subprocess.DEVNULL,\n        ).returncode\n        self.assertEqual(code, 0)\n\n        # test that we recover the correct commit hash\n        localHash = Database.grabLocalCommitHash()\n        self.assertEqual(localHash, \"thanks\")\n\n        # delete the .git directory\n        code = subprocess.run([\"git\", \"clean\", \"-f\"], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL).returncode\n        self.assertEqual(code, 0)\n        code = subprocess.run(\n            [\"git\", \"clean\", \"-f\", \"-d\"],\n            stdout=subprocess.DEVNULL,\n            stderr=subprocess.DEVNULL,\n        ).returncode\n        self.assertEqual(code, 0)\n\n    def test_fileName(self):\n        # test the file name getter\n        self.assertEqual(str(self.db.fileName), \"test_fileName.h5\")\n\n        # test the file name setter\n    
    self.db.close()\n        self.db.fileName = \"thing.h5\"\n        self.assertEqual(str(self.db.fileName), \"thing.h5\")\n\n    def test_readInputsFromDB(self):\n        \"\"\"Test that we can read inputs from the database.\n\n        .. test:: Save and retrieve settings from the database.\n            :id: T_ARMI_DB_CS\n            :tests: R_ARMI_DB_CS\n\n        .. test:: Save and retrieve blueprints from the database.\n            :id: T_ARMI_DB_BP\n            :tests: R_ARMI_DB_BP\n        \"\"\"\n        inputs = self.db.readInputsFromDB()\n        self.assertEqual(len(inputs), 2)\n\n        # settings\n        self.assertGreater(len(inputs[0]), 100)\n        self.assertIn(\"settings:\", inputs[0])\n\n        # blueprints\n        self.assertGreater(len(inputs[1]), 2400)\n        self.assertIn(\"blocks:\", inputs[1])\n\n    def test_deleting(self):\n        self.assertTrue(isinstance(self.db, Database))\n        del self.db\n        self.assertFalse(hasattr(self, \"db\"))\n        self.db = self.dbi.database\n\n    def test_open(self):\n        self.assertTrue(self.db.isOpen())\n        with self.assertRaises(ValueError):\n            self.db.open()\n\n    def test_loadCS(self):\n        cs = self.db.loadCS()\n        self.assertEqual(cs[\"nTasks\"], 1)\n        self.assertEqual(cs[\"nCycles\"], 2)\n\n    def test_loadBlueprints(self):\n        bp = self.db.loadBlueprints()\n        self.assertIsNone(bp.nuclideFlags)\n        self.assertEqual(len(bp.assemblies), 0)\n\n    def test_prepRestartRun(self):\n        \"\"\"\n        This test is based on the armiRun.yaml case that is loaded during the `setUp` above. In that cs, `reloadDBName`\n        is set to 'reloadingDB.h5', `startCycle` = 1, and `startNode` = 2. The nonexistent 'reloadingDB.h5' must first\n        be created here for this test.\n\n        .. 
test:: Runs can be restarted from a snapshot.\n            :id: T_ARMI_SNAPSHOT_RESTART\n            :tests: R_ARMI_SNAPSHOT_RESTART\n        \"\"\"\n        # first successfully call to prepRestartRun\n        o, r = loadTestReactor(\n            inputFilePath=TESTING_ROOT,\n            inputFileName=\"reactors/thirdSmallHexReactor/thirdSmallHexReactor.yaml\",\n            customSettings={\"reloadDBName\": \"reloadingDB.h5\"},\n        )\n        cs = o.cs\n\n        ratedPower = cs[\"power\"]\n        startCycle = cs[\"startCycle\"]\n        startNode = cs[\"startNode\"]\n        cyclesSetting = [\n            {\"step days\": [1000, 1000], \"power fractions\": [1, 1]},\n            {\"step days\": [1000, 1000], \"power fractions\": [1, 1]},\n            {\"step days\": [1000, 1000], \"power fractions\": [1, 1]},\n        ]\n        cycleP, nodeP = getPreviousTimeNode(startCycle, startNode, cs)\n        cyclesSetting[cycleP][\"power fractions\"][nodeP] = 0.5\n        numCycles = 2\n        numNodes = 2\n        cs = cs.modified(\n            newSettings={\n                \"nCycles\": numCycles,\n                \"cycles\": cyclesSetting,\n                \"reloadDBName\": \"something_fake.h5\",\n            }\n        )\n\n        # create a db based on the cs\n        dbi = DatabaseInterface(r, cs)\n        dbi.initDB(fName=\"reloadingDB.h5\")\n        db = dbi.database\n\n        # populate the db with some things\n        for cycle, node in ((cycle, node) for cycle in range(numCycles) for node in range(numNodes)):\n            r.p.cycle = cycle\n            r.p.timeNode = node\n            r.p.cycleLength = sum(cyclesSetting[cycle][\"step days\"])\n            r.core.p.power = ratedPower * cyclesSetting[cycle][\"power fractions\"][node]\n            db.writeToDB(r)\n        self.assertTrue(db.isOpen())\n        db.close()\n        self.assertFalse(db.isOpen())\n\n        self.dbi.prepRestartRun()\n\n        # prove that the reloaded reactor has the correct 
power\n        self.assertEqual(self.o.r.p.cycle, cycleP)\n        self.assertEqual(self.o.r.p.timeNode, nodeP)\n        self.assertEqual(cyclesSetting[cycleP][\"power fractions\"][nodeP], 0.5)\n        self.assertEqual(\n            self.o.r.core.p.power,\n            ratedPower * cyclesSetting[cycleP][\"power fractions\"][nodeP],\n        )\n\n        # now make the cycle histories clash and confirm that an error is thrown\n        cs = cs.modified(\n            newSettings={\n                \"cycles\": [\n                    {\"step days\": [666, 666], \"power fractions\": [1, 1]},\n                    {\"step days\": [666, 666], \"power fractions\": [1, 1]},\n                    {\"step days\": [666, 666], \"power fractions\": [1, 1]},\n                ],\n            }\n        )\n\n        # create a db based on the cs\n        dbi = DatabaseInterface(r, cs)\n        dbi.initDB(fName=\"reloadingDB.h5\")\n        db = dbi.database\n\n        # populate the db with something\n        for cycle, node in ((cycle, node) for cycle in range(numCycles) for node in range(numNodes)):\n            r.p.cycle = cycle\n            r.p.timeNode = node\n            r.p.cycleLength = 2000\n            db.writeToDB(r)\n        self.assertTrue(db.isOpen())\n        db.close()\n        self.assertFalse(db.isOpen())\n\n        with self.assertRaises(ValueError):\n            self.dbi.prepRestartRun()\n\n    def test_computeParents(self):\n        # The below arrays represent a tree structure like this:\n        #                 71 -----------------------.\n        #                 |                          \\\n        #                12--.-----.------.          72\n        #               / |  \\      \\      \\\n        #             22 30  4---.   
6      18-.\n        #            / |  |  | \\  \\        / |  \\\n        #           8 17  2 32 52 62      1  9  10\n        #\n        # This should cover a handful of corner cases\n        numChildren = [2, 5, 2, 0, 0, 1, 0, 3, 0, 0, 0, 0, 3, 0, 0, 0, 0]\n        serialNums = [71, 12, 22, 8, 17, 30, 2, 4, 32, 53, 62, 6, 18, 1, 9, 10, 72]\n\n        expected_1 = [None, 71, 12, 22, 22, 12, 30, 12, 4, 4, 4, 12, 12, 18, 18, 18, 71]\n        expected_2 = [\n            None,\n            None,\n            71,\n            12,\n            12,\n            71,\n            12,\n            71,\n            12,\n            12,\n            12,\n            71,\n            71,\n            12,\n            12,\n            12,\n            None,\n        ]\n        expected_3 = [\n            None,\n            None,\n            None,\n            71,\n            71,\n            None,\n            71,\n            None,\n            71,\n            71,\n            71,\n            None,\n            None,\n            71,\n            71,\n            71,\n            None,\n        ]\n\n        self.assertEqual(database.Layout.computeAncestors(serialNums, numChildren), expected_1)\n        self.assertEqual(database.Layout.computeAncestors(serialNums, numChildren, 2), expected_2)\n        self.assertEqual(database.Layout.computeAncestors(serialNums, numChildren, 3), expected_3)\n\n\nclass TestWriteReadDatabase(unittest.TestCase):\n    \"\"\"Round-trip tests that we can write/read data to and from a Database.\"\"\"\n\n    SMALL_YAML = \"\"\"!include refOneBlockReactor.yaml\nsystems:\n    core:\n        grid name: core\n        origin:\n            x: 0.0\n            y: 0.0\n            z: 0.0\n    sfp:\n        type: sfp\n        grid name: sfp\n        origin:\n            x: 1000.0\n            y: 1000.0\n            z: 1000.0\n    evst:\n        type: excore\n        grid name: evst\n        origin:\n            x: 2000.0\n            y: 2000.0\n            
z: 2000.0\ngrids:\n    core:\n      geom: hex_corners_up\n      lattice map: |\n        IC\n      symmetry: full\n    evst:\n      lattice pitch:\n          x: 32.0\n          y: 32.0\n      geom: hex\n      symmetry: full\n\"\"\"\n\n    def setUp(self):\n        self.td = TemporaryDirectoryChanger()\n        self.td.__enter__()\n\n        # copy these test files over, so we can edit them\n        thisDir = self.td.destination\n        yamls = glob(os.path.join(TEST_ROOT, \"smallestTestReactor\", \"*.yaml\"))\n        for yam in yamls:\n            safeCopy(os.path.join(TEST_ROOT, \"smallestTestReactor\", yam), thisDir)\n\n        # Add an EVST to this reactor\n        with open(\"refSmallestReactor.yaml\", \"w\") as f:\n            f.write(self.SMALL_YAML)\n\n        self.o, self.r = loadTestReactor(thisDir, inputFileName=\"armiRunSmallest.yaml\")\n        self.dbi = DatabaseInterface(self.r, self.o.cs)\n        self.dbi.initDB(fName=f\"{self._testMethodName}.h5\")\n        self.db: Database = self.dbi.database\n\n    def tearDown(self):\n        self.db.close()\n        self.td.__exit__(None, None, None)\n\n    def test_readWriteRoundTrip(self):\n        \"\"\"Test DB some round tripping, writing some data to a DB, then reading from it.\n\n        In particular, we test some parameters on the reactor, core, and blocks. 
And we move an assembly from the core\n        to an EVST between timenodes, and test that worked.\n        \"\"\"\n        # put some data in the DB, for timenode 0\n        self.r.p.cycle = 0\n        self.r.p.timeNode = 0\n        self.r.core.p.keff = 0.99\n        b = self.r.core.getFirstBlock()\n        self.assertIsInstance(b[0].spatialLocator, MultiIndexLocation)\n        self.assertIsInstance(b[-1].spatialLocator, CoordinateLocation)\n        b.p.power = 12345.6\n\n        self.db.writeToDB(self.r)\n\n        # put some data in the DB, for timenode 1\n        self.r.p.timeNode = 1\n        self.r.core.p.keff = 1.01\n\n        # move the assembly from the core to the EVST\n        a = self.r.core.getFirstAssembly()\n        loc = self.r.excore.evst.spatialGrid[(0, 0, 0)]\n        self.r.core.remove(a)\n        self.r.excore.evst.add(a, loc)\n\n        self.db.writeToDB(self.r)\n\n        # close the DB\n        self.db.close()\n\n        # open the DB and verify, the first timenode\n        with Database(self.db.fileName) as db:\n            r0 = db.load(0, 0, allowMissing=True)\n            self.assertEqual(r0.p.cycle, 0)\n            self.assertEqual(r0.p.timeNode, 0)\n            self.assertEqual(r0.core.p.keff, 0.99)\n\n            # check the types of the data model objects\n            self.assertTrue(isinstance(r0, Reactor))\n            self.assertTrue(isinstance(r0.core, Core))\n            self.assertTrue(isinstance(r0.excore, ExcoreCollection))\n            self.assertTrue(isinstance(r0.excore.evst, ExcoreStructure))\n            self.assertTrue(isinstance(r0.excore.sfp, SpentFuelPool))\n\n            # Prove our one special block is in the core\n            self.assertEqual(len(r0.core.getChildren()), 1)\n            b0 = r0.core.getFirstBlock()\n            self.assertEqual(b0.p.power, 12345.6)\n\n            self.assertIsInstance(b0[0].spatialLocator, MultiIndexLocation)\n            np.testing.assert_array_equal(b[0].spatialLocator.indices, 
b0[0].spatialLocator.indices)\n            self.assertIsInstance(b0[-1].spatialLocator, CoordinateLocation)\n            np.testing.assert_array_equal(b[-1].spatialLocator.indices, b0[-1].spatialLocator.indices)\n\n            # the ex-core structures should be empty\n            self.assertEqual(len(r0.excore[\"sfp\"].getChildren()), 0)\n            self.assertEqual(len(r0.excore[\"evst\"].getChildren()), 0)\n\n        # open the DB and verify, the second timenode\n        with Database(self.db.fileName, \"r\") as db:\n            r1 = db.load(0, 1, allowMissing=True)\n            self.assertEqual(r1.p.cycle, 0)\n            self.assertEqual(r1.p.timeNode, 1)\n            self.assertEqual(r1.core.p.keff, 1.01)\n\n            # check the types of the data model objects\n            self.assertTrue(isinstance(r1, Reactor))\n            self.assertTrue(isinstance(r1.core, Core))\n            self.assertTrue(isinstance(r1.excore, ExcoreCollection))\n            self.assertTrue(isinstance(r1.excore.evst, ExcoreStructure))\n            self.assertTrue(isinstance(r1.excore.sfp, SpentFuelPool))\n\n            # Prove our one special block is NOT in the core, or the SFP\n            self.assertEqual(len(r1.core.getChildren()), 0)\n            self.assertEqual(len(r1.excore[\"sfp\"].getChildren()), 0)\n            self.assertEqual(len(r1.excore.sfp.getChildren()), 0)\n\n            # Prove our one special block is in the EVST\n            evst = r1.excore[\"evst\"]\n            self.assertEqual(len(evst.getChildren()), 1)\n            b1 = evst.getChildren()[0].getChildren()[0]\n            self.assertEqual(b1.p.power, 12345.6)\n\n    def test_badData(self):\n        # create a DB to be modified\n        self.db.writeToDB(self.r)\n        self.db.close()\n\n        # modify the HDF5 file to corrupt a dataset\n        with h5py.File(self.db.fileName, \"r+\") as hf:\n            circleGroup = hf[\"c00n00\"][\"Circle\"]\n            circleMass = 
np.array(circleGroup[\"massHmBOL\"][()])\n            badData = circleMass[:-1]\n            del circleGroup[\"massHmBOL\"]\n            circleGroup.create_dataset(\"massHmBOL\", data=badData)\n\n        with self.assertRaises(ValueError):\n            with Database(self.db.fileName, \"r\") as db:\n                _r = db.load(0, 0, allowMissing=True)\n\n\nclass TestSimplestDatabaseItems(unittest.TestCase):\n    \"\"\"The tests here are simple, direct tests of Database, that don't need a DatabaseInterface or Reactor.\"\"\"\n\n    def setUp(self):\n        self.td = TemporaryDirectoryChanger()\n        self.td.__enter__()\n\n    def tearDown(self):\n        self.td.__exit__(None, None, None)\n\n    def test_open(self):\n        dbPath = \"test_open.h5\"\n        db = Database(dbPath, \"w\")\n\n        self.assertFalse(db.isOpen())\n        db._permission = \"mock\"\n        with self.assertRaises(ValueError):\n            db.open()\n\n\nclass TestStaticDatabaseItems(unittest.TestCase):\n    def test_applyComponentNumberDensitiesMigration(self):\n        b = loadTestBlock()\n        comps = [b[0], b[1]]\n        unpacked = [\n            {\"U235\": 1.23e-3, \"U238\": 2.34e-3},\n            {\"PU239\": 5.6e-4, \"PU240\": 7.8e-4},\n        ]\n\n        Database._applyComponentNumberDensitiesMigration(comps, unpacked)\n\n        for comp, orig in zip(comps, unpacked):\n            expected_nucs = np.array(list(orig.keys()), dtype=\"S6\")\n            expected_nds = np.array(list(orig.values()), dtype=np.float64)\n\n            # verify nuclide names and dtype\n            self.assertTrue(np.array_equal(comp.p[\"nuclides\"], expected_nucs))\n            self.assertEqual(comp.p[\"nuclides\"].dtype, np.dtype(\"S6\"))\n\n            # verify number densities and dtype\n            self.assertTrue(np.allclose(comp.p[\"numberDensities\"], expected_nds))\n            self.assertEqual(comp.p[\"numberDensities\"].dtype, np.float64)\n"
  },
  {
    "path": "armi/bookkeeping/db/tests/test_databaseInterface.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests of the Database Interface.\"\"\"\n\nimport os\nimport types\nimport unittest\n\nimport h5py\nimport numpy as np\nfrom numpy.testing import assert_allclose, assert_equal\n\nfrom armi import __version__ as version\nfrom armi import interfaces, runLog, settings\nfrom armi.bookkeeping.db.database import Database\nfrom armi.bookkeeping.db.databaseInterface import DatabaseInterface\nfrom armi.cases import case\nfrom armi.context import PROJECT_ROOT\nfrom armi.physics.neutronics.settings import CONF_LOADING_FILE\nfrom armi.reactor import blueprints, grids\nfrom armi.reactor.blueprints import loadFromCs\nfrom armi.reactor.flags import Flags\nfrom armi.reactor.reactors import Reactor\nfrom armi.testing import TESTING_ROOT, loadTestReactor, reduceTestReactorRings\nfrom armi.tests import TEST_ROOT\nfrom armi.utils import directoryChangers\n\n\ndef getSimpleDBOperator(cs):\n    \"\"\"\n    Return a very simple operator that covers most of the database interactions.\n\n    Notes\n    -----\n    This reactor has only 1 assembly with 1 type of block.\n    It's used to make the db unit tests run very quickly.\n    \"\"\"\n    newSettings = {}\n    newSettings[CONF_LOADING_FILE] = \"smallestTestReactor/refSmallestReactor.yaml\"\n    newSettings[\"verbosity\"] = \"important\"\n    newSettings[\"db\"] = True\n    newSettings[\"runType\"] = \"Standard\"\n    
newSettings[\"nCycles\"] = 1\n    cs = cs.modified(newSettings=newSettings)\n    genDBCase = case.Case(cs)\n    runLog.setVerbosity(\"info\")\n\n    o = genDBCase.initializeOperator()\n    o.interfaces = [interface for interface in o.interfaces if interface.name in [\"database\", \"main\"]]\n\n    return o, cs\n\n\nclass MockInterface(interfaces.Interface):\n    name = \"mockInterface\"\n\n    def __init__(self, r, cs, action=None):\n        interfaces.Interface.__init__(self, r, cs)\n        self.action = action\n\n    def interactEveryNode(self, cycle, node):\n        self.action(cycle, node)\n\n\nclass TestDatabaseInterfaceBOL(unittest.TestCase):\n    \"\"\"Test the DatabaseInterface class at the BOL.\"\"\"\n\n    def test_interactBOL(self):\n        \"\"\"This test is in its own class, because of temporary directory issues.\"\"\"\n        with directoryChangers.TemporaryDirectoryChanger():\n            self.o, self.r = loadTestReactor(TEST_ROOT, inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\")\n            self.dbi = DatabaseInterface(self.r, self.o.cs)\n\n            dbName = f\"{self._testMethodName}.h5\"\n            self.dbi.initDB(fName=dbName)\n            self.db: Database = self.dbi.database\n            self.stateRetainer = self.r.retainState().__enter__()\n            self.assertIsNotNone(self.dbi._db)\n            self.dbi.interactBOL()\n            self.dbi.closeDB()\n            self.dbi._db = None\n            self.assertIsNone(self.dbi._db)\n\n            if os.path.exists(dbName):\n                os.remove(dbName)\n\n\nclass TestDatabaseInterface(unittest.TestCase):\n    \"\"\"Tests for the DatabaseInterface class.\"\"\"\n\n    def setUp(self):\n        self.td = directoryChangers.TemporaryDirectoryChanger()\n        self.td.__enter__()\n        self.o, self.r = loadTestReactor(TEST_ROOT, inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\")\n        self.dbi = DatabaseInterface(self.r, self.o.cs)\n        
self.dbi.initDB(fName=self._testMethodName + \".h5\")\n        self.db: Database = self.dbi.database\n        self.stateRetainer = self.r.retainState().__enter__()\n\n    def tearDown(self):\n        self.db.close()\n        self.stateRetainer.__exit__()\n        self.td.__exit__(None, None, None)\n        # test_interactBOL leaves behind some dirt (accessible after db close) that the\n        # TempDirChanger is not catching\n        bolDirt = [\n            os.path.join(PROJECT_ROOT, \"armiRun.h5\"),\n            os.path.join(PROJECT_ROOT, \"armiRunSmallest.h5\"),\n        ]\n        for dirt in bolDirt:\n            if os.path.exists(dirt):\n                os.remove(dirt)\n\n    def test_distributable(self):\n        self.assertEqual(self.dbi.distributable(), 4)\n        self.dbi.interactDistributeState()\n        self.assertEqual(self.dbi.distributable(), 4)\n\n    def test_demonstrateWritingInteractions(self):\n        \"\"\"Test what nodes are written to the database during the interaction calls.\"\"\"\n        self.o.cs[\"burnSteps\"] = 2  # make test insensitive to burn steps\n        r = self.r\n\n        # BOC/BOL doesn't write anything\n        r.p.cycle, r.p.timeNode = 0, 0\n        self.assertFalse(self.dbi.database.hasTimeStep(0, 0))\n        self.dbi.interactBOL()\n        self.assertFalse(self.dbi.database.hasTimeStep(0, 0))\n        self.dbi.interactBOC(0)\n        self.assertFalse(self.dbi.database.hasTimeStep(0, 0))\n\n        # but the first time node does\n        self.dbi.interactEveryNode(0, 0)\n        self.assertTrue(self.dbi.database.hasTimeStep(0, 0))\n\n        # EOC 0 shouldn't write, its written by last time node\n        r.p.cycle, r.p.timeNode = 0, self.o.cs[\"burnSteps\"]\n        self.assertFalse(self.dbi.database.hasTimeStep(r.p.cycle, r.p.timeNode))\n        self.dbi.interactEOC(r.p.cycle)\n        self.assertFalse(self.dbi.database.hasTimeStep(r.p.cycle, r.p.timeNode))\n\n        # The last node of the step should write 
though\n        self.assertFalse(self.dbi.database.hasTimeStep(r.p.cycle, r.p.timeNode))\n        self.dbi.interactEveryNode(r.p.cycle, r.p.timeNode)\n        self.assertTrue(self.dbi.database.hasTimeStep(r.p.cycle, r.p.timeNode))\n\n        # EOL should also write, but lets write last time node first\n        r.p.cycle, r.p.timeNode = self.o.cs[\"nCycles\"] - 1, self.o.cs[\"burnSteps\"]\n        self.assertFalse(self.dbi.database.hasTimeStep(r.p.cycle, r.p.timeNode))\n        self.dbi.interactEveryNode(r.p.cycle, r.p.timeNode)\n        self.assertTrue(self.dbi.database.hasTimeStep(r.p.cycle, r.p.timeNode))\n\n        # now write EOL\n        self.assertFalse(self.dbi.database.hasTimeStep(r.p.cycle, r.p.timeNode, \"EOL\"))\n        self.dbi.interactEOL()  # this also saves and closes db\n\n        # reopen db to show EOL is written\n        with Database(self._testMethodName + \".h5\", \"r\") as db:\n            self.assertTrue(db.hasTimeStep(r.p.cycle, r.p.timeNode, \"EOL\"))\n            # and confirm that last time node is still there/separate\n            self.assertTrue(db.hasTimeStep(r.p.cycle, r.p.timeNode))\n\n    def test_interactEveryNodeReturnTightCoupling(self):\n        \"\"\"Test that the DB is NOT written to if cs[\"tightCoupling\"] = True.\"\"\"\n        self.o.cs[\"tightCoupling\"] = True\n        self.dbi.interactEveryNode(0, 0)\n        self.assertFalse(self.dbi.database.hasTimeStep(0, 0))\n\n    def test_timeNodeLoop_tightCoupling(self):\n        \"\"\"Test that database is written out after the coupling loop has completed.\"\"\"\n        # clear out interfaces (no need to run physics) but leave database\n        self.o.interfaces = [self.dbi]\n        self.o.cs[\"tightCoupling\"] = True\n        self.assertFalse(self.dbi._db.hasTimeStep(0, 0))\n        self.o._timeNodeLoop(0, 0)\n        self.assertTrue(self.dbi._db.hasTimeStep(0, 0))\n\n    def test_syncDbAfterWrite(self):\n        \"\"\"\n        Test to ensure that the fast-path database is 
copied to working\n        directory at every time node when ``syncDbAfterWrite`` is ``True``.\n        \"\"\"\n        r = self.r\n\n        self.o.cs[\"syncDbAfterWrite\"] = True\n        self.o.cs[\"burnSteps\"] = 2  # make test insensitive to burn steps\n\n        self.dbi.interactBOL()\n        self.assertFalse(os.path.exists(self.dbi.database.fileName))\n\n        # Go through a few time nodes to ensure appending is working\n        for timeNode in range(self.o.cs[\"burnSteps\"]):\n            r.p.cycle = 0\n            r.p.timeNode = timeNode\n            self.dbi.interactEveryNode(r.p.cycle, r.p.timeNode)\n\n            # The file should have been copied to working directory\n            self.assertTrue(os.path.exists(self.dbi.database.fileName))\n\n            # The copied file should have the newest time node\n            with Database(self.dbi.database.fileName, \"r\") as db:\n                for tn in range(timeNode + 1):\n                    self.assertTrue(db.hasTimeStep(r.p.cycle, tn))\n\n            # The in-memory database should have been reloaded properly\n            for tn in range(timeNode + 1):\n                self.assertTrue(self.dbi.database.hasTimeStep(r.p.cycle, tn))\n\n        # Make sure EOL runs smoothly\n        self.dbi.interactEOL()\n        self.assertTrue(os.path.exists(self.dbi.database.fileName))\n\n    def test_noSyncDbAfterWrite(self):\n        \"\"\"\n        Test to ensure that the fast-path database is NOT copied to working\n        directory at every time node when ``syncDbAfterWrite`` is ``False``.\n        \"\"\"\n        self.o.cs[\"syncDbAfterWrite\"] = False\n\n        self.dbi.interactBOL()\n        self.assertFalse(os.path.exists(self.dbi.database.fileName))\n        self.dbi.interactEveryNode(0, 0)\n        self.assertFalse(os.path.exists(self.dbi.database.fileName))\n        self.dbi.interactEOL()\n        self.assertTrue(os.path.exists(self.dbi.database.fileName))\n\n    def 
test_writeDBFromDBLoadSameDir(self):\n        \"\"\"\n        Test to ensure that a reactor loaded from a database can be written to a\n        working database file (one that has case settings and blueprints if applicable).\n        \"\"\"\n        # Write this reactor to a database file.\n        dbi = DatabaseInterface(self.r, self.o.cs)\n        dbi.initDB(fName=\"testDB1.h5\")\n        db = dbi.database\n        db.writeToDB(self.r)\n        db.close()\n\n        # Now load the db again\n        with Database(\"testDB1.h5\", \"r\") as db:\n            cs2 = db.loadCS()\n            r2 = db.load(0, 0, cs=cs2)\n\n        self.assertIsInstance(cs2, settings.Settings)\n        self.assertIsInstance(r2, Reactor)\n\n        # Now write this db to this folder\n        dbi = DatabaseInterface(r2, cs2)\n        dbi.initDB(fName=\"testDB2.h5\")\n        db = dbi.database\n        db.writeToDB(r2)\n        db.close()\n\n        # Now load this db. It should load\n        with Database(\"testDB2.h5\", \"r\") as db:\n            cs3 = db.loadCS()\n            bp3 = loadFromCs(cs3)\n            self.assertIsInstance(bp3, blueprints.Blueprints)\n            r3 = db.load(0, 0, cs=cs3, bp=bp3)\n\n        self.assertIsInstance(cs3, settings.Settings)\n        self.assertIsInstance(r3, Reactor)\n\n    def test_writeDBFromDBLoadDifDir(self):\n        \"\"\"\n        Test to ensure that a reactor loaded from a database can be written to a\n        working database file (one that has case settings and blueprints if applicable).\n\n        The directory is changed between writing and loading.\n        \"\"\"\n        # Write this reactor to a database file.\n        dbi = DatabaseInterface(self.r, self.o.cs)\n        dbi.initDB(fName=\"testDB1.h5\")\n        db = dbi.database\n        db.writeToDB(self.r)\n        db.close()\n\n        # Let's move to a different folder\n        os.makedirs(\"sub\", exist_ok=True)\n        os.chdir(\"sub\")\n\n        # Now load the db again\n       
 with Database(os.path.join(os.pardir, \"testDB1.h5\"), \"r\") as db:\n            cs2 = db.loadCS()\n            r2 = db.load(0, 0, cs=cs2)\n\n        self.assertIsInstance(cs2, settings.Settings)\n        self.assertIsInstance(r2, Reactor)\n\n        # Now write this db to this folder\n        dbi = DatabaseInterface(r2, cs2)\n        dbi.initDB(fName=\"testDB2.h5\")\n        db = dbi.database\n        db.writeToDB(r2)\n        db.close()\n\n        # Now load this db. It should load\n        with Database(\"testDB2.h5\", \"r\") as db:\n            cs3 = db.loadCS()\n            r3 = db.load(0, 0, cs=cs3)\n\n        self.assertIsInstance(cs3, settings.Settings)\n        self.assertIsInstance(r3, Reactor)\n\n\nclass TestDatabaseWriter(unittest.TestCase):\n    def setUp(self):\n        self.td = directoryChangers.TemporaryDirectoryChanger()\n        self.td.__enter__()\n        cs = settings.Settings(os.path.join(TEST_ROOT, \"armiRun.yaml\"))\n        cs = cs.modified(newSettings={\"power\": 0.0, \"powerDensity\": 9e4})\n        self.o, cs = getSimpleDBOperator(cs)\n        self.r = self.o.r\n        self.stateRetainer = self.r.retainState().__enter__()\n\n    def tearDown(self):\n        self.td.__exit__(None, None, None)\n        self.stateRetainer.__exit__()\n\n    def test_writeSystemAttributes(self):\n        \"\"\"Test the writeSystemAttributes method.\n\n        .. 
test:: Validate that we can directly write system attributes to a database file.\n            :id: T_ARMI_DB_QA0\n            :tests: R_ARMI_DB_QA\n        \"\"\"\n        with h5py.File(\"test_writeSystemAttributes.h5\", \"w\") as h5:\n            Database.writeSystemAttributes(h5)\n\n        with h5py.File(\"test_writeSystemAttributes.h5\", \"r\") as h5:\n            self.assertIn(\"user\", h5.attrs)\n            self.assertIn(\"python\", h5.attrs)\n            self.assertIn(\"armiLocation\", h5.attrs)\n            self.assertIn(\"startTime\", h5.attrs)\n            self.assertIn(\"machines\", h5.attrs)\n            self.assertIn(\"platform\", h5.attrs)\n            self.assertIn(\"hostname\", h5.attrs)\n            self.assertIn(\"platformRelease\", h5.attrs)\n            self.assertIn(\"platformVersion\", h5.attrs)\n            self.assertIn(\"platformArch\", h5.attrs)\n\n    def test_metaData_endSuccessfully(self):\n        \"\"\"Test databases have the correct metadata in them.\n\n        .. 
test:: Validate that databases have system attributes written to them during the usual workflow.\n            :id: T_ARMI_DB_QA1\n            :tests: R_ARMI_DB_QA\n        \"\"\"\n        # the power should start at zero\n        self.assertEqual(self.r.core.p.power, 0)\n\n        def goodMethod(cycle, node):\n            pass\n\n        self.o.interfaces.append(MockInterface(self.o.r, self.o.cs, goodMethod))\n        with self.o:\n            self.o.operate()\n\n        self.assertEqual(0, self.r.p.cycle)\n        self.assertEqual(2, self.r.p.timeNode)\n\n        with h5py.File(self.o.cs.caseTitle + \".h5\", \"r\") as h5:\n            self.assertTrue(h5.attrs[\"successfulCompletion\"])\n            self.assertEqual(h5.attrs[\"version\"], version)\n\n            self.assertIn(\"caseTitle\", h5.attrs)\n            self.assertIn(\"settings\", h5[\"inputs\"])\n            self.assertIn(\"blueprints\", h5[\"inputs\"])\n\n            # validate system attributes\n            self.assertIn(\"user\", h5.attrs)\n            self.assertIn(\"python\", h5.attrs)\n            self.assertIn(\"armiLocation\", h5.attrs)\n            self.assertIn(\"startTime\", h5.attrs)\n            self.assertIn(\"machines\", h5.attrs)\n            self.assertIn(\"platform\", h5.attrs)\n            self.assertIn(\"hostname\", h5.attrs)\n            self.assertIn(\"platformRelease\", h5.attrs)\n            self.assertIn(\"platformVersion\", h5.attrs)\n            self.assertIn(\"platformArch\", h5.attrs)\n\n        # after operating, the power will be greater than zero\n        self.assertGreater(self.r.core.p.power, 1e9)\n\n    def test_metaDataEndFail(self):\n        def failMethod(cycle, node):\n            if cycle == 0 and node == 1:\n                raise Exception(\"forcing failure\")\n\n        self.o.interfaces.append(MockInterface(self.o.r, self.o.cs, failMethod))\n\n        with self.assertRaises(Exception):\n            with self.o:\n                self.o.operate()\n\n        
self.assertEqual(0, self.r.p.cycle)\n        self.assertEqual(1, self.r.p.timeNode)\n\n        with h5py.File(self.o.cs.caseTitle + \".h5\", \"r\") as h5:\n            self.assertFalse(h5.attrs[\"successfulCompletion\"])\n            self.assertEqual(h5.attrs[\"version\"], version)\n            self.assertIn(\"caseTitle\", h5.attrs)\n\n    def test_getHistory(self):\n        expectedFluxes0 = {}\n        expectedFluxes7 = {}\n\n        def setFluxAwesome(cycle, node):\n            for bi, b in enumerate(self.r.core.iterBlocks()):\n                b.p.flux = 1e6 * bi + 1e3 * cycle + node\n                if bi == 0:\n                    expectedFluxes0[cycle, node] = b.p.flux\n                if bi == 7:\n                    expectedFluxes7[cycle, node] = b.p.flux\n\n        # use as attribute so it is accessible within getFluxAwesome\n        self.called = False\n\n        def getFluxAwesome(cycle, node):\n            if cycle != 0 or node != 2:\n                return\n\n            b0 = next(self.r.core.iterBlocks())\n\n            db = self.o.getInterface(\"database\")._db\n\n            # we are now in cycle 1, node 2 ... 
AFTER setFluxAwesome, but BEFORE writeToDB\n            actualFluxes0 = db.getHistory(b0)[\"flux\"]\n            self.assertEqual(expectedFluxes0, actualFluxes0)\n            self.called = True\n\n        self.o.interfaces.insert(0, MockInterface(self.o.r, self.o.cs, setFluxAwesome))\n        self.o.interfaces.insert(1, MockInterface(self.o.r, self.o.cs, getFluxAwesome))\n\n        with self.o:\n            self.o.operate()\n\n        self.assertTrue(self.called)\n\n    def test_getHistoryByLocation(self):\n        def setFluxAwesome(cycle, node):\n            for bi, b in enumerate(self.r.core.iterBlocks()):\n                b.p.flux = 1e6 * bi + 1e3 * cycle + node\n\n        def getFluxAwesome(cycle, node):\n            if cycle != 1 or node != 2:\n                return\n\n            b = next(self.r.core.iterBlocks())\n\n            db = self.o.getInterface(\"database\").database\n\n            # we are now in cycle 1, node 2 ... AFTER setFluxAwesome\n            _fluxes = db.getHistory(b, params=[\"flux\"])\n\n        self.o.interfaces.append(MockInterface(self.o.r, self.o.cs, setFluxAwesome))\n        self.o.interfaces.append(MockInterface(self.o.r, self.o.cs, getFluxAwesome))\n\n        with self.o:\n            self.o.operate()\n\n        with h5py.File(self.o.cs.caseTitle + \".h5\", \"r\") as h5:\n            self.assertEqual(h5.attrs[\"version\"], version)\n\n\nclass TestDatabaseReading(unittest.TestCase):\n    @classmethod\n    def setUpClass(cls):\n        cls.td = directoryChangers.TemporaryDirectoryChanger()\n        cls.td.__enter__()\n\n        # The database writes the settings object to the DB rather than the original input file.\n        # This allows settings to be changed in memory like this and survive for testing.\n        newSettings = {\"verbosity\": \"extra\"}\n        cls.nCycles = 2\n        newSettings[\"nCycles\"] = cls.nCycles\n        newSettings[\"burnSteps\"] = 2\n        o, r = loadTestReactor(\n            
inputFilePath=TESTING_ROOT,\n            inputFileName=\"reactors/thirdSmallHexReactor/thirdSmallHexReactor.yaml\",\n            customSettings=newSettings,\n        )\n        reduceTestReactorRings(r, o.cs, 3)\n\n        o.interfaces = [i for i in o.interfaces if isinstance(i, (DatabaseInterface))]\n        dbi = o.getInterface(\"database\")\n        dbi.enabled(True)\n        dbi.initDB()  # Main Interface normally does this\n\n        # update a few parameters\n        def writeFlux(cycle, node):\n            for bi, b in enumerate(o.r.core.iterBlocks()):\n                b.p.flux = 1e6 * bi + cycle * 100 + node\n                b.p.mgFlux = np.repeat(b.p.flux / 33, 33)\n\n        o.interfaces.insert(0, MockInterface(o.r, o.cs, writeFlux))\n        with o:\n            o.operate()\n\n        cls.cs = o.cs\n        cls.bp = o.r.blueprints\n        cls.dbName = o.cs.caseTitle + \".h5\"\n\n        # needed for test_readWritten\n        cls.r = o.r\n\n    @classmethod\n    def tearDownClass(cls):\n        cls.td.__exit__(None, None, None)\n        del cls.r\n        cls.r = None\n\n    def _fullCoreSizeChecker(self, r):\n        self.assertEqual(r.core.numRings, 3)\n        self.assertEqual(r.p.cycle, 0)\n        self.assertEqual(len(r.core.assembliesByName), 19)\n        self.assertEqual(len(r.core.circularRingList), 0)\n        self.assertEqual(len(r.core.blocksByName), 57)\n\n    def test_loadReadOnly(self):\n        with Database(self.dbName, \"r\") as db:\n            r = db.loadReadOnly(0, 0)\n\n            # now show we can no longer edit those parameters\n            with self.assertRaises(RuntimeError):\n                r.core.p.keff = 0.99\n\n            b = r.core.getFirstBlock()\n            with self.assertRaises(RuntimeError):\n                b.p.power = 432.1\n\n            for c in b:\n                self.assertGreater(c.getVolume(), 0)\n\n    def test_growToFullCore(self):\n        with Database(self.dbName, \"r\") as db:\n            r = 
db.load(0, 0, allowMissing=True)\n\n        # test partial core values\n        self.assertEqual(r.core.numRings, 3)\n        self.assertEqual(r.p.cycle, 0)\n        self.assertEqual(len(r.core.assembliesByName), 7)\n        self.assertEqual(len(r.core.circularRingList), 0)\n        self.assertEqual(len(r.core.blocksByName), 21)\n\n        r.core.growToFullCore(None)\n        self._fullCoreSizeChecker(r)\n\n    def test_growToFullCoreWithCS(self):\n        with Database(self.dbName, \"r\") as db:\n            r = db.load(0, 0, allowMissing=True)\n\n        r.core.growToFullCore(self.cs)\n        self._fullCoreSizeChecker(r)\n\n    def test_growToFullCoreFromFactory(self):\n        from armi.bookkeeping.db import databaseFactory\n\n        db = databaseFactory(self.dbName, \"r\")\n        with db:\n            r = db.load(0, 0, allowMissing=True)\n\n        r.core.growToFullCore(None)\n        self._fullCoreSizeChecker(r)\n\n    def test_growToFullCoreFromFactoryWithCS(self):\n        from armi.bookkeeping.db import databaseFactory\n\n        db = databaseFactory(self.dbName, \"r\")\n        with db:\n            r = db.load(0, 0, allowMissing=True)\n\n        r.core.growToFullCore(self.cs)\n        self._fullCoreSizeChecker(r)\n\n    def test_readWritten(self):\n        with Database(self.dbName, \"r\") as db:\n            r2 = db.load(0, 0, self.cs)\n\n        for a1, a2 in zip(self.r.core, r2.core):\n            # assemblies assign a name based on assemNum at initialization\n            self.assertEqual(a1.name, a2.name)\n            assert_equal(a1.spatialLocator.indices, a2.spatialLocator.indices)\n            self.assertEqual(a1.p.assemNum, a2.p.assemNum)\n            self.assertEqual(a1.p.serialNum, a2.p.serialNum)\n\n            for b1, b2 in zip(a1, a2):\n                # blocks assign a name based on assemNum at initialization\n                self.assertEqual(b1.name, b2.name)\n                assert_equal(b1.spatialLocator.indices, 
b2.spatialLocator.indices)\n                self.assertEqual(b1.p.serialNum, b2.p.serialNum)\n\n                for c1, c2 in zip(sorted(b1), sorted(b2)):\n                    self.assertEqual(c1.name, c2.name)\n                    if isinstance(c1.spatialLocator, grids.MultiIndexLocation):\n                        assert_equal(\n                            np.array(c1.spatialLocator.indices),\n                            np.array(c2.spatialLocator.indices),\n                        )\n                    else:\n                        assert_equal(c1.spatialLocator.indices, c2.spatialLocator.indices)\n                    self.assertEqual(c1.p.serialNum, c2.p.serialNum)\n\n                # volume is pretty difficult to get right. it relies upon linked dimensions\n                v1 = b1.getVolume()\n                v2 = b2.getVolume()\n                assert_allclose(v1, v2)\n                self.assertEqual(b1.p.serialNum, b2.p.serialNum)\n\n            self.assertEqual(\n                self.r.core.childrenByLocator[0, 0, 0].p.serialNum,\n                r2.core.childrenByLocator[0, 0, 0].p.serialNum,\n            )\n\n    def test_readWithoutInputs(self):\n        with Database(self.dbName, \"r\") as db:\n            r2 = db.load(0, 0)\n\n        for b1, b2 in zip(self.r.core.iterBlocks(), r2.core.iterBlocks()):\n            for c1, c2 in zip(sorted(b1), sorted(b2)):\n                self.assertEqual(c1.name, c2.name)\n\n        for bi, b in enumerate(r2.core.iterBlocks()):\n            assert_allclose(b.p.flux, 1e6 * bi)\n\n    def test_variousTypesWork(self):\n        with Database(self.dbName, \"r\") as db:\n            r2 = db.load(1, 1)\n\n        b1 = self.r.core.getFirstBlock(Flags.FUEL)\n        b2 = r2.core.getFirstBlock(Flags.FUEL)\n\n        self.assertIsInstance(b1.p.mgFlux, np.ndarray)\n        self.assertIsInstance(b2.p.mgFlux, np.ndarray)\n        assert_allclose(b1, b2)\n\n        c1 = b1.getComponent(Flags.FUEL)\n        c2 = 
b2.getComponent(Flags.FUEL)\n\n        for i, v1 in enumerate(c1.p.numberDensities):\n            self.assertAlmostEqual(v1, c2.p.numberDensities[i])\n\n    def test_timesteps(self):\n        with Database(self.dbName, \"r\") as db:\n            # build time steps in the DB file\n            timesteps = []\n            for cycle in range(self.nCycles):\n                for bStep in range(3):\n                    timesteps.append(f\"/c0{cycle}n0{bStep}\")\n            timesteps.append(\"/c01n02EOL\")\n\n            # verify the timesteps are correct, including the EOL\n            self.assertEqual(list(db.keys()), timesteps)\n\n\nclass TestBadName(unittest.TestCase):\n    def test_badDBName(self):\n        cs = settings.Settings(os.path.join(TEST_ROOT, \"armiRun.yaml\"))\n        cs = cs.modified(newSettings={\"reloadDBName\": \"aRmIRuN.h5\"})\n\n        dbi = DatabaseInterface(None, cs)\n        with self.assertRaises(ValueError):\n            # an error should be raised when the database loaded from\n            # has the same name as the run to avoid overwriting.\n            dbi.initDB()\n\n\nclass TestStandardFollowOn(unittest.TestCase):\n    \"\"\"Tests related to doing restart runs (loading from DB with Standard operator).\"\"\"\n\n    @classmethod\n    def setUpClass(cls):\n        cls.td = directoryChangers.TemporaryDirectoryChanger()\n        cls.td.__enter__()\n        # make DB to load from\n        o = cls._getOperatorThatChangesVariables(settings.Settings(os.path.join(TEST_ROOT, \"armiRun.yaml\")))\n        with o:\n            o.operate()\n            cls.FIRST_END_TIME = o.r.p.time\n            if cls.FIRST_END_TIME == 0:\n                # Can't use self.assertEqual in the class method but we still need this information\n                raise RuntimeError(\"Time should have advanced by the end of the run.\")\n        cls.LOAD_DB_PATH = \"loadFrom.h5\"\n        os.rename(\"armiRun.h5\", cls.LOAD_DB_PATH)\n\n    @classmethod\n    def 
tearDownClass(cls):\n        cls.td.__exit__(None, None, None)\n\n    @staticmethod\n    def _getOperatorThatChangesVariables(cs):\n        \"\"\"\n        Return an operator that advances time so that restart runs can be tested.\n\n        Notes\n        -----\n        Ensures that parameters are consistent between Standard runs and restart runs.\n        \"\"\"\n        o, cs = getSimpleDBOperator(cs)\n\n        mock = MockInterface(o.r, o.cs, None)\n\n        def interactEveryNode(self, cycle, node):\n            # Could use just += 1 but this will show more errors since it is less\n            # susceptible to cancellation of errors off by one.\n            self.r.p.time += self.r.p.timeNode + 1\n\n        # Magic to change the method only on this instance of the class.\n        mock.interactEveryNode = types.MethodType(interactEveryNode, mock)\n\n        # insert 1 before the database interface so that changes are written to db.\n        o.interfaces.insert(1, mock)\n        return o\n\n    def test_standardRestart(self):\n        o = self._getRestartOperator()\n\n        # the interact BOL has historically failed due to trying to write inputs\n        # which are already in the DB from the _mergeStandardRunDB call\n        with o:\n            o.operate()\n            self.assertEqual(\n                self.FIRST_END_TIME,\n                o.r.p.time,\n                \"End time should have been the same for the restart run.\\n\"\n                \"First end time: {},\\nSecond End time: {}\".format(self.FIRST_END_TIME, o.r.p.time),\n            )\n\n    def _getRestartOperator(self):\n        cs = settings.Settings(os.path.join(TEST_ROOT, \"armiRun.yaml\"))\n        newSettings = {}\n        newSettings[\"loadStyle\"] = \"fromDB\"\n        newSettings[\"reloadDBName\"] = self.LOAD_DB_PATH\n        newSettings[\"startCycle\"] = 0\n        newSettings[\"startNode\"] = 1\n        cs = cs.modified(newSettings=newSettings)\n        o = 
self._getOperatorThatChangesVariables(cs)\n        return o\n"
  },
  {
    "path": "armi/bookkeeping/db/tests/test_jaggedArray.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for the JaggedArray class.\"\"\"\n\nimport unittest\n\nimport h5py\nimport numpy as np\n\nfrom armi.bookkeeping.db.jaggedArray import JaggedArray\nfrom armi.utils.directoryChangers import TemporaryDirectoryChanger\n\n\nclass TestJaggedArray(unittest.TestCase):\n    \"\"\"Tests for the JaggedArray class.\"\"\"\n\n    def setUp(self):\n        self.td = TemporaryDirectoryChanger()\n        self.td.__enter__()\n\n    def tearDown(self):\n        self.td.__exit__(None, None, None)\n\n    def test_roundTrip(self):\n        \"\"\"Basic test that we handle Nones correctly in database read/writes.\"\"\"\n        dataSet = [1, 2.0, None, [], [3, 4], (5, 6, 7), np.array([8, 9, 10, 11])]\n        self._compareRoundTrip(dataSet, \"test-numbers\")\n\n    def test_roundTripBool(self):\n        \"\"\"Basic test that we handle Nones correctly in database read/writes.\"\"\"\n        dataSet = [True, True, [False, True, False]]\n        self._compareRoundTrip(dataSet, \"test-bool\")\n\n    def test_flatten(self):\n        \"\"\"Test the recursive flattening static method.\"\"\"\n        testdata = [(1, 2), [3, 4, 5], [], None, 6, np.array([7, 8, 9])]\n        flatArray = JaggedArray.flatten(testdata)\n        self.assertEqual(flatArray, [1, 2, 3, 4, 5, None, 6, 7, 8, 9])\n\n    def test_backwardsCompatible(self):\n        \"\"\"\n        Test that the new JaggedArray can 
unpack the old database jagged data format.\n\n        The \"old\" database format contains shapes and offsets for locations that have None.\n        The \"new\" database format only contains shapes and offsets for non-None values.\n        The \"new\" unpacking routine is able to read either format.\n        \"\"\"\n        paramName = \"test_old\"\n        data = [[1, 2], None, [3, 4, 5], None, None, [6, 7, 8, 9]]\n        flattenedArray = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9])\n        shapes = [(2,), (0,), (3,), (0,), (0,), (4,)]\n        offsets = [0, 2, 2, 5, 5, 5, 5]\n        nones = [1, 3, 4]\n        h5file = \"test_oldFormat.h5\"\n        with h5py.File(h5file, \"w\") as hf:\n            dset = hf.create_dataset(\n                data=flattenedArray,\n                name=paramName,\n            )\n            dset.attrs[\"jagged\"] = True\n            dset.attrs[\"offsets\"] = offsets\n            dset.attrs[\"shapes\"] = shapes\n            dset.attrs[\"noneLocations\"] = nones\n\n        with h5py.File(h5file, \"r\") as hf:\n            dataset = hf[paramName]\n            values = dataset[()]\n            offsets = dataset.attrs[\"offsets\"]\n            shapes = dataset.attrs[\"shapes\"]\n            nones = dataset.attrs[\"noneLocations\"]\n\n        roundTrip = JaggedArray.fromH5(\n            values,\n            offsets,\n            shapes,\n            nones,\n            dtype=flattenedArray.dtype,\n            paramName=paramName,\n        )\n        self._compareArrays(data, roundTrip)\n\n    def _compareRoundTrip(self, data, paramName):\n        \"\"\"Make sure that data is unchanged by packing/unpacking.\"\"\"\n        jaggedArray = JaggedArray(data, paramName)\n\n        # write to HDF5\n        h5file = \"test_jaggedArray.h5\"\n        with h5py.File(h5file, \"w\") as hf:\n            dset = hf.create_dataset(\n                data=jaggedArray.flattenedArray,\n                name=jaggedArray.paramName,\n            )\n            
dset.attrs[\"jagged\"] = True\n            dset.attrs[\"offsets\"] = jaggedArray.offsets\n            dset.attrs[\"shapes\"] = jaggedArray.shapes\n            dset.attrs[\"noneLocations\"] = jaggedArray.nones\n\n        with h5py.File(h5file, \"r\") as hf:\n            dataset = hf[paramName]\n            values = dataset[()]\n            offsets = dataset.attrs[\"offsets\"]\n            shapes = dataset.attrs[\"shapes\"]\n            nones = dataset.attrs[\"noneLocations\"]\n\n        roundTrip = JaggedArray.fromH5(\n            values,\n            offsets,\n            shapes,\n            nones,\n            dtype=jaggedArray.flattenedArray.dtype,\n            paramName=paramName,\n        )\n        self._compareArrays(data, roundTrip)\n\n    def _compareArrays(self, ref, src):\n        \"\"\"\n        Compare two numpy arrays.\n\n        Comparing numpy arrays that may have unsavory data (NaNs, Nones, jagged\n        data, etc.) is really difficult. For now, convert to a list and compare\n        element-by-element.\n\n        Several types of data do not survive a round trip. The if-elif branch\n        here converts the initial data into the format expected to be produced\n        by the round trip. 
The conversions are:\n\n        - For scalar values (int, float, etc.), the data becomes a numpy\n          array with a dimension of 1 after the round trip.\n        - Tuples and lists become numpy arrays\n        - Empty lists become `None`\n\n        \"\"\"\n        self.assertEqual(type(src), JaggedArray)\n        if isinstance(ref, np.ndarray):\n            ref = ref.tolist()\n            src = src.tolist()\n\n        for v1, v2 in zip(ref, src):\n            # Entries may be None\n            if isinstance(v1, np.ndarray):\n                v1 = v1.tolist()\n            elif isinstance(v1, tuple):\n                v1 = list(v1)\n            elif isinstance(v1, int):\n                v1 = np.array([v1])\n            elif isinstance(v1, float):\n                v1 = np.array([v1], dtype=np.float64)\n            elif v1 is None:\n                pass\n            elif len(v1) == 0:\n                v1 = None\n\n            if isinstance(v2, np.ndarray):\n                v2 = v2.tolist()\n\n            self.assertEqual(v1, v2)\n"
  },
  {
    "path": "armi/bookkeeping/db/tests/test_layout.py",
    "content": "# Copyright 2022 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for the db Layout and associated tools.\"\"\"\n\nimport os\nimport unittest\n\nfrom armi import context\nfrom armi.bookkeeping.db import database, layout\nfrom armi.reactor import grids\nfrom armi.utils.directoryChangers import TemporaryDirectoryChanger\n\n\nclass TestLocationPacking(unittest.TestCase):\n    \"\"\"Tests for database location.\"\"\"\n\n    def setUp(self):\n        self.td = TemporaryDirectoryChanger()\n        self.td.__enter__()\n\n    def tearDown(self):\n        self.td.__exit__(None, None, None)\n\n    def test_locationPacking(self):\n        loc1 = grids.IndexLocation(1, 2, 3, None)\n        loc2 = grids.CoordinateLocation(4.0, 5.0, 6.0, None)\n        loc3 = grids.MultiIndexLocation(None)\n        loc3.append(grids.IndexLocation(7, 8, 9, None))\n        loc3.append(grids.IndexLocation(10, 11, 12, None))\n\n        locs = [loc1, loc2, loc3]\n        tp, data = layout._packLocations(locs)\n\n        self.assertEqual(tp[0], layout.LOC_INDEX)\n        self.assertEqual(tp[1], layout.LOC_COORD)\n        self.assertEqual(tp[2], layout.LOC_MULTI + \"2\")\n\n        unpackedData = layout._unpackLocations(tp, data)\n\n        self.assertEqual(unpackedData[0], (1, 2, 3))\n        self.assertEqual(unpackedData[1], (4.0, 5.0, 6.0))\n        self.assertEqual(unpackedData[2], [(7, 8, 9), (10, 11, 12)])\n\n    def 
test_locationPackingOlderVersions(self):\n        for version in [1, 2]:\n            loc1 = grids.IndexLocation(1, 2, 3, None)\n            loc2 = grids.CoordinateLocation(4.0, 5.0, 6.0, None)\n            loc3 = grids.MultiIndexLocation(None)\n            loc3.append(grids.IndexLocation(7, 8, 9, None))\n            loc3.append(grids.IndexLocation(10, 11, 12, None))\n\n            locs = [loc1, loc2, loc3]\n            tp, data = layout._packLocations(locs, minorVersion=version)\n\n            self.assertEqual(tp[0], \"IndexLocation\")\n            self.assertEqual(tp[1], \"CoordinateLocation\")\n            self.assertEqual(tp[2], \"MultiIndexLocation\")\n\n            unpackedData = layout._unpackLocations(tp, data, minorVersion=version)\n\n            self.assertEqual(unpackedData[0], (1, 2, 3))\n            self.assertEqual(unpackedData[1], (4.0, 5.0, 6.0))\n            self.assertEqual(unpackedData[2][0].tolist(), [7, 8, 9])\n            self.assertEqual(unpackedData[2][1].tolist(), [10, 11, 12])\n\n    def test_locationPackingOldVersion(self):\n        version = 3\n\n        loc1 = grids.IndexLocation(1, 2, 3, None)\n        loc2 = grids.CoordinateLocation(4.0, 5.0, 6.0, None)\n        loc3 = grids.MultiIndexLocation(None)\n        loc3.append(grids.IndexLocation(7, 8, 9, None))\n        loc3.append(grids.IndexLocation(10, 11, 12, None))\n\n        locs = [loc1, loc2, loc3]\n        tp, data = layout._packLocations(locs, minorVersion=version)\n\n        self.assertEqual(tp[0], \"I\")\n        self.assertEqual(tp[1], \"C\")\n        self.assertEqual(tp[2], \"M:2\")\n\n        unpackedData = layout._unpackLocations(tp, data, minorVersion=version)\n\n        self.assertEqual(unpackedData[0], (1, 2, 3))\n        self.assertEqual(unpackedData[1], (4.0, 5.0, 6.0))\n        self.assertEqual(unpackedData[2][0], (7, 8, 9))\n        self.assertEqual(unpackedData[2][1], (10, 11, 12))\n\n    def test_close(self):\n        intendedFileName = \"xyz.h5\"\n\n        db = 
database.Database(intendedFileName, \"w\")\n        self.assertEqual(db._fileName, intendedFileName)\n        self.assertIsNone(db._fullPath)  # this isn't set until the db is opened\n\n        db.open()\n        self.assertEqual(db._fullPath, os.path.join(context.getFastPath(), intendedFileName))\n\n        db.close()  # this should move the file out of the FAST_PATH\n        self.assertEqual(db._fullPath, os.path.join(os.path.abspath(\".\"), intendedFileName))\n"
  },
  {
    "path": "armi/bookkeeping/db/tests/test_passiveDBLoadPlugin.py",
    "content": "# Copyright 2025 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Provides functionality for testing the PassiveDBLoadPlugin.\"\"\"\n\nimport unittest\nfrom copy import deepcopy\nfrom io import StringIO\n\nfrom ruamel.yaml import RoundTripLoader\nfrom ruamel.yaml.nodes import MappingNode, ScalarNode\n\nfrom armi import context, getApp\nfrom armi.bookkeeping.db.passiveDBLoadPlugin import (\n    PassiveDBLoadPlugin,\n    PassThroughYamlize,\n)\nfrom armi.reactor.blocks import Block\n\n\nclass TestPassiveDBLoadPlugin(unittest.TestCase):\n    def setUp(self):\n        \"\"\"\n        Manipulate the standard App. 
We can't just configure our own, since the\n        pytest environment bleeds between tests.\n        \"\"\"\n        self.app = getApp()\n        self._backupApp = deepcopy(self.app)\n        self._cacheBPSections = PassiveDBLoadPlugin.SKIP_BP_SECTIONS\n        self._cacheUnkownParams = PassiveDBLoadPlugin.UNKNOWN_PARAMS\n        PassiveDBLoadPlugin.SKIP_BP_SECTIONS = []\n        PassiveDBLoadPlugin.UNKNOWN_PARAMS = {}\n\n    def tearDown(self):\n        \"\"\"Restore the App to its original state.\"\"\"\n        import armi\n\n        armi._app = self._backupApp\n        context.APP_NAME = \"armi\"\n        PassiveDBLoadPlugin.SKIP_BP_SECTIONS = self._cacheBPSections\n        PassiveDBLoadPlugin.UNKNOWN_PARAMS = self._cacheUnkownParams\n\n    def test_passiveDBLoadPlugin(self):\n        plug = PassiveDBLoadPlugin()\n\n        # default case\n        bpSections = plug.defineBlueprintsSections()\n        self.assertEqual(len(bpSections), 0)\n        params = plug.defineParameters()\n        self.assertEqual(len(params), 0)\n\n        # non-empty cases\n        PassiveDBLoadPlugin.SKIP_BP_SECTIONS = [\"hi\", \"mom\"]\n        PassiveDBLoadPlugin.UNKNOWN_PARAMS = {Block: [\"fake1\", \"fake2\"]}\n        bpSections = plug.defineBlueprintsSections()\n        self.assertEqual(len(bpSections), 2)\n        self.assertTrue(type(bpSections[0]), tuple)\n        self.assertEqual(bpSections[0][0], \"hi\")\n        self.assertTrue(type(bpSections[1]), tuple)\n        self.assertEqual(bpSections[1][0], \"mom\")\n        params = plug.defineParameters()\n        self.assertEqual(len(params), 1)\n        self.assertIn(Block, params)\n\n\nclass TestPassThroughYamlize(unittest.TestCase):\n    def test_passThroughYamlizeExample1(self):\n        # create node from known BP-style YAML object\n        node = MappingNode(\n            \"test_passThroughYamlizeExample1\",\n            [\n                (\n                    ScalarNode(tag=\"tag:yaml.org,2002:str\", 
value=\"core-wide\"),\n                    MappingNode(\n                        tag=\"tag:yaml.org,2002:map\",\n                        value=[\n                            (\n                                ScalarNode(\n                                    tag=\"tag:yaml.org,2002:str\",\n                                    value=\"fuel axial expansion\",\n                                ),\n                                ScalarNode(tag=\"tag:yaml.org,2002:bool\", value=\"False\"),\n                            ),\n                            (\n                                ScalarNode(\n                                    tag=\"tag:yaml.org,2002:str\",\n                                    value=\"grid plate radial expansion\",\n                                ),\n                                ScalarNode(tag=\"tag:yaml.org,2002:bool\", value=\"True\"),\n                            ),\n                        ],\n                    ),\n                )\n            ],\n        )\n\n        # test that node is non-zero and has the \"core-wide\" section\n        self.assertEqual(node.value[0][0].value, \"core-wide\")\n\n        # pass the YAML string through the known YAML\n        pty = PassThroughYamlize()\n        loader = RoundTripLoader(StringIO(\"\"))\n        _p = pty.from_yaml(loader, node)\n\n        # prove the section has been cleared\n        self.assertEqual(len(node.value), 0)\n"
  },
  {
    "path": "armi/bookkeeping/db/typedefs.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Any, Dict, Tuple\n\nfrom armi.reactor.composites import ArmiObject\nfrom armi.reactor.grids import LocationBase\n\n# Return type for the getHistories() method\n#              param       time node      value\nHistory = Dict[str, Dict[Tuple[int, int], Any]]\nHistories = Dict[ArmiObject, History]\nLocationHistories = Dict[LocationBase, History]\n"
  },
  {
    "path": "armi/bookkeeping/historyTracker.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThe History Tracker is a bookkeeping interface that accesses and reports time-dependent state information from the\ndatabase.\n\nAt the end of a run, these write text files to show the histories for various follow-on mechanical analysis, fuel\nperformance analysis, etc.\n\nOther interfaces may find this useful as well, to get an assembly history for fuel performance analysis, etc. 
This is\nparticularly useful in equilibrium runs, where the ``EqHistoryTrackerInterface`` will unravel the full history from a\nsingle equilibrium cycle.\n\nGetting history information\n---------------------------\nLoop over blocks, keys, and timesteps of interest and use commands like this::\n\n    history.getBlockHistoryVal(armiBlock.getName(), key, ts)\n\nUsing the database-based history trackers\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nYou can pre-load information before gathering it to get much better performance::\n\n    history.preloadBlockHistoryVals(blockNames, historyKeys, timeSteps)\n\nThis is essential for performance when history information is going to be accessed in loops over assemblies or blocks.\nReading each param directly from the database individually in loops is paralyzingly slow.\n\nSpecifying parameters to add to the EOL history report\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nTo add state parameters to the list of things that get their history reported, you need to define an interface method\ncalled `getHistoryParams`. It should return a list of block parameters that will become available. For example::\n\n    def getHistoryParams(self):\n        return [\"flux\", \"percentBu\"]\n\nWhen you'd like to access history information, you need to grab the history interface. The history interfaces is present\nby default in your interface stack. 
To get it, just call::\n\n    history = self.getInterface(\"history\")\n\nNow you can do a few things, such as::\n\n    # get some info about what's stored in the history\n    assemsWithHistory = history.getDetailAssemblies()\n    timeStepsAvailable = history.getTimeIndices()\n\n    # now go out and get some time-dependent block params:\n    fluxAtTimeStep3 = history.getBlockHistoryVal(\"B1003A\", \"flux\", 3)\n\nSpecifying blocks and assemblies to track\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nSee :ref:`detail-assems`.\n\n\"\"\"\n\nimport traceback\nfrom typing import TYPE_CHECKING, List\n\nfrom armi import interfaces, operators, runLog\nfrom armi.reactor import grids\nfrom armi.reactor.flags import Flags\nfrom armi.utils import tabulate\n\nORDER = 2 * interfaces.STACK_ORDER.BEFORE + interfaces.STACK_ORDER.BOOKKEEPING\n\nif TYPE_CHECKING:\n    from armi.reactor.assemblies import Assembly\n    from armi.reactor.blocks import Block\n\n\ndef describeInterfaces(cs):\n    \"\"\"Function for exposing interface(s) to other code.\"\"\"\n    if cs[\"runType\"] not in (operators.RunTypes.EQUILIBRIUM):\n        klass = HistoryTrackerInterface\n        return (klass, {})\n\n    return None\n\n\nclass HistoryTrackerInterface(interfaces.Interface):\n    \"\"\"\n    Makes reports of the state that individual assemblies encounter.\n\n    .. impl:: This interface allows users to retrieve run data from somewhere other\n        than the database.\n        :id: I_ARMI_HIST_TRACK\n        :implements: R_ARMI_HIST_TRACK\n\n        This is a special :py:class:`Interface <armi.interfaces.Interface>` that is designed to store assembly and cross\n        section data throughout time. This is done directly, with time-based lists of assembly data, and dictionaries of\n        cross- section data. Users turn this feature on or off using the ``\"detailAllAssems\"`` setting.\n\n    Notes\n    -----\n    This pre-dates the ARMI database system, and we would like to stop supporting this. 
Please do not find new uses for\n    this; use the databases.\n\n    Attributes\n    ----------\n    detailAssemblyNames : list\n        List of detail assembly names in the reactor\n    time : list\n        list of reactor time in years\n    \"\"\"\n\n    name = \"history\"\n\n    DETAILED_ASSEMBLY_FLAGS = [Flags.FUEL, Flags.CONTROL]\n\n    def __init__(self, r, cs):\n        \"\"\"\n        HistoryTracker that uses the database to look up parameter history rather than storing them in memory.\n\n        Warning\n        -------\n        If the current timestep history is requested and the database has not yet been written this timestep, the\n        current value of the requested parameter is provided. It is possible that this is not the value that will be\n        written to the database during this time step since many interfaces that change parameters may interact between\n        this call and the database write.\n        \"\"\"\n        interfaces.Interface.__init__(self, r, cs)\n        self.detailAssemblyNames = []\n        self._preloadedBlockHistory = None\n\n    def interactBOL(self):\n        self.addDetailAssembliesBOL()\n\n    def interactBOC(self, cycle=None):\n        \"\"\"Look for any new assemblies that are asked for and add them to tracking.\"\"\"\n        self.addDetailAssemsByAssemNums()\n        if self.cs[\"detailAllAssems\"]:\n            self.addAllDetailedAssems()\n\n    def interactEOL(self):\n        \"\"\"Generate the history reports.\"\"\"\n        self._writeDetailAssemblyHistories()\n\n    def addDetailAssembliesBOL(self):\n        \"\"\"Find and activate assemblies that the user requested detailed treatment of.\"\"\"\n        if self.cs[\"detailAssemLocationsBOL\"]:\n            for locLabel in self.cs[\"detailAssemLocationsBOL\"]:\n                ring, pos, _axial = grids.locatorLabelToIndices(locLabel)\n                i, j = self.r.core.spatialGrid.getIndicesFromRingAndPos(ring, pos)\n                aLoc = 
self.r.core.spatialGrid[i, j, 0]\n                try:\n                    a = self.r.core.childrenByLocator[aLoc]\n                except KeyError:\n                    runLog.error(\n                        f\"Detail assembly in location {locLabel} (requested via `detailAssemLocationsBOL`) is not in \"\n                        \"the core. Update settings.\"\n                    )\n                    raise\n                self.addDetailAssembly(a)\n\n        if self.cs[\"detailAllAssems\"]:\n            self.addAllDetailedAssems()\n\n        # This also gets called at BOC but we still do it here for operators that do not call BOC.\n        self.addDetailAssemsByAssemNums()\n\n    def addAllDetailedAssems(self):\n        \"\"\"Add all assems who have the DETAILED_ASSEMBLY_FLAGS as detail assems.\"\"\"\n        for a in self.r.core:\n            if a.hasFlags(self.DETAILED_ASSEMBLY_FLAGS):\n                self.addDetailAssembly(a)\n\n    def addDetailAssemsByAssemNums(self):\n        \"\"\"\n        Activate detail assemblies from input based on assembly number.\n\n        This is used to activate detail assembly tracking on assemblies that are not present in the core at BOL.\n\n        See Also\n        --------\n        addDetailAssembliesBOL : Similar but for BOL\n        \"\"\"\n        detailAssemNums = self.cs[\"detailAssemNums\"]\n        if not detailAssemNums:\n            return\n        for a in self.r.core:\n            thisNum = a.getNum()\n            # check for new detail assemblies\n            if thisNum in detailAssemNums:\n                self.addDetailAssembly(a)\n\n    def _writeDetailAssemblyHistories(self):\n        \"\"\"Write data file with assembly histories.\"\"\"\n        detailAssems = self.getDetailAssemblies()\n        if len(detailAssems) == 0:\n            return\n        allBlockHistories = self.getAssemHistories(detailAssems)\n        dbi = self.getInterface(\"database\")\n        locHistory = dbi.getHistories(detailAssems, 
[\"location\"])\n        assemLocations = {a: locHistory[a][\"location\"] for a in detailAssems}\n        self.writeAssemHistories(detailAssems, allBlockHistories, assemLocations)\n\n    def _getAssemHistoryFileName(self, assem):\n        return self._getHistoryFileName(assem.getName(), \"a\")\n\n    def _getHistoryFileName(self, label, letter):\n        return f\"{self.cs.caseTitle}-{label}-{letter}Hist.txt\"\n\n    def getTrackedParams(self):\n        \"\"\"Give the list of block parameters that are being tracked.\"\"\"\n        trackedParams = {\"residence\", \"ztop\", \"zbottom\"}\n\n        # loop through interfaces to allow them to add custom params.\n        for i in self.o.getInterfaces():\n            for newParam in i.getHistoryParams():\n                if newParam not in trackedParams:\n                    trackedParams.add(newParam)\n        return sorted(trackedParams)\n\n    def addDetailAssembly(self, a: \"Assembly\"):\n        \"\"\"Track the name of assemblies that are flagged for detailed treatment.\"\"\"\n        aName = a.getName()\n        if aName not in self.detailAssemblyNames:\n            self.detailAssemblyNames.append(aName)\n\n    def getDetailAssemblies(self) -> list[\"Assembly\"]:\n        \"\"\"Returns the assemblies that have been signaled as detail assemblies.\"\"\"\n        assems = []\n        if not self.detailAssemblyNames:\n            runLog.info(\"No detail assemblies HistoryTrackerInterface\")\n        for name in self.detailAssemblyNames:\n            try:\n                assems.append(self.r.core.getAssemblyByName(name))\n            except KeyError:\n                if name in {a.name for a in self.r.core}:\n                    raise Exception(\"Found it\")\n                runLog.warning(\n                    \"Cannot find detail assembly {} in assemblies-by-name lookup table, which has {} entries\".format(\n                        name, len(self.r.core.assembliesByName)\n                    )\n                )\n     
   return assems\n\n    def getDetailBlocks(self) -> list[\"Block\"]:\n        \"\"\"Get all blocks in all detail assemblies.\"\"\"\n        return [block for a in self.getDetailAssemblies() for block in a]\n\n    def nonStationaryBlocks(self, a: \"Assembly\"):\n        return [b for b in a if not any(b.hasFlags(sbf) for sbf in self.r.core.stationaryBlockFlagsList)]\n\n    def getAssemHistories(self, assemList: List[\"Assembly\"]):\n        \"\"\"Get the histories for all blocks in detailed assemblies.\"\"\"\n        return self.getInterface(\"database\").getHistories(\n            [b for a in assemList for b in self.nonStationaryBlocks(a)],\n            self.getTrackedParams(),\n        )\n\n    def writeAssemHistories(self, detailAssems, allBlockHistories, assemLocations):\n        \"\"\"Write detailed assembly histories to text files.\"\"\"\n        dbi = self.getInterface(\"database\")\n        times = dbi.getHistory(self.r, [\"time\"])[\"time\"]\n        params = self.getTrackedParams()\n        for a in detailAssems:\n            fName = self._getAssemHistoryFileName(a)\n            with open(fName, \"w\") as out:\n                # ts is a tuple, remove the spaces from the string representation so it is easy to load into a\n                # spreadsheet or whatever\n                headers = [str(ts).replace(\" \", \"\") for ts in times.keys()]\n                out.write(\n                    tabulate.tabulate(\n                        data=(times.values(),),\n                        headers=headers,\n                        tableFmt=\"plain\",\n                        floatFmt=\"11.5E\",\n                    )\n                )\n                out.write(\"\\n\")\n\n                for param in params:\n                    out.write(\"\\n\\nkey: {0}\\n\".format(param))\n\n                    data = [allBlockHistories[b][param].values() for b in self.nonStationaryBlocks(a)]\n                    out.write(tabulate.tabulate(data, tableFmt=\"plain\", 
floatFmt=\"11.5E\"))\n                    out.write(\"\\n\")\n\n                # loc is a tuple, remove the spaces from the string representation so it is easy to load into a\n                # spreadsheet or whatever\n                location = [str(loc).replace(\" \", \"\") for loc in assemLocations[a].values()]\n                out.write(\"\\n\\nkey: location\\n\")\n                out.write(tabulate.tabulate((location,), tableFmt=\"plain\"))\n                out.write(\"\\n\\n\\n\")\n\n                headers = \"EOL bottom top center\".split()\n                data = [(\"\", b.p.zbottom, b.p.ztop, b.p.z) for b in self.nonStationaryBlocks(a)]\n                out.write(tabulate.tabulate(data, headers=headers, tableFmt=\"plain\", floatFmt=\"10.3f\"))\n\n                out.write(\"\\n\\n\\nAssembly info\\n\")\n                out.write(f\"{a.getName()} {a.getType()}\\n\")\n                for b in self.nonStationaryBlocks(a):\n                    out.write(f'\"{b.getType()}\" {b.p.xsType} {b.p.envGroup}\\n')\n\n    def preloadBlockHistoryVals(self, names, keys, timesteps):\n        \"\"\"\n        Pre-load block data so it can be more quickly accessed in the future.\n\n        Notes\n        -----\n        Pre-loading has value because the database is organized in a fashion that is easy/inexpensive to look up data\n        for many of time steps simultaneously. These can then be stored and provided when the specific timestep is\n        requested. 
The method ``getBlockHistoryVal`` still looks at the database if the preloaded values don't have the\n        needed data, so the same results should be given if this method is not called.\n        \"\"\"\n        try:\n            dbi = self.getInterface(\"database\")\n            blocks = [self.r.core.getBlockByName(name) for name in names]\n            # weird special stuff for loc, just leave it be.\n            keys = [key for key in keys if key != \"loc\"]\n            data = dbi.getHistories(blocks, keys, timesteps)\n            self._preloadedBlockHistory = data\n        except Exception:\n            # fails during the beginning of standard runs, but that's ok\n            runLog.info(f\"Unable to pre-load block history values due to error:\\n{traceback.format_exc()}\")\n            self.unloadBlockHistoryVals()\n\n    def unloadBlockHistoryVals(self):\n        \"\"\"Remove all cached db reads.\"\"\"\n        self._preloadedBlockHistory = None\n\n    def getBlockHistoryVal(self, name: str, paramName: str, ts: tuple[int, int]):\n        \"\"\"\n        Use the database interface to return the parameter values for the supplied block names, and timesteps.\n\n        Notes\n        -----\n        If the current timestep history is requested and the database has not yet been written this timestep, the\n        current value of the requested parameter is returned.\n\n        Parameters\n        ----------\n        name\n            name of block\n        paramName\n            parameter keys of interest\n        ts\n            cycle and node from which to load data\n\n        Raises\n        ------\n        KeyError\n            When param not found in database.\n        \"\"\"\n        block = self.r.core.getBlockByName(name)\n\n        if self._isCurrentTimeStep(ts) and not self._databaseHasDataForTimeStep(ts):\n            # Current timenode may not have been written to the DB. Use the current value in the param system. 
Works for\n            # fuel performance, for some params, e.g. burnup, dpa.\n            return block.p[paramName]\n\n        try:\n            val = self._preloadedBlockHistory[block][paramName][ts]\n        # not in preloaded or preloaded failed\n        except (TypeError, ValueError, KeyError, IndexError):\n            dbi = self.getInterface(\"database\")\n            try:\n                data = dbi.database.getHistory(block, [paramName], [ts])\n                val = data[paramName][ts]\n            except KeyError:\n                runLog.error(f\"No value in DB. param name: {paramName} requested index: {ts}\")\n                raise\n\n        return val\n\n    def _isCurrentTimeStep(self, ts: tuple[int, int]) -> bool:\n        \"\"\"Return True if the timestep requested is the current time step.\"\"\"\n        return ts == (self.r.p.cycle, self.r.p.timeNode)\n\n    def _databaseHasDataForTimeStep(self, ts) -> bool:\n        \"\"\"Return True if the database has data for the requested time step.\"\"\"\n        dbi = self.getInterface(\"database\")\n        return ts in dbi.database.genTimeSteps()\n\n    def getTimeSteps(self, a: \"Assembly\" = None) -> list[float]:\n        \"\"\"\n        Given a fuel assembly, return list of time steps values (in years) that are available.\n\n        Parameters\n        ----------\n        a\n            A fuel assembly that has been designated a detail assem. If passed, only timesteps where this assembly is in\n            the core will be tracked.\n\n        Returns\n        -------\n        timeSteps\n            times in years that are available in the history\n\n        See Also\n        --------\n        getTimeIndices : gets indices where an assembly is in the core\n        \"\"\"\n        dbi = self.getInterface(\"database\")\n        timeInYears = dbi.getHistory(self.r, [\"time\"])[\"time\"]\n\n        # remove the time step info. 
Clients don't want it\n        timeInYears = [t[1] for t in timeInYears]\n        if a:\n            b = self._getBlockInAssembly(a)\n            ids = dbi.getHistory([\"id\"])[\"id\"]\n            timeInYears = [time for time, ids in zip(timeInYears, ids) if b.p.id in ids]\n        return timeInYears\n\n    @staticmethod\n    def _getBlockInAssembly(a: \"Assembly\") -> \"Block\":\n        \"\"\"Get a representative fuel block from a fuel assembly.\"\"\"\n        b = a.getFirstBlock(Flags.FUEL)\n        if not b:\n            runLog.error(f\"Assembly {a} does not contain fuel\")\n            for b in a:\n                runLog.error(f\"Block {b}\")\n            raise RuntimeError(\n                \"A tracked assembly does not contain fuel and has caused this error, see the details in stdout.\"\n            )\n\n        return b\n"
  },
  {
    "path": "armi/bookkeeping/mainInterface.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis module performs some file manipulations, cleanups, state loads, etc.\n\nIt's a bit of a catch-all interface, and it's name is admittedly not very descriptive.\n\"\"\"\n\nimport glob\nimport itertools\nimport os\nimport re\n\nfrom armi import context, interfaces, runLog, utils\nfrom armi.bookkeeping.db.database import Database\nfrom armi.settings.fwSettings.globalSettings import (\n    CONF_COPY_FILES_FROM,\n    CONF_COPY_FILES_TO,\n    CONF_ZONE_DEFINITIONS,\n    CONF_ZONES_FILE,\n)\nfrom armi.utils import pathTools\nfrom armi.utils.customExceptions import InputError\n\nORDER = interfaces.STACK_ORDER.PREPROCESSING\n\n\ndef describeInterfaces(_cs):\n    \"\"\"Function for exposing interface(s) to other code.\"\"\"\n    return (MainInterface, {\"reverseAtEOL\": True})\n\n\nclass MainInterface(interfaces.Interface):\n    \"\"\"\n    Do some basic manipulations, calls, Instantiates the database.\n\n    Notes\n    -----\n    Interacts early so that the database is accessible as soon as possible in the run. 
The database\n    interfaces runs near the end of the interface stack, but the main interface interacts first.\n    \"\"\"\n\n    name = \"main\"\n\n    @staticmethod\n    def specifyInputs(cs):\n        return {CONF_ZONES_FILE: [cs[CONF_ZONES_FILE]]}\n\n    def interactBOL(self):\n        interfaces.Interface.interactBOL(self)\n        self._moveFiles()\n\n    def _moveFiles(self):\n        \"\"\"\n        At the start of each run, arbitrary lists of user-defined files can be copied around.\n\n        This logic is controlled by the settings ``copyFilesFrom`` & ``copyFilesTo``.\n\n        ``copyFilesFrom`` :\n\n        - List of files to copy (cannot be directories).\n        - Can be of length zero (that just means no files will be copied).\n        - The file names listed can use the ``*`` glob syntax, to reference multiple files.\n\n\n        ``copyFilesTo`` :\n\n        - List of directories to copy the files into.\n        - Can be of length zero; all files will be copied to the local dir.\n        - Can be of length one; all files will be copied to that dir.\n        - The only other valid length for this list _must_ be the same length as the \"from\" list.\n\n        Notes\n        -----\n        If a provided \"from\" file is missing, this method will silently pass over that. It will only\n        check if the length of the \"from\" and \"to\" lists are valid in the end.\n        \"\"\"\n        # handle a lot of asterisks and missing files\n        copyFilesFrom = [\n            filePath for possiblePath in self.cs[CONF_COPY_FILES_FROM] for filePath in glob.glob(possiblePath)\n        ]\n        copyFilesTo = self.cs[CONF_COPY_FILES_TO]\n\n        if len(copyFilesTo) in (len(copyFilesFrom), 0, 1):\n            # if any files to copy, then use the first as the default, i.e. 
len() == 1,\n            # otherwise assume '.'\n            default = copyFilesTo[0] if any(copyFilesTo) else \".\"\n            for filename, dest in itertools.zip_longest(copyFilesFrom, copyFilesTo, fillvalue=default):\n                pathTools.copyOrWarn(CONF_COPY_FILES_FROM, filename, dest)\n        else:\n            runLog.error(\n                f\"cs['{CONF_COPY_FILES_TO}'] must either be length 0, 1, or have the same number \"\n                f\"of entries as cs['{CONF_COPY_FILES_FROM}']. Actual values:\\n\"\n                f\"    {CONF_COPY_FILES_TO}   : {copyFilesTo}\\n\"\n                f\"    {CONF_COPY_FILES_FROM} : {copyFilesFrom}\"\n            )\n            raise InputError(f\"Failed to process {CONF_COPY_FILES_FROM}/{CONF_COPY_FILES_TO}\")\n\n    def interactBOC(self, cycle=None):\n        \"\"\"Typically the first interface to interact beginning of cycle.\"\"\"\n        runLog.important(f\"Beginning of Cycle {cycle}\")\n        runLog.LOG.clearSingleLogs()\n\n        if self.cs[\"rmExternalFilesAtBOC\"]:\n            self.cleanLastCycleFiles()\n\n    def interactEveryNode(self, cycle, node):\n        \"\"\"Loads from db if necessary.\"\"\"\n        if self.cs[\"loadStyle\"] == \"fromDB\" and self.cs[\"loadFromDBEveryNode\"]:\n            if cycle == 0 and node == 0:\n                # skip at BOL because interactBOL handled it.\n                pass\n            else:\n                with Database(self.cs[\"reloadDBName\"], \"r\") as db:\n                    r = db.load(cycle, node, self.cs)\n\n                self.o.reattach(r, self.cs)\n\n        if self.cs[CONF_ZONES_FILE] or self.cs[CONF_ZONE_DEFINITIONS]:\n            self.r.core.buildManualZones(self.cs)\n\n    def interactEOL(self):\n        if self.cs[\"rmExternalFilesAtEOL\"]:\n            # successful run with rmExternalFilesAtEOL activated. 
Clean things up.\n            self.cleanARMIFiles()\n        runLog.warningReport()\n\n    def cleanARMIFiles(self):\n        \"\"\"\n        Delete temporary ARMI run files like simulation inputs/outputs.\n\n        Useful if running a clean job that doesn't require restarts.\n        \"\"\"\n        if context.MPI_RANK != 0:\n            # avoid inadvertently calling from worker nodes which could cause filesystem lockups.\n            raise ValueError(\"Only the master node is allowed to clean files here.\")\n        runLog.important(\"Cleaning ARMI files due to rmExternalFilesAtEOL option\")\n        for fileName in os.listdir(os.getcwd()):\n            # clean simulation inputs and outputs\n            for candidate in [\".BCD\", \".inp\", \".out\", \"ISOTXS-\"]:\n                if candidate in fileName:\n                    if \".htos.out\" in fileName:\n                        continue\n                    if \"sassys.inp\" in fileName:\n                        continue\n\n                    os.remove(fileName)\n\n            if re.search(\"ISO..F?$\", fileName):\n                # clean intermediate XS\n                os.remove(fileName)\n\n        for snapText in self.cs[\"dumpSnapshot\"]:\n            # snapText is a CCCNNN with C=cycle and N=node\n            cycle = int(snapText[0:3])\n            node = int(snapText[3:])\n            newFolder = \"snapShot{0}_{1}\".format(cycle, node)\n            utils.pathTools.cleanPath(newFolder, forceClean=True)\n\n        # delete database if it's SQLlite\n        # no need to delete because the database won't have copied it back if using fastpath.\n\n        # clean temp directories.\n        if os.path.exists(\"shuffleBranches\"):\n            utils.pathTools.cleanPath(\"shuffleBranches\")\n            # Potentially, wait for all the processes to catch up.\n\n        if os.path.exists(\"failedRuns\"):\n            utils.pathTools.cleanPath(\"failedRuns\")\n\n    def cleanLastCycleFiles(self):\n        
\"\"\"Delete ARMI files from previous cycle that aren't necessary for the next cycle.\n        Unless you're doing reloads, of course.\n        \"\"\"\n        runLog.important(\"Cleaning ARMI files due to rmExternalFilesAtBOC option\")\n        for fileName in os.listdir(os.getcwd()):\n            # clean MC**2 and REBUS inputs and outputs\n            for candidate in [\".BCD\", \".inp\", \".out\", \"ISOTXS-\"]:\n                if candidate in fileName:\n                    # Do not remove .htos.out files.\n                    if \".htos.out\" in fileName:\n                        continue\n                    if re.search(r\"mcc[A-Z0-9]+\\.inp\", fileName):\n                        continue\n                    # don't remove mccIA1.inp stuff in case we go out of a burnup bound.\n                    try:\n                        os.remove(fileName)\n                    except OSError:\n                        runLog.warning(\n                            \"Error removing file {0} during cleanup. It is still in use, probably\".format(fileName)\n                        )\n"
  },
  {
    "path": "armi/bookkeeping/memoryProfiler.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"\nInterface to help diagnose memory issues during debugging/development.\n\nThere are many approaches to memory profiling.\n\n1. You can ask psutil for the memory used by the process from an OS perspective.\nThis is great for top-down analysis. This module provides printouts\nthat show info from every process running. This is very fast.\n\n2. You can use ``gc.get_objects()`` to list all objects that the garbage collector is tracking. If you want, you\ncan filter it down and get the counts and sizes of objects of interest (e.g. all armi objects).\n\nThis module has tools to do all of this. 
It should help you out.\n\nNOTE: Psutil and sys.getsizeof will certainly report slightly different results.\n\nNOTE: In Windows, it seems that even if your garbage is collected, Windows does not de-allocate all the memory.\nSo if you are a worker and you just got a 2GB reactor but then deleted it, Windows will keep you at 2GB for a while.\n\nSee Also\n--------\nhttps://pythonhosted.org/psutil/\nhttps://docs.python.org/3/library/gc.html#gc.garbage\n\"\"\"\n\nimport gc\nimport sys\nfrom os import cpu_count\nfrom typing import Optional\n\nfrom armi import context, interfaces, mpiActions, runLog\nfrom armi.reactor.composites import ArmiObject\nfrom armi.utils import tabulate\nfrom armi.utils.customExceptions import NonexistentSetting\n\ntry:\n    # psutil is an optional requirement, since it doesn't support MacOS very well\n    import psutil\n\n    _havePsutil = True\nexcept ImportError:\n    runLog.warning(\"Failed to import psutil; MemoryProfiler will not provide meaningful data.\")\n    _havePsutil = False\n\n\nORDER = interfaces.STACK_ORDER.POSTPROCESSING\nREPORT_COUNT = 100000\n\n\ndef describeInterfaces(cs):\n    \"\"\"Function for exposing interface(s) to other code.\"\"\"\n    return (MemoryProfiler, {})\n\n\ndef getTotalJobMemory(nTasks, cpusPerTask):\n    \"\"\"Function to calculate the total memory of a job. This is a constant during a simulation.\"\"\"\n    cpuPerNode = cpu_count()\n    ramPerCpuGB = psutil.virtual_memory().total / (1024**3) / cpuPerNode\n    jobMem = nTasks * cpusPerTask * ramPerCpuGB\n    return jobMem\n\n\ndef getCurrentMemoryUsage():\n    \"\"\"This scavenges the memory profiler in ARMI to get the current memory usage.\"\"\"\n    memUsageAction = PrintSystemMemoryUsageAction()\n    memUsageAction.broadcast()\n    smpu = SystemAndProcessMemoryUsage()\n    memUsages = memUsageAction.gather(smpu)\n    # Grab virtual memory instead of physical. 
There is a large discrepancy, we will be conservative\n    memoryUsageInMB = sum([mu.processVirtualMemoryInMB for mu in memUsages])\n    return memoryUsageInMB\n\n\nclass MemoryProfiler(interfaces.Interface):\n    name = \"memoryProfiler\"\n\n    def __init__(self, r, cs):\n        interfaces.Interface.__init__(self, r, cs)\n        self.sizes = {}\n\n    def interactBOL(self):\n        interfaces.Interface.interactBOL(self)\n        self.printCurrentMemoryState()\n        mpiAction = PrintSystemMemoryUsageAction()\n        mpiAction.broadcast().invoke(self.o, self.r, self.cs)\n        mpiAction.printUsage(\"BOL SYS_MEM\")\n\n        # so we can debug mem profiler quickly\n        if self.cs[\"debugMem\"]:\n            mpiAction = ProfileMemoryUsageAction(\"EveryNode\")\n            mpiAction.broadcast().invoke(self.o, self.r, self.cs)\n\n    def interactEveryNode(self, cycle, node):\n        self.printCurrentMemoryState()\n\n        mp = PrintSystemMemoryUsageAction()\n        mp.broadcast()\n        mp.invoke(self.o, self.r, self.cs)\n        mp.printUsage(\"c{} n{} SYS_MEM\".format(cycle, node))\n\n        self.r.core.p.minProcessMemoryInMB = round(mp.minProcessMemoryInMB * 10) / 10.0\n        self.r.core.p.maxProcessMemoryInMB = round(mp.maxProcessMemoryInMB * 10) / 10.0\n\n        if self.cs[\"debugMem\"]:\n            mpiAction = ProfileMemoryUsageAction(\"EveryNode\")\n            mpiAction.broadcast().invoke(self.o, self.r, self.cs)\n\n    def interactEOL(self):\n        \"\"\"End of life hook. 
Good place to wrap up or print out summary outputs.\"\"\"\n        if self.cs[\"debugMem\"]:\n            mpiAction = ProfileMemoryUsageAction(\"EOL\")\n            mpiAction.broadcast().invoke(self.o, self.r, self.cs)\n\n    def printCurrentMemoryState(self):\n        \"\"\"Print the current memory footprint and available memory.\"\"\"\n        try:\n            cpusPerTask = self.cs[\"cpusPerTask\"]\n        except NonexistentSetting:\n            runLog.extra(\n                \"To view memory consumed, remaining available, and total allocated for a case, \"\n                \"add the setting 'cpusPerTask' to your application.\"\n            )\n            return\n        nTasks = self.cs[\"nTasks\"]\n        totalMemoryInGB = getTotalJobMemory(nTasks, cpusPerTask)\n        currentMemoryUsageInGB = getCurrentMemoryUsage() / 1024\n        availableMemoryInGB = totalMemoryInGB - currentMemoryUsageInGB\n        runLog.info(\n            f\"Currently using {currentMemoryUsageInGB} GB of memory. \"\n            f\"There is {availableMemoryInGB} GB of memory left. 
\"\n            f\"There is a total allocation of {totalMemoryInGB} GB.\"\n        )\n\n    def displayMemoryUsage(self, timeDescription):\n        r\"\"\"\n        Print out some information to stdout about the memory usage of ARMI.\n\n        Useful when the debugMem setting is set to True.\n\n        Turn these on as appropriate to find all your problems.\n        \"\"\"\n        runLog.important(\"----- Memory Usage Report at {} -----\".format(timeDescription))\n        self._printFullMemoryBreakdown(reportSize=self.cs[\"debugMemSize\"])\n        self._reactorAssemblyTrackingBreakdown()\n        runLog.important(\"----- End Memory Usage Report at {} -----\".format(timeDescription))\n\n    def _reactorAssemblyTrackingBreakdown(self):\n        runLog.important(\"Reactor attribute ArmiObject tracking count\")\n        for attrName, attrObj in self.r.core.__dict__.items():\n            if not attrObj:\n                continue\n\n            if isinstance(attrObj, list) and isinstance(attrObj[0], ArmiObject):\n                runLog.important(\"List {:30s} has {:4d} ArmiObjects\".format(attrName, len(attrObj)))\n\n            if isinstance(attrObj, dict) and isinstance(list(attrObj.values())[0], ArmiObject):\n                runLog.important(\"Dict {:30s} has {:4d} ArmiObjects\".format(attrName, len(attrObj)))\n\n        if self.r.excore.get(\"sfp\") is not None:\n            runLog.important(\"SFP has {:4d} ArmiObjects\".format(len(self.r.excore[\"sfp\"])))\n\n    def checkForDuplicateObjectsOnArmiModel(self, attrName, refObject):\n        \"\"\"Scans through ARMI model for duplicate objects.\"\"\"\n        if self.r is None:\n            return\n        uniqueIds = set()\n        uniqueObjTypes = set()\n\n        def checkAttr(subObj):\n            if getattr(subObj, attrName, refObject) != refObject:\n                uniqueIds.add(id(getattr(subObj, attrName)))\n                uniqueObjTypes.add(subObj.__class__.__name__)\n\n        for a in 
self.r.core.getAssemblies(includeAll=True):\n            checkAttr(a)\n            for b in a:\n                checkAttr(b)\n                for c in b:\n                    checkAttr(c)\n                    checkAttr(c.material)\n\n        for i in self.o.getInterfaces():\n            checkAttr(i)\n            if i.name == \"xsGroups\":\n                for _, block in i.representativeBlocks.items():\n                    checkAttr(block)\n\n        if len(uniqueIds) == 0:\n            runLog.important(\"There are no duplicate `.{}` attributes\".format(attrName))\n        else:\n            runLog.error(\n                \"There are {} unique objects stored as `.{}` attributes!\\n\"\n                \"Expected id {}, but got {}.\\nExpected object:{}\\n\"\n                \"These types of objects had unique attributes: {}\".format(\n                    len(uniqueIds) + 1,\n                    attrName,\n                    id(refObject),\n                    uniqueIds,\n                    refObject,\n                    \", \".join(uniqueObjTypes),\n                )\n            )\n            raise RuntimeError\n\n    def _printFullMemoryBreakdown(self, reportSize=True, printReferrers=False):\n        \"\"\"\n        Looks for any class from any module in the garbage collector and prints their count and size.\n\n        Parameters\n        ----------\n        reportSize : bool, optional\n            calculate size as well as counting individual objects.\n\n        Notes\n        -----\n        Just because you use startsWith=armi doesn't mean you'll capture all ARMI objects. 
Some are in lists\n        and dictionaries.\n        \"\"\"\n        cs = self.cs\n        operator = self.o\n        reactor = self.r\n\n        if reportSize:\n            self.o.detach()\n\n        gc.collect()\n        allObjects = gc.get_objects()\n        runLog.info(\"GC returned {} objects\".format(len(allObjects)))\n\n        instanceCounters = KlassCounter(reportSize)\n        instanceCounters.countObjects(allObjects)\n\n        for counter in sorted(instanceCounters.counters.values()):\n            runLog.info(\n                \"UNIQUE_INSTANCE_COUNT: {:60s} {:10d}     {:10.1f} MB\".format(\n                    counter.classType.__name__,\n                    counter.count,\n                    counter.memSize / (1024**2.0),\n                )\n            )\n            if printReferrers and counter.memSize / (1024**2.0) > 100:\n                referrers = gc.get_referrers(counter.first)\n                runLog.info(\"          Referrers of first one: \")\n                for referrer in referrers:\n                    runLog.info(\"             {}\".format(repr(referrer)[:150]))\n\n        runLog.info(\"gc garbage: {}\".format(gc.garbage))\n        if printReferrers:\n            # if you want more info on the garbage referrers, run this. 
WARNING, it's generally like 1000000 lines.\n            runLog.info(\"referrers\")\n            for o in gc.garbage:\n                for r in gc.get_referrers(o):\n                    runLog.info(\"ref for {}: {}\".format(o, r))\n\n        if reportSize:\n            operator.reattach(reactor, cs)\n\n    @staticmethod\n    def getReferrers(obj):\n        \"\"\"Print referrers in a useful way (as opposed to gigabytes of text.\"\"\"\n        runLog.info(\"Printing first 100 character of first 100 referrers\")\n        for ref in gc.get_referrers(obj)[:100]:\n            runLog.important(\"ref for {}: {}\".format(obj, repr(ref)[:100]))\n\n\nclass KlassCounter:\n    \"\"\"\n    Helper class, to allow us to count instances of various classes in the\n    Python standard library garbage collector (gc).\n\n    Counting can be done simply, or by memory footprint.\n    \"\"\"\n\n    def __init__(self, reportSize):\n        self.counters = dict()\n        self.reportSize = reportSize\n        self.count = 0\n\n    def __getitem__(self, classType):\n        if classType not in self.counters:\n            self.counters[classType] = InstanceCounter(classType, self.reportSize)\n        return self.counters[classType]\n\n    def countObjects(self, ao):\n        \"\"\"\n        Recursively find objects inside arbitrarily-deeply-nested containers.\n\n        This is designed to work with the garbage collector, so it focuses on\n        objects potentially being held in dict, tuple, list, or sets.\n        \"\"\"\n        counter = self[type(ao)]\n        if counter.add(ao):\n            self.count += 1\n            if self.count % REPORT_COUNT == 0:\n                runLog.info(\"Counted {} items\".format(self.count))\n\n            if isinstance(ao, dict):\n                for k, v in ao.items():\n                    self.countObjects(k)\n                    self.countObjects(v)\n            elif isinstance(ao, (list, tuple, set)):\n                for v in iter(ao):\n            
        self.countObjects(v)\n\n\nclass InstanceCounter:\n    def __init__(self, classType, reportSize):\n        self.classType = classType\n        self.count = 0\n        self.reportSize = reportSize\n        if reportSize:\n            self.memSize = 0\n        else:\n            self.memSize = float(\"nan\")\n        self.items = set()\n        self.ids = set()\n        self.first = None\n\n    def add(self, item):\n        itemId = id(item)\n        if itemId in self.ids:\n            return False\n\n        self.ids.add(itemId)\n        if self.reportSize:\n            self.memSize += sys.getsizeof(item)\n        self.count += 1\n        return True\n\n    def __cmp__(self, that):\n        return (self.count > that.count) - (self.count < that.count)\n\n    def __ls__(self, that):\n        return self.count < that.count\n\n    def __gt__(self, that):\n        return self.count > that.count\n\n\nclass ProfileMemoryUsageAction(mpiActions.MpiAction):\n    def __init__(self, timeDescription):\n        mpiActions.MpiAction.__init__(self)\n        self.timeDescription = timeDescription\n\n    def invokeHook(self):\n        mem = self.o.getInterface(\"memoryProfiler\")\n        mem.displayMemoryUsage(self.timeDescription)\n\n\nclass SystemAndProcessMemoryUsage:\n    def __init__(self):\n        self.nodeName = context.MPI_NODENAME\n        self.percentNodeRamUsed: Optional[float] = None\n        self.processMemoryInMB: Optional[float] = None\n        self.processVirtualMemoryInMB: Optional[float] = None\n        # no psutil, no memory diagnostics\n        if _havePsutil:\n            self.percentNodeRamUsed = psutil.virtual_memory().percent\n            self.processMemoryInMB = psutil.Process().memory_info().rss / (1024.0**2)\n            self.processVirtualMemoryInMB = psutil.Process().memory_info().vms / (1024.0**2)\n\n    def __isub__(self, other):\n        if self.percentNodeRamUsed is not None and other.percentNodeRamUsed is not None:\n            
self.percentNodeRamUsed -= other.percentNodeRamUsed\n            self.processMemoryInMB -= other.processMemoryInMB\n            self.processVirtualMemoryInMB -= other.processVirtualMemoryInMB\n        return self\n\n\nclass PrintSystemMemoryUsageAction(mpiActions.MpiAction):\n    def __init__(self):\n        mpiActions.MpiAction.__init__(self)\n        self.usages = []\n        self.percentNodeRamUsed: Optional[float] = None\n\n    def __iter__(self):\n        return iter(self.usages)\n\n    def __isub__(self, other):\n        if self.percentNodeRamUsed is not None and other.percentNodeRamUsed is not None:\n            self.percentNodeRamUsed -= other.percentNodeRamUsed\n        for mine, theirs in zip(self, other):\n            mine -= theirs\n        return self\n\n    @property\n    def minProcessMemoryInMB(self):\n        if len(self.usages) == 0:\n            return 0.0\n        return min(mu.processMemoryInMB or 0.0 for mu in self)\n\n    @property\n    def maxProcessMemoryInMB(self):\n        if len(self.usages) == 0:\n            return 0.0\n        return max(mu.processMemoryInMB or 0.0 for mu in self)\n\n    def invokeHook(self):\n        spmu = SystemAndProcessMemoryUsage()\n        self.percentNodeRamUsed = spmu.percentNodeRamUsed\n        self.usages = self.gather(spmu)\n\n    def printUsage(self, description=None):\n        \"\"\"This method prints the usage of all MPI nodes.\n\n        The printout looks something like:\n\n            SYS_MEM HOSTNAME     14.4% RAM. Proc mem (MB):   491   472   471   471   471   470\n            SYS_MEM HOSTNAME     13.9% RAM. 
Proc mem (MB):   474   473   472   471   460   461\n            SYS_MEM HOSTNAME     ...\n            SYS_MEM HOSTNAME     ...\n        \"\"\"\n        printedNodes = set()\n        prefix = description or \"SYS_MEM\"\n\n        memoryData = []\n        for memoryUsage in self:\n            if memoryUsage.nodeName in printedNodes:\n                continue\n            printedNodes.add(memoryUsage.nodeName)\n            nodeUsages = [mu for mu in self if mu.nodeName == memoryUsage.nodeName]\n            sysMemAvg = sum(mu.percentNodeRamUsed or 0.0 for mu in nodeUsages) / len(nodeUsages)\n\n            memoryData.append(\n                (\n                    \"{:<24}\".format(memoryUsage.nodeName),\n                    \"{:5.1f}%\".format(sysMemAvg),\n                    \"{}\".format(\" \".join(\"{:5.0f}\".format(mu.processMemoryInMB or 0.0) for mu in nodeUsages)),\n                )\n            )\n\n        runLog.info(\n            \"Summary of the system memory usage at `{}`:\\n\".format(prefix)\n            + tabulate.tabulate(\n                memoryData,\n                headers=[\n                    \"Machine\",\n                    \"Average System RAM Usage\",\n                    \"Processor Memory Usage (MB)\",\n                ],\n                tableFmt=\"armi\",\n            )\n        )\n"
  },
  {
    "path": "armi/bookkeeping/report/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Package for generating reports as printable groups and HTML in ARMI.\"\"\"\n\nfrom armi.bookkeeping.report import data\n\n\ndef setData(name, value, group=None, reports=None):\n    \"\"\"\n    Stores data in accordance with the specified parameters for use later.\n\n    Parameters\n    ----------\n    name : str\n    value : Object\n        Any value desired.\n    group : data.Group\n    reports : data.Report\n    \"\"\"\n    from armi.bookkeeping.report.reportInterface import ReportInterface\n\n    if not name or not isinstance(name, str):\n        raise AttributeError(f\"Given name {name} not acceptable.\")\n\n    group = group or UNGROUPED\n    if not isinstance(group, data.Group):\n        raise AttributeError(f\"Given group {group} not acceptable/approved.\")\n\n    reports = reports or []\n    if not isinstance(reports, (list, set, tuple)):\n        reports = [reports]\n    if ALL not in reports:\n        reports.append(ALL)\n    if not all(isinstance(tag, data.Report) for tag in reports):\n        raise AttributeError(f\"Unapproved reports for {name}\")\n\n    for report in reports:\n        if report not in ReportInterface.reports:\n            ReportInterface.reports.add(report)\n        report.addToReport(group, name, value)\n\n\n# --------------------------------------------\n#               GROUP DEFINITIONS\n# 
--------------------------------------------\nBLOCK_AREA_FRACS = data.Table(\n    \"Assembly Area Fractions\",\n    \" Of First Fuel Block\",\n    header=[\"Component\", \"Area (cm^2)\", \"Fraction\"],\n)\nBOND_DIMS = data.Table(\"Bond Dimensions\", \" Of First Fuel Block\")\nCASE_CONTROLS = data.Table(\"Case Controls\")\nCASE_PARAMETERS = data.Table(\"Case Parameters\")\nCLAD_DIMS = data.Table(\"Cladding Dimensions\", \" Of First Fuel Block\")\nCOOLANT_DIMS = data.Table(\"Coolant Dimensions\", \" Of First Fuel Block\")\nDUCT_DIMS = data.Table(\"Duct Dimensions\", \" Of First Fuel Block\")\nFUEL_DIMS = data.Table(\"Fuel Dimensions\", \" Of First Fuel Block\")\nGAP_DIMS = data.Table(\"Gap Dimensions\", \" Of First Fuel Block\")\nINTERCOOLANT_DIMS = data.Table(\"Intercoolant Dimensions\", \" Of First Fuel Block\")\nLINER_DIMS = data.Table(\"Liner Dimensions\", \" Of First Fuel Block\")\nNEUT_LOSS = data.Table(\"Neutron Loss\")\nNEUT_PROD = data.Table(\"Full Core Neutron Production\", header=[\"\", \"n/s\"])\nPIN_ASSEM_DESIGN = data.Table(\"Pin/Assembly Design Summary (averages)\")\nRUN_META = data.Table(\"Run Meta\")\nUNGROUPED = data.Table(\"Ungrouped\", \"No grouping specified for the following information.\")\nWIRE_DIMS = data.Table(\"Wire Dimensions\", \" Of First Fuel Block\")\n\n# -----------------------------------------\n\nASSEM_TYPES = data.Image(\n    \"Assembly Types\",\n    \"The axial block and enrichment distributions of assemblies in the core at \"\n    \"beginning of life. The percentage represents the block enrichment (U-235 or B-10), where as \"\n    \"the additional character represents the cross section id of the block. \"\n    \"The number of fine-mesh subdivisions are provided on the secondary y-axis.\",\n)\nFACE_MAP = data.Image(\"Reactor Face Map\", \"The surface map of the reactor.\")\nFLUX_PLOT = data.Image(\"Plot of flux\", \"flux plot\")\nKEFF_PLOT = data.Image(\"Plot of K-Effective vs. Time\", \"k-eff vs. 
time\")\nMOVES_PLOT = data.Image(\"Plot of Moves vs. Time\", \"moves vs. time\")\nTIME_PLOT = data.Image(\"Plot of Value vs. Time\", \"value vs. time\")\nTIMELINE = data.Image(\"Timeline\", \"Time occupied by certain method invocations in run\")\nXS_PLOT = data.Image(\"Plot of Xs vs. Time\", \"xs vs. time\")\n\n\n# --------------------------------------------\n#               REPORT DEFINITIONS\n# --------------------------------------------\nALL = data.Report(\n    \"Comprehensive Core Report\",\n    \"Every piece of reported information about the ARMI run.\",\n)\nDESIGN = data.Report(\"Core Design Report\", \"Information related to the core design parameters\")\n\n\n# --------------------------------------------\n#               FURTHER STYLIZATION\n# --------------------------------------------\n\n# have every report render these in the following order if present\ndata.Report.groupsOrderFirst = [\n    FACE_MAP,\n    RUN_META,\n    CASE_PARAMETERS,\n    CASE_CONTROLS,\n    ASSEM_TYPES,\n]\n\n# This a grouping of components which span the entire html page rather than being sectioned into\n# smaller columns.\ndata.Report.componentWellGroups = [\n    FACE_MAP,\n    ASSEM_TYPES,\n    CLAD_DIMS,\n    WIRE_DIMS,\n    DUCT_DIMS,\n    COOLANT_DIMS,\n    INTERCOOLANT_DIMS,\n    FUEL_DIMS,\n    BOND_DIMS,\n]\n"
  },
  {
    "path": "armi/bookkeeping/report/data.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Data formats for reports.\"\"\"\n\nimport collections\nimport copy\nimport re\n\nfrom armi import runLog\n\n\nclass Report:\n    \"\"\"Storage for data separated out for a particular kind of user.\"\"\"\n\n    # stubs for \"further stylization\" in the report package init\n    groupsOrderFirst = []\n    componentWellGroups = []\n\n    def __init__(self, title, description):\n        self.title = title\n        self.description = description\n        self.groups = {}  # {Global Instance : Local Instance}\n\n    @property\n    def _groupRenderOrder(self):\n        \"\"\"Helper method to the rendering methods on this class for rendering order of contained info.\"\"\"\n        presentGroupsOrderFirst = [group for group in self.groupsOrderFirst if group in self.groups]\n        completeGroupOrder = presentGroupsOrderFirst + [\n            group for group in self.groups.keys() if group not in presentGroupsOrderFirst\n        ]\n        specialsRemovedOrder = [group for group in completeGroupOrder if group not in self.componentWellGroups]\n        return specialsRemovedOrder\n\n    def __str__(self):\n        str_ = \"\\n{} - (REPORT) {}\\n\".format(self.title, self.description)\n        for global_group in self.groups.values():\n            str_ += re.sub(\"\\n\", \"\\n\\t\", \"{}\".format(Group.__str__(global_group)))  # Don't use subclassed methods\n        
return str_\n\n    def addToReport(self, group, name, value):\n        \"\"\"Inserts the datum into the correct group of the report.\"\"\"\n        if group not in self.groups:\n            self.groups[group] = copy.deepcopy(group)\n        self.groups[group][name] = value\n\n    def __getitem__(self, group):\n        try:\n            return self.groups[group]\n        except KeyError:\n            runLog.warning(\"Cannot locate group {} in report {}\".format(group.title, self.title))\n            return None\n\n\nclass Group:\n    \"\"\"Abstract class, when extended is used for storage for data within a report.\n\n    Only accepts things wrapped in the ReportDatum class.\n    \"\"\"\n\n    def __init__(self, title, description=\"\"):\n        self.title = title\n        self.description = description\n        self.data = collections.OrderedDict()\n        self.descStyle = \"font-weight: normal; font-style: italic; font-size: 14px; padding-left: 5px;\"\n        self.titleStyle = \"font-weight: bold; padding-top: 20px;\"\n\n    def __str__(self):\n        str_ = \"\\n{} - (GROUP) {}\\n\".format(self.title, self.description)\n        for name, value in self.data.items():\n            str_ += \"\\t{:<30} {}\\n\".format(name, value)\n        return str_\n\n    def __getitem__(self, name):\n        try:\n            return self.data[name]\n        except KeyError:\n            runLog.warning(\"Given name {} not present in report group {}\".format(name, self.title))\n\n        return None\n\n    def __setitem__(self, name, value):\n        self.data[name] = value\n\n\nclass Table(Group):\n    def __init__(self, title, description=\"\", header=None):\n        Group.__init__(self, title, description=description)\n        self.header = header\n\n    def __str__(self):\n        \"\"\"Truer to content representation.\"\"\"\n        # error handling\n        if not len(self.data):\n            return \"\"\n\n        # set up\n        prototypical_data = 
list(self.data.values())[0]\n        num_cols = len(prototypical_data) + 1\n        border_dashes = \"-\" * (num_cols * 31) + \"\\n\"\n\n        # create header\n        str_ = border_dashes\n        str_ += \"{} - {}\\n\".format(self.title, self.description)\n        if self.header:\n            for column_title in self.header:\n                str_ += \"{:<30} \".format(column_title)\n            str_ += \"\\n\"\n        str_ += border_dashes\n\n        # create table body\n        for name, value in sorted(self.data.items(), key=self._lowerCaseSortForTuples):\n            str_ += \"{:<30} \".format(name)\n            for item in value:\n                str_ += \"{:<30} \".format(item)\n            str_ += \"\\n\"\n\n        return str_\n\n    @staticmethod\n    def _lowerCaseSortForTuples(nameValPair):\n        \"\"\"Force the key in a key-value pair to lower case.\"\"\"\n        return nameValPair[0].lower()\n\n    def __setitem__(self, name, value):\n        if not isinstance(value, list):\n            value = [value]\n\n        Group.__setitem__(self, name, value)\n\n\nclass Image(Group):\n    def __init__(self, title, description=\"\"):\n        Group.__init__(self, title, description=description)\n        self._shortformTitle = title.replace(\" \", \"\").lower()\n"
  },
  {
    "path": "armi/bookkeeping/report/reportInterface.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis interface serves the reporting needs of ARMI.\n\nIf there is any information that a user desires to show in PDF form to\nothers this is the place to do it.\n\"\"\"\n\nimport re\n\nfrom armi import interfaces, runLog\nfrom armi.bookkeeping import report\nfrom armi.bookkeeping.report import reportingUtils\nfrom armi.physics import neutronics\nfrom armi.physics.neutronics.settings import CONF_NEUTRONICS_TYPE\nfrom armi.reactor.flags import Flags\nfrom armi.utils import reportPlotting, units\n\nORDER = interfaces.STACK_ORDER.BEFORE + interfaces.STACK_ORDER.BOOKKEEPING\n\n\ndef describeInterfaces(cs):\n    \"\"\"Function for exposing interface(s) to other code.\"\"\"\n    if cs[\"genReports\"]:\n        return (ReportInterface, {})\n    return None\n\n\nclass ReportInterface(interfaces.Interface):\n    \"\"\"An interface to manage the use of the report system.\"\"\"\n\n    name = \"report\"\n\n    reports = set()\n\n    def __init__(self, r, cs):\n        interfaces.Interface.__init__(self, r, cs)\n        self.fuelCycleSummary = {\"bocFissile\": 0.0}\n\n    def distributable(self):\n        \"\"\"Disables distributing of this report by broadcast MPI.\"\"\"\n        return self.Distribute.SKIP\n\n    def interactBOL(self):\n        interfaces.Interface.interactBOL(self)\n        runLog.important(\"Beginning of BOL Reports\")\n        
reportingUtils.makeCoreAndAssemblyMaps(self.r, self.cs)\n        reportingUtils.writeAssemblyMassSummary(self.r)\n\n        if self.cs[\"summarizeAssemDesign\"]:\n            reportingUtils.summarizePinDesign(self.r.core)\n\n        runLog.info(report.ALL[report.RUN_META])\n\n    def interactEveryNode(self, cycle, node):\n        self.r.core.calcBlockMaxes()\n        reportingUtils.summarizePowerPeaking(self.r.core)\n\n        runLog.important(\"Cycle {}, node {} Summary: \".format(cycle, node))\n        runLog.important(\n            \"  time= {0:8.2f} years, keff= {1:.12f} maxPD= {2:-8.2f} MW/m^2, maxBuI= {3:-8.4f} maxBuF= {4:8.4f}\".format(\n                self.r.p.time,\n                self.r.core.p.keff,\n                self.r.core.p.maxPD,\n                self.r.core.p.maxBuI,\n                self.r.core.p.maxBuF,\n            )\n        )\n\n        if self.cs[\"plots\"]:\n            adjoint = self.cs[CONF_NEUTRONICS_TYPE] == neutronics.ADJREAL_CALC\n            figName = self.cs.caseTitle + \"_{0}_{1}\".format(cycle, node) + \".mgFlux.\" + self.cs[\"outputFileExtension\"]\n\n            if self.r.core.getFirstBlock(Flags.FUEL).p.mgFlux is not None:\n                from armi.reactor import blocks\n\n                blocks.Block.plotFlux(self.r.core, fName=figName, peak=True, adjoint=adjoint)\n            else:\n                runLog.warning(\"No mgFlux to plot in reports\")\n\n    def interactBOC(self, cycle=None):\n        self.fuelCycleSummary[\"bocFissile\"] = self.r.core.getTotalBlockParam(\"kgFis\")\n\n    def interactEOC(self, cycle=None):\n        reportingUtils.writeCycleSummary(self.r.core)\n        runLog.info(self.o.timer.report(inclusionCutoff=0.001))\n\n    def generateDesignReport(self, generateFullCoreMap, showBlockAxMesh):\n        reportingUtils.makeCoreDesignReport(self.r.core, self.cs)\n        reportingUtils.makeCoreAndAssemblyMaps(self.r, self.cs, generateFullCoreMap, showBlockAxMesh)\n        
reportingUtils.makeBlockDesignReport(self.r)\n\n    def interactEOL(self):\n        \"\"\"Adds the data to the report, and generates it.\"\"\"\n        b = self.r.core.getFirstBlock(Flags.FUEL)\n        b.setAreaFractionsReport()\n\n        dbi = self.o.getInterface(\"database\")\n        buGroups = self.cs[\"buGroups\"]\n        history = self.o.getInterface(\"history\")\n        reportPlotting.plotReactorPerformance(\n            self.r,\n            dbi,\n            buGroups,\n            extension=self.cs[\"outputFileExtension\"],\n            history=history,\n        )\n\n        reportingUtils.setNeutronBalancesReport(self.r.core)\n        self.writeRunSummary()\n        self.o.timer.stopAll()  # consider the run done\n        runLog.info(self.o.timer.report(inclusionCutoff=0.001, totalTime=True))\n        _timelinePlot = self.o.timer.timeline(self.cs.caseTitle, 0.03, totalTime=True)\n        runLog.info(self.printReports())\n\n    def printReports(self):\n        \"\"\"Report Interface Specific.\"\"\"\n        str_ = \"\"\n        for report_ in self.reports:\n            str_ += re.sub(\"\\n\", \"\\n\\t\", \"{}\".format(report_))\n\n        return \"---------- REPORTS BEGIN ----------\\n\" + str_ + \"\\n----------- REPORTS END -----------\"\n\n    def writeRunSummary(self):\n        \"\"\"Make a summary of the run.\"\"\"\n        # spent fuel pool report\n        if self.r.excore.get(\"sfp\") is not None:\n            self.reportSFP(self.r.excore[\"sfp\"])\n            self.countAssembliesSFP(self.r.excore[\"sfp\"])\n\n    @staticmethod\n    def reportSFP(sfp):\n        \"\"\"A high-level summary of the Spent Fuel Pool.\"\"\"\n        title = \"SpentFuelPool Report\"\n        runLog.important(\"-\" * len(title))\n        runLog.important(title)\n        runLog.important(\"-\" * len(title))\n        totFis = 0.0\n        for a in sfp:\n            runLog.important(\n                \"{assembly:15s} discharged at t={dTime:10f} after {residence:10f} yrs. 
It entered at cycle: {cycle}. \"\n                \"It has {fiss:10f} kg (x {mult}) fissile and peak BU={bu:.2f} %.\".format(\n                    assembly=a,\n                    dTime=a.p.dischargeTime,\n                    residence=(a.p.dischargeTime - a.p.chargeTime),\n                    cycle=a.p.chargeCycle,\n                    fiss=a.getFissileMass(),\n                    bu=a.getMaxParam(\"percentBu\"),\n                    mult=a.p.multiplicity,\n                )\n            )\n            totFis += a.getFissileMass() * a.p.multiplicity / 1000  # convert to kg\n\n        runLog.important(\"Total SFP fissile inventory of {0} is {1:.4E} MT\".format(sfp, totFis / 1000.0))\n\n    @staticmethod\n    def countAssembliesSFP(sfp):\n        \"\"\"Report on the count of assemblies in the SFP at each timestep.\"\"\"\n        if not len(sfp):\n            return\n\n        runLog.important(\"Count:\")\n        totCount = 0\n        thisTimeCount = 0\n        a = sfp[0]\n        lastTime = a.getAge() / units.DAYS_PER_YEAR + a.p.chargeTime\n\n        for a in sfp:\n            thisTime = a.getAge() / units.DAYS_PER_YEAR + a.p.chargeTime\n\n            if thisTime != lastTime:\n                runLog.important(\n                    \"Number of assemblies moved at t={0:6.2f}: {1:04d}. Cumulative: {2:04d}\".format(\n                        lastTime, thisTimeCount, totCount\n                    )\n                )\n                lastTime = thisTime\n                thisTimeCount = 0\n            totCount += 1  # noqa: SIM113\n            thisTimeCount += 1\n"
  },
  {
    "path": "armi/bookkeeping/report/reportingUtils.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A collection of miscellaneous functions used by ReportInterface to generate various reports.\"\"\"\n\nimport collections\nimport os\nimport pathlib\nimport re\nimport subprocess\nimport sys\nimport textwrap\nimport time\nfrom copy import copy\n\nimport numpy as np\n\nfrom armi import context, interfaces, runLog\nfrom armi.bookkeeping import report\nfrom armi.operators import RunTypes\nfrom armi.reactor.components import ComponentType\nfrom armi.reactor.flags import Flags\nfrom armi.utils import (\n    getFileSHA1Hash,\n    iterables,\n    plotting,\n    tabulate,\n    textProcessors,\n    units,\n)\n\n# Set to prevent the image and text from being too small to read.\nMAX_ASSEMS_PER_ASSEM_PLOT = 6\n\n# String constants\nOperator_CaseTitle = \"Case Title:\"\nOperator_TypeOfRun = \"Run Type:\"\nOperator_NumProcessors = \"Number of Processors:\"\nOperator_WorkingDirectory = \"Working Directory:\"\nOperator_CurrentUser = \"Current User:\"\nOperator_PythonInterperter = \"Python Interpreter:\"\nOperator_PythonExecutable = \"Python Executable:\"\nOperator_ArmiCodebase = \"ARMI Location:\"\nOperator_MasterMachine = \"Master Machine:\"\nOperator_Date = \"Date and Time:\"\nOperator_CaseDescription = \"Case Description:\"\n\n\ndef writeWelcomeHeaders(o, cs):\n    \"\"\"Write welcome information using the Operator and the Case Settings.\"\"\"\n\n    def 
_writeCaseInformation(o, cs):\n        \"\"\"Create a table that contains basic case information.\"\"\"\n        caseInfo = [\n            (Operator_CaseTitle, cs.caseTitle),\n            (\n                Operator_CaseDescription,\n                \"{0}\".format(textwrap.fill(cs[\"comment\"], break_long_words=False)),\n            ),\n            (\n                Operator_TypeOfRun,\n                \"{} - {}\".format(cs[\"runType\"], o.__class__.__name__),\n            ),\n            (Operator_CurrentUser, context.USER),\n            (Operator_ArmiCodebase, context.ROOT),\n            (Operator_WorkingDirectory, os.getcwd()),\n            (Operator_PythonInterperter, sys.version),\n            (Operator_PythonExecutable, sys.executable),\n            (Operator_MasterMachine, getNodeName()),\n            (Operator_NumProcessors, context.MPI_SIZE),\n            (Operator_Date, context.START_TIME),\n        ]\n\n        runLog.header(\"=========== Case Information ===========\")\n        runLog.info(tabulate.tabulate(caseInfo, tableFmt=\"armi\"))\n\n    def _listInputFiles(cs):\n        \"\"\"\n        Gathers information about the input files of this case.\n\n        Returns\n        -------\n        inputInfo : list\n            (label, fileName, shaHash) tuples\n        \"\"\"\n        from armi.physics.neutronics.settings import CONF_LOADING_FILE\n\n        pathToLoading = pathlib.Path(cs.inputDirectory) / cs[CONF_LOADING_FILE]\n\n        if pathToLoading.is_file():\n            if pathToLoading.suffix.lower() in (\".h5\", \".hdf5\"):\n                # The blueprints are in a database, there aren't multiple included files\n                includedBlueprints = [pathToLoading]\n            else:\n                includedBlueprints = [inclusion[0] for inclusion in textProcessors.findYamlInclusions(pathToLoading)]\n        else:\n            includedBlueprints = []\n\n        inputInfo = []\n        inputFiles = [\n            (\n                \"Case 
Settings\",\n                os.path.basename(cs.path) if cs.path else cs.caseTitle + \".yaml\",\n            ),  # This could be a YAML or an h5.\n            (\"Blueprints\", cs[CONF_LOADING_FILE]),\n        ] + [(\"Included blueprints\", inclBp) for inclBp in includedBlueprints]\n\n        activeInterfaces = interfaces.getActiveInterfaceInfo(cs)\n        for klass, kwargs in activeInterfaces:\n            if not kwargs.get(\"enabled\", True):\n                # Don't consider disabled interfaces\n                continue\n            interfaceFileNames = klass.specifyInputs(cs)\n            for label, fileNames in interfaceFileNames.items():\n                for fName in fileNames:\n                    inputFiles.append((label, fName))\n\n        if cs[\"reloadDBName\"] and cs[\"runType\"] == RunTypes.SNAPSHOTS:\n            inputFiles.append((\"Database\", cs[\"reloadDBName\"]))\n        for label, fName in inputFiles:\n            shaHash = \"MISSING\" if (not fName or not os.path.exists(fName)) else getFileSHA1Hash(fName, digits=10)\n            inputInfo.append((label, fName, shaHash))\n\n        # bonus: grab the files stored in the crossSectionControl section\n        for xsID, xsSetting in cs[\"crossSectionControl\"].items():\n            fNames = []\n            # Users shouldn't ever have both of these defined, but this is not the place\n            # for code to fail if they do. 
Allow for both to not be None.\n            if xsSetting.xsFileLocation is not None:\n                # possibly a list of files\n                if isinstance(xsSetting.xsFileLocation, list):\n                    fNames.extend(xsSetting.xsFileLocation)\n                else:\n                    fNames.append(xsSetting.xsFileLocation)\n            if xsSetting.fluxFileLocation is not None:\n                # single file\n                fNames.append(xsSetting.fluxFileLocation)\n            for fName in fNames:\n                label = f\"crossSectionControl-{xsID}\"\n                if fName and os.path.exists(fName):\n                    shaHash = getFileSHA1Hash(os.path.abspath(fName), digits=10)\n                    inputInfo.append((label, fName, shaHash))\n\n        return inputInfo\n\n    def _writeInputFileInformation(cs):\n        \"\"\"Create a table that contains basic input file information.\"\"\"\n        inputFileData = []\n        for label, fileName, shaHash in _listInputFiles(cs):\n            inputFileData.append((label, fileName, shaHash))\n\n        runLog.header(\"=========== Input File Information ===========\")\n        runLog.info(\n            tabulate.tabulate(\n                inputFileData,\n                headers=[\"Input Type\", \"Path\", \"SHA-1 Hash\"],\n                tableFmt=\"armi\",\n            )\n        )\n\n    def _writeMachineInformation():\n        \"\"\"Create a table that contains basic machine and rank information.\"\"\"\n        processorNames = context.MPI_NODENAMES\n        uniqueNames = set(processorNames)\n        nodeMappingData = []\n        sysInfo = \"\"\n        for uniqueName in uniqueNames:\n            matchingProcs = [str(rank) for rank, procName in enumerate(processorNames) if procName == uniqueName]\n            numProcessors = str(len(matchingProcs))\n            nodeMappingData.append((uniqueName, numProcessors, \", \".join(matchingProcs)))\n\n            sysInfo += getSystemInfo()\n\n        
runLog.header(\"=========== Machine Information ===========\")\n        runLog.info(\n            tabulate.tabulate(\n                nodeMappingData,\n                headers=[\"Machine\", \"Number of Processors\", \"Ranks\"],\n                tableFmt=\"armi\",\n            )\n        )\n\n        if sysInfo:\n            runLog.header(\"=========== System Information ===========\")\n            runLog.info(sysInfo)\n\n    def _writeReactorCycleInformation(o, cs):\n        \"\"\"Verify that all the operating parameters are defined for the same number of cycles.\"\"\"\n        operatingData = [\n            (\"Reactor Thermal Power (MW):\", cs[\"power\"] / units.WATTS_PER_MW),\n            (\"Number of Cycles:\", cs[\"nCycles\"]),\n        ]\n        operatingParams = {\n            \"Cycle Lengths:\": o.cycleLengths,\n            \"Availability Factors:\": o.availabilityFactors,\n            \"Power Fractions:\": o.powerFractions,\n            \"Step Lengths (days):\": o.stepLengths,\n        }\n\n        for name, param in operatingParams.items():\n            paramStr = [str(p) for p in param]\n            operatingData.append((name, textwrap.fill(\", \".join(paramStr))))\n        runLog.header(\"=========== Reactor Cycle Information ===========\")\n        runLog.info(tabulate.tabulate(operatingData, tableFmt=\"armi\"))\n\n    if context.MPI_RANK > 0:\n        return  # prevent the worker nodes from printing the same thing\n\n    _writeCaseInformation(o, cs)\n    _writeInputFileInformation(cs)\n    _writeMachineInformation()\n    _writeReactorCycleInformation(o, cs)\n\n\ndef getNodeName():\n    \"\"\"Get the name of this compute node.\n\n    First, look in context.py. Then try various Linux tools. 
Then try Windows commands.\n\n    Returns\n    -------\n    str\n        Compute node name.\n    \"\"\"\n    hostNames = [\n        context.MPI_NODENAME,\n        context.MPI_NODENAMES[0],\n        subprocess.run(\"hostname\", capture_output=True, text=True, shell=True).stdout,\n        subprocess.run(\"uname -n\", capture_output=True, text=True, shell=True).stdout,\n        os.environ.get(\"COMPUTERNAME\", context.LOCAL),\n    ]\n    for nodeName in hostNames:\n        if nodeName and nodeName != context.LOCAL:\n            return nodeName\n\n    return context.LOCAL\n\n\ndef _getSystemInfoWindows():\n    \"\"\"Get system information, assuming the system is Windows.\n\n    Returns\n    -------\n    str\n        Basic system information: OS name, OS version, basic processor information\n\n    Examples\n    --------\n    Example results:\n\n        OS Name:         Microsoft Windows 10 Enterprise\n        OS Version:      10.0.19041 N/A Build 19041\n        Processor(s):    1 Processor(s) Installed.\n                         [01]: Intel64 Family 6 Model 142 Stepping 12 GenuineIntel ~801 Mhz\n    \"\"\"\n    cmd = (\n        'systeminfo | findstr /B /C:\"OS Name\" /B /C:\"OS Version\" /B /C:\"Processor\" && systeminfo | findstr /E /C:\"Mhz\"'\n    )\n    return subprocess.run(cmd, capture_output=True, text=True, shell=True).stdout\n\n\ndef _getSystemInfoMac():\n    \"\"\"Get system information, assuming the system is MacOS.\n\n    Returns\n    -------\n    str\n        Basic system information: OS name, OS version, basic processor information\n\n    Examples\n    --------\n    Example results:\n\n        System Software Overview:\n\n        System Version: macOS 12.1 (21C52)\n        Kernel Version: Darwin 21.2.0\n        ...\n        Hardware Overview:\n        Model Name: MacBook Pro\n        ...\n    \"\"\"\n    cmd = \"system_profiler SPSoftwareDataType SPHardwareDataType\"\n    return subprocess.check_output(cmd, shell=True).decode(\"utf-8\")\n\n\ndef 
_getSystemInfoLinux():\n    \"\"\"Get system information, assuming the system is Linux.\n\n    This method uses multiple, redundant variations on common Linux command utilities to get the\n    information necessary. While it is not possible to guarantee what programs or files will be\n    available on \"all Linux operating system\", this collection of tools is widely supported and\n    should provide a reasonably broad-distribution coverage.\n\n    Returns\n    -------\n    str\n        Basic system information: OS name, OS version, basic processor information\n\n    Examples\n    --------\n    Example results:\n\n        OS Info:  Ubuntu 22.04.3 LTS\n        Processor(s):\n            processor   : 0\n            vendor_id   : GenuineIntel\n            cpu family  : 6\n            model       : 126\n            model name  : Intel(R) Core(TM) i5-1035G1 CPU @ 1.00GHz\n            ...\n    \"\"\"\n    # get OS name / version\n    linuxOsCommands = [\n        'cat /etc/os-release | grep \"^PRETTY_NAME=\" | cut -d = -f 2',\n        \"uname -a\",\n        \"lsb_release -d | cut -d : -f 2\",\n        'hostnamectl | grep \"Operating System\" | cut -d : -f 2',\n    ]\n    osInfo = \"\"\n    for cmd in linuxOsCommands:\n        osInfo = subprocess.run(cmd, capture_output=True, text=True, shell=True).stdout.strip()\n        if osInfo:\n            break\n\n    if not osInfo:\n        runLog.warning(\"Linux OS information not found.\")\n        return \"\"\n\n    # get processor information\n    linuxProcCommands = [\"lscpu\", \"cat /proc/cpuinfo\", \"lshw -class CPU\"]\n    procInfo = \"\"\n    for cmd in linuxProcCommands:\n        procInfo = subprocess.run(cmd, capture_output=True, text=True, shell=True).stdout\n        if procInfo:\n            break\n\n    if not procInfo:\n        runLog.warning(\"Linux processor information not found.\")\n        return \"\"\n\n    # build output string\n    out = \"OS Info:  \"\n    out += osInfo.strip()\n    out += 
\"\\nProcessor(s):\\n    \"\n    out += procInfo.strip().replace(\"\\n\", \"\\n    \")\n    out += \"\\n\"\n\n    return out\n\n\ndef getSystemInfo():\n    \"\"\"Get system information, assuming the system is Linux, MacOS, and Windows.\n\n    Notes\n    -----\n    The format of the system information will be different on Linux, MacOS, and Windows.\n\n    Returns\n    -------\n    str\n        Basic system information: OS name, OS version, basic processor information\n    \"\"\"\n    # Get basic system information (on Linux, MacOS, and Windows)\n    if \"darwin\" in sys.platform:\n        return _getSystemInfoMac()\n    elif \"win\" in sys.platform:\n        return _getSystemInfoWindows()\n    elif \"linux\" in sys.platform:\n        return _getSystemInfoLinux()\n    else:\n        runLog.warning(\n            f\"Cannot get system information for {sys.platform} because ARMI only \"\n            + \"supports Linux, MacOS, and Windows.\"\n        )\n        return \"\"\n\n\ndef getInterfaceStackSummary(o):\n    data = []\n    for ii, i in enumerate(o.interfaces, start=1):\n        data.append(\n            (\n                \"{:02d}\".format(ii),\n                i.__class__.__name__.replace(\"Interface\", \"\"),\n                i.name,\n                i.purpose,\n                \"Yes\" if i.enabled() else \"No\",\n                \"Reversed\" if i.reverseAtEOL else \"Normal\",\n                \"Yes\" if i.bolForce() else \"No\",\n            )\n        )\n    text = tabulate.tabulate(\n        data,\n        headers=(\n            \"Index\",\n            \"Type\",\n            \"Name\",\n            \"Purpose\",\n            \"Enabled\",\n            \"EOL order\",\n            \"BOL forced\",\n        ),\n        tableFmt=\"armi\",\n    )\n    text = text\n    return text\n\n\ndef writeTightCouplingConvergenceSummary(convergenceSummary):\n    runLog.info(\"Tight Coupling Convergence Summary\")\n    runLog.info(tabulate.tabulate(convergenceSummary, 
headers=\"keys\", showIndex=True, tableFmt=\"armi\"))\n\n\ndef writeAssemblyMassSummary(r):\n    \"\"\"Print out things like Assembly weights to the runLog.\n\n    Parameters\n    ----------\n    r : armi.reactor.reactors.Reactor\n    \"\"\"\n    massSum = []\n\n    for a in r.blueprints.assemblies.values():\n        mass = 0.0\n        hmMass = 0.0\n        fissileMass = 0.0\n        coolantMass = 0.0  # to calculate wet vs. dry weight.\n        types = []\n\n        for b in a:\n            # get masses in kg\n            # skip stationary blocks (grid plate doesn't count)\n            if b.hasFlags(Flags.GRID_PLATE):\n                continue\n            mass += b.getMass() / 1000.0\n            hmMass += b.getHMMass() / 1000.0\n            fissileMass += b.getFissileMass() / 1000.0\n            coolants = b.getComponents(Flags.COOLANT, exact=True) + b.getComponents(Flags.INTERCOOLANT, exact=True)\n            coolantMass += sum(coolant.getMass() for coolant in coolants) / 1000.0\n\n            blockType = b.getType()\n            if blockType not in types:\n                types.append(blockType)\n        # If the BOL fuel assem is in the center of the core, its area is 1/3 of the full area b/c\n        # its a sliced assem.\n\n        # count assemblies\n        core = r.core\n        thisTypeList = core.getChildrenOfType(a.getType())\n        count = 0\n        for t in thisTypeList:\n            ring, _pos = t.spatialLocator.getRingPos()\n            if ring == 1:\n                # only count center location once.\n                count += 1\n            else:\n                # add 3 if it's 1/3 core, etc.\n                count += core.powerMultiplier\n\n        # Get the dominant materials\n        pinMaterialKey = \"pinMaterial\"\n        pinMaterialObj = a.getDominantMaterial([Flags.FUEL, Flags.CONTROL])\n        if pinMaterialObj is None:\n            pinMaterialObj = a.getDominantMaterial()\n            pinMaterialKey = \"dominantMaterial\"\n        
    pinMaterial = pinMaterialObj.name\n        else:\n            pinMaterial = pinMaterialObj.name\n\n        struct = a.getDominantMaterial([Flags.CLAD, Flags.DUCT, Flags.SHIELD])\n        if struct:\n            structuralMaterial = struct.name\n        else:\n            structuralMaterial = \"[None]\"\n        cool = a.getDominantMaterial([Flags.COOLANT])\n        if cool:\n            coolantMaterial = cool.name\n        else:\n            coolantMaterial = \"[None]\"\n\n        # Get pins per assembly\n        pinsPerAssembly = 0\n        for candidate in (Flags.FUEL, Flags.CONTROL, Flags.SHIELD):\n            b = a.getFirstBlock(candidate)\n            if b:\n                pinsPerAssembly = b.getNumPins()\n            if pinsPerAssembly:\n                break\n\n        massSum.append(\n            {\n                \"type\": a.getType(),\n                \"wetMass\": mass,\n                \"hmMass\": hmMass,\n                \"fissileMass\": fissileMass,\n                \"dryMass\": mass - coolantMass,\n                \"count\": count,\n                \"components\": types,\n                pinMaterialKey: pinMaterial,\n                \"structuralMaterial\": structuralMaterial,\n                \"coolantMaterial\": coolantMaterial,\n                \"pinsPerAssembly\": pinsPerAssembly,\n            }\n        )\n\n    runLog.important(_makeBOLAssemblyMassSummary(massSum))\n    runLog.important(_makeTotalAssemblyMassSummary(massSum))\n\n\ndef _makeBOLAssemblyMassSummary(massSum):\n    str_ = [\"--- BOL Assembly Mass Summary (kg) ---\"]\n    dataLabels = [\"wetMass\", \"dryMass\", \"fissileMass\", \"hmMass\", \"count\"]\n    # print header for the printout of each assembly type\n    str_.append(\" \" * 12 + \"\".join([\"{0:25s}\".format(s[\"type\"]) for s in massSum]))\n    for val in dataLabels:\n        line = \"\"\n        for s in massSum:\n            line += \"{0:<25.3f}\".format(s[val])\n        str_.append(\"{0:12s}{1}\".format(val, 
line))\n\n    # print blocks in this assembly up to 10\n    for i in range(10):\n        line = \" \" * 12\n        for s in massSum:\n            try:\n                line += \"{0:25s}\".format(s[\"components\"][i])\n            except IndexError:\n                line += \" \" * 25\n        if re.search(r\"\\S\", line):  # \\S matches any non-whitespace character.\n            str_.append(line)\n\n    return \"\\n\".join(str_)\n\n\ndef _makeTotalAssemblyMassSummary(massSum):\n    massLabels = [\"wetMass\", \"dryMass\", \"fissileMass\", \"hmMass\"]\n    totals = {}\n    count = 0\n\n    str_ = [\"--Totals--\"]\n    for label in massLabels:\n        totals[label] = 0.0\n        for assemSum in massSum:\n            totals[label] += assemSum[label] * assemSum[\"count\"]\n            count += assemSum[\"count\"]\n        str_.append(\"{0:12s} {1:.2f} MT\".format(label, totals[label] / 1000.0))\n    str_.append(\"Total assembly count: {0}\".format(count // len(massLabels)))\n    return \"\\n\".join(str_)\n\n\ndef writeCycleSummary(core):\n    \"\"\"Prints a cycle summary to the runLog.\n\n    Parameters\n    ----------\n    core: armi.reactor.reactors.Core\n    cs: armi.settings.caseSettings.Settings\n    \"\"\"\n    # Would io be worth considering for this?\n    cycle = core.r.p.cycle\n    str_ = []\n    runLog.important(\"Cycle {0} Summary:\".format(cycle))\n    avgBu = core.calcAvgParam(\"percentBu\", typeSpec=Flags.FUEL, generationNum=2)\n    str_.append(\"Core Average Burnup: {0}\".format(avgBu))\n    str_.append(\"End of Cycle {0:02d}. Timestamp: {1} \".format(cycle, time.ctime()))\n\n    runLog.info(\"\\n\".join(str_))\n\n\ndef setNeutronBalancesReport(core):\n    \"\"\"Determines the various neutron balances over the full core.\n\n    Parameters\n    ----------\n    core : armi.reactor.reactors.Core\n    \"\"\"\n    if not core.getFirstBlock().p.rateCap:\n        runLog.warning(\n            \"No rate information (rateCap, rateAbs, etc.) 
available on the blocks. Skipping balance summary.\"\n        )\n        return\n\n    cap = core.calcAvgParam(\"rateCap\", volumeAveraged=False, generationNum=2)\n    absorb = core.calcAvgParam(\"rateAbs\", volumeAveraged=False, generationNum=2)\n    fis = core.calcAvgParam(\"rateFis\", volumeAveraged=False, generationNum=2)\n    n2nProd = core.calcAvgParam(\"rateProdN2n\", volumeAveraged=False, generationNum=2)\n    fisProd = core.calcAvgParam(\"rateProdFis\", volumeAveraged=False, generationNum=2)\n\n    leak = n2nProd + fisProd - absorb\n\n    report.setData(\n        \"Fission\",\n        \"{0:.5e} ({1:.2%})\".format(fisProd, fisProd / (fisProd + n2nProd)),\n        report.NEUT_PROD,\n    )\n    report.setData(\n        \"n, 2n\",\n        \"{0:.5e} ({1:.2%})\".format(n2nProd, n2nProd / (fisProd + n2nProd)),\n        report.NEUT_PROD,\n    )\n    report.setData(\n        \"Capture\",\n        \"{0:.5e} ({1:.2%})\".format(cap, cap / (absorb + leak)),\n        report.NEUT_LOSS,\n    )\n    report.setData(\n        \"Fission\",\n        \"{0:.5e} ({1:.2%})\".format(fis, fis / (absorb + leak)),\n        report.NEUT_LOSS,\n    )\n    report.setData(\n        \"Absorption\",\n        \"{0:.5e} ({1:.2%})\".format(absorb, absorb / (absorb + leak)),\n        report.NEUT_LOSS,\n    )\n    report.setData(\n        \"Leakage\",\n        \"{0:.5e} ({1:.2%})\".format(leak, leak / (absorb + leak)),\n        report.NEUT_LOSS,\n    )\n\n    runLog.info(report.ALL[report.NEUT_PROD])\n    runLog.info(report.ALL[report.NEUT_LOSS])\n\n\ndef summarizePinDesign(core):\n    \"\"\"Prints out some information about the pin assembly/duct design.\n\n    Handles multiple types of dimensions simplistically by taking the average.\n\n    Parameters\n    ----------\n    core : armi.reactor.reactors.Core\n    \"\"\"\n    designInfo = collections.defaultdict(list)\n\n    try:\n        for b in core.iterBlocks(Flags.FUEL):\n            fuel = b.getComponent(Flags.FUEL)\n            duct = 
b.getComponent(Flags.DUCT)\n            clad = b.getComponent(Flags.CLAD)\n            wire = b.getComponent(Flags.WIRE)\n            designInfo[\"hot sd\"].append(b.getSmearDensity(cold=False))\n            designInfo[\"sd\"].append(b.getSmearDensity())\n            designInfo[\"ductThick\"].append(\n                (duct.getDimension(\"op\") - duct.getDimension(\"ip\")) * 5.0\n            )  # convert to mm and divide by 2\n            designInfo[\"cladThick\"].append((clad.getDimension(\"od\") - clad.getDimension(\"id\")) * 5.0)\n            pinOD = clad.getDimension(\"od\") * 10.0\n            wireOD = wire.getDimension(\"od\") * 10.0\n            pitch = pinOD + wireOD  # pitch has half a wire on each side.\n            assemPitch = b.getPitch() * 10  # convert cm to mm.\n            designInfo[\"pinOD\"].append(pinOD)\n            designInfo[\"wireOD\"].append(wireOD)\n            designInfo[\"pin pitch\"].append(pitch)\n            pinToDuctGap = b.getPinToDuctGap()\n            if pinToDuctGap is not None:\n                designInfo[\"pinToDuct\"].append(b.getPinToDuctGap() * 10.0)\n            designInfo[\"assemPitch\"].append(assemPitch)\n            designInfo[\"duct gap\"].append(assemPitch - duct.getDimension(\"op\") * 10.0)\n            designInfo[\"nPins\"].append(b.p.nPins)\n            designInfo[\"zrFrac\"].append(fuel.getMassFrac(\"ZR\"))\n\n        # assumption made that all lists contain only numerical data\n        designInfo = {key: np.average(data) for key, data in designInfo.items()}\n\n        dimensionless = {\"sd\", \"hot sd\", \"zrFrac\", \"nPins\"}\n        for key, average_value in designInfo.items():\n            dim = \"{0:10s}\".format(key)\n            val = \"{0:.4f}\".format(average_value)\n            if key not in dimensionless:\n                val += \" mm\"\n            report.setData(dim, val, report.PIN_ASSEM_DESIGN)\n\n        a = core.refAssem\n        report.setData(\n            \"Fuel Height (cm):\",\n            
\"{0:.2f}\".format(a.getHeight(Flags.FUEL)),\n            report.PIN_ASSEM_DESIGN,\n        )\n        report.setData(\n            \"Plenum Height (cm):\",\n            \"{0:.2f}\".format(a.getHeight(Flags.PLENUM)),\n            report.PIN_ASSEM_DESIGN,\n        )\n        runLog.info(report.ALL[report.PIN_ASSEM_DESIGN])\n\n        first_fuel_block = core.getFirstBlock(Flags.FUEL)\n        runLog.info(\"Design & component information for first fuel block {}\".format(first_fuel_block))\n\n        runLog.info(first_fuel_block.setAreaFractionsReport())\n\n        for component_ in sorted(first_fuel_block):\n            runLog.info(component_.setDimensionReport())\n\n    except Exception as error:\n        runLog.warning(\"Pin summarization failed to work\")\n        runLog.warning(error)\n\n\ndef summarizePowerPeaking(core):\n    \"\"\"Prints reactor Fz, Fxy, Fq.\n\n    Parameters\n    ----------\n    core : armi.reactor.reactors.Core\n    \"\"\"\n    # Fz is the axial peaking of the highest power assembly\n    _maxPow, maxPowBlock = core.getMaxParam(\"power\", returnObj=True, generationNum=2)\n    maxPowAssem = maxPowBlock.parent\n    avgPDens = maxPowAssem.calcAvgParam(\"pdens\")\n    peakPDens = maxPowAssem.getMaxParam(\"pdens\")\n    if not avgPDens:\n        # protect against divide-by-zero. 
Peaking doesn't make sense if there is no power\n        return\n    axPeakF = peakPDens / avgPDens\n\n    # Fxy is the radial peaking factor, looking at ALL assemblies with axially integrated powers.\n    power = 0.0\n    n = 0\n    for n, a in enumerate(core):\n        power += a.calcTotalParam(\"power\", typeSpec=Flags.FUEL)\n    avgPow = power / (n + 1)\n    radPeakF = maxPowAssem.calcTotalParam(\"power\", typeSpec=Flags.FUEL) / avgPow\n\n    runLog.important(\n        \"Power Peaking: Fz= {0:.3f} Fxy= {1:.3f} Fq= {2:.3f}\".format(axPeakF, radPeakF, axPeakF * radPeakF)\n    )\n\n\ndef makeCoreDesignReport(core, cs):\n    \"\"\"Builds report to summarize core design inputs.\n\n    Parameters\n    ----------\n    core: armi.reactor.reactors.Core\n    cs: armi.settings.caseSettings.Settings\n    \"\"\"\n    coreDesignTable = report.data.Table(\"SUMMARY OF CORE: {}\".format(cs.caseTitle.upper()))\n    coreDesignTable.header = [\"\", \"Input Parameter\"]\n\n    # Change the ordering of the core design table in the report relative to the other data\n    report.data.Report.groupsOrderFirst.insert(0, coreDesignTable)\n    report.data.Report.componentWellGroups.insert(0, coreDesignTable)\n\n    _setGeneralCoreDesignData(cs, coreDesignTable)\n    _setGeneralCoreParametersData(core, cs, coreDesignTable)\n    _setGeneralSimulationData(core, cs, coreDesignTable)\n\n\ndef _setGeneralCoreDesignData(cs, coreDesignTable):\n    from armi.physics.fuelCycle.settings import CONF_SHUFFLE_LOGIC\n    from armi.physics.neutronics.settings import CONF_LOADING_FILE\n\n    report.setData(\"Case Title\", \"{}\".format(cs.caseTitle), coreDesignTable, report.DESIGN)\n    report.setData(\"Run Type\", \"{}\".format(cs[\"runType\"]), coreDesignTable, report.DESIGN)\n    report.setData(\n        \"Loading File\",\n        \"{}\".format(cs[CONF_LOADING_FILE]),\n        coreDesignTable,\n        report.DESIGN,\n    )\n    report.setData(\n        \"Fuel Shuffling Logic File\",\n        
\"{}\".format(cs[CONF_SHUFFLE_LOGIC]),\n        coreDesignTable,\n        report.DESIGN,\n    )\n    report.setData(\n        \"Reactor State Loading\",\n        \"{}\".format(cs[\"loadStyle\"]),\n        coreDesignTable,\n        report.DESIGN,\n    )\n    if cs[\"loadStyle\"] == \"fromDB\":\n        report.setData(\n            \"Database File\",\n            \"{}\".format(cs[\"reloadDBName\"]),\n            coreDesignTable,\n            report.DESIGN,\n        )\n        report.setData(\n            \"Starting Cycle\",\n            \"{}\".format(cs[\"startCycle\"]),\n            coreDesignTable,\n            report.DESIGN,\n        )\n        report.setData(\n            \"Starting Node\",\n            \"{}\".format(cs[\"startNode\"]),\n            coreDesignTable,\n            report.DESIGN,\n        )\n\n\ndef _setGeneralCoreParametersData(core, cs, coreDesignTable):\n    blocks = core.getBlocks()\n    totalMass = sum(b.getMass() for b in blocks)\n    fissileMass = sum(b.getFissileMass() for b in blocks)\n    heavyMetalMass = sum(b.getHMMass() for b in blocks)\n    totalVolume = sum(b.getVolume() for b in blocks)\n    report.setData(\" \", \"\", coreDesignTable, report.DESIGN)\n    report.setData(\n        \"Core Power\",\n        \"{:.2f} MWth\".format(cs[\"power\"] / units.WATTS_PER_MW),\n        coreDesignTable,\n        report.DESIGN,\n    )\n    report.setData(\n        \"Base Capacity Factor\",\n        \"{}\".format(cs[\"availabilityFactor\"]),\n        coreDesignTable,\n        report.DESIGN,\n    )  # note this doesn't consider availabilityFactors\n    report.setData(\n        \"Cycle Length\",\n        \"{} days\".format(cs[\"cycleLength\"]),\n        coreDesignTable,\n        report.DESIGN,\n    )  # note this doesn't consider cycleLengths\n    report.setData(\"Burnup Cycles\", \"{}\".format(cs[\"nCycles\"]), coreDesignTable, report.DESIGN)\n    report.setData(\n        \"Burnup Steps per Cycle\",\n        \"{}\".format(cs[\"burnSteps\"]),\n        
coreDesignTable,\n        report.DESIGN,\n    )  # note this doesn't consider the detailed cycle input option\n    corePowerMult = int(core.powerMultiplier)\n    report.setData(\n        \"Core Total Volume\",\n        \"{:.2f} cc\".format(totalVolume * corePowerMult),\n        coreDesignTable,\n        report.DESIGN,\n    )\n    report.setData(\n        \"Core Fissile Mass\",\n        \"{:.2f} kg\".format(fissileMass / units.G_PER_KG * corePowerMult),\n        coreDesignTable,\n        report.DESIGN,\n    )\n    report.setData(\n        \"Core Heavy Metal Mass\",\n        \"{:.2f} kg\".format(heavyMetalMass / units.G_PER_KG * corePowerMult),\n        coreDesignTable,\n        report.DESIGN,\n    )\n    report.setData(\n        \"Core Total Mass\",\n        \"{:.2f} kg\".format(totalMass / units.G_PER_KG * corePowerMult),\n        coreDesignTable,\n        report.DESIGN,\n    )\n    report.setData(\n        \"Number of Assembly Rings\",\n        \"{}\".format(core.getNumRings()),\n        coreDesignTable,\n        report.DESIGN,\n    )\n    report.setData(\n        \"Number of Assemblies\",\n        \"{}\".format(len(core.getAssemblies() * corePowerMult)),\n        coreDesignTable,\n        report.DESIGN,\n    )\n    report.setData(\n        \"Number of Fuel Assemblies\",\n        \"{}\".format(len(core.getAssemblies(Flags.FUEL) * corePowerMult)),\n        coreDesignTable,\n        report.DESIGN,\n    )\n    report.setData(\n        \"Number of Control Assemblies\",\n        \"{}\".format(len(core.getAssemblies(Flags.CONTROL) * corePowerMult)),\n        coreDesignTable,\n        report.DESIGN,\n    )\n    report.setData(\n        \"Number of Reflector Assemblies\",\n        \"{}\".format(len(core.getAssemblies(Flags.REFLECTOR) * corePowerMult)),\n        coreDesignTable,\n        report.DESIGN,\n    )\n    report.setData(\n        \"Number of Shield Assemblies\",\n        \"{}\".format(len(core.getAssemblies(Flags.SHIELD) * corePowerMult)),\n        
coreDesignTable,\n        report.DESIGN,\n    )\n\n\ndef _setGeneralSimulationData(core, cs, coreDesignTable):\n    from armi.physics.neutronics.settings import CONF_GEN_XS, CONF_GLOBAL_FLUX_ACTIVE\n\n    report.setData(\"  \", \"\", coreDesignTable, report.DESIGN)\n    report.setData(\"Full Core Model\", \"{}\".format(core.isFullCore), coreDesignTable, report.DESIGN)\n    report.setData(\n        \"Tight Physics Coupling Enabled\",\n        \"{}\".format(bool(cs[\"tightCoupling\"])),\n        coreDesignTable,\n        report.DESIGN,\n    )\n    report.setData(\n        \"Lattice Physics Enabled for\",\n        \"{}\".format(cs[CONF_GEN_XS]),\n        coreDesignTable,\n        report.DESIGN,\n    )\n    report.setData(\n        \"Neutronics Enabled for\",\n        \"{}\".format(cs[CONF_GLOBAL_FLUX_ACTIVE]),\n        coreDesignTable,\n        report.DESIGN,\n    )\n\n\ndef makeBlockDesignReport(r):\n    \"\"\"Summarize the block designs from the loading file.\n\n    Parameters\n    ----------\n    r : armi.reactor.reactors.Reactor\n    \"\"\"\n    for bDesign in r.blueprints.blockDesigns:\n        loadingFileTable = report.data.Table(\"SUMMARY OF BLOCK: {}\".format(bDesign.name))\n        loadingFileTable.header = [\"\", \"Input Parameter\"]\n\n        # Change the ordering of the loading file table in the report relative to the other data\n        report.data.Report.groupsOrderFirst.append(loadingFileTable)\n        report.data.Report.componentWellGroups.append(loadingFileTable)\n        report.setData(\"Number of Components\", [len(bDesign)], loadingFileTable, report.DESIGN)\n        for i, cDesign in enumerate(bDesign):\n            cType = cDesign.name\n            componentSplitter = (i + 1) * \" \" + \"\\n\"\n            report.setData(componentSplitter, [\"\"], loadingFileTable, report.DESIGN)\n            dimensions = _getComponentInputDimensions(cDesign)\n            for label, values in dimensions.items():\n                value, unit = values\n            
    report.setData(\n                    \"{} {}\".format(cType, label),\n                    \"{} {}\".format(value, unit),\n                    loadingFileTable,\n                    report.DESIGN,\n                )\n\n\ndef _getComponentInputDimensions(cDesign):\n    \"\"\"Get the input dimensions of a component and place them in a dictionary with labels and units.\"\"\"\n    dims = collections.OrderedDict()\n    dims[\"Shape\"] = (cDesign.shape, \"\")\n    dims[\"Material\"] = (cDesign.material, \"\")\n    dims[\"Cold Temperature\"] = (cDesign.Tinput, \"C\")\n    dims[\"Hot Temperature\"] = (cDesign.Thot, \"C\")\n\n    if cDesign.isotopics is not None:\n        dims[\"Custom Isotopics\"] = (cDesign.isotopics, \"\")\n\n    for dimName in ComponentType.TYPES[cDesign.shape.lower()].DIMENSION_NAMES:\n        value = getattr(cDesign, dimName)\n\n        if value is not None:\n            # if not default, add it to the report\n            dims[dimName] = (getattr(cDesign, dimName).value, \"cm\")\n\n    return dims\n\n\ndef makeCoreAndAssemblyMaps(r, cs, generateFullCoreMap=False, showBlockAxMesh=True):\n    \"\"\"Create core and assembly design plots.\n\n    Parameters\n    ----------\n    r : armi.reactor.reactors.Reactor\n    cs: armi.settings.caseSettings.Settings\n    generateFullCoreMap : bool, default False\n    showBlockAxMesh : bool, default True\n    \"\"\"\n    assems = []\n    blueprints = r.blueprints\n    for aKey in blueprints.assemDesigns.keys():\n        a = blueprints.constructAssem(cs, name=aKey)\n        # since we will be plotting cold input heights, we need to make sure that\n        # that these new assemblies have access to a blueprints somewhere up the\n        # composite chain. normally this would happen through an assembly's parent\n        # reactor, but because these newly created assemblies are in the load queue,\n        # they will not have a parent reactor. 
to get around this, we just attach\n        # the blueprints to the assembly directly.\n        a.blueprints = blueprints\n        assems.append(a)\n\n    core = r.core\n    for plotNum, assemBatch in enumerate(iterables.chunk(assems, MAX_ASSEMS_PER_ASSEM_PLOT), start=1):\n        assemPlotImage = copy(report.ASSEM_TYPES)\n        assemPlotImage.title = assemPlotImage.title + \" ({})\".format(plotNum)\n        report.data.Report.groupsOrderFirst.insert(-1, assemPlotImage)\n        report.data.Report.componentWellGroups.insert(-1, assemPlotImage)\n        assemPlotName = os.path.abspath(f\"{core.name}AssemblyTypes{plotNum}.png\")\n        plotting.plotAssemblyTypes(\n            assemBatch,\n            assemPlotName,\n            maxAssems=MAX_ASSEMS_PER_ASSEM_PLOT,\n            showBlockAxMesh=showBlockAxMesh,\n            hot=False,\n        )\n\n    # Create radial core map\n    if generateFullCoreMap:\n        core.growToFullCore(cs)\n\n    counts = {\n        assemDesign.name: len(core.getChildrenOfType(assemDesign.name)) for assemDesign in r.blueprints.assemDesigns\n    }\n    # assemDesigns.keys is ordered based on input, assemOrder only contains types that are in the core\n    assemOrder = [aType for aType in r.blueprints.assemDesigns.keys() if counts[aType] > 0]\n    data = [assemOrder.index(a.p.type) for a in core]\n    labels = [r.blueprints.assemDesigns[a.p.type].specifier for a in core]\n    legendMap = [\n        (\n            ai,\n            assemDesign.specifier,\n            \"{} ({})\".format(assemDesign.name, counts[assemDesign.name]),\n        )\n        for ai, assemDesign in enumerate(r.blueprints.assemDesigns)\n        if counts[assemDesign.name] > 0\n    ]\n\n    fName = \"\".join([cs.caseTitle, \"RadialCoreMap.\", cs[\"outputFileExtension\"]])\n    plotting.plotFaceMap(\n        core,\n        title=\"{} Radial Core Map\".format(cs.caseTitle),\n        fName=fName,\n        cmapName=\"RdYlBu\",\n        data=data,\n        
labels=labels,\n        legendMap=legendMap,\n        axisEqual=True,\n        bare=True,\n        titleSize=10,\n        fontSize=8,\n    )\n\n    report.setData(\"Radial Core Map\", os.path.abspath(fName), report.FACE_MAP, report.DESIGN)\n\n\nCOMPONENT_INFO = \"Component Information\"\n"
  },
  {
    "path": "armi/bookkeeping/report/tests/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"
  },
  {
    "path": "armi/bookkeeping/report/tests/test_report.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Really basic tests of the report Utils.\"\"\"\n\nimport logging\nimport os\nimport subprocess\nimport sys\nimport unittest\nfrom glob import glob\nfrom unittest.mock import patch\n\nfrom armi import runLog, settings\nfrom armi.bookkeeping import report\nfrom armi.bookkeeping.report import data, reportInterface\nfrom armi.bookkeeping.report.reportingUtils import (\n    _getSystemInfoLinux,\n    _getSystemInfoMac,\n    _getSystemInfoWindows,\n    getNodeName,\n    getSystemInfo,\n    makeBlockDesignReport,\n    makeCoreDesignReport,\n    setNeutronBalancesReport,\n    summarizePinDesign,\n    summarizePowerPeaking,\n    writeAssemblyMassSummary,\n    writeCycleSummary,\n    writeWelcomeHeaders,\n)\nfrom armi.testing import loadTestReactor\nfrom armi.tests import mockRunLogs\nfrom armi.utils.directoryChangers import TemporaryDirectoryChanger\n\n\nclass _MockReturnResult:\n    \"\"\"Mocking the subprocess.run() return object.\"\"\"\n\n    def __init__(self, stdout):\n        self.stdout = stdout\n\n\nclass TestReportingUtils(unittest.TestCase):\n    def test_getSystemInfoLinux(self):\n        \"\"\"Test _getSystemInfoLinux() on any operating system, by mocking the system calls.\"\"\"\n        osInfo = '\"Ubuntu 22.04.3 LTS\"'\n        procInfo = \"\"\"processor : 0\nvendor_id   : GenuineIntel\ncpu family  : 6\nmodel       : 126\nmodel name  : Intel(R) Core(TM) 
i5-1035G1 CPU @ 1.00GHz\n...\n\"\"\"\n        correctResult = \"\"\"OS Info:  \"Ubuntu 22.04.3 LTS\"\nProcessor(s):\n    processor : 0\n    vendor_id   : GenuineIntel\n    cpu family  : 6\n    model       : 126\n    model name  : Intel(R) Core(TM) i5-1035G1 CPU @ 1.00GHz\n    ...\"\"\"\n\n        def __mockSubprocessRun(*args, **kwargs):\n            if \"os-release\" in args[0]:\n                return _MockReturnResult(osInfo)\n            else:\n                return _MockReturnResult(procInfo)\n\n        with patch.object(subprocess, \"run\", side_effect=__mockSubprocessRun):\n            out = _getSystemInfoLinux()\n            self.assertEqual(out.strip(), correctResult)\n\n    @patch(\"subprocess.run\")\n    def test_getSystemInfoWindows(self, mockSubprocess):\n        \"\"\"Test _getSystemInfoWindows() on any operating system, by mocking the system call.\"\"\"\n        windowsResult = \"\"\"OS Name:         Microsoft Windows 10 Enterprise\nOS Version:      10.0.19041 N/A Build 19041\nProcessor(s):    1 Processor(s) Installed.\n                 [01]: Intel64 Family 6 Model 142 Stepping 12 GenuineIntel ~801 Mhz\"\"\"\n\n        mockSubprocess.return_value = _MockReturnResult(windowsResult)\n\n        out = _getSystemInfoWindows()\n        self.assertEqual(out, windowsResult)\n\n    @patch(\"subprocess.run\")\n    def test_getSystemInfoMac(self, mockSubprocess):\n        \"\"\"Test _getSystemInfoMac() on any operating system, by mocking the system call.\"\"\"\n        macResult = b\"\"\"System Software Overview:\n\n        System Version: macOS 12.1 (21C52)\n        Kernel Version: Darwin 21.2.0\n        ...\n        Hardware Overview:\n        Model Name: MacBook Pro\n        ...\"\"\"\n\n        mockSubprocess.return_value = _MockReturnResult(macResult)\n\n        out = _getSystemInfoMac()\n        self.assertEqual(out, macResult.decode(\"utf-8\"))\n\n    def test_getSystemInfo(self):\n        \"\"\"Basic sanity check of getSystemInfo() running in the 
wild.\n\n        This test should pass if it is run on Window or mainstream Linux distros. But we expect this\n        to fail if the test is run on some other OS.\n        \"\"\"\n        if \"darwin\" in sys.platform:\n            # too complicated to test MacOS in this method\n            return\n\n        out = getSystemInfo()\n        substrings = [\"OS \", \"Processor(s):\"]\n\n        for sstr in substrings:\n            self.assertIn(sstr, out)\n\n        self.assertGreater(len(out), sum(len(sstr) + 5 for sstr in substrings))\n\n    def test_getNodeName(self):\n        \"\"\"Test that the getNodeName() method returns a non-empty string.\n\n        It is hard to know what string SHOULD be return here, and it would depend on how the OS is\n        set up on your machine or cluster. But this simple test needs to pass as-is on Windows\n        and Linux.\n        \"\"\"\n        self.assertGreater(len(getNodeName()), 0)\n\n\nclass TestReport(unittest.TestCase):\n    def setUp(self):\n        self.test_group = data.Table(settings.Settings(), \"banana\")\n\n    def test_setData(self):\n        report.setData(\"banana_1\", [\"sundae\", \"plain\"])\n        report.setData(\"banana_2\", [\"sundae\", \"vanilla\"], self.test_group)\n        report.setData(\"banana_3\", [\"sundae\", \"chocolate\"], self.test_group, [report.ALL])\n\n        with self.assertRaises(AttributeError):\n            report.setData(\"banana_4\", [\"sundae\", \"strawberry\"], \"no_workie\", [report.ALL])\n        with self.assertRaises(AttributeError):\n            report.setData(\"banana_5\", [\"sundae\", \"peanut_butter\"], self.test_group, \"no_workie\")\n\n        ungroup_instance = report.ALL[report.UNGROUPED]\n        self.assertEqual(ungroup_instance[\"banana_1\"], [\"sundae\", \"plain\"])\n\n        filled_instance = report.ALL[self.test_group]\n        self.assertEqual(filled_instance[\"banana_2\"], [\"sundae\", \"vanilla\"])\n        self.assertEqual(filled_instance[\"banana_3\"], 
[\"sundae\", \"chocolate\"])\n\n    def test_getData(self):\n        # test the null case\n        self.assertIsNone(self.test_group[\"fake\"])\n\n        # insert some data\n        self.test_group[\"banana_1\"] = [\"sundae\", \"plain\"]\n\n        # validate we can pull that data back out again\n        data = self.test_group[\"banana_1\"]\n        self.assertEqual(len(data), 2)\n        self.assertIn(\"sundae\", data)\n        self.assertIn(\"plain\", data)\n\n    def test_reactorSpecificReporting(self):\n        \"\"\"Test a number of reporting utils that require reactor/core information.\"\"\"\n        o, r = loadTestReactor(inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\")\n\n        # make sure makeCoreDesignReport() doesn't fail, though it won't generate an output here\n        makeCoreDesignReport(r.core, o.cs)\n        self.assertEqual(len(glob(\"*.html\")), 0)\n\n        with mockRunLogs.BufferLog() as mock:\n            # we should start with a clean slate\n            self.assertEqual(\"\", mock.getStdout())\n            runLog.LOG.startLog(\"test_reactorSpecificReporting\")\n            runLog.LOG.setVerbosity(logging.INFO)\n\n            writeAssemblyMassSummary(r)\n            self.assertIn(\"BOL Assembly Mass Summary\", mock.getStdout())\n            self.assertIn(\"igniter fuel\", mock.getStdout())\n            mock.emptyStdout()\n\n            setNeutronBalancesReport(r.core)\n            self.assertIn(\"No rate information\", mock.getStdout())\n            mock.emptyStdout()\n\n            r.core.getFirstBlock().p.rateCap = 1.0\n            r.core.getFirstBlock().p.rateProdFis = 1.02\n            r.core.getFirstBlock().p.rateFis = 1.01\n            r.core.getFirstBlock().p.rateAbs = 1.0\n            setNeutronBalancesReport(r.core)\n            self.assertIn(\"Fission\", mock.getStdout())\n            self.assertIn(\"Capture\", mock.getStdout())\n            self.assertIn(\"Absorption\", mock.getStdout())\n            
self.assertIn(\"Leakage\", mock.getStdout())\n            mock.emptyStdout()\n\n            summarizePinDesign(r.core)\n            self.assertIn(\"Assembly Design Summary\", mock.getStdout())\n            self.assertIn(\"Design & component information\", mock.getStdout())\n            self.assertIn(\"Multiplicity\", mock.getStdout())\n            mock.emptyStdout()\n\n            writeCycleSummary(r.core)\n            self.assertIn(\"Core Average\", mock.getStdout())\n            self.assertIn(\"End of Cycle\", mock.getStdout())\n            mock.emptyStdout()\n\n            # this report won't do much for the test reactor - improve test reactor\n            makeBlockDesignReport(r)\n            self.assertEqual(len(mock.getStdout()), 0)\n            mock.emptyStdout()\n\n            # this report won't do much for the test reactor - improve test reactor\n            summarizePowerPeaking(r.core)\n            self.assertEqual(len(mock.getStdout()), 0)\n\n    def test_writeWelcomeHeaders(self):\n        o, r = loadTestReactor(inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\")\n\n        # grab this file path\n        randoFile = os.path.abspath(__file__)\n\n        # pass that random file into the settings\n        o.cs[\"crossSectionControl\"][\"DA\"].xsFileLocation = randoFile\n        o.cs[\"crossSectionControl\"][\"DA\"].fluxFileLocation = randoFile\n\n        with mockRunLogs.BufferLog() as mock:\n            # we should start with a clean slate\n            self.assertEqual(\"\", mock.getStdout())\n            runLog.LOG.startLog(\"test_writeWelcomeHeaders\")\n            runLog.LOG.setVerbosity(logging.INFO)\n\n            writeWelcomeHeaders(o, o.cs)\n\n            # assert our random file (and a lot of other stuff) is in the welcome\n            self.assertIn(\"Case Info\", mock.getStdout())\n            self.assertIn(\"Input File Info\", mock.getStdout())\n            self.assertIn(\"crossSectionControl-DA\", mock.getStdout())\n            
self.assertIn(\"Python Executable\", mock.getStdout())\n            self.assertIn(randoFile, mock.getStdout())\n\n\nclass TestReportInterface(unittest.TestCase):\n    @classmethod\n    def setUpClass(cls):\n        cls.td = TemporaryDirectoryChanger()\n        cls.td.__enter__()\n\n    @classmethod\n    def tearDownClass(cls):\n        cls.td.__exit__(None, None, None)\n\n    def test_printReports(self):\n        \"\"\"Testing printReports method.\"\"\"\n        repInt = reportInterface.ReportInterface(None, None)\n        rep = repInt.printReports()\n\n        self.assertIn(\"REPORTS BEGIN\", rep)\n        self.assertIn(\"REPORTS END\", rep)\n\n    def test_distributableReportInt(self):\n        repInt = reportInterface.ReportInterface(None, None)\n        self.assertEqual(repInt.distributable(), 4)\n\n    def test_interactBOLReportInt(self):\n        o, r = loadTestReactor(inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\")\n        repInt = reportInterface.ReportInterface(r, o.cs)\n\n        with mockRunLogs.BufferLog() as mock:\n            repInt.interactBOL()\n            self.assertIn(\"Writing assem layout\", mock.getStdout())\n            self.assertIn(\"BOL Assembly\", mock.getStdout())\n            self.assertIn(\"wetMass\", mock.getStdout())\n\n    def test_interactEveryNode(self):\n        o, r = loadTestReactor(inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\")\n        repInt = reportInterface.ReportInterface(r, o.cs)\n\n        with mockRunLogs.BufferLog() as mock:\n            repInt.interactEveryNode(0, 0)\n            self.assertIn(\"Cycle 0\", mock.getStdout())\n            self.assertIn(\"node 0\", mock.getStdout())\n            self.assertIn(\"keff=\", mock.getStdout())\n\n    def test_interactBOC(self):\n        o, r = loadTestReactor(inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\")\n        repInt = reportInterface.ReportInterface(r, o.cs)\n\n        self.assertEqual(repInt.fuelCycleSummary[\"bocFissile\"], 0.0)\n 
       repInt.interactBOC(1)\n        self.assertAlmostEqual(repInt.fuelCycleSummary[\"bocFissile\"], 4.290603409612653)\n\n    def test_interactEOC(self):\n        o, r = loadTestReactor(inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\")\n        repInt = reportInterface.ReportInterface(r, o.cs)\n\n        with mockRunLogs.BufferLog() as mock:\n            repInt.interactEOC(0)\n            self.assertIn(\"Cycle 0\", mock.getStdout())\n            self.assertIn(\"TIMER REPORTS\", mock.getStdout())\n\n    def test_interactEOL(self):\n        o, r = loadTestReactor(inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\")\n        repInt = reportInterface.ReportInterface(r, o.cs)\n\n        with mockRunLogs.BufferLog() as mock:\n            repInt.interactEOL()\n            self.assertIn(\"Comprehensive Core Report\", mock.getStdout())\n            self.assertIn(\"Assembly Area Fractions\", mock.getStdout())\n"
  },
  {
    "path": "armi/bookkeeping/snapshotInterface.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nControls points during a calculation where snapshots will be triggered, signaling more detailed treatments.\n\nSnapshots are user-defined cycle/timenode points where something special is to be done.\nWhat in particular is done is dependent on the case settings and the collection of active plugins\n\n* At the very basic level,\n  third-party code input files are dumped out and stored in special snapshot folders at these times.\n  This can be useful when you are sharing third-party input files with another party (e.g. for review or\n  collaboration).\n* You may want to run extra long-running physics simulations only at a few time points (e.g. BOL, EOL). This\n  is useful for detailed transient analysis, or other follow-on analysis.\n\nSnapshots can be requested through the settings: ``dumpSnapshot`` and/or ``defaultSnapshots``.\n\"\"\"\n\nfrom armi import interfaces, operators, runLog\nfrom armi.utils import getStepLengths\n\nORDER = interfaces.STACK_ORDER.POSTPROCESSING\n\n\ndef describeInterfaces(cs):\n    \"\"\"Function for exposing interface(s) to other code.\"\"\"\n    return (SnapshotInterface, {})\n\n\nclass SnapshotInterface(interfaces.Interface):\n    \"\"\"\n    Snapshot managerial interface.\n\n    .. 
impl:: Save extra data to be saved from a run, at specified time nodes.\n        :id: I_ARMI_SNAPSHOT0\n        :implements: R_ARMI_SNAPSHOT\n\n        This is a special :py:class:`Interface <armi.interfaces.Interface>` that is\n        designed to run along all the other Interfaces during a simulation, to save off\n        important or helpful data. By default, this is designed to be used with the\n        ``\"defaultSnapshots\"`` and ``\"\"dumpSnapshot\"\"`` settings. These settings were\n        added so users can control if snapshot data will be recorded during their run.\n        Broadly, this class is implemented to run the Operator method\n        :py:meth:`o.snapshotRequest <armi.operators.Operator.snapshotRequest>`.\n    \"\"\"\n\n    name = \"snapshot\"\n\n    def interactBOL(self):\n        \"\"\"Active the default snapshots at BOL.\"\"\"\n        interfaces.Interface.interactBOL(self)\n        if self.cs[\"defaultSnapshots\"]:\n            self.activateDefaultSnapshots()\n\n    def interactEveryNode(self, cycle, node):\n        \"\"\"Call the snapshot interface to copy files at each node, if requested.\"\"\"\n        snapText = getCycleNodeStamp(cycle, node)  # CCCNNN\n        if self.cs[\"dumpSnapshot\"] and snapText in self.cs[\"dumpSnapshot\"]:\n            self.o.snapshotRequest(cycle, node)\n\n    def interactCoupled(self, iteration):\n        \"\"\"Call the snapshot interface to copy files for coupled iterations, if requested.\"\"\"\n        snapText = getCycleNodeStamp(self.r.p.cycle, self.r.p.timeNode)  # CCCNNN\n        if self.cs[\"dumpSnapshot\"] and snapText in self.cs[\"dumpSnapshot\"]:\n            self.o.snapshotRequest(self.r.p.cycle, self.r.p.timeNode, iteration)\n\n    def activateDefaultSnapshots(self):\n        \"\"\"Figure out and assign some default snapshots (BOL, MOL, EOL).\"\"\"\n        if self.cs[\"runType\"] == operators.RunTypes.EQUILIBRIUM:\n            snapTimeCycleNodePairs = self._getSnapTimesEquilibrium()\n        
else:\n            snapTimeCycleNodePairs = self._getSnapTimesNormal()\n\n        snapText = [\"{0:03d}{1:03d}\".format(c, n) for c, n in snapTimeCycleNodePairs]\n\n        # determine if there are new snapshots to add to the settings file\n        for snapT in snapText:\n            if snapT not in self.cs[\"dumpSnapshot\"]:\n                runLog.info(\"Adding default snapshot {0} to snapshot queue.\".format(snapT))\n                self.cs[\"dumpSnapshot\"] = self.cs[\"dumpSnapshot\"] + [snapT]\n\n    def _getSnapTimesEquilibrium(self):\n        \"\"\"Set BOEC, MOEC, EOEC snapshots.\"\"\"\n        if not self.cs[\"eqToDatabaseOnlyWhenConverged\"]:\n            raise ValueError(\"Cannot create default snapshots when `eqToDatabaseOnlyWhenConverged` setting is active\")\n        return [(0, 0), (0, self.cs[\"burnSteps\"] // 2), (0, self.cs[\"burnSteps\"])]\n\n    def _getSnapTimesNormal(self):\n        try:\n            curCycle = self.r.p.cycle\n        except AttributeError:\n            # none has no attribute getParam (no reactor for whatever reason)\n            curCycle = 0\n        eolCycle = self.cs[\"nCycles\"] - 1\n\n        molCycle = eolCycle // 2\n        bolCycle = 0\n\n        snapTimeCycleNodePairs = []\n        if bolCycle >= curCycle:\n            snapTimeCycleNodePairs.append([bolCycle, 0])\n        if molCycle >= curCycle:\n            snapTimeCycleNodePairs.append([molCycle, 0])\n        if eolCycle >= curCycle:\n            eolCycleLastNode = len(getStepLengths(self.cs)[-1])\n            snapTimeCycleNodePairs.append([eolCycle, eolCycleLastNode])\n\n        return snapTimeCycleNodePairs\n\n\ndef extractCycleNodeFromStamp(stamp):\n    \"\"\"\n    Returns cycle and node from a CCCNNN stamp.\n\n    See Also\n    --------\n    getCycleNodeStamp : the opposite\n    \"\"\"\n    cycle = int(stamp[:3])\n    node = int(stamp[3:])\n    return cycle, node\n\n\ndef getCycleNodeStamp(cycle, node):\n    \"\"\"\n    Returns a CCCNNN stamp for this cycle and 
node.\n\n    Useful for comparing the current cycle/node with requested snapshots in the settings\n\n    See Also\n    --------\n    isRequestedDetailPoint : compares a cycle,node to the dumpSnapshot list.\n    extractCycleNodeFromStamp : does the opposite\n    \"\"\"\n    return \"{0:03d}{1:03d}\".format(cycle, node)\n"
  },
  {
    "path": "armi/bookkeeping/tests/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nBookkeeping test package.\n\nThis may seem a little bit over-engineered, but the jupyter notebooks that get run by\nthe test_historyTracker are also used in the documentation system, so providing a list\nof related files from this package is useful. Also, these are organized like this to\nprevent having to import the world just to get something like a list of strings.\n\"\"\"\n\nfrom armi.bookkeeping.tests._constants import *  # noqa: F403\n"
  },
  {
    "path": "armi/bookkeeping/tests/_constants.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nPlain old data for the bookkeeping tests.\n\nThese are stored here so that they can be accessed from within this test package, but\nalso re-exported by `__init__.py`, so that other things (like the documentation system)\ncan use it without having to import the rest of ARMI.\n\"\"\"\n\nimport os\n\nfrom armi.testing import TESTING_ROOT\nfrom armi.tests import TEST_ROOT\n\n# These files are needed to run the data_model ipython notebook, which is done in\n# test_historyTracker, and when building the docs.\nTUTORIAL_FILES = [\n    os.path.join(TESTING_ROOT, \"reactors\", \"anl-afci-177\", \"anl-afci-177-blueprints.yaml\"),\n    os.path.join(TESTING_ROOT, \"reactors\", \"anl-afci-177\", \"anl-afci-177-coreMap.yaml\"),\n    os.path.join(TESTING_ROOT, \"reactors\", \"anl-afci-177\", \"anl-afci-177-fuelManagement.py\"),\n    os.path.join(TESTING_ROOT, \"reactors\", \"anl-afci-177\", \"anl-afci-177.yaml\"),\n    os.path.join(TEST_ROOT, \"tutorials\", \"data_model.ipynb\"),\n]\n"
  },
  {
    "path": "armi/bookkeeping/tests/test_historyTracker.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nTests for the history tracker interface.\n\nThese tests actually run a jupyter notebook that is in the documentation to build a valid HDF5 file to load from as a\ntest fixtures. Thus they take a little longer than usual.\n\"\"\"\n\nimport os\nimport shutil\n\nimport numpy as np\n\nfrom armi import init as armi_init\nfrom armi import settings, utils\nfrom armi.reactor.flags import Flags\nfrom armi.tests import TEST_ROOT, ArmiTestHelper\nfrom armi.utils.directoryChangers import TemporaryDirectoryChanger\n\nTHIS_DIR = os.path.dirname(__file__)  # because tests do not run in this folder\nTEST_FILE = os.path.join(TEST_ROOT, \"smallestTestReactor\", \"armiRunSmallest.yaml\")\n\n\nclass TestHistoryTracker(ArmiTestHelper):\n    \"\"\"History tracker tests that require a Reactor Model.\"\"\"\n\n    @classmethod\n    def setUpClass(cls):\n        cls.dirChanger = TemporaryDirectoryChanger()\n        cls.dirChanger.__enter__()\n\n        # modify the input settings for our tests\n        dbPath = os.path.join(cls.dirChanger.destination, \"armiRunSmallest.h5\")\n        reloadPath = os.path.join(cls.dirChanger.destination, \"armiRunSmallestReload.h5\")\n        cs = settings.Settings(TEST_FILE)\n        newSettings = {}\n        newSettings[\"db\"] = True\n        newSettings[\"nCycles\"] = 1\n        newSettings[\"detailAssemLocationsBOL\"] = [\"001-001\"]\n        
newSettings[\"loadStyle\"] = \"fromDB\"\n        newSettings[\"reloadDBName\"] = reloadPath\n        newSettings[\"startNode\"] = 1\n        newSettings[\"verbosity\"] = \"error\"\n        cs = cs.modified(newSettings=newSettings)\n\n        # build the ARMI operator (and Reactor)\n        o = armi_init(fName=TEST_FILE, cs=cs)\n\n        def _setFakePower(core):\n            peakPower = 1e6\n            mgFluxBase = np.arange(5)\n            for a in core:\n                for b in a:\n                    vol = b.getVolume()\n                    fuelFlag = 10 if b.isFuel() else 1.0\n                    b.p.power = peakPower * fuelFlag\n                    b.p.pdens = b.p.power / vol\n                    b.p.mgFlux = mgFluxBase * b.p.pdens\n\n        # put some test power values on the Reactor object\n        _setFakePower(o.r.core)\n\n        # write some data to the DB\n        dbi = o.getInterface(\"database\")\n        dbi.initDB(fName=dbPath)\n        dbi.database.writeToDB(o.r)\n        o.r.p.timeNode += 1\n        dbi.database.writeToDB(o.r)\n\n        cls.o = o\n        cls.r = o.r\n\n    @classmethod\n    def tearDownClass(cls):\n        cls.dirChanger.__exit__(None, None, None)\n        try:\n            cls.o.getInterface(\"database\").database.close()\n        except FileNotFoundError:\n            pass\n        cls.r = None\n        cls.o = None\n\n    def test_calcMGFluence(self):\n        \"\"\"\n        This test confirms that mg flux has many groups when loaded with the history tracker.\n\n        .. 
test:: Demonstrate that a parameter stored at differing time nodes can be recovered.\n            :id: T_ARMI_HIST_TRACK0\n            :tests: R_ARMI_HIST_TRACK\n        \"\"\"\n        o = self.o\n        b = o.r.core.childrenByLocator[o.r.core.spatialGrid[0, 0, 0]].getFirstBlock(Flags.FUEL)\n        bVolume = b.getVolume()\n        bName = b.name\n\n        # duration is None in this DB\n        hti = o.getInterface(\"history\")\n        timesInYears = [duration or 1.0 for duration in hti.getTimeSteps()]\n        timeStepsToRead = [utils.getCycleNodeFromCumulativeNode(i, self.o.cs) for i in range(len(timesInYears))]\n        hti.preloadBlockHistoryVals([bName], [\"mgFlux\"], timeStepsToRead)\n\n        mgFluence = None\n        for ts, years in enumerate(timesInYears):\n            cycle, node = utils.getCycleNodeFromCumulativeNode(ts, self.o.cs)\n            mgFlux = hti.getBlockHistoryVal(bName, \"mgFlux\", (cycle, node))\n            mgFlux /= bVolume\n            timeInSec = years * 365 * 24 * 3600\n            if mgFluence is None:\n                mgFluence = timeInSec * mgFlux\n            else:\n                mgFluence += timeInSec * mgFlux\n\n        self.assertGreater(len(mgFluence), 1, \"mgFluence should have more than 1 group\")\n\n        # test that unloadBlockHistoryVals() is working\n        self.assertIsNotNone(hti._preloadedBlockHistory)\n        hti.unloadBlockHistoryVals()\n        self.assertIsNone(hti._preloadedBlockHistory)\n\n    def test_historyParameters(self):\n        \"\"\"Retrieve various parameters from the history.\n\n        .. 
test:: Demonstrate that various parameters stored at differing time nodes can be recovered.\n            :id: T_ARMI_HIST_TRACK1\n            :tests: R_ARMI_HIST_TRACK\n        \"\"\"\n        o = self.o\n        b = o.r.core.childrenByLocator[o.r.core.spatialGrid[0, 0, 0]].getFirstBlock(Flags.FUEL)\n        b.getVolume()\n        bName = b.name\n\n        # duration is None in this DB\n        hti = o.getInterface(\"history\")\n        timesInYears = [duration or 1.0 for duration in hti.getTimeSteps()]\n        timeStepsToRead = [utils.getCycleNodeFromCumulativeNode(i, self.o.cs) for i in range(len(timesInYears))]\n        hti.preloadBlockHistoryVals([bName], [\"power\"], timeStepsToRead)\n\n        # read some parameters\n        params = {}\n        for param in [\"height\", \"pdens\", \"power\"]:\n            params[param] = []\n            for ts, years in enumerate(timesInYears):\n                cycle, node = utils.getCycleNodeFromCumulativeNode(ts, self.o.cs)\n                params[param].append(hti.getBlockHistoryVal(bName, param, (cycle, node)))\n\n        # verify the height parameter doesn't change over time\n        self.assertGreater(params[\"height\"][0], 0)\n        self.assertEqual(params[\"height\"][0], params[\"height\"][1])\n\n        # verify the power parameter is retrievable from the history\n        refPower = 1000000.0\n        self.assertEqual(o.cs[\"power\"], refPower)\n        self.assertAlmostEqual(params[\"power\"][0], refPower * 10.0, delta=0.1)\n\n        # verify the power density parameter is retrievable from the history\n        refDens = 1636.4803548458785\n        self.assertAlmostEqual(params[\"pdens\"][0], refDens, delta=0.001)\n        self.assertAlmostEqual(params[\"pdens\"][0], params[\"pdens\"][1])\n\n        # test that unloadBlockHistoryVals() is working\n        self.assertIsNotNone(hti._preloadedBlockHistory)\n        hti.unloadBlockHistoryVals()\n        self.assertIsNone(hti._preloadedBlockHistory)\n\n    def 
test_historyReport(self):\n        \"\"\"\n        Test generation of history report.\n\n        This does a swap for 5 timesteps::\n\n            |       TS  0     1      2       3       4\n            |LOC      (1,1) (2,1)  (3,1)   (4,1)   SFP\n        \"\"\"\n        history = self.o.getInterface(\"history\")\n        history.interactBOL()\n        history.interactEOL()\n        testLoc = self.o.r.core.spatialGrid[0, 0, 0]\n        testAssem = self.o.r.core.childrenByLocator[testLoc]\n        fileName = history._getAssemHistoryFileName(testAssem)\n        actualFilePath = os.path.join(THIS_DIR, fileName)\n        expectedFileName = os.path.join(THIS_DIR, fileName.replace(\".txt\", \"-ref.txt\"))\n        # copy from fast path so the file is retrievable.\n        shutil.move(fileName, os.path.join(THIS_DIR, fileName))\n        self.compareFilesLineByLine(expectedFileName, actualFilePath)\n\n        # test that detailAssemblyNames() is working\n        self.assertEqual(len(history.detailAssemblyNames), 1)\n        history.addAllDetailedAssems()\n        self.assertEqual(len(history.detailAssemblyNames), 1)\n\n    def test_getAssemHistories(self):\n        \"\"\"Get the histories for all blocks in detailed assemblies.\"\"\"\n        history = self.o.getInterface(\"history\")\n        history.interactBOL()\n        assemList = history.getDetailAssemblies()\n        params = history.getTrackedParams()\n        assemHistories = history.getAssemHistories(assemList)\n        for a in assemList:\n            for b in history.nonStationaryBlocks(a):\n                self.assertIn(b, assemHistories)\n                for param in params:\n                    self.assertIn(param, assemHistories[b])\n\n    def test_getBlockInAssembly(self):\n        history = self.o.getInterface(\"history\")\n        aFuel = self.o.r.core.getFirstAssembly(Flags.FUEL)\n\n        b = history._getBlockInAssembly(aFuel)\n        self.assertGreater(b.p.height, 1.0)\n        
self.assertEqual(b.getType(), \"fuel\")\n\n        with self.assertRaises(AttributeError):\n            aShield = self.o.r.core.getFirstAssembly(Flags.SHIELD)\n            history._getBlockInAssembly(aShield)\n"
  },
  {
    "path": "armi/bookkeeping/tests/test_memoryProfiler.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for memoryProfiler.\"\"\"\n\nimport logging\nimport unittest\nfrom unittest.mock import MagicMock, patch\n\nfrom armi import runLog\nfrom armi.bookkeeping import memoryProfiler\nfrom armi.bookkeeping.memoryProfiler import (\n    getCurrentMemoryUsage,\n    getTotalJobMemory,\n)\nfrom armi.reactor.tests import test_reactors\nfrom armi.tests import TEST_ROOT, mockRunLogs\n\n\nclass TestMemoryProfiler(unittest.TestCase):\n    def setUp(self):\n        self.o, self.r = test_reactors.loadTestReactor(\n            TEST_ROOT,\n            {\"debugMem\": True},\n            inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\",\n        )\n        self.memPro: memoryProfiler.MemoryProfiler = self.o.getInterface(\"memoryProfiler\")\n\n    def tearDown(self):\n        self.o.removeInterface(self.memPro)\n\n    def test_fullBreakdown(self):\n        with mockRunLogs.BufferLog() as mock:\n            # we should start with a clean slate\n            self.assertEqual(\"\", mock.getStdout())\n            runLog.LOG.startLog(\"test_fullBreakdown\")\n            runLog.LOG.setVerbosity(logging.INFO)\n\n            # we should start at info level, and that should be working correctly\n            self.assertEqual(runLog.LOG.getVerbosity(), logging.INFO)\n            self.memPro._printFullMemoryBreakdown(reportSize=False)\n\n            # do some basic testing\n   
         self.assertTrue(mock.getStdout().count(\"UNIQUE_INSTANCE_COUNT\") > 10)\n            self.assertIn(\"garbage\", mock.getStdout())\n\n    def test_displayMemoryUsage(self):\n        with mockRunLogs.BufferLog() as mock:\n            # we should start with a clean slate\n            self.assertEqual(\"\", mock.getStdout())\n            runLog.LOG.startLog(\"test_displayMemUsage\")\n            runLog.LOG.setVerbosity(logging.INFO)\n\n            # we should start at info level, and that should be working correctly\n            self.assertEqual(runLog.LOG.getVerbosity(), logging.INFO)\n            self.memPro.displayMemoryUsage(1)\n\n            # do some basic testing\n            self.assertIn(\"End Memory Usage Report\", mock.getStdout())\n\n    def test_printFullMemoryBreakdown(self):\n        with mockRunLogs.BufferLog() as mock:\n            # we should start with a clean slate\n            self.assertEqual(\"\", mock.getStdout())\n            runLog.LOG.startLog(\"test_displayMemUsage\")\n            runLog.LOG.setVerbosity(logging.INFO)\n\n            # we should start at info level, and that should be working correctly\n            self.assertEqual(runLog.LOG.getVerbosity(), logging.INFO)\n            self.memPro._printFullMemoryBreakdown(reportSize=True)\n\n            # do some basic testing\n            self.assertIn(\"UNIQUE_INSTANCE_COUNT\", mock.getStdout())\n            self.assertIn(\" MB\", mock.getStdout())\n\n    def test_getReferrers(self):\n        with mockRunLogs.BufferLog() as mock:\n            # we should start with a clean slate\n            self.assertEqual(\"\", mock.getStdout())\n            testName = \"test_getReferrers\"\n            runLog.LOG.startLog(testName)\n            runLog.LOG.setVerbosity(logging.DEBUG)\n\n            # grab the referrers\n            self.memPro.getReferrers(self.r)\n            memLog = mock.getStdout()\n\n        # test the results\n        self.assertGreater(memLog.count(\"ref for\"), 10)\n     
   self.assertLess(memLog.count(\"ref for\"), 50)\n        self.assertIn(testName, memLog)\n        self.assertIn(\"Reactor\", memLog)\n        self.assertIn(\"core\", memLog)\n\n    def test_checkForDuplicateObjectsOnArmiModel(self):\n        with mockRunLogs.BufferLog() as mock:\n            # we should start with a clean slate\n            self.assertEqual(\"\", mock.getStdout())\n            testName = \"test_checkForDuplicateObjectsOnArmiModel\"\n            runLog.LOG.startLog(testName)\n            runLog.LOG.setVerbosity(logging.IMPORTANT)\n\n            # check for duplicates\n            with self.assertRaises(RuntimeError):\n                self.memPro.checkForDuplicateObjectsOnArmiModel(\"cs\", self.r.core)\n\n            # validate the outputs are as we expect\n            self.assertIn(\"There are 2 unique objects stored as `.cs`\", mock.getStdout())\n            self.assertIn(\"Expected id\", mock.getStdout())\n            self.assertIn(\"Expected object\", mock.getStdout())\n            self.assertIn(\"These types of objects\", mock.getStdout())\n            self.assertIn(\"MemoryProfiler\", mock.getStdout())\n            self.assertIn(\"MainInterface\", mock.getStdout())\n\n    def test_profileMemoryUsageAction(self):\n        pmua = memoryProfiler.ProfileMemoryUsageAction(\"timeDesc\")\n        self.assertEqual(pmua.timeDescription, \"timeDesc\")\n\n    @patch(\"psutil.virtual_memory\")\n    @patch(\"armi.bookkeeping.memoryProfiler.cpu_count\")\n    def test_getTotalJobMemory(self, mockCpuCount, mockVMem):\n        \"\"\"Use an example node with 50 GB of total physical memory and 10 CPUs.\"\"\"\n        mockCpuCount.return_value = 10\n        vMem = MagicMock()\n        vMem.total = (1024**3) * 50\n        mockVMem.return_value = vMem\n\n        expectedArrangement = {\n            (10, 1): 50,\n            (1, 10): 50,\n            (2, 5): 50,\n            (3, 3): 45,\n            (4, 1): 20,\n            (2, 4): 40,\n            (5, 2): 50,\n    
    }\n        for compReq, jobMemory in expectedArrangement.items():\n            # compReq[0] is nTasks and compReq[1] is cpusPerTask\n            self.assertEqual(getTotalJobMemory(compReq[0], compReq[1]), jobMemory)\n\n    @patch(\"armi.bookkeeping.memoryProfiler.PrintSystemMemoryUsageAction\")\n    @patch(\"armi.bookkeeping.memoryProfiler.SystemAndProcessMemoryUsage\")\n    def test_getCurrentMemoryUsage(self, mockSysAndProcMemUse, mockPrintSysMemUseAction):\n        \"\"\"Mock the memory usage across 3 different processes and that the total usage is as expected (6 MB).\"\"\"\n        self._setMemUseMock(mockPrintSysMemUseAction)\n        self.assertAlmostEqual(getCurrentMemoryUsage(), 6 * 1024)\n\n    @patch(\"armi.bookkeeping.memoryProfiler.PrintSystemMemoryUsageAction\")\n    @patch(\"armi.bookkeeping.memoryProfiler.SystemAndProcessMemoryUsage\")\n    @patch(\"psutil.virtual_memory\")\n    @patch(\"armi.bookkeeping.memoryProfiler.cpu_count\")\n    def test_printCurrentMemoryState(self, mockCpuCount, mockVMem, mock1, mockPrintSysMemUseAction):\n        \"\"\"Use an example node with 50 GB of total physical memory and 10 CPUs while using 6 GB.\"\"\"\n        mockCpuCount.return_value = 10\n        vMem = MagicMock()\n        vMem.total = (1024**3) * 50\n        mockVMem.return_value = vMem\n        self._setMemUseMock(mockPrintSysMemUseAction)\n        with mockRunLogs.BufferLog() as mockLogs:\n            self.memPro.cs = {\"cpusPerTask\": 1, \"nTasks\": 10}\n            self.memPro.printCurrentMemoryState()\n            stdOut = mockLogs.getStdout()\n            self.assertIn(\"Currently using 6.0 GB of memory.\", stdOut)\n            self.assertIn(\"There is 44.0 GB of memory left.\", stdOut)\n            self.assertIn(\"There is a total allocation of 50.0 GB\", stdOut)\n            # Try another for funzies where we only use half the available resources on the node\n            mockLogs.emptyStdout()\n            self.memPro.cs = {\"cpusPerTask\": 5, 
\"nTasks\": 1}\n            self.memPro.printCurrentMemoryState()\n            stdOut = mockLogs.getStdout()\n            self.assertIn(\"Currently using 6.0 GB of memory.\", stdOut)\n            self.assertIn(\"There is 19.0 GB of memory left.\", stdOut)\n            self.assertIn(\"There is a total allocation of 25.0 GB\", stdOut)\n\n    def test_printCurrentMemoryState_noSetting(self):\n        \"\"\"Test that the try/except works as it should.\"\"\"\n        expectedStr = (\n            \"To view memory consumed, remaining available, and total allocated for a case, \"\n            \"add the setting 'cpusPerTask' to your application.\"\n        )\n        with mockRunLogs.BufferLog() as mockLogs:\n            self.memPro.printCurrentMemoryState()\n            self.assertIn(expectedStr, mockLogs.getStdout())\n\n    def _setMemUseMock(self, mockPrintSysMemUseAction):\n        class mockMemUse:\n            def __init__(self, mem: float):\n                self.processVirtualMemoryInMB = mem\n\n        instance = mockPrintSysMemUseAction.return_value\n        instance.gather.return_value = [\n            mockMemUse(1 * 1024),\n            mockMemUse(2 * 1024),\n            mockMemUse(3 * 1024),\n        ]\n\n\nclass KlassCounterTests(unittest.TestCase):\n    def get_containers(self):\n        container1 = [1, 2, 3, 4, 5, 6, 7, 2.0]\n        container2 = (\"a\", \"b\", container1, None)\n        container3 = {\n            \"yo\": container2,\n            \"yo1\": container1,\n            (\"t1\", \"t2\"): True,\n            \"yeah\": [],\n            \"nope\": {},\n        }\n\n        return container3\n\n    def test_expandContainer(self):\n        container = self.get_containers()\n\n        counter = memoryProfiler.KlassCounter(False)\n        counter.countObjects(container)\n\n        self.assertEqual(counter.count, 24)\n        self.assertEqual(counter[list].count, 2)\n        self.assertEqual(counter[dict].count, 2)\n        
self.assertEqual(counter[tuple].count, 2)\n        self.assertEqual(counter[int].count, 7)\n\n    def test_countHandlesRecursion(self):\n        container = self.get_containers()\n        container1 = container[\"yo1\"]\n        container1.append(container1)\n\n        counter = memoryProfiler.KlassCounter(False)\n        counter.countObjects(container)\n\n        # despite it now being recursive ... we get the same counts\n        self.assertEqual(counter.count, 24)\n        self.assertEqual(counter[list].count, 2)\n        self.assertEqual(counter[dict].count, 2)\n        self.assertEqual(counter[tuple].count, 2)\n        self.assertEqual(counter[int].count, 7)\n"
  },
  {
    "path": "armi/bookkeeping/tests/test_snapshot.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Test Snapshots.\"\"\"\n\nimport unittest\nfrom unittest.mock import patch\n\nfrom armi import settings\nfrom armi.bookkeeping import snapshotInterface\nfrom armi.operators.operator import Operator\n\n\nclass MockReactorParams:\n    def __init__(self):\n        self.cycle = 0\n        self.timeNode = 1\n\n\nclass MockReactor:\n    def __init__(self, cs):\n        self.p = MockReactorParams()\n        self.o = Operator(cs)\n\n\nclass TestSnapshotInterface(unittest.TestCase):\n    @classmethod\n    def setUpClass(self):\n        self.cs = settings.Settings()\n\n    def setUp(self):\n        self.cs.revertToDefaults()\n        self.si = snapshotInterface.SnapshotInterface(MockReactor(self.cs), self.cs)\n\n    @patch(\"armi.operators.operator.Operator.snapshotRequest\")\n    def test_interactEveryNode(self, mockSnapshotRequest):\n        newSettings = {}\n        newSettings[\"dumpSnapshot\"] = [\"000001\"]\n        self.si.cs = self.si.cs.modified(newSettings=newSettings)\n        self.si.interactEveryNode(0, 1)\n        self.assertTrue(mockSnapshotRequest.called)\n\n    @patch(\"armi.operators.operator.Operator.snapshotRequest\")\n    def test_interactCoupled(self, mockSnapshotRequest):\n        newSettings = {}\n        newSettings[\"dumpSnapshot\"] = [\"000001\"]\n        self.si.cs = self.si.cs.modified(newSettings=newSettings)\n        
self.si.interactCoupled(2)\n        self.assertTrue(mockSnapshotRequest.called)\n\n    def test_activateDefSnapshots_30cyc2burns(self):\n        \"\"\"\n        Test snapshots for 30 cycles and 2 burnsteps, checking the dumpSnapshot setting.\n\n        .. test:: Allow extra data to be saved from a run, at specified time nodes.\n            :id: T_ARMI_SNAPSHOT0\n            :tests: R_ARMI_SNAPSHOT\n        \"\"\"\n        self.assertEqual([], self.cs[\"dumpSnapshot\"])\n\n        newSettings = {}\n        newSettings[\"nCycles\"] = 30\n        newSettings[\"burnSteps\"] = 2\n        newSettings[\"cycleLength\"] = 365\n        self.si.cs = self.si.cs.modified(newSettings=newSettings)\n        self.cs = self.si.cs\n\n        self.si.activateDefaultSnapshots()\n        self.assertEqual([\"000000\", \"014000\", \"029002\"], self.si.cs[\"dumpSnapshot\"])\n\n    def test_activateDeftSnapshots_17cyc5surns(self):\n        \"\"\"\n        Test snapshots for 17 cycles and 5 burnsteps, checking the dumpSnapshot setting.\n\n        .. test:: Allow extra data to be saved from a run, at specified time nodes.\n            :id: T_ARMI_SNAPSHOT1\n            :tests: R_ARMI_SNAPSHOT\n        \"\"\"\n        self.assertEqual([], self.cs[\"dumpSnapshot\"])\n\n        newSettings = {}\n        newSettings[\"nCycles\"] = 17\n        newSettings[\"burnSteps\"] = 5\n        newSettings[\"cycleLength\"] = 365\n        self.si.cs = self.si.cs.modified(newSettings=newSettings)\n        self.cs = self.si.cs\n\n        self.si.activateDefaultSnapshots()\n        self.assertEqual([\"000000\", \"008000\", \"016005\"], self.si.cs[\"dumpSnapshot\"])\n"
  },
  {
    "path": "armi/bookkeeping/visualization/__init__.py",
    "content": "# Copyright 2020 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThe Visualization package contains functionality and entry points for producing files\namenable to visualization of ARMI run results.\n\nThis could theoretically support all sorts of visualization file formats, but for now,\nonly VTK files are supported. VTK was selected because it has wide support from vis\ntools, while being a simple-enough format that quality pure-Python libraries exist to\nproduce them. Other formats (e.g., SILO) tend to require more system-dependent binary\ndependencies, so optional support for them may be added later.\n\"\"\"\n\nfrom armi import plugins  # noqa: F401\nfrom armi.bookkeeping.visualization.entryPoint import VisFileEntryPoint  # noqa: F401\n"
  },
  {
    "path": "armi/bookkeeping/visualization/dumper.py",
    "content": "# Copyright 2020 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Abstract base class for visualization file dumpers.\"\"\"\n\nfrom abc import ABC, abstractmethod\n\nfrom armi.reactor import reactors\n\n\nclass VisFileDumper(ABC):\n    @abstractmethod\n    def dumpState(self, r: reactors.Reactor):\n        \"\"\"Dump a single reactor state to the vis file.\"\"\"\n\n    @abstractmethod\n    def __enter__(self):\n        \"\"\"Invoke initialize when entering a context manager.\"\"\"\n\n    @abstractmethod\n    def __exit__(self, type, value, traceback):\n        \"\"\"Invoke initialize when entering a context manager.\"\"\"\n"
  },
  {
    "path": "armi/bookkeeping/visualization/entryPoint.py",
    "content": "# Copyright 2020 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Entry point for producing visualization files.\"\"\"\n\nimport pathlib\nimport re\nimport sys\n\nfrom armi import runLog\nfrom armi.cli import entryPoint\n\n\nclass VisFileEntryPoint(entryPoint.EntryPoint):\n    \"\"\"Create visualization files from database files.\"\"\"\n\n    name = \"vis-file\"\n    description = \"Convert ARMI databases in to visualization files\"\n\n    _FORMAT_VTK = \"vtk\"\n    _FORMAT_XDMF = \"xdmf\"\n    _SUPPORTED_FORMATS = {_FORMAT_VTK, _FORMAT_XDMF}\n\n    def __init__(self):\n        entryPoint.EntryPoint.__init__(self)\n\n    def addOptions(self):\n        self.parser.add_argument(\"h5db\", help=\"Input database path\", type=str)\n        self.parser.add_argument(\n            \"--output-name\",\n            \"-o\",\n            help=\"Base name for output file(s). File extensions will be added as appropriate\",\n            type=str,\n            default=None,\n        )\n        self.parser.add_argument(\n            \"--format\",\n            \"-f\",\n            help=\"Output format. Supported formats: `vtk` and `xdmf`\",\n            default=\"vtk\",\n        )\n        self.parser.add_argument(\n            \"--nodes\",\n            help=\"An optional list of time nodes to include. 
Should look like `(1,0)(1,1)(1,2)`, etc\",\n            type=str,\n            default=None,\n        )\n        self.parser.add_argument(\n            \"--max-node\",\n            help=\"An optional (cycle,timeNode) tuple to specify the latest time step that should be included\",\n            type=str,\n            default=None,\n        )\n        self.parser.add_argument(\n            \"--min-node\",\n            help=\"An optional (cycle,timeNode) tuple to specify the earliest time step that should be included\",\n            type=str,\n            default=None,\n        )\n\n    def parse(self, args):\n        \"\"\"\n        Process user input.\n\n        Strings are parsed against some regular expressions and saved back to their\n        original locations in the ``self.args`` namespace for later use.\n        \"\"\"\n        entryPoint.EntryPoint.parse(self, args)\n\n        cycleNodePattern = r\"\\((\\d+),(\\d+)\\)\"\n\n        if self.args.nodes is not None:\n            self.args.nodes = [(int(cycle), int(node)) for cycle, node in re.findall(cycleNodePattern, self.args.nodes)]\n\n        if self.args.max_node is not None:\n            nodes = re.findall(cycleNodePattern, self.args.max_node)\n            if len(nodes) != 1:\n                runLog.error(\"Bad --max-node: `{}`. Should look like (c,n).\".format(self.args.max_node))\n                sys.exit(1)\n            cycle, node = nodes[0]\n            self.args.max_node = (int(cycle), int(node))\n\n        if self.args.min_node is not None:\n            nodes = re.findall(cycleNodePattern, self.args.min_node)\n            if len(nodes) != 1:\n                runLog.error(\"Bad --min-node: `{}`. 
Should look like (c,n).\".format(self.args.min_node))\n                sys.exit(1)\n            cycle, node = nodes[0]\n            self.args.min_node = (int(cycle), int(node))\n\n        if self.args.format not in self._SUPPORTED_FORMATS:\n            runLog.error(\n                \"Requested format `{}` not among the supported options: {}\".format(\n                    self.args.format, self._SUPPORTED_FORMATS\n                )\n            )\n            sys.exit(1)\n\n        if self.args.output_name is None:\n            # infer name from input\n            inp = pathlib.Path(self.args.h5db)\n            self.args.output_name = inp.stem\n\n    def invoke(self):\n        # late imports so that we dont have to import the world to do anything\n        from armi.bookkeeping.db import databaseFactory\n        from armi.bookkeeping.visualization import vtk, xdmf\n\n        # a little baroque, but easy to extend with future formats\n        formatMap = {\n            self._FORMAT_VTK: vtk.VtkDumper,\n            self._FORMAT_XDMF: xdmf.XdmfDumper,\n        }\n\n        dumper = formatMap[self.args.format](self.args.output_name, self.args.h5db)\n\n        nodes = self.args.nodes\n        db = databaseFactory(self.args.h5db, \"r\")\n        with db:\n            dbNodes = list(db.genTimeSteps())\n\n            if nodes is not None and any(node not in dbNodes for node in nodes):\n                raise RuntimeError(\n                    \"Some of the requested nodes are not in the source database.\\nRequested: {}\\nPresent: {}\".format(\n                        nodes, dbNodes\n                    )\n                )\n\n            with dumper:\n                for cycle, node in dbNodes:\n                    if nodes is not None and (cycle, node) not in nodes:\n                        continue\n\n                    if self.args.min_node is not None and (cycle, node) < self.args.min_node:\n                        continue\n\n                    if self.args.max_node 
is not None and (cycle, node) > self.args.max_node:\n                        continue\n\n                    runLog.info(\"Creating visualization file for cycle {}, time node {}...\".format(cycle, node))\n                    r = db.load(cycle, node)\n                    dumper.dumpState(r)\n"
  },
  {
    "path": "armi/bookkeeping/visualization/tests/__init__.py",
    "content": "# Copyright 2020 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"
  },
  {
    "path": "armi/bookkeeping/visualization/tests/test_vis.py",
    "content": "# Copyright 2020 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Test report visualization.\"\"\"\n\nimport unittest\n\nimport numpy as np\nfrom pyevtk.vtk import VtkTetra\n\nfrom armi import settings\nfrom armi.bookkeeping.db import Database\nfrom armi.bookkeeping.visualization import utils, vtk, xdmf\nfrom armi.reactor import blocks, components\nfrom armi.reactor.tests import test_reactors\nfrom armi.utils.directoryChangers import TemporaryDirectoryChanger\n\n\nclass TestVtkMesh(unittest.TestCase):\n    \"\"\"Test the VtkMesh utility class.\"\"\"\n\n    def test_testVtkMesh(self):\n        mesh = utils.VtkMesh.empty()\n\n        self.assertEqual(mesh.vertices.size, 0)\n        self.assertEqual(mesh.vertices.shape, (0, 3))\n        self.assertEqual(mesh.connectivity.size, 0)\n        self.assertEqual(mesh.offsets.size, 0)\n        self.assertEqual(mesh.cellTypes.size, 0)\n\n        verts = np.array([[0.0, 0.0, 0.0], [0.0, 1.0, 0.0], [1.0, 0.0, 0.0], [0.25, 0.25, 0.5]])\n        conn = np.array([0, 1, 2, 3])\n        offsets = np.array([4])\n        cellTypes = np.array([VtkTetra.tid])\n        newMesh = utils.VtkMesh(verts, conn, offsets, cellTypes)\n\n        mesh.append(newMesh)\n        mesh.append(newMesh)\n\n        self.assertEqual(mesh.vertices.size, 3 * 8)\n        self.assertEqual(mesh.offsets.size, 2)\n        self.assertEqual(mesh.connectivity.size, 8)\n        self.assertEqual(mesh.cellTypes.size, 2)\n\n     
   self.assertEqual(mesh.offsets[-1], 8)\n        self.assertEqual(mesh.connectivity[-1], 7)\n\n\nclass TestVisDump(unittest.TestCase):\n    \"\"\"Test dumping a whole reactor and some specific block types.\"\"\"\n\n    @classmethod\n    def setUpClass(cls):\n        caseSetting = settings.Settings()\n        _, cls.r = test_reactors.loadTestReactor(inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\")\n\n        cls.hexBlock = next(cls.r.core.iterBlocks())\n\n        cls.cartesianBlock = blocks.CartesianBlock(\"TestCartesianBlock\", caseSetting)\n        cartesianComponent = components.HoledSquare(\n            \"duct\",\n            \"UZr\",\n            Tinput=273.0,\n            Thot=273.0,\n            holeOD=68.0,\n            widthOuter=12.5,\n            mult=1.0,\n        )\n        cls.cartesianBlock.add(cartesianComponent)\n        cls.cartesianBlock.add(components.Circle(\"clad\", \"HT9\", Tinput=273.0, Thot=273.0, od=68.0, mult=169.0))\n\n    def test_dumpReactorVtk(self):\n        # This does a lot, and is hard to verify. at least make sure it doesn't crash\n        with TemporaryDirectoryChanger(dumpOnException=False):\n            dumper = vtk.VtkDumper(\"testVtk\", inputName=None)\n            with dumper:\n                dumper.dumpState(self.r)\n\n    def test_dumpReactorXdmf(self):\n        # This does a lot, and is hard to verify. 
at least make sure it doesn't crash\n        with TemporaryDirectoryChanger(dumpOnException=False):\n            db = Database(\"testDatabase.h5\", \"w\")\n            with db:\n                db.writeToDB(self.r)\n            dumper = xdmf.XdmfDumper(\"testVtk\", inputName=\"testDatabase.h5\")\n            with dumper:\n                dumper.dumpState(self.r)\n\n    def test_hexMesh(self):\n        mesh = utils.createBlockMesh(self.hexBlock)\n\n        self.assertEqual(mesh.vertices.size, 12 * 3)\n        self.assertEqual(mesh.cellTypes[0], 16)\n\n    def test_cartesianMesh(self):\n        mesh = utils.createBlockMesh(self.cartesianBlock)\n\n        self.assertEqual(mesh.vertices.size, 8 * 3)\n        self.assertEqual(mesh.cellTypes[0], 12)\n"
  },
  {
    "path": "armi/bookkeeping/visualization/tests/test_xdmf.py",
    "content": "# Copyright 2020 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nfrom armi.bookkeeping.visualization import xdmf\n\n\nclass TestXdmf(unittest.TestCase):\n    \"\"\"\n    Test XDMF-specific functionality.\n\n    This is for testing XDMF functions that can reasonably be tested in a vacuum. The\n    main dump methods are hard to test without resorting to checking whole files, which\n    isn't particularly useful. Those tests can be found in test_vis.\n    \"\"\"\n\n    def test_dedupTimes(self):\n        # no duplicates\n        self.assertEqual(\n            xdmf.XdmfDumper._dedupTimes([1.0 * t for t in range(10)]),\n            [1.0 * t for t in range(10)],\n        )\n\n        # ends in duplicates\n        self.assertEqual(\n            xdmf.XdmfDumper._dedupTimes([0.0, 1.0, 2.0, 2.0, 3.0, 4.0, 4.0, 4.0]),\n            [0.0, 1.0, 2.0, 2.000000002, 3.0, 4.0, 4.000000004, 4.000000008],\n        )\n\n        # ends in unique\n        self.assertEqual(\n            xdmf.XdmfDumper._dedupTimes([0.0, 1.0, 2.0, 2.0, 3.0, 4.0, 4.0, 4.0, 5.0]),\n            [0.0, 1.0, 2.0, 2.000000002, 3.0, 4.0, 4.000000004, 4.000000008, 5.0],\n        )\n\n        # all duplicates\n        self.assertEqual(\n            xdmf.XdmfDumper._dedupTimes([0.0] * 5),\n            [0.0, 1e-09, 2e-09, 3.0000000000000004e-09, 4e-09],\n        )\n\n        # single value\n        self.assertEqual(\n            
xdmf.XdmfDumper._dedupTimes([1.0]),\n            [1.0],\n        )\n\n        # empty list\n        self.assertEqual(\n            xdmf.XdmfDumper._dedupTimes([]),\n            [],\n        )\n\n        with self.assertRaises(AssertionError):\n            # input should be sorted\n            xdmf.XdmfDumper._dedupTimes([float(t) for t in reversed(range(10))])\n"
  },
  {
    "path": "armi/bookkeeping/visualization/utils.py",
    "content": "# Copyright 2020 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nUtility classes/functions for visualization.\n\nMost of these are derived from the VTK format, which tends to be general enough to\nsupport other formats. Most of the work goes into figuring out where the vertices should\nbe for a given block/assembly shape. If this coupling becomes problematic, abstractions\nfor primitive shapes should be created.\n\"\"\"\n\nimport math\n\nimport numpy as np\nfrom pyevtk.hl import unstructuredGridToVTK\nfrom pyevtk.vtk import VtkHexahedron, VtkQuadraticHexahedron\n\nfrom armi.reactor import assemblies, blocks, reactors\nfrom armi.utils import hexagon\n\n# The hex prism cell type is not very well-documented, and so is not described in\n# pyevtk. Digging into the header reveals that `16` does the trick.\n_HEX_PRISM_TID = 16\n\n\nclass VtkMesh:\n    \"\"\"\n    Container for VTK unstructured mesh data.\n\n    This provides a container for the necessary data to describe a mesh to VTK (vertex\n    locations, connectivity, offsets, cell types). 
It supports appending one set of mesh\n    data onto another, handling the necessary index offsets.\n\n    While the specifics are somewhat specific to the VTK format, the concept of storing\n    a bunch of vertices and their connectivity is a relatively general one, so this may\n    be of use to other formats as well.\n    \"\"\"\n\n    def __init__(self, vertices, connectivity, offsets, cellTypes):\n        \"\"\"\n        Parameters\n        ----------\n        vertices : np.ndarray\n            An Nx3 numpy array with one row per (x,y,z) vertex\n        connectivity : np.ndarray\n            A 1-D array containing the vertex indices belonging to each cell\n        offsets : np.ndarray\n            A 1-D array containing the index of the first vertex for the next cell\n        cellTypes : np.ndarray\n            A 1-D array containing the cell type ID for each cell\n        \"\"\"\n        self.vertices = vertices\n        self.connectivity = connectivity\n        self.offsets = offsets\n        self.cellTypes = cellTypes\n\n    @staticmethod\n    def empty():\n        return VtkMesh(\n            np.empty((0, 3), dtype=np.float64),\n            np.array([], dtype=np.int32),\n            np.array([], dtype=np.int32),\n            np.array([], dtype=np.int32),\n        )\n\n    @property\n    def x(self):\n        return np.array(self.vertices[:, 0])\n\n    @property\n    def y(self):\n        return np.array(self.vertices[:, 1])\n\n    @property\n    def z(self):\n        return np.array(self.vertices[:, 2])\n\n    def append(self, other):\n        \"\"\"Add more cells to the mesh.\"\"\"\n        connectOffset = self.vertices.shape[0]\n        offsetOffset = self.offsets[-1] if self.offsets.size > 0 else 0\n\n        self.vertices = np.vstack((self.vertices, other.vertices))\n        self.connectivity = np.append(self.connectivity, other.connectivity + connectOffset)\n        self.offsets = np.append(self.offsets, other.offsets + offsetOffset)\n        
self.cellTypes = np.append(self.cellTypes, other.cellTypes)\n\n    def write(self, path, data) -> str:\n        \"\"\"\n        Write this mesh and the passed data to a VTK file. Returns the base path, plus\n        relevant extension.\n        \"\"\"\n        fullPath = unstructuredGridToVTK(\n            path,\n            self.x,\n            self.y,\n            self.z,\n            connectivity=self.connectivity,\n            offsets=self.offsets,\n            cell_types=self.cellTypes,\n            cellData=data,\n        )\n        return fullPath\n\n\ndef createReactorBlockMesh(r: reactors.Reactor) -> VtkMesh:\n    mesh = VtkMesh.empty()\n    blks = r.getChildren(deep=True, predicate=lambda o: isinstance(o, blocks.Block))\n    for b in blks:\n        mesh.append(createBlockMesh(b))\n\n    return mesh\n\n\ndef createReactorAssemMesh(r: reactors.Reactor) -> VtkMesh:\n    mesh = VtkMesh.empty()\n    assems = r.getChildren(deep=True, predicate=lambda o: isinstance(o, assemblies.Assembly))\n    for a in assems:\n        mesh.append(createAssemMesh(a))\n\n    return mesh\n\n\ndef createBlockMesh(b: blocks.Block) -> VtkMesh:\n    if isinstance(b, blocks.HexBlock):\n        return _createHexBlockMesh(b)\n    if isinstance(b, blocks.CartesianBlock):\n        return _createCartesianBlockMesh(b)\n    if isinstance(b, blocks.ThRZBlock):\n        return _createTRZBlockMesh(b)\n    else:\n        raise TypeError(\n            \"Unsupported block type `{}`. 
Supported types are: {}\".format(\n                type(b).__name__,\n                {t.__name__ for t in {blocks.CartesianBlock, blocks.HexBlock, blocks.ThRZBlock}},\n            )\n        )\n\n\ndef createAssemMesh(a: assemblies.Assembly) -> VtkMesh:\n    # Kind of hacky, but since all blocks in an assembly are the same type, let's just\n    # use the block mesh functions and change their z coordinates to match the size of\n    # the whole assem 🤯\n    mesh = createBlockMesh(a[0])\n\n    # we should only have a single VTK mesh primitive per block\n    assert len(mesh.cellTypes) == 1\n\n    zMin = a.spatialGrid._bounds[2][0]\n    zMax = a.spatialGrid._bounds[2][-1]\n\n    if mesh.cellTypes[0] == VtkHexahedron:\n        mesh.vertices[0:4, 2] = zMin\n        mesh.vertices[4:8, 2] = zMax\n    elif mesh.cellTypes[0] == _HEX_PRISM_TID:\n        mesh.vertices[0:6, 2] = zMin\n        mesh.vertices[6:12, 2] = zMax\n    elif mesh.cellTypes[0] == VtkQuadraticHexahedron.tid:\n        # again, quadratic hexahedra are a pain\n        mesh.vertices[0:4, 2] = zMin\n        mesh.vertices[8:12, 2] = zMin\n        mesh.vertices[4:8, 2] = zMax\n        mesh.vertices[12:16, 2] = zMax\n\n    return mesh\n\n\ndef _createHexBlockMesh(b: blocks.HexBlock) -> VtkMesh:\n    assert b.spatialLocator is not None\n\n    zMin = b.p.zbottom\n    zMax = b.p.ztop\n\n    gridOffset = b.spatialLocator.getGlobalCoordinates()[:2]\n    gridOffset = np.tile(gridOffset, (6, 1))\n\n    pitch = b.getPitch()\n    hexVerts2d = np.array(hexagon.corners(rotation=0)) * pitch\n    hexVerts2d += gridOffset\n\n    # we need a top and bottom hex\n    hexVerts2d = np.vstack((hexVerts2d, hexVerts2d))\n\n    # fold in z locations to get 3d coordinates\n    hexVerts = np.hstack((hexVerts2d, np.array([[zMin] * 6 + [zMax] * 6]).transpose()))\n\n    return VtkMesh(\n        hexVerts,\n        np.array(list(range(12))),\n        np.array([12]),\n        np.array([_HEX_PRISM_TID]),\n    )\n\n\ndef 
_createCartesianBlockMesh(b: blocks.CartesianBlock) -> VtkMesh:\n    assert b.spatialLocator is not None\n\n    zMin = b.p.zbottom\n    zMax = b.p.ztop\n\n    gridOffset = b.spatialLocator.getGlobalCoordinates()[:2]\n    gridOffset = np.tile(gridOffset, (4, 1))\n\n    pitch = b.getPitch()\n    halfPitchX = pitch[0] * 0.5\n    halfPitchY = pitch[0] * 0.5\n\n    rectVerts = np.array(\n        [\n            [halfPitchX, halfPitchY],\n            [-halfPitchX, halfPitchY],\n            [-halfPitchX, -halfPitchY],\n            [halfPitchX, -halfPitchY],\n        ]\n    )\n    rectVerts += gridOffset\n\n    # make top/bottom rectangles\n    boxVerts = np.vstack((rectVerts, rectVerts))\n\n    # fold in z coordinates\n    boxVerts = np.hstack((boxVerts, np.array([[zMin] * 4 + [zMax] * 4]).transpose()))\n\n    return VtkMesh(\n        boxVerts,\n        np.array(list(range(8))),\n        np.array([8]),\n        np.array([VtkHexahedron.tid]),\n    )\n\n\ndef _createTRZBlockMesh(b: blocks.ThRZBlock) -> VtkMesh:\n    # This could be improved.\n    rIn = b.radialInner()\n    rOut = b.radialOuter()\n    thIn = b.thetaInner()\n    thOut = b.thetaOuter()\n    zIn = b.p.zbottom\n    zOut = b.p.ztop\n\n    vertsRTZ = [\n        (rIn, thOut, zIn),\n        (rIn, thIn, zIn),\n        (rOut, thIn, zIn),\n        (rOut, thOut, zIn),\n        (rIn, thOut, zOut),\n        (rIn, thIn, zOut),\n        (rOut, thIn, zOut),\n        (rOut, thOut, zOut),\n        (rIn, (thIn + thOut) * 0.5, zIn),\n        ((rIn + rOut) * 0.5, thIn, zIn),\n        (rOut, (thIn + thOut) * 0.5, zIn),\n        ((rIn + rOut) * 0.5, thOut, zIn),\n        (rIn, (thIn + thOut) * 0.5, zOut),\n        ((rIn + rOut) * 0.5, thIn, zOut),\n        (rOut, (thIn + thOut) * 0.5, zOut),\n        ((rIn + rOut) * 0.5, thOut, zOut),\n        (rIn, thOut, (zIn + zOut) * 0.5),\n        (rIn, thIn, (zIn + zOut) * 0.5),\n        (rOut, thIn, (zIn + zOut) * 0.5),\n        (rOut, thOut, (zIn + zOut) * 0.5),\n    ]\n    vertsXYZ = 
np.array([[r * math.cos(th), r * math.sin(th), z] for r, th, z in vertsRTZ])\n\n    return VtkMesh(\n        vertsXYZ,\n        np.array(list(range(20))),\n        np.array([20]),\n        np.array([VtkQuadraticHexahedron.tid]),\n    )\n"
  },
  {
    "path": "armi/bookkeeping/visualization/vtk.py",
    "content": "# Copyright 2020 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nVisualization implementation for VTK files.\n\nLimitations\n-----------\nThis version of the VTK file writer comes with a number of limitations and/or aspects\nthat can be improved upon. For instance:\n\n* Only the Block and Assembly meshes and related parameters are exported to the VTK\n  file. Adding Core data is totally doable, and will be the product of future work.\n  With more considerable effort, arbitrary components may be visualizable!\n* No efforts are made to de-duplicate the vertices in the mesh, so there are more\n  vertices than needed. Some fancy canned algorithms probably exist to do this, and it\n  wouldn't be too difficult to do here either. Also future work, but probably not super\n  important unless dealing with really big meshes.\n\"\"\"\n\nfrom typing import Any, Dict, List, Optional, Set, Tuple\n\nimport numpy as np\nfrom pyevtk.vtk import VtkGroup\n\nfrom armi import runLog\nfrom armi.bookkeeping.db import database\nfrom armi.bookkeeping.visualization import dumper, utils\nfrom armi.reactor import assemblies, blocks, composites, parameters, reactors\n\n\nclass VtkDumper(dumper.VisFileDumper):\n    \"\"\"\n    Dumper for VTK data.\n\n    This handles writing unstructured meshes and associated Block parameter data to VTK\n    files. 
The context manager keeps track of how many files have been written (one per\n    time node), and creates a group/collection file when finished.\n    \"\"\"\n\n    def __init__(self, baseName: str, inputName: str):\n        self._baseName = baseName\n        self._assemFiles: List[Tuple[str, float]] = []\n        self._blockFiles: List[Tuple[str, float]] = []\n\n    def dumpState(\n        self,\n        r: reactors.Reactor,\n        includeParams: Optional[Set[str]] = None,\n        excludeParams: Optional[Set[str]] = None,\n    ):\n        \"\"\"\n        Dump a reactor to a VTK file.\n\n        Parameters\n        ----------\n        r : reactors.Reactor\n            The reactor state to visualize\n        includeParams : list of str, optional\n            A list of parameter names to include in the viz file. Defaults to all\n            params.\n        excludeParams : list of str, optional\n            A list of parameter names to exclude from the output. Defaults to no params.\n        \"\"\"\n        cycle = r.p.cycle\n        timeNode = r.p.timeNode\n\n        # you never know...\n        assert cycle < 1000\n        assert timeNode < 1000\n\n        # We avoid using cXnY, since VisIt doesn't support .pvd files, but *does* know\n        # to lump data with similar file names and integers at the end.\n        blockPath = \"{}_blk_{:0>3}{:0>3}\".format(self._baseName, cycle, timeNode)\n        assemPath = \"{}_asy_{:0>3}{:0>3}\".format(self._baseName, cycle, timeNode)\n\n        # include and exclude params are mutually exclusive\n        if includeParams is not None and excludeParams is not None:\n            raise ValueError(\"includeParams and excludeParams can not both be used at the same time\")\n\n        blks = r.getChildren(deep=True, predicate=lambda o: isinstance(o, blocks.Block))\n        assems = r.getChildren(deep=True, predicate=lambda o: isinstance(o, assemblies.Assembly))\n\n        blockMesh = utils.createReactorBlockMesh(r)\n        
assemMesh = utils.createReactorAssemMesh(r)\n\n        # collect param data\n        blockData = _collectObjectData(blks, includeParams, excludeParams)\n        assemData = _collectObjectData(assems, includeParams, excludeParams)\n        # block number densities are special, since they aren't stored as params\n        blockNdens = database.collectBlockNumberDensities(blks)\n        # we need to copy the number density vectors to guarantee unit stride, which\n        # pyevtk requires. Kinda seems like something it could do for us, but oh well.\n        blockNdens = {key: np.array(value) for key, value in blockNdens.items()}\n        blockData.update(blockNdens)\n\n        fullPath = blockMesh.write(blockPath, blockData)\n        self._blockFiles.append((fullPath, r.p.time))\n\n        fullPath = assemMesh.write(assemPath, assemData)\n        self._assemFiles.append((fullPath, r.p.time))\n\n    def __enter__(self):\n        self._assemFiles = []\n        self._blockFiles = []\n\n    def __exit__(self, type, value, traceback):\n        assert len(self._assemFiles) == len(self._blockFiles)\n        if len(self._assemFiles) > 1:\n            # multiple files need to be wrapped up into groups. 
VTK does not like having\n            # multiple meshes in the same group, so we write out separate Collection\n            # files for them\n            asyGroup = VtkGroup(f\"{self._baseName}_asm\")\n            for path, time in self._assemFiles:\n                asyGroup.addFile(filepath=path, sim_time=time)\n            asyGroup.save()\n\n            blockGroup = VtkGroup(f\"{self._baseName}_blk\")\n            for path, time in self._blockFiles:\n                blockGroup.addFile(filepath=path, sim_time=time)\n            blockGroup.save()\n\n\ndef _collectObjectData(\n    objs: List[composites.ArmiObject],\n    includeParams: Optional[Set[str]] = None,\n    excludeParams: Optional[Set[str]] = None,\n) -> Dict[str, Any]:\n    allData = dict()\n\n    for pDef in type(objs[0]).pDefs.toWriteToDB(parameters.SINCE_ANYTHING):\n        if includeParams is not None and pDef.name not in includeParams:\n            continue\n        if excludeParams is not None and pDef.name in excludeParams:\n            continue\n\n        data = []\n        for obj in objs:\n            val = obj.p[pDef.name]\n            data.append(val)\n\n        data = np.array(data)\n\n        if data.dtype.kind == \"S\" or data.dtype.kind == \"U\":\n            # no string support!\n            continue\n        if data.dtype.kind == \"O\":\n            # datatype is \"object\", usually because it's jagged, or has Nones. We are\n            # willing to try handling the Nones, but jagged also isn't visualizable.\n            nones = np.where([d is None for d in data])[0]\n\n            if len(nones) == data.shape[0]:\n                # all Nones, so give up\n                continue\n\n            if len(nones) == 0:\n                # looks like Nones had nothing to do with it. 
bail\n                continue\n\n            try:\n                data = database.replaceNonesWithNonsense(data, pDef.name, nones=nones)\n            except (ValueError, TypeError):\n                # Looks like we have some weird data. We might be able to handle it\n                # with more massaging, but probably not visualizable anyhow\n                continue\n\n            if data.dtype.kind == \"O\":\n                # Didn't work\n                runLog.warning(\n                    \"The parameter data for  `{}` could not be coerced into a native type for output; skipping.\".format(\n                        pDef.name\n                    )\n                )\n                continue\n        if len(data.shape) != 1:\n            # We aren't interested in vector data on each block\n            continue\n        allData[pDef.name] = data\n\n    return allData\n"
  },
  {
    "path": "armi/bookkeeping/visualization/xdmf.py",
    "content": "# Copyright 2020 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nSupport for dumping XDMF files.\n\n`XDMF <http://www.xdmf.org/index.php/Main_Page>`_ is a data interchange format that\nallows for separate representation of the data itself and a description of how those\ndata are to be interpreted. The data description (\"light\" data) lives in an XML file,\nwhile the actual data (in our case, data to be plotted), as well as the data describing\nthe mesh (\"hard\" data) can be stored in HDF5 files, binary files, or embedded directly\ninto the XML file. In most cases, this allows for visualizing data directly out of an\nARMI database file. Using the ``XdmfDumper`` will produce an XML file (with an ``.xdmf``\nextension) containing the description of data, as well as an HDF5 file containing the\nmesh. Together with the input database, the ``.xdmf`` file can be opened in a\nvisualization tool that supports XDMF.\n\n.. note::\n    Paraview seems to have rather good support for XDMF, while VisIt does not. The main\n    issue seems to be that VisIt does not properly render the general polyhedra that\n    XDMF supports. Unfortunately, we __need__ to use this to show hexagonal geometries,\n    since it's the only way to get a hexagonal prism without splitting up the mesh into\n    wedges. 
To do that would require splitting the parameter data, which would defeat\n    the main benefit of using XMDF in the first place (to be able to plot out of the\n    original Database file). Cartesian and R-X-Theta geometries in VisIt seem to work\n    fine.\n\"\"\"\n\nimport io\nimport math\nimport pathlib\nimport xml.dom.minidom\nimport xml.etree.ElementTree as ET\nfrom typing import Dict, List, Optional, Set, Tuple\n\nimport h5py\nimport numpy as np\n\nfrom armi import runLog\nfrom armi.bookkeeping.db import database\nfrom armi.bookkeeping.visualization import dumper, utils\nfrom armi.reactor import assemblies, blocks, composites, reactors\n\n_VTK_TO_XDMF_CELLS = {16: 16}\n\n_POLYHEDRON = 16\n_HEXAHEDRON = 9\n_QUADRATIC_HEXAHEDRON = 48\n\n# The topology of a hexagonal prism, represented as a general polyhedron. To get this in\n# proper XDMF, these need to be offset to the proper vertex indices in the full mesh,\n# and have the number of face vertices inserted into the proper locations (notice the\n# [0] placeholders).\n_HEX_PRISM_TOPO = np.array(\n    [0]\n    + list(range(6))\n    + [0]\n    + list(range(6, 12))\n    + [0]\n    + [0, 1, 7, 6]\n    + [0]\n    + [1, 2, 8, 7]\n    + [0]\n    + [2, 3, 9, 8]\n    + [0]\n    + [3, 4, 10, 9]\n    + [0]\n    + [4, 5, 11, 10]\n    + [0]\n    + [5, 0, 6, 11]\n)\n\n# The indices of the placeholder zeros from _HEX_PRISM_TOPO array above\n_HEX_PRISM_FACE_SIZE_IDX = np.array([0, 7, 14, 19, 24, 29, 34, 39])\n\n# The number of vertices for each face\n_HEX_PRISM_FACE_SIZES = np.array([6, 6, 4, 4, 4, 4, 4, 4])\n\n\ndef _getAttributesFromDataset(d: h5py.Dataset) -> Dict[str, str]:\n    dataType = {\n        np.dtype(\"int32\"): \"Int\",\n        np.dtype(\"int64\"): \"Int\",\n        np.dtype(\"float32\"): \"Float\",\n        np.dtype(\"float64\"): \"Float\",\n    }[d.dtype]\n\n    precision = {\n        np.dtype(\"int32\"): \"4\",\n        np.dtype(\"int64\"): \"8\",\n        np.dtype(\"float32\"): \"4\",\n        
np.dtype(\"float64\"): \"8\",\n    }[d.dtype]\n\n    return {\n        \"Dimensions\": \" \".join(str(i) for i in d.shape),\n        \"DataType\": dataType,\n        \"Precision\": precision,\n        \"Format\": \"HDF\",\n    }\n\n\nclass XdmfDumper(dumper.VisFileDumper):\n    \"\"\"\n    VisFileDumper implementation for XDMF format.\n\n    The general strategy of this dumper is to create a new HDF5 file that contains just\n    the necessary mesh information for each dumped time step. The XML that\n    describes/points to these data is stored internally as ``ElementTree`` objects until\n    the end. When all time steps have been processed, these elements have time\n    information added to them, and are collected into a \"TemporalCollection\" Grid and\n    written to an ``.xdmf`` file.\n    \"\"\"\n\n    def __init__(self, baseName: str, inputName: Optional[str] = None):\n        self._baseName = baseName\n        if inputName is None:\n            runLog.warning(\"No input database name was given, so only an XMDF mesh will be created\")\n        self._inputName = inputName\n\n        # Check that the inputName is a relative path. XDMF doesn't seem to like\n        # absolute paths; at least on windows with ParaView\n        if pathlib.Path(inputName).is_absolute():\n            raise ValueError(\n                \"XDMF tools tend not to like absolute paths; provide a relative path to the input database.\"\n            )\n\n        self._meshH5 = None\n        self._inputDb = None\n        self._times = []\n        self._blockGrids = []\n        self._assemGrids = []\n\n    def __enter__(self):\n        \"\"\"\n        Prepare to write states.\n\n        The dumper keeps track of ``<Grid>`` tags that need to be written into a\n        Collection at the end. 
This also opens an auxiliary HDF5 file for writing meshes\n        at each time step.\n        \"\"\"\n        self._meshH5 = h5py.File(self._baseName + \"_mesh.h5\", \"w\")\n\n        if self._inputName is None:\n            # we could handle the case where the database wasn't passed by pumping state\n            # into a new h5 file, but why?\n            raise ValueError(\"Input database needed to generate XDMF output!\")\n\n        self._inputDb = database.Database(self._inputName, \"r\")\n        with self._inputDb as db:\n            dbVersion = db.version\n\n        if math.floor(float(dbVersion)) != 3:\n            raise ValueError(\"XDMF output requires Database version 3. Got version `{}`\".format(dbVersion))\n\n        self._times = []\n        self._blockGrids = []\n        self._assemGrids = []\n\n    def __exit__(self, type, value, traceback):\n        \"\"\"\n        Finalize file writing.\n\n        This writes all of the ``<Grid>`` tags into a Collection for all time steps, and\n        closes the input database and mesh-bearing HDF5 file.\n        \"\"\"\n        self._meshH5.close()\n        self._meshH5 = None\n        if self._inputDb is not None:\n            self._inputDb.close()\n            self._inputDb = None\n\n        timeCollectionBlk = ET.Element(\"Grid\", attrib={\"GridType\": \"Collection\", \"CollectionType\": \"Temporal\"})\n        timeCollectionAsm = ET.Element(\"Grid\", attrib={\"GridType\": \"Collection\", \"CollectionType\": \"Temporal\"})\n\n        # make sure all times are unique. 
Paraview will crash if they are not\n        times = self._dedupTimes(self._times)\n\n        for aGrid, bGrid, time in zip(self._assemGrids, self._blockGrids, times):\n            timeElement = ET.Element(\"Time\", attrib={\"TimeType\": \"Single\", \"Value\": str(time)})\n            bGrid.append(timeElement)\n            timeCollectionBlk.append(bGrid)\n\n            aGrid.append(timeElement)\n            timeCollectionAsm.append(aGrid)\n\n        for collection, typ in [\n            (timeCollectionBlk, \"_blk\"),\n            (timeCollectionAsm, \"_asm\"),\n        ]:\n            xdmf = ET.Element(\"Xdmf\", attrib={\"Version\": \"3.0\"})\n            domain = ET.Element(\"Domain\", attrib={\"Name\": \"Reactor\"})\n\n            domain.append(collection)\n            xdmf.append(domain)\n\n            # Write to an internal buffer so that we can print more fancy below\n            tree = ET.ElementTree(element=xdmf)\n            buf = io.StringIO()\n            tree.write(buf, encoding=\"unicode\")\n            buf.seek(0)\n\n            # Round-trip through minidom to do the pretty print\n            dom = xml.dom.minidom.parse(buf)\n            with open(self._baseName + typ + \".xdmf\", \"w\") as f:\n                f.write(dom.toprettyxml())\n\n    @staticmethod\n    def _dedupTimes(times: List[float]) -> List[float]:\n        \"\"\"\n        Make sure that no two times are the same.\n\n        Duplicates will be resolved by bumping each subsequent duplicate time forward by\n        some epsilon, cascading following duplicates by the same amount until no\n        duplicates remain. This will fail in the case where there are already times that\n        are within Ndup*epsilon of each other. 
In such cases, this function probably\n        isn't valid anyways.\n        \"\"\"\n        assert all(a <= b for a, b in zip(times, times[1:])), \"Input list must be sorted\"\n\n        # This should be used as a multiplicative epsilon, to avoid precision issues\n        # with large times\n        _EPS = 1.0e-9\n\n        # ...except when close enough to 0. Floating-point is a pain\n        mapZeroToOne = lambda x: x if x > _EPS else 1.0\n\n        dups = [0] * len(times)\n\n        # We iterate in reverse so that each entry in dups will contain the number of\n        # duplicate entries that **precede** it\n        for i in reversed(range(len(times))):\n            ti = times[i]\n            nDup = 0\n            for j in range(i - 1, -1, -1):\n                if times[j] == ti:\n                    nDup += 1\n                else:\n                    break\n            dups[i] = nDup\n\n        return [t + dups * _EPS * mapZeroToOne(t) for dups, t in zip(dups, times)]\n\n    def dumpState(\n        self,\n        r: reactors.Reactor,\n        includeParams: Optional[Set[str]] = None,\n        excludeParams: Optional[Set[str]] = None,\n    ):\n        \"\"\"Produce a ``<Grid>`` for a single timestep, as well as supporting HDF5 datasets.\"\"\"\n        cycle = r.p.cycle\n        node = r.p.timeNode\n\n        timeGroupName = database.getH5GroupName(cycle, node)\n\n        # careful here! we are trying to use the database datasets as the source of hard\n        # data without copying, so the order that we make the mesh needs to be the same\n        # order as the data in the database. There is no guarantee that the way a loaded\n        # reactor is ordered is the same way that it was ordered in the database (though\n        # perhaps we should do some work to specify that better). 
We need to look at the\n        # layout in the input database to re-order the objects.\n        with self._inputDb as db:\n            layout = db.getLayout(cycle, node)\n\n        snToIdx = {sn: i for i, sn in zip(layout.indexInData, layout.serialNum)}\n\n        blks = r.getChildren(deep=True, predicate=lambda o: isinstance(o, blocks.Block))\n        blks = sorted(blks, key=lambda b: snToIdx[b.p.serialNum])\n\n        assems = r.getChildren(deep=True, predicate=lambda o: isinstance(o, assemblies.Assembly))\n        assems = sorted(assems, key=lambda a: snToIdx[a.p.serialNum])\n\n        blockGrid = self._makeBlockMesh(r, snToIdx)\n        self._collectObjectData(blks, timeGroupName, blockGrid)\n\n        assemGrid = self._makeAssemblyMesh(r, snToIdx)\n        self._collectObjectData(assems, timeGroupName, assemGrid)\n\n        self._blockGrids.append(blockGrid)\n        self._assemGrids.append(assemGrid)\n        self._times.append(r.p.time)\n\n    def _collectObjectData(self, objs: List[composites.ArmiObject], timeGroupName, node: ET.Element):\n        \"\"\"\n        Scan for things that look plottable in the input database.\n\n        \"Plottable\" things are anything that have int or float data, and the same number\n        of elements as there are objects.\n\n        .. warning::\n            This makes some assumptions as to the structure of the database.\n        \"\"\"\n        if self._inputDb is None:\n            # If we weren't given a database to draw data from, we will just skip this\n            # for now. 
Most of the time, a dumper should have an input database.\n            # Otherwise, this **could** extract from the reactor state.\n            return\n\n        typeNames = {type(o).__name__ for o in objs}\n        if len(typeNames) != 1:\n            raise ValueError(\"Currently only supporting homogeneous block types\")\n        typeName = next(iter(typeNames))\n        dataGroupName = \"/\".join((timeGroupName, typeName))\n        with self._inputDb as db:\n            for key, val in db.h5db[dataGroupName].items():\n                if val.shape != (len(objs),):\n                    continue\n                try:\n                    dataItem = ET.Element(\"DataItem\", attrib=_getAttributesFromDataset(val))\n                except KeyError:\n                    continue\n                dataItem.text = \":\".join((db.fileName, val.name))\n                attrib = ET.Element(\n                    \"Attribute\",\n                    attrib={\"Name\": key, \"Center\": \"Cell\", \"AttributeType\": \"Scalar\"},\n                )\n                attrib.append(dataItem)\n                node.append(attrib)\n\n    def _makeBlockMesh(self, r: reactors.Reactor, indexMap) -> ET.Element:\n        cycle = r.p.cycle\n        node = r.p.timeNode\n\n        blks = r.getChildren(deep=True, predicate=lambda o: isinstance(o, blocks.Block))\n        blks = sorted(blks, key=lambda b: indexMap[b.p.serialNum])\n\n        groupName = \"c{}n{}\".format(cycle, node)\n\n        # VTK stuff turns out to be pretty flexible\n        blockMesh = utils.VtkMesh.empty()\n        for b in blks:\n            blockMesh.append(utils.createBlockMesh(b))\n\n        verts = blockMesh.vertices\n\n        verticesInH5 = groupName + \"/blk_vertices\"\n        self._meshH5[verticesInH5] = verts\n\n        topoValues = np.array([], dtype=np.int32)\n        offset = 0\n        for b in blks:\n            nVerts, cellTopo = _getTopologyFromShape(b, offset)\n            topoValues = np.append(topoValues, 
cellTopo)\n            offset += nVerts\n\n        topoInH5 = groupName + \"/blk_topology\"\n        self._meshH5[topoInH5] = topoValues\n\n        return self._makeGenericMesh(\"Blocks\", len(blks), self._meshH5[verticesInH5], self._meshH5[topoInH5])\n\n    def _makeAssemblyMesh(self, r: reactors.Reactor, indexMap) -> ET.Element:\n        cycle = r.p.cycle\n        node = r.p.timeNode\n        asys = r.getChildren(deep=True, predicate=lambda o: isinstance(o, assemblies.Assembly))\n        asys = sorted(asys, key=lambda b: indexMap[b.p.serialNum])\n\n        groupName = \"c{}n{}\".format(cycle, node)\n\n        # VTK stuff turns out to be pretty flexible\n        assemMesh = utils.VtkMesh.empty()\n        for assem in asys:\n            assemMesh.append(utils.createAssemMesh(assem))\n\n        verts = assemMesh.vertices\n\n        verticesInH5 = groupName + \"/asy_vertices\"\n        self._meshH5[verticesInH5] = verts\n\n        topoValues = np.array([], dtype=np.int32)\n        offset = 0\n        for a in asys:\n            nVerts, cellTopo = _getTopologyFromShape(a[0], offset)\n            topoValues = np.append(topoValues, cellTopo)\n            offset += nVerts\n\n        topoInH5 = groupName + \"/asy_topology\"\n        self._meshH5[topoInH5] = topoValues\n\n        return self._makeGenericMesh(\"Assemblies\", len(asys), self._meshH5[verticesInH5], self._meshH5[topoInH5])\n\n    @staticmethod\n    def _makeGenericMesh(name: str, nCells: int, vertexData: h5py.Dataset, topologyData: h5py.Dataset) -> ET.Element:\n        grid = ET.Element(\"Grid\", attrib={\"GridType\": \"Uniform\", \"Name\": name})\n        geometry = ET.Element(\"Geometry\", attrib={\"GeometryType\": \"XYZ\"})\n        geomData = ET.Element(\n            \"DataItem\",\n            attrib={\n                \"Dimensions\": \"{} {}\".format(*vertexData.shape),\n                \"NumberType\": \"Float\",\n                \"Format\": \"HDF\",\n            },\n        )\n\n        geomData.text = 
\":\".join((vertexData.file.filename, vertexData.name))\n        geometry.append(geomData)\n\n        topology = ET.Element(\n            \"Topology\",\n            attrib={\"TopologyType\": \"Mixed\", \"NumberOfElements\": str(nCells)},\n        )\n\n        topoData = ET.Element(\n            \"DataItem\",\n            attrib={\n                \"Dimensions\": \"{}\".format(topologyData.size),\n                \"NumberType\": \"Int\",\n                \"Format\": \"HDF\",\n            },\n        )\n        topoData.text = \":\".join((topologyData.file.filename, topologyData.name))\n        topology.append(topoData)\n\n        grid.append(geometry)\n        grid.append(topology)\n\n        return grid\n\n\ndef _getTopologyFromShape(b: blocks.Block, offset: int) -> Tuple[int, List[int]]:\n    \"\"\"\n    Returns the number of vertices used to make the shape, and XDMF topology values.\n\n    The size of the XDMF topology values cannot be used directly in computing the next\n    offset because it sometimes contains vertex indices __and__ sizing information.\n    \"\"\"\n    if isinstance(b, blocks.HexBlock):\n        # polyhedron, 8 faces\n        prefix = [_POLYHEDRON, 8]\n        topo = _HEX_PRISM_TOPO + offset\n        topo[_HEX_PRISM_FACE_SIZE_IDX] = _HEX_PRISM_FACE_SIZES\n        topo = np.append(prefix, topo)\n\n        return 12, topo\n\n    if isinstance(b, blocks.CartesianBlock):\n        return (\n            8,\n            [\n                _HEXAHEDRON,\n            ]\n            + list(range(offset, offset + 8)),\n        )\n    if isinstance(b, blocks.ThRZBlock):\n        return 20, [_QUADRATIC_HEXAHEDRON] + list(range(offset, offset + 20))\n\n    else:\n        raise TypeError(\"Unsupported block type `{}`\".format(type(b)))\n"
  },
  {
    "path": "armi/cases/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nCase and CaseSuite objects for running and analyzing ARMI cases.\n\nA ``Case`` is a collection of inputs that represents one particular run. Cases have special knowledge about dependencies\nand can perform useful operations like compare, clone, and run.\n\nA ``CaseSuite`` is a set of (often related) Cases. These are fundamental to parameter sweeps and test suites.\n\nSee Also\n--------\narmi.cli : Entry points that build Cases and/or CaseSuites and send them off to do work\narmi.operators : Operations that ARMI will perform on a reactor model.\n    Generally these are made by an individual Case.\n\nExamples\n--------\nCreate a Case and run it::\n\n    case = Case(settings.Settings(\"path-to-settings.yaml\"))\n    case.run()\n\n    # do something with output database\n\nCreate a case suite from existing files, and run the suite::\n\n    cs = settings.Settings()  # default settings\n    suite = CaseSuite(settings.Settings())  # default settings\n    suite.discover(\"my-cases*.yaml\", recursive=True)\n    suite.run()\n\n.. 
warning:: Suite running may not work yet if the cases have interdependencies.\n\nCreate a ``burnStep`` sensitivity study from some base CS::\n\n    baseCase = Case(settings.Settings(\"base-settings.yaml\"))  # default settings\n    suite = CaseSuite(baseCase.cs)  # basically just sets armiLocation\n\n    for numSteps in range(3, 11):\n        with ForcedCreationDirectoryChanger(\"{}steps\".format(numSteps)):\n            case = baseCase.clone(title=baseCase.title + f\"-with{numSteps}steps\", settings={\"burnSteps\": numSteps})\n            suite.add(case)\n\n    suite.writeInputs()\n\nThen submit the inputs to your HPC cluster.\n\"\"\"\n\nfrom armi.cases.case import Case  # noqa: F401\nfrom armi.cases.suite import CaseSuite  # noqa: F401\n"
  },
  {
    "path": "armi/cases/case.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThe ``Case`` object is responsible for running, and executing a set of user inputs. Many entry\npoints redirect into ``Case`` methods, such as ``clone``, ``compare``, and ``run``.\n\nThe ``Case`` object provides an abstraction around ARMI inputs to allow for manipulation and\ncollection of cases.\n\nSee Also\n--------\narmi.cases.suite : A collection of Cases\n\"\"\"\n\nimport ast\nimport cProfile\nimport glob\nimport io\nimport os\nimport pathlib\nimport pstats\nimport re\nimport sys\nimport textwrap\nimport time\nimport trace\nfrom typing import Dict, Optional, Sequence, Set, Union\n\nimport coverage\n\nfrom armi import context, getPluginManager, interfaces, operators, runLog, settings\nfrom armi.bookkeeping.db import compareDatabases\nfrom armi.nucDirectory import nuclideBases\nfrom armi.physics.neutronics.settings import CONF_LOADING_FILE\nfrom armi.reactor import blueprints, reactors\nfrom armi.utils import pathTools, tabulate, textProcessors\nfrom armi.utils.customExceptions import NonexistentSetting\nfrom armi.utils.directoryChangers import (\n    DirectoryChanger,\n    ForcedCreationDirectoryChanger,\n)\n\n# Change from default .coverage to help with Windows dotfile issues.\n# Must correspond with data_file entry in `pyproject.toml`!\nCOVERAGE_RESULTS_FILE = \"coverage_results.cov\"\n\n\nclass Case:\n    \"\"\"\n    An ARMI Case that can be used 
for suite set up and post-analysis.\n\n    A Case is capable of loading inputs, checking that they are valid, and initializing a reactor\n    model. Cases can also compare against other cases and be collected into multiple\n    :py:class:`armi.cases.suite.CaseSuite`.\n    \"\"\"\n\n    def __init__(self, cs, caseSuite=None, bp=None):\n        \"\"\"\n        Initialize a Case from user input.\n\n        Parameters\n        ----------\n        cs : Settings\n            Settings for this Case\n        caseSuite : CaseSuite, optional\n            CaseSuite this particular case belongs. Passing this in allows dependency tracking\n            across the other cases (e.g. if one case uses the output of another as input, as happens\n            in in-use testing for reactivity coefficient snapshot testing or more complex analysis\n            sequences).\n        bp : Blueprints, optional\n            :py:class:`armi.reactor.blueprints.Blueprints` object containing the assembly\n            definitions and other information. If not supplied, it will be loaded from the ``cs`` as\n            needed.\n        \"\"\"\n        self._startTime = time.time()\n        self._caseSuite = caseSuite\n        self._tasks = []\n        self._dependencies: Set[Case] = set()\n        self.enabled = True\n\n        # set the signal if the user passes in a blueprint object, instead of a file\n        if bp is not None:\n            cs.filelessBP = True\n\n        # NOTE: in order to prevent slow submission times for loading massively large blueprints\n        # (e.g. 
certain computer-generated input files), self.bp can be None.\n        self.cs = cs\n        self._bp = bp\n\n        # this is used in parameter sweeps\n        self._independentVariables = {}\n\n    @property\n    def independentVariables(self):\n        \"\"\"\n        Get dictionary of independent variables and their values.\n\n        This unpacks independent variables from the cs object's independentVariables setting the\n        first time it is run. This is used in parameter sweeps.\n\n        See Also\n        --------\n        writeInputs : writes the ``independentVariabls`` setting\n        \"\"\"\n        if not self._independentVariables:\n            for indepStr in self.cs[\"independentVariables\"]:\n                indepName, value = ast.literal_eval(indepStr)\n                self._independentVariables[indepName] = value\n        return self._independentVariables\n\n    def __repr__(self):\n        return \"<Case cs: {}>\".format(self.cs.path)\n\n    @property\n    def bp(self):\n        \"\"\"\n        Blueprint object for this case.\n\n        Notes\n        -----\n        This property allows lazy loading.\n        \"\"\"\n        if self._bp is None:\n            self._bp = blueprints.loadFromCs(self.cs, roundTrip=True)\n        return self._bp\n\n    @bp.setter\n    def bp(self, bp):\n        self._bp = bp\n\n    @property\n    def dependencies(self):\n        \"\"\"\n        Get a list of parent Case objects.\n\n        Notes\n        -----\n        This is performed on demand so that if someone changes the underlying Settings, the case\n        will reflect the correct dependencies. 
As a result, if this is being done iteratively,\n        you may want to cache it somehow (in a dict?).\n\n        Ideally, this should not be the responsibility of the Case, but rather the suite!\n        \"\"\"\n        dependencies = set()\n        if self._caseSuite is not None:\n            pm = getPluginManager()\n            if pm is not None:\n                for pluginDependencies in pm.hook.defineCaseDependencies(case=self, suite=self._caseSuite):\n                    dependencies.update(pluginDependencies)\n\n            # the ([^\\/]) capture basically gets the file name portion and excludes any\n            # directory separator\n            dependencies.update(\n                self.getPotentialParentFromSettingValue(\n                    self.cs[\"explicitRepeatShuffles\"],\n                    r\"^(?P<dirName>.*[\\/\\\\])?(?P<title>[^\\/\\\\]+)-SHUFFLES\\.txt$\",\n                )\n            )\n        # ensure that a case doesn't appear to be its own dependency\n        dependencies.update(self._dependencies)\n        dependencies.discard(self)\n\n        return dependencies\n\n    def addExplicitDependency(self, case):\n        \"\"\"\n        Register an explicit dependency.\n\n        When evaluating the ``dependency`` property, dynamic dependencies are probed\n        using the current case settings and plugin hooks. Sometimes, it is necessary to\n        impose dependencies that are not expressed through settings and hooks. 
This\n        method stores another case as an explicit dependency, which will be included\n        with the other, implicitly discovered, dependencies.\n        \"\"\"\n        if case in self._dependencies:\n            runLog.warning(\"The case {} is already explicitly specified as a dependency of {}\".format(case, self))\n        self._dependencies.add(case)\n\n    def getPotentialParentFromSettingValue(self, settingValue, filePattern):\n        \"\"\"\n        Get a parent case based on a setting value and a pattern.\n\n        This is a convenient way for a plugin to express a dependency. It uses the\n        ``match.groupdict`` functionality to pull the directory and case name out of a\n        specific setting value an regular expression.\n\n        Parameters\n        ----------\n        settingValue : str\n            A particular setting value that might contain a reference to an input that\n            is produced by a dependency.\n        filePattern : str\n            A regular expression for extracting the location and name of the dependency.\n            If the ``settingValue`` matches the passed pattern, this function will\n            attempt to extract the ``dirName`` and ``title`` groups to find the dependency.\n        \"\"\"\n        m = re.match(filePattern, settingValue, re.IGNORECASE)\n        deps = self._getPotentialDependencies(**m.groupdict()) if m else set()\n        if len(deps) > 1:\n            raise KeyError(\"Found more than one case matching {}\".format(settingValue))\n        return deps\n\n    def _getPotentialDependencies(self, dirName, title):\n        \"\"\"Get a parent case based on a directory and case title.\"\"\"\n        if dirName is None:\n            dirName = self.directory\n        elif not os.path.isabs(dirName):\n            dirName = os.path.join(self.directory, dirName)\n\n        def caseMatches(case):\n            if os.path.normcase(case.title) != os.path.normcase(title):\n                return False\n\n    
        return os.path.normcase(os.path.abspath(case.directory)) == os.path.normcase(os.path.abspath(dirName))\n\n        return {case for case in self._caseSuite if caseMatches(case)}\n\n    @property\n    def title(self):\n        \"\"\"The case title.\"\"\"\n        return self.cs.caseTitle\n\n    @title.setter\n    def title(self, name):\n        self.cs.caseTitle = name\n\n    @property\n    def dbName(self):\n        \"\"\"The case output database name.\"\"\"\n        return os.path.splitext(self.cs.path)[0] + \".h5\"\n\n    @property\n    def directory(self):\n        \"\"\"The working directory of the case.\"\"\"\n        return self.cs.inputDirectory\n\n    def __eq__(self, that):\n        \"\"\"\n        Compares two cases to determine if they are equivalent by looking at the ``title`` and\n        ``directory``.\n\n        Notes\n        -----\n        No other attributes except those stated above are used for the comparison; the above stated\n        attributes can be considered the \"primary key\" for a Case object and identify it as being\n        unique. Both of these comparisons are simple string comparisons, so a reference and an\n        absolute path to the same case would be considered different.\n        \"\"\"\n        return self.title == that.title and self.directory == that.directory\n\n    def __hash__(self):\n        \"\"\"Computes the hash of a Case object.\n\n        This is required when __eq__ is been defined. 
Take the hash of the tuple of the \"primary key\".\n        \"\"\"\n        return hash((self.title, self.directory))\n\n    def setUpTaskDependence(self):\n        \"\"\"\n        Set the task dependence based on the :code:`dependencies`.\n\n        This accounts for whether or not the dependency is enabled.\n        \"\"\"\n        if not self.enabled:\n            return\n\n        for dependency in self.dependencies:\n            if dependency.enabled:\n                self._tasks[0].add_parent(dependency._tasks[-1])\n\n    def run(self):\n        \"\"\"\n        Run an ARMI case.\n\n        .. impl:: The case class allows for a generic ARMI simulation.\n            :id: I_ARMI_CASE\n            :implements: R_ARMI_CASE\n\n            This method is responsible for \"running\" the ARMI simulation instigated by the inputted\n            settings. This initializes an :py:class:`~armi.operators.operator.Operator`, a\n            :py:class:`~armi.reactor.reactors.Reactor` and invokes\n            :py:meth:`Operator.operate <armi.operators.operator.Operator.operate>`. It also\n            activates supervisory things like code coverage checking, profiling, or tracing, if\n            requested by users during debugging.\n\n        Notes\n        -----\n        Room for improvement: The coverage, profiling, etc. 
stuff can probably be moved out of here\n        to a more elegant place (like a context manager?).\n        \"\"\"\n        # Start the log here so that the verbosities for the head and workers can be configured\n        # based on the user settings for the rest of the run.\n        runLog.LOG.startLog(self.cs.caseTitle)\n        if context.MPI_RANK == 0:\n            runLog.setVerbosity(self.cs[\"verbosity\"])\n        else:\n            runLog.setVerbosity(self.cs[\"branchVerbosity\"])\n\n        # if in the settings, start the coverage and profiling\n        cov = self._startCoverage()\n        profiler = self._startProfiling()\n\n        self.checkInputs()\n        o = self.initializeOperator()\n\n        with o:\n            if self.cs[\"trace\"] and context.MPI_RANK == 0:\n                # only trace primary node.\n                tracer = trace.Trace(ignoredirs=[sys.prefix, sys.exec_prefix], trace=1)\n                tracer.runctx(\"o.operate()\", globals(), locals())\n            else:\n                o.operate()\n\n        # if in the settings, report the coverage and profiling\n        Case._endCoverage(self.cs[\"coverageConfigFile\"], cov)\n        Case._endProfiling(profiler)\n\n    def _startCoverage(self):\n        \"\"\"Helper to the Case.run: spin up the code coverage tooling, if the Settings file says to.\n\n        Returns\n        -------\n        coverage.Coverage\n            Coverage object for pytest or unittest\n        \"\"\"\n        cov = None\n        if self.cs[\"coverage\"]:\n            cov = coverage.Coverage(\n                config_file=Case._getCoverageRcFile(userCovFile=self.cs[\"coverageConfigFile\"], makeCopy=True),\n                debug=[\"dataio\"],\n            )\n            if context.MPI_SIZE > 1:\n                # interestingly, you cannot set the parallel flag in the constructor without\n                # auto-specifying the data suffix. 
This should enable parallel coverage with\n                # auto-generated data file suffixes and combinations.\n                cov.config.parallel = True\n            cov.start()\n\n        return cov\n\n    @staticmethod\n    def _endCoverage(userCovFile, cov=None):\n        \"\"\"Helper to the Case.run(): stop and report code coverage, if the Settings file says to.\n\n        Parameters\n        ----------\n        userCovFile : str\n            File path to user-supplied coverage configuration file (default setting is empty string)\n        cov: coverage.Coverage (optional)\n            Hopefully, a valid and non-empty set of coverage data.\n        \"\"\"\n        if cov is None:\n            return\n\n        cov.stop()\n        cov.save()\n\n        if context.MPI_SIZE > 1:\n            context.MPI_COMM.barrier()  # force waiting for everyone to finish\n\n        if context.MPI_RANK == 0 and context.MPI_SIZE > 1:\n            # combine all the parallel coverage data files into one and make the XML and HTML\n            # reports for the whole run.\n            combinedCoverage = coverage.Coverage(config_file=Case._getCoverageRcFile(userCovFile), debug=[\"dataio\"])\n            combinedCoverage.config.parallel = True\n            # combine does delete the files it merges\n            combinedCoverage.combine()\n            combinedCoverage.save()\n            combinedCoverage.html_report()\n            combinedCoverage.xml_report()\n\n    @staticmethod\n    def _getCoverageRcFile(userCovFile, makeCopy=False):\n        \"\"\"Helper to provide the coverage configuration file according to the OS. 
A user-supplied\n        file will take precedence, and is not checked for a dot-filename.\n\n        Notes\n        -----\n        ARMI replaced the \".coveragerc\" file has been replaced by \"pyproject.toml\".\n\n        Parameters\n        ----------\n        userCovFile : str\n            File path to user-supplied coverage configuration file (default setting is empty string)\n        makeCopy : bool (optional)\n            Whether or not to copy the coverage config file to an alternate file path\n\n        Returns\n        -------\n        covFile : str\n            path of pyprojec.toml file\n        \"\"\"\n        # User-defined file takes precedence.\n        if userCovFile:\n            return os.path.abspath(userCovFile)\n\n        covRcDir = os.path.abspath(context.PROJECT_ROOT)\n        return os.path.join(covRcDir, \"pyproject.toml\")\n\n    def _startProfiling(self):\n        \"\"\"Helper to the Case.run(): start the Python profiling, if the Settings file says to.\n\n        Returns\n        -------\n        cProfile.Profile\n            Standard Python profiling object\n        \"\"\"\n        profiler = None\n        if self.cs[\"profile\"]:\n            profiler = cProfile.Profile()\n            profiler.enable(subcalls=True, builtins=True)\n\n        return profiler\n\n    @staticmethod\n    def _endProfiling(profiler=None):\n        \"\"\"Helper to the Case.run(): stop and report python profiling,\n        if the Settings file says to.\n\n        Parameters\n        ----------\n        profiler: cProfile.Profile (optional)\n            Hopefully, a valid and non-empty set of profiling data.\n        \"\"\"\n        if profiler is None:\n            return\n\n        profiler.disable()\n        profiler.dump_stats(\"profiler.{:0>3}.stats\".format(context.MPI_RANK))\n        statsStream = io.StringIO()\n        summary = pstats.Stats(profiler, stream=statsStream).sort_stats(\"cumulative\")\n        summary.print_stats()\n        if 
context.MPI_SIZE > 0 and context.MPI_COMM is not None:\n            allStats = context.MPI_COMM.gather(statsStream.getvalue(), root=0)\n            if context.MPI_RANK == 0:\n                for rank, statsString in enumerate(allStats):\n                    # using print statements because the logger has been turned off\n                    print(\"=\" * 100)\n                    print(\"{:^100}\".format(\" Profiler statistics for RANK={} \".format(rank)))\n                    print(statsString)\n                    print(\"=\" * 100)\n        else:\n            print(statsStream.getvalue())\n\n    def initializeOperator(self, r=None):\n        \"\"\"Creates and returns an Operator.\"\"\"\n        with DirectoryChanger(self.cs.inputDirectory, dumpOnException=False):\n            self._initBurnChain()\n            o = operators.factory(self.cs)\n            if r is None:\n                r = reactors.factory(self.cs, self.bp)\n            o.initializeInterfaces(r)\n            # Set this here to make sure the full duration of initialization is properly captured.\n            # Cannot be done in reactors since the above self.bp call implicitly initializes blueprints.\n            r.core.timeOfStart = self._startTime\n            return o\n\n    def _initBurnChain(self):\n        \"\"\"\n        Apply the burn chain setting to the nucDir.\n\n        Notes\n        -----\n        This is admittedly an odd place for this but the burn chain info must be applied sometime\n        after user-input has been loaded (for custom burn chains) but not long after (because nucDir\n        is framework-level and expected to be up-to-date by lots of modules).\n        \"\"\"\n        if not self.cs[\"initializeBurnChain\"]:\n            runLog.info(\"Skipping burn-chain initialization since `initializeBurnChain` setting is disabled.\")\n            return\n\n        if not os.path.exists(self.cs[\"burnChainFileName\"]):\n            raise ValueError(\n                f\"The 
burn-chain file {self.cs['burnChainFileName']} does not exist. The \"\n                \"data cannot be loaded. Fix this path or disable burn-chain initialization using \"\n                \"the `initializeBurnChain` setting.\"\n            )\n\n        with open(self.cs[\"burnChainFileName\"]) as burnChainStream:\n            nuclideBases.imposeBurnChain(burnChainStream)\n\n    def checkInputs(self):\n        \"\"\"\n        Checks ARMI inputs for consistency.\n\n        .. impl:: Perform validity checks on case inputs.\n            :id: I_ARMI_CASE_CHECK\n            :implements: R_ARMI_CASE_CHECK\n\n            This method checks the validity of the current settings. It relies on an\n            :py:class:`~armi.settings.settingsValidation.Inspector` object from the\n            :py:class:`~armi.operators.operator.Operator` to generate a list of\n            :py:class:`~armi.settings.settingsValidation.Query` objects that represent potential\n            issues in the settings. After gathering the queries, this method prints a table of query\n            \"statements\" and \"questions\" to the console. 
If running in an interactive mode, the user\n            then has the opportunity to address the questions posed by the queries by either\n            addressing the potential issue or ignoring it.\n\n        Returns\n        -------\n        bool\n            True if the inputs are all good, False otherwise\n        \"\"\"\n        runLog.header(\"=========== Settings Validation Checks ===========\")\n        with DirectoryChanger(self.cs.inputDirectory, dumpOnException=False):\n            operatorClass = operators.getOperatorClassFromSettings(self.cs)\n            inspector = operatorClass.inspector(self.cs)\n            inspectorIssues = [query for query in inspector.queries if query]\n\n            # Write out the settings validation issues that will be prompted for resolution if in an\n            # interactive session or forced to be resolved otherwise.\n            queryData = []\n            for i, query in enumerate(inspectorIssues, start=1):\n                queryData.append(\n                    (\n                        i,\n                        textwrap.fill(query.statement, width=50, break_long_words=False),\n                        textwrap.fill(query.question, width=50, break_long_words=False),\n                    )\n                )\n\n            if queryData and context.MPI_RANK == 0:\n                runLog.info(\n                    tabulate.tabulate(\n                        queryData,\n                        headers=[\"Number\", \"Statement\", \"Question\"],\n                        tableFmt=\"armi\",\n                    )\n                )\n            if context.CURRENT_MODE == context.Mode.INTERACTIVE:\n                # if interactive, ask user to deal with settings issues\n                inspector.run()\n\n            return not any(inspectorIssues)\n\n    def clone(\n        self,\n        additionalFiles=None,\n        title=None,\n        modifiedSettings=None,\n        writeStyle=\"short\",\n    ):\n        \"\"\"\n        
Clone existing ARMI inputs to current directory with optional settings modifications.\n\n        Since each case depends on multiple inputs, this is a safer way to move cases around without\n        having to wonder if you copied all the files appropriately.\n\n        Parameters\n        ----------\n        additionalFiles : list (optional)\n            additional file paths to copy to cloned case\n        title : str (optional)\n            title of new case\n        modifiedSettings : dict (optional)\n            settings to set/modify before creating the cloned case\n        writeStyle : str (optional)\n            Writing style for which settings get written back to the settings files\n            (short, medium, or full).\n\n        Raises\n        ------\n        RuntimeError\n            If the source and destination are the same\n        \"\"\"\n        cloneCS = self.cs.duplicate()\n\n        if modifiedSettings is not None:\n            cloneCS = cloneCS.modified(newSettings=modifiedSettings)\n\n        clone = Case(cloneCS)\n        clone.cs.path = pathTools.armiAbsPath(title or self.title) + \".yaml\"\n\n        if pathTools.armiAbsPath(clone.cs.path) == pathTools.armiAbsPath(self.cs.path):\n            raise RuntimeError(\n                \"The source file and destination file are the same: {}\\nCannot use armi-clone to \"\n                \"modify armi settings file.\".format(pathTools.armiAbsPath(clone.cs.path))\n            )\n\n        newSettings = copyInterfaceInputs(self.cs, clone.cs.inputDirectory)\n        newCs = clone.cs.modified(newSettings=newSettings)\n        clone.cs = newCs\n\n        runLog.important(f\"writing settings file {clone.cs.path}\")\n        clone.cs.writeToYamlFile(clone.cs.path, style=writeStyle, fromFile=self.cs.path)\n        runLog.important(f\"finished writing {clone.cs}\")\n\n        fromPath = lambda f: pathTools.armiAbsPath(self.cs.inputDirectory, f)\n\n        fileName = self.cs[CONF_LOADING_FILE]\n        if 
fileName:\n            pathTools.copyOrWarn(\n                CONF_LOADING_FILE,\n                fromPath(fileName),\n                os.path.join(clone.cs.inputDirectory, fileName),\n            )\n        else:\n            runLog.warning(f\"skipping {CONF_LOADING_FILE}, there is no file specified\")\n\n        with open(self.cs[CONF_LOADING_FILE], \"r\") as f:\n            # The root for handling YAML includes is relative to the YAML file, not the\n            # settings file\n            root = pathlib.Path(self.cs.inputDirectory) / pathlib.Path(self.cs[CONF_LOADING_FILE]).parent\n            cloneRoot = pathlib.Path(clone.cs.inputDirectory) / pathlib.Path(clone.cs[CONF_LOADING_FILE]).parent\n            for includePath, mark in textProcessors.findYamlInclusions(f, root=root):\n                if not includePath.is_absolute():\n                    includeSrc = root / includePath\n                    includeDest = cloneRoot / includePath\n                else:\n                    # don't bother copying absolute files\n                    continue\n                if not includeSrc.exists():\n                    raise OSError(\"The input file file `{}` referenced at {} does not exist.\".format(includeSrc, mark))\n                pathTools.copyOrWarn(\n                    \"auxiliary input file `{}` referenced at {}\".format(includeSrc, mark),\n                    includeSrc,\n                    includeDest,\n                )\n\n        for fileName in additionalFiles or []:\n            pathTools.copyOrWarn(\"additional file\", fromPath(fileName), clone.cs.inputDirectory)\n\n        return clone\n\n    def compare(\n        self,\n        that,\n        exclusion: Optional[Sequence[str]] = None,\n        tolerance=0.01,\n        timestepCompare=None,\n    ) -> int:\n        \"\"\"\n        Compare the output databases from two run cases. 
Return number of differences.\n\n        This is useful both for in-use testing and engineering analysis.\n        \"\"\"\n        runLog.info(\"Comparing the following databases:\\nREF: {}\\nSRC: {}\".format(self.dbName, that.dbName))\n        diffResults = compareDatabases(\n            self.dbName,\n            that.dbName,\n            tolerance=tolerance,\n            exclusions=exclusion,\n            timestepCompare=timestepCompare,\n        )\n\n        code = 1 if diffResults is None else diffResults.nDiffs()\n\n        sameOrDifferent = \"different\" if diffResults is None or diffResults.nDiffs() > 0 else \"the same\"\n        runLog.important(\"Cases are {}.\".format(sameOrDifferent))\n\n        return code\n\n    def writeInputs(self, sourceDir: Optional[str] = None, writeStyle: Optional[str] = \"short\"):\n        \"\"\"\n        Write the inputs to disk.\n\n        This allows input objects that have been modified in memory (e.g. for a parameter sweep or\n        migration) to be written out as input for a forthcoming case.\n\n        Parameters\n        ----------\n        sourceDir : str (optional)\n            The path to copy inputs from (if different from the cs.path). Needed\n            in SuiteBuilder cases to find the baseline inputs from plugins (e.g. 
shuffleLogic)\n        writeStyle : str (optional)\n            Writing style for which settings get written back to the settings files\n            (short, medium, or full).\n\n        Notes\n        -----\n        This will rename the ``loadingFile`` to ``title-blueprints + '.yaml'``.\n\n        See Also\n        --------\n        independentVariables\n            parses/reads the independentVariables setting\n\n        clone\n            Similar to this but doesn't let you write out new/modified blueprints objects\n        \"\"\"\n        with ForcedCreationDirectoryChanger(self.cs.inputDirectory, dumpOnException=False):\n            # These seemingly no-ops load the bp via properties if they are not yet initialized.\n            self.bp\n\n            newSettings = {}\n            newSettings[CONF_LOADING_FILE] = self.title + \"-blueprints.yaml\"\n            if self.independentVariables:\n                newSettings[\"independentVariables\"] = [\n                    f\"({repr(varName)}, {repr(val)})\" for varName, val in self.independentVariables.items()\n                ]\n\n            with open(newSettings[CONF_LOADING_FILE], \"w\") as loadingFile:\n                blueprints.Blueprints.dump(self.bp, loadingFile)\n\n            # copy input files from other modules/plugins\n            interfaceSettings = copyInterfaceInputs(self.cs, \".\", sourceDir)\n            for settingName, value in interfaceSettings.items():\n                newSettings[settingName] = value\n\n            self.cs = self.cs.modified(newSettings=newSettings)\n            if sourceDir:\n                fromPath = os.path.join(sourceDir, self.title + \".yaml\")\n            else:\n                fromPath = self.cs.path\n            self.cs.writeToYamlFile(f\"{self.title}.yaml\", style=writeStyle, fromFile=fromPath)\n\n\ndef _copyInputsHelper(fileDescription: str, sourcePath: str, destPath: str, origFile: str) -> str:\n    \"\"\"\n    Helper function for copyInterfaceInputs: Creates an 
absolute file path, and copies the file to\n    that location. If that file path does not exist, returns the file path from the original\n    settings file.\n\n    Parameters\n    ----------\n    fileDescription : str\n        A file description for the copyOrWarn method\n    sourcePath : str\n        The absolute file path of the file to copy\n    destPath : str\n        The target directory to copy input files to\n    origFile : str\n        File path as defined in the original settings file\n\n    Returns\n    -------\n    destFilePath (or origFile) : str\n    \"\"\"\n    sourceName = pathlib.Path(sourcePath).name\n    destFilePath = os.path.join(destPath, sourceName)\n    try:\n        pathTools.copyOrWarn(fileDescription, sourcePath, destFilePath)\n        if pathlib.Path(destFilePath).exists():\n            # the basename gets written back to the settings file to protect against potential\n            # future dir structure changes\n            return os.path.basename(destFilePath)\n        else:\n            # keep original filepath in the settings file if file copy was unsuccessful\n            return origFile\n    except Exception:\n        return origFile\n\n\ndef copyInterfaceInputs(cs, destination: str, sourceDir: Optional[str] = None) -> Dict[str, Union[str, list]]:\n    \"\"\"\n    Ping active interfaces to determine which files are considered \"input\". This enables developers\n    to add new inputs in a plugin-dependent/ modular way.\n\n    This function should now be able to handle the updating of:\n\n      - a single file (relative or absolute)\n      - a list of files (relative or absolute)\n      - a file entry that has a wildcard processing into multiple files. Glob is used to offer\n        support for wildcards.\n      - a directory and its contents\n\n    If the file paths are absolute, do nothing. 
The case will be able to find the file.\n\n    In case suites or parameter sweeps, these files often have a sourceDir associated with them that\n    is different from the cs.inputDirectory. So, if relative or wildcard, update the file paths to\n    be absolute in the case settings and copy the file to the destination directory.\n\n    Parameters\n    ----------\n    cs : Settings\n        The source case settings to find input files\n    destination : str\n        The target directory to copy input files to\n    sourceDir : str, optional\n        The directory from which to copy files. Defaults to cs.inputDirectory\n\n    Returns\n    -------\n    dict\n        A new settings object that contains settings for the keys and values that are either an\n        absolute file path, a list of absolute file paths, or the original file path if absolute\n        paths could not be resolved.\n\n    Notes\n    -----\n    Regarding the handling of relative file paths: In the future this could be simplified by adding\n    a concept for a suite root directory, below which it is safe to copy files without needing to\n    update settings that point with a relative path to files that are below it.\n    \"\"\"\n    activeInterfaces = interfaces.getActiveInterfaceInfo(cs)\n    sourceDir = sourceDir or cs.inputDirectory\n    sourceDirPath = pathlib.Path(sourceDir)\n\n    assert pathlib.Path(destination).is_dir()\n\n    newSettings = {}\n\n    for klass, _ in activeInterfaces:\n        interfaceFileNames = klass.specifyInputs(cs)\n        for key, files in interfaceFileNames.items():\n            if not isinstance(key, settings.Setting):\n                try:\n                    key = cs.getSetting(key)\n                    label = key.name\n                    isSetting = True\n                except NonexistentSetting(key):\n                    runLog.debug(f\"{key} is not a valid setting; continuing on anyway.\")\n                    label = key\n                    isSetting = 
False\n            else:\n                isSetting = True\n                label = key.name\n\n            newFiles = []\n            for f in files:\n                WILDCARD = False\n                EMPTY = False\n                ABSOLUTE = False\n                if \"*\" in f:\n                    WILDCARD = True\n                if not f:\n                    # beware: pathlib.path(\"\") returns \".\" which can be bad news, so we handle empty\n                    # strings as their own category\n                    EMPTY = True\n                path = pathlib.Path(f)\n                if not EMPTY and path.is_absolute():\n                    ABSOLUTE = True\n\n                # Attempt to construct an absolute file path\n                srcFullPath = os.path.join(sourceDirPath, f)\n                destFilePath = None\n                if WILDCARD:\n                    globFilePaths = [pathlib.Path(os.path.join(sourceDirPath, g)) for g in glob.glob(srcFullPath)]\n                    if len(globFilePaths) == 0:\n                        destFilePath = f\n                        newFiles.append(str(destFilePath))\n                    else:\n                        for gFile in globFilePaths:\n                            destFilePath = _copyInputsHelper(label, gFile, destination, f)\n                            newFiles.append(str(destFilePath))\n                elif EMPTY:\n                    pass\n                elif ABSOLUTE:\n                    if path.exists():\n                        # Path is absolute, no settings modification or filecopy needed\n                        newFiles.append(path)\n                else:\n                    # treat as a relative path\n                    destFilePath = _copyInputsHelper(label, srcFullPath, destination, f)\n                    newFiles.append(str(destFilePath))\n\n                if destFilePath == f:\n                    runLog.debug(\n                        f\"No input files for `{label}` could be resolved 
with the following path: \"\n                        f\"`{srcFullPath}`. Will not update `{label}`.\"\n                    )\n\n            # Some settings are a single filename. Others are lists of files. Make\n            # sure we are returning what the setting expects\n            if isSetting and len(newFiles):\n                if len(files) == 1 and not WILDCARD and key.name in cs and not isinstance(cs[key.name], list):\n                    newSettings[label] = newFiles[0]\n                else:\n                    newSettings[label] = newFiles\n\n    return newSettings\n"
  },
  {
    "path": "armi/cases/inputModifiers/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nCode that changes input files and writes them back out. Useful for parameter sweeps.\n\nSee Also\n--------\narmi.reactor.converters\n    Code that changes reactor objects at runtime. These often take longer to run than\n    these but can be used in the middle of ARMI analyses.\n\"\"\"\n"
  },
  {
    "path": "armi/cases/inputModifiers/inputModifiers.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Modifies inputs.\"\"\"\n\n\nclass InputModifier:\n    \"\"\"\n    Object that modifies input definitions in some well-defined way.\n\n    .. impl:: A generic tool to modify user inputs on multiple cases.\n        :id: I_ARMI_CASE_MOD1\n        :implements: R_ARMI_CASE_MOD\n\n        This class serves as an abstract base class for modifying the inputs of a case, typically\n        case settings. Child classes must implement a ``__call__`` method accepting a\n        :py:class:`~armi.settings.caseSettings.Settings` and\n        :py:class:`~armi.reactor.blueprints.Blueprints` and return the appropriately modified\n        version of these objects. The class attribute ``FAIL_IF_AFTER`` should be a tuple defining\n        what, if any, modifications this should fail if performed after. For example, one should not\n        adjust the smear density (a function of Cladding ID) before adjusting the Cladding ID. 
Some\n        generic child classes are provided in this module, but it is expected that design-specific\n        modifiers are built individually.\n    \"\"\"\n\n    FAIL_IF_AFTER = ()\n\n    def __init__(self, independentVariable=None):\n        \"\"\"\n        Constructor.\n\n        Parameters\n        ----------\n        independentVariable : dict or None, optional\n            Name/value pairs to associate with the independent variable being modified by this\n            object. Will be analyzed and plotted against other modifiers with the same name.\n        \"\"\"\n        if independentVariable is None:\n            independentVariable = {}\n        self.independentVariable = independentVariable\n\n    def __call__(self, cs, bp):\n        \"\"\"Perform the desired modifications to input objects.\"\"\"\n        raise NotImplementedError\n\n\nclass SamplingInputModifier(InputModifier):\n    \"\"\"\n    Object that modifies input definitions in some well-defined way.\n\n    (This class is abstract.)\n\n    Subclasses must implement a ``__call__`` method accepting a ``Settings``,\n    ``Blueprints``, and ``SystemLayoutInput``.\n\n    This is a modified version of the InputModifier abstract class that imposes structure for\n    parameters in a design space that will be sampled by a quasi-random sampling algorithm. 
These\n    algorithms require input modifiers to specify if the parameter is continuous or discrete and\n    have the bounds specified.\n    \"\"\"\n\n    def __init__(self, name: str, paramType: str, bounds: list, independentVariable=None):\n        \"\"\"Constructor for the Sampling input modifier.\n\n        Parameters\n        ----------\n        name: str\n            Name of input modifier.\n        paramType : str\n            specify if parameter is 'continuous' or 'discrete'\n        bounds : list\n            If continuous, provide floating points [a, b] specifying the inclusive bounds.\n            If discrete, provide a list of potential values [a, b, c, ...]\n        independentVariable : [type], optional\n            Name/value pairs to associate with the independent variable being modified\n            by this object.  Will be analyzed and plotted against other modifiers with\n            the same name, by default None\n        \"\"\"\n        InputModifier.__init__(self, independentVariable=independentVariable)\n        self.name = name\n        self.paramType = paramType\n        self.bounds = bounds\n\n    def __call__(self, cs, blueprints):\n        \"\"\"Perform the desired modifications to input objects.\"\"\"\n        raise NotImplementedError\n\n\nclass FullCoreModifier(InputModifier):\n    \"\"\"\n    Grow the SystemLayoutInput to from a symmetric core to a full core.\n\n    Notes\n    -----\n    Besides the Core, other grids may also be of interest for expansion, like a grid that defines\n    fuel management. However, the expansion of a fuel management schedule to full core is less\n    trivial than just expanding the core itself. Thus, this modifier currently does not attempt to\n    update fuel management grids, but an expanded implementation could do so in the future if\n    needed. 
For now, users must expand fuel management grids to full core themself.\n    \"\"\"\n\n    def __call__(self, cs, bp):\n        coreBp = bp.gridDesigns[\"core\"]\n        coreBp.expandToFull()\n\n        return cs, bp\n\n\nclass SettingsModifier(InputModifier):\n    \"\"\"Adjust setting to specified value.\"\"\"\n\n    def __init__(self, settingName, value):\n        InputModifier.__init__(self, independentVariable={settingName: value})\n        self.settingName = settingName\n        self.value = value\n\n    def __call__(self, cs, bp):\n        cs = cs.modified(newSettings={self.settingName: self.value})\n        return cs, bp\n\n\nclass MultiSettingModifier(InputModifier):\n    \"\"\"\n    Adjust multiple settings to specified values.\n\n    Examples\n    --------\n    >>> inputModifiers.MultiSettingModifier({CONF_NEUTRONICS_TYPE: \"both\", CONF_COARSE_MESH_REBALANCE: -1})\n\n    \"\"\"\n\n    def __init__(self, settingVals: dict):\n        InputModifier.__init__(self, independentVariable=settingVals)\n        self.settings = settingVals\n\n    def __call__(self, cs, bp):\n        newSettings = {}\n        for name, val in self.settings.items():\n            newSettings[name] = val\n\n        cs = cs.modified(newSettings=newSettings)\n        return cs, bp\n\n\nclass BluePrintBlockModifier(InputModifier):\n    \"\"\"Adjust blueprint block->component->dimension to specified value.\"\"\"\n\n    def __init__(self, block, component, dimension, value):\n        InputModifier.__init__(self, independentVariable={dimension: value})\n        self.block = block\n        self.component = component\n        self.dimension = dimension\n        self.value = value\n\n    def __call__(self, cs, bp):\n        # parse block\n        for blockDesign in bp.blockDesigns:\n            if blockDesign.name == self.block:\n                # parse component\n                for componentDesign in blockDesign:\n                    if componentDesign.name == self.component:\n               
         # set new value\n                        setattr(componentDesign, self.dimension, self.value)\n                        return cs, bp\n\n        return cs, bp\n"
  },
  {
    "path": "armi/cases/inputModifiers/neutronicsModifiers.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nModifies inputs related to neutronics controls.\n\nNotes\n-----\nThis may make more sense in the neutronics physics plugin.\n\"\"\"\n\nfrom armi.cases.inputModifiers import inputModifiers\nfrom armi.physics.neutronics.settings import (\n    CONF_EPS_EIG,\n    CONF_EPS_FSAVG,\n    CONF_EPS_FSPOINT,\n)\n\n\nclass NeutronicConvergenceModifier(inputModifiers.InputModifier):\n    \"\"\"\n    Adjust the neutronics convergence parameters ``CONF_EPS_EIG``, ``CONF_EPS_FSAVG``, and\n    ``CONF_EPS_FSPOINT``.\n\n    The supplied value is used for ``CONF_EPS_EIG``. 
``CONF_EPS_FSAVG`` and ``CONF_EPS_FSPOINT`` are\n    set to 100 times the supplied value.\n\n    This can be used to perform sensitivity studies on convergence criteria.\n    \"\"\"\n\n    def __init__(self, value):\n        inputModifiers.InputModifier.__init__(self, {self.__class__.__name__: value})\n        self.value = value\n        if value > 1e-2 or value <= 0.0:\n            raise ValueError(\n                f\"Neutronic convergence modifier value must be greater than 0 and less than 1e-2 (got {value})\"\n            )\n\n    def __call__(self, cs, bp):\n        newSettings = {}\n        newSettings[CONF_EPS_FSAVG] = self.value * 100\n        newSettings[CONF_EPS_FSPOINT] = self.value * 100\n        newSettings[CONF_EPS_EIG] = self.value\n        cs = cs.modified(newSettings=newSettings)\n\n        return cs, bp\n\n\nclass NeutronicMeshsSizeModifier(inputModifiers.InputModifier):\n    \"\"\"\n    Adjust the neutronics mesh in all assemblies by a multiplication factor.\n\n    This can be useful when switching between nodal and finite difference approximations, or when\n    doing mesh convergence sensitivity studies.\n\n    Attributes\n    ----------\n    multFactor : int\n        Factor to multiply the number of axial mesh points per block by.\n    \"\"\"\n\n    def __init__(self, multFactor):\n        inputModifiers.InputModifier.__init__(self, {self.__class__.__name__: multFactor})\n        if not isinstance(multFactor, int):\n            raise TypeError(\"multFactor must be an integer, but got {}\".format(multFactor))\n        self.multFactor = multFactor\n\n    def __call__(self, cs, bp):\n        for assemDesign in bp.assemDesigns:\n            assemDesign.axialMeshPoints = [ax * self.multFactor for ax in assemDesign.axialMeshPoints]\n\n        return cs, bp\n"
  },
  {
    "path": "armi/cases/inputModifiers/pinTypeInputModifiers.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom armi.cases.inputModifiers import inputModifiers\nfrom armi.reactor import flags\nfrom armi.reactor.components import component\nfrom armi.reactor.converters import pinTypeBlockConverters\n\n\nclass _PinTypeAssemblyModifier(inputModifiers.InputModifier):\n    \"\"\"\n    Abstract class for modifying something about a pin, within a block.\n\n    This will construct blocks, determine if the block should be modified by checking\n    the ``_getBlockTypesToModify``, and then run ``_adjustBlock(b)``. 
The ``Blueprints``\n    are then updated based on the modification assuming that dimension names match\n    exactly to ComponenBlueprint attributes (which is true, because ComponentBlueprint\n    attributes are programmatically derived from Component constructors).\n    \"\"\"\n\n    def __init__(self, value):\n        inputModifiers.InputModifier.__init__(self, {self.__class__.__name__: value})\n        self.value = value\n\n    def __call__(self, cs, bp):\n        for bDesign in bp.blockDesigns:\n            # bDesign construct requires lots of arguments, many of which have no impact.\n            # The following can safely be defaulted to meaningless inputs:\n            # axialIndex: a block can be reused at any axial index, modifications made\n            #     dependent on will not translate back to the input in a  meaningful\n            #     fashion\n            # axialMeshPoints: similar to above, this is specified by the assembly, and\n            #     a block can be within any section of an assembly.\n            # height: similar to above. a block can have any height specified by an\n            #     assembly. if height-specific modifications are required, then a new\n            #     block definition should be created in the input\n            # xsType: similar to above. a block can have any xsType specified through\n            #     the assembly definition assembly. if xsType-specific modifications are\n            #     required, then a new block definition should be created in the input\n            # materialInput: this is the materialModifications from the assembly\n            #     definition. 
if material modifications are required on a block-specific\n            #     basis, they should be edited directly\n            b = bDesign.construct(\n                cs,\n                bp,\n                axialIndex=1,\n                axialMeshPoints=1,\n                height=1,\n                xsType=\"A\",\n                materialInput={},\n            )\n\n            if not b.hasFlags(self._getBlockTypesToModify()):\n                continue\n\n            self._adjustBlock(b)\n\n            for cDesign, c in zip(bDesign, b):\n                for dimName in c.DIMENSION_NAMES:\n                    inpDim = getattr(cDesign, dimName)\n                    newDim = getattr(c.p, dimName)\n                    if isinstance(newDim, tuple):\n                        # map linked component dimension\n                        link = component._DimensionLink(newDim)\n                        newDim = str(link)\n                    if inpDim != newDim:\n                        setattr(cDesign, dimName, newDim)\n\n        return cs, bp\n\n    def _getBlockTypesToModify(self):\n        \"\"\"Hook method to determine blocks that should be modified.\"\"\"\n        raise NotImplementedError\n\n    def _adjustBlock(self, b):\n        \"\"\"Hook method for `__call__` template method.\"\"\"\n        raise NotImplementedError\n\n\nclass SmearDensityModifier(_PinTypeAssemblyModifier):\n    \"\"\"\n    Adjust the smeared density to the specified value.\n\n    This is effectively how much of the space inside the cladding tube is occupied by\n    fuel at fabrication.\n    \"\"\"\n\n    def _getBlockTypesToModify(self):\n        \"\"\"Hook method to determine blocks that should be modified.\"\"\"\n        return flags.Flags.FUEL\n\n    def _adjustBlock(self, b):\n        \"\"\"Hook method for `__call__` template method.\"\"\"\n        pinTypeBlockConverters.adjustSmearDensity(b, self.value)\n\n\nclass CladThicknessByODModifier(_PinTypeAssemblyModifier):\n    \"\"\"Adjust the 
cladding thickness by adjusting the inner diameter of all cladding components.\"\"\"\n\n    FAIL_IF_AFTER = (SmearDensityModifier,)\n\n    def _getBlockTypesToModify(self):\n        \"\"\"Hook method to determine blocks that should be modified.\"\"\"\n        return \"\"\n\n    def _adjustBlock(self, b):\n        pinTypeBlockConverters.adjustCladThicknessByOD(b, self.value)\n\n\nclass CladThicknessByIDModifier(_PinTypeAssemblyModifier):\n    \"\"\"Adjust the cladding thickness by adjusting the outer diameter of the cladding component.\"\"\"\n\n    FAIL_IF_AFTER = (SmearDensityModifier,)\n\n    def _getBlockTypesToModify(self):\n        \"\"\"Hook method to determine blocks that should be modified.\"\"\"\n        return \"\"\n\n    def _adjustBlock(self, b):\n        pinTypeBlockConverters.adjustCladThicknessByID(b, self.value)\n"
  },
  {
    "path": "armi/cases/inputModifiers/tests/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"
  },
  {
    "path": "armi/cases/inputModifiers/tests/test_inputModifiers.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Unit tests for input modifiers.\"\"\"\n\nimport os\nimport unittest\n\nfrom ruamel import yaml\n\nfrom armi import cases, settings\nfrom armi.cases import suiteBuilder\nfrom armi.cases.inputModifiers import (\n    inputModifiers,\n    neutronicsModifiers,\n    pinTypeInputModifiers,\n)\nfrom armi.physics.neutronics.fissionProductModel.fissionProductModelSettings import (\n    CONF_FP_MODEL,\n)\nfrom armi.physics.neutronics.settings import (\n    CONF_EPS_EIG,\n    CONF_EPS_FSAVG,\n    CONF_EPS_FSPOINT,\n)\nfrom armi.reactor import blueprints\nfrom armi.reactor.tests import test_reactors\nfrom armi.utils import directoryChangers\n\nFLAGS_INPUT = \"\"\"nuclide flags:\n    U: {burn: false, xs: true}\n    ZR: {burn: false, xs: true}\n    MN: {burn: false, xs: true}\n    FE: {burn: false, xs: true}\n    SI: {burn: false, xs: true}\n    C: {burn: false, xs: true}\n    CR: {burn: false, xs: true}\n    MO: {burn: false, xs: true}\n    NI: {burn: false, xs: true}\n    V: {burn: false, xs: true}\n    W: {burn: false, xs: true}\"\"\"\nCLAD = \"\"\"clad: &fuel_1_clad\n            Tinput: 350.0\n            Thot: 350.0\n            shape: circle\n            id: 1.0\n            od: 1.1\n            material: HT9\"\"\"\nCLAD_LINKED = \"\"\"clad: &fuel_1_clad\n            Tinput: 350.0\n            Thot: 350.0\n            shape: circle\n            id: fuel.od\n           
 od: 1.1\n            material: HT9\"\"\"\nBLOCKS_INPUT = \"\"\"blocks:\n    fuel 1: &fuel_1\n        fuel: &fuel_1_fuel\n            Tinput: 350.0\n            Thot: 350.0\n            shape: circle\n            id: 0.0\n            od: 0.5\n            material: UZr\n        {clad}\n        hex: &fuel_1_hex\n            Tinput: 350.0\n            Thot: 350.0\n            shape: hexagon\n            ip: 1.0\n            op: 10.0\n            material: HT9\n    fuel 2: *fuel_1\n    block 3: *fuel_1                                        # non-fuel blocks\n    block 4: {{<<: *fuel_1}}                                  # non-fuel blocks\n    block 5: {{fuel: *fuel_1_fuel, clad: *fuel_1_clad, hex: *fuel_1_hex}}       # non-fuel blocks\"\"\"\nBLOCKS_INPUT_1 = BLOCKS_INPUT.format(clad=CLAD)\nBLOCKS_INPUT_2 = BLOCKS_INPUT.format(clad=CLAD_LINKED)\nBLUEPRINT_INPUT = f\"\"\"\n{FLAGS_INPUT}\n{BLOCKS_INPUT_1}\nassemblies: {{}}\n\"\"\"\nBLUEPRINT_INPUT_LINKS = f\"\"\"\n{FLAGS_INPUT}\n{BLOCKS_INPUT_2}\nassemblies: {{}}\n\"\"\"\n\nCORE_INPUT = \"\"\"\nsystems:\n    core:\n        grid name: core\n        origin:\n            x: 0.0\n            y: 0.0\n            z: 0.0\ngrids:\n    core:\n        geom: hex\n        symmetry: third core periodic\n        grid contents:\n            [0, 0]: A1\n            [1, 0]: A2\n            [1, 1]: A3\n            [2, -2]: A4\n            [2, -1]: A5\n            [2, 0]: A6\n            [2, 1]: A7\n            [2, 2]: A8\n\"\"\"\n\n\nclass TestsuiteBuilderIntegrations(unittest.TestCase):\n    @classmethod\n    def setUpClass(cls):\n        bp = blueprints.Blueprints.load(BLUEPRINT_INPUT_LINKS + CORE_INPUT)\n        cs = settings.Settings()\n        bp._prepConstruction(cs)\n        cls.baseCase = cases.Case(cs=cs, bp=bp)\n\n    def test_smearDensityFail(self):\n        builder = suiteBuilder.FullFactorialSuiteBuilder(self.baseCase)\n\n        builder.addDegreeOfFreedom(pinTypeInputModifiers.SmearDensityModifier(v) for v in (0.5, 0.6))\n    
    builder.addDegreeOfFreedom(pinTypeInputModifiers.CladThicknessByIDModifier(v) for v in (0.05, 0.01))\n        self.assertEqual(4, len(builder))\n\n        with self.assertRaisesRegex(RuntimeError, \"before .*SmearDensityModifier\"):\n            builder.buildSuite()\n\n    def test_settingsModifier(self):\n        builder = suiteBuilder.SeparateEffectsSuiteBuilder(self.baseCase)\n        builder.addDegreeOfFreedom(\n            inputModifiers.SettingsModifier(CONF_FP_MODEL, v) for v in (\"noFissionProducts\", \"infinitelyDilute\", \"MO99\")\n        )\n        builder.addDegreeOfFreedom(inputModifiers.SettingsModifier(\"detailedAxialExpansion\", v) for v in (True,))\n        builder.addDegreeOfFreedom(\n            inputModifiers.SettingsModifier(\"buGroups\", v)\n            for v in (\n                [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 20, 100],\n                [3, 5, 7, 9, 10, 20, 100],\n                [3, 5, 10, 15, 20, 100],\n            )\n        )\n        builder.addDegreeOfFreedom((inputModifiers.FullCoreModifier(),))\n\n        with directoryChangers.TemporaryDirectoryChanger():\n            suite = builder.buildSuite()\n            for c in suite:\n                c.writeInputs()\n\n            self.assertTrue(os.path.exists(\"case-suite\"))\n\n    def test_bluePrintBlockModifier(self):\n        \"\"\"Test BluePrintBlockModifier with build suite naming function argument.\"\"\"\n        case_nbr = 1\n        builder = suiteBuilder.FullFactorialSuiteBuilder(self.baseCase)\n\n        builder.addDegreeOfFreedom(\n            [inputModifiers.BluePrintBlockModifier(\"fuel 1\", \"clad\", \"od\", float(\"{:.2f}\".format(22 / 7)))]\n        )\n        builder.addDegreeOfFreedom([inputModifiers.BluePrintBlockModifier(\"block 5\", \"clad\", \"od\", 3.14159)])\n\n        def SuiteNaming(index, _case, _mods):\n            uniquePart = \"{:0>4}\".format(index + case_nbr)\n            return os.path.join(\n                \".\",\n                
\"case-suite-testBPBM\",\n                uniquePart,\n                self.baseCase.title + \"-\" + uniquePart,\n            )\n\n        with directoryChangers.TemporaryDirectoryChanger():\n            suite = builder.buildSuite(namingFunc=SuiteNaming)\n            suite.writeInputs()\n\n            self.assertTrue(os.path.exists(\"case-suite-testBPBM\"))\n\n            yamlfile = open(\n                f\"case-suite-testBPBM/000{case_nbr}/armi-000{case_nbr}-blueprints.yaml\",\n                \"r\",\n            )\n            bp_dict = yaml.YAML().load(yamlfile)\n            yamlfile.close()\n\n            self.assertEqual(bp_dict[\"blocks\"][\"fuel 1\"][\"clad\"][\"od\"], 3.14)\n            self.assertEqual(bp_dict[\"blocks\"][\"block 5\"][\"clad\"][\"od\"], 3.14159)\n\n\nclass TestSettingsModifiers(unittest.TestCase):\n    def test_NeutronicConvergenceModifier(self):\n        cs = settings.Settings()\n\n        with self.assertRaises(ValueError):\n            _ = neutronicsModifiers.NeutronicConvergenceModifier(0.0)\n\n        with self.assertRaises(ValueError):\n            _ = neutronicsModifiers.NeutronicConvergenceModifier(1e-2 + 1e-15)\n\n        cs, _ = neutronicsModifiers.NeutronicConvergenceModifier(1e-2)(cs, None)\n        self.assertAlmostEqual(cs[CONF_EPS_EIG], 1e-2)\n        self.assertAlmostEqual(cs[CONF_EPS_FSAVG], 1.0)\n        self.assertAlmostEqual(cs[CONF_EPS_FSPOINT], 1.0)\n\n\nclass NeutronicsKernelOpts(inputModifiers.InputModifier):\n    def __init__(self, neutronicsKernelOpts):\n        inputModifiers.InputModifier.__init__(self)\n        self.neutronicsKernelOpts = neutronicsKernelOpts\n\n    def __call__(self, cs, bp):\n        cs = cs.modified(self.neutronicsKernelOpts)\n        return cs, bp\n\n\nclass TestFullCoreModifier(unittest.TestCase):\n    \"\"\"Ensure full core conversion works.\"\"\"\n\n    def test_fullCoreConversion(self):\n        cs = settings.Settings(os.path.join(test_reactors.TEST_ROOT, \"armiRun.yaml\"))\n        
case = cases.Case(cs=cs)\n        mod = inputModifiers.FullCoreModifier()\n        self.assertEqual(case.bp.gridDesigns[\"core\"].symmetry, \"third periodic\")\n        case, case.bp = mod(case, case.bp)\n        self.assertEqual(case.bp.gridDesigns[\"core\"].symmetry, \"full\")\n\n    def test_fullCoreConversionWithOrientation(self):\n        \"\"\"Tests modifying a reactor to full core that includes beginning of life orientations.\"\"\"\n        cs = settings.Settings(os.path.join(test_reactors.TEST_ROOT, \"armiRun.yaml\"))\n        case = cases.Case(cs=cs)\n        mod = inputModifiers.FullCoreModifier()\n        self.assertEqual(case.bp.gridDesigns[\"core\"].symmetry, \"third periodic\")\n\n        # Add beginning of life orientations\n        case.bp.gridDesigns[\"core\"].orientationBOL = {(2, 1): 30.0}\n\n        # Modify to full core\n        case, case.bp = mod(case, case.bp)\n\n        # Check results\n        self.assertEqual(case.bp.gridDesigns[\"core\"].symmetry, \"full\")\n        self.assertIn((2, 3), case.bp.gridDesigns[\"core\"].orientationBOL)\n        self.assertEqual(150.0, case.bp.gridDesigns[\"core\"].orientationBOL[(2, 3)])\n        self.assertIn((2, 5), case.bp.gridDesigns[\"core\"].orientationBOL)\n        self.assertEqual(270.0, case.bp.gridDesigns[\"core\"].orientationBOL[(2, 5)])\n"
  },
  {
    "path": "armi/cases/inputModifiers/tests/test_pinTypeInputModifiers.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Unit tests for input modifiers.\"\"\"\n\nimport math\nimport unittest\n\nfrom armi import settings\nfrom armi.cases.inputModifiers import pinTypeInputModifiers\nfrom armi.cases.inputModifiers.tests.test_inputModifiers import BLUEPRINT_INPUT\nfrom armi.reactor import blueprints\n\n\nclass TestBlueprintModifiers(unittest.TestCase):\n    def setUp(self):\n        self.bp = blueprints.Blueprints.load(BLUEPRINT_INPUT)\n        self.bp._prepConstruction(settings.Settings())\n\n    def test_AdjustSmearDensity(self):\n        r\"\"\"\n        Compute the smear density where clad.id is 1.0.\n\n        .. 
math::\n\n            areaFuel = smearDensity * innerCladArea\n            fuelOD^2 / 4 = 0.5 * cladID^2 / 4\n            fuelOD = \\sqrt{0.5}\n\n        Notes\n        -----\n        The area of fuel is 0.5 * inner area of clad.\n        \"\"\"\n        bp = self.bp\n        self.assertEqual(1.0, bp.blockDesigns[\"fuel 1\"][\"clad\"].id)\n        self.assertEqual(0.5, bp.blockDesigns[\"fuel 1\"][\"fuel\"].od)\n        self.assertEqual(0.5, bp.blockDesigns[\"fuel 2\"][\"fuel\"].od)\n        self.assertEqual(0.5, bp.blockDesigns[\"block 3\"][\"fuel\"].od)\n        self.assertEqual(0.5, bp.blockDesigns[\"block 4\"][\"fuel\"].od)\n        self.assertEqual(0.5, bp.blockDesigns[\"block 5\"][\"fuel\"].od)\n\n        pinTypeInputModifiers.SmearDensityModifier(0.5)(settings.Settings(), bp)\n\n        self.assertEqual(math.sqrt(0.5), bp.blockDesigns[\"fuel 1\"][\"fuel\"].od)\n        self.assertEqual(math.sqrt(0.5), bp.blockDesigns[\"fuel 2\"][\"fuel\"].od)\n        self.assertEqual(math.sqrt(0.5), bp.blockDesigns[\"block 3\"][\"fuel\"].od)\n        self.assertEqual(math.sqrt(0.5), bp.blockDesigns[\"block 4\"][\"fuel\"].od)\n        self.assertEqual(0.5, bp.blockDesigns[\"block 5\"][\"fuel\"].od)  # unique instance\n\n    def test_CladThickenessByODModifier(self):\n        \"\"\"\n        Adjust the clad thickness by outer diameter.\n\n        .. math::\n\n            cladThickness = (clad.od - clad.id) / 2\n            clad.od = 2 * cladThicness - clad.id\n\n        when ``clad.id = 1.0`` and ``cladThickness = 0.12``,\n\n        .. 
math::\n\n            clad.od = 2 * 0.12 - 1.0\n            clad.od = 1.24\n        \"\"\"\n        bp = self.bp\n        self.assertEqual(1.1, bp.blockDesigns[\"fuel 1\"][\"clad\"].od)\n        self.assertEqual(1.1, bp.blockDesigns[\"fuel 2\"][\"clad\"].od)\n        self.assertEqual(1.1, bp.blockDesigns[\"block 3\"][\"clad\"].od)\n        self.assertEqual(1.1, bp.blockDesigns[\"block 4\"][\"clad\"].od)\n        self.assertEqual(1.1, bp.blockDesigns[\"block 5\"][\"clad\"].od)\n\n        pinTypeInputModifiers.CladThicknessByODModifier(0.12)(settings.Settings(), bp)\n\n        self.assertEqual(1.24, bp.blockDesigns[\"fuel 1\"][\"clad\"].od)\n        self.assertEqual(1.24, bp.blockDesigns[\"fuel 2\"][\"clad\"].od)\n        self.assertEqual(1.24, bp.blockDesigns[\"block 3\"][\"clad\"].od)\n        self.assertEqual(1.24, bp.blockDesigns[\"block 4\"][\"clad\"].od)\n        self.assertEqual(1.24, bp.blockDesigns[\"block 5\"][\"clad\"].od)  # modifies all blocks\n\n    def test_CladThickenessByIDModifier(self):\n        \"\"\"\n        Adjust the clad thickness by inner diameter.\n\n        .. math::\n\n            cladThickness = (clad.od - clad.id) / 2\n            clad.id = cladod - 2 * cladThicness\n\n        when ``clad.id = 1.1`` and ``cladThickness = 0.025``,\n\n        .. 
math::\n\n            clad.od = 1.1 - 2 * 0.025\n            clad.od = 1.05\n        \"\"\"\n        bp = self.bp\n        self.assertEqual(1.0, bp.blockDesigns[\"fuel 1\"][\"clad\"].id)\n        self.assertEqual(1.0, bp.blockDesigns[\"fuel 2\"][\"clad\"].id)\n        self.assertEqual(1.0, bp.blockDesigns[\"block 3\"][\"clad\"].id)\n        self.assertEqual(1.0, bp.blockDesigns[\"block 4\"][\"clad\"].id)\n        self.assertEqual(1.0, bp.blockDesigns[\"block 5\"][\"clad\"].id)\n\n        pinTypeInputModifiers.CladThicknessByIDModifier(0.025)(settings.Settings(), bp)\n\n        self.assertEqual(1.05, bp.blockDesigns[\"fuel 1\"][\"clad\"].id)\n        self.assertEqual(1.05, bp.blockDesigns[\"fuel 2\"][\"clad\"].id)\n        self.assertEqual(1.05, bp.blockDesigns[\"block 3\"][\"clad\"].id)\n        self.assertEqual(1.05, bp.blockDesigns[\"block 4\"][\"clad\"].id)\n        self.assertEqual(1.05, bp.blockDesigns[\"block 5\"][\"clad\"].id)  # modifies all blocks\n"
  },
  {
    "path": "armi/cases/suite.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"\nThe ``CaseSuite`` object is responsible for running, and executing a set of user inputs.  Many\nentry points redirect into ``CaseSuite`` methods, such as ``clone``, ``compare``, and ``submit``.\n\nUsed in conjunction with the :py:class:`~armi.cases.case.Case` object, ``CaseSuite`` can be used to\ncollect a series of cases and submit them to a cluster for execution. Furthermore, a ``CaseSuite``\ncan be used to gather executed cases for post-analysis.\n\n``CaseSuite``\\ s should allow ``Cases`` to be added from totally separate directories. This is\nuseful for plugin-informed testing as well as other things.\n\nSee Also\n--------\narmi.cases.case : An individual item of a case suite.\n\"\"\"\n\nimport os\nimport traceback\nfrom typing import Optional, Sequence\n\nfrom armi import runLog, settings\nfrom armi.cases import case as armicase\nfrom armi.utils import directoryChangers, tabulate\n\n\nclass CaseSuite:\n    \"\"\"\n    A CaseSuite is a collection of possibly related Case objects.\n\n    .. impl:: CaseSuite allows for one case to start after another completes.\n        :id: I_ARMI_CASE_SUITE\n        :implements: R_ARMI_CASE_SUITE\n\n        The CaseSuite object allows multiple, often related,\n        :py:class:`~armi.cases.case.Case` objects to be run sequentially. 
A CaseSuite\n        is intended to be both a pre-processing or a post-processing tool to facilitate\n        case generation and analysis. Under most circumstances one may wish to subclass\n        a CaseSuite to meet the needs of a specific calculation. A CaseSuite is a\n        collection that is keyed off Case titles.\n    \"\"\"\n\n    def __init__(self, cs):\n        self._cases = list()\n        self.cs = cs\n\n    def add(self, case):\n        \"\"\"\n        Add a Case object to the CaseSuite.\n\n        Case objects within a CaseSuite must have unique ``title`` attributes, a\n        KeyError will be raised\n        \"\"\"\n        existing = next((c for c in self if case == c), None)\n        if existing is not None:\n            raise ValueError(\n                \"CaseSuite already contains case with title `{}`\\nFirst case:  {}\\nSecond case: {}\".format(\n                    case.title, existing, case\n                )\n            )\n        self._cases.append(case)\n        case._caseSuite = self\n\n    def remove(self, case):\n        \"\"\"Remove a case from a suite.\"\"\"\n        self._cases.remove(case)\n        case._caseSuite = None\n\n    def __iter__(self):\n        return iter(self._cases)\n\n    def __len__(self):\n        return len(self._cases)\n\n    def discover(\n        self,\n        rootDir=None,\n        patterns=None,\n        ignorePatterns=None,\n        recursive=True,\n        skipInspection=False,\n    ):\n        \"\"\"\n        Finds case objects by searching for a pattern of file paths, and adds them to\n        the suite.\n\n        This searches for Settings input files and loads them to create Case objects.\n\n        Parameters\n        ----------\n        rootDir : str, optional\n            root directory to search for settings files\n        patterns : list of str, optional\n            file pattern to use to filter file names\n        ignorePatterns : list of str, optional\n            file patterns to exclude 
matching file names\n        recursive : bool, optional\n            if True, recursively search for settings files\n        skipInspection : bool, optional\n            if True, skip running the check inputs\n        \"\"\"\n        csFiles = settings.recursivelyLoadSettingsFiles(\n            rootDir or os.path.abspath(os.getcwd()),\n            patterns or [\"*.yaml\"],\n            recursive=recursive,\n            ignorePatterns=ignorePatterns,\n            handleInvalids=False,\n        )\n\n        for cs in csFiles:\n            case = armicase.Case(cs=cs, caseSuite=self)\n            if not skipInspection:\n                case.checkInputs()\n            self.add(case)\n\n    def echoConfiguration(self):\n        \"\"\"\n        Print information about this suite to the run log.\n\n        Notes\n        -----\n        Some of these printouts won't make sense for all users, and may make sense to\n        be delegated to the plugins/app.\n        \"\"\"\n        for setting in self.cs.environmentSettings:\n            runLog.important(\"{}: {}\".format(self.cs.getSetting(setting).label, self.cs[setting]))\n\n        runLog.important(\"Test inputs will be taken from test case results when they have finished\")\n        runLog.important(\n            tabulate.tabulate(\n                [\n                    (\n                        c.title,\n                        \"T\" if c.enabled else \"F\",\n                        \",\".join(d.title for d in c.dependencies),\n                    )\n                    for c in self\n                ],\n                headers=[\"Title\", \"Enabled\", \"Dependencies\"],\n                tableFmt=\"armi\",\n            )\n        )\n\n    def clone(self, oldRoot=None, writeStyle=\"short\"):\n        \"\"\"\n        Clone a CaseSuite to a new place.\n\n        Creates a clone for each case within a CaseSuite. 
If ``oldRoot`` is not\n        specified, then each case clone is made in a directory with the title of the\n        case. If ``oldRoot`` is specified, then a relative path from ``oldRoot`` will\n        be used to determine a new relative path to the current directory ``oldRoot``.\n\n        Parameters\n        ----------\n        oldRoot : str (optional)\n            root directory of original case suite used to help filter when a suite\n            contains one or more cases with the same case title.\n        writeStyle : str (optional)\n            Writing style for which settings get written back to the settings files\n            (short, medium, or full).\n\n        Notes\n        -----\n        By design, a CaseSuite has no location dependence; this allows any set of cases\n        to compose a CaseSuite. The thought is that the post-analysis capabilities\n        without restricting a root directory could be beneficial. For example, this\n        allows one to perform analysis on cases analyzed by Person A and Person B, even\n        if the analyses were performed in completely different locations. 
As a\n        consequence, when you want to clone, we need to infer a \"root\" of the original\n        cases to attempt to mirror whatever existing directory structure there may have\n        been.\n        \"\"\"\n        clone = CaseSuite(self.cs.duplicate())\n\n        modifiedSettings = {ss.name: ss.value for ss in self.cs.values() if ss.offDefault}\n        for case in self:\n            if oldRoot:\n                newDir = os.path.dirname(os.path.relpath(case.cs.path, oldRoot))\n            else:\n                newDir = case.title\n            with directoryChangers.ForcedCreationDirectoryChanger(newDir, dumpOnException=False):\n                clone.add(case.clone(modifiedSettings=modifiedSettings, writeStyle=writeStyle))\n        return clone\n\n    def run(self):\n        \"\"\"\n        Run each case, one after the other.\n\n        Warning\n        -------\n        Suite running may not work yet if the cases have interdependencies. We typically run on a\n        HPC but are still working on a platform independent way of handling HPCs.\n        \"\"\"\n        for ci, case in enumerate(self):\n            runLog.important(f\"Running case {ci + 1}/{len(self)}: {case}\")\n            with directoryChangers.DirectoryChanger(case.directory):\n                try:\n                    case.run()\n                except Exception:\n                    # allow all errors and continue to next run\n                    runLog.error(f\"{case} failed during execution.\")\n                    traceback.print_exc()\n\n    def compare(\n        self,\n        that,\n        exclusion: Optional[Sequence[str]] = None,\n        weights=None,\n        tolerance=0.01,\n        timestepCompare=None,\n    ) -> int:\n        \"\"\"\n        Compare one case suite with another.\n\n        Returns\n        -------\n        The number of problem differences encountered.\n        \"\"\"\n        runLog.important(\"Comparing case suites.\")\n\n        nIssues = 0\n\n        
refTitles = set(c.title for c in self)\n        cmpTitles = set(c.title for c in that)\n        suiteHasMissingFiles = False\n        tableResults = {}\n        for caseTitle in refTitles.union(cmpTitles):\n            refCase = next((c for c in self if c.title == caseTitle), None)\n            cmpCase = next((c for c in that if c.title == caseTitle), None)\n            caseStatus = []\n            for case in (refCase, cmpCase):\n                status = \"Found\"\n                if case is None or not os.path.exists(case.dbName):\n                    status = \"Missing\"\n                caseStatus.append(status)\n            refFile, userFile = caseStatus\n            if any(stat != \"Found\" for stat in caseStatus):\n                # Case was not run, or failed to produce a database.\n                # In either case, this is an issue.\n                # It could possibly be a new test, but there is no way to tell this\n                # versus a reference file being missing so when a new test is made\n                # it will be an issue. 
After the first push with the new tests the files\n                # will be copied over and future tests will be fine.\n                caseIssues = 1\n                suiteHasMissingFiles = False\n            else:\n                caseIssues = refCase.compare(\n                    cmpCase,\n                    exclusion=exclusion,\n                    tolerance=tolerance,\n                    timestepCompare=timestepCompare,\n                )\n            nIssues += caseIssues\n            tableResults[caseTitle] = (userFile, refFile, caseIssues)\n\n        self.writeTable(tableResults)\n        if suiteHasMissingFiles:\n            runLog.warning((UNMISSABLE_FAILURE.format(\", \".join(t for t in refTitles - cmpTitles))))\n\n        return nIssues\n\n    def writeInputs(self, writeStyle=\"short\"):\n        \"\"\"\n        Write inputs for all cases in the suite.\n\n        writeStyle : str (optional)\n            Writing style for which settings get written back to the settings files\n            (short, medium, or full).\n\n        See Also\n        --------\n        clone\n            Similar to this but doesn't let you write out new geometry or blueprints objects.\n        \"\"\"\n        for case in self:\n            case.writeInputs(sourceDir=self.cs.inputDirectory, writeStyle=writeStyle)\n\n    @staticmethod\n    def writeTable(tableResults):\n        \"\"\"Write a table summarizing the test differences.\"\"\"\n        fmt = \"psql\"\n        print(\n            (\n                tabulate.tabulate(\n                    [[\"Integration test directory: {}\".format(os.getcwd())]],\n                    [\"SUMMARIZED INTEGRATION TEST DIFFERENCES:\"],\n                    tableFmt=fmt,\n                )\n            )\n        )\n        header = [\"Test\", \"User File\", \"Reference File\", \"# Problem Diff Lines\"]\n        totalDiffs = 0\n        data = []\n        for testName in sorted(tableResults.keys()):\n            userFile, refFile, caseIssues = 
tableResults[testName]\n            data.append((testName, userFile, refFile, caseIssues))\n            totalDiffs += caseIssues\n\n        print(tabulate.tabulate(data, header, tableFmt=fmt))\n        print(tabulate.tabulate([[\"Total number of differences: {}\".format(totalDiffs)]], tableFmt=fmt))\n\n\nUNMISSABLE_FAILURE = '''\n!! THESE TESTS HAVE UNEXPECTED ABSENT RESULTS !!\n\n                     uuuuuuu\n                 uu$$$$$$$$$$$uu\n              uu$$$$$$$$$$$$$$$$$uu\n             u$$$$$$$$$$$$$$$$$$$$$u\n            u$$$$$$$$$$$$$$$$$$$$$$$u\n           u$$$$$$$$$$$$$$$$$$$$$$$$$u\n           u$$$$$$$$$$$$$$$$$$$$$$$$$u\n           u$$$$$$\"   \"$$$\"   \"$$$$$$u\n           \"$$$$\"      u$u       $$$$\"\n            $$$u       u$u       u$$$\n            $$$u      u$$$u      u$$$\n             \"$$$$uu$$$   $$$uu$$$$\"\n              \"$$$$$$$\"   \"$$$$$$$\"\n                u$$$$$$$u$$$$$$$u\n                 u$\"$\"$\"$\"$\"$\"$u\n      uuu        $$u$ $ $ $ $u$$       uuu\n     u$$$$        $$$$$u$u$u$$$       u$$$$\n      $$$$$uu      \"$$$$$$$$$\"     uu$$$$$$\n    u$$$$$$$$$$$uu    \"\"\"\"\"    uuuu$$$$$$$$$$\n    $$$$\"\"\"$$$$$$$$$$uuu   uu$$$$$$$$$\"\"\"$$$\"\n     \"\"\"      \"\"$$$$$$$$$$$uu \"\"$\"\"\"\n               uuuu \"\"$$$$$$$$$$uuu\n      u$$$uuu$$$$$$$$$uu \"\"$$$$$$$$$$$uuu$$$\n      $$$$$$$$$$\"\"\"\"           \"\"$$$$$$$$$$$\"\n       \"$$$$$\"                      \"\"$$$$\"\"\n         $$$\"                         $$$$\"\n\nComparison suite is missing the following case titles: {}\n\n'''\n"
  },
  {
    "path": "armi/cases/suiteBuilder.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"\nContains classes that build case suites from perturbing inputs.\n\nThe general use case is to create a :py:class:`~SuiteBuilder` with a base\n:py:class:`~armi.cases.case.Case`, use :py:meth:`~SuiteBuilder.addDegreeOfFreedom` to\nadjust inputs according to the supplied arguments, and finally use ``.buildSuite`` to\ngenerate inputs. The case suite can then be discovered, submitted, and analyzed using\nthe standard ``CaseSuite`` objects.\n\nThis module contains a variety of ``InputModifier`` objects as well, which are examples\nof how you can modify inputs for parameter sweeping. Power-users will generally make\ntheir own ``Modifier``\\ s that are design-specific.\n\"\"\"\n\nimport copy\nimport os\nimport random\nfrom typing import List\n\nfrom armi.cases import suite\n\n\ndef getInputModifiers(cls):\n    return cls.__subclasses__() + [g for s in cls.__subclasses__() for g in getInputModifiers(s)]\n\n\nclass SuiteBuilder:\n    \"\"\"\n    Class for constructing a CaseSuite from combinations of modifications on base inputs.\n\n    .. impl:: A generic tool to modify user inputs on multiple cases.\n        :id: I_ARMI_CASE_MOD0\n        :implements: R_ARMI_CASE_MOD\n\n        This class provides the capability to create a :py:class:`~armi.cases.suite.CaseSuite` based\n        on programmatic perturbations/modifications to case settings. 
It works by being constructed\n        with a base or nominal :py:class:`~armi.cases.case.Case` object. Children classes then\n        append the ``self.modifierSets`` member. Each entry in ``self.modifierSets`` is a\n        :py:class:`~armi.cases.inputModifiers.inputModifiers.InputModifier` representing a case to\n        add to the suite by specifying modifications to the settings of the base case.\n        :py:meth:`SuiteBuilder.buildSuite` is then invoked, returning an instance of the\n        :py:class:`~armi.cases.suite.CaseSuite` containing all the cases with modified settings.\n\n    Attributes\n    ----------\n    baseCase : armi.cases.case.Case\n        A Case object to perturb\n\n    modifierSets : list(tuple(InputModifier))\n        Contains a list of tuples of ``InputModifier`` instances. A single case is constructed by\n        running a series (the tuple) of InputModifiers on the case.\n\n    Notes\n    -----\n    This is public such that someone could pop an item out of the list if it is known to not work,\n    or be unnecessary.\n    \"\"\"\n\n    def __init__(self, baseCase):\n        self.baseCase = baseCase\n        self.modifierSets = []\n\n        from armi.cases.inputModifiers import inputModifiers\n\n        # use an instance variable instead of global lookup. 
this could allow someone to add their own\n        # modifiers, and also prevents it memory usage / discovery from simply loading the module.\n        self._modifierLookup = {k.__name__: k for k in getInputModifiers(inputModifiers.InputModifier)}\n\n    def __len__(self):\n        return len(self.modifierSets)\n\n    def __repr__(self):\n        return \"<SuiteBuilder len:{} baseCase:{}>\".format(len(self), self.baseCase)\n\n    def addDegreeOfFreedom(self, inputModifiers):\n        \"\"\"\n        Add a degree of freedom to the SweepBuilder.\n\n        The exact application of this is dependent on a subclass.\n\n        Parameters\n        ----------\n        inputModifiers : list(callable(Settings, Blueprints, SystemLayoutInput))\n            A list of callable objects with the signature\n            ``(Settings, Blueprints, SystemLayoutInput)``. When these objects are called they should\n            perturb the settings or blueprints by some amount determined by their construction.\n        \"\"\"\n        raise NotImplementedError\n\n    def addModifierSet(self, inputModifierSet: List):\n        \"\"\"\n        Add a single input modifier set to the suite.\n\n        Used to add modifications that are not necessarily another degree of freedom.\n        \"\"\"\n        self.modifierSets.append(inputModifierSet)\n\n    def buildSuite(self, namingFunc=None):\n        \"\"\"\n        Builds a ``CaseSuite`` based on the modifierSets contained in the SuiteBuilder.\n\n        For each sequence of modifications, this creates a new ``Case`` from the ``baseCase``, and\n        runs the sequence of modifications on the new ``Case``'s inputs. The modified ``Case`` is\n        then added to a ``CaseSuite``. The resulting ``CaseSuite`` is returned.\n\n        Parameters\n        ----------\n        namingFunc : callable(index, case, tuple(InputModifier)), (optional)\n            Function used to name each case. 
It is supplied with the index (int), the case (Case),\n            and a tuple of InputModifiers used to edit the case. This should be enough information\n            for someone to derive a meaningful name.\n\n            The function should return a string specifying the path of the ``Settings``, this\n            allows the user to specify the directories where each case will be run.\n\n            If not supplied the path will be ``./case-suite/<0000>/<title>-<0000>``, where\n            ``<0000>`` is the four-digit case index, and ``<title>`` is the ``baseCase.title``.\n\n        Raises\n        ------\n        RuntimeError\n            When order of modifications is deemed to be invalid.\n\n        Returns\n        -------\n        caseSuite : CaseSuite\n            Derived from the ``baseCase`` and modifications.\n        \"\"\"\n        caseSuite = suite.CaseSuite(self.baseCase.cs)\n\n        if namingFunc is None:\n\n            def namingFunc(index, _case, _mods):\n                uniquePart = \"{:0>4}\".format(index)\n                return os.path.join(\n                    \".\",\n                    \"case-suite\",\n                    uniquePart,\n                    self.baseCase.title + \"-\" + uniquePart,\n                )\n\n        for index, modList in enumerate(self.modifierSets):\n            case = copy.deepcopy(self.baseCase)\n            previousMods = []\n            case.bp._prepConstruction(case.cs)\n            for mod in modList:\n                # it may seem late to figure this out, but since we are doing it now, someone could\n                # filter these conditions out before the buildSuite. 
optionally, we could have a\n                # flag for \"skipInvalidModficationCombos=False\"\n                shouldHaveBeenBefore = [fail for fail in getattr(mod, \"FAIL_IF_AFTER\", ()) if fail in previousMods]\n\n                if any(shouldHaveBeenBefore):\n                    raise RuntimeError(\n                        \"{} must occur before {}\".format(mod, \",\".join(repr(m) for m in shouldHaveBeenBefore))\n                    )\n\n                previousMods.append(type(mod))\n                case.cs, case.bp = mod(case.cs, case.bp)\n                case.independentVariables.update(mod.independentVariable)\n\n            case.cs.path = namingFunc(index, case, modList)\n            caseSuite.add(case)\n\n        return caseSuite\n\n\nclass FullFactorialSuiteBuilder(SuiteBuilder):\n    \"\"\"Builds a suite that has every combination of each modifier.\"\"\"\n\n    def __init__(self, baseCase):\n        SuiteBuilder.__init__(self, baseCase)\n        # initialize with empty tuple to trick cross-product to always work\n        self.modifierSets.append(())\n\n    def addDegreeOfFreedom(self, inputModifiers):\n        \"\"\"\n        Add a degree of freedom to the SuiteBuilder.\n\n        Creates the Cartesian product of the ``inputModifiers`` supplied and those already applied.\n\n        For example::\n\n            class SettingModifier(InputModifier):\n                def __init__(self, settingName, value):\n                    self.settingName = settingName\n                    self.value = value\n\n                def __call__(self, cs, bp):\n                    cs = cs.modified(newSettings={self.settingName: self.value})\n                    return cs, bp\n\n\n            builder = FullFactorialSuiteBuilder(someCase)\n            builder.addDegreeOfFreedom(SettingModifier(\"settingName1\", value) for value in (1, 2))\n            builder.addDegreeOfFreedom(SettingModifier(\"settingName2\", value) for value in (3, 4, 5))\n\n        would result in 6 
cases:\n\n        +-------+------------------+------------------+\n        | Index | ``settingName1`` | ``settingName2`` |\n        +=======+==================+==================+\n        | 0     | 1                | 3                |\n        +-------+------------------+------------------+\n        | 1     | 2                | 3                |\n        +-------+------------------+------------------+\n        | 2     | 1                | 4                |\n        +-------+------------------+------------------+\n        | 3     | 2                | 4                |\n        +-------+------------------+------------------+\n        | 4     | 1                | 5                |\n        +-------+------------------+------------------+\n        | 5     | 2                | 5                |\n        +-------+------------------+------------------+\n\n        See Also\n        --------\n        SuiteBuilder.addDegreeOfFreedom\n        \"\"\"\n        # Cartesian product. Append a new modifier to the end of a chain of previously defined.\n        new = [\n            existingModSet + (newModifier,) for newModifier in inputModifiers for existingModSet in self.modifierSets\n        ]\n        del self.modifierSets[:]\n        self.modifierSets.extend(new)\n\n\nclass FullFactorialSuiteBuilderNoisy(FullFactorialSuiteBuilder):\n    \"\"\"\n    Adds a bit of noise to each independent variable to avoid duplicates.\n\n    This can be useful in some statistical postprocessors.\n\n    .. warning:: Use with caution. 
This is part of ongoing research.\n    \"\"\"\n\n    def __init__(self, baseCase, noiseFraction):\n        FullFactorialSuiteBuilder.__init__(self, baseCase)\n        self.noiseFraction = noiseFraction\n\n    def addDegreeOfFreedom(self, inputModifiers):\n        new = []\n        for newMod in inputModifiers:\n            for existingModSet in self.modifierSets:\n                existingModSetCopy = copy.deepcopy(existingModSet)\n                for mod in existingModSetCopy:\n                    self._perturb(mod)\n                newModCopy = copy.deepcopy(newMod)\n                self._perturb(newModCopy)\n                new.append(existingModSetCopy + (newModCopy,))\n\n        del self.modifierSets[:]\n        self.modifierSets.extend(new)\n\n    def _perturb(self, mod):\n        indeps = {}\n        for key, val in mod.independentVariable.items():\n            # perturb values by 10% randomly\n            newVal = val + val * self.noiseFraction * (2 * random.random() - 1)\n            indeps[key] = newVal\n        mod.independentVariable = indeps\n\n\nclass SeparateEffectsSuiteBuilder(SuiteBuilder):\n    \"\"\"Varies each degree of freedom in isolation.\"\"\"\n\n    def addDegreeOfFreedom(self, inputModifiers):\n        \"\"\"\n        Add a degree of freedom to the SuiteBuilder.\n\n        Adds a case for each modifier supplied.\n\n        For example::\n\n            class SettingModifier(InputModifier):\n                def __init__(self, settingName, value):\n                    self.settingName = settingName\n                    self.value = value\n\n                def __call__(self, cs, bp):\n                    cs = cs.modified(newSettings={self.settignName: self.value})\n                    return cs, bp\n\n\n            builder = SeparateEffectsSuiteBuilder(someCase)\n            builder.addDegreeOfFreedom(SettingModifier(\"settingName1\", value) for value in (1, 2))\n            builder.addDegreeOfFreedom(SettingModifier(\"settingName2\", value) 
for value in (3, 4, 5))\n\n        would result in 5 cases:\n\n        +-------+------------------+------------------+\n        | Index | ``settingName1`` | ``settingName2`` |\n        +=======+==================+==================+\n        | 0     | 1                | default          |\n        +-------+------------------+------------------+\n        | 1     | 2                | default          |\n        +-------+------------------+------------------+\n        | 2     | default          | 3                |\n        +-------+------------------+------------------+\n        | 3     | default          | 4                |\n        +-------+------------------+------------------+\n        | 4     | default          | 5                |\n        +-------+------------------+------------------+\n\n        See Also\n        --------\n        SuiteBuilder.addDegreeOfFreedom\n        \"\"\"\n        self.modifierSets.extend((modifier,) for modifier in inputModifiers)\n"
  },
  {
    "path": "armi/cases/tests/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"
  },
  {
    "path": "armi/cases/tests/test_cases.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Unit tests for Case and CaseSuite objects.\"\"\"\n\nimport copy\nimport cProfile\nimport logging\nimport os\nimport platform\nimport unittest\n\nimport h5py\n\nfrom armi import cases, context, getApp, interfaces, plugins, runLog, settings\nfrom armi.bookkeeping.db.databaseInterface import DatabaseInterface\nfrom armi.physics.fuelCycle.settings import CONF_SHUFFLE_LOGIC\nfrom armi.reactor import blueprints\nfrom armi.reactor.tests import test_reactors\nfrom armi.testing import TESTING_ROOT\nfrom armi.tests import ARMI_RUN_PATH, TEST_ROOT, mockRunLogs\nfrom armi.utils import directoryChangers\n\nBLUEPRINT_INPUT = \"\"\"\nnuclide flags:\n    U: {burn: false, xs: true}\n    ZR: {burn: false, xs: true}\n    MN: {burn: false, xs: true}\n    FE: {burn: false, xs: true}\n    SI: {burn: false, xs: true}\n    C: {burn: false, xs: true}\n    CR: {burn: false, xs: true}\n    MO: {burn: false, xs: true}\n    NI: {burn: false, xs: true}\nblocks:\n    fuel 1: &fuel_1\n        fuel: &fuel_1_fuel\n            Tinput: 350.0\n            Thot: 350.0\n            shape: circle\n            id: 0.0\n            od: 0.5\n            material: UZr\n        clad: &fuel_1_clad\n            Tinput: 350.0\n            Thot: 350.0\n            shape: circle\n            id: 1.0\n            od: 1.1\n            material: SS316\n    fuel 2: *fuel_1\n    block 3: *fuel_1                   
                # non-fuel blocks\n    block 4: {<<: *fuel_1}                             # non-fuel blocks\n    block 5: {fuel: *fuel_1_fuel, clad: *fuel_1_clad}  # non-fuel blocks\nassemblies: {}\nsystems:\n    core:\n        grid name: core\n        origin:\n            x: 0.0\n            y: 0.0\n            z: 0.0\ngrids:\n    core:\n        geom: hex\n        symmetry: third core periodic\n        grid contents:\n            [0, 0]: A1\n            [1, 0]: A2\n            [1, 1]: A3\n\"\"\"\n\n\nclass TestArmiCase(unittest.TestCase):\n    \"\"\"Class to tests armi.cases.Case methods.\"\"\"\n\n    def test_independentVariables(self):\n        \"\"\"Ensure that independentVariables added to a case move with it.\"\"\"\n        bp = blueprints.Blueprints.load(BLUEPRINT_INPUT)\n        cs = settings.Settings(ARMI_RUN_PATH)\n        cs = cs.modified(newSettings={\"verbosity\": \"important\"})\n        baseCase = cases.Case(cs, bp=bp)\n        with directoryChangers.TemporaryDirectoryChanger():\n            vals = {\"cladThickness\": 1, \"control strat\": \"good\", \"enrich\": 0.9}\n            case = baseCase.clone()\n            case._independentVariables = vals\n            case.writeInputs()\n            newCs = settings.Settings(fName=case.title + \".yaml\")\n            newCase = cases.Case(newCs)\n            for name, val in vals.items():\n                self.assertEqual(newCase.independentVariables[name], val)\n\n    def test_setUpTaskDependence(self):\n        case = cases.Case(settings.Settings())\n        case.enabled = False\n        case.setUpTaskDependence()\n        case.enabled = True\n        case.setUpTaskDependence()\n        self.assertTrue(case.enabled)\n        self.assertEqual(len(case._tasks), 0)\n        self.assertEqual(len(case.dependencies), 0)\n\n    def test_getCoverageRcFile(self):\n        case = cases.Case(settings.Settings())\n        covRcDir = os.path.abspath(context.PROJECT_ROOT)\n        # Don't actually copy the file, just 
check the file paths match\n        covRcFile = case._getCoverageRcFile(userCovFile=\"\", makeCopy=False)\n        self.assertEqual(covRcFile, os.path.join(covRcDir, \"pyproject.toml\"))\n\n        userFile = \"UserCovRc\"\n        covRcFile = case._getCoverageRcFile(userCovFile=userFile, makeCopy=False)\n        self.assertEqual(covRcFile, os.path.abspath(userFile))\n\n    def test_startCoverage(self):\n        with directoryChangers.TemporaryDirectoryChanger():\n            cs = settings.Settings(ARMI_RUN_PATH)\n\n            # Test the null case\n            cs = cs.modified(newSettings={\"coverage\": False})\n            case = cases.Case(cs)\n            cov = case._startCoverage()\n            self.assertIsNone(cov)\n\n            # NOTE: We can't test coverage=True, because it breaks coverage on CI\n\n    def test_endCoverage(self):\n        with directoryChangers.TemporaryDirectoryChanger():\n            cs = settings.Settings(ARMI_RUN_PATH)\n            cs = cs.modified(newSettings={\"coverage\": False})\n            case = cases.Case(cs)\n\n            # NOTE: We can't test coverage=True, because it breaks coverage on CI\n            outFile = \"coverage_results.cov\"\n            prof = case._startCoverage()\n            self.assertFalse(os.path.exists(outFile))\n            case._endCoverage(userCovFile=\"\", cov=prof)\n            self.assertFalse(os.path.exists(outFile))\n\n    @unittest.skipUnless(context.MPI_RANK == 0, \"test only on root node\")\n    def test_startProfiling(self):\n        with directoryChangers.TemporaryDirectoryChanger():\n            cs = settings.Settings(ARMI_RUN_PATH)\n\n            # Test the null case\n            cs = cs.modified(newSettings={\"profile\": False})\n            case = cases.Case(cs)\n            prof = case._startProfiling()\n            self.assertIsNone(prof)\n\n            # Test when we start coverage correctly\n            cs = cs.modified(newSettings={\"profile\": True})\n            case = 
cases.Case(cs)\n            prof = case._startProfiling()\n            self.assertTrue(isinstance(prof, cProfile.Profile))\n\n    @unittest.skipUnless(context.MPI_RANK == 0, \"test only on root node\")\n    def test_endProfiling(self):\n        with directoryChangers.TemporaryDirectoryChanger():\n            cs = settings.Settings(ARMI_RUN_PATH)\n            cs = cs.modified(newSettings={\"profile\": True})\n            case = cases.Case(cs)\n\n            # run the profiler\n            prof = case._startProfiling()\n            case._endProfiling(prof)\n            self.assertTrue(isinstance(prof, cProfile.Profile))\n\n    def test_run(self):\n        \"\"\"\n        Test running a case.\n\n        .. test:: There is a generic mechanism to allow simulation runs.\n            :id: T_ARMI_CASE\n            :tests: R_ARMI_CASE\n\n        .. test:: Test case settings object is created, settings can be edited, and case can run.\n            :id: T_ARMI_SETTING\n            :tests: R_ARMI_SETTING\n        \"\"\"\n        with directoryChangers.TemporaryDirectoryChanger():\n            cs = settings.Settings(ARMI_RUN_PATH)\n            newSettings = {\n                \"branchVerbosity\": \"important\",\n                \"coverage\": False,\n                \"nCycles\": 2,\n                \"profile\": False,\n                \"trace\": False,\n                \"verbosity\": \"important\",\n            }\n            cs = cs.modified(newSettings=newSettings)\n            case = cases.Case(cs)\n\n            with mockRunLogs.BufferLog() as mock:\n                # start with a clean slate\n                self.assertEqual(\"\", mock.getStdout())\n                runLog.LOG.startLog(\"test_run\")\n                runLog.LOG.setVerbosity(logging.INFO)\n\n                case.run()\n\n                stdOut = mock.getStdout()\n                self.assertIn(\"Triggering BOL Event\", stdOut)\n                self.assertIn(\"xsGroups\", stdOut)\n                
self.assertIn(\"Completed EveryNode - timestep: cycle 0, node 0, year 0.00 Event\", stdOut)\n\n    def test_clone(self):\n        testTitle = \"CLONE_TEST\"\n        # test the short write style\n        with directoryChangers.TemporaryDirectoryChanger():\n            cs = settings.Settings(ARMI_RUN_PATH)\n            case = cases.Case(cs)\n            shortCase = case.clone(\n                additionalFiles=[\"ISOAA\"],\n                title=testTitle,\n                modifiedSettings={\"verbosity\": \"important\"},\n            )\n            # Check additional files made it\n            self.assertTrue(os.path.exists(\"ISOAA\"))\n            # Check title change made it\n            clonedYaml = testTitle + \".yaml\"\n            self.assertTrue(os.path.exists(clonedYaml))\n            self.assertTrue(shortCase.title, testTitle)\n            # Check on some expected settings\n            # Availability factor is in the original settings file but since it is a\n            # default value, gets removed for the write-out\n            txt = open(clonedYaml, \"r\").read()\n            self.assertNotIn(\"availabilityFactor\", txt)\n            self.assertIn(\"verbosity: important\", txt)\n\n        # test the medium write style\n        with directoryChangers.TemporaryDirectoryChanger():\n            cs = settings.Settings(ARMI_RUN_PATH)\n            case = cases.Case(cs)\n            case.clone(writeStyle=\"medium\")\n            clonedYaml = \"armiRun.yaml\"\n            self.assertTrue(os.path.exists(clonedYaml))\n            # Availability factor is in the original settings file and it is a default\n            # value. 
While \"short\" (default writing style) removes, \"medium\" should not\n            txt = open(clonedYaml, \"r\").read()\n            self.assertIn(\"availabilityFactor\", txt)\n\n\nclass TestCaseSuiteDependencies(unittest.TestCase):\n    \"\"\"CaseSuite tests.\"\"\"\n\n    def setUp(self):\n        self.suite = cases.CaseSuite(settings.Settings())\n\n        bp = blueprints.Blueprints.load(BLUEPRINT_INPUT)\n\n        self.c1 = cases.Case(cs=settings.Settings(), bp=bp)\n        self.c1.cs.path = \"c1.yaml\"\n        self.suite.add(self.c1)\n\n        self.c2 = cases.Case(cs=settings.Settings(), bp=bp)\n        self.c2.cs.path = \"c2.yaml\"\n        self.suite.add(self.c2)\n\n    def test_clone(self):\n        \"\"\"If you pass an invalid path, the clone can't happen, but it won't do any damage either.\"\"\"\n        with self.assertRaises(RuntimeError):\n            _clone = self.suite.clone(\"test_clone\")\n\n    def test_checkInputs(self):\n        \"\"\"\n        Test the checkInputs() method on a couple of cases.\n\n        .. 
test:: Check the ARMI inputs for consistency and validity.\n            :id: T_ARMI_CASE_CHECK\n            :tests: R_ARMI_CASE_CHECK\n        \"\"\"\n        self.c1.checkInputs()\n        self.c2.checkInputs()\n\n    def test_dependenciesWithObscurePaths(self):\n        \"\"\"Test directory dependence for strangely-written file paths (escape characters).\"\"\"\n        checks = [\n            (\"c1.yaml\", \"c2.yaml\", \"c1.h5\", True),\n            (r\"\\\\case\\1\\c1.yaml\", r\"\\\\case\\2\\c2.yaml\", \"c1.h5\", False),\n            (r\"\\\\case\\1\\c1.yaml\", r\"\\\\case\\2\\c2.yaml\", r\"..\\1\\c1.h5\", False),\n        ]\n        if platform.system() == \"Windows\":\n            # windows-specific case insensitivity\n            checks.extend(\n                [\n                    (\"c1.yaml\", \"c2.yaml\", \"C1.H5\", True),\n                    (\n                        r\"\\\\cas\\es\\1\\c1.yaml\",\n                        r\"\\\\cas\\es\\2\\c2.yaml\",\n                        r\"..\\..\\1\\c1.h5\",\n                        True,\n                    ),\n                    (\n                        r\"c1.yaml\",\n                        r\"c2.yaml\",\n                        r\".\\c1.h5\",\n                        True,\n                    ),\n                    (\n                        r\"\\\\cas\\es\\1\\c1.yaml\",\n                        r\"\\\\cas\\es\\2\\c2.yaml\",\n                        r\"../..\\1\\c1.h5\",\n                        True,\n                    ),\n                    (\n                        r\"\\\\cas\\es\\1\\c1.yaml\",\n                        r\"\\\\cas\\es\\2\\c2.yaml\",\n                        r\"../../1\\c1.h5\",\n                        True,\n                    ),\n                    (\n                        r\"\\\\cas\\es\\1\\c1.yaml\",\n                        r\"\\\\cas\\es\\2\\c2.yaml\",\n                        r\"..\\../1\\c1.h5\",\n                        True,\n                    ),\n                 
   (\n                        r\"\\\\cas\\es\\1\\c1.yaml\",\n                        r\"\\\\cas\\es\\2\\c2.yaml\",\n                        r\"\\\\cas\\es\\1\\c1.h5\",\n                        True,\n                    ),\n                    # below False because getcwd() != \\\\case\\es\\2\n                    (\n                        r\"..\\..\\1\\c1.yaml\",\n                        r\"\\\\cas\\es\\2\\c2.yaml\",\n                        r\"\\\\cas\\es\\1\\c1.h5\",\n                        False,\n                    ),\n                    (\n                        r\"\\\\cas\\es\\1\\c1.yaml\",\n                        r\"\\\\cas\\es\\2\\c2.yaml\",\n                        r\"..\\..\\2\\c1.h5\",\n                        False,\n                    ),\n                ]\n            )\n\n        for p1, p2, dbPath, isIn in checks:\n            self.c1.cs.path = p1\n            self.c2.cs.path = p2\n\n            newSettings = {}\n            newSettings[\"loadStyle\"] = \"fromDB\"\n            newSettings[\"reloadDBName\"] = dbPath\n            self.c2.cs = self.c2.cs.modified(newSettings=newSettings)\n\n            # note that case.dependencies is a property and\n            # will actually reflect these changes\n            self.assertEqual(\n                isIn,\n                self.c1 in self.c2.dependencies,\n                \"where p1: {} p2: {} dbPath: {}\".format(p1, p2, dbPath),\n            )\n\n    def test_dependencyFromDBName(self):\n        # no effect -> need to specify loadStyle, 'fromDB'\n        newSettings = {\"reloadDBName\": \"c1.h5\"}\n        self.c2.cs = self.c2.cs.modified(newSettings=newSettings)\n        self.assertEqual(0, len(self.c2.dependencies))\n\n        newSettings = {\"loadStyle\": \"fromDB\"}\n        self.c2.cs = self.c2.cs.modified(newSettings=newSettings)\n        self.assertIn(self.c1, self.c2.dependencies)\n\n        # the .h5 extension is optional\n        newSettings = {\"reloadDBName\": \"c1\"}\n        
self.c2.cs = self.c2.cs.modified(newSettings=newSettings)\n        self.assertIn(self.c1, self.c2.dependencies)\n\n    def test_dependencyFromExplictRepeatShuffles(self):\n        self.assertEqual(0, len(self.c2.dependencies))\n        newSettings = {\"explicitRepeatShuffles\": \"c1-SHUFFLES.txt\"}\n        self.c2.cs = self.c2.cs.modified(newSettings=newSettings)\n        self.assertIn(self.c1, self.c2.dependencies)\n\n    def test_explicitDependency(self):\n        \"\"\"\n        Test dependencies for case suites.\n\n        .. test:: Dependence allows for one case to start after the completion of another.\n            :id: T_ARMI_CASE_SUITE\n            :tests: R_ARMI_CASE_SUITE\n        \"\"\"\n        self.c1.addExplicitDependency(self.c2)\n\n        self.assertIn(self.c2, self.c1.dependencies)\n\n    def test_titleSetterGetter(self):\n        self.assertEqual(self.c1.title, \"c1\")\n        self.c1.title = \"new_bob\"\n        self.assertEqual(self.c1.title, \"new_bob\")\n\n\nclass TestCaseSuiteComparison(unittest.TestCase):\n    \"\"\"CaseSuite.compare() tests.\"\"\"\n\n    def setUp(self):\n        self.td = directoryChangers.TemporaryDirectoryChanger()\n        self.td.__enter__()\n\n    def tearDown(self):\n        self.td.__exit__(None, None, None)\n\n    def test_compareNoDiffs(self):\n        \"\"\"As a baseline, this test should always reveal zero diffs.\"\"\"\n        # build two super-simple H5 files for testing\n        o, r = test_reactors.loadTestReactor(\n            TEST_ROOT,\n            customSettings={\"reloadDBName\": \"reloadingDB.h5\"},\n            inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\",\n        )\n\n        suites = []\n        for _i in range(2):\n            # Build the cases\n            suite = cases.CaseSuite(settings.Settings())\n\n            bp = blueprints.Blueprints.load(BLUEPRINT_INPUT)\n\n            c1 = cases.Case(cs=settings.Settings(), bp=bp)\n            c1.cs.path = \"c1.yaml\"\n            
suite.add(c1)\n\n            c2 = cases.Case(cs=settings.Settings(), bp=bp)\n            c2.cs.path = \"c2.yaml\"\n            suite.add(c2)\n\n            suites.append(suite)\n\n            # create two DBs, identical but for file names\n            tmpDir = os.getcwd()\n            dbs = []\n            for i in range(1, 3):\n                # create the tests DB\n                dbi = DatabaseInterface(r, o.cs)\n                dbi.initDB(fName=f\"{tmpDir}/c{i}.h5\")\n                db = dbi.database\n\n                # validate the file exists, and force it to be readable again\n                b = h5py.File(db._fullPath, \"r\")\n                self.assertEqual(list(b.keys()), [\"inputs\"])\n                self.assertEqual(sorted(b[\"inputs\"].keys()), [\"blueprints\", \"settings\"])\n                b.close()\n\n                # append to lists\n                dbs.append(db)\n\n            # do a comparison that should have no diffs\n            diff = c1.compare(c2)\n            self.assertEqual(diff, 0)\n\n        diff = suites[0].compare(suites[1])\n        self.assertEqual(diff, 0)\n\n        diff = suites[1].compare(suites[0])\n        self.assertEqual(diff, 0)\n\n\nclass TestExtraInputWriting(unittest.TestCase):\n    \"\"\"Make sure extra inputs from interfaces are written.\"\"\"\n\n    def test_writeInput(self):\n        fName = os.path.join(TEST_ROOT, \"armiRun.yaml\")\n        cs = settings.Settings(fName)\n        baseCase = cases.Case(cs)\n        with directoryChangers.TemporaryDirectoryChanger():\n            case = baseCase.clone()\n            case.writeInputs()\n            self.assertTrue(os.path.exists(cs[CONF_SHUFFLE_LOGIC]))\n            # Availability factor is in the original settings file but since it is a default value,\n            # gets removed for the write-out\n            txt = open(\"armiRun.yaml\", \"r\").read()\n            self.assertNotIn(\"availabilityFactor\", txt)\n            
self.assertIn(\"armiRun-blueprints.yaml\", txt)\n\n        with directoryChangers.TemporaryDirectoryChanger():\n            case = baseCase.clone(writeStyle=\"medium\")\n            case.writeInputs(writeStyle=\"medium\")\n            # Availability factor is in the original settings file and it is a default value. While\n            # \"short\" (default writing style) removes, \"medium\" should not\n            txt = open(\"armiRun.yaml\", \"r\").read()\n            self.assertIn(\"availabilityFactor\", txt)\n\n\nclass MultiFilesInterfaces(interfaces.Interface):\n    \"\"\"\n    A little test interface that adds a setting that we need to test copyInterfaceInputs with\n    multiple files.\n    \"\"\"\n\n    name = \"MultiFilesInterfaces\"\n\n    @staticmethod\n    def specifyInputs(cs):\n        settingName = \"multipleFilesSetting\"\n        return {settingName: cs[settingName]}\n\n\nclass TestPluginWithDuplicateSetting(plugins.ArmiPlugin):\n    @staticmethod\n    @plugins.HOOKIMPL\n    def defineSettings():\n        \"\"\"Define a duplicate setting.\"\"\"\n        return [\n            settings.setting.Setting(\n                \"power\",\n                default=123,\n                label=\"power\",\n                description=\"duplicate power\",\n            )\n        ]\n\n\nclass TestPluginCopyInterfaceFiles(plugins.ArmiPlugin):\n    @staticmethod\n    @plugins.HOOKIMPL\n    def defineSettings():\n        \"\"\"Define settings for the plugin.\"\"\"\n        return [\n            settings.setting.Setting(\n                \"multipleFilesSetting\",\n                default=[],\n                label=\"multiple files\",\n                description=\"testing stuff\",\n            )\n        ]\n\n    @staticmethod\n    @plugins.HOOKIMPL\n    def exposeInterfaces(cs):\n        \"\"\"A plugin is mostly just a vehicle to add Interfaces to an Application.\"\"\"\n        return [\n            interfaces.InterfaceInfo(\n                
interfaces.STACK_ORDER.PREPROCESSING,\n                MultiFilesInterfaces,\n                {\"enabled\": True},\n            )\n        ]\n\n\nclass TestCopyInterfaceInputs(unittest.TestCase):\n    \"\"\"Ensure file path is found and updated properly.\"\"\"\n\n    def setUp(self):\n        \"\"\"\n        Manipulate the standard App. We can't just configure our own, since the\n        pytest environment bleeds between tests.\n        \"\"\"\n        self._backupApp = copy.deepcopy(getApp())\n\n    def tearDown(self):\n        \"\"\"Restore the App to its original state.\"\"\"\n        import armi\n\n        armi._app = self._backupApp\n        context.APP_NAME = \"armi\"\n\n    def test_copyInputsHelper(self):\n        \"\"\"Test the helper function for copyInterfaceInputs.\"\"\"\n        testSetting = CONF_SHUFFLE_LOGIC\n        cs = settings.Settings(ARMI_RUN_PATH)\n        shuffleFile = cs[testSetting]\n\n        # test it passes\n        sourceFullPath = os.path.join(TEST_ROOT, shuffleFile)\n        # ensure we are not in TEST_ROOT\n        with directoryChangers.TemporaryDirectoryChanger() as newDir:\n            destFilePath = cases.case._copyInputsHelper(\n                testSetting,\n                sourcePath=sourceFullPath,\n                destPath=newDir.destination,\n                origFile=shuffleFile,\n            )\n            newFilePath = os.path.join(newDir.destination, shuffleFile)\n            self.assertTrue(os.path.exists(newFilePath))\n            self.assertEqual(destFilePath, os.path.basename(newFilePath))\n\n        # test with bad file path, should return original file ensure we are not in TEST_ROOT\n        with directoryChangers.TemporaryDirectoryChanger() as newDir:\n            destFilePath = cases.case._copyInputsHelper(\n                testSetting,\n                sourcePath=sourceFullPath,\n                destPath=\"fakeDest\",\n                origFile=shuffleFile,\n            )\n            
self.assertFalse(os.path.exists(destFilePath))\n            self.assertEqual(destFilePath, shuffleFile)\n\n    def test_copyInterfaceInputsSingleFile(self):\n        testSetting = CONF_SHUFFLE_LOGIC\n        cs = settings.Settings(ARMI_RUN_PATH)\n        shuffleFile = cs[testSetting]\n\n        # ensure we are not in TEST_ROOT\n        with directoryChangers.TemporaryDirectoryChanger() as newDir:\n            newSettings = cases.case.copyInterfaceInputs(cs, destination=newDir.destination)\n            newFilePath = os.path.join(newDir.destination, shuffleFile)\n            self.assertTrue(os.path.exists(newFilePath))\n            self.assertEqual(newSettings[testSetting], os.path.basename(newFilePath))\n\n    def test_copyInterfaceInputsNonFilePath(self):\n        testSetting = CONF_SHUFFLE_LOGIC\n        cs = settings.Settings(ARMI_RUN_PATH)\n        fakeShuffle = \"fakeFile.py\"\n        cs = cs.modified(newSettings={testSetting: fakeShuffle})\n\n        # ensure we are not in TEST_ROOT\n        with directoryChangers.TemporaryDirectoryChanger() as newDir:\n            newSettings = cases.case.copyInterfaceInputs(cs, destination=newDir.destination)\n            self.assertFalse(os.path.exists(newSettings[testSetting]))\n            self.assertEqual(newSettings[testSetting], fakeShuffle)\n\n    def test_copyInterfaceInputs_emptyFilePath(self):\n        testSetting = CONF_SHUFFLE_LOGIC\n        cs = settings.Settings(ARMI_RUN_PATH)\n        fakeShuffle = \"\"\n        cs = cs.modified(newSettings={testSetting: fakeShuffle})\n\n        # ensure we are not in TEST_ROOT\n        with directoryChangers.TemporaryDirectoryChanger() as newDir:\n            newSettings = cases.case.copyInterfaceInputs(cs, destination=newDir.destination)\n            with self.assertRaises(KeyError):\n                # shouldn't process this setting as anything to worry about, so it won't be added to the dict\n                _shuffleLogic = newSettings[testSetting]\n\n    def 
test_failOnDuplicateSetting(self):\n        \"\"\"That that if a plugin attempts to add a duplicate setting, it raises an error.\"\"\"\n        # register the new Plugin\n        app = getApp()\n        app.pluginManager.register(TestPluginWithDuplicateSetting)\n\n        with self.assertRaises(ValueError):\n            _ = settings.Settings(ARMI_RUN_PATH)\n\n    def test_copyInterfaceInputsMultipleFiles(self):\n        # register the new Plugin\n        app = getApp()\n        app.pluginManager.register(TestPluginCopyInterfaceFiles)\n\n        pluginPath = \"armi.cases.tests.test_cases.TestPluginCopyInterfaceFiles\"\n        settingFiles = [str(os.path.join(TESTING_ROOT, \"resources\", \"COMPXS.ascii\")), \"ISOAA\"]\n        testName = \"test_copyInterfaceInputs_multipleFiles\"\n        testSetting = \"multipleFilesSetting\"\n\n        cs = settings.Settings(ARMI_RUN_PATH)\n        cs = cs.modified(\n            caseTitle=testName,\n            newSettings={testName: [pluginPath]},\n        )\n        cs = cs.modified(newSettings={testSetting: settingFiles})\n\n        # ensure we are not in TEST_ROOT\n        with directoryChangers.TemporaryDirectoryChanger() as newDir:\n            newSettings = cases.case.copyInterfaceInputs(cs, destination=newDir.destination)\n            newFilePaths = [os.path.join(newDir.destination, f) for f in settingFiles]\n            for newFilePath in newFilePaths:\n                self.assertTrue(os.path.exists(newFilePath))\n            self.assertEqual([str(s) for s in newSettings[testSetting]], [str(s) for s in settingFiles])\n\n    def test_copyInterfaceInputsOneFile(self):\n        # register the new Plugin\n        app = getApp()\n        app.pluginManager.register(TestPluginCopyInterfaceFiles)\n\n        pluginPath = \"armi.cases.tests.test_cases.TestPluginCopyInterfaceFiles\"\n        settingFiles = [str(os.path.join(TESTING_ROOT, \"resources\", \"COMPXS.ascii\"))]\n        testName = \"test_copyInterfaceInputsOneFile\"\n     
   testSetting = \"multipleFilesSetting\"\n\n        cs = settings.Settings(ARMI_RUN_PATH)\n        cs = cs.modified(\n            caseTitle=testName,\n            newSettings={testName: [pluginPath]},\n        )\n        cs = cs.modified(newSettings={testSetting: settingFiles})\n\n        # ensure we are not in TEST_ROOT\n        with directoryChangers.TemporaryDirectoryChanger() as newDir:\n            newSettings = cases.case.copyInterfaceInputs(cs, destination=newDir.destination)\n            newFilePaths = [os.path.join(newDir.destination, f) for f in settingFiles]\n            for newFilePath in newFilePaths:\n                self.assertTrue(os.path.exists(newFilePath))\n            self.assertEqual([str(s) for s in newSettings[testSetting]], [str(s) for s in settingFiles])\n\n    def test_copyInterfaceInputsWildcardFile(self):\n        testSetting = CONF_SHUFFLE_LOGIC\n        cs = settings.Settings(ARMI_RUN_PATH)\n        # Use something that isn't the shuffle logic file in the case settings\n        wcFile = \"ISO*\"\n        cs = cs.modified(newSettings={testSetting: wcFile})\n\n        # ensure we are not in TEST_ROOT\n        with directoryChangers.TemporaryDirectoryChanger() as newDir:\n            newSettings = cases.case.copyInterfaceInputs(cs, destination=newDir.destination)\n            newFilePath = [os.path.join(newDir.destination, \"ISOAA\")]\n            self.assertTrue(os.path.exists(newFilePath[0]))\n            self.assertEqual(newSettings[testSetting], [os.path.basename(newFilePath[0])])\n\n        # Check on a file that doesn't exist (so globFilePaths len is 0)\n        wcFile = \"fakeFile*\"\n        cs = cs.modified(newSettings={testSetting: wcFile})\n        with directoryChangers.TemporaryDirectoryChanger() as newDir:\n            newSettings = cases.case.copyInterfaceInputs(cs, destination=newDir.destination)\n            self.assertFalse(os.path.exists(newSettings[testSetting][0]))\n            
self.assertEqual(newSettings[testSetting], [wcFile])\n\n    def test_copyInterfaceInputsRelPath(self):\n        testSetting = CONF_SHUFFLE_LOGIC\n        cs = settings.Settings(ARMI_RUN_PATH)\n        shuffleFile = cs[testSetting]\n        relFile = \"../tests/\" + shuffleFile\n        cs = cs.modified(newSettings={testSetting: relFile})\n\n        # ensure we are not in TEST_ROOT\n        with directoryChangers.TemporaryDirectoryChanger() as newDir:\n            newSettings = cases.case.copyInterfaceInputs(cs, destination=newDir.destination)\n            newFilePath = os.path.join(newDir.destination, shuffleFile)\n            self.assertTrue(os.path.exists(newFilePath))\n            self.assertEqual(newSettings[testSetting], os.path.basename(newFilePath))\n\n    def test_copyInterfaceInputsAbsPath(self):\n        testSetting = CONF_SHUFFLE_LOGIC\n        cs = settings.Settings(ARMI_RUN_PATH)\n        shuffleFile = cs[testSetting]\n        absFile = os.path.dirname(os.path.abspath(ARMI_RUN_PATH))\n        absFile = str(os.path.join(absFile, os.path.basename(shuffleFile)))\n        cs = cs.modified(newSettings={testSetting: absFile})\n\n        with directoryChangers.TemporaryDirectoryChanger() as newDir:\n            newSettings = cases.case.copyInterfaceInputs(cs, destination=newDir.destination)\n            # file exists\n            self.assertTrue(os.path.exists(newSettings[testSetting]))\n            # but not copied to this dir\n            self.assertFalse(os.path.exists(os.path.basename(newSettings[testSetting])))\n            self.assertEqual(str(newSettings[testSetting]), absFile)\n"
  },
  {
    "path": "armi/cases/tests/test_suiteBuilder.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Unit tests for the SuiteBuilder.\"\"\"\n\nimport os\nimport unittest\n\nfrom armi import cases, settings\nfrom armi.cases.inputModifiers.inputModifiers import InputModifier\nfrom armi.cases.suiteBuilder import FullFactorialSuiteBuilder, SeparateEffectsSuiteBuilder\n\nTHIS_DIR = os.path.dirname(os.path.abspath(__file__))\nAFCI_PATH = os.path.join(THIS_DIR, \"..\", \"..\", \"testing\", \"reactors\", \"anl-afci-177\", \"anl-afci-177.yaml\")\n\n\nclass SettingModifier(InputModifier):\n    def __init__(self, settingName, value):\n        self.settingName = settingName\n        self.value = value\n\n    def __call__(self, cs, bp):\n        cs = cs.modified(newSettings={self.settingName: self.value})\n        return cs, bp\n\n\nclass TestFullFactorialSuiteBuilder(unittest.TestCase):\n    \"\"\"Class to test FullFactorialSuiteBuilder.\"\"\"\n\n    def test_buildSuite(self):\n        \"\"\"Initialize a full factorial suite of cases.\n\n        .. 
test:: A generic mechanism to allow users to modify user inputs in cases.\n            :id: T_ARMI_CASE_MOD1\n            :tests: R_ARMI_CASE_MOD\n        \"\"\"\n        cs = settings.Settings(AFCI_PATH)\n        case = cases.Case(cs)\n        builder = FullFactorialSuiteBuilder(case)\n        builder.addDegreeOfFreedom(SettingModifier(\"settingName1\", value) for value in (1, 2))\n        builder.addDegreeOfFreedom(SettingModifier(\"settingName2\", value) for value in (3, 4, 5))\n\n        self.assertEqual(builder.modifierSets[0][0].value, 1)\n        self.assertEqual(builder.modifierSets[0][1].value, 3)\n\n        self.assertEqual(builder.modifierSets[1][0].value, 2)\n        self.assertEqual(builder.modifierSets[1][1].value, 3)\n\n        self.assertEqual(builder.modifierSets[2][0].value, 1)\n        self.assertEqual(builder.modifierSets[2][1].value, 4)\n\n        self.assertEqual(builder.modifierSets[3][0].value, 2)\n        self.assertEqual(builder.modifierSets[3][1].value, 4)\n\n        self.assertEqual(builder.modifierSets[4][0].value, 1)\n        self.assertEqual(builder.modifierSets[4][1].value, 5)\n\n        self.assertEqual(builder.modifierSets[5][0].value, 2)\n        self.assertEqual(builder.modifierSets[5][1].value, 5)\n\n        self.assertEqual(len(builder.modifierSets), 6)\n\n\nclass TestSeparateEffectsBuilder(unittest.TestCase):\n    \"\"\"Class to test separate effects builder.\"\"\"\n\n    def test_buildSuite(self):\n        \"\"\"Initialize a full factorial suite of cases.\n\n        .. 
test:: A generic mechanism to allow users to modify user inputs in cases.\n            :id: T_ARMI_CASE_MOD2\n            :tests: R_ARMI_CASE_MOD\n        \"\"\"\n        cs = settings.Settings(AFCI_PATH)\n        case = cases.Case(cs)\n        builder = SeparateEffectsSuiteBuilder(case)\n        builder.addDegreeOfFreedom(SettingModifier(\"settingName1\", value) for value in (1, 2))\n        builder.addDegreeOfFreedom(SettingModifier(\"settingName2\", value) for value in (3, 4, 5))\n\n        self.assertEqual(builder.modifierSets[0][0].value, 1)\n        self.assertEqual(builder.modifierSets[0][0].settingName, \"settingName1\")\n\n        self.assertEqual(builder.modifierSets[1][0].value, 2)\n        self.assertEqual(builder.modifierSets[1][0].settingName, \"settingName1\")\n\n        self.assertEqual(builder.modifierSets[2][0].value, 3)\n        self.assertEqual(builder.modifierSets[2][0].settingName, \"settingName2\")\n\n        self.assertEqual(builder.modifierSets[3][0].value, 4)\n        self.assertEqual(builder.modifierSets[3][0].settingName, \"settingName2\")\n\n        self.assertEqual(builder.modifierSets[4][0].value, 5)\n        self.assertEqual(builder.modifierSets[4][0].settingName, \"settingName2\")\n\n        self.assertEqual(len(builder.modifierSets), 5)\n"
  },
  {
    "path": "armi/cli/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis package provides various operations users can ask ARMI to do with their inputs.\n\nAn Entry Point might run a simulation, migrate inputs, build a suite of related inputs\nand submit them in a parameter sweep, validate inputs, open the GUI, run a test suite,\nor other similar things. There are built-in entry points, and additional ones may\nbe specified by custom plugins.\n\nThe full :doc:`docs for entry points are here </developer/entrypoints>`.\n\nSee Also\n--------\narmi.cases : Individual collections of tasks that may run one or more entry points.\n    These allow one entry point to create a sequence of events that may call one\n    or more additional entry points. For example, the ``submitSuite`` entry point builds\n    a case suite with many related cases that will all call the ``run`` entry point from\n    a HPC cluster.\n\narmi.operators :  Operations that ARMI will perform on a reactor model.\n    These may be created by ``Case`` objects created by certain entry points (e.g. 
``run``).\n\narmi : Fundamental entry point that calls this package.\n\"\"\"\n\n# importing each module causes the any EntryPoints defined in the module that\n# are decorated with @armi.command to be added to the collection of registered\n# classes\n\nimport argparse\nimport re\nimport textwrap\nfrom typing import Optional\n\nfrom armi import context, meta, plugins, runLog\n\n\nclass EntryPointsPlugin(plugins.ArmiPlugin):\n    @staticmethod\n    @plugins.HOOKIMPL\n    def defineEntryPoints():\n        from armi.cli import (\n            checkInputs,\n            # testing\n            cleanTemps,\n            clone,\n            compareCases,\n            gridGui,\n            migrateInputs,\n            modify,\n            reportsEntryPoint,\n            run,\n            runSuite,\n        )\n\n        entryPoints = []\n        entryPoints.append(checkInputs.CheckInputEntryPoint)\n        entryPoints.append(checkInputs.ExpandBlueprints)\n        entryPoints.append(clone.CloneArmiRunCommandBatch)\n        entryPoints.append(clone.CloneArmiRunCommandInteractive)\n        entryPoints.append(clone.CloneSuiteCommand)\n        entryPoints.append(compareCases.CompareCases)\n        entryPoints.append(compareCases.CompareSuites)\n        entryPoints.append(migrateInputs.MigrateInputs)\n        entryPoints.append(modify.ModifyCaseSettingsCommand)\n        entryPoints.append(run.RunEntryPoint)\n        entryPoints.append(runSuite.RunSuiteCommand)\n        entryPoints.append(gridGui.GridGuiEntryPoint)\n\n        # testing\n        entryPoints.append(cleanTemps.CleanTemps)\n        entryPoints.append(reportsEntryPoint.ReportsEntryPoint)\n\n        return entryPoints\n\n\nclass ArmiParser(argparse.ArgumentParser):\n    \"\"\"Subclass of default ArgumentParser to better handle application splash text.\"\"\"\n\n    def print_help(self, file=None):\n        splash()\n        argparse.ArgumentParser.print_help(self, file)\n\n\nclass ArmiCLI:\n    \"\"\"\n    ARMI CLI -- The main 
entry point into ARMI. There are various commands available. To get help\n    for the individual commands, run again with `<command> --help`. Typically, the CLI implements\n    functions that already exist within ARMI.\n\n    .. impl:: The basic ARMI CLI, for running a simulation.\n        :id: I_ARMI_CLI_CS\n        :implements: R_ARMI_CLI_CS\n\n        Provides a basic command-line interface (CLI) for running an ARMI simulation. Available\n        commands can be listed with ``-l``. Information on individual commands can be obtained by\n        running with ``<command> --help``.\n    \"\"\"\n\n    def __init__(self):\n        from armi import getPluginManager\n\n        self._entryPoints = dict()\n        for pluginEntryPoints in getPluginManager().hook.defineEntryPoints():\n            for entryPoint in pluginEntryPoints:\n                if entryPoint.name in self._entryPoints:\n                    raise KeyError(\n                        \"Duplicate entry points defined for `{}`: {} and {}\".format(\n                            entryPoint.name,\n                            self._entryPoints[entryPoint.name],\n                            entryPoint,\n                        )\n                    )\n                self._entryPoints[entryPoint.name] = entryPoint\n\n        parser = ArmiParser(\n            prog=context.APP_NAME,\n            description=self.__doc__.split(\".. 
impl\")[0],\n            usage=\"%(prog)s [-h] [-l | command [args]]\",\n        )\n\n        group = parser.add_mutually_exclusive_group()\n\n        group.add_argument(\"-v\", \"--version\", action=\"store_true\", help=\"display the version\")\n\n        group.add_argument(\"-l\", \"--list-commands\", action=\"store_true\", help=\"list commands\")\n        group.add_argument(\"command\", nargs=\"?\", default=\"help\", help=argparse.SUPPRESS)\n        parser.add_argument(\"args\", nargs=argparse.REMAINDER, help=argparse.SUPPRESS)\n\n        self.parser = parser\n\n    @staticmethod\n    def showVersion():\n        \"\"\"Print the App name and version on the command line.\"\"\"\n        from armi import getApp\n\n        prog = context.APP_NAME\n        app = getApp()\n        if app is None or prog == \"armi\":\n            print(\"{0} {1}\".format(prog, meta.__version__))\n        else:\n            print(\"{0} {1}\".format(prog, app.version))\n\n    def listCommands(self):\n        \"\"\"List commands with a short description.\"\"\"\n        splash()\n\n        indent = 22\n        initial_indent = \"  \"\n        subsequent_indent = initial_indent + \" \" * indent\n        wrapper = textwrap.TextWrapper(initial_indent=initial_indent, subsequent_indent=subsequent_indent, width=79)\n\n        sub = re.compile(r\"\\s+\").sub\n\n        # given a string, condense white space into a single space\n        condense = lambda s: sub(\" \", s.strip())\n\n        commands = self._entryPoints.values()\n\n        formatter = \"{name:<{width}}{desc}\".format\n        print(\"\\ncommands:\")\n        for cmd in sorted(commands, key=lambda cmd: cmd.name):\n            \"\"\"Each command can optionally define a class attribute `description`\n            as documentation. 
If description is not defined (default=None since\n            it should inherit from EntryPoint), then the docstring is used.\n            If the docstring is also None, then fall back to an empty string.\"\"\"\n            desc = condense(cmd.description or cmd.__doc__ or \"\")\n            print(wrapper.fill(formatter(width=indent, name=cmd.name, desc=desc)))\n\n    def run(self) -> Optional[int]:\n        args = self.parser.parse_args()\n\n        if args.list_commands:\n            self.listCommands()\n            return 0\n        elif args.version:\n            ArmiCLI.showVersion()\n            return 0\n        elif args.command == \"help\":\n            self.parser.print_help()\n            return 0\n\n        return self.executeCommand(args.command, args.args)\n\n    def executeCommand(self, command, args) -> Optional[int]:\n        \"\"\"Execute `command` with arguments `args`, return optional exit code.\"\"\"\n        command = command.lower()\n        if command not in self._entryPoints:\n            print('Unrecognized command \"{}\". Valid commands are listed below.'.format(command))\n            self.listCommands()\n\n            return 1\n\n        commandClass = self._entryPoints[command]\n        cmd = commandClass()\n        if cmd.splash:\n            splash()\n\n        # parse the arguments... commands can have their own\n        cmd.parse(args)\n\n        if cmd.args.batch:\n            context.Mode.setMode(context.Mode.BATCH)\n        elif cmd.mode is not None:\n            context.Mode.setMode(cmd.mode)\n\n        # do whatever there is to be done!\n        return cmd.invoke()\n\n\ndef splash():\n    \"\"\"Emit the active App's splash text to the runLog for the primary node.\"\"\"\n    from armi import getApp\n\n    app = getApp()\n    assert app is not None\n    if context.MPI_RANK == 0:\n        runLog.raw(app.splashText)\n"
  },
  {
    "path": "armi/cli/checkInputs.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Entry point into ARMI to check inputs of a case or a whole folder of cases.\"\"\"\n\nimport pathlib\nimport sys\n\nfrom armi import runLog\nfrom armi.cli.entryPoint import EntryPoint\nfrom armi.utils.textProcessors import resolveMarkupInclusions\n\n\nclass ExpandBlueprints(EntryPoint):\n    \"\"\"\n    Perform expansion of !include directives in a blueprint file.\n\n    This is useful for testing inputs that make heavy use of !include directives.\n    \"\"\"\n\n    name = \"expand-bp\"\n\n    splash = False\n\n    def addOptions(self):\n        self.parser.add_argument(\"blueprints\", type=str, help=\"Path to root blueprints file\")\n\n    def invoke(self):\n        p = pathlib.Path(self.args.blueprints)\n        if not p.exists():\n            runLog.error(\"Blueprints file `{}` does not exist\".format(str(p)))\n            return 1\n        stream = resolveMarkupInclusions(p)\n        sys.stdout.write(stream.read())\n\n        return None\n\n\nclass CheckInputEntryPoint(EntryPoint):\n    \"\"\"\n    Check ARMI inputs for errors, inconsistencies, and the ability to initialize a reactor.\n\n    Also has functionality to generate a summary report of the input design. 
This can be run on\n    multiple cases and creates a table detailing the results of the input check.\n    \"\"\"\n\n    name = \"check-input\"\n\n    def addOptions(self):\n        self.parser.add_argument(\n            \"--recursive\",\n            \"-r\",\n            action=\"store_true\",\n            default=False,\n            help=\"Recursively check directory structure for valid settings files\",\n        )\n        self.parser.add_argument(\n            \"--skip-checks\",\n            \"-C\",\n            action=\"store_true\",\n            default=False,\n            help=\"Skip checking inputs (might be useful if you only want to generate a report).\",\n        )\n        self.parser.add_argument(\n            \"patterns\",\n            type=str,\n            nargs=\"*\",\n            default=[\"*.yaml\"],\n            help=\"File names or patterns\",\n        )\n\n    def invoke(self):\n        from armi import cases\n        from armi.utils import tabulate\n\n        suite = cases.CaseSuite(self.cs)\n        suite.discover(patterns=self.args.patterns, recursive=self.args.recursive)\n\n        table = []  # tuples (case, hasIssues, hasErrors)\n        for case in suite:\n            hasIssues = \"UNKNOWN\"\n            if not self.args.skip_checks:\n                hasIssues = \"PASSED\" if case.checkInputs() else \"HAS ISSUES\"\n\n            canStart = \"UNKNOWN\"\n            table.append((case.cs.path, case.title, canStart, hasIssues))\n\n        runLog.important(\n            tabulate.tabulate(\n                table,\n                headers=[\"case\", \"can start\", \"input is self consistent\"],\n                tableFmt=\"armi\",\n            )\n        )\n\n        if any(t[3] == \"HAS ISSUES\" for t in table):\n            runLog.error(\"The case is not self consistent\")\n\n        if any(t[2] == \"FAILED\" for t in table):\n            runLog.error(\"The case can not start\")\n"
  },
  {
    "path": "armi/cli/cleanTemps.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom armi import context\nfrom armi.cli.entryPoint import EntryPoint\n\n\nclass CleanTemps(EntryPoint):\n    \"\"\"\n    Delete all temp directories created by any ARMI run.\n\n    Useful for occasionally cleaning temporary dirs from crashed runs.\n\n    .. warning:: This will break any ongoing runs.\n    \"\"\"\n\n    name = \"clean-temps\"\n\n    def invoke(self):\n        context.cleanFastPathAfterSimulation()\n"
  },
  {
    "path": "armi/cli/clone.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\nfrom armi.cli.entryPoint import EntryPoint\n\n\nclass CloneArmiRunCommandBatch(EntryPoint):\n    \"\"\"\n    Clone existing ARMI settings input, and associated files, to the current\n    directory and modify it according to the supplied settings (on the\n    command line).\n    \"\"\"\n\n    name = \"clone-batch\"\n    settingsArgument = \"required\"\n\n    def addOptions(self):\n        self.parser.add_argument(\n            \"--additional-files\",\n            nargs=\"*\",\n            default=[],\n            help=\"Additional files from the source directory to copy into the target directory\",\n        )\n        self.parser.add_argument(\n            \"--settingsWriteStyle\",\n            type=str,\n            default=\"short\",\n            help=\"Writing style for which settings get written back to the settings files.\",\n            choices=[\"short\", \"medium\", \"full\"],\n        )\n        # somehow running `armi clone-batch -h` on the command line requires this to\n        # not be first?\n        for settingName in self.cs.keys():\n            self.createOptionFromSetting(settingName, suppressHelp=True)\n\n    def invoke(self):\n        # get the case title.\n        from armi import cases\n\n        inputCase = cases.Case(cs=self.cs)\n        inputCase.clone(\n            additionalFiles=self.args.additional_files,\n            
writeStyle=self.args.settingsWriteStyle,\n        )\n\n\nclass CloneArmiRunCommandInteractive(CloneArmiRunCommandBatch):\n    \"\"\"\n    Interactively clone existing ARMI settings input, and associated files, to the current\n    directory and modify it according to the supplied settings (on the command line).\n    \"\"\"\n\n    name = \"clone\"\n    settingsArgument = \"required\"\n\n\nclass CloneSuiteCommand(EntryPoint):\n    \"\"\"Clone existing ARMI cases as a new suite.\"\"\"\n\n    name = \"clone-suite\"\n\n    def addOptions(self):\n        for settingName in self.cs.environmentSettings:\n            self.createOptionFromSetting(settingName)\n\n        self.parser.add_argument(\n            \"--directory\",\n            \"-d\",\n            type=str,\n            default=os.getcwd(),\n            help=\"Root directory to search for cases\",\n        )\n        self.parser.add_argument(\n            \"patterns\",\n            nargs=\"*\",\n            type=str,\n            default=[\"*.yaml\"],\n            help=\"Pattern to use while searching for ARMI settings files.\",\n        )\n        self.parser.add_argument(\n            \"--ignore\",\n            \"-i\",\n            nargs=\"+\",\n            type=str,\n            default=[],\n            help=\"Pattern to search for inputs to ignore.\",\n        )\n        self.parser.add_argument(\n            \"--list\",\n            \"-l\",\n            action=\"store_true\",\n            default=False,\n            help=\"Just list the settings files found, don't actually submit them.\",\n        )\n        self.parser.add_argument(\n            \"--settingsWriteStyle\",\n            type=str,\n            default=\"short\",\n            help=\"Writing style for which settings get written back to the settings files.\",\n            choices=[\"short\", \"medium\", \"full\"],\n        )\n\n    def invoke(self):\n        from armi import cases\n\n        suite = cases.CaseSuite(self.cs)\n        
suite.discover(\n            patterns=self.args.patterns,\n            rootDir=self.args.directory,\n            ignorePatterns=self.args.ignore,\n        )\n        suite.clone(oldRoot=self.args.directory, writeStyle=self.args.settingsWriteStyle)\n"
  },
  {
    "path": "armi/cli/compareCases.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport sys\n\nfrom armi import runLog\nfrom armi.cli.entryPoint import EntryPoint\n\n# Params that are well-known to vary from run to run. In the future we should probably\n# derive this from a parameter category so that it is extensible\nDEFAULT_EXCLUSIONS = (\n    \"^.*/minutesSinceStart$\",\n    \"^.*/maxProcessMemoryInMB$\",\n    \"^.*/minProcessMemoryInMB$\",\n)\n\n# Parameters that under normal circumstances would be the same, but may not be\n# faithfully represented by an old database format.\nCONVERTED_EXCLUSIONS = DEFAULT_EXCLUSIONS + (\n    \"^.*/serialNum$\",\n    \"^.*/temperatureInC$\",\n    \"^.*/volume$\",\n    \"^.*/layout/temperatures$\",\n)\n\n\nclass CompareCases(EntryPoint):\n    \"\"\"Compare the databases from two ARMI cases.\"\"\"\n\n    name = \"compare\"\n\n    def _addComparisonOptions(self):\n        parser = self.parser\n        parser.add_argument(\n            \"--tolerance\",\n            default=0.01,\n            action=\"store\",\n            type=float,\n            help=(\n                \"If a test database entry differs by more than this percent \"\n                \"from the reference database, then it will be marked \"\n                \"as a difference between the two databases.\"\n            ),\n        )\n        parser.add_argument(\n            \"--weights\",\n            nargs=\"*\",\n            
action=\"store\",\n            help=\"Period separated key/value pairs for database table weights\",\n        )\n        parser.add_argument(\n            \"--exclude\",\n            default=CONVERTED_EXCLUSIONS,\n            action=\"store\",\n            nargs=\"+\",\n            help=(\"Patterns for parameters to ignore in comparisons\"),\n        )\n        parser.add_argument(\n            \"--timestepCompare\",\n            default=None,\n            action=\"store\",\n            nargs=\"+\",\n            help=(\n                \"List of timesteps to compare. Note that any timestep not listed will \"\n                \"not be compared. Format the cycle and node separated by a period. E.g. \"\n                \"0.0 0.1 1.2 3.3 will compare c0n0, c0n1, c1n2, c3n3 and skip all others\"\n            ),\n        )\n\n    def addOptions(self):\n        self._addComparisonOptions()\n        parser = self.parser\n        parser.add_argument(\n            \"refDB\",\n            type=str,\n            help=\"The database to be used as the reference, baseline case.\",\n        )\n        parser.add_argument(\n            \"cmpDB\",\n            type=str,\n            help=\"The database to be used as the comparison, evaluated case.\",\n        )\n        parser.add_argument(\"--output\", \"-o\", type=str, default=\"\", help=\"Output file name.\")\n\n    def parse(self, args):\n        EntryPoint.parse(self, args)\n\n        if self.args.timestepCompare:\n            self.args.timestepCompare = list(tuple(map(int, step.split(\".\"))) for step in self.args.timestepCompare)\n\n        if self.args.weights:\n            self.args.weights = dict(w.split(\".\") for w in self.args.weights)\n\n    def invoke(self):\n        from armi.bookkeeping.db import compareDatabases\n\n        diffs = compareDatabases(\n            self.args.refDB,\n            self.args.cmpDB,\n            tolerance=self.args.tolerance,\n            exclusions=self.args.exclude,\n            
timestepCompare=self.args.timestepCompare,\n        )\n        return diffs.nDiffs()\n\n\nclass CompareSuites(CompareCases):\n    \"\"\"Do a case-by-case comparison between two CaseSuites.\"\"\"\n\n    name = \"compare-suites\"\n\n    def addOptions(self):\n        self._addComparisonOptions()\n        self.parser.add_argument(\n            \"reference\",\n            type=str,\n            help=\"The root directory of the reference, or baseline, suite.\",\n        )\n        self.parser.add_argument(\n            \"comparison\",\n            type=str,\n            help=\"The root directory of the comparison, or evaluated, suite.\",\n        )\n        self.parser.add_argument(\n            \"--patterns\",\n            \"-p\",\n            nargs=\"*\",\n            type=str,\n            default=[\"*.yaml\"],\n            help=\"Pattern to use while searching for ARMI settings files.\",\n        )\n\n        self.parser.add_argument(\n            \"--additional_comparisons\",\n            nargs=\"*\",\n            type=str,\n            default=[],\n            help=\"Pattern tests that were not run but should appear in table.\",\n        )\n\n        self.parser.add_argument(\n            \"--ignore\",\n            \"-i\",\n            nargs=\"*\",\n            type=str,\n            default=[],\n            help=\"Pattern to search for inputs to ignore.\",\n        )\n        self.parser.add_argument(\n            \"--skip-inspection\",\n            \"-I\",\n            action=\"store_true\",\n            default=False,\n            help=\"Skip inspection. By default, setting files are checked for integrity and consistency. These \"\n            \"checks result in needing to manually resolve a number of differences. 
Using this option will \"\n            \"suppress the inspection step.\",\n        )\n\n    def invoke(self):\n        from armi import cases\n\n        if not os.path.exists(self.args.reference):\n            runLog.error(\"Could not find reference directory {}\".format(self.args.reference))\n            sys.exit(1)\n\n        if not os.path.exists(self.args.comparison):\n            runLog.error(\"Could not find comparison directory {}\".format(self.args.comparison))\n            sys.exit(1)\n\n        refSuite = cases.CaseSuite(self.cs)\n\n        # contains all tests that user had access to\n        allTests = []\n        for pat in self.args.patterns + self.args.additional_comparisons:\n            allTests.append(pat)\n        refSuite.discover(\n            rootDir=self.args.reference,\n            patterns=allTests,\n            ignorePatterns=self.args.ignore,\n            skipInspection=self.args.skip_inspection,\n        )\n\n        cmpSuite = cases.CaseSuite(self.cs)\n        cmpSuite.discover(\n            rootDir=self.args.comparison,\n            patterns=self.args.patterns,\n            ignorePatterns=self.args.ignore,\n            skipInspection=self.args.skip_inspection,\n        )\n\n        nIssues = refSuite.compare(\n            cmpSuite,\n            weights=self.args.weights,\n            tolerance=self.args.tolerance,\n            exclusion=self.args.exclude,\n            timestepCompare=self.args.timestepCompare,\n        )\n\n        if nIssues > 0:\n            sys.exit(1)\n"
  },
  {
    "path": "armi/cli/database.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Entry point into ARMI for manipulating output databases.\"\"\"\n\nimport os\nimport pathlib\n\nfrom armi import context, runLog\nfrom armi.cli.entryPoint import EntryPoint\nfrom armi.utils.textProcessors import resolveMarkupInclusions\n\n\nclass ExtractInputs(EntryPoint):\n    \"\"\"\n    Recover input files from a database file.\n\n    This can come in handy when input files need to be hand-migrated to facilitate loading or\n    migration of the database file itself, or when attempting to re-run a slightly-modified version\n    of a case.\n    \"\"\"\n\n    name = \"extract-inputs\"\n    mode = context.Mode.BATCH\n\n    def addOptions(self):\n        self.parser.add_argument(\"h5db\", help=\"Path to input database\", type=str)\n        self.parser.add_argument(\n            \"--output-base\",\n            \"-o\",\n            help=\"Base name for extracted inputs. 
If not provided, base name is implied from the database name.\",\n            type=str,\n            default=None,\n        )\n\n    def parse_args(self, args):\n        EntryPoint.parse_args(self, args)\n\n        if self.args.output_base is None:\n            self.args.output_base = os.path.splitext(self.args.h5db)[0]\n\n    def invoke(self):\n        from armi.bookkeeping.db.database import Database\n\n        db = Database(self.args.h5db, \"r\")\n\n        with db:\n            settings, bp = db.readInputsFromDB()\n\n        settingsPath = self.args.output_base + \"_settings.yaml\"\n        bpPath = self.args.output_base + \"_blueprints.yaml\"\n\n        bail = False\n        for path in [settingsPath, bpPath]:\n            if os.path.exists(settingsPath):\n                runLog.error(\"`{}` already exists. Aborting.\".format(path))\n                bail = True\n        if bail:\n            return\n\n        for path, data, inp in [\n            (settingsPath, settings, \"settings\"),\n            (bpPath, bp, \"blueprints\"),\n        ]:\n            if path is None:\n                continue\n            runLog.info(\"Writing {} to `{}`\".format(inp, path))\n            if isinstance(data, bytes):\n                data = data.decode()\n            with open(path, \"w\") as f:\n                f.write(data)\n\n\nclass InjectInputs(EntryPoint):\n    \"\"\"\n    Insert new inputs into a database file, overwriting any existing inputs.\n\n    This is useful for performing hand migrations of inputs to facilitate database migrations.\n    \"\"\"\n\n    name = \"inject-inputs\"\n    mode = context.Mode.BATCH\n\n    def addOptions(self):\n        self.parser.add_argument(\"h5db\", help=\"Path to affected database\", type=str)\n        self.parser.add_argument(\"--blueprints\", help=\"Path to blueprints file\", type=str, default=None)\n        self.parser.add_argument(\"--settings\", help=\"Path to settings file\", type=str, default=None)\n\n    def invoke(self):\n   
     from armi.bookkeeping.db.database import Database\n\n        if all(li is None for li in [self.args.blueprints, self.args.settings]):\n            runLog.error(\"No settings, blueprints, or geometry files specified; nothing to do.\")\n            return\n\n        bp = None\n        settings = None\n\n        if self.args.blueprints is not None:\n            bp = resolveMarkupInclusions(pathlib.Path(self.args.blueprints)).read()\n\n        if self.args.settings is not None:\n            settings = resolveMarkupInclusions(pathlib.Path(self.args.settings)).read()\n\n        db = Database(self.args.h5db, \"a\")\n\n        with db:\n            # Not calling writeInputsToDb, since it makes too many assumptions about where the\n            # inputs are coming from, and which ones we want to write. Instead, we assume that we\n            # know where to store them, and do it ourselves.\n            for data, key in [\n                (bp, \"blueprints\"),\n                (settings, \"settings\"),\n            ]:\n                if data is not None:\n                    dSetName = \"inputs/\" + key\n                    if dSetName in db.h5db:\n                        del db.h5db[dSetName]\n                    db.h5db[dSetName] = data\n"
  },
  {
    "path": "armi/cli/entryPoint.py",
    "content": "\"\"\"\nEntryPoint base classes.\n\nSee :doc:`/developer/entrypoints`.\n\"\"\"\n# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nfrom typing import Optional, Union\n\nfrom armi import context, runLog, settings\n\n\nclass _EntryPointEnforcer(type):\n    \"\"\"\n    Simple metaclass used for the EntryPoint abstract base class to enforce class\n    attributes.\n    \"\"\"\n\n    def __new__(mcs, name, bases, attrs):\n        if \"name\" not in attrs:\n            raise AttributeError(\"Subclasses of EntryPoint must define a `name` class attribute.\")\n\n        # basic input validation. Will throw a KeyError if argument is incorrect\n        clsSettings = {\"optional\": \"optional\", \"required\": \"required\", None: None}[attrs.get(\"settingsArgument\", None)]\n        attrs[\"settingsArgument\"] = clsSettings\n\n        return type.__new__(mcs, name, bases, attrs)\n\n\nclass EntryPoint(metaclass=_EntryPointEnforcer):\n    \"\"\"\n    Generic command line entry point.\n\n    A valid subclass must provide at least a ``name`` class attribute, and may also specify the\n    other class attributes described below.\n\n    .. 
impl:: Generic CLI base class for developers to use.\n        :id: I_ARMI_CLI_GEN\n        :implements: R_ARMI_CLI_GEN\n\n        Provides a base class for plugin developers to use in creating application-specific CLIs.\n        Valid subclasses must at least provide a ``name`` class attribute.\n\n        Optional class attributes that a subclass may provide include ``description``, a string\n        describing the command's actions, ``splash``, a boolean specifying whether to display a\n        splash screen upon execution, and ``settingsArgument``. If ``settingsArgument`` is specified\n        as ``required``, then a settings file is a required positional argument. If\n        ``settingsArgument`` is set to ``optional``, then a settings file is an optional positional\n        argument. If None is specified for the ``settingsArgument``, then no settings file argument\n        is added.\n    \"\"\"\n\n    #: The <command-name> that is used to call the command from the command line\n    name: Optional[str] = None\n\n    description: Optional[str] = None\n    \"\"\"A string summarizing the command's actions. This is the summary that is printed when\n    you run `python -m armi --list-commands` or `python -m armi <command-name>\n    --help`. If not provided, the docstring of the decorated class will be used\n    instead. In general, the docstring is probably sufficient but this argument allows\n    you to provide a short description of the command while retaining a long and\n    detailed docstring.\"\"\"\n\n    settingsArgument: Union[str, None] = None\n    \"\"\"\n    One of {'optional', 'required', None}, or unspecified.\n    Specifies whether a settings file argument is to be added to the\n    command's argument parser. If settingsArgument == 'required', then a settings\n    file is a required positional argument. If settingsArgument == 'optional',\n    then it is an optional positional argument. 
Finally, if settingsArgument is\n    None, then no settings file argument is added.\"\"\"\n\n    splash = True\n    \"\"\"\n    Whether running the entry point should produce a splash text upon executing.\n\n    Setting this to ``False`` is useful for utility commands that produce standard\n    output that would be needlessly cluttered by the splash text.\n    \"\"\"\n\n    #: One of {armi.Mode.BATCH, armi.Mode.INTERACTIVE, armi.Mode.GUI}, optional.\n    #: Specifies the ARMI mode in which the command is run. Default is armi.Mode.BATCH.\n    mode: Optional[int] = None\n\n    def __init__(self):\n        if self.name is None:\n            raise AttributeError(\"Subclasses of EntryPoint must define a `name` class attribute\")\n\n        self.cs = self._initSettings()\n\n        self.parser = argparse.ArgumentParser(\n            prog=\"{} {}\".format(context.APP_NAME, self.name),\n            description=self.description or self.__doc__,\n        )\n        if self.settingsArgument is not None:\n            if self.settingsArgument not in [\"required\", \"optional\"]:\n                raise AttributeError(\n                    \"Subclasses of EntryPoint must specify if the a case settings file is `required` or `optional`\"\n                )\n            if self.settingsArgument == \"optional\":\n                self.parser.add_argument(\n                    \"settings_file\",\n                    nargs=\"?\",\n                    action=loadSettings(self.cs),\n                    help=\"path to the settings file to load.\",\n                )\n            elif self.settingsArgument == \"required\":\n                self.parser.add_argument(\n                    \"settings_file\",\n                    action=loadSettings(self.cs),\n                    help=\"path to the settings file to load.\",\n                )\n\n        # optional arguments\n        self.parser.add_argument(\n            \"--caseTitle\",\n            type=str,\n            nargs=None,\n          
  action=setCaseTitle(self.cs),\n            help=\"update the case title of the run.\",\n        )\n        self.parser.add_argument(\n            \"--batch\",\n            action=\"store_true\",\n            default=False,\n            help=\"Run in batch mode even on TTY, silencing all queries.\",\n        )\n        self.createOptionFromSetting(\"verbosity\", \"-v\")\n        self.createOptionFromSetting(\"branchVerbosity\", \"-V\")\n\n        self.args = argparse.Namespace()\n        self.settingsProvidedOnCommandLine = []\n\n    @staticmethod\n    def _initSettings():\n        \"\"\"\n        Initialize settings for this entry point.\n\n        Settings given on command line will update this data structure.\n        Override to provide specific settings in the entry point.\n        \"\"\"\n        return settings.Settings()\n\n    def addOptions(self):\n        \"\"\"\n        Add additional command line options.\n\n        Values of options added to ``self.parser`` will be available\n        on ``self.args``. 
Values added with ``createOptionFromSetting``\n        will override the setting values in the settings input file.\n\n        See Also\n        --------\n        createOptionFromSetting : A method often called from here to create CLI options from\n            application settings.\n\n        argparse.ArgumentParser.add_argument : Often called from here using\n            ``self.parser.add_argument`` to add custom argparse arguments.\n        \"\"\"\n\n    def parse_args(self, args):\n        self.parser.parse_args(args, namespace=self.args)\n        runLog.setVerbosity(self.cs[\"verbosity\"])\n\n    def parse(self, args):\n        \"\"\"Parse the command line arguments, with the command specific arguments.\"\"\"\n        self.addOptions()\n        self.parse_args(args)\n\n    def invoke(self) -> Optional[int]:\n        \"\"\"\n        Body of the entry point.\n\n        This is an abstract method, and must be overridden in sub-classes.\n\n        Returns\n        -------\n        exitcode : int or None\n            Implementations should return an exit code, or ``None``, which is interpreted the\n            same as zero (successful completion).\n        \"\"\"\n        raise NotImplementedError(\"Subclasses of EntryPoint must override the .invoke() method\")\n\n    def createOptionFromSetting(self, settingName: str, additionalAlias: str = None, suppressHelp: bool = False):\n        \"\"\"\n        Create a CLI option from an ARMI setting.\n\n        This will override whatever is in the settings file.\n\n        Parameters\n        ----------\n        settingName : str\n            the setting name\n\n        additionalAlias : str\n            additional alias for the command line option, be careful and make sure they are all distinct!\n\n        suppressHelp : bool\n            option to suppress the help message when using the command line :code:`--help` function. 
This is\n            particularly beneficial when many options are being added as they can clutter the :code:`--help` to be\n            almost unusable.\n        \"\"\"\n        settingsInstance = self.cs.getSetting(settingName)\n\n        if settings.isBoolSetting(settingsInstance):\n            helpMessage = argparse.SUPPRESS if suppressHelp else settingsInstance.description\n            self._createToggleFromSetting(settingName, helpMessage, additionalAlias)\n\n        else:\n            choices = None\n            if suppressHelp:\n                helpMessage = argparse.SUPPRESS\n            else:\n                helpMessage = settingsInstance.description.replace(\"%\", \"%%\")\n\n            aliases = [\"--\" + settingName]\n            if additionalAlias is not None:\n                aliases.append(additionalAlias)\n\n            isListType = settingsInstance.underlyingType is list\n\n            try:\n                self.parser.add_argument(\n                    *aliases,\n                    type=str,  # types are properly converted by _SetSettingAction\n                    nargs=\"*\" if isListType else None,\n                    action=setSetting(self),\n                    default=settingsInstance.default,\n                    choices=choices,\n                    help=helpMessage,\n                )\n            # Capture an argument error here to prevent errors when duplicate options are attempting\n            # to be added. 
This may also be captured by exploring the parser's `_actions` list as well\n            # but this avoids accessing a private attribute.\n            except argparse.ArgumentError:\n                pass\n\n    def _createToggleFromSetting(self, settingName, helpMessage, additionalAlias=None):\n        aliases = [\"--\" + settingName]\n        if additionalAlias is not None:\n            aliases.append(additionalAlias)\n\n        group = self.parser.add_mutually_exclusive_group()\n\n        group.add_argument(*aliases, action=storeBool(True, self), help=helpMessage)\n\n        # not really sure what to do about the help message here. Don't\n        # want to suppress it since it won't show up at all, but can't\n        # exactly \"negate\" the text automatically. Ideas?\n        if helpMessage is not argparse.SUPPRESS:\n            helpMessage = \"\"\n\n        group.add_argument(\n            \"--no-\" + settingName,\n            action=storeBool(False, self),\n            dest=settingName,\n            help=helpMessage,\n        )\n        # ^^ overwrites settingName with False\n\n\ndef storeBool(boolDefault, ep):\n    class _StoreBoolAction(argparse.Action):\n        def __init__(self, option_strings, dest, help=None):\n            super(_StoreBoolAction, self).__init__(\n                option_strings=option_strings,\n                dest=dest,\n                nargs=0,\n                const=boolDefault,\n                default=False,\n                required=False,\n                help=help,\n            )\n\n        def __call__(self, parser, namespace, values, option_string=None):\n            ep.cs[self.dest] = self.const\n            ep.settingsProvidedOnCommandLine.append(self.dest)\n            ep.cs.failOnLoad()\n\n    return _StoreBoolAction\n\n\ndef setSetting(ep):\n    class _SetSettingAction(argparse.Action):\n        \"\"\"This class loads the command line supplied setting values into the\n        :py:data:`armi.settings.cs`.\n        \"\"\"\n\n 
       def __call__(self, parser, namespace, values, option_string=None):\n            # correctly converts type\n            ep.cs[self.dest] = values\n            ep.settingsProvidedOnCommandLine.append(self.dest)\n            ep.cs.failOnLoad()\n\n    return _SetSettingAction\n\n\n# Q: Why does this require special treatment? Why not treat it like the other\n#    case settings and use setSetting action?\n# A: Because caseTitle is no longer an actual cs setting. It's an instance attr.\ndef setCaseTitle(cs):\n    class _SetCaseTitleAction(argparse.Action):\n        \"\"\"This class sets the case title to the supplied value of the\n        :py:data:`armi.settings.cs`.\n        \"\"\"\n\n        def __call__(self, parser, namespace, value, option_string=None):\n            cs.caseTitle = value\n\n    return _SetCaseTitleAction\n\n\n# Careful, this is used by physicalProgramming\ndef loadSettings(cs):\n    class LoadSettingsAction(argparse.Action):\n        \"\"\"This class loads the command line supplied settings file into the\n        :py:data:`armi.settings.cs`.\n        \"\"\"\n\n        def __call__(self, parser, namespace, values, option_string=None):\n            # since this is a positional argument, it can be called with values is\n            # None (i.e. default)\n            if values is not None:\n                cs.loadFromInputFile(values)\n\n    return LoadSettingsAction\n"
  },
  {
    "path": "armi/cli/gridGui.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nGrid editor GUI entry point.\n\nCLI entry point to spin up the GridEditor GUI.\n\"\"\"\n\nfrom armi.cli import entryPoint\n\n\nclass GridGuiEntryPoint(entryPoint.EntryPoint):\n    \"\"\"Load the grid editor GUI.\"\"\"\n\n    name = \"grids\"\n\n    def addOptions(self):\n        self.parser.add_argument(\n            \"blueprints\",\n            nargs=\"?\",\n            type=str,\n            default=None,\n            help=\"Optional path to a blueprint file to open\",\n        )\n\n    def invoke(self):\n        # Import late since wxpython is kind of big and only needed when actually\n        # invoking the entry point\n        try:\n            import wx\n\n            from armi.utils import gridEditor\n        except ImportError:\n            raise RuntimeError(\n                \"wxPython is not installed in this \"\n                \"environment, but is required for the Grid GUI. wxPython is not \"\n                \"installed during the default ARMI installation process. 
Refer to \"\n                \"installation instructions to install extras like wxPython.\"\n            )\n\n        app = wx.App()\n\n        frame = wx.Frame(None, wx.ID_ANY, title=\"Grid Editor\", size=(1000, 1000))\n\n        gui = gridEditor.GridBlueprintControl(frame)\n        frame.Show()\n        if self.args.blueprints is not None:\n            gui.loadFile(self.args.blueprints)\n        app.MainLoop()\n"
  },
  {
    "path": "armi/cli/migrateInputs.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Entry point into ARMI to migrate inputs to the latest version of ARMI.\"\"\"\n\nimport os\n\nfrom armi.cli.entryPoint import EntryPoint\nfrom armi.migration import ACTIVE_MIGRATIONS, base\nfrom armi.utils import directoryChangers\n\n\nclass MigrateInputs(EntryPoint):\n    \"\"\"Migrate ARMI Inputs and/or outputs to Latest ARMI Code Base.\"\"\"\n\n    name = \"migrate-inputs\"\n\n    def addOptions(self):\n        self.parser.add_argument(\n            \"--settings-path\",\n            \"--cs\",\n            help=\"Migrate a case settings file to be compatible with the latest ARMI code base\",\n            type=str,\n        )\n        self.parser.add_argument(\n            \"--database-path\",\n            \"--db\",\n            help=\"Migrate a database file to be compatible with the latest ARMI code base\",\n            type=str,\n        )\n\n    def invoke(self):\n        \"\"\"Run the entry point.\"\"\"\n        if self.args.settings_path:\n            path, _fname = os.path.split(self.args.settings_path)\n            with directoryChangers.DirectoryChanger(path, dumpOnException=False):\n                self._migrate(self.args.settings_path, self.args.database_path)\n        else:\n            self._migrate(self.args.settings_path, self.args.database_path)\n\n    @staticmethod\n    def _migrate(settingsPath, dbPath):\n        \"\"\"\n        Run all 
migrations.\n\n        Notes\n        -----\n        Some migrations change the paths so we update them one by one.\n        \"\"\"\n        for migrationI in ACTIVE_MIGRATIONS:\n            if issubclass(migrationI, (base.SettingsMigration, base.BlueprintsMigration)) and settingsPath:\n                mig = migrationI(path=settingsPath)\n                mig.apply()\n                if issubclass(migrationI, base.SettingsMigration):\n                    # don't update on blueprints migration paths, that's not settings!\n                    settingsPath = mig.path\n            elif issubclass(migrationI, base.DatabaseMigration) and dbPath:\n                mig = migrationI(path=dbPath)\n                mig.apply()\n                dbPath = mig.path\n"
  },
  {
    "path": "armi/cli/modify.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nSearch through a directory tree and modify ARMI settings in existing input\nfile(s). All valid settings may be used as keyword arguments.\n\"\"\"\n\nfrom armi import operators, runLog, settings\nfrom armi.cli.entryPoint import EntryPoint\n\n\nclass ModifyCaseSettingsCommand(EntryPoint):\n    \"\"\"\n    Search through a directory tree and modify ARMI settings in existing input file(s).\n    All valid settings may be used as keyword arguments.\n\n    Run the entry point like this::\n\n        $ python -m armi modify --nTasks=3 *.yaml\n\n    \"\"\"\n\n    name = \"modify\"\n\n    def addOptions(self):\n        self.parser.add_argument(\n            \"--list-setting-files\",\n            \"-l\",\n            action=\"store_true\",\n            help=(\"Just list the settings files found and the proposed changes to make. Don't actually modify them.\"),\n        )\n        self.parser.add_argument(\n            \"--skip-inspection\",\n            \"-I\",\n            action=\"store_true\",\n            default=False,\n            help=\"Skip inspection. By default, setting files are checked for integrity and consistency. These \"\n            \"checks result in needing to manually resolve a number of differences. 
Using this option will \"\n            \"suppress the inspection step.\",\n        )\n        self.parser.add_argument(\n            \"--rootDir\",\n            type=str,\n            default=\".\",\n            help=\"A root directory in which to search for settings files, e.g., armi/tests.\",\n        )\n        self.parser.add_argument(\n            \"--settingsWriteStyle\",\n            type=str,\n            default=\"short\",\n            help=\"Writing style for which settings get written back to the settings files.\",\n            choices=[\"short\", \"medium\", \"full\"],\n        )\n        self.parser.add_argument(\n            \"patterns\",\n            type=str,\n            nargs=\"*\",\n            default=[\"*.yaml\"],\n            help=\"Pattern(s) to use to find match file names (e.g. *.yaml)\",\n        )\n        for settingName in self.cs.keys():\n            self.createOptionFromSetting(settingName, suppressHelp=True)\n\n    def invoke(self):\n        csInstances = settings.recursivelyLoadSettingsFiles(self.args.rootDir, self.args.patterns)\n        messages = (\"found\", \"listing\") if self.args.list_setting_files else (\"writing\", \"modifying\")\n\n        for cs in csInstances:\n            runLog.important(\"{} settings file {}\".format(messages[0], cs.path))\n            for settingName in self.settingsProvidedOnCommandLine:\n                if cs[settingName] != self.cs[settingName]:\n                    runLog.info(\n                        \"  changing `{}` from : {}\\n           {} to  -> {}\".format(\n                            settingName,\n                            cs[settingName],\n                            \" \" * (2 + len(settingName)),\n                            self.cs[settingName],\n                        )\n                    )\n                cs[settingName] = self.cs[settingName]\n\n            # if we are only listing setting files, don't write them; it is OK that we modified them in memory\n            if not 
self.args.skip_inspection:\n                inspector = operators.getOperatorClassFromSettings(cs).inspector(cs)\n                inspector.run()\n\n            if not self.args.list_setting_files:\n                cs.writeToYamlFile(cs.path, style=self.args.settingsWriteStyle)\n\n        runLog.important(\"Finished {} {} settings files.\".format(messages[1], len(csInstances)))\n"
  },
  {
    "path": "armi/cli/reportsEntryPoint.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom armi.cli import entryPoint\n\n\nclass ReportsEntryPoint(entryPoint.EntryPoint):\n    \"\"\"\n    Placeholder for an ARMI reports entry point.\n\n    Subclass this if you want to parse the ARMI DB or Reactor data model to build your reports.\n    \"\"\"\n\n    name = \"report\"\n    settingsArgument = \"optional\"\n\n    def __init__(self):\n        entryPoint.EntryPoint.__init__(self)\n\n    def invoke(self):\n        pass\n"
  },
  {
    "path": "armi/cli/run.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Run an ARMI case.\"\"\"\n\nfrom armi.cli.entryPoint import EntryPoint\n\n\nclass RunEntryPoint(EntryPoint):\n    \"\"\"Run an ARMI case.\"\"\"\n\n    name = \"run\"\n    settingsArgument = \"required\"\n\n    def invoke(self):\n        from armi import cases\n\n        inputCase = cases.Case(cs=self.cs)\n        inputCase.run()\n"
  },
  {
    "path": "armi/cli/runSuite.py",
    "content": "# Copyright 2020 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Run multiple ARMI cases one after the other on the local machine.\"\"\"\n\nimport os\n\nfrom armi import cases\nfrom armi.cli.run import RunEntryPoint\nfrom armi.utils import directoryChangers\n\n\nclass RunSuiteCommand(RunEntryPoint):\n    \"\"\"\n    Recursively run all the cases in a suite one after the other on the local machine.\n\n    Invoke with ``mpirun`` or ``mpiexec`` to activate parallelism within each individual case.\n    \"\"\"\n\n    name = \"run-suite\"\n\n    def addOptions(self):\n        RunEntryPoint.addOptions(self)\n        self.parser.add_argument(\n            \"patterns\",\n            nargs=\"*\",\n            type=str,\n            default=[\"*.yaml\"],\n            help=\"Pattern to use while searching for ARMI settings files.\",\n        )\n        self.parser.add_argument(\n            \"--ignore\",\n            \"-i\",\n            nargs=\"+\",\n            type=str,\n            default=[],\n            help=\"Pattern to search for inputs to ignore.\",\n        )\n        self.parser.add_argument(\n            \"--list\",\n            \"-l\",\n            action=\"store_true\",\n            default=False,\n            help=\"Just list the settings files found, don't actually run them.\",\n        )\n        self.parser.add_argument(\n            \"--suiteDir\",\n            type=str,\n            default=os.getcwd(),\n        
    help=(\"The path containing the case suite to run. Default current working directory.\"),\n        )\n\n    def invoke(self):\n        with directoryChangers.DirectoryChanger(self.args.suiteDir, dumpOnException=False):\n            suite = cases.CaseSuite(self.cs)\n            suite.discover(patterns=self.args.patterns, ignorePatterns=self.args.ignore)\n            if self.args.list:\n                suite.echoConfiguration()\n            else:\n                suite.run()\n"
  },
  {
    "path": "armi/cli/tests/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"
  },
  {
    "path": "armi/cli/tests/test_runEntryPoint.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Test for run cli entry point.\"\"\"\n\nimport logging\nimport os\nimport sys\nimport unittest\nfrom shutil import copyfile\n\nfrom armi import runLog\nfrom armi.__main__ import main\nfrom armi.bookkeeping.db.databaseInterface import DatabaseInterface\nfrom armi.bookkeeping.visualization.entryPoint import VisFileEntryPoint\nfrom armi.cli.checkInputs import CheckInputEntryPoint, ExpandBlueprints\nfrom armi.cli.clone import CloneArmiRunCommandBatch, CloneSuiteCommand\nfrom armi.cli.compareCases import CompareCases, CompareSuites\nfrom armi.cli.database import ExtractInputs, InjectInputs\nfrom armi.cli.entryPoint import EntryPoint\nfrom armi.cli.migrateInputs import MigrateInputs\nfrom armi.cli.modify import ModifyCaseSettingsCommand\nfrom armi.cli.reportsEntryPoint import ReportsEntryPoint\nfrom armi.cli.run import RunEntryPoint\nfrom armi.cli.runSuite import RunSuiteCommand\nfrom armi.physics.neutronics.diffIsotxs import CompareIsotxsLibraries\nfrom armi.testing import loadTestReactor\nfrom armi.tests import ARMI_RUN_PATH, TEST_ROOT, mockRunLogs\nfrom armi.utils.directoryChangers import TemporaryDirectoryChanger\nfrom armi.utils.dynamicImporter import getEntireFamilyTree\n\n\ndef buildTestDB(fileName, numNodes=1, numCycles=1):\n    \"\"\"This function builds a (super) simple test DB.\n\n    Notes\n    -----\n    This needs to be run inside a temp directory.\n\n 
   Parameters\n    ----------\n    fileName : str\n        The file name (not path) we want for the ARMI test DB.\n    numNodes : int, optional\n        The number of nodes we want in the DB, default 1.\n    numCycles : int, optional\n        The number of cycles we want in the DB, default 1.\n\n    Returns\n    -------\n    str\n        Database file name.\n    \"\"\"\n    o, r = loadTestReactor(\n        TEST_ROOT,\n        inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\",\n    )\n\n    # create the tests DB\n    dbi = DatabaseInterface(r, o.cs)\n    dbi.initDB(fName=f\"{fileName}.h5\")\n    db = dbi.database\n\n    # populate the db with something\n    r.p.cycle = 0\n    for node in range(abs(numNodes)):\n        for cycle in range(abs(numCycles)):\n            r.p.timeNode = node\n            r.p.cycle = cycle\n            r.p.cycleLength = 100\n            db.writeToDB(r)\n\n    db.close()\n    return f\"{fileName}.h5\"\n\n\nclass TestInitializationEntryPoints(unittest.TestCase):\n    def test_entryPointInitialization(self):\n        \"\"\"Tests the initialization of all subclasses of `EntryPoint`.\n\n        .. 
test:: Test initialization of many basic CLIs.\n            :id: T_ARMI_CLI_GEN0\n            :tests: R_ARMI_CLI_GEN\n        \"\"\"\n        entryPoints = getEntireFamilyTree(EntryPoint)\n\n        # Comparing to a minimum number of entry points, in case more are added.\n        self.assertGreater(len(entryPoints), 15)\n\n        for e in entryPoints:\n            entryPoint = e()\n            entryPoint.addOptions()\n            settingsArg = None\n            if entryPoint.settingsArgument is not None:\n                for a in entryPoint.parser._actions:\n                    if \"settings_file\" in a.dest:\n                        settingsArg = a\n                        break\n                self.assertIsNotNone(\n                    settingsArg,\n                    msg=(\n                        f\"A settings file argument was expected for {entryPoint}, \"\n                        \"but does not exist. This is a error in the EntryPoint \"\n                        \"implementation.\"\n                    ),\n                )\n\n\nclass TestCheckInputEntryPoint(unittest.TestCase):\n    def test_checkInputEntryPointBasics(self):\n        ci = CheckInputEntryPoint()\n        ci.addOptions()\n        ci.parse_args([\"/path/to/fake.yaml\", \"-C\"])\n\n        self.assertEqual(ci.name, \"check-input\")\n        self.assertEqual(ci.args.patterns, [\"/path/to/fake.yaml\"])\n        self.assertEqual(ci.args.skip_checks, True)\n\n    def test_checkInputEntryPointInvoke(self):\n        \"\"\"Test the \"check inputs\" entry point.\n\n        .. 
test:: A working CLI child class, to validate inputs.\n            :id: T_ARMI_CLI_GEN1\n            :tests: R_ARMI_CLI_GEN\n        \"\"\"\n        ci = CheckInputEntryPoint()\n        ci.addOptions()\n        ci.parse_args([ARMI_RUN_PATH])\n\n        with mockRunLogs.BufferLog() as mock:\n            runLog.LOG.startLog(\"test_checkInputEntryPointInvoke\")\n            runLog.LOG.setVerbosity(logging.INFO)\n            self.assertEqual(\"\", mock.getStdout())\n\n            ci.invoke()\n\n            self.assertIn(ARMI_RUN_PATH, mock.getStdout())\n            self.assertIn(\"input is self consistent\", mock.getStdout())\n\n\nclass TestCloneArmiRunCommandBatch(unittest.TestCase):\n    def test_cloneArmiRunCommandBatchBasics(self):\n        ca = CloneArmiRunCommandBatch()\n        ca.addOptions()\n        ca.parse_args(\n            [\n                ARMI_RUN_PATH,\n                \"--additional-files\",\n                \"test\",\n                \"--settingsWriteStyle\",\n                \"full\",\n            ]\n        )\n\n        self.assertEqual(ca.name, \"clone-batch\")\n        self.assertEqual(ca.settingsArgument, \"required\")\n        self.assertEqual(ca.args.additional_files, [\"test\"])\n        self.assertEqual(ca.args.settingsWriteStyle, \"full\")\n\n    def test_cloneArmiRunCommandBatchInvokeShort(self):\n        # Test short write style\n        ca = CloneArmiRunCommandBatch()\n        ca.addOptions()\n        ca.parse_args([ARMI_RUN_PATH])\n\n        with TemporaryDirectoryChanger():\n            ca.invoke()\n\n            self.assertEqual(ca.settingsArgument, \"required\")\n            self.assertEqual(ca.args.settingsWriteStyle, \"short\")\n            clonedYaml = \"armiRun.yaml\"\n            self.assertTrue(os.path.exists(clonedYaml))\n            # validate a setting that has a default value was removed\n            txt = open(clonedYaml, \"r\").read()\n            self.assertNotIn(\"availabilityFactor\", txt)\n\n    def 
test_cloneArmiRunCommandBatchInvokeMedium(self):\n        \"\"\"Test the \"clone armi run\" batch entry point, on medium detail.\n\n        .. test:: A working CLI child class, to clone a run.\n            :id: T_ARMI_CLI_GEN2\n            :tests: R_ARMI_CLI_GEN\n        \"\"\"\n        # Test medium write style\n        ca = CloneArmiRunCommandBatch()\n        ca.addOptions()\n        ca.parse_args([ARMI_RUN_PATH, \"--settingsWriteStyle\", \"medium\"])\n\n        with TemporaryDirectoryChanger():\n            ca.invoke()\n\n            self.assertEqual(ca.settingsArgument, \"required\")\n            self.assertEqual(ca.args.settingsWriteStyle, \"medium\")\n            clonedYaml = \"armiRun.yaml\"\n            self.assertTrue(os.path.exists(clonedYaml))\n            # validate a setting that has a  default value is still there\n            txt = open(clonedYaml, \"r\").read()\n            self.assertIn(\"availabilityFactor\", txt)\n\n\nclass TestCloneSuiteCommand(unittest.TestCase):\n    def test_cloneSuiteCommandBasics(self):\n        cs = CloneSuiteCommand()\n        cs.addOptions()\n        cs.parse_args([\"-d\", \"test\", \"--settingsWriteStyle\", \"medium\"])\n\n        self.assertEqual(cs.name, \"clone-suite\")\n        self.assertEqual(cs.args.directory, \"test\")\n        self.assertEqual(cs.args.settingsWriteStyle, \"medium\")\n\n\nclass TestCompareCases(unittest.TestCase):\n    def test_compareCasesBasics(self):\n        with TemporaryDirectoryChanger():\n            cc = CompareCases()\n            cc.addOptions()\n            cc.parse_args([\"/path/to/fake1.h5\", \"/path/to/fake2.h5\"])\n\n            self.assertEqual(cc.name, \"compare\")\n            self.assertIsNone(cc.args.timestepCompare)\n            self.assertIsNone(cc.args.weights)\n\n            with self.assertRaises(ValueError):\n                # The \"fake\" files do exist, so this should fail.\n                cc.invoke()\n\n\nclass TestCompareSuites(unittest.TestCase):\n    def 
test_compareSuitesBasics(self):\n        with TemporaryDirectoryChanger():\n            cs = CompareSuites()\n            cs.addOptions()\n            cs.parse_args([\"/path/to/fake1.h5\", \"/path/to/fake2.h5\", \"-I\"])\n\n            self.assertEqual(cs.name, \"compare-suites\")\n            self.assertEqual(cs.args.reference, \"/path/to/fake1.h5\")\n            self.assertTrue(cs.args.skip_inspection)\n            self.assertIsNone(cs.args.weights)\n\n\nclass TestExpandBlueprints(unittest.TestCase):\n    def test_expandBlueprintsBasics(self):\n        ebp = ExpandBlueprints()\n        ebp.addOptions()\n        ebp.parse_args([\"/path/to/fake.yaml\"])\n\n        self.assertEqual(ebp.name, \"expand-bp\")\n        self.assertEqual(ebp.args.blueprints, \"/path/to/fake.yaml\")\n\n        # Since the file is fake, invoke() should exit early.\n        with mockRunLogs.BufferLog() as mock:\n            runLog.LOG.startLog(\"test_expandBlueprintsBasics\")\n            runLog.LOG.setVerbosity(logging.INFO)\n            self.assertEqual(\"\", mock.getStdout())\n            ebp.invoke()\n            self.assertIn(\"does not exist\", mock.getStdout())\n\n\nclass TestExtractInputs(unittest.TestCase):\n    def test_extractInputsBasics(self):\n        with TemporaryDirectoryChanger() as newDir:\n            # build test DB\n            o, r = loadTestReactor(inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\")\n            dbi = DatabaseInterface(r, o.cs)\n            dbPath = os.path.join(newDir.destination, f\"{self._testMethodName}.h5\")\n            dbi.initDB(fName=dbPath)\n            db = dbi.database\n            db.writeToDB(r)\n\n            # init the CLI\n            ei = ExtractInputs()\n            ei.addOptions()\n            ei.parse_args([dbPath])\n\n            # test the CLI initialization\n            self.assertEqual(ei.name, \"extract-inputs\")\n            self.assertEqual(ei.args.output_base, dbPath[:-3])\n\n            # run the CLI on a test DB, 
verify it worked via logging\n            with mockRunLogs.BufferLog() as mock:\n                runLog.LOG.startLog(\"test_extractInputsBasics\")\n                runLog.LOG.setVerbosity(logging.INFO)\n                self.assertEqual(\"\", mock.getStdout())\n                ei.invoke()\n                self.assertIn(\"Writing settings to\", mock.getStdout())\n                self.assertIn(\"Writing blueprints to\", mock.getStdout())\n\n            db.close()\n\n\nclass TestInjectInputs(unittest.TestCase):\n    def test_injectInputsBasics(self):\n        ii = InjectInputs()\n        ii.addOptions()\n        ii.parse_args([\"/path/to/fake.h5\"])\n\n        self.assertEqual(ii.name, \"inject-inputs\")\n        self.assertIsNone(ii.args.blueprints)\n\n    def test_injectInputsInvokeIgnore(self):\n        ii = InjectInputs()\n        ii.addOptions()\n        ii.parse_args([\"/path/to/fake.h5\"])\n\n        with mockRunLogs.BufferLog() as mock:\n            runLog.LOG.startLog(\"test_injectInputsInvokeIgnore\")\n            runLog.LOG.setVerbosity(logging.INFO)\n            self.assertEqual(\"\", mock.getStdout())\n            ii.invoke()\n            self.assertIn(\"No settings\", mock.getStdout())\n\n    def test_injectInputsInvokeNoData(self):\n        with TemporaryDirectoryChanger():\n            # init CLI\n            ii = InjectInputs()\n            ii.addOptions()\n\n            bp = os.path.join(TEST_ROOT, \"refSmallReactor.yaml\")\n            ii.parse_args([\"/path/to/fake.h5\", \"--blueprints\", bp])\n\n            # invoke and check log\n            with self.assertRaises(FileNotFoundError):\n                # The \"fake.h5\" doesn't exist, so this should fail.\n                ii.invoke()\n\n\nclass TestMigrateInputs(unittest.TestCase):\n    def test_migrateInputsBasics(self):\n        mi = MigrateInputs()\n        mi.addOptions()\n        mi.parse_args([\"--settings-path\", \"cs_path\"])\n\n        self.assertEqual(mi.name, \"migrate-inputs\")\n        
self.assertEqual(mi.args.settings_path, \"cs_path\")\n\n\nclass TestModifyCaseSettingsCommand(unittest.TestCase):\n    def test_modifyCaseSettingsCommandBasics(self):\n        mcs = ModifyCaseSettingsCommand()\n        mcs.addOptions()\n        mcs.parse_args([\"--rootDir\", \"/path/to/\", \"--settingsWriteStyle\", \"medium\", \"fake.yaml\"])\n\n        self.assertEqual(mcs.name, \"modify\")\n        self.assertEqual(mcs.args.rootDir, \"/path/to/\")\n        self.assertEqual(mcs.args.settingsWriteStyle, \"medium\")\n        self.assertEqual(mcs.args.patterns, [\"fake.yaml\"])\n\n    def test_modifyCaseSettingsCommandInvoke(self):\n        mcs = ModifyCaseSettingsCommand()\n        mcs.addOptions()\n\n        with TemporaryDirectoryChanger():\n            # copy over settings files\n            for fileName in [\n                \"armiRun.yaml\",\n                \"refSmallReactor.yaml\",\n                \"refSmallReactorShuffleLogic.py\",\n            ]:\n                copyfile(os.path.join(TEST_ROOT, fileName), fileName)\n\n            # pass in --nTasks=333\n            mcs.parse_args([\"--nTasks=333\", \"--rootDir\", \".\", \"armiRun.yaml\"])\n\n            # invoke the CLI\n            mcs.invoke()\n\n            # validate the change to nTasks was made\n            txt = open(\"armiRun.yaml\", \"r\").read()\n            self.assertIn(\"nTasks: 333\", txt)\n\n\nclass MockFakeReportsEntryPoint(ReportsEntryPoint):\n    name = \"MockFakeReport\"\n\n    def invoke(self):\n        return \"mock fake\"\n\n\nclass TestReportsEntryPoint(unittest.TestCase):\n    def test_cleanArgs(self):\n        rep = MockFakeReportsEntryPoint()\n        result = rep.invoke()\n        self.assertEqual(result, \"mock fake\")\n\n\nclass TestCompareIsotxsLibsEntryPoint(unittest.TestCase):\n    def test_compareIsotxsLibsBasics(self):\n        com = CompareIsotxsLibraries()\n        com.addOptions()\n        com.parse_args([\"--fluxFile\", \"/path/to/fluxfile.txt\", \"reference\", 
\"comparisonFiles\"])\n\n        self.assertEqual(com.name, \"diff-isotxs\")\n        self.assertIsNone(com.settingsArgument)\n\n        with self.assertRaises(FileNotFoundError):\n            # The provided files don't exist, so this should fail.\n            com.invoke()\n\n\nclass TestRunEntryPoint(unittest.TestCase):\n    def test_runEntryPointBasics(self):\n        rep = RunEntryPoint()\n        rep.addOptions()\n        rep.parse_args([ARMI_RUN_PATH])\n\n        self.assertEqual(rep.name, \"run\")\n        self.assertEqual(rep.settingsArgument, \"required\")\n\n    def test_runCommandHelp(self):\n        \"\"\"Ensure main entry point with no args completes.\"\"\"\n        with self.assertRaises(SystemExit) as excinfo:\n            # have to override the pytest args\n            sys.argv = [\"\"]\n            main()\n        self.assertEqual(excinfo.exception.code, 0)\n\n    def test_executeCommand(self):\n        \"\"\"Use executeCommand to call run.\n\n        But we expect it to fail because we provide a fictional settings YAML.\n        \"\"\"\n        with self.assertRaises(SystemExit) as excinfo:\n            # override the pytest args\n            sys.argv = [\"run\", \"path/to/fake.yaml\"]\n            main()\n        self.assertEqual(excinfo.exception.code, 1)\n\n\nclass TestRunSuiteCommand(unittest.TestCase):\n    def test_runSuiteCommandBasics(self):\n        rs = RunSuiteCommand()\n        rs.addOptions()\n        rs.parse_args([\"/path/to/fake.yaml\", \"-l\"])\n\n        self.assertEqual(rs.name, \"run-suite\")\n        self.assertIsNone(rs.settingsArgument)\n\n        # test the invoke method\n        with mockRunLogs.BufferLog() as mock:\n            runLog.LOG.startLog(\"test_runSuiteCommandBasics\")\n            runLog.LOG.setVerbosity(logging.INFO)\n            self.assertEqual(\"\", mock.getStdout())\n            rs.invoke()\n            self.assertIn(\"Finding potential settings files\", mock.getStdout())\n            
self.assertIn(\"Checking for valid settings\", mock.getStdout())\n            self.assertIn(\"Primary Log Verbosity\", mock.getStdout())\n\n\nclass TestVisFileEntryPointCommand(unittest.TestCase):\n    def test_visFileEntryPointBasics(self):\n        with TemporaryDirectoryChanger() as newDir:\n            # build test DB\n            self.o, self.r = loadTestReactor(\n                TEST_ROOT,\n                customSettings={\"reloadDBName\": \"reloadingDB.h5\"},\n                inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\",\n            )\n            self.dbi = DatabaseInterface(self.r, self.o.cs)\n            dbPath = os.path.join(newDir.destination, f\"{self._testMethodName}.h5\")\n            self.dbi.initDB(fName=dbPath)\n            self.db = self.dbi.database\n            self.db.writeToDB(self.r)\n\n            # create Viz entry point\n            vf = VisFileEntryPoint()\n            vf.addOptions()\n            vf.parse_args([dbPath])\n\n            self.assertEqual(vf.name, \"vis-file\")\n            self.assertIsNone(vf.settingsArgument)\n\n            # test the invoke method\n            with mockRunLogs.BufferLog() as mock:\n                runLog.LOG.startLog(\"test_visFileEntryPointBasics\")\n                runLog.LOG.setVerbosity(logging.INFO)\n                self.assertEqual(\"\", mock.getStdout())\n\n                vf.invoke()\n\n                desired = \"Creating visualization file for cycle 0, time node 0...\"\n                self.assertIn(desired, mock.getStdout())\n\n            # test the parse method (using the same DB to save time)\n            vf = VisFileEntryPoint()\n            vf.parse([dbPath])\n            self.assertIsNone(vf.args.nodes)\n            self.assertIsNone(vf.args.min_node)\n            self.assertIsNone(vf.args.max_node)\n            self.assertEqual(vf.args.output_name, \"test_visFileEntryPointBasics\")\n\n            self.db.close()\n"
  },
  {
    "path": "armi/cli/tests/test_runSuite.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Test for runsuite cli entry point.\"\"\"\n\nimport io\nimport sys\nimport unittest\nfrom unittest.mock import patch\n\nfrom armi import meta\nfrom armi.cli import ArmiCLI\n\n\nclass TestRunSuiteSuite(unittest.TestCase):\n    def test_listCommand(self):\n        \"\"\"Ensure run-suite entry point is registered.\n\n        .. test:: The ARMI CLI can be correctly initialized.\n            :id: T_ARMI_CLI_CS0\n            :tests: R_ARMI_CLI_CS\n        \"\"\"\n        acli = ArmiCLI()\n\n        origout = sys.stdout\n        try:\n            out = io.StringIO()\n            sys.stdout = out\n            acli.listCommands()\n        finally:\n            sys.stdout = origout\n\n        self.assertIn(\"run-suite\", out.getvalue())\n\n    def test_showVersion(self):\n        \"\"\"Test the ArmiCLI.showVersion method.\n\n        .. 
test:: The ARMI CLI's basic \"--version\" functionality works.\n            :id: T_ARMI_CLI_CS1\n            :tests: R_ARMI_CLI_CS\n        \"\"\"\n        origout = sys.stdout\n        try:\n            out = io.StringIO()\n            sys.stdout = out\n            ArmiCLI.showVersion()\n        finally:\n            sys.stdout = origout\n\n        self.assertIn(\"armi\", out.getvalue())\n        self.assertIn(meta.__version__, out.getvalue())\n\n    @patch(\"armi.cli.ArmiCLI.executeCommand\")\n    def test_run(self, mockExeCmd):\n        \"\"\"Test the ArmiCLI.run method.\n\n        .. test:: The ARMI CLI's import run() method works.\n            :id: T_ARMI_CLI_CS2\n            :tests: R_ARMI_CLI_CS\n        \"\"\"\n        correct = 0\n        acli = ArmiCLI()\n        mockExeCmd.return_value = correct\n        ret = acli.run()\n        self.assertEqual(ret, correct)\n"
  },
  {
    "path": "armi/conftest.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nPer-directory pytest plugin configuration used only during development/testing.\n\nThis is a used to manipulate the environment under which pytest runs the unit tests. This can act as a one-stop-shop for\nmanipulating the sys.path, or the ARMI App used to run the tests.\n\nTests must be invoked via pytest for this to have any affect, for example::\n\n    $ pytest -n 6 armi\n\n\"\"\"\n\nimport os\n\nimport matplotlib\n\nfrom armi import apps, configure, context\nfrom armi.settings import caseSettings\nfrom armi.tests import TEST_ROOT\n\n\ndef pytest_sessionstart(session):\n    print(\"Initializing generic ARMI Framework application\")\n    configure(apps.App())\n    bootstrapArmiTestEnv()\n\n\ndef bootstrapArmiTestEnv():\n    \"\"\"\n    Perform ARMI config appropriate for running unit tests.\n\n    .. tip:: This can be imported and run from other ARMI applications for test support.\n    \"\"\"\n    from armi.nucDirectory import nuclideBases\n\n    cs = caseSettings.Settings()\n\n    context.Mode.setMode(context.Mode.BATCH)\n    # Need to init burnChain. 
(See Reactor._initBurnChain)\n    with open(cs[\"burnChainFileName\"]) as burnChainStream:\n        nuclideBases.imposeBurnChain(burnChainStream)\n\n    # turn on a non-interactive mpl backend to minimize errors related to initializing Tcl in parallel tests\n    matplotlib.use(\"agg\")\n\n    # Set and create a test-specific FAST_PATH for parallel unit testing. Not all unit tests have operators, and\n    # operators are usually responsible for making FAST_PATH, so we make it here. It will be deleted by the atexit hook.\n    context.activateLocalFastPath()\n    if not os.path.exists(context.getFastPath()):\n        os.makedirs(context.getFastPath())\n\n    # some tests need to find the TEST_ROOT via an env variable when they're filling in templates with ``$ARMITESTBASE``\n    # in them or opening input files use the variable in an `!include` tag. Thus we provide it here.\n    os.environ[\"ARMITESTBASE\"] = TEST_ROOT\n"
  },
  {
    "path": "armi/context.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nModule containing global constants that reflect the executing context of ARMI.\n\nARMI's global state information: operating system information, environment data, user data, memory\nparallelism, temporary storage locations, and if operational mode (interactive, gui, or batch).\n\"\"\"\n\nimport datetime\nimport enum\nimport gc\nimport getpass\nimport os\nimport sys\nimport time\nfrom logging import DEBUG\n\n# h5py needs to be imported here, so that the disconnectAllHdfDBs() call that gets bound to atexit\n# below doesn't lead to a segfault on python exit.\n#\n# Minimal code to reproduce the issue:\n#\n# >>> import atexit\n#\n# >>> def willSegFault():\n# >>>     import h5py\n#\n# >>> atexit.register(willSegFault)\nimport h5py  # noqa: F401\n\nBLUEPRINTS_IMPORTED = False\nBLUEPRINTS_IMPORT_CONTEXT = \"\"\n\n# App name is used when spawning new tasks that should invoke a specific ARMI application. Sometimes\n# these tasks only use ARMI functionality, so running `python -m armi` is fine. Other times, the\n# task is specific to an application, requiring something like: `python -m myArmiApp`\nAPP_NAME = \"armi\"\n\n\nclass Mode(enum.Enum):\n    \"\"\"\n    Mode represents different run types possible in ARMI.\n\n    The modes can be Batch, Interactive, or GUI. Mode is generally auto-detected based on your\n    terminal. 
It can also be set in various CLI entry points. Each entry point has a ``--batch``\n    command line argument that can force Batch mode.\n    \"\"\"\n\n    BATCH = 1\n    INTERACTIVE = 2\n    GUI = 4\n\n    @classmethod\n    def setMode(cls, mode):\n        \"\"\"Set the run mode of the current ARMI case.\"\"\"\n        global CURRENT_MODE\n        assert isinstance(mode, cls), \"Invalid mode {}\".format(mode)\n        CURRENT_MODE = mode\n\n\nROOT = os.path.abspath(os.path.dirname(__file__))\nPROJECT_ROOT = os.path.join(ROOT, \"..\")\nRES = os.path.join(ROOT, \"resources\")\nDOC = os.path.abspath(os.path.join(PROJECT_ROOT, \"doc\"))\nUSER = getpass.getuser()\nSTART_TIME = time.ctime()\n\n# Set batch mode if not a TTY, which means you're on a cluster writing to a stdout file. In this\n# mode you cannot respond to prompts. (This does not work reliably for both Windows and Linux so an\n# os-specific solution is applied.)\nIS_WINDOWS = (\"win\" in sys.platform) and (\"darwin\" not in sys.platform)\nisatty = sys.stdout.isatty() if IS_WINDOWS else sys.stdin.isatty()\nCURRENT_MODE = Mode.INTERACTIVE if isatty else Mode.BATCH\nMode.setMode(CURRENT_MODE)\n\nMPI_COMM = None\n# MPI_RANK represents the index of the CPU that is running.\n# 0 is typically the primary CPU, while 1+ are typically workers.\nMPI_RANK = 0\n# MPI_SIZE is the total number of CPUs.\nMPI_SIZE = 1\nLOCAL = \"local\"\nMPI_NODENAME = LOCAL\nMPI_NODENAMES = [LOCAL]\n\n\ntry:\n    # Check for MPI\n    from mpi4py import MPI\n    from mpi4py.util import pkl5\n\n    MPI_COMM = pkl5.Intracomm(MPI.COMM_WORLD)\n    MPI_RANK = MPI_COMM.Get_rank()\n    MPI_SIZE = MPI_COMM.Get_size()\n    MPI_NODENAME = MPI.Get_processor_name()\n    MPI_NODENAMES = MPI_COMM.allgather(MPI_NODENAME)\nexcept ImportError:\n    # stick with defaults\n    pass\nexcept RuntimeError:\n    # likely from MPI not being on system, this is OK for many ARMI invocations\n    # Note this exception was introduced upon upgrading to mpi4py 4.1.1\n    
pass\n\nif sys.platform.startswith(\"win\"):\n    # trying a Windows approach\n    APP_DATA = os.path.join(os.environ[\"APPDATA\"], \"armi\")\n    APP_DATA = APP_DATA.replace(\"/\", \"\\\\\")\nelse:\n    # non-Windows: /tmp/ if possible, if not home\n    if os.access(\"/tmp/\", os.W_OK):\n        APP_DATA = \"/tmp/.armi\"\n    else:\n        APP_DATA = os.path.expanduser(\"~/.armi\")\n\nif MPI_NODENAMES.index(MPI_NODENAME) == MPI_RANK:\n    if not os.path.isdir(APP_DATA):\n        try:\n            os.makedirs(APP_DATA)\n            os.chmod(APP_DATA, 0o0777)\n        except OSError:\n            pass\n    if not os.path.isdir(APP_DATA):\n        raise OSError(\"Directory doesn't exist {0}\".format(APP_DATA))\n\nif MPI_COMM is not None:\n    # Make sure app data exists before workers proceed.\n    MPI_COMM.barrier()\n\nMPI_DISTRIBUTABLE = MPI_SIZE > 1\n\n_FAST_PATH = os.path.join(os.getcwd())\n\"\"\"\nA directory available for high-performance I/O.\n\n.. warning:: This is not a constant and can change at runtime.\n\"\"\"\n\n_FAST_PATH_IS_TEMPORARY = False\n\"\"\"Flag indicating whether or not the FAST_PATH should be cleaned up on exit.\"\"\"\n\n\ndef activateLocalFastPath() -> None:\n    \"\"\"\n    Specify a local temp directory to be the fast path.\n\n    ``FAST_PATH`` is often a local hard drive on a cluster node. It should be a high-performance\n    scratch space. Different processors on the same node should have different fast paths.\n\n    Notes\n    -----\n    This path will be obliterated when the job ends.\n\n    This path is set at import time, so if a series of unit tests come through that instantiate one\n    operator after the other, the path will already exist the second time. 
The directory is created\n    in the Operator constructor.\n    \"\"\"\n    global _FAST_PATH, _FAST_PATH_IS_TEMPORARY, APP_DATA\n\n    # Try to fix pathing issues in Windows.\n    if os.name == \"nt\":\n        APP_DATA = APP_DATA.replace(\"/\", \"\\\\\")\n\n    _FAST_PATH = os.path.join(\n        APP_DATA,\n        \"{}{}-{}\".format(\n            MPI_RANK,\n            os.environ.get(\"PYTEST_XDIST_WORKER\", \"\"),  # for parallel unit testing,\n            datetime.datetime.now().strftime(\"%Y%m%d%H%M%S%f\"),\n        ),\n    )\n\n    _FAST_PATH_IS_TEMPORARY = True\n\n\ndef getFastPath() -> str:\n    \"\"\"\n    Callable to get the current FAST_PATH.\n\n    Notes\n    -----\n    This exists because it's dangerous to use ``FAST_PATH`` directly. as it can change between\n    import and runtime.\n    \"\"\"\n    return _FAST_PATH\n\n\ndef cleanFastPathAfterSimulation():\n    \"\"\"\n    Clean up temporary files after a run.\n\n    Some Windows HPC systems send a SIGBREAK signal when the user cancels a job, which is NOT\n    handled by ``atexit``. Notably, SIGBREAK does not exist outside Windows. For the SIGBREAK signal\n    to work with a Windows HPC, the ``TaskCancelGracePeriod`` option must be configured to be non-\n    zero. This sets the period between SIGBREAK and SIGTERM/SIGINT. To do cleanups in this case, we\n    must use the ``signal`` module. 
Actually, even then it does not work because MS ``mpiexec`` does\n    not pass signals through.\n    \"\"\"\n    from armi import runLog\n    from armi.utils.pathTools import cleanPath\n\n    disconnectAllHdfDBs()\n    printMsg = runLog.getVerbosity() <= DEBUG\n    if _FAST_PATH_IS_TEMPORARY and os.path.exists(_FAST_PATH):\n        if printMsg:\n            print(\n                \"Cleaning up temporary files in: {}\".format(_FAST_PATH),\n                file=sys.stdout,\n            )\n        try:\n            cleanPath(_FAST_PATH, mpiRank=MPI_RANK)\n        except Exception as error:\n            for outputStream in (sys.stderr, sys.stdout):\n                if printMsg:\n                    print(\n                        \"Failed to delete temporary files in: {}\\n    error: {}\".format(_FAST_PATH, error),\n                        file=outputStream,\n                    )\n\n\ndef disconnectAllHdfDBs() -> None:\n    \"\"\"\n    Forcibly disconnect all instances of HdfDB objects.\n\n    Notes\n    -----\n    This is a hack to help ARMI exit gracefully when the garbage collector and h5py have issues\n    destroying objects. The root cause for why this was having issues was never identified. It\n    appears that when several HDF5 files are open in the same run (e.g. when calling ``armi.init()``\n    multiple times from a post-processing script), when these h5py File objects were closed, the\n    garbage collector would raise an exception related to the repr'ing the object. We get around\n    this by using the garbage collector to manually disconnect all open HdfDBs.\n    \"\"\"\n    from armi.bookkeeping.db import Database\n\n    h5dbs = [db for db in gc.get_objects() if isinstance(db, Database)]\n    for db in h5dbs:\n        db.close()\n"
  },
  {
    "path": "armi/interfaces.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nInterfaces are objects of code that interact with ARMI. They read information off the state, perform calculations (or\\\nrun external codes), and then store the results back in the state.\n\nLearn all about interfaces in :doc:`/developer/guide`\n\nSee Also\n--------\narmi.operators : Schedule calls to various interfaces\narmi.plugins : Register various interfaces\n\"\"\"\n\nimport copy\nfrom typing import Dict, List, NamedTuple, Tuple, Union\n\nimport numpy as np\nfrom numpy.linalg import norm\n\nfrom armi import getPluginManagerOrFail, runLog, settings, utils\nfrom armi.reactor import parameters\nfrom armi.utils import textProcessors\n\n\nclass STACK_ORDER:  # noqa: N801\n    \"\"\"\n    Constants that help determine the order of modules in the interface stack.\n\n    Each module defines an ``ORDER`` constant that specifies where in this order it should be placed in the Interface\n    Stack.\n\n    .. impl:: Define an ordered list of interfaces.\n        :id: I_ARMI_OPERATOR_INTERFACES0\n        :implements: R_ARMI_OPERATOR_INTERFACES\n\n        At each time node during a simulation, an ordered collection of Interfaces are run (referred\n        to as the interface stack). But ARMI does not force the order upon the analyst. 
Instead,\n        each Interface registers where in that ordered list it belongs by giving itself an order\n        number (which can be an integer or a decimal). This class defines a set of constants which\n        can be imported and used by Interface developers to define that Interface's position in the\n        stack.\n\n        The constants defined are given names, based on common stack orderings in the ARMI\n        ecosystem. But in the end, these are just constant values, and the names they are given are\n        merely suggestions.\n\n    See Also\n    --------\n    armi.operators.operator.Operator.createInterfaces\n    armi.physics.neutronics.globalFlux.globalFluxInterface.ORDER\n    \"\"\"\n\n    BEFORE = -0.1\n    AFTER = 0.1\n    PREPROCESSING = 1.0\n    FUEL_MANAGEMENT = PREPROCESSING + 1\n    DEPLETION = FUEL_MANAGEMENT + 1\n    FUEL_PERFORMANCE = DEPLETION + 1\n    CROSS_SECTIONS = FUEL_PERFORMANCE + 1\n    CRITICAL_CONTROL = CROSS_SECTIONS + 1\n    FLUX = CRITICAL_CONTROL + 1\n    THERMAL_HYDRAULICS = FLUX + 1\n    REACTIVITY_COEFFS = THERMAL_HYDRAULICS + 1\n    TRANSIENT = REACTIVITY_COEFFS + 1\n    BOOKKEEPING = TRANSIENT + 1\n    POSTPROCESSING = BOOKKEEPING + 1\n\n\nclass TightCoupler:\n    \"\"\"\n    Data structure that defines tight coupling attributes that are implemented\n    within an Interface and called upon when ``interactAllCoupled`` is called.\n\n    .. impl:: The TightCoupler defines the convergence criteria for physics coupling.\n        :id: I_ARMI_OPERATOR_PHYSICS0\n        :implements: R_ARMI_OPERATOR_PHYSICS\n\n        During a simulation, the developers of an ARMI application frequently want to\n        iterate on some physical calculation until that calculation has converged to\n        within some small tolerance. This is typically done to solve the nonlinear\n        dependence of different physical properties of the reactor, like fuel\n        performance. 
However, what parameter is being tightly coupled is configurable\n        by the developer.\n\n        This class provides a way to calculate if a single parameter has converged\n        based on some convergence tolerance. The user provides the parameter,\n        tolerance, and a maximum number of iterations to define a basic convergence\n        calculation. If in the ``isConverged`` method the parameter has not converged,\n        the number of iterations is incremented, and this class will wait, presuming\n        another iteration is forthcoming.\n\n    Parameters\n    ----------\n    param : str\n        The name of a parameter defined in the ARMI Reactor model.\n\n    tolerance : float\n        Defines the allowable error between the current and previous parameter values\n        to determine if the selected coupling parameter has converged.\n\n    maxIters : int\n        Maximum number of tight coupling iterations allowed\n    \"\"\"\n\n    _SUPPORTED_TYPES = [float, int, list, np.ndarray]\n\n    def __init__(self, param, tolerance, maxIters):\n        self.parameter = param\n        self.tolerance = tolerance\n        self.maxIters = maxIters\n        self._numIters = 0\n        self._previousIterationValue = None\n        self.eps = np.inf\n\n    def __repr__(self):\n        return (\n            f\"<{self.__class__.__name__}, Parameter: {self.parameter}, Convergence Criteria: \"\n            + f\"{self.tolerance}, Maximum Coupled Iterations: {self.maxIters}>\"\n        )\n\n    def storePreviousIterationValue(self, val: _SUPPORTED_TYPES):\n        \"\"\"\n        Stores the previous iteration value of the given parameter.\n\n        Parameters\n        ----------\n        val : _SUPPORTED_TYPES\n            the value to store. 
Is commonly equal to interface.getTightCouplingValue()\n\n        Raises\n        ------\n        TypeError\n            Checks the type of the val against ``_SUPPORTED_TYPES`` before storing.\n            If invalid, a TypeError is raised.\n        \"\"\"\n        if type(val) not in self._SUPPORTED_TYPES:\n            raise TypeError(\n                f\"{val} supplied has type {type(val)} which is not supported in {self}. \"\n                f\"Supported types: {self._SUPPORTED_TYPES}\"\n            )\n        self._previousIterationValue = val\n\n    def isConverged(self, val: _SUPPORTED_TYPES) -> bool:\n        \"\"\"\n        Return boolean indicating if the convergence criteria between the current and previous iteration values are met.\n\n        Parameters\n        ----------\n        val : _SUPPORTED_TYPES\n            The most recent value for computing convergence criteria. Is commonly equal to\n            interface.getTightCouplingValue()\n\n        Returns\n        -------\n        boolean\n            True (False) interface is (not) converged\n\n        Notes\n        -----\n        - On convergence, this class is automatically reset to its initial condition to avoid\n          retaining or holding a stale state. Calling this method will increment a counter that when\n          exceeded will clear the state. A warning will be reported if the state is cleared prior to\n          the convergence criteria being met.\n        - For computing convergence of arrays, only up to 2D is allowed. 3D arrays would arise from\n          considering component level parameters. However, converging on component level parameters\n          is not supported at this time.\n\n        Raises\n        ------\n        ValueError\n            If the previous iteration value has not been assigned. 
The ``storePreviousIterationValue`` method must be\n            called first.\n        RuntimeError\n            Only support calculating norms for up to 2D arrays.\n        \"\"\"\n        if self._previousIterationValue is None:\n            raise ValueError(\n                f\"Cannot check convergence of {self} with no previous iteration value set. Set using \"\n                \"`storePreviousIterationValue` first.\"\n            )\n\n        previous = self._previousIterationValue\n\n        # calculate convergence of val and previous\n        if isinstance(val, (int, float)):\n            self.eps = abs(val - previous)\n        else:\n            dim = self.getListDimension(val)\n            if dim == 1:  # 1D array\n                self.eps = norm(np.subtract(val, previous), ord=2)\n            elif dim == 2:  # 2D array\n                epsVec = []\n                for old, new in zip(previous, val):\n                    epsVec.append(norm(np.subtract(old, new), ord=2))\n                self.eps = norm(epsVec, ord=np.inf)\n            else:\n                raise RuntimeError(\"Currently only support up to 2D arrays for calculating convergence of arrays.\")\n\n        # Check if convergence is satisfied. If so, or if reached max number of iters, then reset\n        # the number of iterations\n        converged = self.eps < self.tolerance\n        if converged:\n            self._numIters = 0\n        else:\n            self._numIters += 1\n            if self._numIters == self.maxIters:\n                runLog.warning(\n                    f\"Maximum number of iterations for {self.parameter} reached without convergence! 
Prescribed \"\n                    f\"convergence criteria is {self.tolerance}.\"\n                )\n                self._numIters = 0\n\n        return converged\n\n    @staticmethod\n    def getListDimension(listToCheck: list, dim: int = 1) -> int:\n        \"\"\"Return the dimension of a python list.\n\n        Parameters\n        ----------\n        listToCheck: list\n            the supplied python list to have its dimension returned\n        dim: int, optional\n            the dimension of the list\n\n        Returns\n        -------\n        dim, int\n            the dimension of the list. Typically 1, 2, or 3 but can be arbitrary order, N.\n        \"\"\"\n        for v in listToCheck:\n            if isinstance(v, list):\n                dim += 1\n                dim = TightCoupler.getListDimension(v, dim)\n            break\n        return dim\n\n\nclass Interface:\n    \"\"\"\n    The eponymous Interface between the ARMI reactor data model and the Plugins.\n\n    .. impl:: The interface shall allow code execution at important operational points in time.\n        :id: I_ARMI_INTERFACE\n        :implements: R_ARMI_INTERFACE\n\n        The Interface class defines a number methods with names like ``interact***``.\n        These methods are called in order at each time node. This allows for an\n        individual Plugin defining multiple interfaces to insert code at the start\n        or end of a particular time node or cycle during reactor simulation. 
In this\n        fashion, the Plugins and thus the Operator control when their code is run.\n\n        The end goal of all this work is to allow the Plugins to carefully tune\n        when and how they interact with the reactor data model.\n\n        Interface instances are gathered into an interface stack in\n        :py:meth:`armi.operators.operator.Operator.createInterfaces`.\n    \"\"\"\n\n    # list containing interfaceClass\n    @classmethod\n    def getDependencies(cls, cs):\n        return []\n\n    @classmethod\n    def getInputFiles(cls, cs):\n        \"\"\"Return a MergeableDict containing files that should be considered \"input\".\"\"\"\n        return utils.MergeableDict()\n\n    name: Union[str, None] = None\n    \"\"\"\n    The name of the interface. This is undefined for the base class, and must be overridden by any\n    concrete class that extends this one.\n    \"\"\"\n\n    purpose = None\n    \"\"\"\n    The action performed by an Interface. This is not required be be defined by implementations of\n    Interface, but is used to form categories of interfaces.\n    \"\"\"\n\n    class Distribute:\n        \"\"\"Enum-like return flag for behavior on interface broadcasting with MPI.\"\"\"\n\n        DUPLICATE = 1\n        NEW = 2\n        SKIP = 4\n\n    def __init__(self, r, cs):\n        \"\"\"\n        Construct an interface.\n\n        The ``r`` and ``cs`` arguments are required, but may be ``None``, where appropriate for the\n        specific ``Interface`` implementation.\n\n        Parameters\n        ----------\n        r : Reactor\n            A reactor to attach to\n        cs : Settings\n            Settings object to use\n\n        Raises\n        ------\n        RuntimeError\n            Interfaces derived from Interface must define their name\n        \"\"\"\n        if self.name is None:\n            raise RuntimeError(\n                \"Interfaces derived from Interface must define their name ({}).\".format(type(self).__name__)\n     
       )\n        self._enabled = True\n        self.reverseAtEOL = False\n        self._bolForce = False  # override disabled flag in interactBOL if true.\n        self.cs = cs\n        self.r = r\n        self.o = r.o if r else None\n        self.coupler = _setTightCouplerByInterfaceFunction(self, cs)\n\n    def __repr__(self):\n        return \"<Interface {0}>\".format(self.name)\n\n    def _checkSettings(self):\n        \"\"\"Raises an exception if interface settings requirements are not met.\"\"\"\n        pass\n\n    def nameContains(self, name):\n        return name in str(self.name)\n\n    def distributable(self):\n        \"\"\"\n        Return true if this can be MPI broadcast.\n\n        Notes\n        -----\n        Cases where this isn't possible include the database interface, where the SQL driver cannot\n        be distributed.\n        \"\"\"\n        return self.Distribute.DUPLICATE\n\n    def preDistributeState(self):\n        \"\"\"\n        Prepare for distribute state by returning all non-distributable attributes.\n\n        Examples\n        --------\n        >>> return {\"neutronsPerFission\", self.neutronsPerFission}\n        \"\"\"\n        return {}\n\n    def postDistributeState(self, toRestore):\n        \"\"\"Restore non-distributable attributes after a distributeState.\"\"\"\n        pass\n\n    def attachReactor(self, o, r):\n        \"\"\"\n        Set this interfaces' reactor to the reactor passed in and sets default settings.\n\n        Parameters\n        ----------\n        r : Reactor object\n            The reactor to attach\n        quiet : bool, optional\n            If true, don't print out the message while attaching\n\n        Notes\n        -----\n        This runs on all worker nodes as well as the primary.\n        \"\"\"\n        self.r = r\n        self.cs = o.cs\n        self.o = o\n\n    def detachReactor(self):\n        \"\"\"Delete the callbacks to reactor or operator. Useful when pickling, MPI sending, etc. 
to save memory.\"\"\"\n        self.o = None\n        self.r = None\n        self.cs = None\n\n    def duplicate(self):\n        \"\"\"\n        Duplicate this interface without duplicating some of the large attributes (like the entire reactor).\n\n        Makes a copy of interface with detached reactor/operator/settings so that it can be attached to an operator at a\n        later point in time.\n\n        Returns\n        -------\n        Interface\n            The deepcopy of this interface with detached reactor/operator/settings\n        \"\"\"\n        # temporarily remove references to the interface.  They will be reattached later.\n        o = self.o\n        self.o = None\n\n        r = self.r\n        self.r = None\n\n        cs = self.cs\n        self.cs = None\n\n        # a new sterile copy of the interface.\n        # With no record of operators, reactors, or cs, it can be added easily to a new operator\n        newI = copy.deepcopy(self)\n\n        # reattach current interface information\n        self.o = o\n        self.r = r\n        self.cs = cs\n\n        return newI\n\n    def getHistoryParams(self):\n        \"\"\"\n        Add these params to the history tracker for designated assemblies.\n\n        The assembly will get a print out of these params vs. 
time at EOL.\n        \"\"\"\n        return []\n\n    def getInterface(self, *args, **kwargs):\n        return self.o.getInterface(*args, **kwargs) if self.o else None\n\n    def interactInit(self):\n        \"\"\"\n        Interacts immediately after the interfaces are created.\n\n        Notes\n        -----\n        BOL interactions on other interfaces will not have occurred here.\n        \"\"\"\n        self._checkSettings()\n\n    def interactBOL(self):\n        \"\"\"Called at the Beginning-of-Life of a run, before any cycles start.\"\"\"\n        if self._enabled:\n            self._initializeParams()\n\n    def _initializeParams(self):\n        \"\"\"\n        Assign the parameters for active interfaces so that they will be in the database.\n\n        Notes\n        -----\n        Parameters with defaults are not written to the database until they have been assigned\n        SINCE_ANYTHING. This is done to reduce database size, so that we don't write parameters to\n        the DB that are related to interfaces that are not not active.\n        \"\"\"\n        for paramDef in parameters.ALL_DEFINITIONS.inCategory(self.name):\n            if paramDef.default not in (None, parameters.NoDefault):\n                paramDef.assigned = parameters.SINCE_ANYTHING\n\n    def interactEOL(self):\n        \"\"\"Called at End-of-Life, after all cycles are complete.\"\"\"\n        pass\n\n    def interactBOC(self, cycle=None):\n        \"\"\"Called at the beginning of each cycle.\"\"\"\n        pass\n\n    def interactEOC(self, cycle=None):\n        \"\"\"Called at the end of each cycle.\"\"\"\n        pass\n\n    def interactEveryNode(self, cycle, node):\n        \"\"\"Called at each time node/subcycle of every cycle.\"\"\"\n        pass\n\n    def interactCoupled(self, iteration):\n        \"\"\"Called repeatedly at each time node/subcycle when tight physics coupling is active.\"\"\"\n        pass\n\n    def getTightCouplingValue(self):\n        \"\"\"Abstract method 
to retrieve the value in which tight coupling will converge on.\"\"\"\n        pass\n\n    def interactError(self):\n        \"\"\"Called if an error occurs.\"\"\"\n        pass\n\n    def interactDistributeState(self):\n        \"\"\"Called after this interface is copied to a different (non-primary) MPI node.\"\"\"\n        pass\n\n    def interactRestart(self, startNode: Tuple[int, int], previousNode: Tuple[int, int]):\n        \"\"\"Perform any actions prior to simulating a restart.\n\n        Interfaces may want to restore some state that would have existed at the start of ``startNode`` prior to calling\n        :meth:`interactBOL` for the desired start point. The database interface will be used prior to any interfaces\n        calling this method, so you can assume the reactor state has been correctly loaded from the database from the\n        ``previousNode``. This helps ensure that interfaces restart at e.g., ``(cycle, node)=(4, 3)`` would see the same\n        data compared to the nominal simulation without a restart.\n\n        Parameters\n        ----------\n        startNode\n            Pair of ``(cycle, node)`` for the requested restart point.\n        previousNode\n            Pair of ``(cycle, node)`` for the time node immediately preceeding ``startNode``.\n        \"\"\"\n\n    def isRequestedDetailPoint(self, cycle=None, node=None):\n        \"\"\"\n        Determine if this interface should interact at this reactor state (cycle/node).\n\n        Notes\n        -----\n        By default, detail points are either during the requested snapshots, if any exist, or all\n        cycles and nodes if none exist.\n\n        This is useful for peripheral interfaces (CR Worth, perturbation theory, transients) that\n        may or may not be requested during a standard run.\n\n        If both cycle and node are None, this returns True\n\n        Parameters\n        ----------\n        cycle : int\n            The cycle number (or None to only consider node)\n  
      node : int\n            The timenode (BOC, MOC, EOC, etc.).\n\n        Returns\n        -------\n        bool\n            Whether or not this is a detail point.\n\n        \"\"\"\n        from armi.bookkeeping import snapshotInterface  # avoid cyclic import\n\n        if cycle is None and node is None:\n            return True\n        if not self.cs[\"dumpSnapshot\"]:\n            return True\n\n        for cnStamp in self.cs[\"dumpSnapshot\"]:\n            ci, ni = snapshotInterface.extractCycleNodeFromStamp(cnStamp)\n            if cycle is None and ni == node:\n                # case where only node counts (like in equilibrium cases)\n                return True\n            if ci == cycle and ni == node:\n                return True\n\n        return False\n\n    def workerOperate(self, _cmd):\n        \"\"\"\n        Receive an MPI command and do MPI work on worker nodes.\n\n        Returns\n        -------\n        bool\n            True if this interface handled the incoming command. False otherwise.\n        \"\"\"\n        return False\n\n    def enabled(self, flag=None):\n        \"\"\"\n        Mechanism to allow interfaces to be attached but not running at the interaction points.\n\n        Must be implemented on the individual interface level hooks. If given no arguments, returns\n        status of enabled. If arguments, sets enabled to that flag. 
(True or False)\n\n        Notes\n        -----\n        These ``return`` statements are inconsistent, but not wrong.\n        \"\"\"\n        if flag is None:\n            return self._enabled\n        elif isinstance(flag, bool):\n            self._enabled = flag\n        else:\n            raise ValueError(\"Non-bool passed to assign {}.enable().\".format(self))\n\n    def bolForce(self, flag=None):\n        \"\"\"\n        Run interactBOL even if this interface is disabled.\n\n        Parameters\n        ----------\n        flag : boolean, optional\n            Will set the bolForce flag to this boolean\n\n        Returns\n        -------\n        bool\n            true if should run at BOL. No return if you pass an input.\n\n        Notes\n        -----\n        These ``return`` statements are inconsistent, but not wrong.\n        \"\"\"\n        if flag is None:\n            return self._bolForce\n        self._bolForce = flag\n\n    def writeInput(self, inName):\n        \"\"\"Write input file(s).\"\"\"\n        raise NotImplementedError()\n\n    def readOutput(self, outName):\n        \"\"\"Read output file(s).\"\"\"\n        raise NotImplementedError()\n\n    @staticmethod\n    def specifyInputs(cs) -> Dict[Union[str, settings.Setting], List[str]]:\n        \"\"\"\n        Return a collection of file names that are considered input files.\n\n        This is a static method (i.e. is not called on a particular instance of the class), since it\n        should not require an Interface to actually be constructed. This would require constructing\n        a reactor object, which is expensive.\n\n        The files returned by an implementation should be those that one would want copied to a\n        target location when cloning a Case or CaseSuite. These can be absolute paths, relative\n        paths, or glob patterns that will be interpolated relative to the input directory. 
Absolute\n        paths will not be copied anywhere.\n\n        The returned dictionary will enable the source Settings object to be updated to the new file\n        location. While the dictionary keys are recommended to be Setting objects, the name of the\n        setting as a string, e.g., \"shuffleLogic\", is still interpreted. If the string name does not\n        point to a valid setting then this will lead to a failure.\n\n        Note\n        ----\n        This existed before the advent of ARMI plugins. Perhaps it can be better served as a plugin\n        hook. Potential future work.\n\n        See Also\n        --------\n        armi.cases.Case.clone() : Main user of this interface.\n\n        Parameters\n        ----------\n        cs : Settings\n            The case settings for a particular Case\n        \"\"\"\n        return {}\n\n    def updatePhysicsCouplingControl(self):\n        \"\"\"Adjusts physics coupling settings depending on current state of run.\"\"\"\n        pass\n\n\nclass InputWriter:\n    \"\"\"Use to write input files of external codes.\"\"\"\n\n    def __init__(self, r=None, externalCodeInterface=None, cs=None):\n        self.externalCodeInterface = externalCodeInterface\n        self.eci = externalCodeInterface\n        self.r = r\n        self.cs = cs\n\n    def getInterface(self, name):\n        \"\"\"Get another interface by name.\"\"\"\n        if self.externalCodeInterface:\n            return self.externalCodeInterface.getInterface(name)\n        return None\n\n    def write(self, fName):\n        \"\"\"Write the input file.\"\"\"\n        raise NotImplementedError\n\n\nclass OutputReader:\n    \"\"\"\n    A generic representation of a particular module's output.\n\n    Attributes\n    ----------\n    success : bool\n        False by default, set to True if the run is considered to have completed without error.\n\n    Notes\n    -----\n    Should ideally not require r, eci, and fname arguments and would rather just have an 
apply(reactor) method.\n    \"\"\"\n\n    def __init__(self, r=None, externalCodeInterface=None, fName=None, cs=None):\n        self.externalCodeInterface = externalCodeInterface\n        self.eci = self.externalCodeInterface\n        self.r = r\n        self.cs = cs\n        if fName:\n            self.output = textProcessors.TextProcessor(fName)\n        else:\n            self.output = None\n        self.fName = fName\n        self.success = False\n\n    def getInterface(self, name):\n        \"\"\"Get another interface by name.\"\"\"\n        if self.externalCodeInterface:\n            return self.externalCodeInterface.getInterface(name)\n        return None\n\n    def read(self, fileName):\n        \"\"\"Read the output file.\"\"\"\n        raise NotImplementedError\n\n    def apply(self, reactor):\n        \"\"\"\n        Apply the output back to a reactor state.\n\n        This provides a generic interface for the output data of anything to be applied to a reactor state. The\n        application could involve reading text or binary output or simply parameters to appropriate values in some other\n        data structure.\n        \"\"\"\n        raise NotImplementedError()\n\n\ndef _setTightCouplerByInterfaceFunction(interfaceClass, cs):\n    \"\"\"\n    Return an instance of a ``TightCoupler`` class or ``None``.\n\n    Parameters\n    ----------\n    interfaceClass : Interface\n        Interface class that a ``TightCoupler`` object will be added to.\n    cs : Settings\n        Case settings that are parsed to determine if tight coupling is enabled globally and if both a target parameter\n        and convergence criteria defined.\n    \"\"\"\n    # No tight coupling if there is no purpose for the Interface defined.\n    if interfaceClass.purpose is None:\n        return None\n\n    if not cs[\"tightCoupling\"] or (interfaceClass.purpose not in cs[\"tightCouplingSettings\"]):\n        return None\n\n    parameter = 
cs[\"tightCouplingSettings\"][interfaceClass.purpose][\"parameter\"]\n    tolerance = cs[\"tightCouplingSettings\"][interfaceClass.purpose][\"convergence\"]\n    maxIters = cs[\"tightCouplingMaxNumIters\"]\n\n    return TightCoupler(parameter, tolerance, maxIters)\n\n\ndef getActiveInterfaceInfo(cs):\n    \"\"\"\n    Return a list containing information for all of the Interface classes that are present.\n\n    This creates a list of tuples, each containing an Interface subclass and appropriate kwargs for adding them to an\n    Operator stack, given case settings. There should be entries for all Interface classes that are returned from\n    implementations of the describeInterfaces() function in modules present in the passed list of packages. The list is\n    sorted by the ORDER specified by the module in which the specific Interfaces are described.\n\n    Parameters\n    ----------\n    cs : Settings\n        The case settings that activate relevant Interfaces\n    \"\"\"\n    interfaceInfo = []\n    for info in getPluginManagerOrFail().hook.exposeInterfaces(cs=cs):\n        interfaceInfo += info\n\n    interfaceInfo = [(iInfo.interfaceCls, iInfo.kwargs) for iInfo in sorted(interfaceInfo, key=lambda x: x.order)]\n\n    return interfaceInfo\n\n\ndef isInterfaceActive(klass, cs):\n    \"\"\"Return True if the Interface klass is active.\"\"\"\n    return any(issubclass(k, klass) for k, _kwargs in getActiveInterfaceInfo(cs))\n\n\nclass InterfaceInfo(NamedTuple):\n    \"\"\"\n    Data structure with interface info.\n\n    Notes\n    -----\n    If kwargs is an empty dictionary, defaults from ``armi.operators.operator.Operator.addInterface`` will be applied.\n\n    See Also\n    --------\n    armi.operators.operator.Operator.createInterfaces : where these ultimately\n        activate various interfaces.\n    \"\"\"\n\n    order: int\n    interfaceCls: Interface\n    kwargs: dict\n"
  },
  {
    "path": "armi/matProps/__init__.py",
    "content": "# Copyright 2026 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThe package armi.matProps is a material library capable of representing and computing material properties.\n\nThe matProps package allows users to define materials in a custom YAML format. The format is simple, extensible, and\neasy to use. Each material has a list of \"properties\" (like density, specific heat, vapor pressure, etc). Each of those\nproperties be an arbitrary function of multiple independent variables, or a look up table of one or more variables. Each\nof these properties can define their own set of references, to allow for trustworthy modeling. A major idea in matProps\nis that we separate out materials as \"data\", rather than representing them directly in Python as \"code\".\n\nThis package does not include any material data files. The unit tests in this package have many example YAML files, and\nARMI comes packaged with more real world examples at: ``armi/resources/materials/``. The user may create their own data\nfiles to use with ``matProps`` in a directory, and pass in that path via ``armi.matProps.loadAll(path)``.\n\n\nLoading Data\n============\n\nIn your Python code, you can load a full set of matProps materials into memory with just one or two lines of code. You\njust have to provide a path to a directory filled with correctly-formatted YAML files:\n\n.. 
code-block:: python\n\n    import armi.matProps\n\n    pathToMaterialYAMLs = \"path/to/materialDir/\"\n    armi.matProps.loadSafe(pathToMaterialYAMLs)\n\n\nIf you do not specify a directory for the YAML files, there is a default location in your virtual environment you can\nstore the data files (in a package named ``material_data``):\n\n.. code-block:: python\n\n    import armi.matProps\n\n    armi.matProps.loadSafe()\n\n\nAdding a Property\n=================\nmatProps comes with a large set of common material properties. But it is quite easy to add another material property to\nyour simulation, if you need to.\n\n.. code-block:: python\n\n    from armi.matProps.prop import defProp\n\n    defProp(\"fuzz\", \"fuzziness\", \"1/m^2\")\n    defProp(\"goo\", \"gooiness\", \"m^2/s\")\n    defProp(\"squish\", \"squishiness\", \"1/Pa\")\n\n    armi.matProps.loadSafe(\"path/to/hilarious/materials/\")\n\n\nA Note on Design\n================\nAt the high-level, the ``matProps`` API exposes the functions in this file (``loadAll``, ``loadSafe``,\n``getMaterials``, etc). And these functions all work off three global data collections:\n``armi.matProps.loadedRootDirs``, ``armi.matProps.materials``, and ``armi.matProps.prop.properties``.\n\nIt is worth noting that this design centers around global data. This could have a more object-oriented approach where\nthe functions below and these three data sets are all stored in a class, e.g. via a ``MaterialLibrary`` class. This\nwould be more Pythonic, and allow for multiple collections of materials, say for testing. 
So far, no one has ever needed\nmultiple colletions of materials from matProps, because a single scientific model generally only needs one source of\ntruth for what materials are.\n\"\"\"\n\nimport os\nimport sysconfig\nimport warnings\nfrom glob import glob\n\nfrom armi.matProps.material import Material\n\nloadedRootDirs = []\nmaterials = {}\n\n\ndef getPaths(rootDir: str) -> list:\n    \"\"\"Get the paths of all the YAML files in a given directory.\"\"\"\n    if not os.path.exists(rootDir):\n        raise FileNotFoundError(f\"Directory {rootDir} not found\")\n    elif not os.path.isdir(rootDir):\n        raise NotADirectoryError(f\"Input path {rootDir} is not a directory\")\n\n    patterns = [\"*.yaml\", \"*.yml\"]\n    matFiles = []\n    for pattern in patterns:\n        matFiles.extend(glob(os.path.join(rootDir, \"**\", pattern), recursive=True))\n\n    return matFiles\n\n\ndef addMaterial(yamlPath: str, mat):\n    \"\"\"\n    Adds Material object instance to matProps.materials dict.\n\n    Parameters\n    ----------\n    yamlPath: str\n        Yaml file path whose information is being parsed.\n    mat: Material\n        Material object whose data will be saved.\n    \"\"\"\n    global materials\n    if mat.name in materials:\n        msg = f\"A material with the name `{mat.name}` as defined in ({yamlPath}) already exists.\"\n        raise KeyError(msg)\n\n    materials[mat.name] = mat\n    mat.save()\n\n\ndef loadAll(rootDir: str = None) -> None:\n    \"\"\"\n    Loads all material files from a particular directory. If a materials directory is not provided, this function will\n    attempt to find materials in the default location in the virtual environment.\n\n    Parameters\n    ----------\n    rootDir: str\n        Directory whose YAML files will be loaded into matProps. The default is the materials_data location in the venv.\n\n    Notes\n    -----\n    Hidden in here is a default directory which you can load your YAML files from. 
Inside your Python virtual\n    environment, you can create a data directory named \"materials_data\", and store all your matProps formatted YAML\n    files. This is optional, of course, you can just explicitly pass a directory path into this method.\n    \"\"\"\n    global loadedRootDirs\n\n    if rootDir is None:\n        rootDir = os.path.join(sysconfig.getPaths()[\"purelib\"], \"materials_data\")\n        if not os.path.exists(rootDir):\n            raise OSError(f\"No material directory provided, and default not found: {rootDir}\")\n\n    paths = getPaths(rootDir)\n    for yamlPath in paths:\n        mat = Material()\n        try:\n            mat.loadFile(yamlPath)\n        except Exception as exc:\n            msg = f\"Failed to load `{yamlPath}`.\"\n            raise RuntimeError(msg) from exc\n        addMaterial(yamlPath, mat)\n\n    loadedRootDirs.append(rootDir)\n\n\ndef clear() -> None:\n    \"\"\"Clears all loaded materials in matProps.\"\"\"\n    global materials\n    global loadedRootDirs\n    loadedRootDirs.clear()\n    materials.clear()\n\n\ndef loadSafe(rootDir: str = None) -> None:\n    \"\"\"\n    Safely load a single directory of matProps materials.\n\n    Loading a materials directory via this function will first clear out any other materials that are loaded into\n    matProps. If a materials directory is not provided, this function will attempt to find materials in the default\n    location in the virtual environment. 
This is meant to be a helpful tool for testing.\n\n    Parameters\n    ----------\n    rootDir: str\n        Directory whose yaml files will be loaded into matProps.\n        The default is the materials_data location in the venv.\n\n    See Also\n    --------\n    loadAll : More flexible way to load materials into matProps.\n    \"\"\"\n    clear()\n    loadAll(rootDir)\n\n\ndef getHashes() -> dict:\n    \"\"\"Calls Material.hash() for each Material object in materials.\"\"\"\n    global materials\n    hashes = {}\n    for material in materials.values():\n        hashes[material.name] = material.hash()\n\n    return hashes\n\n\ndef getMaterial(name: str) -> Material:\n    \"\"\"\n    Returns a material object with the given name from matProps.materials.\n\n    Parameters\n    ----------\n    name: str\n        Name of material whose data user wishes to retrieve.\n\n    Returns\n    -------\n    Material\n        Material object returned from matProps.materials.\n    \"\"\"\n    global materials\n    try:\n        return materials[name]\n    except KeyError:\n        msg = f\"No material named `{name}` was loaded within loaded data.\"\n        raise KeyError(msg) from None\n\n\ndef loadMaterial(yamlPath: str, saveMaterial: bool = False) -> Material:\n    \"\"\"\n    Loads an individual material file.\n\n    Parameters\n    ----------\n    yamlPath: str\n        Path to YAML file that will be parsed into this object instance.\n    saveMaterial: bool\n        If True, Material object instance will be saved into matProps.materials.\n\n    Returns\n    -------\n    Material\n        Material object whose data is parsed from material file provided by yamlPath.\n    \"\"\"\n    mat = Material()\n    mat.loadFile(yamlPath)\n    if saveMaterial:\n        addMaterial(yamlPath, mat)\n    else:\n        msg = f\"Loading material {mat} {mat.hash()}\"\n        try:\n            # If possible, keep matProps free of ARMI imports\n            from armi import runLog\n\n            
runLog.info(msg)\n        except ImportError:\n            print(msg)\n\n    return mat\n\n\ndef loadedMaterials() -> list:\n    \"\"\"\n    Returns all the Material objects that have been loaded into matProps.materials.\n\n    Returns\n    -------\n    list of Material\n        Loaded Material objects\n    \"\"\"\n    global materials\n    mats = []\n    for mat in materials.values():\n        mats.append(mat)\n\n    return mats\n\n\ndef getLoadedRootDirs() -> list:\n    \"\"\"\n    Returns a list of all of the loaded root directories.\n\n    Returns\n    -------\n    list of str\n        Loaded root directories\n    \"\"\"\n    global loadedRootDirs\n    return loadedRootDirs\n\n\ndef load_all(rootDir: str = None) -> None:\n    \"\"\"Pass-through to temporarily support an old API.\"\"\"\n    warnings.warn(\"Please use matProps.loadAll, not matProps.load_all.\", DeprecationWarning)\n    loadAll(rootDir)\n\n\ndef load_safe(rootDir: str = None) -> None:\n    \"\"\"Pass-through to temporarily support an old API.\"\"\"\n    warnings.warn(\"Please use matProps.loadSafe, not matProps.load_safe.\", DeprecationWarning)\n    loadSafe(rootDir)\n\n\ndef get_material(name: str) -> Material:\n    \"\"\"Pass-through to temporarily support an old API.\"\"\"\n    warnings.warn(\"Please use matProps.getMaterial, not matProps.get_material.\", DeprecationWarning)\n    return getMaterial(name)\n\n\ndef load_material(yamlPath: str, saveMaterial: bool = False) -> Material:\n    \"\"\"Pass-through to temporarily support an old API.\"\"\"\n    warnings.warn(\"Please use matProps.loadMaterial, not matProps.load_material.\", DeprecationWarning)\n    return loadMaterial(yamlPath, saveMaterial)\n\n\ndef loaded_materials() -> list:\n    \"\"\"Pass-through to temporarily support an old API.\"\"\"\n    warnings.warn(\"Please use matProps.loadedMaterials, not matProps.loaded_materials.\", DeprecationWarning)\n    return loadedMaterials()\n\n\ndef get_loaded_root_dirs() -> list:\n    
\"\"\"Pass-through to temporarily support an old API.\"\"\"\n    warnings.warn(\"Please use matProps.getLoadedRootDirs, not matProps.get_loaded_root_dirs.\", DeprecationWarning)\n    return getLoadedRootDirs()\n"
  },
  {
    "path": "armi/matProps/constituent.py",
    "content": "# Copyright 2026 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Basic material composition.\"\"\"\n\n\nclass Constituent:\n    \"\"\"Makeup of the Material.composition.\"\"\"\n\n    def __init__(self, name: str, minValue: float, maxValue: float, isBalance: bool):\n        \"\"\"\n        Constructor for Constituent object.\n\n        Parameters\n        ----------\n        name: str\n            Name of constituent element\n        minValue: float\n            Minimum value of constituent\n        maxValue: float\n            Maximum value of constituent\n        isBalance: bool\n            Boolean used to denote if constituent is balance element (True) or not (False).\n        \"\"\"\n        self.name = name\n        \"\"\"Name of the constituent\"\"\"\n        self.minValue = minValue\n        \"\"\"Min value of the constituent\"\"\"\n        self.maxValue = maxValue\n        \"\"\"Max value of the constituent\"\"\"\n        self.isBalance = isBalance\n        \"\"\"Flag for indicating if the consitituent is intended to the balance of the composition\"\"\"\n\n        if self.minValue < 0.0:\n            msg = f\"Constituent {self.name} has a negative minimum composition value.\"\n            raise ValueError(msg)\n        elif self.maxValue < self.minValue:\n            msg = f\"Constituent {self.name} has an invalid maximum composition value. 
(max < min)\"\n            raise ValueError(msg)\n        elif self.maxValue > 100.0:\n            msg = f\"Constituent {self.name} has an invalid maximum composition value. (max > 100.0)\"\n            raise ValueError(msg)\n\n    def __repr__(self):\n        \"\"\"Provides string representation of Constituent object.\"\"\"\n        msg = f\"<Constituent {self.name} min: {self.minValue} max: {self.maxValue}\"\n        if self.isBalance:\n            msg += \" computed based on balance\"\n        msg += \">\"\n        return msg\n\n    @staticmethod\n    def parseComposition(node):\n        \"\"\"\n        Method which parses \"composition\" node from yaml file and returns container of Contituent objects.\n\n        Returns list of Constituent objects. Each element is constructed from a map element in the \"composition node\".\n\n        Parameters\n        ----------\n        node: dict\n            YAML object representing composition node.\n\n        Returns\n        -------\n        list : Constituent\n            List of Constituent objects representing elements of Material.\n        \"\"\"\n        composition = []\n        elementSet = set()\n        balanceName = \"\"\n        balanceMin = 100.0\n        balanceMax = 100.0\n        sumMin = 0.0\n        sumMax = 0.0\n        numBalance = 0\n        for element, nodeContent in node.items():\n            if element == \"references\":\n                continue\n\n            elementSet.add(element)\n\n            if nodeContent == \"balance\":\n                balanceName = element\n                numBalance += 1\n            elif type(nodeContent) is str or len(nodeContent) != 2:\n                msg = (\n                    f\"Composition values must be either a tuple of min/max values, or `balance`, but got: {nodeContent}\"\n                )\n                raise TypeError(msg)\n            else:\n                constituentMin = nodeContent[0]\n                constituentMax = nodeContent[1]\n           
     sumMin += constituentMin\n                sumMax += constituentMax\n                part = Constituent(element, constituentMin, constituentMax, False)\n                composition.append(part)\n\n        if numBalance != 1:\n            msg = (\n                f\"Composition node must have exactly one balance element. Composition node has {numBalance} balance \"\n                \"elements instead.\"\n            )\n            raise ValueError(msg)\n\n        if balanceName:\n            if sumMin > 100.0:\n                raise ValueError(\"Composition has a minimum composition summation greater than 100.0\")\n\n            if sumMax >= 100.0:\n                balanceMin = 0.0\n            else:\n                balanceMin -= sumMax\n\n            balanceMax -= sumMin\n            balance = Constituent(balanceName, balanceMin, balanceMax, True)\n            composition.append(balance)\n\n        return composition\n"
  },
  {
    "path": "armi/matProps/function.py",
    "content": "# Copyright 2026 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Generic class for a function to be defined in a YAML.\"\"\"\n\n\nclass Function:\n    \"\"\"\n    An base class for computing material Properties. The word \"function\" here is used in the mathematical sense, to\n    describe a generic mathematical curve. The various Function types are read in from YAML, and interpreted at run\n    time. The sub-classes of Function have specific requirements on the YAML format.\n    \"\"\"\n\n    # This is the list of all nodes that are associated with functions in the YAML input file. Any node named something\n    # not in this list is assumed to be an independent variable for the function. 
This list needs to remain updated if\n    # any child class adds a new YAML node.\n    FUNCTION_NODES = {\n        \"type\",  # All equations have this to define the child class type\n        \"tabulated data\",  # Optional for all equations, required for table functions\n        \"equation\",  # Used by SymbolicFunction for the equation definition\n        \"functions\",  # Used by PiecewiseFunction to define the child functions\n        \"reference temperature\",  # Optional for all equations\n    }\n\n    def __init__(self, mat, prop):\n        \"\"\"\n        Constructor for base Function class.\n\n        Parameters\n        ----------\n        mat: Material\n            Material object with which this Function is associated\n        prop: Property\n            Property that is represented by this Function\n        \"\"\"\n        self.material = mat\n        \"\"\"A pointer back to the parent Material for this Function.\"\"\"\n\n        self.property = prop\n        \"\"\"The Property this Function represents.\"\"\"\n\n        self.independentVars: dict = {}  # Keys are the independent variables, values are a tuple of the min/max bounds\n\n        self.tableData = None\n        \"\"\"A TableFunction containing verification data for this specific function.\n        Note that for actual TableFunction instances, the tableData property is NULL.\"\"\"\n\n        self._referenceTemperature: float = -274.0\n        \"\"\"Reference temperature. 
Initialized be less than absolute zero in degrees Celsius\"\"\"\n\n        self._references = []\n        \"\"\"Reference data\"\"\"\n\n    def clear(self):\n        self.tableData = None\n\n    @staticmethod\n    def isTable():\n        \"\"\"Returns True if a subclass of TableFunction, otherwise False.\"\"\"\n        return False\n\n    def getReferenceTemperature(self):\n        \"\"\"\n        Returns the reference temperature, in Celcius, if it is defined.\n\n        Returns\n        -------\n        float\n            Reference temperature, in Celcius\n        \"\"\"\n        # If this statement below is true, either the reference temperature was not provided in the material YAML file\n        # or was a non-physical value.\n        if self._referenceTemperature < -273.15:\n            raise ValueError(\"Reference temperature is undefined or set to less than absolute zero.\")\n\n        return self._referenceTemperature\n\n    def getIndependentVariables(self):\n        \"\"\"\n        Returns the independent variables that are required for this function.\n\n        Returns\n        -------\n        list\n            list of independent variable strings\n        \"\"\"\n        return list(self.independentVars.keys())\n\n    def getMinBound(self, var) -> float:\n        \"\"\"\n        Returns the minimum bound for the requested variable.\n\n        Returns\n        -------\n        float\n            Minimum valid value\n        \"\"\"\n        return self.independentVars[var][0]\n\n    def getMaxBound(self, var) -> float:\n        \"\"\"\n        Returns the minimum bound for the requested variable.\n\n        Returns\n        -------\n        float\n            Maximum valid value\n        \"\"\"\n        return self.independentVars[var][1]\n\n    @property\n    def references(self) -> list:\n        return self._references\n\n    def calc(self, point: dict = None, **kwargs):\n        \"\"\"\n        Calculate the quantity of a specific Property.\n\n       
 The user must provide a \"point\" dictionary, or kwargs, but not both or neither.\n\n        Parameters\n        ----------\n        point: dict\n            dictionary of independent variable/value pairs\n        kwargs:\n            dictionary of independent variable/value pairs, same purpose but to allow a nicer API.\n\n        Returns\n        -------\n        float\n            property evaluation\n        \"\"\"\n        # This method should take in one dictionary or a set of kwargs, but not both\n        if point is not None and kwargs:\n            raise ValueError(\"Please provide either a single dictionary or a set of kwargs, but not both.\")\n        elif point is None and not kwargs:\n            raise ValueError(\"Please provide at least one input to this method.\")\n\n        # select the inputs provided\n        if point:\n            data = point\n        else:\n            data = kwargs\n\n        # input sanity checking\n        if not self.independentVars.keys() <= data.keys():\n            raise KeyError(\n                f\"Specified point {data} does contain the correct independent variables: {self.independentVars}\"\n            )\n        elif not self.inRange(data):\n            raise ValueError(f\"Requested calculation point, {data} is not in the valid range of the function\")\n\n        return self._calcSpecific(data)\n\n    def inRange(self, point: dict) -> bool:\n        \"\"\"\n        Determine if a point is within range of the function.\n\n        Parameters\n        ----------\n        point: dict\n            dictionary of independent variable/value pairs\n\n        Returns\n        -------\n        bool\n            True if the point is in the valid range, False otherwise.\n        \"\"\"\n        for var, bounds in self.independentVars.items():\n            if point[var] < bounds[0] or point[var] > bounds[1]:\n                return False\n        return True\n\n    def __repr__(self):\n        \"\"\"Provides string 
representation of Function object.\"\"\"\n        return f\"<{self.__class__.__name__}>\"\n\n    @staticmethod\n    def _factory(mat, node, prop):\n        \"\"\"\n        Parsing a property node and using that information to construct a Function object. This method is responsible\n        for searching for the assigning the Function object to the appropriate child class instance.\n\n        Parameters\n        ----------\n        mat: Material\n            Material object which is associated with the returned Function object\n        node: dict\n            YAML object representing root level node of material yaml file being parsed\n        prop: Property\n            Property object that is being populated on the Material\n\n        Returns\n        -------\n        Function\n            Function pointer parsed from the specified property.\n        \"\"\"\n        from armi.matProps.piecewiseFunction import PiecewiseFunction\n        from armi.matProps.symbolicFunction import SymbolicFunction\n        from armi.matProps.tableFunction1D import TableFunction1D\n        from armi.matProps.tableFunction2D import TableFunction2D\n\n        funTypes = {\n            \"symbolic\": SymbolicFunction,\n            \"table\": TableFunction1D,\n            \"two dimensional table\": TableFunction2D,\n            \"piecewise\": PiecewiseFunction,\n        }\n\n        funcNode = node[\"function\"]\n        funcType = str(funcNode[\"type\"])\n        func = funTypes[funcType](mat, prop)\n\n        func._parse(node)\n        return func\n\n    def _setBounds(self, node: dict, var: str):\n        \"\"\"\n        Validate and set the min and max bounds for a variable.\n\n        Parameters\n        ----------\n        node: dict\n            dictionary that contains min and max values.\n        var: str\n            name of the variable\n        \"\"\"\n        if \"min\" not in node or \"max\" not in node:\n            raise KeyError(\n                f\"The independent variable 
node, {var}, is not formatted correctly: {node}. If this node is not \"\n                \"intended to be an independent variable, please ensure that the Function.FUNCTION_NODES set is updated \"\n                \"properly.\"\n            )\n\n        minVal = float(node[\"min\"])\n        maxVal = float(node[\"max\"])\n        if maxVal < minVal:\n            raise ValueError(f\"Maximum bound {maxVal} cannot be less than the minimum bound {minVal}\")\n        self.independentVars[var] = (minVal, maxVal)\n\n    def _parse(self, node):\n        \"\"\"\n        Method used to parse property node and fill in appropriate Function data members.\n\n        Parameters\n        ----------\n        node\n            YAML containing object to be parsed\n        \"\"\"\n        from armi.matProps.reference import Reference\n        from armi.matProps.tableFunction1D import TableFunction1D\n        from armi.matProps.tableFunction2D import TableFunction2D\n\n        funcNode = node[\"function\"]\n\n        refTempNode = funcNode.get(\"reference temperature\", None)\n        if refTempNode is not None:\n            self._referenceTemperature = float(refTempNode)\n\n        funcType = str(funcNode[\"type\"])\n        references = node.get(\"references\", [])\n        for ref in references:\n            self._references.append(Reference._factory(ref))\n\n        tabulatedNode = node.get(\"tabulated data\", None)\n        if tabulatedNode:\n            if funcType == \"two dimensional table\":\n                self.tableData = TableFunction2D(self.material, self.property)\n            else:\n                self.tableData = TableFunction1D(self.material, self.property)\n\n            if self.isTable():\n                self._parseSpecific(node)\n                self.tableData = self\n            else:\n                self.tableData._parseSpecific(node)\n        elif self.isTable():\n            raise KeyError(\"Missing node `tabulated data`\")\n\n        for var in funcNode:\n   
         if var not in self.FUNCTION_NODES:\n                self._setBounds(funcNode[var], var)\n\n        if not self.isTable():\n            self._parseSpecific(node)\n\n    def _parseSpecific(self, node):\n        \"\"\"\n        Abstract method that is used to parse information specific to Function child classes.\n\n        Parameters\n        ----------\n        node\n            YAML containing object information to parse and fill in Function\n        \"\"\"\n        raise NotImplementedError()\n\n    def _calcSpecific(self, point: dict) -> float:\n        \"\"\"\n        Private method that contains the analytic expression used to return a property value.\n\n        Parameters\n        ----------\n        point : dict\n            dictionary of independent variable/value pairs\n\n        Returns\n        -------\n        float\n            property evaluation at specified independent variable point\n        \"\"\"\n        raise NotImplementedError()\n"
  },
  {
    "path": "armi/matProps/interpolationFunctions.py",
    "content": "# Copyright 2026 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Some basic interpolation routines.\"\"\"\n\nimport math\n\n\ndef findIndex(val: float, x: list) -> int:\n    \"\"\"\n    Find the location of the provided value in the provided collection.\n\n    Parameters\n    ----------\n    val: float\n        Value whose index is needed in x\n    x: list\n        List of numbers\n\n    Returns\n    -------\n    int\n        Integer containing index wherein x[i] <= Tc <= x[i+1]\n    \"\"\"\n    if val < x[0]:\n        raise ValueError(f\"Value {val} out of bounds: {x}\")\n\n    for ii in range(len(x) - 1):\n        Tc1 = x[ii]\n        Tc2 = x[ii + 1]\n        if val >= Tc1 and val <= Tc2:\n            return ii\n\n    raise ValueError(f\"Value {val} out of bounds: {x}\")\n\n\ndef linearLinear(Tc: float, x: list, y: list) -> float:\n    \"\"\"\n    Find the approximate value on a XY table assuming a linear-linear curve.\n\n    Parameters\n    ----------\n    Tc: float\n        Independent variable at which an interpolation value is desired.\n    x: list\n        List of independent variable values\n    y: list\n        List of dependent variable values\n\n    Returns\n    -------\n    float\n        Float containing final interpolation value based on a linear-linear interpolation.\n    \"\"\"\n    ii: int = findIndex(Tc, x)\n    Tc1: float = x[ii]\n    Tc2: float = x[ii + 1]\n    return (Tc - Tc1) / (Tc2 - Tc1) * (y[ii 
+ 1] - y[ii]) + y[ii]\n\n\ndef logLinear(Tc: float, x: list, y: list) -> float:\n    \"\"\"\n    Find the approximate value on a XY table assuming a log-linear curve.\n\n    Parameters\n    ----------\n    Tc: float\n        Independent variable at which an interpolation value is desired.\n    x: list\n        List of independent variable values\n    y: list\n        List of dependent variable values\n\n    Returns\n    -------\n    float\n        Float containing final interpolation value based on a log-linear interpolation.\n    \"\"\"\n    ii: int = findIndex(Tc, x)\n    Tc1: float = math.log10(x[ii])\n    Tc2: float = math.log10(x[ii + 1])\n    return (math.log10(Tc) - Tc1) / (Tc2 - Tc1) * (y[ii + 1] - y[ii]) + y[ii]\n"
  },
  {
    "path": "armi/matProps/material.py",
    "content": "# Copyright 2026 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"How matProps defines a material class.\"\"\"\n\nimport hashlib\nfrom pathlib import Path\n\nfrom ruamel.yaml import YAML\n\nfrom armi.matProps import prop\nfrom armi.matProps.constituent import Constituent\nfrom armi.matProps.function import Function\nfrom armi.matProps.materialType import MaterialType\n\n\nclass Material:\n    \"\"\"\n    The Material class is a generic container for all Material types, whether they contain ASME properties, fluid\n    properties, or steel properties.\n\n    It may be necessary to have multiple Material definitions for a single material containing different phases.\n    \"\"\"\n\n    validFileFormatVersions = [3.0, \"TESTS\"]\n\n    def __init__(self):\n        \"\"\"Constructor for Material class.\"\"\"\n        self._saved = False\n        \"\"\"Boolean denoting whether or not Material object is saved in materials dict.\"\"\"\n\n        self.materialType = None\n        \"\"\"Enum represting type for the Material object\"\"\"\n\n        self.composition = []\n        \"\"\"List of Constituent objects representing composition of Material.\"\"\"\n\n        self.name = None\n        \"\"\"Name of Material object.\"\"\"\n\n        self._sha1 = None\n        \"\"\"SHA1 value of parsed material file.\"\"\"\n\n    def __repr__(self):\n        \"\"\"Provides string representation for Material class.\"\"\"\n        return 
f\"<Material {self.name} {str(self.materialType)}>\"\n\n    def hash(self) -> str:\n        \"\"\"Returns the SHA1 hash value of a Material instance.\"\"\"\n        return self._sha1\n\n    def saved(self) -> bool:\n        \"\"\"\n        Returns a bool value indicating whether the Material has been stored internally in the matProps.materials map\n        via matProps.addMaterial().\n        \"\"\"\n        return self._saved\n\n    def save(self):\n        \"\"\"Sets Material._saved flag to True.\"\"\"\n        self._saved = True\n\n    @staticmethod\n    def dataCheckMaterialFile(filePath, rootNode):\n        \"\"\"\n        This is a partial data check of the material data file.\n\n        Checks the first level of data keywords and also check that the file format is a valid version.\n\n        Parameters\n        ----------\n        filePath: str\n            Path containing name of YAML file whose file format and property nodes are checked.\n        rootNode: dict\n            Root YAML node of file parsed from filePath.\n        \"\"\"\n        file_format = Material.getNode(rootNode, \"file format\")\n        if file_format not in Material.validFileFormatVersions:\n            msg = f\"Invalid file format version `{file_format}` used in: {filePath}\"\n            raise ValueError(msg)\n\n        for propName in rootNode:\n            if propName in {\"composition\", \"material type\", \"file format\"}:\n                continue\n\n            if not prop.contains(propName):\n                msg = f\"Invalid property node `{propName}` found in: {filePath}\"\n                raise KeyError(msg)\n\n    @staticmethod\n    def getValidFileFormatVersions():\n        \"\"\"Get a vector of strings with all of the valid file format versions.\"\"\"\n        return Material.validFileFormatVersions\n\n    @staticmethod\n    def getNode(node: dict, subnodeName: str):\n        \"\"\"\n        Searches a node for a child element and returns it.\n\n        Parameters\n     
   ----------\n        node: dict\n            Parent level node from which a child element is searched.\n        subnodeName: str\n            Name of the child element that is queried from node.\n        \"\"\"\n        if subnodeName not in node:\n            msg = f\"Missing YAML node `{subnodeName}`\"\n            raise KeyError(msg)\n\n        return node[subnodeName]\n\n    def loadNode(self, node: dict):\n        \"\"\"\n        Loads YAML and parses information to fill in Material data members including all relevant Function objects.\n\n        Parameters\n        ----------\n        node: dict\n            Material definition, like a dict that is loaded from a YAML file.\n        \"\"\"\n        self.materialType = MaterialType.fromString(self.getNode(node, \"material type\"))\n        self.composition = Constituent.parseComposition(self.getNode(node, \"composition\"))\n\n        for p in prop.properties:\n            if p.name and p.name in node:\n                setattr(self, p.symbol, Function._factory(self, node[p.name], p))\n            else:\n                # Any property not in the input file will be set to None.\n                setattr(self, p.symbol, None)\n\n    def loadFile(self, filePath: str):\n        \"\"\"\n        Loads yaml file and parses information to fill in Material data members including all relevant Function objects.\n\n        Parameters\n        ----------\n        filePath: str\n            Path containing name of YAML file to parse.\n        \"\"\"\n        # load the file path\n        y = YAML(pure=True)\n        node = y.load(Path(filePath))\n\n        # grab the material name from the file name\n        n = Path(filePath).name\n        if n.lower().endswith(\".yaml\"):\n            n = n[:-5]\n        elif n.lower().endswith(\".yml\"):\n            n = n[:-4]\n        self.name = n\n\n        # Generate SHA1 value and set data member\n        sha1 = hashlib.sha1()\n        with open(filePath, \"rb\") as materialFile:\n   
         sha1.update(materialFile.read())\n        self._sha1 = sha1.hexdigest()\n\n        self.dataCheckMaterialFile(filePath, node)\n        self.loadNode(node)\n"
  },
  {
    "path": "armi/matProps/materialType.py",
    "content": "# Copyright 2026 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Some definition of material types: fluid, fuel, metal, etc.\"\"\"\n\n\nclass MaterialType:\n    \"\"\"\n    A container for the methods used to differentiate between the types of materials.\n\n    The MaterialType class is used to determine whether the material contain ASME, fluid, fuel, or metal properties. It\n    may also be used for the phase of the material.\n    \"\"\"\n\n    \"\"\"Dictionary mapping material type strings to enum values.\"\"\"\n    types = {\n        \"Fuel\": 1,\n        \"Metal\": 2,\n        \"Fluid\": 4,\n        \"Ceramic\": 8,\n        \"ASME2015\": 16,\n        \"ASME2017\": 32,\n        \"ASME2019\": 64,\n    }\n\n    def __init__(self, value: int = 0):\n        \"\"\"\n        Constructor for MaterialType class.\n\n        Parameters\n        ----------\n        value: int\n            Integer enum value denoting material type.\n        \"\"\"\n        self._value: int = value\n        \"\"\"Enum value representing type of material.\"\"\"\n\n    @staticmethod\n    def fromString(name: str) -> \"MaterialType\":\n        \"\"\"\n        Provides MaterialType object from a user provided string.\n\n        Parameters\n        ----------\n        name: str\n            String from which a MaterialType object will be derived.\n\n        Returns\n        -------\n        MaterialType\n        \"\"\"\n        value: int = 
MaterialType.types.get(name, 0)\n\n        if value == 0:\n            msg = f\"Invalid material type `{name}`, valid names are: {list(MaterialType.types.keys())}\"\n            raise KeyError(msg)\n\n        return MaterialType(value)\n\n    def __repr__(self):\n        \"\"\"Provides string representation of MaterialType instance.\"\"\"\n        name = \"None\"\n        for typ, val in self.types.items():\n            if val == self._value:\n                name = typ\n                break\n\n        return f\"<MaterialType {name}>\"\n\n    def __eq__(self, other) -> bool:\n        \"\"\"\n        Support for \"==\" comparison operator.\n\n        Parameters\n        ----------\n        other: MaterialType or int\n            RHS object that is compared to MaterialType instance.\n\n        Returns\n        -------\n        bool\n            True if objects ._value data members are equivalent, False otherwise.\n        \"\"\"\n        if type(other) is int:\n            return self._value == other\n        elif type(other) is MaterialType:\n            return self._value == other._value\n        else:\n            raise TypeError(f\"Cannot compare MaterialType to type {type(other)}\")\n"
  },
  {
    "path": "armi/matProps/piecewiseFunction.py",
    "content": "# Copyright 2026 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nA piecewise-defined function for use in material YAML files.\n\nEach piece can be of any other type that matProps supports.\n\"\"\"\n\nimport math\n\nfrom armi.matProps.function import Function\n\n\nclass PiecewiseFunction(Function):\n    \"\"\"\n    A piecewise function is composed of many other subfunctions, any of which can be any subclass of the Function type,\n    including ``PiecewiseFunction``.\n\n    The PiecewiseFunction uses the ``Function.inRange`` method to determine which sub-function should be used for\n    computing the quantity. 
An example with the YAML format is::\n\n        function:\n            <var1>:\n                min: <min1>\n                max: <max1>\n            <var2>:\n                min: <min2>\n                max: <max2>\n            type: piecewise\n            functions:\n            - function:\n                <var1>:\n                    min: <local min1>\n                    max: <local max1>\n                <var2>:\n                    min: <local min2>\n                    max: <local max2>\n                type: ...\n                tabulated data: *alias # it is suggested that the same table is used for the entire range\n            - function:\n                <var1>:\n                    min: <local min1>\n                    max: <local max1>\n                <var2>:\n                    min: <local min2>\n                    max: <local max2>\n                type: ...\n                tabulated data: *alias # it is suggested that the same table is used for the entire range\n    \"\"\"\n\n    def __init__(self, mat, prop):\n        \"\"\"\n        Constructor for PiecewiseFunction object.\n\n        Parameters\n        ----------\n        mat: Material\n            Material object with which this PiecewiseFunction is associated\n        prop: Property\n            Property that is represented by this PiecewiseFunction\n        \"\"\"\n        super().__init__(mat, prop)\n\n        self.functions = []\n        \"\"\"List of Function objects used to compose PiecewiseFunction object.\"\"\"\n\n    def __repr__(self):\n        \"\"\"Provides string representation of PiecewiseFunction object.\"\"\"\n        msg = \"<PiecewiseFunction \"\n        for subFunc in self.functions:\n            msg += str(subFunc)\n\n        msg += \">\"\n        return msg\n\n    def clear(self) -> None:\n        for fun in self.functions:\n            del fun\n        self.functions.clear()\n\n    def _parseSpecific(self, node):\n        \"\"\"\n        Parses nodes that are 
specific to PiecewiseFunction objects.\n\n        Parameters\n        ----------\n        node : dict\n            Dictionary containing the node whose values will be parsed to fill object.\n        \"\"\"\n\n        def checkOverlap(func1, func2):\n            \"\"\"Checks if the valid range for two functions overlaps on all dimensions.\"\"\"\n            for var in self.independentVars:\n                min1, max1 = func1.independentVars[var]\n                min2, max2 = func2.independentVars[var]\n\n                if math.isclose(max1, min2) or math.isclose(min1, max2):\n                    # This handles floating point comparison. Adjoining regions is allowed.\n                    return False\n                if max1 < min2 or min1 > max2:\n                    # no overlap on this dimension, so no overlap overall\n                    return False\n\n            # Overlap on all dimensions\n            return True\n\n        for subFunctionDef in node[\"function\"][\"functions\"]:\n            func = self._factory(self.material, subFunctionDef, self.property)\n            self.functions.append(func)\n\n        # Ensure bounds have same variables in parent and child functions.\n        for subFunc in self.functions:\n            for var in self.independentVars:\n                if var not in subFunc.independentVars:\n                    raise KeyError(\n                        \"Piecewise child function must have same variables for valid range as main function.\"\n                    )\n\n        # Check for overlapping regions\n        for i, func1 in enumerate(self.functions):\n            for func2 in self.functions[i + 1 :]:\n                if checkOverlap(func1, func2):\n                    raise ValueError(f\"Piecewise child functions overlap: {func1}, {func2}\")\n\n    def _calcSpecific(self, point: dict) -> float:\n        \"\"\"\n        Private method that contains the analytic expression used to return a property value.\n\n        Parameters\n        
----------\n        point: dict\n            dictionary of independent variable/value pairs\n\n        Returns\n        -------\n        float\n            property evaluation at specified independent variable point\n        \"\"\"\n        for subFunc in self.functions:\n            if subFunc.inRange(point):\n                return subFunc.calc(point)\n\n        raise ValueError(\"PiecewiseFunction error, could not evaluate\")\n"
  },
  {
    "path": "armi/matProps/point.py",
    "content": "# Copyright 2026 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A single data point in a YAML file.\"\"\"\n\n\nclass Point:\n    \"\"\"A single data point in a YAML file.\"\"\"\n\n    def __init__(self, var1, var2, val):\n        \"\"\"\n        Constructor for Point class.\n\n        Parameters\n        ----------\n        var1: float\n            Independent variable 1\n        var2: float\n            If provided, independent variable 2\n        val: float\n            Dependent variable value for property\n        \"\"\"\n        self.variable1 = var1\n        \"\"\"Value of first independent variable.\"\"\"\n\n        self.variable2 = var2\n        \"\"\"Value of second independent variable.\"\"\"\n\n        self.value = val\n        \"\"\"Value of Property dependent value\"\"\"\n\n    def __repr__(self):\n        \"\"\"Provides string representation of Point object.\"\"\"\n        return f\"<Point {self.variable1}, {self.variable2} -> {self.value}>\"\n"
  },
  {
    "path": "armi/matProps/prop.py",
    "content": "# Copyright 2026 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nIn the parlance of matProps, a material 'Property' is a physical characteristic of the material that can be described\nmathematically. For instance, density, specific heat, specific gravity, coefficient of linear expansion, etc.\n\"\"\"\n\nproperties = set()\n\nPROPERTIES = {\n    \"alpha_d\": (\"thermal diffusivity\", \"m^2/s\", r\"(\\alpha_d)\"),\n    \"alpha_inst\": (\"instantaneous coefficient of thermal expansion\", r\"(1/^\\circ{}C)\", r\"(\\alpha_{inst})\"),\n    \"alpha_mean\": (\"mean coefficient of thermal expansion\", r\"(1/^\\circ{}C)\", r\"(\\alpha_{mean})\"),\n    \"c_p\": (\"specific heat capacity\", r\"U(J/(kg\\dot{}^\\circ{}C))U\"),\n    \"dH_fus\": (\"enthalpy of fusion\", \"J/kg\", r\"(\\Delta H_{f})\"),\n    \"dH_vap\": (\"latent heat of vaporization\", \"J/kg\", r\"(\\Delta H_{v})\"),\n    \"dl_l\": (\"linear expansion\", \"unitless\", r\"\\Delta l_{percent}\"),\n    \"dV\": (\"volumetric expansion\", r\"m^3/(^\\circ{}C)\", r\"\\Delta V\"),\n    \"E\": (\"Young's modulus\", \"Pa\"),\n    \"Elong\": (\"elongation\", \"%\", r\"\\epsilon\"),\n    \"eps_iso\": (\"strain from isochronous stress-strain curve\", \"unitless\"),\n    \"eps_t\": (\"design fatigue strain range\", \"unitless\"),\n    \"f\": (\"factor f from ASME.III.5 Fig. 
HBB-T-1432-2\", \"unitless\"),\n    \"G\": (\"electrical conductance\", r\"U(1/(\\Omega\\dot m))U\"),\n    \"gamma\": (\"surface tension\", r\"(N\\dot m)\", r\"(\\gamma)\"),\n    \"H\": (\"enthalpy\", \"J/kg\"),\n    \"H_calc_T\": (\"temperature from enthalpy\", r\"(^\\circ{}C)\", r\"(^\\circ{}C)\"),\n    \"HBW\": (\"Brinell Hardness\", \"BHN\"),\n    \"k\": (\"thermal conductivity\", r\"U(W/(m\\dot{}^\\circ{}C))U\"),\n    \"K_IC\": (\"fracture toughness\", r\"MPa\\dot\\sqrt(m)\", r\"K_{IC}\"),\n    \"kappa\": (\"isothermal compressibility\", r\"(1/Pa)\", r\"(\\kappa)\"),\n    \"Kv_prime\": (\"factor Kv' from ASME.III.5 Fig. HBB-T-1432-3\", \"unitless\", r\"K_{v}^{'}\"),\n    \"mu_d\": (\"dynamic viscosity\", r\"(Pa\\dot{}s)\", r\"(\\mu_d)\"),\n    \"mu_k\": (\"kinematic viscosity\", \"m^2/s\", r\"(\\mu_k)\"),\n    \"nu\": (\"Poisson's ratio\", \"unitless\", r\"(\\nu)\"),\n    \"nu_g\": (\"vapor specific volume\", \"m^3/kg\", r\"\\nu\"),\n    \"P_sat\": (\"vapor pressure\", r\"(Pa)\", \"P_{sat}\"),\n    \"rho\": (\"density\", \"kg/m^3\", r\"(\\rho)\"),\n    \"S\": (\"shear modulus\", \"Pa\"),\n    \"Sa\": (\"allowable stress\", \"Pa\"),\n    \"SaFat\": (\"design fatigue stress\", \"Pa\"),\n    \"Sm\": (\"design stress\", \"Pa\"),\n    \"Smt\": (\"service reference stress\", \"Pa\"),\n    \"So\": (\"design reference stress\", \"Pa\"),\n    \"Sr\": (\"stress to rupture\", \"Pa\"),\n    \"St\": (\"time dependent design stress\", \"Pa\"),\n    \"Su\": (\"tensile strength\", \"Pa\"),\n    \"Sy\": (\"yield strength\", \"Pa\"),\n    \"T_boil\": (\"boiling temperature\", r\"(^\\circ{}C)\", r\"(T_{boil})\"),\n    \"T_liq\": (\"liquidus temperature\", r\"(^\\circ{}C)\", r\"(T_{liq})\"),\n    \"T_melt\": (\"melting temperature\", r\"(^\\circ{}C)\", r\"(T_{melt})\"),\n    \"T_sol\": (\"solidus temperature\", r\"(^\\circ{}C)\", r\"(T_{sol})\"),\n    \"tMaxSr\": (\"allowable time to rupture\", \"s\"),\n    \"tMaxSt\": (\"allowable time to allowable stress\", \"s\"),\n    
\"TSRF\": (\"tensile strength reduction factor\", \"unitless\"),\n    \"v_sound\": (\"speed of sound\", \"m/s\", r\"(v_{sound})\"),\n    \"WSRF\": (\"weld strength reduction factor\", \"unitless\"),\n    \"YSRF\": (\"yield strength reduction factor\", \"unitless\"),\n}\n\n\nclass Property:\n    \"\"\"A Property of a material. Most properties are computed as temperature-dependent functions.\"\"\"\n\n    def __init__(self, name: str, symbol: str, units: str, tex: str = None):\n        \"\"\"\n        Constructor for Property class.\n\n        Parameters\n        ----------\n        name: str\n            Name of the property.\n        symbol: str\n            Symbol of the property.\n        units: str\n            String representing the units of the property.\n        tex: str (optional)\n            TeX symbol used to represent the property. Defaults to symbol.\n        \"\"\"\n        self.name: str = name\n        \"\"\"Name of the Property, used to retrieve the property from the data file\"\"\"\n\n        self.symbol: str = symbol\n        \"\"\"Symbol of the property, same as the module-level attribute and Material attribute\"\"\"\n\n        self.units: str = units\n        \"\"\"Units of the Property\"\"\"\n\n        self.TeX: str = tex if tex is not None else symbol\n        \"\"\"math-style TeX symbol\"\"\"\n\n    def __repr__(self):\n        \"\"\"Provides string representation of Property instance.\"\"\"\n        return f\"<Property {self.name}, {self.symbol}, in {self.units}>\"\n\n\ndef contains(name: str):\n    \"\"\"\n    Checks to see if a string representing a desired property is in the global properties list.\n\n    Parameters\n    ----------\n    name: str\n        Name of the property whose value is searched for in global properties list.\n\n    Returns\n    -------\n    bool\n        True if name is in properties, False otherwise.\n    \"\"\"\n    global properties\n    return any(name == p.name for p in properties)\n\n\ndef defProp(symbol: str, 
name: str, units: str, tex: str = None):\n    \"\"\"\n    Method which constructs and adds Property objects to global properties object.\n\n    Parameters\n    ----------\n    name: str\n        Name of the property.\n    symbol: str\n        Symbol of the property.\n    units: str\n        String representing the units of the property.\n    tex: str (optional)\n        TeX symbol used to represent the property. Defaults to symbol.\n    \"\"\"\n    global properties\n    if contains(name):\n        raise KeyError(f\"Property already defined: {name}\")\n\n    if tex is None:\n        tex = symbol\n\n    p = Property(name, symbol, units, tex)\n    properties.add(p)\n\n\ndef initialize():\n    \"\"\"Construct the global list of default properties in matProps.\"\"\"\n    for symbol, vals in PROPERTIES.items():\n        name = vals[0]\n        units = vals[1]\n        tex = vals[2] if len(vals) > 2 else None\n        defProp(symbol, name, units, tex)\n\n\ninitialize()\n"
  },
  {
    "path": "armi/matProps/reference.py",
    "content": "# Copyright 2026 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"All data in the material YAMLs need to have a reference for the information source.\"\"\"\n\nUNDEFINED_REF_DATA = \"NONE\"\n\n\nclass Reference:\n    \"\"\"\n    A container for the source of the material's data. The Reference class is used to manage the material data's source\n    information and have methods to extract the data for generating the reference section of documentation.\n    \"\"\"\n\n    def __init__(self):\n        self._ref = \"\"\n        \"\"\"Entire reference in a single string\"\"\"\n\n        self._type = \"\"\n        \"\"\"Type of document (open literature|export controlled|test|your company name)\"\"\"\n\n    def __repr__(self):\n        if not self._ref:\n            return UNDEFINED_REF_DATA\n        elif not self._type:\n            return self._ref\n        else:\n            return f\"{self._ref} ({self._type})\"\n\n    @staticmethod\n    def _factory(node):\n        \"\"\"\n        Sets Reference data from a given reference node.\n\n        Parameters\n        ----------\n        node: dict\n            Dictionary representing a child element from the \"references\" node.\n\n        Returns\n        -------\n        Reference\n            Reference object with data parsed from node.\n        \"\"\"\n        reference = Reference()\n\n        refNode = node[\"ref\"]\n        if refNode:\n            reference._ref = 
str(refNode)\n\n        typeNode = node[\"type\"]\n        if typeNode:\n            reference._type = str(typeNode)\n\n        return reference\n\n    def getRef(self):\n        \"\"\"Accessor which returns _ref value.\"\"\"\n        return self._ref\n\n    def getType(self):\n        \"\"\"Accessor which returns _type value.\"\"\"\n        return self._type\n"
  },
  {
    "path": "armi/matProps/symbolicFunction.py",
    "content": "# Copyright 2026 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A generic symbolic function support for curves in a material YAML file.\"\"\"\n\n# Import math so that it is available for the eval statement\nimport math\nfrom copy import copy\n\nfrom sympy import symbols\nfrom sympy.parsing import parse_expr\nfrom sympy.utilities.lambdify import lambdastr\n\nfrom armi.matProps.function import Function\n\n\nclass SymbolicFunction(Function):\n    \"\"\"\n    A symbolic function. 
A functional form defined in the YAML file is parsed.\n\n    An example with the YAML format is::\n\n        function:\n          <var1>:\n            min: <min1>\n            max: <max1>\n          <var2>:\n            min: <min2>\n            max: <max2>\n          ...\n          type: symbolic\n          equation: <functional form>\n    \"\"\"\n\n    def __init__(self, mat, prop):\n        \"\"\"\n        Constructor for SymbolicFunction object.\n\n        Parameters\n        ----------\n        mat: Material\n            Material object with which this SymbolicFunction is associated\n        prop: Property\n            Property that is represented by this SymbolicFunction\n        \"\"\"\n        super().__init__(mat, prop)\n        self.eqn = None\n        self.sympyStr = None\n\n    def _parseSpecific(self, node):\n        \"\"\"\n        Parses nodes that are specific to Symbolic Function object.\n\n        Parameters\n        ----------\n        node: dict\n            Dictionary containing the node whose values will be parsed to fill object.\n        \"\"\"\n        eqn = str(node[\"function\"][\"equation\"])\n\n        try:\n            symbolList = []\n            for var in self.independentVars:\n                symbolList.append(symbols(var))\n            sympyEqn = parse_expr(eqn, evaluate=False)\n            self.sympyStr = lambdastr(symbolList, sympyEqn)\n            self.eqn = eval(self.sympyStr)\n\n            # Try evaluating the function at the maximum bound. This should result in a number if the equation is\n            # properly formatted. Bad equations will throw an error either in the `lambdastr` `eval` or this `float( )`\n            # line. 
This is important to catch poor equations now before they cause problems intermittently later (only\n            # when calc is called for that equation).\n            point = []\n            for var in self.independentVars:\n                point.append(self.getMaxBound(var))\n\n            float(self.eqn(*point))\n        except Exception as e:\n            raise ValueError(\n                f\"Equation provided could not be interpreted:\"\n                f\" {eqn}, {getattr(self, 'sympyStr', 'Symbolic string not created yet.')}\"\n            ) from e\n\n    def _calcSpecific(self, point: dict) -> float:\n        \"\"\"\n        Returns an evaluation for a symbolic function.\n\n        Parameters\n        ----------\n        point: dict\n            dictionary of independent variable/value pairs\n        \"\"\"\n        result = self.eqn(*[point[var] for var in self.independentVars])\n        if isinstance(result, complex):\n            raise ValueError(f\"Function is undefined at {point}. Evaluates to complex number: {result}\")\n        if math.isnan(result):\n            raise ValueError(f\"Function is undefined at {point}. Evaluates to not a number.\")\n\n        return float(result)\n\n    def __repr__(self):\n        \"\"\"Provides string representation of SymbolicFunction object.\"\"\"\n        return f\"<SymbolicFunction {self.sympyStr}>\"\n\n    def __getstate__(self):\n        d = copy(self.__dict__)\n        d[\"eqn\"] = None\n        return d\n\n    def __setstate__(self, s):\n        self.__dict__ = s\n        self.eqn = eval(self.sympyStr)\n"
  },
  {
    "path": "armi/matProps/tableFunction.py",
    "content": "# Copyright 2026 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A simple implementation for a simple table to replace analytic curves in the YAML data files.\"\"\"\n\nfrom armi.matProps.function import Function\n\n\nclass TableFunction(Function):\n    \"\"\"An abstract TableFunction; the base class for other table lookup methods.\"\"\"\n\n    @staticmethod\n    def isTable():\n        return True\n\n    def _setBounds(self, node: dict):\n        \"\"\"\n        Validate and set the min and max bounds for a variable.\n\n        Parameters\n        ----------\n        node: dict\n            dictionary that contains min and max values.\n        \"\"\"\n        raise NotImplementedError()\n"
  },
  {
    "path": "armi/matProps/tableFunction1D.py",
    "content": "# Copyright 2026 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A simple implementation for a one dimensional table to replace analytic curves in the YAML data files.\"\"\"\n\nfrom armi.matProps.interpolationFunctions import linearLinear\nfrom armi.matProps.tableFunction import TableFunction\n\n\nclass TableFunction1D(TableFunction):\n    \"\"\"\n    A one dimensional table function, containing pairs of data.\n\n    An example with the YAML format is::\n\n        function:\n          <var>: 0\n          type: table\n          tabulated data:\n            - [0.0, 0.0]\n            - [50, 1e99]\n            - [100, 2e-99]\n            - [150, 100]\n\n    \"\"\"\n\n    def __init__(\n        self,\n        mat,\n        prop,\n    ):\n        \"\"\"\n        Constructor for TableFunction1D object.\n\n        Parameters\n        ----------\n        mat: Material\n            Material object with which this TableFunction1D is associated\n        prop: Property\n            Property that is represented by this TableFunction1D\n        \"\"\"\n        super().__init__(mat, prop)\n\n        self._var1s = []\n        \"\"\"List of independent variable values for TableFunction1D object.\"\"\"\n\n        self._values = []\n        \"\"\"List of property values for TableFunction1D object.\"\"\"\n\n    def __repr__(self):\n        \"\"\"Provides string representation of TableFunction1D object.\"\"\"\n        return 
\"<TableFunction1D>\"\n\n    def _setBounds(self, node: dict, var: str):\n        \"\"\"\n        Validate and set the min and max bounds for a variable.\n\n        Parameters\n        ----------\n        node: dict\n            dictionary that contains min and max values.\n        var: str\n            name of the variable\n        \"\"\"\n        self.independentVars[var] = (float(min(self._var1s)), float(max(self._var1s)))\n\n    def _parseSpecific(self, prop):\n        \"\"\"\n        Parses a temperature dependent table function.\n\n        Parameters\n        ----------\n        prop: dict\n            Node containing tabulated data that needs to be parsed.\n        \"\"\"\n        tabulated_data = prop[\"tabulated data\"]\n        for val in tabulated_data:\n            self._var1s.append(float(val[0]))\n            self._values.append(float(val[1]))\n\n    def _calcSpecific(self, point: dict) -> float:\n        \"\"\"\n        Performs a linear interpolation on tabular data.\n\n        Parameters\n        ----------\n        point: dict\n            dictionary of independent variable/value pairs\n        \"\"\"\n        var = list(self.independentVars.keys())[0]\n        if var in point:\n            return linearLinear(point[var], self._var1s, self._values)\n\n        raise ValueError(f\"Specified point does not contain the correct independent variables: {self.independentVars}\")\n"
  },
  {
    "path": "armi/matProps/tableFunction2D.py",
    "content": "# Copyright 2026 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A simple implementation for a 2D table to replace analytic curves in the YAML data files.\"\"\"\n\nimport copy\n\nfrom armi.matProps.interpolationFunctions import findIndex, logLinear\nfrom armi.matProps.tableFunction import TableFunction\n\n\nclass TableFunction2D(TableFunction):\n    \"\"\"\n    A 2 dimensional table function. The input format, below, is permitted to have null values in it, which if used\n    during the calculation/interpolation will throw a ValueError.\n\n    The YAML format demonstrating the two dimensional tabulated data is::\n\n        function:\n          <var1>: 0\n          <var2>: 1\n          type: two dimensional table\n        tabulated data:\n          - [null,   [ 375., 400., 425., 450., 475., 500., 525., 550., 575., 600., 625., 650.]]\n          - [1.,     [   1.,   1.,   1.,   1.,   1.,   1.,   1.,   1.,   1.,   1.,   1.,   1.]]\n          - [10.,    [   1.,   1.,   1.,   1.,   1.,   1.,   1.,   1.,   1.,   1.,   1.,   1.]]\n          - [300.,   [   1.,   1.,   1.,   1.,   1.,   1.,   1.,   1.,  .97,  .91,  .87,  .84]]\n          - [30000., [   1.,   1.,   1.,   1.,  .93,  .88,  .83,  .80,  .75, null, null, null]]\n          - [300000.,[   1.,   1.,   1.,  .89,  .83,  .79,  .74,  .70,  .66, null, null, null]]\n    \"\"\"\n\n    def __init__(self, mat, prop):\n        \"\"\"\n        Constructor for TableFunction2D 
object.\n\n        Parameters\n        ----------\n        mat: Material\n            Material object with which this TableFunction2D is associated\n        prop: Property\n            Property that is represented by this TableFunction2D\n        \"\"\"\n        super().__init__(mat, prop)\n\n        self._rowValues = []\n        \"\"\"List containing all of the time or cycle values for TableFunction2D object.\"\"\"\n\n        self._columnValues = []\n        \"\"\"List containing all of the temperature values for TableFunction2D object.\"\"\"\n\n        self._data = []\n        \"\"\"List containing all of the property values in TableFunction2D object.\"\"\"\n\n    def __repr__(self):\n        \"\"\"Provides string representation of TableFunction2D object.\"\"\"\n        return \"<TableFunction2D>\"\n\n    def _setBounds(self, node: int, var: str):\n        \"\"\"\n        Validate and set the min and max bounds for a variable.\n\n        Parameters\n        ----------\n        node: int\n            This number is zero for columns, and one for rows.\n        var: str\n            name of the variable\n\n        Notes\n        -----\n        The method declaration here does not match the one in the super class Function. The type of the \"node\" arguement\n        should be dict, but it is int. 
This is a surprising and awkward asymmetry.\n        \"\"\"\n        if node == 0:\n            cache = None\n            if self.independentVars:\n                # Need to re-arrange order.\n                cache = copy.deepcopy(self.independentVars)\n                self.independentVars = {}\n\n            self.independentVars[var] = (\n                float(min(self._columnValues)),\n                float(max(self._columnValues)),\n            )\n\n            if cache:\n                self.independentVars[list(cache.keys())[0]] = list(cache.values())[0]\n        elif node == 1:\n            self.independentVars[var] = (float(min(self._rowValues)), float(max(self._rowValues)))\n        else:\n            raise ValueError(f\"The node value must be 0 or 1, but was: {node}\")\n\n    def _parseSpecific(self, prop):\n        \"\"\"\n        Parses a 2D table function.\n\n        Parameters\n        ----------\n        prop: dict\n            Node containing tabulated data that needs to be parsed.\n        \"\"\"\n        tabulatedData = prop[\"tabulated data\"]\n\n        skippedFirst = False\n        for rowNode in tabulatedData:\n            if not skippedFirst:\n                for cValNode in rowNode[1]:\n                    self._columnValues.append(float(cValNode))\n                    self._data.append([])\n\n                skippedFirst = True\n                continue\n\n            currentRowVal = float(rowNode[0])\n\n            self._rowValues.append(currentRowVal)\n            var1DependentData = rowNode[1]\n            for cIndex in range(len(self._columnValues)):\n                value = var1DependentData[cIndex]\n                self._data[cIndex].append(None if value in (\"null\", None) else float(value))\n\n    def _calcSpecific(self, point: dict) -> float:\n        \"\"\"\n        Performs 2D interpolation on tabular data.\n\n        Parameters\n        ----------\n        point: dict\n            dictionary of independent variable/value pairs\n  
      \"\"\"\n        columnVar = list(self.independentVars.keys())[0]\n        rowVar = list(self.independentVars.keys())[1]\n        if columnVar in point and rowVar in point:\n            columnVal = point[columnVar]\n            rowVal = point[rowVar]\n        else:\n            raise ValueError(f\"Specified point does not contain the correct independent variables: {self.independentVars}\")\n\n        cIndex = findIndex(columnVal, self._columnValues)\n        rVal0 = logLinear(rowVal, self._rowValues, self._data[cIndex])\n        rVal1 = logLinear(rowVal, self._rowValues, self._data[cIndex + 1])\n        cVal0 = self._columnValues[cIndex]\n        cVal1 = self._columnValues[cIndex + 1]\n        return (columnVal - cVal0) / (cVal1 - cVal0) * (rVal1 - rVal0) + rVal0\n"
  },
  {
    "path": "armi/matProps/tests/__init__.py",
    "content": "# Copyright 2026 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Generic testing tools for the matProps package.\"\"\"\n\nimport math\nimport unittest\n\nfrom armi.matProps.material import Material\n\n\nclass MatPropsFunTestBase(unittest.TestCase):\n    \"\"\"Base class that provides some common functionality for testing matProps Functions.\"\"\"\n\n    def setUp(self):\n        self.testName = self.id().split(\".\")[-1]\n        searchStr = \"test_\"\n        if self.testName.startswith(searchStr):\n            self.testName = self.testName[len(searchStr) :]\n\n    @staticmethod\n    def polynomialEvaluation(powerMap, value):\n        \"\"\"\n        Perform a polynomial evaluation at a specified value.\n\n        Parameters\n        ----------\n        powerMap : dict\n            Dictionary mapping power to its corresponding coefficient.\n        value: float\n            Independent variable to evaluate the polynomial at.\n\n        Returns\n        -------\n        float\n            The polynomial evaluation\n        \"\"\"\n        return sum(coefficient * pow(value, power) for power, coefficient in powerMap.items())\n\n    @staticmethod\n    def powerLawEvaluation(coefficients, value):\n        \"\"\"Perform a power law evaluation at a specified value.\"\"\"\n        intercept = coefficients.get(\"intercept\", 0.0)\n        outerMultiplier = coefficients.get(\"outer multiplier\", 1.0)\n        innerAdder = 
coefficients[\"inner adder\"]\n        exponent = coefficients[\"exponent\"]\n\n        return intercept + outerMultiplier * (value + innerAdder) ** exponent\n\n    @staticmethod\n    def hyperbolicEvaluation(coefficients, value):\n        \"\"\"Perform a hyperbolic function evaluation at a specified value.\"\"\"\n        intercept = coefficients[\"intercept\"]\n        outerMultiplier = coefficients[\"outer multiplier\"]\n        innerAdder = coefficients[\"inner adder\"]\n        innerDenominator = coefficients[\"inner denominator\"]\n\n        return intercept + outerMultiplier * math.tanh((value + innerAdder) / innerDenominator)\n\n    @staticmethod\n    def createEqnPoly(coefficients):\n        \"\"\"Creates a symbolic polynomial function from a dictionary of powers.\"\"\"\n        eqn = \"\"\n        for power, value in coefficients.items():\n            if not eqn:\n                # Make sure we don't have a leading + sign\n                eqn += f\"{value}*T**{power}\"\n            else:\n                eqn += f\" + {value}*T**{power}\"\n        return eqn\n\n    @staticmethod\n    def createEqnPower(coefficients):\n        \"\"\"Creates a symbolic power law function from a dictionary of constants.\"\"\"\n        eqn = f\"{coefficients.get('intercept', '')}\"\n        if \"outer multiplier\" in coefficients:\n            eqn += f\" + {coefficients['outer multiplier']}*\"\n        else:\n            eqn += \" +\"\n        eqn += f\"(T + {coefficients['inner adder']})**{coefficients['exponent']}\"\n        return eqn\n\n    @staticmethod\n    def createEqnHyper(coefficients):\n        \"\"\"Creates a symbolic hyperbolic function from a dictionary of constants.\"\"\"\n        return (\n            f\"{coefficients['intercept']} + \"\n            f\"{coefficients['outer multiplier']}*\"\n            f\"{coefficients['hyperbolic function']}(\"\n            f\"(T+{coefficients['inner adder']})/{coefficients['inner denominator']})\"\n        )\n\n    def 
_createFunctionWithoutTable(self, data=None):\n        \"\"\"\n        Helper function designed to create a basic viable yaml file without tabulated data in the function.\n\n        Parameters\n        ----------\n        data : dict\n            A dictionary containing user specified function child nodes.\n        \"\"\"\n        funcBody = {\"T\": {\"min\": -100.0, \"max\": 500.0}}\n        funcBody.update(data or {})\n        materialData = {\n            \"file format\": \"TESTS\",\n            \"composition\": {\"Fe\": \"balance\"},\n            \"material type\": \"Metal\",\n            \"density\": {\"function\": funcBody},\n        }\n\n        mat = Material()\n        mat.loadNode(materialData)\n\n        return mat\n\n    def _createFunction(self, data=None, tableData=None, minT=-100.0, maxT=500.0):\n        \"\"\"\n        Helper function designed to create a basic viable yaml file.\n\n        Parameters\n        ----------\n        data : dict\n            A dictionary containing user specified function child nodes.\n        tableData : dict\n            Table data to include in the function definition\n        minT : float\n            Float containing the minimum T variable value for the function.\n        maxT : float\n            Float containing the maximum T variable value for the function.\n        \"\"\"\n        funcBody = {\"T\": {\"min\": minT, \"max\": maxT}}\n        funcBody.update(data or {})\n        materialData = {\n            \"file format\": \"TESTS\",\n            \"composition\": {\"Fe\": \"balance\"},\n            \"material type\": \"Metal\",\n            \"density\": {\"function\": funcBody, \"tabulated data\": tableData or {}},\n        }\n\n        mat = Material()\n        mat.loadNode(materialData)\n\n        return mat\n\n    def belowMinimumCheck(self, yamlData, tableData=None):\n        \"\"\"Check if a ValueError is thrown if attempting to evaluate below the min value of a given T variable.\"\"\"\n        mat = 
self._createFunction(yamlData, tableData)\n        func = mat.rho\n        with self.assertRaises(ValueError):\n            func.calc({\"T\": func.getMinBound(\"T\") - 0.01})\n\n    def aboveMaximumCheck(self, yamlData, tableData=None):\n        \"\"\"Checks if a ValueError is thrown if attempting to evaluate above the max value of the T variable.\"\"\"\n        mat = self._createFunction(yamlData, tableData)\n        func = mat.rho\n        with self.assertRaises(ValueError):\n            func.calc({\"T\": func.getMaxBound(\"T\") + 0.01})\n"
  },
  {
    "path": "armi/matProps/tests/invalidTestFiles/badFileFormat.YAML",
    "content": "file format: INVALID\nmaterial type: Fluid\ncomposition:\n  a: balance\n\ndensity:\n  function:\n    T:\n      min: 100.0\n      max: 200.0\n    type: symbolic\n    equation: 1.0\n"
  },
  {
    "path": "armi/matProps/tests/invalidTestFiles/badProperty.yaml",
    "content": "file format: TESTS\nmaterial type: Metal\ncomposition:\n  Na: 1.0\n\nbad_prop: whatever"
  },
  {
    "path": "armi/matProps/tests/invalidTestFiles/duplicateComposition.yaml",
    "content": "file format: TESTS\nmaterial type: Fluid\ncomposition:\n  a: [15, 30]\n  b: [10, 15]\n  b: [11, 16]\n  c: balance\n\ndensity:\n  function:\n    T:\n      min: 100.0\n      max: 200.0\n    type: symbolic\n    equation: 1.0\n"
  },
  {
    "path": "armi/matProps/tests/testDir1/a.yaml",
    "content": "file format: TESTS\nmaterial type: Fluid\ncomposition:\n  a: balance\n  references:\n    - ref: ACME II.2017, Table 3 pg 182\n      refType: open literature\n\ndensity:\n  function:\n    T:\n      min: 100.0\n      max: 200.0\n    type: symbolic\n    equation: 1.0\n"
  },
  {
    "path": "armi/matProps/tests/testDir1/b.yaml",
    "content": "file format: TESTS\nmaterial type: Fluid\ncomposition:\n  b: balance\n\ndensity:\n  function:\n    T:\n      min: 100.0\n      max: 200.0\n    type: symbolic\n    equation: 2.0\n"
  },
  {
    "path": "armi/matProps/tests/testDir2/c.yml",
    "content": "file format: TESTS\nmaterial type: Fluid\ncomposition:\n  c: balance\n\ndensity:\n  function:\n    T:\n      min: 100.0\n      max: 200.0\n    type: symbolic\n    equation: 3.0\n"
  },
  {
    "path": "armi/matProps/tests/testDir2/d.yaml",
    "content": "file format: TESTS\nmaterial type: Fluid\ncomposition:\n  d: balance\n\ndensity:\n  function:\n    T:\n      min: 100.0\n      max: 200.0\n    type: symbolic\n    equation: 4.0\n"
  },
  {
    "path": "armi/matProps/tests/testDir3/a.yaml",
    "content": "file format: TESTS\nmaterial type: Fluid\ncomposition:\n  a: balance\n\ndensity:\n  function:\n    T:\n      min: 100.0\n      max: 200.0\n    type: symbolic\n    equation: 6.0\n"
  },
  {
    "path": "armi/matProps/tests/testDir3/e.yaml",
    "content": "file format: TESTS\nmaterial type: Fluid\ncomposition:\n  e: balance\n\ndensity:\n  function:\n    T:\n      min: 100.0\n      max: 200.0\n    type: symbolic\n    equation: 5.0\n"
  },
  {
    "path": "armi/matProps/tests/testDir4/sampleProperty.yaml",
    "content": "file format: TESTS\nmaterial type: Fluid\ncomposition:\n  a: balance\n\ndensity:\n  function:\n    T:\n      min: 101.0\n      max: 501.0\n    type: symbolic\n    equation: 1.0\n\nspecific heat capacity:\n  function:\n    T:\n      min: 102.0\n      max: 502.0\n    type: symbolic\n    equation: 2.0\n\nthermal conductivity:\n  function:\n    T:\n      min: 103.0\n      max: 503.0\n    type: symbolic\n    equation: 3.0\n\nthermal diffusivity:\n  function:\n    T:\n      min: 104.0\n      max: 504.0\n    type: symbolic\n    equation: 4.0\n\ndynamic viscosity:\n  function:\n    T:\n      min: 105.0\n      max: 505.0\n    type: symbolic\n    equation: 5.0\n\nkinematic viscosity:\n  function:\n    T:\n      min: 106.0\n      max: 506.0\n    type: symbolic\n    equation: 6.0\n\nmelting temperature:\n  function:\n    T:\n      min: 107.0\n      max: 507.0\n    type: symbolic\n    equation: 7.0\n\nboiling temperature:\n  function:\n    T:\n      min: 108.0\n      max: 508.0\n    type: symbolic\n    equation: 8.0\n\nlatent heat of vaporization:\n  function:\n    T:\n      min: 109.0\n      max: 509.0\n    type: symbolic\n    equation: 9.0\n\nenthalpy of fusion:\n  function:\n    T:\n      min: 110.0\n      max: 510.0\n    type: symbolic\n    equation: 10.0\n\nsurface tension:\n  function:\n    T:\n      min: 111.0\n      max: 511.0\n    type: symbolic\n    equation: 11.0\n\nvapor pressure:\n  function:\n    T:\n      min: 112.0\n      max: 512.0\n    type: symbolic\n    equation: 12.0\n\nisothermal compressibility:\n  function:\n    T:\n      min: 113.0\n      max: 513.0\n    type: symbolic\n    equation: 13.0\n\nmean coefficient of thermal expansion:\n  function:\n    T:\n      min: 114.0\n      max: 514.0\n    type: symbolic\n    equation: 14.0\n\ninstantaneous coefficient of thermal expansion:\n  function:\n    T:\n      min: 115.0\n      max: 515.0\n    type: symbolic\n    equation: 15.0\n\nYoung's modulus:\n  function:\n    T:\n      min: 116.0\n      
max: 516.0\n    type: symbolic\n    equation: 16.0\n\nPoisson's ratio:\n  function:\n    T:\n      min: 117.0\n      max: 517.0\n    type: symbolic\n    equation: 17.0\n\nyield strength:\n  function:\n    T:\n      min: 118.0\n      max: 518.0\n    type: symbolic\n    equation: 18.0\n\ntensile strength:\n  function:\n    T:\n      min: 119.0\n      max: 519.0\n    type: symbolic\n    equation: 19.0\n\ndesign stress:\n  function:\n    T:\n      min: 120.0\n      max: 520.0\n    type: symbolic\n    equation: 20.0\n\ndesign reference stress:\n  function:\n    T:\n      min: 121.0\n      max: 521.0\n    type: symbolic\n    equation: 21.0\n\nallowable stress:\n  function:\n    T:\n      min: 122.0\n      max: 522.0\n    type: symbolic\n    equation: 22.0\n\ntime dependent design stress:\n  function:\n    T:\n      min: 123.0\n      max: 523.0\n    type: symbolic\n    equation: 23.0\n\nservice reference stress:\n  function:\n    T:\n      min: 124.0\n      max: 524.0\n    type: symbolic\n    equation: 24.0\n\nstress to rupture:\n  function:\n    T:\n      min: 125.0\n      max: 525.0\n    type: symbolic\n    equation: 25.0\n\ntensile strength reduction factor:\n  function:\n    T:\n      min: 126.0\n      max: 526.0\n    type: symbolic\n    equation: 26.0\n\nyield strength reduction factor:\n  function:\n    T:\n      min: 127.0\n      max: 527.0\n    type: symbolic\n    equation: 27.0\n\nweld strength reduction factor:\n  function:\n    T:\n      min: 127.0\n      max: 527.0\n    type: symbolic\n    equation: 28.0\n\nallowable time to rupture:\n  function:\n    T:\n      min: 128.0\n      max: 528.0\n    type: symbolic\n    equation: 29.0\n\nallowable time to allowable stress:\n  function:\n    T:\n      min: 129.0\n      max: 529.0\n    type: symbolic\n    equation: 30.0\n\ndesign fatigue strain range:\n  function:\n    T:\n      min: 130.0\n      max: 530.0\n    type: symbolic\n    equation: 31.0\n\nstrain from isochronous stress-strain curve:\n  function:\n    T:\n   
   min: 130.0\n      max: 530.0\n    type: symbolic\n    equation: 32.0\n\ndesign fatigue stress:\n  function:\n    T:\n      min: 131.0\n      max: 531.0\n    type: symbolic\n    equation: 33.0\n\nlinear expansion:\n  function:\n    T:\n      min: 132.0\n      max: 532.0\n    type: symbolic\n    equation: 34.0\n\nvapor specific volume:\n  function:\n    T:\n      min: 133.0\n      max: 533.0\n    type: symbolic\n    equation: 35.0\n\nspeed of sound:\n  function:\n    T:\n      min: 134.0\n      max: 534.0\n    type: symbolic\n    equation: 36.0\n\nsolidus temperature:\n  function:\n    T:\n      min: 135.0\n      max: 535.0\n    type: symbolic\n    equation: 37.0\n\nliquidus temperature:\n  function:\n    T:\n      min: 136.0\n      max: 536.0\n    type: symbolic\n    equation: 38.0\n\nvolumetric expansion:\n  function:\n    T:\n      min: 137.0\n      max: 537.0\n    type: symbolic\n    equation: 39.0\n\nenthalpy:\n  function:\n    T:\n      min: 138.0\n      max: 538.0\n    type: symbolic\n    equation: 40.0\n\ntemperature from enthalpy:\n  function:\n    T:\n      min: 139.0\n      max: 539.0\n    type: symbolic\n    equation: 41.0\n\nfracture toughness:\n  function:\n    T:\n      min: 140.0\n      max: 540.0\n    type: symbolic\n    equation: 42.0\n\nBrinell Hardness:\n  function:\n    T:\n      min: 141.0\n      max: 541.0\n    type: symbolic\n    equation: 43.0\n\nfactor f from ASME.III.5 Fig. HBB-T-1432-2:\n  function:\n    T:\n      min: 141.0\n      max: 541.0\n    type: symbolic\n    equation: 44.0\n\nfactor Kv' from ASME.III.5 Fig. HBB-T-1432-3:\n  function:\n    T:\n      min: 141.0\n      max: 541.0\n    type: symbolic\n    equation: 45.0\n\nshear modulus:\n  function:\n    T:\n      min: 141.0\n      max: 541.0\n    type: symbolic\n    equation: 46.0\n\nelongation:\n  function:\n    T:\n      min: 141.0\n      max: 541.0\n    type: symbolic\n    equation: 47.0\n"
  },
  {
    "path": "armi/matProps/tests/testMaterialsData/materialA.yaml",
    "content": "file format: TESTS\nmaterial type: Fluid\ncomposition:\n  a: balance\n\ndensity:\n  function:\n    T:\n      min: 201.0\n      max: 601.0\n    type: symbolic\n    equation: 101.0*T + 500\n"
  },
  {
    "path": "armi/matProps/tests/testMaterialsData/materialB.yaml",
    "content": "file format: TESTS\nmaterial type: Fluid\ncomposition:\n  b: balance\n\nspecific heat capacity:\n  function:\n    T: \n      min: 202.0\n      max: 602.0\n    type: symbolic\n    equation: 102.0\n"
  },
  {
    "path": "armi/matProps/tests/testMaterialsData/materialsSubDir/materialC.yaml",
    "content": "file format: TESTS\nmaterial type: Fluid\ncomposition:\n  c: balance\n\nthermal conductivity:\n  function:\n    T: \n      min: 103.0\n      max: 503.0\n    type: symbolic\n    equation: 3.0\n"
  },
  {
    "path": "armi/matProps/tests/testMaterialsData/materialsSubDir/materialD.yaml",
    "content": "file format: TESTS\nmaterial type: Fluid\ncomposition:\n  d: balance\n\nthermal diffusivity:\n  function:\n    T:\n      min: 204.0\n      max: 604.0\n    type: symbolic\n    equation: 104.0\n"
  },
  {
    "path": "armi/matProps/tests/test_1DSymbolicFunction.py",
    "content": "# Copyright 2026 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Simple examples to verify constant, polynomial, hyperbolic, and power law functional forms.\"\"\"\n\nimport numpy as np\n\nfrom armi.matProps.tests import MatPropsFunTestBase\n\n\nclass Test1DSymbolicFunction(MatPropsFunTestBase):\n    \"\"\"Test 1D symbolic functions.\"\"\"\n\n    @classmethod\n    def setUpClass(cls):\n        super().setUpClass()\n\n        cls.basePolynomialMap = {0: 5, 1: 2, 2: -3, 3: 4, 4: -5, 5: 6, 6: -7, 7: 8}\n        cls.basePolynomialData = {\n            \"type\": \"symbolic\",\n            \"equation\": cls.createEqnPoly(cls.basePolynomialMap),\n        }\n\n        cls.basePowerLawTerms = {\n            \"exponent\": 2.0,\n            \"inner adder\": 125.0,\n            \"outer multiplier\": 3.4,\n            \"intercept\": -2.5,\n        }\n\n        cls.basePowerLawData = {\n            \"type\": \"symbolic\",\n            \"equation\": cls.createEqnPower(cls.basePowerLawTerms),\n        }\n\n        cls.baseHyperbolicTerms = {\n            \"hyperbolic function\": \"tanh\",\n            \"intercept\": 5,\n            \"outer multiplier\": 2,\n            \"inner denominator\": 4,\n            \"inner adder\": 1,\n        }\n\n        cls.baseHyperbolicData = {\n            \"type\": \"symbolic\",\n            \"equation\": cls.createEqnHyper(cls.baseHyperbolicTerms),\n        }\n\n        cls.baseConstantData = {\"type\": 
\"symbolic\", \"equation\": \"9123.5\"}\n\n    def test_polynomialEqnIntInt(self):\n        \"\"\"\n        Evaluates a PolynomialFunction that has 8 power values that are all integers.\n\n        Ensure that the override methods PolynomialFunction._parseSpecific() and PolynomialFunction._calcSpecific() are\n        functioning appropriately. A minimal input with a defined polynomial function is provided. The polynomial is\n        comprised of all integer coefficients and powers to ensure that matProps can properly handle integer inputs. The\n        function is evaluated at several values in the valid range and compared to a lambda expression inside the test\n        method to make sure their results are equivalent.\n        \"\"\"\n        # these polynomials have up to 8 powers/terms (including 0)\n        mat = self._createFunction(self.basePolynomialData)\n        mat.name = self.testName\n        self.assertEqual(str(mat), f\"<Material {self.testName} <MaterialType Metal>>\")\n\n        # test using input dict for calc\n        self.assertAlmostEqual(mat.rho.calc({\"T\": 0}), self.polynomialEvaluation(self.basePolynomialMap, 0))\n        self.assertAlmostEqual(mat.rho.calc({\"T\": 50}), self.polynomialEvaluation(self.basePolynomialMap, 50))\n        self.assertAlmostEqual(mat.rho.calc({\"T\": 100}), self.polynomialEvaluation(self.basePolynomialMap, 100))\n\n        # test using kwargs for calc\n        self.assertAlmostEqual(mat.rho.calc(T=0), self.polynomialEvaluation(self.basePolynomialMap, 0))\n        self.assertAlmostEqual(mat.rho.calc(T=50), self.polynomialEvaluation(self.basePolynomialMap, 50))\n        self.assertAlmostEqual(mat.rho.calc(T=100), self.polynomialEvaluation(self.basePolynomialMap, 100))\n\n    def test_polynomialEqnFloatInt(self):\n        \"\"\"Evaluates a PolynomialFunction with floating point coefficients and integer point power terms.\"\"\"\n        coefficientsMap = {0: -2.523536, 1: 5.374489, 2: 4.897134}\n        data = 
{\"type\": \"symbolic\", \"equation\": self.createEqnPoly(coefficientsMap)}\n\n        mat = self._createFunction(data)\n        func = mat.rho\n\n        # test using input dict for calc\n        self.assertAlmostEqual(func.calc({\"T\": -100.0}), self.polynomialEvaluation(coefficientsMap, -100.0))\n        self.assertAlmostEqual(func.calc({\"T\": 0.0}), self.polynomialEvaluation(coefficientsMap, 0.0))\n        self.assertAlmostEqual(func.calc({\"T\": 100.0}), self.polynomialEvaluation(coefficientsMap, 100.0))\n        self.assertAlmostEqual(func.calc({\"T\": 500.0}), self.polynomialEvaluation(coefficientsMap, 500.0))\n\n        # test using kwargs for calc\n        self.assertAlmostEqual(func.calc(T=-100.0), self.polynomialEvaluation(coefficientsMap, -100.0))\n        self.assertAlmostEqual(func.calc(T=0.0), self.polynomialEvaluation(coefficientsMap, 0.0))\n        self.assertAlmostEqual(func.calc(T=100.0), self.polynomialEvaluation(coefficientsMap, 100.0))\n        self.assertAlmostEqual(func.calc(T=500.0), self.polynomialEvaluation(coefficientsMap, 500.0))\n\n    def test_polynomialEqnFloatFloat(self):\n        \"\"\"Evaluates a PolynomialFunction with floating point coefficients and floating point power terms.\"\"\"\n        coefficientsMap = {0.5: -2.5, 2.5: 5.389, 1.5: 4.375}\n        data = {\"type\": \"symbolic\", \"equation\": self.createEqnPoly(coefficientsMap)}\n\n        mat = self._createFunction(data, minT=0.0)\n        mat.name = self.testName\n        self.assertEqual(str(mat), f\"<Material {self.testName} <MaterialType Metal>>\")\n        func = mat.rho\n\n        self.assertAlmostEqual(func.calc({\"T\": 0.0}), self.polynomialEvaluation(coefficientsMap, 0.0))\n        self.assertAlmostEqual(func.calc({\"T\": 100.0}), self.polynomialEvaluation(coefficientsMap, 100.0))\n        self.assertAlmostEqual(func.calc({\"T\": 500.0}), self.polynomialEvaluation(coefficientsMap, 500.0))\n\n    def test_polynomialDiffFloatTypes(self):\n        \"\"\"Evaluates a 
PolynomialFunction with floating point coefficients power terms, checking exact values.\"\"\"\n        coefficientsMap = {0.5: -2.5, 2.5: 5.389, 1.5: 4.375}\n        data = {\"type\": \"symbolic\", \"equation\": self.createEqnPoly(coefficientsMap)}\n\n        mat = self._createFunction(data, minT=0.0)\n        self.assertAlmostEqual(mat.rho.calc({\"T\": np.float64(0.0)}), 0.0)\n        self.assertAlmostEqual(mat.rho.calc({\"T\": np.float64(100.0)}), 543250.0)\n        self.assertAlmostEqual(mat.rho.calc({\"T\": np.float64(500.0)}), 30174283.91217429)\n\n    def test_symbolicEqnError(self):\n        \"\"\"Ensure symbolic equations fail correctly when given empty or nonsense inputs.\"\"\"\n        # Leave out equation node\n        dataNoCoeff = {\"type\": \"symbolic\"}\n        with self.assertRaises(KeyError):\n            self._createFunction(dataNoCoeff)\n\n        # Provide invalid equation node.\n        dataBadCoeff = {\"type\": \"symbolic\", \"equation\": \"NOT AN EQUATION\"}\n        with self.assertRaises(ValueError):\n            self._createFunction(dataBadCoeff)\n\n    def test_powerEqn(self):\n        \"\"\"Evaluates a PowerLaw with floating point coefficients and exponents.\"\"\"\n        mat = self._createFunction(self.basePowerLawData)\n        func = mat.rho\n        self.assertAlmostEqual(func.calc({\"T\": 0}), self.powerLawEvaluation(self.basePowerLawTerms, 0))\n        self.assertAlmostEqual(func.calc({\"T\": 12.5}), self.powerLawEvaluation(self.basePowerLawTerms, 12.5))\n        self.assertAlmostEqual(func.calc({\"T\": 25}), self.powerLawEvaluation(self.basePowerLawTerms, 25))\n        self.assertAlmostEqual(func.calc({\"T\": 50}), self.powerLawEvaluation(self.basePowerLawTerms, 50))\n        self.assertAlmostEqual(func.calc({\"T\": 75}), self.powerLawEvaluation(self.basePowerLawTerms, 75))\n        self.assertAlmostEqual(func.calc({\"T\": 100}), self.powerLawEvaluation(self.basePowerLawTerms, 100))\n\n    def test_powerEqnAllInt(self):\n        
\"\"\"Evaluates a PowerLaw with integer coefficients and exponents.\"\"\"\n        coefficients = {\n            \"exponent\": 2,\n            \"inner adder\": 125,\n            \"outer multiplier\": 3,\n            \"intercept\": -2,\n        }\n        powerLawDataInt = {\n            \"type\": \"symbolic\",\n            \"equation\": self.createEqnPower(coefficients),\n        }\n\n        mat = self._createFunction(powerLawDataInt)\n        func = mat.rho\n        self.assertAlmostEqual(func.calc({\"T\": 0}), self.powerLawEvaluation(coefficients, 0))\n        self.assertAlmostEqual(func.calc({\"T\": 25}), self.powerLawEvaluation(coefficients, 25))\n        self.assertAlmostEqual(func.calc({\"T\": 50}), self.powerLawEvaluation(coefficients, 50))\n        self.assertAlmostEqual(func.calc({\"T\": 75}), self.powerLawEvaluation(coefficients, 75))\n        self.assertAlmostEqual(func.calc({\"T\": 100}), self.powerLawEvaluation(coefficients, 100))\n\n    def test_powerEqnFloatInt(self):\n        \"\"\"Evaluates a PowerLaw with a mixture of integer and floating point coefficients and exponents.\"\"\"\n        coefficients = {\n            \"exponent\": 2.5,\n            \"inner adder\": 125,\n            \"outer multiplier\": 3.14159,\n            \"intercept\": -2,\n        }\n        powerLawDataInt = {\n            \"type\": \"symbolic\",\n            \"equation\": self.createEqnPower(coefficients),\n        }\n\n        mat = self._createFunction(powerLawDataInt)\n        func = mat.rho\n        self.assertAlmostEqual(func.calc({\"T\": 0}), self.powerLawEvaluation(coefficients, 0))\n        self.assertAlmostEqual(func.calc({\"T\": 25}), self.powerLawEvaluation(coefficients, 25))\n        self.assertAlmostEqual(func.calc({\"T\": 50}), self.powerLawEvaluation(coefficients, 50))\n        self.assertAlmostEqual(func.calc({\"T\": 75}), self.powerLawEvaluation(coefficients, 75))\n        self.assertAlmostEqual(func.calc({\"T\": 100}), 
self.powerLawEvaluation(coefficients, 100))\n\n    def test_powerEqnNoInter(self):\n        \"\"\"Evaluates a PowerLaw with no intercept term.\"\"\"\n        coefficients = {\"exponent\": 2.0, \"inner adder\": 125.0, \"outer multiplier\": 3.4}\n        data = {\"type\": \"symbolic\", \"equation\": self.createEqnPower(coefficients)}\n        mat = self._createFunction(data)\n\n        # Intercept in self.powerLawEvaluation is 0.0 to reflect default value in matProps\n        self.assertAlmostEqual(mat.rho.calc({\"T\": 0}), self.powerLawEvaluation(coefficients, 0))\n        self.assertAlmostEqual(mat.rho.calc({\"T\": 25}), self.powerLawEvaluation(coefficients, 25))\n        self.assertAlmostEqual(mat.rho.calc({\"T\": 50}), self.powerLawEvaluation(coefficients, 50))\n        self.assertAlmostEqual(mat.rho.calc({\"T\": 75}), self.powerLawEvaluation(coefficients, 75))\n        self.assertAlmostEqual(mat.rho.calc({\"T\": 100}), self.powerLawEvaluation(coefficients, 100))\n\n    def test_powerEqnNoOuter(self):\n        \"\"\"Evaluates a PowerLaw with no outer multiplier term.\"\"\"\n        coefficients = {\"exponent\": 2.0, \"inner adder\": 125.0, \"intercept\": -2.5}\n        data = {\"type\": \"symbolic\", \"equation\": self.createEqnPower(coefficients)}\n\n        mat = self._createFunction(data)\n        func = mat.rho\n        # Outer multiplier in self.powerLawEvaluation is 1.0 to reflect default value in matProps\n        self.assertAlmostEqual(func.calc({\"T\": 0}), self.powerLawEvaluation(coefficients, 0))\n        self.assertAlmostEqual(func.calc({\"T\": 25}), self.powerLawEvaluation(coefficients, 25))\n        self.assertAlmostEqual(func.calc({\"T\": 50}), self.powerLawEvaluation(coefficients, 50))\n        self.assertAlmostEqual(func.calc({\"T\": 75}), self.powerLawEvaluation(coefficients, 75))\n        self.assertAlmostEqual(func.calc({\"T\": 100}), self.powerLawEvaluation(coefficients, 100))\n\n    def test_powerEqnNoOuterInter(self):\n        
\"\"\"Evaluates a PowerLaw with no outer multiplier or intercept term.\"\"\"\n        coefficients = {\"exponent\": 2.0, \"inner adder\": 125.0}\n        data = {\"type\": \"symbolic\", \"equation\": self.createEqnPower(coefficients)}\n\n        mat = self._createFunction(data)\n        func = mat.rho\n        self.assertAlmostEqual(func.calc({\"T\": 0}), self.powerLawEvaluation(coefficients, 0))\n        self.assertAlmostEqual(func.calc({\"T\": 25}), self.powerLawEvaluation(coefficients, 25))\n        self.assertAlmostEqual(func.calc({\"T\": 50}), self.powerLawEvaluation(coefficients, 50))\n        self.assertAlmostEqual(func.calc({\"T\": 75}), self.powerLawEvaluation(coefficients, 75))\n        self.assertAlmostEqual(func.calc({\"T\": 100}), self.powerLawEvaluation(coefficients, 100))\n\n    def test_constantsEval(self):\n        \"\"\"Evaluates a constant function for integer and floating point values.\"\"\"\n        mat = self._createFunction(self.baseConstantData)\n        func = mat.rho\n        self.assertAlmostEqual(func.calc({\"T\": 0}), 9123.5)\n        self.assertAlmostEqual(func.calc({\"T\": 12.5}), 9123.5)\n        self.assertAlmostEqual(func.calc({\"T\": 50}), 9123.5)\n        self.assertAlmostEqual(func.calc({\"T\": 100}), 9123.5)\n\n    def test_hyperbolicEqnEval(self):\n        \"\"\"Evaluates a HyperbolicFunction for integer and floating point values.\"\"\"\n        mat = self._createFunction(self.baseHyperbolicData)\n\n        # test using input dict for calc\n        self.assertAlmostEqual(mat.rho.calc({\"T\": 0}), self.hyperbolicEvaluation(self.baseHyperbolicTerms, 0))\n        self.assertAlmostEqual(mat.rho.calc({\"T\": 12.5}), self.hyperbolicEvaluation(self.baseHyperbolicTerms, 12.5))\n        self.assertAlmostEqual(mat.rho.calc({\"T\": 50}), self.hyperbolicEvaluation(self.baseHyperbolicTerms, 50))\n        self.assertAlmostEqual(mat.rho.calc({\"T\": 100}), self.hyperbolicEvaluation(self.baseHyperbolicTerms, 100))\n\n        # test using kwargs for 
calc\n        self.assertAlmostEqual(mat.rho.calc(T=0), self.hyperbolicEvaluation(self.baseHyperbolicTerms, 0))\n        self.assertAlmostEqual(mat.rho.calc(T=12.5), self.hyperbolicEvaluation(self.baseHyperbolicTerms, 12.5))\n        self.assertAlmostEqual(mat.rho.calc(T=50), self.hyperbolicEvaluation(self.baseHyperbolicTerms, 50))\n        self.assertAlmostEqual(mat.rho.calc(T=100), self.hyperbolicEvaluation(self.baseHyperbolicTerms, 100))\n\n    def test_hyperbolicEqnEval2(self):\n        \"\"\"Evaluates a HyperbolicFunction for a different set of floating point values.\"\"\"\n        coefficients = {\n            \"hyperbolic function\": \"tanh\",\n            \"intercept\": 3.829e8,\n            \"outer multiplier\": -4.672e8,\n            \"inner denominator\": 216.66,\n            \"inner adder\": -613.52,\n        }\n        data = {\"type\": \"symbolic\", \"equation\": self.createEqnHyper(coefficients)}\n        mat = self._createFunction(data)\n        func = mat.rho\n        expectedValue = self.hyperbolicEvaluation(coefficients, 500)\n        self.assertAlmostEqual(func.calc({\"T\": 500}), expectedValue, delta=expectedValue * 1e-5)\n"
  },
  {
    "path": "armi/matProps/tests/test_composition.py",
    "content": "# Copyright 2026 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Basic tests of the Composition class.\"\"\"\n\nimport os\nimport unittest\n\nfrom ruamel.yaml.constructor import DuplicateKeyError\n\nimport armi.matProps\nfrom armi.matProps.material import Material\n\n\nclass TestComposition(unittest.TestCase):\n    def setUp(self):\n        self.testName = self.id().split(\".\")[-1]\n        searchStr = \"test_\"\n        if self.testName.startswith(searchStr):\n            self.testName = self.testName[len(searchStr) :]\n\n    def _createFunction(self, compMap=None):\n        compValue = {}\n        if compMap is not None:\n            compValue = compMap\n        materialMap = {\n            \"file format\": \"TESTS\",\n            \"composition\": compValue,\n            \"material type\": \"Metal\",\n            \"density\": {\n                \"function\": {\n                    \"T\": {\"min\": 100.0, \"max\": 200.0},\n                    \"type\": \"symbolic\",\n                    \"equation\": 1.0,\n                }\n            },\n        }\n\n        mat = Material()\n        mat.loadNode(materialMap)\n\n        return mat\n\n    def test_compositionMissing(self):\n        materialMap = {\n            \"file format\": \"TESTS\",\n            \"material type\": \"Metal\",\n            \"density\": \"whatever\",\n        }\n\n        mat = Material()\n\n        with self.assertRaisesRegex(KeyError, \"Missing 
YAML node `composition`\"):\n            mat.loadNode(materialMap)\n\n    def test_compositionInvTuple(self):\n        # Invalid doesn't have two elements\n        badCompMap = {\"Fe\": [1.0]}\n        with self.assertRaisesRegex(\n            TypeError,\n            \"Composition values must be either a tuple of min/max values, or `balance`\",\n        ):\n            self._createFunction(badCompMap)\n\n    def test_compositionInvStr(self):\n        badCompMap = {\"a\": [0.5, 0.5], \"b\": \"remainder\"}\n        with self.assertRaisesRegex(\n            TypeError,\n            \"Composition values must be either a tuple of min/max values, or `balance`\",\n        ):\n            self._createFunction(badCompMap)\n\n    def test_compositionMissBalance(self):\n        compMap = {\"a\": [0.25, 0.26], \"b\": [0.3, 0.31], \"c\": [0.45, 0.46]}\n        with self.assertRaisesRegex(ValueError, \"exactly one balance element\"):\n            self._createFunction(compMap)\n\n    def test_compositionBalanceNum(self):\n        compMap = {\"a\": [15.0, 15.1], \"b\": \"balance\", \"c\": \"balance\"}\n        with self.assertRaisesRegex(ValueError, \"exactly one balance element\"):\n            self._createFunction(compMap)\n\n    def test_compositionBalance(self):\n        compMap = {\"a\": [15.0, 20.0], \"b\": [30.0, 35.0], \"c\": \"balance\"}\n        mat = self._createFunction(compMap)\n        mat.name = self.testName\n        self.assertEqual(str(mat), f\"<Material {self.testName} <MaterialType Metal>>\")\n        c_minValue, c_maxValue = None, None\n        sumMin, sumMax = 0.0, 0.0\n        for compElement in mat.composition:\n            if compElement.name != \"c\":\n                self.assertFalse(compElement.isBalance)\n                compValue = compMap.get(compElement.name)\n                self.assertIsNotNone(compValue)\n                self.assertAlmostEqual(compElement.minValue, compValue[0])\n                self.assertAlmostEqual(compElement.maxValue, 
compValue[1])\n                sumMin += compElement.minValue\n                sumMax += compElement.maxValue\n            else:\n                self.assertTrue(compElement.isBalance)\n                c_minValue = compElement.minValue\n                c_maxValue = compElement.maxValue\n\n        self.assertAlmostEqual(c_minValue, 100.0 - sumMax)\n        self.assertAlmostEqual(c_maxValue, 100.0 - sumMin)\n\n    def test_compositionBalance2(self):\n        compMap = {\n            \"a\": [10.0, 15.0],\n            \"b\": [20.1, 35.1],\n            \"c\": [30.2, 50.2],\n            \"d\": \"balance\",\n        }\n\n        mat = self._createFunction(compMap)\n        mat.name = self.testName\n        self.assertEqual(str(mat), f\"<Material {self.testName} <MaterialType Metal>>\")\n        sumMin = 0.0\n        d_minValue, d_maxValue = None, None\n        for compElement in mat.composition:\n            if compElement.name != \"d\":\n                self.assertFalse(compElement.isBalance)\n                compValue = compMap.get(compElement.name)\n                self.assertIsNotNone(compValue)\n                self.assertAlmostEqual(compElement.minValue, compValue[0])\n                self.assertAlmostEqual(compElement.maxValue, compValue[1])\n                sumMin += compElement.minValue\n            else:\n                self.assertTrue(compElement.isBalance)\n                d_minValue = compElement.minValue\n                d_maxValue = compElement.maxValue\n\n        self.assertAlmostEqual(d_minValue, 0.0)\n        self.assertAlmostEqual(d_maxValue, 100.0 - sumMin)\n\n    def test_compositionMinValue(self):\n        compMap = {\"a\": [-1.0, 20.0], \"b\": \"balance\"}\n        with self.assertRaisesRegex(ValueError, \"negative minimum\"):\n            self._createFunction(compMap)\n\n    def test_compositionMaxValue(self):\n        compMap = {\"a\": [15.0, 14.9], \"b\": \"balance\"}\n        with self.assertRaisesRegex(ValueError, \"max < min\"):\n            
self._createFunction(compMap)\n\n    def test_compositionMaxValue2(self):\n        compMap = {\"a\": [15.0, 100.1], \"b\": \"balance\"}\n        with self.assertRaisesRegex(ValueError, \"max > 100.0\"):\n            self._createFunction(compMap)\n\n    def test_compositionMinSum(self):\n        compMap = {\n            \"a\": [30.0, 30.1],\n            \"b\": [40.1, 40.2],\n            \"c\": [50.2, 50.3],\n            \"d\": \"balance\",\n        }\n        with self.assertRaisesRegex(ValueError, \"minimum composition summation greater than 100.0\"):\n            self._createFunction(compMap)\n\n    def test_compositionDuplicate(self):\n        duplicateTestFile = os.path.join(\n            os.path.dirname(os.path.realpath(__file__)),\n            \"invalidTestFiles\",\n            \"duplicateComposition.yaml\",\n        )\n        with self.assertRaises(DuplicateKeyError):\n            armi.matProps.loadMaterial(duplicateTestFile)\n"
  },
  {
    "path": "armi/matProps/tests/test_constituent.py",
    "content": "# Copyright 2026 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Basic tests of the Constituent class.\"\"\"\n\nimport unittest\n\nfrom armi.matProps.constituent import Constituent\n\n\nclass TestConstituent(unittest.TestCase):\n    def test_errorHandling(self):\n        c = Constituent(\"Fe\", 10.0, 25.0, False)\n        self.assertEqual(str(c), \"<Constituent Fe min: 10.0 max: 25.0>\")\n\n        c = Constituent(\"Fe\", 0.0, 99.0, True)\n        self.assertEqual(str(c), \"<Constituent Fe min: 0.0 max: 99.0 computed based on balance>\")\n\n        with self.assertRaises(ValueError):\n            Constituent(\"Fe\", -10.0, 25.0, False)\n\n        with self.assertRaises(ValueError):\n            Constituent(\"Fe\", 50.0, 101.0, False)\n\n        with self.assertRaises(ValueError):\n            Constituent(\"Fe\", 50.0, 1.0, False)\n\n    def test_parseComposition(self):\n        # test we fail correctly when providing invalid inputs\n        with self.assertRaises(ValueError):\n            Constituent.parseComposition({})\n\n        with self.assertRaises(ValueError):\n            node = {\"Fe\": (0.1, 0.25)}\n            Constituent.parseComposition(node)\n\n        # a simple Iron-only material\n        node = {\"Fe\": \"balance\"}\n        c = Constituent.parseComposition(node)\n        self.assertEqual(len(c), 1)\n        self.assertEqual(c[0].maxValue, 100.0)\n        self.assertTrue(c[0].isBalance)\n\n        # a 
hypothetical steel-like material\n        node = {\"C\": (0.0, 10.0), \"Cr\": (0.0, 1.0), \"Fe\": \"balance\"}\n        c = Constituent.parseComposition(node)\n        self.assertEqual(len(c), 3)\n        self.assertEqual(c[0].maxValue, 10.0)\n        self.assertFalse(c[0].isBalance)\n        self.assertEqual(c[2].maxValue, 100.0)\n        self.assertTrue(c[2].isBalance)\n"
  },
  {
    "path": "armi/matProps/tests/test_functions.py",
    "content": "# Copyright 2026 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Unit tests for the Function class.\"\"\"\n\nfrom armi.matProps.material import Material\nfrom armi.matProps.tests import MatPropsFunTestBase\n\n\nclass TestFunctions(MatPropsFunTestBase):\n    \"\"\"Class which encapsulates the unit tests data and methods to test the matProps Function class.\"\"\"\n\n    @classmethod\n    def setUpClass(cls):\n        super().setUpClass()\n\n        cls.baseConstantData = {\"type\": \"symbolic\", \"equation\": \"9123.5\"}\n\n    def test_getReferences(self):\n        mat = self._createFunction(self.baseConstantData)\n        mat.rho._references = [\"1\", \"2\"]\n        self.assertEqual(mat.rho.references[0], \"1\")\n        self.assertEqual(mat.rho.references[1], \"2\")\n\n    def test_datafilesVarVals(self):\n        \"\"\"\n        Test to make sure that parsing variable values return the expected values when parsing \"max\" and \"min\" nodes for\n        the T variable.\n        \"\"\"\n        mat = self._createFunction(self.baseConstantData)\n        mat.name = self.testName\n        self.assertEqual(str(mat), f\"<Material {self.testName} <MaterialType Metal>>\")\n        density = mat.rho\n        self.assertEqual(density.getMinBound(\"T\"), -100.0)\n        self.assertEqual(density.getMaxBound(\"T\"), 500.0)\n\n    def test_datafilesMaxVar(self):\n        \"\"\"Test that makes sure a ValueError is thrown if the max 
of a variable is less than the min.\"\"\"\n        with self.assertRaises(ValueError):\n            self._createFunction(self.baseConstantData, maxT=-101.0)\n\n    def test_datafilesInvType(self):\n        \"\"\"Test that makes sure a KeyError is thrown if an unsupported function type is provided.\"\"\"\n        data = {\"type\": \"fake function\"}\n        with self.assertRaisesRegex(KeyError, \"fake function\"):\n            self._createFunction(data)\n\n    def test_refTempEval(self):\n        \"\"\"Test that a function with a reference temperature correctly parses and returns the expected value.\"\"\"\n        testData = self.baseConstantData.copy()\n        testData.update({\"reference temperature\": 200.0})\n        mat = self._createFunction(testData)\n        func = mat.rho\n        self.assertAlmostEqual(func.getReferenceTemperature(), 200.0)\n\n    def test_refTempMissing(self):\n        \"\"\"Test that a ValueError is thrown when accessing a reference temperature value that is not provided.\"\"\"\n        mat = self._createFunction(self.baseConstantData)\n        func = mat.rho\n        with self.assertRaisesRegex(ValueError, \"Reference temperature is undefined\"):\n            func.getReferenceTemperature()\n\n    def test_refTempInvalid(self):\n        \"\"\"Test to make sure that a ValueError is thrown if the provided reference temperature value is invalid.\"\"\"\n        testData = self.baseConstantData.copy()\n        testData.update({\"reference temperature\": -273.25})\n        mat = self._createFunction(testData)\n        func = mat.rho\n        with self.assertRaisesRegex(ValueError, \"Reference temperature is undefined\"):\n            func.getReferenceTemperature()\n\n    def test_independentVars(self):\n        mat = self._createFunction(self.baseConstantData)\n        fun = mat.rho\n\n        self.assertEqual(len(fun.independentVars), 1)\n        self.assertEqual(fun.getIndependentVariables(), [\"T\"])\n        
self.assertEqual(fun.getMinBound(\"T\"), -100)\n        self.assertEqual(fun.getMaxBound(\"T\"), 500)\n\n        with self.assertRaises(KeyError):\n            fun.getMinBound(\"X\")\n\n        with self.assertRaises(KeyError):\n            fun.getMaxBound(\"Y\")\n\n    def test_calcEdgeCases(self):\n        mat = self._createFunction(self.baseConstantData)\n        fun = mat.rho\n\n        with self.assertRaises(ValueError):\n            fun.calc({\"T\": 200}, T=300)\n\n        with self.assertRaises(ValueError):\n            fun.calc()\n\n        with self.assertRaises(KeyError):\n            fun.calc({\"Z\": 200})\n\n        # whoops, I forgot to declare a \"max\" value\n        materialData = {\n            \"file format\": \"TESTS\",\n            \"composition\": {\"Fe\": \"balance\"},\n            \"material type\": \"Metal\",\n            \"density\": {\"function\": {\"T\": {\"min\": 1.0}, \"type\": \"symbolic\", \"equation\": 1.0}},\n        }\n\n        mat = Material()\n        with self.assertRaises(KeyError):\n            mat.loadNode(materialData)\n\n    def test_references(self):\n        materialData = {\n            \"file format\": \"TESTS\",\n            \"composition\": {\"Fe\": \"balance\"},\n            \"material type\": \"Metal\",\n            \"density\": {\n                \"function\": {\n                    \"T\": {\"min\": 1.0, \"max\": 10.0},\n                    \"type\": \"symbolic\",\n                    \"equation\": 1.0,\n                },\n                \"references\": [{\"ref\": \"things\", \"type\": \"open literature\"}],\n            },\n        }\n\n        mat = Material()\n        mat.loadNode(materialData)\n        self.assertEqual(len(mat.rho.references), 1)\n        self.assertEqual(mat.rho.references[0].getRef(), \"things\")\n\n    def test_tabulatedData(self):\n        tableData = [\n            [300, 25],\n            [400, 26.28],\n            [500, 26.26],\n            [600, 25.89],\n            [700, 25.19],\n    
        [800, 25.10],\n            [900, 26.32],\n        ]\n\n        materialData = {\n            \"file format\": \"TESTS\",\n            \"composition\": {\"Fe\": \"balance\"},\n            \"material type\": \"Metal\",\n            \"density\": {\n                \"function\": {\n                    \"T\": {\"min\": 1.0, \"max\": 10.0},\n                    \"type\": \"symbolic\",\n                    \"equation\": 1.0,\n                },\n                \"tabulated data\": tableData,\n            },\n        }\n\n        mat = Material()\n        mat.loadNode(materialData)\n        self.assertEqual(len(mat.rho.references), 0)\n        self.assertEqual(len(mat.rho.tableData._values), 7)\n"
  },
  {
    "path": "armi/matProps/tests/test_hashing.py",
    "content": "# Copyright 2026 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Program that runs tests for the TestHashValues class.\"\"\"\n\nimport os\nimport unittest\n\nimport armi.matProps\n\n\nclass TestHashValues(unittest.TestCase):\n    \"\"\"Testing the material hashing logic.\"\"\"\n\n    @classmethod\n    def setUpClass(cls):\n        cls.testDir = os.path.dirname(__file__)\n\n    def test_hash(self):\n        testFileA = os.path.join(self.testDir, \"testDir1\", \"a.yaml\")\n        testFileB = os.path.join(self.testDir, \"testMaterialsData\", \"materialB.yaml\")\n\n        matA = armi.matProps.loadMaterial(testFileA, False)\n        matB = armi.matProps.loadMaterial(testFileB, False)\n\n        hA = matA.hash()\n        hB = matB.hash()\n\n        # NOTE: We cannot check exact hashes, because of OS differences\n        self.assertEqual(len(hA), 40)\n        self.assertEqual(len(hB), 40)\n        self.assertNotEqual(hA, hB)\n"
  },
  {
    "path": "armi/matProps/tests/test_interpolationFunctions.py",
    "content": "# Copyright 2026 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Program that runs all of the tests contained in the TestInterpolationFunctions class.\"\"\"\n\nimport unittest\n\nimport numpy as np\nfrom scipy import interpolate\n\nfrom armi.matProps.interpolationFunctions import findIndex, linearLinear, logLinear\n\n\nclass TestInterpolationFunctions(unittest.TestCase):\n    \"\"\"Class which creates tests for the matProps InterpolationFunctions files.\"\"\"\n\n    def test_findIndex(self):\n        x = [2, 4, 6, 8]\n        self.assertEqual(findIndex(2, x), 0)\n        self.assertEqual(findIndex(3, x), 0)\n        self.assertEqual(findIndex(3.14, x), 0)\n        self.assertEqual(findIndex(4, x), 0)  # NOTE: This is 0, not 1.\n        self.assertEqual(findIndex(4.001, x), 1)\n        self.assertEqual(findIndex(6, x), 1)  # NOTE: This is 1, not 2.\n        self.assertEqual(findIndex(6.2, x), 2)\n\n        with self.assertRaises(ValueError):\n            findIndex(-9, x)\n\n        with self.assertRaises(ValueError):\n            findIndex(9, x)\n\n    def test_linearLinear(self):\n        \"\"\"\n        Test which validates the values returned from the linear-linear interpolation method.\n\n        Uses numpy linspace function to generate values at which interpolation will be performed.\n        \"\"\"\n        x = np.arange(10)\n        y = [1.0 + xx + xx**2 for xx in range(10)]\n        f = interpolate.interp1d(x, y, 
bounds_error=False)\n        for nn in np.linspace(0, 9, 20):\n            self.assertTrue(np.allclose(f(nn), linearLinear(nn, x.tolist(), y)))\n\n    def test_linearLinearInterpolation(self):\n        \"\"\"\n        Duplicate test validating that the correct values are returned from a linear-linear interpolation.\n\n        Differs from test_linearLinear by constructing interpolation points using standard lists instead of numpy\n        linspace.\n        \"\"\"\n        x = [0.0, 1.0]\n        y = [1.0, 2.0]\n        for xx, yy in [(0.0, 1.0), (0.5, 1.5), (1.0, 2.0)]:\n            self.assertAlmostEqual(yy, linearLinear(xx, x, y))\n\n    def test_linearLinearExtrapolation(self):\n        \"\"\"Check to make sure a ValueError is thrown if attempting an interpolation outside the function domain.\"\"\"\n        x = [0.0, 1.0]\n        y = [1.0, 2.0]\n        with self.assertRaisesRegex(ValueError, \"out of bounds\"):\n            linearLinear(-2.0, x, y)\n\n    def test_logLinear(self):\n        \"\"\"Test that validates the values returned from the log-linear interpolation function.\"\"\"\n        x = np.arange(1.0, 11.0)\n        y = -42.0 + x + x**-2\n        n_vals = np.interp(np.log10(np.linspace(1, 10, 20)), np.log10(x), y)\n        m_vals = [logLinear(nn, x, y) for nn in np.linspace(1, 10, 20)]\n        self.assertTrue(\n            np.allclose(n_vals, m_vals),\n            f\"np:  {n_vals}\\nmatProps:{np.array(m_vals)}\",\n        )\n\n    def test_logLinearExtrapolation(self):\n        \"\"\"A ValueError should be thrown if performing a log-linear interpolation outside the function domain.\"\"\"\n        x = np.arange(1.0, 11.0)\n        y = -42.0 + x + x**-2\n        with self.assertRaisesRegex(ValueError, \"out of bounds\"):\n            logLinear(0.5, x, y)\n"
  },
  {
    "path": "armi/matProps/tests/test_material.py",
    "content": "# Copyright 2026 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Program that runs all of the tests in the TestMapPropsMaterial class.\"\"\"\n\nimport os\nimport unittest\n\nimport armi.matProps\nfrom armi.matProps.material import Material\nfrom armi.matProps.materialType import MaterialType\n\nTHIS_DIR = os.path.dirname(__file__)\n\n\nclass TestMapPropsMaterial(unittest.TestCase):\n    \"\"\"Class which tests the functionality of the matProps Material class.\"\"\"\n\n    @staticmethod\n    def _createFunction(materialType):\n        \"\"\"\n        Helper function used to construct a minimum viable YAML file for tests.\n\n        Parameters\n        ----------\n        fileName\n            String containing name of yaml file being written\n        materialType\n            String containing the \"material type\" node value\n        \"\"\"\n        testNode = {\n            \"file format\": \"TESTS\",\n            \"composition\": {\"Fe\": \"balance\"},\n            \"material type\": materialType,\n            \"density\": {\n                \"function\": {\n                    \"T\": {\n                        \"min\": 100.0,\n                        \"max\": 200.0,\n                    },\n                    \"type\": \"symbolic\",\n                    \"equation\": 1.0,\n                }\n            },\n        }\n\n        mat = Material()\n        mat.loadNode(testNode)\n\n        return mat\n\n    def 
test_getValidFileFormatVersions(self):\n        versions = armi.matProps.Material.getValidFileFormatVersions()\n        self.assertGreater(len(versions), 1)\n        for version in versions:\n            if type(version) is not float:\n                self.assertEqual(version, \"TESTS\")\n\n    def test_loadFile(self):\n        mat = armi.matProps.Material()\n        self.assertEqual(str(mat), \"<Material None None>\")\n        fPath = os.path.join(THIS_DIR, \"testMaterialsData\", \"materialA.yaml\")\n        self.assertEqual(len(sorted(armi.matProps.materials.keys())), 0)\n        mat.loadFile(fPath)\n        self.assertEqual(len(sorted(armi.matProps.materials.keys())), 0)\n\n    def test_datafilesType(self):\n        materialTypeNames = [\n            \"Fuel\",\n            \"Metal\",\n            \"Fluid\",\n            \"Ceramic\",\n            \"ASME2015\",\n            \"ASME2017\",\n            \"ASME2019\",\n        ]\n\n        for matTypeName in materialTypeNames:\n            parseType = self._createFunction(matTypeName).materialType\n            typeIdx = MaterialType.types[matTypeName]\n            expectedType = MaterialType(typeIdx)\n            self.assertEqual(parseType, expectedType)\n\n    def test_invalidFileFormat(self):\n        fPath = os.path.join(THIS_DIR, \"invalidTestFiles\", \"badFileFormat.YAML\")\n        mat = armi.matProps.Material()\n\n        with self.assertRaises(ValueError):\n            mat.loadFile(fPath)\n\n    def test_datafilesInvType(self):\n        with self.assertRaisesRegex(KeyError, \"Invalid material type\"):\n            self._createFunction(\"Solid\")\n\n    def test_saveLogic(self):\n        mat = self._createFunction(\"Metal\")\n        self.assertFalse(mat.saved())\n        mat.save()\n        self.assertTrue(mat.saved())\n"
  },
  {
    "path": "armi/matProps/tests/test_materialType.py",
    "content": "# Copyright 2026 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests the MaterialType class.\"\"\"\n\nimport unittest\n\nfrom armi.matProps.materialType import MaterialType\n\n\nclass TestMaterialType(unittest.TestCase):\n    def test_fromString(self):\n        mt = MaterialType.fromString(\"Fuel\")\n        self.assertEqual(mt._value, 1)\n\n        mt = MaterialType.fromString(\"Metal\")\n        self.assertEqual(mt._value, 2)\n\n        mt = MaterialType.fromString(\"Fluid\")\n        self.assertEqual(mt._value, 4)\n\n    def test_repr(self):\n        mt = MaterialType.fromString(\"Fuel\")\n        self.assertEqual(str(mt), \"<MaterialType Fuel>\")\n\n        mt = MaterialType.fromString(\"Metal\")\n        self.assertEqual(str(mt), \"<MaterialType Metal>\")\n\n        mt = MaterialType.fromString(\"Fluid\")\n        self.assertEqual(str(mt), \"<MaterialType Fluid>\")\n\n    def test_equality(self):\n        mt1 = MaterialType(1)\n        mt11 = MaterialType(1)\n        mt4 = MaterialType(4)\n\n        self.assertTrue(mt1 == mt1)\n        self.assertTrue(mt1 == mt11)\n        self.assertFalse(mt1 == mt4)\n        self.assertFalse(mt11 == mt4)\n\n        self.assertTrue(mt1 == 1)\n        self.assertTrue(mt11 == 1)\n        self.assertFalse(mt1 == 4)\n        self.assertFalse(mt11 == 4)\n\n        with self.assertRaises(TypeError):\n            self.assertTrue(mt1 == \"1\")\n"
  },
  {
    "path": "armi/matProps/tests/test_parsing.py",
    "content": "# Copyright 2026 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Test YAML parsers for all files in the matProps data directory to ensure that there are no parsing errors.\"\"\"\n\nimport os\nimport tempfile\nimport unittest\nfrom os import path\n\nimport armi.matProps\n\n\nclass TestParsing(unittest.TestCase):\n    \"\"\"Class which tests the parsing and material library loading functions of matProps.\"\"\"\n\n    @property\n    def dirname(self):\n        \"\"\"Provide the directory where this file is located.\"\"\"\n        return path.dirname(path.realpath(__file__))\n\n    @classmethod\n    def setUpClass(cls):\n        cls.dummyDataPath = path.join(path.dirname(path.realpath(__file__)), \"testMaterialsData\")\n        cls.dummyMatFiles = {}\n        for root, _, files in os.walk(cls.dummyDataPath):\n            for fileName in files:\n                if fileName.lower().endswith((\".yaml\", \".yml\")):\n                    cls.dummyMatFiles[fileName] = os.path.join(root, fileName)\n\n        armi.matProps.clear()\n\n    def tearDown(self):\n        armi.matProps.clear()\n\n    def test_datafilesMatOwner(self):\n        for matFile, matPath in self.dummyMatFiles.items():\n            matNam = path.splitext(matFile)[0]\n            # the default behavior is loadMaterial(matPath, false)\n            m = armi.matProps.loadMaterial(matPath)\n            self.assertIsNotNone(m)\n            with 
self.assertRaisesRegex(KeyError, f\"No material named `{matNam}` was loaded within loaded data.\"):\n                armi.matProps.getMaterial(matNam)\n            m = armi.matProps.loadMaterial(self.dummyMatFiles[matFile], False)\n            self.assertIsNotNone(m)\n            with self.assertRaisesRegex(KeyError, f\"No material named `{matNam}` was loaded within loaded data.\"):\n                armi.matProps.getMaterial(matNam)\n\n            # test the pass-through function load_material, instead of the preferred loadMaterial\n            m = armi.matProps.load_material(self.dummyMatFiles[matFile], True)\n            self.assertIsNotNone(m)\n            m = armi.matProps.getMaterial(matNam)\n            self.assertIsNotNone(m)\n\n    def test_multiDataLoadingLoadingAll(self):\n        armi.matProps.loadAll(self.dummyDataPath)\n        self.assertEqual(len(self.dummyMatFiles), len(armi.matProps.loadedMaterials()))\n\n        armi.matProps.clear()\n        self.assertEqual(0, len(armi.matProps.loadedMaterials()))\n\n    def test_loadSafe(self):\n        armi.matProps.clear()\n        self.assertEqual(0, len(armi.matProps.loadedMaterials()))\n\n        # verify that it is safe to call loadSafe() multiple times in a row\n        for _ in range(3):\n            armi.matProps.loadSafe(self.dummyDataPath)\n            self.assertEqual(len(self.dummyMatFiles), len(armi.matProps.loadedMaterials()))\n\n        # verify the correct behavior if a bad directory is provided\n        badDir = \"does_not_exist_2924\"\n        with self.assertRaisesRegex(FileNotFoundError, f\"Directory {badDir} not found\"):\n            # test with the pass through \"load_safe\", instead of the preferred loadSafe\n            armi.matProps.load_safe(badDir)\n\n    def test_dataLoadingPrioSameDir(self):\n        armi.matProps.loadAll(self.dummyDataPath)\n        with self.assertRaises(KeyError):\n            armi.matProps.loadAll(self.dummyDataPath)\n\n        # bonus test of getHashes\n      
  hashes = armi.matProps.getHashes()\n        self.assertGreater(len(hashes), 3)\n        for h in hashes:\n            self.assertGreater(len(h), 8)\n            self.assertIsInstance(h, str)\n\n    def test_datafilesBadPath(self):\n        badDir = \"nopity-nopers-missing\"\n        with self.assertRaisesRegex(FileNotFoundError, f\"Directory {badDir} not found\"):\n            armi.matProps.loadAll(badDir)\n\n        with self.assertRaisesRegex(NotADirectoryError, \"Input path\"):\n            armi.matProps.loadAll(path.abspath(__file__))\n\n        with tempfile.TemporaryDirectory() as tmpDirName:\n            armi.matProps.loadAll(tmpDirName)\n\n    def test_multiDataLoadingMultidir(self):\n        \"\"\"Tests loading multiple data directories.\n\n        Load all files present in the following subdirectories of the matProps repository: tests/testDir1 and\n        tests/testDir2.\n        \"\"\"\n        dir1 = path.join(self.dirname, \"testDir1\")\n        dir2 = path.join(self.dirname, \"testDir2\")\n\n        # Load the two directories\n        armi.matProps.loadAll(dir1)\n        armi.matProps.loadAll(dir2)\n\n        # Check that the two directories are in loaded materials\n        loadList = armi.matProps.get_loaded_root_dirs()\n        self.assertTrue(dir1 in loadList)\n        self.assertTrue(dir2 in loadList)\n        self.assertTrue(len(loadList) == 2)\n\n        # Create list of file names in two directories. 
They are unique\n        fileSet = set()\n        for fileName in os.listdir(dir1):\n            fileSet.add(path.splitext(fileName)[0])\n        for fileName in os.listdir(dir2):\n            fileSet.add(path.splitext(fileName)[0])\n\n        materialSet = set()\n        for material in armi.matProps.loadedMaterials():\n            materialSet.add(material.name)\n\n        self.assertTrue(fileSet == materialSet)\n\n    def test_dataLoadingPrioDiffDir(self):\n        \"\"\"\n        Tests that an error is raised for loading a material twice from different directories.\n\n        Attempts to load all files present in the following subdirectories of the matProps repository: tests/testDir1\n        and tests/testDir3. Though that includes some duplicates that should raise an error.\n        \"\"\"\n        dir1 = path.join(self.dirname, \"testDir1\")\n        dir3 = path.join(self.dirname, \"testDir3\")\n\n        armi.matProps.loadAll(dir1)\n        with self.assertRaisesRegex(KeyError, \"already exists\"):\n            armi.matProps.loadAll(dir3)\n\n        matA = armi.matProps.getMaterial(\"a\")\n        density = matA.rho\n        # Will evaluate to 1.0 if we have the data loaded from testDir1/a.yaml.\n        # If we load from testDir3/a.yaml it will have a different value\n        self.assertAlmostEqual(density.calc({\"T\": 150.0}), 1.0)\n        self.assertAlmostEqual(density.calc(T=150.0), 1.0)\n\n    def test_datafilesGetMat(self):\n        \"\"\"\n        Test a material retrieved by getMaterial(name) is the same as another material with the same name.\n\n        Also tests trying to access an unknown material.\n        \"\"\"\n        # test the deprecated \"load_all\", that is just a pass-through for \"loadAll\"\n        armi.matProps.load_all(self.dummyDataPath)\n        # test with the pass-through loaded_materials instead of the preferred loadedMaterials\n        for mat in armi.matProps.loaded_materials():\n            self.assertEqual(mat, 
armi.matProps.getMaterial(mat.name))\n\n        with self.assertRaisesRegex(KeyError, \"No material named `Fahrvergnugen` was loaded\"):\n            # test with the pass-through get_material instead of the preferred getMaterial\n            armi.matProps.get_material(\"Fahrvergnugen\")\n"
  },
  {
    "path": "armi/matProps/tests/test_performance.py",
    "content": "# Copyright 2026 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Test rough matProps performance timing.\"\"\"\n\nimport copy\nimport os\nimport pickle\nimport timeit\nimport unittest\n\nimport armi.matProps\n\n# NOTE: This is a sketchy magic number for testing that are heavily machine dependent.\n_LIMIT_SECONDS = 15\n\n\nclass TestPerformance(unittest.TestCase):\n    \"\"\"\n    The tests in this class are an early warning of matProps performance changes. It tests common operations that are\n    done with matProps to ensure their execution time remains in the correct ballpark.\n    \"\"\"\n\n    def test_load(self):\n        \"\"\"Tests the speed of loading a set of material files.\"\"\"\n        armi.matProps.clear()\n\n        testFiles = os.path.join(os.path.dirname(__file__), \"testMaterialsData\")\n\n        t = timeit.timeit(lambda: (armi.matProps.loadAll(testFiles), armi.matProps.clear()), number=10)\n\n        self.assertLess(t, _LIMIT_SECONDS, msg=\"matProps material loading takes too long to execute.\")\n\n    def test_pickle(self):\n        \"\"\"Tests the speed of pickling a set of material files. 
Pickling is important for multiprocessing.\"\"\"\n        armi.matProps.clear()\n\n        # This directory's material has many properties so it is more representative for pickle size.\n        testFiles = os.path.join(os.path.dirname(__file__), \"testDir4\")\n        armi.matProps.loadAll(testFiles)\n        mat = armi.matProps.getMaterial(\"sampleProperty\")\n\n        t = timeit.timeit(lambda: pickle.loads(pickle.dumps(mat)), number=100)\n\n        self.assertLess(t, _LIMIT_SECONDS, msg=\"matProps material pickling takes too long to execute.\")\n\n    def test_calc(self):\n        \"\"\"Tests the speed of calculating a property value.\"\"\"\n        armi.matProps.clear()\n\n        testFiles = os.path.join(os.path.dirname(__file__), \"testMaterialsData\")\n        armi.matProps.loadAll(testFiles)\n        # This material's density is a linear function.\n        mat = armi.matProps.getMaterial(\"materialA\")\n        prop = mat.rho\n\n        t = timeit.timeit(lambda: prop.calc({\"T\": 300}), number=10000)\n\n        self.assertLess(t, _LIMIT_SECONDS, msg=\"matProps material calculation takes too long to execute.\")\n\n    def test_deepcopy(self):\n        \"\"\"\n        Tests the speed of deepcopying a material. Copying is important for copying other objects that may be\n        referencing a matProps material.\n        \"\"\"\n        armi.matProps.clear()\n\n        # This directory's material has many properties so it is more representative for copy size.\n        testFiles = os.path.join(os.path.dirname(__file__), \"testDir4\")\n        armi.matProps.loadAll(testFiles)\n        mat = armi.matProps.getMaterial(\"sampleProperty\")\n\n        t = timeit.timeit(lambda: copy.deepcopy(mat), number=100)\n\n        self.assertLess(t, _LIMIT_SECONDS, msg=\"matProps material copying takes too long to execute.\")\n"
  },
  {
    "path": "armi/matProps/tests/test_piecewiseFunction.py",
    "content": "# Copyright 2026 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests related to piecewise functions.\"\"\"\n\nfrom armi.matProps.material import Material\nfrom armi.matProps.tests import MatPropsFunTestBase\n\n\nclass TestPiecewiseFunction(MatPropsFunTestBase):\n    \"\"\"Tests related to piecewise functions.\"\"\"\n\n    @classmethod\n    def setUpClass(cls):\n        super().setUpClass()\n\n        cls.basePiecewiseData = {\n            \"type\": \"piecewise\",\n            \"T\": {\n                \"min\": 0,\n                \"max\": 100,\n            },\n            \"functions\": [\n                {\n                    \"function\": {\n                        \"T\": {\"min\": 0, \"max\": 25.4},\n                        \"type\": \"symbolic\",\n                        \"equation\": \"10\",\n                    },\n                    \"tabulated data\": None,\n                },\n                {\n                    \"function\": {\n                        \"T\": {\"min\": 25.4, \"max\": 50},\n                        \"type\": \"symbolic\",\n                        \"equation\": \"99\",\n                    },\n                    \"tabulated data\": None,\n                },\n                {\n                    \"function\": {\n                        \"T\": {\"min\": 50, \"max\": 100},\n                        \"type\": \"symbolic\",\n                        \"equation\": \"-99\",\n                    
},\n                    \"tabulated data\": None,\n                },\n            ],\n        }\n\n    def test_piecewiseEqnEval(self):\n        \"\"\"Tests the parsing of a PiecewiseFunction and make sure it evaluates at the appropriate sub function.\"\"\"\n        mat = self._createFunction(self.basePiecewiseData)\n        func = mat.rho\n        self.assertIn(\"PiecewiseFunction\", str(func))\n        self.assertAlmostEqual(func.calc({\"T\": 0}), 10)\n        self.assertAlmostEqual(func.calc({\"T\": 25.4}), 10)\n        self.assertAlmostEqual(func.calc({\"T\": 25.41}), 99)\n        self.assertAlmostEqual(func.calc({\"T\": 50}), 99)\n        self.assertAlmostEqual(func.calc({\"T\": 50.1}), -99)\n        self.assertAlmostEqual(func.calc({\"T\": 100}), -99)\n\n        func.clear()\n        with self.assertRaises(ValueError):\n            func.calc({\"T\": 0})\n\n    def test_piecewiseEqnGap(self):\n        \"\"\"Test that PiecewiseFunction evaluates correctly with gaps.\"\"\"\n        data = {\n            \"type\": \"piecewise\",\n            \"functions\": [\n                {\n                    \"function\": {\n                        \"T\": {\"min\": 0, \"max\": 20},\n                        \"type\": \"symbolic\",\n                        \"equation\": \"10\",\n                    },\n                    \"tabulated data\": None,\n                },\n                {\n                    \"function\": {\n                        \"T\": {\"min\": 30, \"max\": 50},\n                        \"type\": \"symbolic\",\n                        \"equation\": \"99\",\n                    },\n                    \"tabulated data\": None,\n                },\n                {\n                    \"function\": {\n                        \"T\": {\"min\": 50, \"max\": 100},\n                        \"type\": \"symbolic\",\n                        \"equation\": \"-99\",\n                    },\n                    \"tabulated data\": None,\n                },\n           
 ],\n        }\n\n        mat = self._createFunction(data)\n        func = mat.rho\n        with self.assertRaisesRegex(ValueError, \"PiecewiseFunction error, could not evaluate\"):\n            func.calc({\"T\": -1.0})\n\n        with self.assertRaisesRegex(ValueError, \"PiecewiseFunction error, could not evaluate\"):\n            func.calc({\"T\": 25.0})\n\n        with self.assertRaisesRegex(ValueError, \"PiecewiseFunction error, could not evaluate\"):\n            func.calc({\"T\": 101.0})\n\n        self.assertAlmostEqual(func.calc(T=0), 10)\n        self.assertAlmostEqual(func.calc(T=10), 10)\n        self.assertAlmostEqual(func.calc(T=20), 10)\n        self.assertAlmostEqual(func.calc(T=30), 99)\n        self.assertAlmostEqual(func.calc(T=40), 99)\n        self.assertAlmostEqual(func.calc(T=50), 99)\n        self.assertAlmostEqual(func.calc(T=75), -99)\n        self.assertAlmostEqual(func.calc(T=100), -99)\n\n    def test_piecewiseEqnPoly(self):\n        \"\"\"Test that makes a PiecewiseFunction composed of multiple PolynomialFunctions.\"\"\"\n        poly1CoMap = {0: -2.5, 1: 5, 2: 4}\n        poly2CoMap = {0: 3.5, 1: 3, 2: -2, 3: 1}\n        poly3CoMap = {0: 4.5, 1: -2, 2: 3, 3: -2, 4: 1}\n        data = {\n            \"type\": \"piecewise\",\n            \"functions\": [\n                {\n                    \"function\": {\n                        \"T\": {\"min\": -100, \"max\": 100},\n                        \"type\": \"symbolic\",\n                        \"equation\": self.createEqnPoly(poly1CoMap),\n                    },\n                    \"tabulated data\": None,\n                },\n                {\n                    \"function\": {\n                        \"T\": {\"min\": 100, \"max\": 300},\n                        \"type\": \"symbolic\",\n                        \"equation\": self.createEqnPoly(poly2CoMap),\n                    },\n                    \"tabulated data\": None,\n                },\n                {\n                  
  \"function\": {\n                        \"T\": {\"min\": 300, \"max\": 500},\n                        \"type\": \"symbolic\",\n                        \"equation\": self.createEqnPoly(poly3CoMap),\n                    },\n                    \"tabulated data\": None,\n                },\n            ],\n        }\n\n        mat = self._createFunction(data)\n        func = mat.rho\n        self.assertAlmostEqual(func.calc({\"T\": -100.0}), self.polynomialEvaluation(poly1CoMap, -100.0))\n        self.assertAlmostEqual(func.calc({\"T\": 0.0}), self.polynomialEvaluation(poly1CoMap, 0.0))\n        self.assertAlmostEqual(func.calc({\"T\": 100.0}), self.polynomialEvaluation(poly1CoMap, 100.0))\n        self.assertAlmostEqual(func.calc({\"T\": 200.0}), self.polynomialEvaluation(poly2CoMap, 200.0))\n        self.assertAlmostEqual(func.calc({\"T\": 300.0}), self.polynomialEvaluation(poly2CoMap, 300.0))\n        self.assertAlmostEqual(func.calc({\"T\": 400.0}), self.polynomialEvaluation(poly3CoMap, 400.0))\n        self.assertAlmostEqual(func.calc({\"T\": 500.0}), self.polynomialEvaluation(poly3CoMap, 500.0))\n\n    def test_piecewiseEqnPolyTable(self):\n        \"\"\"Test that makes a PiecewiseFunction composed of a mixture of polynomial and table functions.\"\"\"\n        poly1CoMap = {0: 3.5, 1: 3, 2: -2, 3: 1}\n        poly2CoMap = {0: 4.5, 1: -2, 2: 3, 3: -2, 4: 1}\n        data = {\n            \"type\": \"piecewise\",\n            \"functions\": [\n                {\n                    \"function\": {\n                        \"T\": 0,\n                        \"type\": \"table\",\n                    },\n                    \"tabulated data\": [[-100.0, -50.0], [0.0, 0.0], [100.0, 50.0]],\n                },\n                {\n                    \"function\": {\n                        \"T\": {\"min\": 100, \"max\": 300},\n                        \"type\": \"symbolic\",\n                        \"equation\": self.createEqnPoly(poly1CoMap),\n                    
},\n                    \"tabulated data\": None,\n                },\n                {\n                    \"function\": {\n                        \"T\": {\"min\": 300, \"max\": 500},\n                        \"type\": \"symbolic\",\n                        \"equation\": self.createEqnPoly(poly2CoMap),\n                    },\n                    \"tabulated data\": None,\n                },\n            ],\n        }\n\n        mat = self._createFunction(data)\n        func = mat.rho\n\n        self.assertAlmostEqual(func.calc({\"T\": -100.0}), -50.0)\n        self.assertAlmostEqual(func.calc({\"T\": -50.0}), -25.0)\n        self.assertAlmostEqual(func.calc({\"T\": 0.0}), 0.0)\n        self.assertAlmostEqual(func.calc({\"T\": 50.0}), 25.0)\n        self.assertAlmostEqual(func.calc({\"T\": 100.0}), 50.0)\n        self.assertAlmostEqual(func.calc({\"T\": 200.0}), self.polynomialEvaluation(poly1CoMap, 200.0))\n        self.assertAlmostEqual(func.calc({\"T\": 300.0}), self.polynomialEvaluation(poly1CoMap, 300.0))\n        self.assertAlmostEqual(func.calc({\"T\": 400.0}), self.polynomialEvaluation(poly2CoMap, 400.0))\n        self.assertAlmostEqual(func.calc({\"T\": 500.0}), self.polynomialEvaluation(poly2CoMap, 500.0))\n\n    def test_inputCheckPiecewiseMinTemp(self):\n        \"\"\"Test to make sure an error is thrown when attempting to evaluate below the minimum valid range.\"\"\"\n        self.belowMinimumCheck(self.basePiecewiseData)\n\n    def test_inputCheckPiecewiseMaxTemp(self):\n        \"\"\"Test to make sure an error is thrown when attempting to evaluate above the maximum valid range.\"\"\"\n        self.aboveMaximumCheck(self.basePiecewiseData)\n\n    def _createFunction2D(self, data=None):\n        \"\"\"\n        Helper function designed to create a basic viable yaml file for a two dimensional function.\n\n        Parameters\n        ----------\n        data : dict\n            A dictionary containing user specified function child nodes.\n        
\"\"\"\n        funcBody = {\"T\": {\"min\": -100, \"max\": 100}, \"t\": {\"min\": -100, \"max\": 100}}\n        funcBody.update(data or {})\n        materialData = {\n            \"file format\": \"TESTS\",\n            \"composition\": {\"Fe\": \"balance\"},\n            \"material type\": \"Metal\",\n            \"density\": {\"function\": funcBody, \"tabulated data\": {}},\n        }\n\n        mat = Material()\n        mat.loadNode(materialData)\n\n        return mat\n\n    def test_piecewiseEqn2d(self):\n        \"\"\"Test that PiecewiseFunction evaluates correctly with multiple dimensions.\"\"\"\n        data = {\n            \"type\": \"piecewise\",\n            \"functions\": [\n                {\n                    \"function\": {\n                        \"T\": {\"min\": 0, \"max\": 20},\n                        \"t\": {\"min\": 0, \"max\": 20},\n                        \"type\": \"symbolic\",\n                        \"equation\": \"10\",\n                    },\n                    \"tabulated data\": None,\n                },\n                {\n                    \"function\": {\n                        \"T\": {\"min\": 30, \"max\": 40},\n                        \"t\": {\"min\": 0, \"max\": 20},\n                        \"type\": \"symbolic\",\n                        \"equation\": \"99\",\n                    },\n                    \"tabulated data\": None,\n                },\n                {\n                    \"function\": {\n                        \"T\": {\"min\": 0, \"max\": 20},\n                        \"t\": {\"min\": 30, \"max\": 40},\n                        \"type\": \"symbolic\",\n                        \"equation\": \"20\",\n                    },\n                    \"tabulated data\": None,\n                },\n                {\n                    \"function\": {\n                        \"T\": {\"min\": 30, \"max\": 40},\n                        \"t\": {\"min\": 30, \"max\": 40},\n                        \"type\": 
\"symbolic\",\n                        \"equation\": \"199\",\n                    },\n                    \"tabulated data\": None,\n                },\n            ],\n        }\n\n        mat = self._createFunction2D(data)\n        func = mat.rho\n        # Below var 1\n        with self.assertRaisesRegex(ValueError, \"PiecewiseFunction error, could not evaluate\"):\n            func.calc({\"T\": -1.0, \"t\": 10})\n        # Middle gap var 1\n        with self.assertRaisesRegex(ValueError, \"PiecewiseFunction error, could not evaluate\"):\n            func.calc({\"T\": 25.0, \"t\": 10})\n\n        # Above var 1\n        with self.assertRaisesRegex(ValueError, \"PiecewiseFunction error, could not evaluate\"):\n            func.calc({\"T\": 45.0, \"t\": 10})\n\n        # Below var 2\n        with self.assertRaisesRegex(ValueError, \"PiecewiseFunction error, could not evaluate\"):\n            func.calc({\"T\": 10, \"t\": -1})\n\n        # Middle gap var 2\n        with self.assertRaisesRegex(ValueError, \"PiecewiseFunction error, could not evaluate\"):\n            func.calc({\"T\": 10, \"t\": 25})\n\n        # Above var 2\n        with self.assertRaisesRegex(ValueError, \"PiecewiseFunction error, could not evaluate\"):\n            func.calc({\"T\": 10, \"t\": 45})\n\n        self.assertAlmostEqual(func.calc(T=10, t=10), 10)\n        self.assertAlmostEqual(func.calc(T=10, t=35), 20)\n        self.assertAlmostEqual(func.calc(T=35, t=10), 99)\n        self.assertAlmostEqual(func.calc(T=35, t=35), 199)\n\n    def test_piecewiseEqnOverlap(self):\n        \"\"\"Test that PiecewiseFunction fails to load with overlapping regions.\"\"\"\n        data = {\n            \"type\": \"piecewise\",\n            \"functions\": [\n                {\n                    \"function\": {\n                        \"T\": {\"min\": 0, \"max\": 20},\n                        \"t\": {\"min\": 0, \"max\": 20},\n                        \"type\": \"symbolic\",\n                        
\"equation\": \"10\",\n                    },\n                    \"tabulated data\": None,\n                },\n                {\n                    \"function\": {\n                        \"T\": {\"min\": 10, \"max\": 40},\n                        \"t\": {\"min\": 0, \"max\": 20},\n                        \"type\": \"symbolic\",\n                        \"equation\": \"99\",\n                    },\n                    \"tabulated data\": None,\n                },\n                {\n                    \"function\": {\n                        \"T\": {\"min\": 0, \"max\": 20},\n                        \"t\": {\"min\": 30, \"max\": 40},\n                        \"type\": \"symbolic\",\n                        \"equation\": \"20\",\n                    },\n                    \"tabulated data\": None,\n                },\n                {\n                    \"function\": {\n                        \"T\": {\"min\": 30, \"max\": 40},\n                        \"t\": {\"min\": 30, \"max\": 40},\n                        \"type\": \"symbolic\",\n                        \"equation\": \"199\",\n                    },\n                    \"tabulated data\": None,\n                },\n            ],\n        }\n\n        with self.assertRaisesRegex(ValueError, \"Piecewise child functions overlap\"):\n            self._createFunction2D(data)\n\n    def test_piecewiseEqnDiffVars(self):\n        \"\"\"Test that PiecewiseFunction fails to load when child functions use different variables.\"\"\"\n        data = {\n            \"type\": \"piecewise\",\n            \"functions\": [\n                {\n                    \"function\": {\n                        \"T\": {\"min\": 0, \"max\": 20},\n                        \"t\": {\"min\": 0, \"max\": 20},\n                        \"type\": \"symbolic\",\n                        \"equation\": \"10\",\n                    },\n                    \"tabulated data\": None,\n                },\n                {\n                   
 \"function\": {\n                        \"T\": {\"min\": 30, \"max\": 40},\n                        \"t\": {\"min\": 0, \"max\": 20},\n                        \"type\": \"symbolic\",\n                        \"equation\": \"99\",\n                    },\n                    \"tabulated data\": None,\n                },\n                {\n                    \"function\": {\n                        \"R\": {\"min\": 0, \"max\": 20},\n                        \"t\": {\"min\": 30, \"max\": 40},\n                        \"type\": \"symbolic\",\n                        \"equation\": \"20\",\n                    },\n                    \"tabulated data\": None,\n                },\n                {\n                    \"function\": {\n                        \"T\": {\"min\": 30, \"max\": 40},\n                        \"t\": {\"min\": 30, \"max\": 40},\n                        \"type\": \"symbolic\",\n                        \"equation\": \"199\",\n                    },\n                    \"tabulated data\": None,\n                },\n            ],\n        }\n\n        with self.assertRaisesRegex(KeyError, \"Piecewise child function must have same variables\"):\n            self._createFunction2D(data)\n"
  },
  {
    "path": "armi/matProps/tests/test_point.py",
    "content": "# Copyright 2026 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Program that runs all of the tests for the Point class.\n\nNotes\n-----\nThis file is used to verify the matProps stand-alone wheel installation. As such, it needs to remain small. Do not add\nany tests to this file with explicit file IO: no temporary directories, and no test YAML files.\n\"\"\"\n\nimport unittest\n\nfrom armi.matProps.point import Point\n\n\nclass TestPoint(unittest.TestCase):\n    \"\"\"Unit tests for the matProps Point class.\"\"\"\n\n    def test_string(self):\n        \"\"\"Test string representation of Point.\"\"\"\n        p = Point(1, 2, 3)\n        self.assertEqual(str(p), \"<Point 1, 2 -> 3>\")\n"
  },
  {
    "path": "armi/matProps/tests/test_property.py",
    "content": "# Copyright 2026 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Program that runs all of the tests contained in PropertyTests class.\"\"\"\n\nimport os\nimport unittest\nfrom os import path\n\nfrom armi.matProps import loadMaterial\nfrom armi.matProps.prop import defProp, properties\n\n\nclass PropertyTests(unittest.TestCase):\n    \"\"\"Class which contains tests for the matProps Property class.\"\"\"\n\n    @classmethod\n    def setUpClass(cls):\n        # Properties allowed for based on SDID.\n        cls.allowedPropertiesList = [\n            \"density\",\n            \"specific heat capacity\",\n            \"thermal conductivity\",\n            \"thermal diffusivity\",\n            \"dynamic viscosity\",\n            \"kinematic viscosity\",\n            \"melting temperature\",\n            \"boiling temperature\",\n            \"surface tension\",\n            \"vapor pressure\",\n            \"electrical conductance\",\n            \"isothermal compressibility\",\n            \"mean coefficient of thermal expansion\",\n            \"instantaneous coefficient of thermal expansion\",\n            \"Young's modulus\",\n            \"shear modulus\",\n            \"elongation\",\n            \"Poisson's ratio\",\n            \"yield strength\",\n            \"tensile strength\",\n            \"design stress\",\n            \"design reference stress\",\n            \"allowable stress\",\n            \"time dependent 
design stress\",\n            \"service reference stress\",\n            \"stress to rupture\",\n            \"tensile strength reduction factor\",\n            \"yield strength reduction factor\",\n            \"weld strength reduction factor\",\n            \"allowable time to rupture\",\n            \"allowable time to allowable stress\",\n            \"design fatigue strain range\",\n            \"strain from isochronous stress-strain curve\",\n            \"design fatigue stress\",\n            \"linear expansion\",\n            \"vapor specific volume\",\n            \"speed of sound\",\n            \"solidus temperature\",\n            \"liquidus temperature\",\n            \"volumetric expansion\",\n            \"enthalpy\",\n            \"temperature from enthalpy\",\n            \"enthalpy of fusion\",\n            \"latent heat of vaporization\",\n            \"fracture toughness\",\n            \"Brinell Hardness\",\n            \"factor f from ASME.III.5 Fig. HBB-T-1432-2\",\n            \"factor Kv' from ASME.III.5 Fig. 
HBB-T-1432-3\",\n        ]\n\n    def test_propertiesUnique(self):\n        \"\"\"Ensure the Property.name and Property.symbol are all unique inside the matProps.properties container.\"\"\"\n        num = len(properties)\n        self.assertEqual(num, len({p.name for p in properties}))\n        self.assertEqual(num, len({p.symbol for p in properties}))\n\n    def test_propertiesNames(self):\n        \"\"\"Ensure that we have the correct set of Properties in matProps.\"\"\"\n        propertySet = {p.name for p in properties}\n        allowedPropertiesSet = set(self.allowedPropertiesList)\n        self.assertEqual(propertySet, allowedPropertiesSet)\n\n    def test_propertiesInvName(self):\n        \"\"\"Ensure loadNode fails correctly when provided an unknown property.\"\"\"\n        tempFileName = os.path.join(os.path.dirname(__file__), \"invalidTestFiles\", \"badProperty.yaml\")\n\n        with self.assertRaisesRegex(KeyError, \"Invalid property node\"):\n            loadMaterial(tempFileName)\n\n    def test_propertiesDefinitions(self):\n        \"\"\"\n        Check a logic branch in the Function.factory method which initializes armi.matProps.Function objects to be\n        null. armi.matProps.Function objects only get set to a non-null object if the appropriate property node is\n        provided in the YAML file. A test YAML file with only the density property is provided. It checks to make sure that\n        the Material.rho object corresponding with density is not a null object and performs an evaluation. A check is\n        then performed on the Material.k object. This object, which corresponds to the thermal conductivity property,\n        should be null as it is not defined in the test YAML file.\n        \"\"\"\n        # Only the density property exists for the material below. 
It is a constant function\n        yamlFilePath = path.join(path.dirname(path.realpath(__file__)), \"testDir1\", \"a.yaml\")\n        mat = loadMaterial(yamlFilePath)\n        # Name of density function is rho for materials\n        self.assertIsNotNone(mat.rho)\n        self.assertAlmostEqual(mat.rho.calc({\"T\": 150.0}), 1.0)\n        # k corresponds to thermal conductivity which is not provided in test file.\n        self.assertIsNone(mat.k)\n\n    def test_spotCheckAllPropsDict(self):\n        \"\"\"Spot check every property at least once, using a dictionary of input values.\"\"\"\n        pathToTestYaml = path.join(path.dirname(path.realpath(__file__)), \"testDir4\")\n        testMat = loadMaterial(path.join(pathToTestYaml, \"sampleProperty.yaml\"))\n        self.assertAlmostEqual(testMat.rho.calc({\"T\": 300.0}), 1.0)\n        self.assertAlmostEqual(testMat.c_p.calc({\"T\": 300.0}), 2.0)\n        self.assertAlmostEqual(testMat.k.calc({\"T\": 300.0}), 3.0)\n        self.assertAlmostEqual(testMat.alpha_d.calc({\"T\": 300.0}), 4.0)\n        self.assertAlmostEqual(testMat.mu_d.calc({\"T\": 300.0}), 5.0)\n        self.assertAlmostEqual(testMat.mu_k.calc({\"T\": 300.0}), 6.0)\n        self.assertAlmostEqual(testMat.T_melt.calc({\"T\": 300.0}), 7.0)\n        self.assertAlmostEqual(testMat.T_boil.calc({\"T\": 300.0}), 8.0)\n        self.assertAlmostEqual(testMat.dH_vap.calc({\"T\": 300.0}), 9.0)\n        self.assertAlmostEqual(testMat.dH_fus.calc({\"T\": 300.0}), 10.0)\n        self.assertAlmostEqual(testMat.gamma.calc({\"T\": 300.0}), 11.0)\n        self.assertAlmostEqual(testMat.P_sat.calc({\"T\": 300.0}), 12.0)\n        self.assertAlmostEqual(testMat.kappa.calc({\"T\": 300.0}), 13.0)\n        self.assertAlmostEqual(testMat.alpha_mean.calc({\"T\": 300.0}), 14.0)\n        self.assertAlmostEqual(testMat.alpha_inst.calc({\"T\": 300.0}), 15.0)\n        self.assertAlmostEqual(testMat.E.calc({\"T\": 300.0}), 16.0)\n        self.assertAlmostEqual(testMat.nu.calc({\"T\": 
300.0}), 17.0)\n        self.assertAlmostEqual(testMat.Sy.calc({\"T\": 300.0}), 18.0)\n        self.assertAlmostEqual(testMat.Su.calc({\"T\": 300.0}), 19.0)\n        self.assertAlmostEqual(testMat.Sm.calc({\"T\": 300.0}), 20.0)\n        self.assertAlmostEqual(testMat.So.calc({\"T\": 300.0}), 21.0)\n        self.assertAlmostEqual(testMat.Sa.calc({\"T\": 300.0}), 22.0)\n        self.assertAlmostEqual(testMat.St.calc({\"T\": 300.0}), 23.0)\n        self.assertAlmostEqual(testMat.Smt.calc({\"T\": 300.0}), 24.0)\n        self.assertAlmostEqual(testMat.Sr.calc({\"T\": 300.0}), 25.0)\n        self.assertAlmostEqual(testMat.TSRF.calc({\"T\": 300.0}), 26.0)\n        self.assertAlmostEqual(testMat.YSRF.calc({\"T\": 300.0}), 27.0)\n        self.assertAlmostEqual(testMat.WSRF.calc({\"T\": 300.0}), 28.0)\n        self.assertAlmostEqual(testMat.tMaxSr.calc({\"T\": 300.0}), 29.0)\n        self.assertAlmostEqual(testMat.tMaxSt.calc({\"T\": 300.0}), 30.0)\n        self.assertAlmostEqual(testMat.eps_t.calc({\"T\": 300.0}), 31.0)\n        self.assertAlmostEqual(testMat.eps_iso.calc({\"T\": 300.0}), 32.0)\n        self.assertAlmostEqual(testMat.SaFat.calc({\"T\": 300.0}), 33.0)\n        self.assertAlmostEqual(testMat.dl_l.calc({\"T\": 300.0}), 34.0)\n        self.assertAlmostEqual(testMat.nu_g.calc({\"T\": 300.0}), 35.0)\n        self.assertAlmostEqual(testMat.v_sound.calc({\"T\": 300.0}), 36.0)\n        self.assertAlmostEqual(testMat.T_sol.calc({\"T\": 300.0}), 37.0)\n        self.assertAlmostEqual(testMat.T_liq.calc({\"T\": 300.0}), 38.0)\n        self.assertAlmostEqual(testMat.dV.calc({\"T\": 300.0}), 39.0)\n        self.assertAlmostEqual(testMat.H.calc({\"T\": 300.0}), 40.0)\n        self.assertAlmostEqual(testMat.H_calc_T.calc({\"T\": 300.0}), 41.0)\n        self.assertAlmostEqual(testMat.K_IC.calc({\"T\": 300.0}), 42.0)\n        self.assertAlmostEqual(testMat.HBW.calc({\"T\": 300.0}), 43.0)\n        self.assertAlmostEqual(testMat.f.calc({\"T\": 300.0}), 44.0)\n        
self.assertAlmostEqual(testMat.Kv_prime.calc({\"T\": 300.0}), 45.0)\n        self.assertAlmostEqual(testMat.S.calc({\"T\": 300.0}), 46.0)\n        self.assertAlmostEqual(testMat.Elong.calc({\"T\": 300.0}), 47.0)\n\n    def test_spotCheckAllPropsKwargs(self):\n        \"\"\"Spot check every property at least once, using kwargs.\"\"\"\n        pathToTestYaml = path.join(path.dirname(path.realpath(__file__)), \"testDir4\")\n        testMat = loadMaterial(path.join(pathToTestYaml, \"sampleProperty.yaml\"))\n        self.assertAlmostEqual(testMat.rho.calc(T=300.0), 1.0)\n        self.assertAlmostEqual(testMat.c_p.calc(T=300.0), 2.0)\n        self.assertAlmostEqual(testMat.k.calc(T=300.0), 3.0)\n        self.assertAlmostEqual(testMat.alpha_d.calc(T=300.0), 4.0)\n        self.assertAlmostEqual(testMat.mu_d.calc(T=300.0), 5.0)\n        self.assertAlmostEqual(testMat.mu_k.calc(T=300.0), 6.0)\n        self.assertAlmostEqual(testMat.T_melt.calc(T=300.0), 7.0)\n        self.assertAlmostEqual(testMat.T_boil.calc(T=300.0), 8.0)\n        self.assertAlmostEqual(testMat.dH_vap.calc(T=300.0), 9.0)\n        self.assertAlmostEqual(testMat.dH_fus.calc(T=300.0), 10.0)\n        self.assertAlmostEqual(testMat.gamma.calc(T=300.0), 11.0)\n        self.assertAlmostEqual(testMat.P_sat.calc(T=300.0), 12.0)\n        self.assertAlmostEqual(testMat.kappa.calc(T=300.0), 13.0)\n        self.assertAlmostEqual(testMat.alpha_mean.calc(T=300.0), 14.0)\n        self.assertAlmostEqual(testMat.alpha_inst.calc(T=300.0), 15.0)\n        self.assertAlmostEqual(testMat.E.calc(T=300.0), 16.0)\n        self.assertAlmostEqual(testMat.nu.calc(T=300.0), 17.0)\n        self.assertAlmostEqual(testMat.Sy.calc(T=300.0), 18.0)\n        self.assertAlmostEqual(testMat.Su.calc(T=300.0), 19.0)\n        self.assertAlmostEqual(testMat.Sm.calc(T=300.0), 20.0)\n        self.assertAlmostEqual(testMat.So.calc(T=300.0), 21.0)\n        self.assertAlmostEqual(testMat.Sa.calc(T=300.0), 22.0)\n        
self.assertAlmostEqual(testMat.St.calc(T=300.0), 23.0)\n        self.assertAlmostEqual(testMat.Smt.calc(T=300.0), 24.0)\n        self.assertAlmostEqual(testMat.Sr.calc(T=300.0), 25.0)\n        self.assertAlmostEqual(testMat.TSRF.calc(T=300.0), 26.0)\n        self.assertAlmostEqual(testMat.YSRF.calc(T=300.0), 27.0)\n        self.assertAlmostEqual(testMat.WSRF.calc(T=300.0), 28.0)\n        self.assertAlmostEqual(testMat.tMaxSr.calc(T=300.0), 29.0)\n        self.assertAlmostEqual(testMat.tMaxSt.calc(T=300.0), 30.0)\n        self.assertAlmostEqual(testMat.eps_t.calc(T=300.0), 31.0)\n        self.assertAlmostEqual(testMat.eps_iso.calc(T=300.0), 32.0)\n        self.assertAlmostEqual(testMat.SaFat.calc(T=300.0), 33.0)\n        self.assertAlmostEqual(testMat.dl_l.calc(T=300.0), 34.0)\n        self.assertAlmostEqual(testMat.nu_g.calc(T=300.0), 35.0)\n        self.assertAlmostEqual(testMat.v_sound.calc(T=300.0), 36.0)\n        self.assertAlmostEqual(testMat.T_sol.calc(T=300.0), 37.0)\n        self.assertAlmostEqual(testMat.T_liq.calc(T=300.0), 38.0)\n        self.assertAlmostEqual(testMat.dV.calc(T=300.0), 39.0)\n        self.assertAlmostEqual(testMat.H.calc(T=300.0), 40.0)\n        self.assertAlmostEqual(testMat.H_calc_T.calc(T=300.0), 41.0)\n        self.assertAlmostEqual(testMat.K_IC.calc(T=300.0), 42.0)\n        self.assertAlmostEqual(testMat.HBW.calc(T=300.0), 43.0)\n        self.assertAlmostEqual(testMat.f.calc(T=300.0), 44.0)\n        self.assertAlmostEqual(testMat.Kv_prime.calc(T=300.0), 45.0)\n        self.assertAlmostEqual(testMat.S.calc(T=300.0), 46.0)\n        self.assertAlmostEqual(testMat.Elong.calc(T=300.0), 47.0)\n\n    def test_defPropDup(self):\n        with self.assertRaises(KeyError):\n            defProp(\"rho\", \"density\", \"kg/m^3\", \"rho\")\n"
  },
  {
    "path": "armi/matProps/tests/test_references.py",
    "content": "# Copyright 2026 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Test the Reference.\"\"\"\n\nimport unittest\n\nfrom armi.matProps.reference import Reference\n\n\nclass TestReference(unittest.TestCase):\n    \"\"\"Unit tests for Reference.\"\"\"\n\n    def test_str(self):\n        ref = Reference()\n        ref._ref = \"REF123\"\n        ref._type = \"TYPE321\"\n\n        self.assertEqual(str(ref), \"REF123 (TYPE321)\")\n\n    def test_getRef(self):\n        ref = Reference()\n        ref._ref = \"REF234\"\n\n        self.assertEqual(ref.getRef(), \"REF234\")\n\n    def test_getType(self):\n        ref = Reference()\n        ref._type = \"TYPE789\"\n\n        self.assertEqual(ref.getType(), \"TYPE789\")\n\n    def test_factory(self):\n        node = {\"ref\": \"REF234\", \"type\": \"TYPE789\"}\n        ref = Reference._factory(node)\n        self.assertEqual(str(ref), \"REF234 (TYPE789)\")\n        self.assertEqual(ref.getRef(), \"REF234\")\n        self.assertEqual(ref.getType(), \"TYPE789\")\n"
  },
  {
    "path": "armi/matProps/tests/test_symbolicFunction.py",
    "content": "# Copyright 2026 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n\"\"\"Unit tests for the symbolic function class.\"\"\"\n\nimport copy\nimport math\nimport pickle\nimport unittest\n\nimport numpy as np\n\nfrom armi.matProps.material import Material\n\n\nclass TestSymbolicFunction(unittest.TestCase):\n    \"\"\"Unit tests for the symbolic function class.\"\"\"\n\n    def setUp(self):\n        self.yaml = {\n            \"file format\": \"TESTS\",\n            \"material type\": \"Metal\",\n            \"composition\": {\"a\": \"balance\"},\n            \"density\": {\n                \"function\": {\n                    \"type\": \"symbolic\",\n                    \"X\": {\"min\": -10, \"max\": 500.0},\n                    \"Y\": {\"min\": 1.0, \"max\": 20.0},\n                    \"Z\": {\"min\": -30.0, \"max\": -10.0},\n                    \"equation\": 1.0,\n                }\n            },\n        }\n\n    def loadMaterial(self, num=1):\n        \"\"\"Loads the material file based on `self.yaml` and returns the material object.\"\"\"\n        mat = Material()\n        mat.loadNode(self.yaml)\n        return mat\n\n    def functionTest(self, func, num=1):\n        \"\"\"\n        Takes a function as input to compare against matProps material output.\n        It is assumed that `self.yaml` has been updated to match the provided evaluation function.\n        \"\"\"\n        mat = self.loadMaterial(num=num)\n        prop = 
mat.rho\n        for x in np.linspace(prop.getMinBound(\"X\"), prop.getMaxBound(\"X\"), 20):\n            for y in np.linspace(prop.getMinBound(\"Y\"), prop.getMaxBound(\"Y\"), 20):\n                for z in np.linspace(prop.getMinBound(\"Z\"), prop.getMaxBound(\"Z\"), 20):\n                    received = prop.calc({\"X\": x, \"Y\": y, \"Z\": z})\n                    expected = func(x, y, z)\n                    self.assertAlmostEqual(\n                        received,\n                        expected,\n                        msg=(\n                            f\"Material property evaluation does not match for: {prop.sympyStr} at ({x}, {y}, {z}).\\n\"\n                            f\" Received: {received}, Expected: {expected}\"\n                        ),\n                        delta=abs(\n                            expected / 1e8\n                        ),  # very large numbers can have floating point differences at low decimal count\n                    )\n\n    def setEqnField(self, eqn):\n        self.yaml[\"density\"][\"function\"][\"equation\"] = eqn\n\n    def test_symbolicMult(self):\n        \"\"\"\n        Test multiplication operator for symbolic equations.\n\n        Four combinations of spacing and the operator are tested for multiplying a variable and a constant as well as\n        multiplying two variables. 
For each input, the property is evaluated at 20 evenly spaced points per independent\n        variable within the valid range.\n        \"\"\"\n        func = lambda x, y, z: x * 20\n        self.setEqnField(\"X * 20\")\n        self.functionTest(func, 1)\n\n        self.setEqnField(\"X*20\")\n        self.functionTest(func, 2)\n\n        self.setEqnField(\"X* 20\")\n        self.functionTest(func, 3)\n\n        self.setEqnField(\"X *20\")\n        self.functionTest(func, 4)\n\n        func = lambda x, y, z: x * y\n        self.setEqnField(\"X * Y\")\n        self.functionTest(func, 5)\n\n        self.setEqnField(\"X*Y\")\n        self.functionTest(func, 6)\n\n        self.setEqnField(\"X*Y\")\n        self.functionTest(func, 7)\n\n        self.setEqnField(\"X *Y\")\n        self.functionTest(func, 8)\n\n    def test_symbolicExponent(self):\n        \"\"\"\n        Test exponent operator for symbolic equations.\n\n        Four combinations of spacing and the operator are tested for raising a variable by a constant as well as raising\n        a constant by a constant. 
For each input, the property is evaluated at 20 evenly spaced points per independent\n        variable within the valid range.\n        \"\"\"\n        func = lambda x, y, z: x**3\n        self.setEqnField(\"X ** 3\")\n        self.functionTest(func, 1)\n\n        self.setEqnField(\"X**3\")\n        self.functionTest(func, 2)\n\n        self.setEqnField(\"X** 3\")\n        self.functionTest(func, 3)\n\n        self.setEqnField(\"X **3\")\n        self.functionTest(func, 4)\n\n        func = lambda x, y, z: 1.1**y\n        self.setEqnField(\"1.1 ** Y\")\n        self.functionTest(func, 5)\n\n        self.setEqnField(\"1.1**Y\")\n        self.functionTest(func, 6)\n\n        self.setEqnField(\"1.1** Y\")\n        self.functionTest(func, 7)\n\n        self.setEqnField(\"1.1 **Y\")\n        self.functionTest(func, 8)\n\n    def test_symbolicDiv(self):\n        \"\"\"\n        Test division operator for symbolic equations.\n\n        The four combinations of spacing and the operator are tested for dividing a variable and a constant as well as\n        dividing two variables. 
For each input, the property is evaluated at 20 evenly spaced points per independent\n        variable within the valid range.\n        \"\"\"\n        func = lambda x, y, z: x / 3\n        self.setEqnField(\"X / 3\")\n        self.functionTest(func, 1)\n\n        self.setEqnField(\"X/3\")\n        self.functionTest(func, 2)\n\n        self.setEqnField(\"X/ 3\")\n        self.functionTest(func, 3)\n\n        self.setEqnField(\"X /3\")\n        self.functionTest(func, 4)\n\n        func = lambda x, y, z: x / y\n        self.setEqnField(\"X / Y\")\n        self.functionTest(func, 5)\n\n        self.setEqnField(\"X/Y\")\n        self.functionTest(func, 6)\n\n        self.setEqnField(\"X/ Y\")\n        self.functionTest(func, 7)\n\n        self.setEqnField(\"X /Y\")\n        self.functionTest(func, 8)\n\n    def test_symbolicAdd(self):\n        \"\"\"\n        Test addition operator for symbolic equations.\n\n        Four combinations of spacing and the operator are tested for adding a variable and a constant as well as adding\n        two variables. 
For each input, the property is evaluated at 20 evenly spaced points per independent variable\n        within the valid range.\n        \"\"\"\n        func = lambda x, y, z: x + 3\n        self.setEqnField(\"X + 3\")\n        self.functionTest(func, 1)\n\n        self.setEqnField(\"X+3\")\n        self.functionTest(func, 2)\n\n        self.setEqnField(\"X+ 3\")\n        self.functionTest(func, 3)\n\n        self.setEqnField(\"X +3\")\n        self.functionTest(func, 4)\n\n        func = lambda x, y, z: x + y\n        self.setEqnField(\"X + Y\")\n        self.functionTest(func, 5)\n\n        self.setEqnField(\"X+Y\")\n        self.functionTest(func, 6)\n\n        self.setEqnField(\"X+ Y\")\n        self.functionTest(func, 7)\n\n        self.setEqnField(\"X +Y\")\n        self.functionTest(func, 8)\n\n    def test_symbolicSub(self):\n        \"\"\"\n        Test subtraction operator for symbolic equations.\n\n        Four combinations of spacing and the operator are tested for subtracting a variable and a constant as well\n        as subtracting two variables. 
For each input, the property is evaluated at 20 evenly spaced points per\n        independent variable within the valid range.\n        \"\"\"\n        func = lambda x, y, z: x - 3\n        self.setEqnField(\"X - 3\")\n        self.functionTest(func, 1)\n\n        self.setEqnField(\"X-3\")\n        self.functionTest(func, 2)\n\n        self.setEqnField(\"X- 3\")\n        self.functionTest(func, 3)\n\n        self.setEqnField(\"X -3\")\n        self.functionTest(func, 4)\n\n        func = lambda x, y, z: x - z\n        self.setEqnField(\"X - Z\")\n        self.functionTest(func, 5)\n\n        self.setEqnField(\"X-Z\")\n        self.functionTest(func, 6)\n\n        self.setEqnField(\"X- Z\")\n        self.functionTest(func, 7)\n\n        self.setEqnField(\"X -Z\")\n        self.functionTest(func, 8)\n\n    def test_symbolicParens(self):\n        \"\"\"\n        Test the grouping operator for symbolic equations.\n\n        Various combinations of grouping is tested with spacing on a simple addition operation. 
For each input, the\n        property is evaluated at 20 evenly spaced points per independent variable within the valid range.\n        \"\"\"\n        func = lambda x, y, z: x + 3\n        self.setEqnField(\"(X + 3)\")\n        self.functionTest(func, 1)\n\n        self.setEqnField(\"(X) + 3\")\n        self.functionTest(func, 2)\n\n        self.setEqnField(\"X + (3)\")\n        self.functionTest(func, 3)\n\n        self.setEqnField(\"(X) + (3)\")\n        self.functionTest(func, 4)\n\n        self.setEqnField(\"(X ) + 3\")\n        self.functionTest(func, 5)\n\n        self.setEqnField(\"( X) + 3\")\n        self.functionTest(func, 6)\n\n        self.setEqnField(\"( X ) + 3\")\n        self.functionTest(func, 7)\n\n        self.setEqnField(\"( X + 3)\")\n        self.functionTest(func, 8)\n\n        self.setEqnField(\"(X + 3 )\")\n        self.functionTest(func, 9)\n\n    def test_symbolicSine(self):\n        \"\"\"\n        Test sine operator for symbolic equations.\n\n        Four combinations of spacing and the operator are tested. For each input, the property is evaluated at 20\n        evenly spaced points per independent variable within the valid range.\n        \"\"\"\n        func = lambda x, y, z: math.sin(x)\n        self.setEqnField(\"sin(X)\")\n        self.functionTest(func, 1)\n\n        self.setEqnField(\"sin (X)\")\n        self.functionTest(func, 2)\n\n        self.setEqnField(\"sin( X)\")\n        self.functionTest(func, 3)\n\n        self.setEqnField(\"sin(X )\")\n        self.functionTest(func, 4)\n\n    def test_symbolicCosine(self):\n        \"\"\"\n        Test cosine operator for symbolic equations.\n\n        Four combinations of spacing and the operator are tested. 
For each input, the property is evaluated at 20 evenly\n        spaced points per independent variable within the valid range.\n        \"\"\"\n        func = lambda x, y, z: math.cos(x)\n        self.setEqnField(\"cos(X)\")\n        self.functionTest(func, 1)\n\n        self.setEqnField(\"cos (X)\")\n        self.functionTest(func, 2)\n\n        self.setEqnField(\"cos( X)\")\n        self.functionTest(func, 3)\n\n        self.setEqnField(\"cos(X )\")\n        self.functionTest(func, 4)\n\n    def test_symbolicTan(self):\n        \"\"\"\n        Test tangent operator for symbolic equations.\n\n        Four combinations of spacing and the operator are tested. For each input, the property is evaluated at 20 evenly\n        spaced points per independent variable within the valid range.\n        \"\"\"\n        func = lambda x, y, z: math.tan(x)\n        self.setEqnField(\"tan(X)\")\n        self.functionTest(func, 1)\n\n        self.setEqnField(\"tan (X)\")\n        self.functionTest(func, 2)\n\n        self.setEqnField(\"tan( X)\")\n        self.functionTest(func, 3)\n\n        self.setEqnField(\"tan(X )\")\n        self.functionTest(func, 4)\n\n    def test_symbolicSinh(self):\n        \"\"\"\n        Test hyperbolic sine operator for symbolic equations.\n\n        Four combinations of spacing and the operator are tested. 
For each input, the property is evaluated at 20 evenly\n        spaced points per independent variable within the valid range.\n        \"\"\"\n        func = lambda x, y, z: math.sinh(x)\n        self.setEqnField(\"sinh(X)\")\n        self.functionTest(func, 1)\n\n        self.setEqnField(\"sinh (X)\")\n        self.functionTest(func, 2)\n\n        self.setEqnField(\"sinh( X)\")\n        self.functionTest(func, 3)\n\n        self.setEqnField(\"sinh(X )\")\n        self.functionTest(func, 4)\n\n    def test_symbolicCosh(self):\n        \"\"\"\n        Test hyperbolic cosine operator for symbolic equations.\n\n        Four combinations of spacing and the operator are tested. For each input, the property is evaluated at 20 evenly\n        spaced points per independent variable within the valid range.\n        \"\"\"\n        func = lambda x, y, z: math.cosh(x)\n        self.setEqnField(\"cosh(X)\")\n        self.functionTest(func, 1)\n\n        self.setEqnField(\"cosh (X)\")\n        self.functionTest(func, 2)\n\n        self.setEqnField(\"cosh( X)\")\n        self.functionTest(func, 3)\n\n        self.setEqnField(\"cosh(X )\")\n        self.functionTest(func, 4)\n\n    def test_symbolicTanh(self):\n        \"\"\"\n        Test hyperbolic tangent operator for symbolic equations.\n\n        Four combinations of spacing and the operator are tested. 
For each input, the property is evaluated at 20 evenly\n        spaced points per independent variable within the valid range.\n        \"\"\"\n        func = lambda x, y, z: math.tanh(x)\n        self.setEqnField(\"tanh(X)\")\n        self.functionTest(func, 1)\n\n        self.setEqnField(\"tanh (X)\")\n        self.functionTest(func, 2)\n\n        self.setEqnField(\"tanh( X)\")\n        self.functionTest(func, 3)\n\n        self.setEqnField(\"tanh(X )\")\n        self.functionTest(func, 4)\n\n    def test_symbolicNatLog(self):\n        \"\"\"\n        Test natural logarithm operator for symbolic equations.\n\n        Both log and ln variations of the function name are tested. Four combinations of spacing and the operator are\n        tested for each function name. For each input, the property is evaluated at 20 evenly spaced points per\n        independent variable within the valid range.\n        \"\"\"\n        func = lambda x, y, z: math.log(y)\n        self.setEqnField(\"ln(Y)\")\n        self.functionTest(func, 1)\n\n        self.setEqnField(\"ln (Y)\")\n        self.functionTest(func, 2)\n\n        self.setEqnField(\"ln( Y)\")\n        self.functionTest(func, 3)\n\n        self.setEqnField(\"ln(Y )\")\n        self.functionTest(func, 4)\n\n        self.setEqnField(\"log(Y)\")\n        self.functionTest(func, 5)\n\n        self.setEqnField(\"log (Y)\")\n        self.functionTest(func, 6)\n\n        self.setEqnField(\"log( Y)\")\n        self.functionTest(func, 7)\n\n        self.setEqnField(\"log(Y )\")\n        self.functionTest(func, 8)\n\n    def test_symbolicLog10(self):\n        \"\"\"\n        Test base ten logarithm operator for symbolic equations.\n\n        Four combinations of spacing and the operator are tested. 
For each input, the property is evaluated at 20 evenly\n        spaced points per independent variable within the valid range.\n        \"\"\"\n        func = lambda x, y, z: math.log10(y)\n        self.setEqnField(\"log10(Y)\")\n        self.functionTest(func, 1)\n\n        self.setEqnField(\"log10 (Y)\")\n        self.functionTest(func, 2)\n\n        self.setEqnField(\"log10( Y)\")\n        self.functionTest(func, 3)\n\n        self.setEqnField(\"log10(Y )\")\n        self.functionTest(func, 4)\n\n    def test_symbolicExp(self):\n        \"\"\"\n        Test exponential operator for symbolic equations.\n\n        Four combinations of spacing and the operator are tested. For each input, the property is evaluated at 20 evenly\n        spaced points per independent variable within the valid range.\n        \"\"\"\n        func = lambda x, y, z: math.exp(y)\n        self.setEqnField(\"exp(Y)\")\n        self.functionTest(func, 1)\n\n        self.setEqnField(\"exp (Y)\")\n        self.functionTest(func, 2)\n\n        self.setEqnField(\"exp( Y)\")\n        self.functionTest(func, 3)\n\n        self.setEqnField(\"exp(Y )\")\n        self.functionTest(func, 4)\n\n    def test_symbolicComposition(self):\n        \"\"\"\n        Test composition of functions for symbolic equations.\n\n        Four different functions are tested that are composites of other functions. 
For each input, the property is\n        evaluated at 20 evenly spaced points per independent variable within the valid range.\n        \"\"\"\n        # Multiple functions on one side of multiplication/divide\n        func = lambda x, y, z: x / (math.exp(y) + z)\n        self.setEqnField(\"X / (exp(Y) + Z)\")\n        self.functionTest(func, 1)\n\n        # Multiple functions inside trig function\n        func = lambda x, y, z: x * math.sin(z**y)\n        self.setEqnField(\"X * sin(Z**Y)\")\n        self.functionTest(func, 2)\n\n        # Multiple functions inside hyperbolic function\n        func = lambda x, y, z: math.tanh((x + 30) ** math.cos(y) + z * 0.2)\n        self.setEqnField(\"tanh((X+30) ** cos(Y) + Z*0.2)\")\n        self.functionTest(func, 3)\n\n        # Many sets of nested parentheses\n        func = lambda x, y, z: ((x / (y * z + 1.0)) + 2.5) * 10.2\n        self.setEqnField(\"((X / (Y*Z + 1.0)) + 2.5)*10.2\")\n        self.functionTest(func, 4)\n\n    def test_symbolicOrdop(self):\n        \"\"\"\n        Test order of operations for symbolic equations.\n\n        Five different equations are evaluated that test different components of order precedence. 
For each input, the\n        property is evaluated at 20 evenly spaced points per independent variable within the valid range.\n        \"\"\"\n        # multiplication and division before addition and subtraction\n        func = lambda x, y, z: (x * y) + z\n        self.setEqnField(\"X * Y + Z\")\n        self.functionTest(func, 1)\n\n        func = lambda x, y, z: x + (y * z)\n        self.setEqnField(\"X + Y * Z\")\n        self.functionTest(func, 2)\n\n        # Left to right for same precedence operators\n        func = lambda x, y, z: (x * y) / z\n        self.setEqnField(\"X * Y / Z\")\n        self.functionTest(func, 3)\n\n        # Exponents before multiplication/division\n        func = lambda x, y, z: ((x + 30) ** 1.1) * (y**2)\n        self.setEqnField(\"(X+30) ** 1.1 * Y ** 2\")\n        self.functionTest(func, 4)\n\n        # Parentheses before exponents\n        func = lambda x, y, z: (x + 30) ** (y / 2) - z\n        self.setEqnField(\"(X+30) ** (Y/2) - Z\")\n        self.functionTest(func, 5)\n\n    def test_symbolicWhitespace(self):\n        \"\"\"\n        Test excess whitespace is ignored for symbolic equations.\n\n        Two different equations are evaluated with varying amounts of whitespace introduced to ensure they produce the\n        same results. 
For each input, the property is evaluated at 20 evenly spaced points per independent variable\n        within the valid range of the property.\n        \"\"\"\n        func = lambda x, y, z: x + y + z\n        self.setEqnField(\"           X + Y + Z\")\n        self.functionTest(func, 1)\n\n        self.setEqnField(\" X  +   Y +    Z\")\n        self.functionTest(func, 2)\n\n        self.setEqnField(\"X                + Y + Z\")\n        self.functionTest(func, 3)\n\n        self.setEqnField(\"X              +            Y + Z\")\n        self.functionTest(func, 4)\n\n        self.setEqnField(\"           X + Y + Z\")\n        self.functionTest(func, 5)\n\n        func = lambda x, y, z: math.sin(x) * y + z\n        self.setEqnField(\"sin          (X) * Y + Z\")\n        self.functionTest(func, 6)\n\n        self.setEqnField(\"   sin(     X         ) * Y + Z\")\n        self.functionTest(func, 7)\n\n        self.setEqnField(\"sin(X         )                  * Y +           Z\")\n        self.functionTest(func, 8)\n\n    def test_symbolicIntFloat(self):\n        \"\"\"\n        Test handling of integers and floats for symbolic equations.\n\n        Multiple equations are tested that verify that when integers are used in equations they do not result in integer\n        multiplication and division in Python and are instead treated as floating point numbers. 
For each input, the\n        property is evaluated at 20 evenly spaced points per independent variable within the valid range.\n        \"\"\"\n        func = lambda x, y, z: x / 2.0 + 3.0\n        self.setEqnField(\"X / 2 + 3\")\n        self.functionTest(func, 1)\n\n        self.setEqnField(\"X / 2.0 + 3.0\")\n        self.functionTest(func, 2)\n\n        func = lambda x, y, z: (x + 30) ** (4.0 / 3.0)\n        self.setEqnField(\"(X + 30) ** (4/3)\")\n        self.functionTest(func, 3)\n\n        self.setEqnField(\"(X + 30) ** (4.0/3.0)\")\n        self.functionTest(func, 4)\n\n    def test_symbolicBadParens(self):\n        \"\"\"\n        Test unbalanced parentheses results in errors for symbolic equations.\n\n        Multiple equations are tested that verify that various combinations of unbalanced parentheses are detected and\n        result in an error when parsing the input. Additionally, an expression with extraneous but balanced parentheses\n        is tested for correctness. For that input, the property is evaluated at 20 evenly spaced points per independent\n        variable within the valid range.\n        \"\"\"\n        with self.assertRaises(ValueError):\n            self.setEqnField(\"(X + Y\")\n            self.loadMaterial(num=1)\n\n        with self.assertRaises(ValueError):\n            self.setEqnField(\"((X) + Y\")\n            self.loadMaterial(num=2)\n\n        with self.assertRaises(ValueError):\n            self.setEqnField(\"(X) + Y)\")\n            self.loadMaterial(num=3)\n\n        with self.assertRaises(ValueError):\n            self.setEqnField(\"exp(X\")\n            self.loadMaterial(num=4)\n\n        with self.assertRaises(ValueError):\n            self.setEqnField(\"exp X\")\n            self.loadMaterial(num=5)\n\n        with self.assertRaises(ValueError):\n            self.setEqnField(\"(((((X + Y)))) + (Z)))\")\n            self.loadMaterial(num=6)\n\n        # Test extraneous parentheses as well\n        func = lambda x, y, z: 
x + y + z\n        self.setEqnField(\"(((((X + Y)))) + (Z))\")\n        self.functionTest(func, 7)\n\n    def test_symbolicUndefined(self):\n        \"\"\"\n        Test that undefined functions results in errors for symbolic equations.\n\n        A logarithmic function is evaluated at two points in the valid range to show that the material input is parsed\n        correctly. The function is then evaluated at a value that results in a negative expression inside the logarithm\n        which is undefined.\n        \"\"\"\n        self.setEqnField(\"ln(X)\")\n        mat = self.loadMaterial(num=1)\n        prop = mat.rho\n\n        self.assertAlmostEqual(prop.calc({\"X\": 3, \"Y\": 3, \"Z\": -20}), math.log(3))\n        self.assertAlmostEqual(prop.calc({\"X\": 100, \"Y\": 3, \"Z\": -20}), math.log(100))\n\n        with self.assertRaises(ValueError):\n            prop.calc({\"X\": -5, \"Y\": 3, \"Z\": -20})\n\n    def test_symbolicCaps(self):\n        \"\"\"\n        Test bad capitalization results in errors for symbolic equations.\n\n        Multiple equations are tested that verify that various combinations of capitalization are detected and result in\n        an error when parsing the inputs.\n        \"\"\"\n        with self.assertRaises(ValueError):\n            self.setEqnField(\"x + Y\")\n            self.loadMaterial(num=1)\n\n        with self.assertRaises(ValueError):\n            self.setEqnField(\"TAN(X) + Y\")\n            self.loadMaterial(num=2)\n\n        with self.assertRaises(ValueError):\n            self.setEqnField(\"Tan(X) + Y\")\n            self.loadMaterial(num=3)\n\n        with self.assertRaises(ValueError):\n            self.setEqnField(\"eXP(X) + Y\")\n            self.loadMaterial(num=4)\n\n    def test_symbolicImpmult(self):\n        \"\"\"\n        Test implicit multiplication results in errors for symbolic equations.\n\n        Multiple equations are tested that verify that various combinations of implicit multiplication are detected 
and\n        result in an error when parsing the inputs.\n        \"\"\"\n        with self.assertRaises(ValueError):\n            self.setEqnField(\"2 X\")\n            self.loadMaterial(num=1)\n\n        with self.assertRaises(ValueError):\n            self.setEqnField(\"X 2\")\n            self.loadMaterial(num=2)\n\n        with self.assertRaises(ValueError):\n            self.setEqnField(\"2X\")\n            self.loadMaterial(num=3)\n\n        with self.assertRaises(ValueError):\n            self.setEqnField(\"2(X)\")\n            self.loadMaterial(num=4)\n\n        with self.assertRaises(ValueError):\n            self.setEqnField(\"X(2)\")\n            self.loadMaterial(num=5)\n\n        with self.assertRaises(ValueError):\n            self.setEqnField(\"X (2)\")\n            self.loadMaterial(num=6)\n\n        with self.assertRaises(ValueError):\n            self.setEqnField(\"2 sin(X)\")\n            self.loadMaterial(num=7)\n\n    def test_symbolicVarVar(self):\n        \"\"\"\n        Test repeat variables for symbolic equations.\n\n        Multiple equations are tested that verify that various combinations of repeat variable usage evaluate correctly.\n        For each input, the property is evaluated at 20 evenly spaced points per independent variable within the valid\n        range.\n        \"\"\"\n        func = lambda x, y, z: x * x + y / x + z * x\n        self.setEqnField(\"X * X + Y / X + Z * X\")\n        self.functionTest(func, 1)\n\n        func = lambda x, y, z: math.tan(x * y) + math.cos(x * y) + math.exp(z * y)\n        self.setEqnField(\"tan(X * Y) + cos(X * Y) + exp(Z * Y)\")\n        self.functionTest(func, 2)\n\n    def test_symbolicScientific(self):\n        \"\"\"\n        Test scientific notation for symbolic equations.\n\n        Multiple equations are tested that verify that various combinations of both upper and lower case scientific\n        notation evaluate correctly. 
For each input, the property is evaluated at 20 evenly spaced points per\n        independent variable within the valid range.\n        \"\"\"\n        # Test upper case E\n        func = lambda x, y, z: 3e5 / x\n        self.setEqnField(\"3E5 / X\")\n        self.functionTest(func, 1)\n\n        func = lambda x, y, z: 1.23e-3 * x\n        self.setEqnField(\"1.23E-3 * X\")\n        self.functionTest(func, 2)\n\n        # Test lower case e\n        func = lambda x, y, z: 3e5 / x\n        self.setEqnField(\"3e5 / X\")\n        self.functionTest(func, 3)\n\n        func = lambda x, y, z: 1.23e-3 * x\n        self.setEqnField(\"1.23e-3 * X\")\n        self.functionTest(func, 4)\n\n    def test_symbolicExamples(self):\n        \"\"\"Test a handful of complicated symbolic equations.\"\"\"\n        # example 1\n        func = lambda x, y, z: 10**5 * (\n            (-540 / (1 + math.exp(-0.02 * (x - 220))) + 520) + (-120 / (1 + math.exp(-0.02 * (x - 122))) + 92)\n        )\n        self.setEqnField(\"10**5*((-540/(1+exp(-0.02*(X-220)))+ 520)+ (-120/(1+exp(-0.02*(X-122)))+ 92))\")\n        self.functionTest(func, 1)\n\n        # example 2\n        func = lambda x, y, z: 222.0 + 225.2 * (1 - (x + 273.15) / 2500) + 512.2 * (1 - (x + 273.15) / 2502) ** 0.5\n        self.setEqnField(\"222.0 + 225.2 * (1 - (X + 273.15) / 2500) + 512.2 * (1 - (X + 273.15) / 2502) ** 0.5\")\n        self.functionTest(func, 2)\n\n        # example 3\n        func = lambda x, y, z: (2.2e11 - 7.2e6 * x - 4.2e2 * x**2) * (y / (4.2 - 2.2 * y))\n        self.setEqnField(\"(2.2E11 - 7.2E6 * X - 4.2E2 * X**2) * (Y / (4.2 - 2.2 * Y))\")\n        self.functionTest(func, 3)\n\n    def test_symbolicBadparse(self):\n        \"\"\"Test incorrect expressions results in errors for symbolic equations.\"\"\"\n        # Not a math equation\n        self.setEqnField(\"Not an equation\")\n        with self.assertRaises(ValueError):\n            self.loadMaterial(num=1)\n\n        # Unknown variable\n        
self.setEqnField(\"X + Y + W\")\n        with self.assertRaises(ValueError):\n            self.loadMaterial(num=2)\n\n        # Missing an operator\n        self.setEqnField(\"X Y\")\n        with self.assertRaises(ValueError):\n            self.loadMaterial(num=3)\n\n        # Missing equation field\n        del self.yaml[\"density\"][\"function\"][\"equation\"]\n        with self.assertRaises(KeyError):\n            self.loadMaterial(num=4)\n\n    def test_pickleSymbolicFunction(self):\n        \"\"\"Downstream usages might need to pickle a material. Ensure symbolic expression can be pickled.\"\"\"\n        self.setEqnField(\"X + Y\")\n        mat = self.loadMaterial()\n        stream = pickle.dumps(mat)\n        mat2 = pickle.loads(stream)\n\n        self.assertEqual(mat.rho.getMinBound(\"X\"), mat2.rho.getMinBound(\"X\"))\n        self.assertEqual(\n            mat.rho.calc({\"X\": 0.0, \"Y\": 10, \"Z\": -10}),\n            mat2.rho.calc({\"X\": 0.0, \"Y\": 10, \"Z\": -10}),\n        )\n        self.assertEqual(\n            mat.rho.calc({\"X\": 300.0, \"Y\": 15, \"Z\": -10}),\n            mat2.rho.calc({\"X\": 300.0, \"Y\": 15, \"Z\": -10}),\n        )\n\n    def test_numpyEvals(self):\n        \"\"\"Test that numpy floats and integers work in evaluations same as integers and floats.\"\"\"\n        self.setEqnField(\"X * 2.0\")\n        mat = self.loadMaterial()\n\n        func = lambda x: x * 2\n\n        self.assertAlmostEqual(mat.rho.calc(X=np.float64(10), Y=5.0, Z=-10.0), func(10))\n        self.assertAlmostEqual(mat.rho.calc(X=np.int64(10), Y=5.0, Z=-10.0), func(10))\n\n    def test_largeExponentials(self):\n        \"\"\"Test that exponentials don't overflow.\"\"\"\n        # If sympy is allowed to simplify this expression it will try to evaluate e^-1400 which will overflow. 
The\n        # remainder of the values are chosen just to get a reasonable magnitude expression based on the min/max bounds\n        # for X/Y.\n        self.setEqnField(\"exp(-1400.0 + 2.6*(X*0.1+30*Y))\")\n        mat = self.loadMaterial()\n\n        func = lambda x, y: math.exp(-1400 + 2.6 * (x * 0.1 + 30 * y))\n\n        self.assertAlmostEqual(mat.rho.calc(X=300, Y=5.0, Z=-10.0), func(300, 5))\n\n    def test_symbolicOutofbounds(self):\n        \"\"\"Test evaluation outside of bounds results in ValueError for symbolic equations.\"\"\"\n        mat = self.loadMaterial()\n        prop = mat.rho\n\n        mins = [prop.getMinBound(var) for var in [\"X\", \"Y\", \"Z\"]]\n        maxs = [prop.getMaxBound(var) for var in [\"X\", \"Y\", \"Z\"]]\n\n        for i in range(3):\n            minsEdited = copy.copy(mins)\n            maxsEdited = copy.copy(maxs)\n\n            minsEdited[i] -= 0.1\n            maxsEdited[i] += 0.1\n            with self.assertRaises(ValueError):\n                prop.calc({\"X\": minsEdited[0], \"Y\": minsEdited[1], \"Z\": minsEdited[2]})\n            with self.assertRaises(ValueError):\n                prop.calc({\"X\": maxsEdited[0], \"Y\": maxsEdited[1], \"Z\": maxsEdited[2]})\n\n\nclass TestBrokenSymbolicFunctions(unittest.TestCase):\n    def test_complexNumbers(self):\n        yaml = {\n            \"file format\": \"TESTS\",\n            \"material type\": \"Metal\",\n            \"composition\": {\"a\": \"balance\"},\n            \"density\": {\n                \"function\": {\n                    \"type\": \"symbolic\",\n                    \"X\": {\"min\": -10, \"max\": 500.0},\n                    \"Y\": {\"min\": 1.0, \"max\": 20.0},\n                    \"Z\": {\"min\": -30.0, \"max\": -10.0},\n                    \"equation\": 1.0,\n                }\n            },\n        }\n\n        mat = Material()\n        mat.loadNode(yaml)\n\n        # stomp all over the equation, to force it to return a complex number\n        
mat.rho.eqn = eval(\"lambda x, y, z: 1.0 + 2.0j\")\n\n        with self.assertRaises(ValueError):\n            mat.rho._calcSpecific({\"X\": 1, \"Y\": 2, \"Z\": -20})\n\n    def test_isNan(self):\n        yaml = {\n            \"file format\": \"TESTS\",\n            \"material type\": \"Metal\",\n            \"composition\": {\"a\": \"balance\"},\n            \"density\": {\n                \"function\": {\n                    \"type\": \"symbolic\",\n                    \"X\": {\"min\": -10, \"max\": 500.0},\n                    \"Y\": {\"min\": 1.0, \"max\": 20.0},\n                    \"Z\": {\"min\": -30.0, \"max\": -10.0},\n                    \"equation\": 1.0,\n                }\n            },\n        }\n\n        mat = Material()\n        mat.loadNode(yaml)\n\n        # stomp all over the equation, to force it to return a complex number\n        mat.rho.eqn = eval(\"lambda x, y, z: math.nan\")\n\n        with self.assertRaises(ValueError):\n            mat.rho._calcSpecific({\"X\": 1, \"Y\": 2, \"Z\": -20})\n"
  },
  {
    "path": "armi/matProps/tests/test_tableFunctions.py",
    "content": "# Copyright 2026 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests 1D and 2D table Functions.\"\"\"\n\nimport numpy as np\n\nfrom armi.matProps.tableFunction2D import TableFunction2D\nfrom armi.matProps.tests import MatPropsFunTestBase\n\n\nclass TestTableFunctions(MatPropsFunTestBase):\n    \"\"\"Tests 1D and 2D table Functions.\"\"\"\n\n    @classmethod\n    def setUpClass(cls):\n        super().setUpClass()\n\n        cls.baseOneDimTableData = {\"type\": \"table\", \"T\": 0}\n        cls.baseOneDimTable = [[0.0, 5.0], [100.0, 105.0]]\n\n        cls.baseTwoDimTableData = {\n            \"type\": \"two dimensional table\",\n            \"T\": 0,\n            \"t\": 1,\n        }\n        cls.baseTwoDimTable = [\n            [None, [2.0, 200.0, 632.4555]],\n            [1.0, [10.0, 208.0, 640.4555]],\n            [100.0, [110.0, 308.0, 740.4555]],\n            [316.2278, [135, 333, 765.4555]],\n        ]\n\n    def test_interpolation1Dtable(self):\n        \"\"\"Test interpolation for a two-point one-dimensional table.\"\"\"\n        mat = self._createFunction(self.baseOneDimTableData, self.baseOneDimTable)\n        mat.name = self.testName\n        self.assertEqual(str(mat), f\"<Material {self.testName} <MaterialType Metal>>\")\n        func = mat.rho\n        self.assertIn(\"TableFunction1D\", str(func))\n        for index in range(9):\n            val = float(index) * 12.5\n            
self.assertAlmostEqual(func.calc({\"T\": np.float64(val)}), 5.0 + val)\n            self.assertAlmostEqual(func.calc({\"T\": val}), 5.0 + val)\n\n        # directly check error is correctly raised if the variable is unknown\n        with self.assertRaises(ValueError):\n            func._calcSpecific({\"X\": 1})\n\n    def test_interpolation1DtableMissnode(self):\n        \"\"\"Test to make sure a KeyError is thrown if 'tabulated data' node is absent.\"\"\"\n        with self.assertRaisesRegex(KeyError, \"tabulated data\"):\n            self._createFunctionWithoutTable(self.baseOneDimTableData)\n\n    def test_interpolation1Dtable2(self):\n        \"\"\"Test interpolation for a many-point one-dimensional table.\"\"\"\n        data = {\"type\": \"table\", \"T\": {\"min\": 900, \"max\": 250}}\n        tableData = [\n            [250, 25.68],\n            [300, 25.97],\n            [400, 26.28],\n            [500, 26.26],\n            [600, 25.89],\n            [700, 25.19],\n            [759.7, 24.61],\n            [800, 25.10],\n            [900, 26.32],\n        ]\n\n        mat = self._createFunction(data, tableData)\n        mat.name = self.testName\n        self.assertEqual(str(mat), f\"<Material {self.testName} <MaterialType Metal>>\")\n        self.assertAlmostEqual(mat.rho.calc(T=250), 25.68)\n        self.assertAlmostEqual(mat.rho.calc(T=275), 25.825)\n        self.assertAlmostEqual(mat.rho.calc(T=500), 26.26)\n        self.assertAlmostEqual(mat.rho.calc(T=512.5), 26.21375)\n        self.assertAlmostEqual(mat.rho.calc(T=729.7), 24.9014572864322)\n        self.assertAlmostEqual(mat.rho.calc(T=759.7), 24.61)\n\n        with self.assertRaises(ValueError):\n            mat.rho.calc(T=999)\n\n        # bonus test of method to clear table data\n        self.assertIsNotNone(mat.rho.tableData)\n        mat.rho.clear()\n        self.assertIsNone(mat.rho.tableData)\n\n    def test_interpolation1DtableInt(self):\n        \"\"\"Test interpolation for one-dimensional 
tables with all integer values.\"\"\"\n        tableData = [\n            [250, 5],\n            [300, 6],\n            [400, 7],\n            [500, 8],\n            [600, 9],\n            [700, 10],\n            [800, 11],\n            [900, 12],\n        ]\n\n        mat = self._createFunction(self.baseOneDimTableData, tableData, minT=250, maxT=900)\n        mat.name = self.testName\n        self.assertEqual(str(mat), f\"<Material {self.testName} <MaterialType Metal>>\")\n        self.assertAlmostEqual(mat.rho.calc(T=275), 5.5)\n        self.assertAlmostEqual(mat.rho.calc(T=312.5), 6.125)\n\n    def test_interpolationTable2D(self):\n        \"\"\"Test that evaluates TableFunction2D for different combinations of integer and floating values.\"\"\"\n        mat = self._createFunction(self.baseTwoDimTableData, self.baseTwoDimTable)\n        mat.name = self.testName\n        self.assertEqual(str(mat), f\"<Material {self.testName} <MaterialType Metal>>\")\n        func = mat.rho\n        self.assertIn(\"TableFunction2D\", str(func))\n        self.assertAlmostEqual(func.calc({\"T\": 2, \"t\": 1}), 10)\n        self.assertAlmostEqual(func.calc({\"T\": 2, \"t\": 100.0}), 110)\n        self.assertAlmostEqual(func.calc({\"T\": 200, \"t\": 1}), 208)\n        self.assertAlmostEqual(func.calc({\"T\": 200, \"t\": 100}), 308)\n        self.assertAlmostEqual(func.calc({\"T\": 100, \"t\": 1}), 108)\n        self.assertAlmostEqual(func.calc({\"T\": 100, \"t\": 100}), 208)\n        self.assertAlmostEqual(func.calc({\"T\": 2, \"t\": 10}), 60)\n        self.assertAlmostEqual(func.calc({\"T\": 100, \"t\": 10}), 158)\n        self.assertAlmostEqual(func.calc({\"T\": 2, \"t\": 316.2278}), 135)\n        self.assertAlmostEqual(func.calc({\"T\": 632.4555, \"t\": 1}), 640.4555)\n        self.assertAlmostEqual(func.calc({\"T\": 200, \"t\": 316.2278}), 333)\n        self.assertAlmostEqual(func.calc({\"T\": 632.4555, \"t\": 100}), 740.4555)\n        self.assertAlmostEqual(func.calc({\"T\": 
632.4555, \"t\": 316.2278}), 765.4555)\n        self.assertAlmostEqual(func.calc({\"T\": 200, \"t\": 177.828}), 320.500006)\n        self.assertAlmostEqual(func.calc({\"T\": 355.6559, \"t\": 100}), 463.6559)\n        self.assertAlmostEqual(func.calc({\"T\": 355.6559, \"t\": 177.828}), 476.155906)\n\n    def test_interpolationTable2DMissNode(self):\n        \"\"\"Test to make sure TableFunction2D throws a KeyError if 'tabulated data' node is absent.\"\"\"\n        with self.assertRaisesRegex(KeyError, \"tabulated data\"):\n            self._createFunctionWithoutTable(self.baseTwoDimTableData)\n\n    def test_inputCheckTable2Doutbounds(self):\n        \"\"\"Ensure a ValueError is thrown when evaluating out of the valid bounds.\"\"\"\n        mat = self._createFunction(self.baseTwoDimTableData, self.baseTwoDimTable)\n        func = mat.rho\n        with self.assertRaises(ValueError):\n            func.calc({\"T\": 1.99, \"t\": 1.0})\n\n        with self.assertRaises(ValueError):\n            func.calc({\"T\": 632.4655, \"t\": 1.0})\n\n        with self.assertRaises(ValueError):\n            func.calc({\"T\": 2.0, \"t\": 0.99})\n\n        with self.assertRaises(ValueError):\n            func.calc({\"T\": 2.0, \"t\": 316.2378})\n\n    def test_inputCheckTableMinVar(self):\n        \"\"\"Test to make sure an error is raised when attempting to evaluate below the valid range.\"\"\"\n        self.belowMinimumCheck(self.baseOneDimTableData, self.baseOneDimTable)\n\n    def test_inputCheckTableMaxVar(self):\n        \"\"\"Test to make sure an error is raised when attempting to evaluate above the valid range.\"\"\"\n        self.aboveMaximumCheck(self.baseOneDimTableData, self.baseOneDimTable)\n\n    def test_inputCheckTable2DMinVar1(self):\n        \"\"\"Test to make sure an error is raised when attempting to evaluate below the valid range.\"\"\"\n        mat = self._createFunction(self.baseTwoDimTableData, self.baseTwoDimTable)\n        func = mat.rho\n        with 
self.assertRaises(ValueError):\n            func.calc({\"T\": 1, \"t\": 50})\n\n    def test_inputCheckTable2DMaxVar1(self):\n        \"\"\"Test to make sure an error is raised when attempting to evaluate above the valid range.\"\"\"\n        mat = self._createFunction(self.baseTwoDimTableData, self.baseTwoDimTable)\n        func = mat.rho\n        with self.assertRaises(ValueError):\n            func.calc({\"T\": 650, \"t\": 50})\n\n    def test_inputCheckTable2DMinVar2(self):\n        \"\"\"Ensure an ValueError is raised when evaluating below the valid range.\"\"\"\n        mat = self._createFunction(self.baseTwoDimTableData, self.baseTwoDimTable)\n        func = mat.rho\n        with self.assertRaises(ValueError):\n            func.calc({\"T\": 1, \"t\": 0})\n\n    def test_table2DsetBounds(self):\n        mat = self._createFunction(self.baseTwoDimTableData, self.baseTwoDimTable)\n        fun = mat.rho\n\n        # staring values\n        self.assertEqual(fun.independentVars[\"T\"], (2.0, 632.4555))\n        self.assertEqual(fun.independentVars[\"t\"], (1.0, 316.2278))\n\n        # calling _setBounds will wipe out the \"t\" variable, but not update \"T\"\n        fun._columnValues = [123, 987]\n        fun._setBounds(0, \"T\")\n        self.assertEqual(fun.independentVars[\"T\"], (2.0, 632.4555))\n        with self.assertRaises(KeyError):\n            fun.independentVars[\"t\"]\n\n        # Here we update \"T\" with new column values\n        fun._columnValues = [123, 987]\n        fun._setBounds(0, \"X\")\n        self.assertEqual(fun.independentVars[\"X\"], (123.0, 987.0))\n\n        # Here we update the new variable \"X\" with new row values\n        fun._rowValues = [11, 99]\n        fun._setBounds(1, \"X\")\n        self.assertEqual(fun.independentVars[\"T\"], (2.0, 632.4555))\n        self.assertEqual(fun.independentVars[\"X\"], (11.0, 99.0))\n        with self.assertRaises(KeyError):\n            fun.independentVars[\"t\"]\n\n        # Bad inputs\n        
with self.assertRaises(ValueError):\n            fun._setBounds(2, \"X\")\n\n    def test_inputCheckTable2DMaxVar2(self):\n        \"\"\"Ensure an ValueError is raised when evaluating above the valid range.\"\"\"\n        mat = self._createFunction(self.baseTwoDimTableData, self.baseTwoDimTable)\n        func = mat.rho\n        with self.assertRaises(ValueError):\n            func.calc({\"T\": 1, \"t\": 1000})\n\n    def test_calcSpec2dEdgeCase(self):\n        f = TableFunction2D(\"mat\", \"prop\")\n        f.independentVars = {\"T\": (250.0, 800.0), \"t\": (1, 3)}\n\n        # This should fail correctly when given a bad input param\n        with self.assertRaises(ValueError):\n            f._calcSpecific({\"Pa\": 1.0})\n"
  },
  {
    "path": "armi/materials/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThe material package defines compositions and material-specific properties.\n\nProperties in scope include temperature dependent thermo/mechanical properties\n(like heat capacity, linear expansion coefficients, viscosity, density),\nand material-specific nuclear properties that can't exist at the nuclide level\nalone (like :py:mod:`thermal scattering laws <armi.nucDirectory.thermalScattering>`).\n\nAs the fundamental macroscopic building blocks of any physical object,\nthese are highly important to reactor analysis.\n\nThis module handles the dynamic importing of all the materials defined here at the\nframework level as well as in all the attached plugins. 
It is expected that most teams\nwill have special material definitions that they will want to define.\n\nIt may also make sense in the future to support user-input materials that are not\nhard-coded into the app.\n\nThe base class for all materials is in :py:mod:`armi.materials.material`.\n\"\"\"\n\nimport importlib\nimport inspect\nimport pkgutil\nfrom typing import List\n\nfrom armi.materials.material import Material\n\n# This will frequently be updated by the CONF_MATERIAL_NAMESPACE_ORDER setting\n# during reactor construction (see armi.reactor.reactors.factory).\n_MATERIAL_NAMESPACE_ORDER = [\"armi.materials\"]\n\n\ndef setMaterialNamespaceOrder(order):\n    \"\"\"\n    Set the material namespace order at the Python interpreter, global level.\n\n    .. impl:: Material collections are defined with an order of precedence in the case\n        of duplicates.\n        :id: I_ARMI_MAT_ORDER\n        :implements: R_ARMI_MAT_ORDER\n\n        An ARMI application will need materials. Materials can be imported from\n        any code the application has access to, like plugin packages. This leads to\n        the situation where one ARMI application will want to import multiple\n        collections of materials. To handle this, ARMI keeps a list of material\n        namespaces. This is an ordered list of importable packages that ARMI\n        can search for a particular material by name.\n\n        This automatic exploration of an importable package saves the user the\n        tedium have having to import or include hundreds of materials manually somehow.\n        But it comes with a caveat; the list is ordered. If two different namespaces in\n        the list include a material with the same name, the first one found in the list\n        is chosen, i.e. 
earlier namespaces in the list have precedence.\n    \"\"\"\n    global _MATERIAL_NAMESPACE_ORDER\n    _MATERIAL_NAMESPACE_ORDER = order\n\n\ndef importMaterialsIntoModuleNamespace(path, name, namespace, updateSource=None):\n    \"\"\"\n    Import all Material subclasses into the top subpackage.\n\n    This allows devs to use ``from armi.materials import HT9``\n\n    This can be used in plugins for similar purposes.\n\n    .. warning::\n        Do not directly import materials from this namespace in code. Use the full module\n        import instead. This is just for material resolution. This will be replaced with a more\n        formal material registry in the future.\n\n    Parameters\n    ----------\n    path : str\n        Path to package/module being imported\n    name : str\n        module name\n    namespace : dict\n        The namespace\n    updateSource : str, optional\n        Change DATA_SOURCE on import to a different string.\n        Useful for saying where plugin materials are coming from.\n    \"\"\"\n    for _modImporter, modname, _ispkg in pkgutil.walk_packages(path=path, prefix=name + \".\"):\n        if \"test\" not in modname:\n            mod = importlib.import_module(modname)\n            for item, obj in mod.__dict__.items():\n                try:\n                    if issubclass(obj, Material):\n                        namespace[item] = obj\n                        if updateSource:\n                            obj.DATA_SOURCE = updateSource\n                except TypeError:\n                    # some non-class local\n                    pass\n\n\nimportMaterialsIntoModuleNamespace(__path__, __name__, globals())\n\n\ndef iterAllMaterialClassesInNamespace(namespace):\n    \"\"\"\n    Iterate over all Material subclasses found in a namespace.\n\n    Notes\n    -----\n    Useful for testing.\n    \"\"\"\n    for obj in namespace.__dict__.values():\n        if inspect.isclass(obj):\n            if issubclass(obj, Material):\n                
yield obj\n\n\ndef resolveMaterialClassByName(name: str, namespaceOrder: List[str] = None):\n    \"\"\"\n    Find the first material class that matches a name in an ordered namespace.\n\n    Names can either be fully resolved class paths (e.g. ``armi.materials.uZr:UZr``)\n    or simple class names (e.g. ``UZr``). In the latter case, the\n    ``CONF_MATERIAL_NAMESPACE_ORDER`` setting to allows users to choose which\n    particular material of a common name (like UO2 or HT9) gets used.\n\n    Input files usually specify a material like UO2. Which particular implementation\n    gets used (Framework's UO2 vs. a user plugins UO2 vs. the Kentucky Transportation\n    Cabinet's UO2) is up to the user at runtime.\n\n    .. impl:: Materials can be searched across packages in a defined namespace.\n        :id: I_ARMI_MAT_NAMESPACE\n        :implements: R_ARMI_MAT_NAMESPACE\n\n        During the runtime of an ARMI application, but particularly during the\n        construction of the reactor in memory, materials will be requested by name. At\n        that point, this code is called to search for that material name. The search\n        goes through the ordered list of Python namespaces provided. The first time an\n        instance of that material is found, it is returned. In this way, the first\n        items in the material namespace list take precedence.\n\n        When a material name is passed to this function, it may be either a simple\n        name like the string ``\"UO2\"`` or it may be much more specific, like\n        ``armi.materials.uraniumOxide:UO2``.\n\n    Parameters\n    ----------\n    name : str\n        The material class name to find, e.g. ``\"UO2\"``. Optionally, a module path\n        and class name can be provided with a colon separator as ``module:className``,\n        e.g. 
``armi.materials.uraniumOxide:UO2`` for direct specification.\n    namespaceOrder : list of str, optional\n        A list of namespaces in order of preference in which to search for the\n        material. If not passed, the value in the global ``MATERIAL_NAMESPACE_ORDER``\n        will be used, which is often set by the ``CONF_MATERIAL_NAMESPACE_ORDER``\n        setting (e.g. during reactor construction). Any value passed into this argument\n        will be ignored if the ``name`` is provided with a ``modulePath``.\n\n    Returns\n    -------\n    matCls : armi.materials.material.Material\n        The material\n\n    Raises\n    ------\n    KeyError\n        When material of name cannot be found in namespaces.\n\n    Examples\n    --------\n    >>> resolveMaterialClassByName(\"UO2\", [\"something.else.materials\", \"armi.materials\"])\n    <class 'something.else.materials.UO2'>\n\n    See Also\n    --------\n    armi.reactor.reactors.factory\n        Applies user settings to default namespace order.\n    \"\"\"\n    if \":\" in name:\n        # assume direct package path like `armi.materials.uZr:UZr`\n        modPath, clsName = name.split(\":\")\n        mod = importlib.import_module(modPath)\n        return getattr(mod, clsName)\n\n    namespaceOrder = namespaceOrder or _MATERIAL_NAMESPACE_ORDER\n    for namespace in namespaceOrder:\n        mod = importlib.import_module(namespace)\n        if hasattr(mod, name):\n            return getattr(mod, name)\n\n    raise KeyError(\n        f\"Cannot find material named `{name}` in any of: {str(namespaceOrder)}. \"\n        \"Please update inputs or plugins. See CONF_MATERIAL_NAMESPACE_ORDER setting.\"\n    )\n"
  },
  {
    "path": "armi/materials/air.py",
    "content": "# Copyright 2022 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Simple air material.\n\nThe data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to\nthis file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data\ncontained in this file should not be used in production simulations.\n\"\"\"\n\nfrom armi.materials import material\nfrom armi.utils.units import G_PER_CM3_TO_KG_PER_M3, getTk\n\n\nclass Air(material.Fluid):\n    \"\"\"\n    Dry, Near Sea Level.\n\n    Correlations based off of values in Incropera, Frank P., et al.\n    Fundamentals of heat and mass transfer. Vol. 5. New York: Wiley, 2002.\n\n    Elemental composition from PNNL-15870 Rev. 1\n            https://www.pnnl.gov/main/publications/external/technical_reports/PNNL-15870Rev1.pdf\n    \"\"\"\n\n    \"\"\"\n    temperature ranges based on where values are more than 1% off of reference\n    \"\"\"\n    propertyValidTemperature = {\n        \"pseudoDensity\": ((100, 2400), \"K\"),\n        \"heat capacity\": ((100, 1300), \"K\"),\n        \"thermal conductivity\": ((200, 850), \"K\"),\n    }\n\n    def setDefaultMassFracs(self):\n        \"\"\"\n        Set mass fractions.\n\n        Notes\n        -----\n        Mass fraction reference McConn, Ronald J., et al. Compendium of\n        material composition data for radiation transport modeling. 
No.\n        PNNL-15870 Rev. 1. Pacific Northwest National Lab.(PNNL), Richland,\n        WA (United States), 2011.\n\n        https://www.pnnl.gov/main/publications/external/technical_reports/PNNL-15870Rev1.pdf\n        \"\"\"\n        self.setMassFrac(\"C\", 0.000124)\n        self.setMassFrac(\"N\", 0.755268)\n        self.setMassFrac(\"O\", 0.231781)\n        self.setMassFrac(\"AR\", 0.012827)\n\n    def pseudoDensity(\n        self,\n        Tk=None,\n        Tc=None,\n    ):\n        \"\"\"\n        Returns density of Air in g/cc.\n\n        This is from Table A.4 in\n        Fundamentals of Heat and Mass Transfer Incropera, DeWitt\n\n        Parameters\n        ----------\n        Tk : float, optional\n            temperature in degrees Kelvin\n        Tc : float, optional\n            temperature in degrees Celsius\n\n        Notes\n        -----\n        In ARMI, we define pseudoDensity() and density() as the same for Fluids.\n\n        Returns\n        -------\n        density : float\n            mass density in g/cc\n        \"\"\"\n        Tk = getTk(Tc, Tk)\n        self.checkPropertyTempRange(\"pseudoDensity\", Tk)\n        inv_Tk = 1.0 / Tk\n        rho_kgPerM3 = 1.15675e03 * inv_Tk**2 + 3.43413e02 * inv_Tk + 2.99731e-03\n        return rho_kgPerM3 / G_PER_CM3_TO_KG_PER_M3\n\n    def specificVolumeLiquid(self, Tk=None, Tc=None):\n        \"\"\"Returns the liquid specific volume in m^3/kg of this material given Tk in K or Tc in C.\"\"\"\n        return 1 / (1000.0 * self.pseudoDensity(Tk, Tc))\n\n    def thermalConductivity(self, Tk=None, Tc=None):\n        \"\"\"\n        Returns thermal conductivity of Air in g/cc.\n\n        This is from Table A.4 in Fundamentals of Heat and Mass Transfer\n        Incropera, DeWitt\n\n        Parameters\n        ----------\n        Tk : float, optional\n            temperature in degrees Kelvin\n        Tc : float, optional\n            temperature in degrees Celsius\n\n        Returns\n        -------\n        
thermalConductivity : float\n            thermal conductivity in W/m*K\n        \"\"\"\n        Tk = getTk(Tc, Tk)\n        self.checkPropertyTempRange(\"thermal conductivity\", Tk)\n        thermalConductivity = 2.13014e-08 * Tk**3 - 6.31916e-05 * Tk**2 + 1.11629e-01 * Tk - 2.00043e00\n        return thermalConductivity * 1e-3\n\n    def heatCapacity(self, Tk=None, Tc=None):\n        \"\"\"\n        Returns heat capacity of Air in g/cc.\n\n        This is from Table A.4 in Fundamentals of Heat and Mass Transfer\n        Incropera, DeWitt\n\n        Parameters\n        ----------\n        Tk : float, optional\n            temperature in degrees Kelvin\n        Tc : float, optional\n            temperature in degrees Celsius\n\n        Returns\n        -------\n        heatCapacity : float\n            heat capacity in J/kg*K\n        \"\"\"\n        Tk = getTk(Tc, Tk)\n        self.checkPropertyTempRange(\"heat capacity\", Tk)\n        return (\n            sum(\n                [\n                    +1.38642e-13 * Tk**4,\n                    -6.47481e-10 * Tk**3,\n                    +1.02345e-06 * Tk**2,\n                    -4.32829e-04 * Tk,\n                    +1.06133e00,\n                ]\n            )\n            * 1000.0\n        )  # kJ / kg K to J / kg K\n"
  },
  {
    "path": "armi/materials/alloy200.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Alloy-200 are wrought commercially pure nickel.\n\nThe data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to\nthis file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data\ncontained in this file should not be used in production simulations.\n\"\"\"\n\nfrom numpy import interp\n\nfrom armi.materials.material import Material\nfrom armi.utils.units import getTk\n\n\nclass Alloy200(Material):\n    references = {\n        \"linearExpansion\": [\n            \"Alloy 200/201 Data Sheet http://www.jacquet.biz/JACQUET/USA/files/JCQusa-alloy-200-201.pdf\"\n        ],\n        \"refDens\": [\"Alloy 200/201 Data Sheet http://www.jacquet.biz/JACQUET/USA/files/JCQusa-alloy-200-201.pdf\"],\n        \"referenceMaxPercentImpurites\": [\n            \"Alloy 200/201 Data Sheet http://www.jacquet.biz/JACQUET/USA/files/JCQusa-alloy-200-201.pdf\"\n        ],\n    }\n\n    modelConst = {\n        \"a0\": 1.21620e-5,\n        \"a1\": 8.30010e-9,\n        \"a2\": -3.94985e-12,\n        \"TRefa\": 20,  # Constants for thermal expansion\n    }\n\n    propertyValidTemperature = {\"linear expansion\": ((73.15, 1273.15), \"K\")}\n\n    referenceMaxPercentImpurites = [\n        (\"C\", 0.15),\n        (\"MN\", 0.35),\n        (\"S\", 0.01),\n        (\"SI\", 0.35),\n        
(\"CU\", 0.25),\n        (\"FE\", 0.40),\n    ]\n\n    linearExpansionTableK = [\n        73.15,\n        173.15,\n        373.15,\n        473.15,\n        573.15,\n        673.15,\n        773.15,\n        873.15,\n        973.15,\n        1073.15,\n        1173.15,\n        1273.15,\n    ]\n\n    linearExpansionTable = [\n        10.1e-6,\n        11.3e-6,\n        13.3e-6,\n        13.9e-6,\n        14.3e-6,\n        14.8e-6,\n        15.2e-6,\n        15.6e-6,\n        15.8e-6,\n        16.2e-6,\n        16.5e-6,\n        16.7e-6,\n    ]\n\n    def linearExpansion(self, Tk=None, Tc=None):\n        r\"\"\"\n        Returns instantaneous coefficient of thermal expansion of Alloy 200.\n\n        Parameters\n        ----------\n        Tk : float, optional\n            temperature in degrees Kelvin\n        Tc : float, optional\n            temperature in degrees Celsius\n\n        Returns\n        -------\n        linearExpansion : float\n            instantaneous coefficient of thermal expansion of Alloy 200 (1/C)\n        \"\"\"\n        Tk = getTk(Tc, Tk)\n        self.checkPropertyTempRange(\"linear expansion\", Tk)\n\n        return interp(Tk, self.linearExpansionTableK, self.linearExpansionTable)\n\n    def setDefaultMassFracs(self):\n        \"\"\"\n        Notes\n        -----\n        It is assumed half the max composition for the impurities and the rest is Ni.\n        \"\"\"\n        nickleMassFrac = 1.0\n\n        for elementSymbol, massFrac in self.referenceMaxPercentImpurites:\n            assumedMassFrac = massFrac * 0.01 / 2.0\n            self.setMassFrac(elementSymbol, assumedMassFrac)\n            nickleMassFrac -= assumedMassFrac\n\n        self.setMassFrac(\"NI\", nickleMassFrac)\n        self.refDens = 8.9\n"
  },
  {
    "path": "armi/materials/b4c.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nBoron carbide; a very typical reactor control material.\n\nNote that this material defaults to a theoretical density fraction of 0.9, reflecting the difficulty of producing B4C at\n100% theoretical density in real life. To get different fraction, use the `TD_frac` material modification in your\nassembly definition.\n\nThe data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to\nthis file for a fully worked example of an ARMI material. And this material has proven useful for testing. 
The data\ncontained in this file should not be used in production simulations.\n\"\"\"\n\nfrom armi import runLog\nfrom armi.materials import material\nfrom armi.utils.units import getTc\n\n\nclass B4C(material.Material):\n    DEFAULT_MASS_DENSITY = 2.52\n    DEFAULT_THEORETICAL_DENSITY_FRAC = 0.90\n    enrichedNuclide = \"B10\"\n    NATURAL_B10_NUM_FRAC = 0.199\n    propertyValidTemperature = {\"linear expansion percent\": ((25, 600), \"C\")}\n\n    def __init__(self):\n        self.b10NumFrac = self.NATURAL_B10_NUM_FRAC\n        super().__init__()\n\n    def applyInputParams(self, B10_wt_frac=None, theoretical_density=None, TD_frac=None, *args, **kwargs):\n        if B10_wt_frac is not None:\n            # we can't just use the generic enrichment adjustment here because the\n            # carbon has to change with enrich.\n            self.adjustMassEnrichment(B10_wt_frac)\n        if theoretical_density is not None:\n            runLog.warning(\n                \"The 'theoretical_density' material modification for B4C will be \"\n                \"deprecated. Update your inputs to use 'TD_frac' instead.\",\n                single=True,\n            )\n            if TD_frac is not None:\n                runLog.warning(\n                    f\"Both 'theoretical_density' and 'TD_frac' are specified for {self}. 
'TD_frac' will be used.\"\n                )\n            else:\n                self.updateTD(theoretical_density)\n        if TD_frac is not None:\n            self.updateTD(TD_frac)\n\n    def updateTD(self, td: float) -> None:\n        self.theoreticalDensityFrac = td\n        self.clearCache()\n\n    def setNewMassFracsFromMassEnrich(self, massEnrichment):\n        r\"\"\"\n        Calculate the mass fractions for a given  mass enrichment and set it on any parent.\n\n        Parameters\n        ----------\n        massEnrichment : float\n            The mass enrichment as a fraction.\n\n        Returns\n        -------\n        boron10MassGrams, boron11MassGrams, carbonMassGrams : float\n            The resulting mass of each nuclide/element\n\n        Notes\n        -----\n        B-10: 10.012 g/mol\n        B-11: 11.009 g/mol\n        Carbon:  12.0107 g/mol\n\n        4 moles of boron/1 mole of carbon\n\n        grams of boron-10 = 10.012 g/mol* 4 mol * 0.199   =  7.969552 g\n        grams of boron-11 = 11.009 g/mol* 4 mol * 0.801   = 35.272836 g\n        grams of carbon= 12.0107 g/mol * 1 mol = 12.0107 g\n\n        from number enrichment mi:\n        mB10 = nB10*AB10 /(nB10*AB10 + nB11*AB11)\n        \"\"\"\n        if massEnrichment < 0 or massEnrichment > 1:\n            raise ValueError(f\"massEnrichment {massEnrichment} is unphysical for B4C\")\n\n        nb = self.parent.nuclideBases if self.parent else None\n        if nb is None:\n            b10AtomicMass = 10.01293728\n            b11AtomicMass = 11.0093054803\n            cAtomicMass = 12.011137118560828\n        else:\n            b10AtomicMass = nb.byName[\"B10\"].weight\n            b11AtomicMass = nb.byName[\"B11\"].weight\n            cAtomicMass = nb.byName[\"C\"].weight\n\n        b10NumEnrich = (massEnrichment / b10AtomicMass) / (\n            massEnrichment / b10AtomicMass + (1 - massEnrichment) / b11AtomicMass\n        )\n        b11NumEnrich = 1.0 - b10NumEnrich\n\n        
boron10MassGrams = b10AtomicMass * b10NumEnrich * 4.0\n        boron11MassGrams = b11AtomicMass * b11NumEnrich * 4.0\n        carbonMassGrams = cAtomicMass\n\n        gTotal = boron10MassGrams + boron11MassGrams + carbonMassGrams\n\n        boron10MassGrams /= gTotal\n        boron11MassGrams /= gTotal\n        carbonMassGrams /= gTotal\n        if self.parent:\n            self.parent.setMassFracs({\"B10\": boron10MassGrams, \"B11\": boron11MassGrams, \"C\": carbonMassGrams})\n\n        return boron10MassGrams, boron11MassGrams, carbonMassGrams\n\n    def setDefaultMassFracs(self) -> None:\n        r\"\"\"B4C mass fractions. Using Natural B4C. 19.9% B-10/ 80.1% B-11\n        Boron: 10.811 g/mol\n        Carbon:  12.0107 g/mol.\n\n        4 moles of boron/1 mole of carbon\n\n        grams of boron-10 = 10.01 g/mol* 4 mol * 0.199   =  7.96796 g\n        grams of boron-11 = 11.01 g/mol* 4 mol * 0.801   = 35.27604 g\n        grams of carbon= 12.0107 g/mol * 1 mol = 12.0107 g\n\n        total=55.2547 g.\n        Mass fractions are computed from this.\n        \"\"\"\n        nb = self.parent.nuclideBases if self.parent else None\n        if nb is None:\n            b10AtomicMass = 10.01293728\n            b11AtomicMass = 11.0093054803\n        else:\n            b10AtomicMass = nb.byName[\"B10\"].weight\n            b11AtomicMass = nb.byName[\"B11\"].weight\n\n        massEnrich = self.getMassEnrichmentFromNumEnrich(self.b10NumFrac, b10AtomicMass, b11AtomicMass)\n\n        gBoron10, gBoron11, gCarbon = self.setNewMassFracsFromMassEnrich(massEnrichment=massEnrich)\n        self.setMassFrac(\"B10\", gBoron10)\n        self.setMassFrac(\"B11\", gBoron11)\n        self.setMassFrac(\"C\", gCarbon)\n        self.refDens = self.DEFAULT_MASS_DENSITY\n        # TD reference : Dunner, Heuvel, \"Absorber Materials for control rod systems of fast breeder reactors\"\n        # Journal of nuclear materials, 124, 185-194, (1984).\"\n        self.theoreticalDensityFrac = 
self.DEFAULT_THEORETICAL_DENSITY_FRAC  # normally is around 0.88-93.\n\n    @staticmethod\n    def getMassEnrichmentFromNumEnrich(\n        b10NumFrac: float, b10AtomicMass: float = None, b11AtomicMass: float = None\n    ) -> float:\n        \"\"\"Given a B10 number fraction, give the B10 weight fraction.\"\"\"\n        if b10AtomicMass is None:\n            b10AtomicMass = 10.01293728\n        if b11AtomicMass is None:\n            b11AtomicMass = 11.0093054803\n\n        return b10NumFrac * b10AtomicMass / (b10NumFrac * b10AtomicMass + (1.0 - b10NumFrac) * b11AtomicMass)\n\n    def pseudoDensity(self, Tk: float = None, Tc: float = None) -> float:\n        \"\"\"\n        Return density that preserves mass when thermally expanded in 2D.\n\n        Notes\n        -----\n        Applies theoretical density of B4C to parent method\n        \"\"\"\n        return material.Material.pseudoDensity(self, Tk, Tc) * self.theoreticalDensityFrac\n\n    def density(self, Tk: float = None, Tc: float = None) -> float:\n        \"\"\"\n        Return density that preserves mass when thermally expanded in 3D.\n\n        Notes\n        -----\n        Applies theoretical density of B4C to parent method\n        \"\"\"\n        return material.Material.density(self, Tk, Tc) * self.theoreticalDensityFrac\n\n    def linearExpansionPercent(self, Tk: float = None, Tc: float = None) -> float:\n        \"\"\"Boron carbide expansion. Very preliminary.\"\"\"\n        Tc = getTc(Tc, Tk)\n        self.checkPropertyTempRange(\"linear expansion percent\", Tc)\n        deltaT = Tc - 25\n        dLL = deltaT * 4.5e-6\n        return dLL * 100\n"
  },
  {
    "path": "armi/materials/be9.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nBeryllium is a lightweight metal with lots of interesting nuclear use-cases.\n\nIt has a nice (n,2n) reaction and is an inhalation hazard.\n\nThe data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to\nthis file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data\ncontained in this file should not be used in production simulations.\n\"\"\"\n\nfrom armi.materials.material import Material\nfrom armi.nucDirectory import thermalScattering as tsl\nfrom armi.utils.units import getTk\n\n\nclass Be9(Material):\n    \"\"\"Beryllium.\"\"\"\n\n    thermalScatteringLaws = (tsl.fromNameAndCompound(\"BE\", tsl.BE_METAL),)\n    propertyValidTemperature = {\"linear expansion percent\": ((50, 1560.0), \"K\")}\n\n    def setDefaultMassFracs(self):\n        self.setMassFrac(\"BE9\", 1.0)\n        self.refDens = 1.85\n\n    def linearExpansionPercent(self, Tk=None, Tc=None):\n        r\"\"\"\n        Finds the linear expansion coefficient of Be9. given T in C\n        returns m/m-K\n        Based on http://www-ferp.ucsd.edu/LIB/PROPS/PANOS/be.html\n        which is in turn based on Fusion Engineering and Design . 
FEDEEE 5(2), 141-234 (1987).\n        \"\"\"\n        Tk = getTk(Tc, Tk)\n        self.checkPropertyTempRange(\"linear expansion percent\", Tk)\n        return 1e-4 * (8.4305 + 1.1464e-2 * Tk - 2.9752e-6 * Tk**2)\n"
  },
  {
    "path": "armi/materials/caH2.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Calcium Hydride.\n\nThe data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to\nthis file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data\ncontained in this file should not be used in production simulations.\n\"\"\"\n\nfrom armi.materials.material import SimpleSolid\n\n\nclass CaH2(SimpleSolid):\n    \"\"\"CalciumHydride.\"\"\"\n\n    def setDefaultMassFracs(self):\n        \"\"\"Default mass fractions.\n\n        http://atom.kaeri.re.kr/ton/\n        iso atomic percent abundance and atomic mass of 20-calcium\n        | 20-Ca-40     96.941%    39.9625912\n        | 20-Ca-42      0.647%    41.9586183\n        | 20-Ca-43      0.135%    42.9587668\n        | 20-Ca-44      2.086%    43.9554811\n        | 20-Ca-46      0.004%    45.9536928\n        | 20-Ca-48      0.187%    47.9525335\n\n        atomic weight of H2                  2.01565\n        weight of CaH2                      42.09367285\n\n        | weight% of Ca-40 in CaH2            0.920331558\n        | weight% of Ca-42 in CaH2            0.006449241\n        | weight% of Ca-43 in CaH2            0.001377745\n        | weight% of Ca-44 in CaH2            0.02178264\n        | weight% of Ca-46 in CaH2            4.3668E-05\n        | weight% of Ca-48 in CaH2            0.002130278\n   
     | weight% of H2 in CaH2               0.047884869\n        \"\"\"\n        self.setMassFrac(\"CA\", 0.952115131)\n        self.setMassFrac(\"H\", 0.047884869)\n\n    def density(self, Tk=None, Tc=None):\n        \"\"\"Mass density.\n\n        http://en.wikipedia.org/wiki/Calcium_hydride\n\n        Returns\n        -------\n        density : float\n            grams / cc\n        \"\"\"\n        return 1.70\n"
  },
  {
    "path": "armi/materials/californium.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nCalifornium is a synthetic element made in nuclear reactors.\n\nIt is interesting in that it has a large spontaneous fission decay mode that produces lots of neutrons. It's often used\nas a neutron source.\n\nThe data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to\nthis file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data\ncontained in this file should not be used in production simulations.\n\"\"\"\n\nfrom armi.materials.material import SimpleSolid\n\n\nclass Californium(SimpleSolid):\n    def setDefaultMassFracs(self):\n        self.setMassFrac(\"CF252\", 1.0)\n\n    def density(self, Tk=None, Tc=None):\n        \"\"\"https://en.wikipedia.org/wiki/Californium.\"\"\"\n        return 15.1  # g/cm3\n"
  },
  {
    "path": "armi/materials/concrete.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nConcrete.\n\nConcrete is often used to provide structural support of nuclear equipment. It can also provide radiation shielding.\n\nThe data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to\nthis file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data\ncontained in this file should not be used in production simulations.\n\"\"\"\n\nfrom armi.materials.material import Material\n\n\nclass Concrete(Material):\n    \"\"\"Simple concrete material.\n\n    https://web.archive.org/web/20221103120449/https://physics.nist.gov/cgi-bin/Star/compos.pl?matno=144\n    \"\"\"\n\n    def setDefaultMassFracs(self):\n        self.setMassFrac(\"H\", 0.010000)\n        self.setMassFrac(\"C\", 0.001000)\n        self.setMassFrac(\"O16\", 0.529107)\n        self.setMassFrac(\"NA23\", 0.016000)\n        self.setMassFrac(\"MG\", 0.002000)\n        self.setMassFrac(\"AL\", 0.033872)\n        self.setMassFrac(\"SI\", 0.337021)\n        self.setMassFrac(\"K\", 0.013000)\n        self.setMassFrac(\"CA\", 0.044000)\n        self.setMassFrac(\"FE\", 0.014000)\n\n    def density(self, Tk=None, Tc=None):\n        return 2.3000  # g/cm3\n"
  },
  {
    "path": "armi/materials/copper.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Copper metal.\n\nThe data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to\nthis file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data\ncontained in this file should not be used in production simulations.\n\"\"\"\n\nfrom armi.materials.material import Material\nfrom armi.utils.units import getTk\n\n\nclass Cu(Material):\n    propertyValidTemperature = {\"linear expansion percent\": ((40.43, 788.83), \"K\")}\n\n    def setDefaultMassFracs(self):\n        self.setMassFrac(\"CU63\", 0.6915)\n        self.setMassFrac(\"CU65\", 0.3085)\n\n    def density(self, Tk=None, Tc=None):\n        return 8.913  # g/cm3\n\n    def linearExpansionPercent(self, Tk=None, Tc=None):\n        \"\"\"\n        Return the linear expansion percent for Copper.\n\n        Notes\n        -----\n        Digitized using Engauge Digitizer from Figure 21 of\n        Thrust Chamber Life Prediction - Volume I - Mechanical and Physical\n        Properties of High Performance Rocket Nozzle Materials (NASA CR - 134806)\n        \"\"\"\n        Tk = getTk(Tc, Tk)\n        self.checkPropertyTempRange(\"linear expansion percent\", Tk)\n        return 5.0298e-07 * Tk**2 + 1.3042e-03 * Tk - 4.3097e-01\n"
  },
  {
    "path": "armi/materials/cs.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Cesium.\n\nThe data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to\nthis file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data\ncontained in this file should not be used in production simulations.\n\"\"\"\n\nfrom armi.materials.material import Fluid\nfrom armi.utils.units import getTk\n\n\nclass Cs(Fluid):\n    \"\"\"Cesium.\"\"\"\n\n    def setDefaultMassFracs(self):\n        self.setMassFrac(\"CS133\", 1.0)\n\n    def pseudoDensity(self, Tk=None, Tc=None):\n        \"\"\"The 2D/3D density of liquid Cesium.\n\n        https://en.wikipedia.org/wiki/Caesium\n\n        Notes\n        -----\n        In ARMI, we define pseudoDensity() and density() as the same for Fluids.\n        \"\"\"\n        Tk = getTk(Tc, Tk)\n        if Tk < self.meltingPoint():\n            return 1.93  # g/cm3\n        else:\n            return 1.843  # g/cm3\n\n    def meltingPoint(self):\n        return 301.7  # K\n"
  },
  {
    "path": "armi/materials/custom.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nCustom materials are ones that you can specify all the number densities yourself.\n\nUseful for benchmarking when you have a particular specified material density. Use the isotopic input described in\n:ref:`bp-input-file`.\n\nThe density function gets applied from custom isotopics by\n:py:meth:`armi.reactor.blueprints.isotopicOptions.CustomIsotopic.apply`.\n\"\"\"\n\nfrom armi.materials.material import Material\n\n\nclass Custom(Material):\n    \"\"\"Custom Materials have user input properties.\"\"\"\n\n    enrichedNuclide = \"U235\"\n\n    def __init__(self):\n        \"\"\"\n        During construction, set default density to 1.0. That way, people can set number densities without having to set\n        a density and it will work. This will generally be overwritten in practice by a constant user-input density.\n        \"\"\"\n        Material.__init__(self)\n        self.customDensity = 1.0\n\n    def pseudoDensity(self, Tk=None, Tc=None):\n        \"\"\"\n        The density value is set in the loading input.\n\n        In some cases it needs to be set after full core assemblies are populated (e.g. 
for CustomLocation materials),\n        so the missing density warning will appear no matter what.\n        \"\"\"\n        return self.customDensity\n\n    def setMassFrac(self, *args, **kwargs):\n        if self.customDensity == 1.0:\n            raise ValueError(\"Cannot set mass fractions on Custom materials unless a density is defined.\")\n        Material.setMassFrac(self, *args, **kwargs)\n"
  },
  {
    "path": "armi/materials/graphite.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Graphite is often used as a moderator in gas-cooled nuclear reactors.\n\nThe data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to\nthis file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data\ncontained in this file should not be used in production simulations.\n\"\"\"\n\nfrom armi.materials.material import Material\nfrom armi.nucDirectory import thermalScattering as tsl\nfrom armi.utils import units\n\n\nclass Graphite(Material):\n    \"\"\"\n    Graphite.\n\n    .. [INL-EXT-16-38241] McEligot, Donald, Swank, W. David, Cottle, David L., and Valentin,\n        Francisco I. Thermal Properties of G-348 Graphite. United States: N. p., 2016. Web. 
doi:10.2172/1330693.\n        https://www.osti.gov/biblio/1330693\n    \"\"\"\n\n    thermalScatteringLaws = (tsl.fromNameAndCompound(\"C\", tsl.GRAPHITE_10P),)\n\n    def setDefaultMassFracs(self):\n        \"\"\"\n        Set graphite to carbon.\n\n        Room temperature density from [INL-EXT-16-38241]_, table 2.\n        \"\"\"\n        self.setMassFrac(\"C\", 1.0)\n        self.refDens = 1.8888\n\n    def linearExpansionPercent(self, Tk=None, Tc=None):\n        \"\"\"\n        This is dL/L0 for graphite.\n\n        From  [INL-EXT-16-38241]_, page 4.\n        \"\"\"\n        Tc = units.getTc(Tc, Tk)\n        return 100 * (-1.454e-4 + 4.812e-6 * Tc + 1.145e-9 * Tc**2)\n"
  },
  {
    "path": "armi/materials/hafnium.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Hafnium is an element that has high capture cross section across multiple isotopes.\n\nThe data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to\nthis file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data\ncontained in this file should not be used in production simulations.\n\"\"\"\n\nfrom armi.materials.material import SimpleSolid\nfrom armi.nucDirectory import nucDir\n\n\nclass Hafnium(SimpleSolid):\n    def setDefaultMassFracs(self):\n        for a, abund in nucDir.getNaturalMassIsotopics(\"HF\"):\n            self.setMassFrac(\"HF{0}\".format(a), abund)\n\n    def density(self, Tk=None, Tc=None):\n        r\"\"\"http://www.lenntech.com/periodic/elements/hf.htm.\"\"\"\n        return 13.07\n"
  },
  {
    "path": "armi/materials/hastelloyN.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Hastelloy-N is a high-nickel structural material invented by ORNL for handling molten fluoride salts.\n\nThe data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to\nthis file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data\ncontained in this file should not be used in production simulations.\n\"\"\"\n\nfrom armi.materials.material import Material\nfrom armi.utils.units import getTc, getTk\n\n\nclass HastelloyN(Material):\n    r\"\"\"\n    Hastelloy N alloy (UNS N10003).\n\n    .. [Haynes] Haynes International, H-2052D 2020\n        (http://haynesintl.com/docs/default-source/pdfs/new-alloy-brochures/corrosion-resistant-alloys/brochures/n-brochure.pdf)\n\n    .. [SAB] Sabharwall, et. al.\n        Feasibility Study of Secondary Heat Exchanger Concepts for the Advanced High Temperature Reactor\n        INL/EXT-11-23076, 2011\n\n    \"\"\"\n\n    materialIntro = (\n        \"Hastelloy N alloy is a nickel-base alloy that was invented at Oak RIdge National Laboratories \"\n        \"as a container material for molten fluoride salts. 
It has good oxidation resistance to hot fluoride \"\n        \"salts in the temperature range of 704 to 871C (1300 to 1600F)\"\n    )\n\n    propertyValidTemperature = {\n        \"thermal conductivity\": ((473.15, 973.15), \"K\"),\n        \"heat capacity\": ((373.15, 973.15), \"K\"),\n        \"thermal expansion\": ((293.15, 1173.15), \"K\"),\n    }\n\n    refTempK = 293.15\n\n    def setDefaultMassFracs(self):\n        \"\"\"\n        Hastelloy N mass fractions.\n\n        From [Haynes]_.\n        \"\"\"\n        self.setMassFrac(\"CR\", 0.07)\n        self.setMassFrac(\"MO\", 0.16)\n        self.setMassFrac(\"FE\", 0.04)  # max.\n        self.setMassFrac(\"SI\", 0.01)  # max.\n        self.setMassFrac(\"MN\", 0.0080)  # max.\n        self.setMassFrac(\"V\", 0.0005)  # max.\n        self.setMassFrac(\"C\", 0.0006)\n        self.setMassFrac(\"CO\", 0.0020)  # max.\n        self.setMassFrac(\"CU\", 0.0035)  # max.\n        self.setMassFrac(\"W\", 0.005)  # max.\n        self.setMassFrac(\"AL\", 0.0025)  # max.\n        self.setMassFrac(\"TI\", 0.0025)  # max.\n        self.setMassFrac(\"NI\", 1.0 - sum(self.massFrac.values()))  # balance\n\n        self.refDens = 8.86\n\n    def thermalConductivity(self, Tk=None, Tc=None):\n        r\"\"\"\n        Calculates the thermal conductivity of Hastelloy N.\n        Second order polynomial fit to data from [Haynes]_.\n\n        Parameters\n        ----------\n        Tk : float\n            Temperature in (K)\n\n        Tc : float\n            Temperature in (C)\n\n        Returns\n        -------\n        Hastelloy N thermal conductivity (W/m-K)\n        \"\"\"\n        Tc = getTc(Tc, Tk)\n        Tk = getTk(Tc=Tc)\n        self.checkPropertyTempRange(\"thermal conductivity\", Tk)\n        return 1.92857e-05 * Tc**2 + 3.12857e-03 * Tc + 1.17743e01  # W/m-K\n\n    def heatCapacity(self, Tk=None, Tc=None):\n        r\"\"\"\n        Calculates the specific heat capacity of Hastelloy N.\n        Sixth order polynomial fit to 
data from Table 2-20 [SAB]_ (R^2=0.97).\n\n        Parameters\n        ----------\n        Tk : float\n            Temperature in (K)\n\n        Tc : float\n            Temperature in (C)\n\n        Returns\n        -------\n        Hastelloy N specific heat capacity (J/kg-C)\n        \"\"\"\n        Tc = getTc(Tc, Tk)\n        Tk = getTk(Tc=Tc)\n        self.checkPropertyTempRange(\"heat capacity\", Tk)\n        return (\n            +3.19981e02\n            + 2.47421e00 * Tc\n            - 2.49306e-02 * Tc**2\n            + 1.32517e-04 * Tc**3\n            - 3.58872e-07 * Tc**4\n            + 4.69003e-10 * Tc**5\n            - 2.32692e-13 * Tc**6\n        )\n\n    def linearExpansionPercent(self, Tk=None, Tc=None):\n        r\"\"\"\n        Average thermal expansion dL/L. Used for computing hot dimensions.\n\n        Parameters\n        ----------\n        Tk : float\n            temperature in (K)\n        Tc : float\n            Temperature in (C)\n\n        Returns\n        -------\n        %dLL(T) in m/m/K\n        \"\"\"\n        Tc = getTc(Tc, Tk)\n        refTempC = getTc(Tk=self.refTempK)\n        return 100.0 * self.meanCoefficientThermalExpansion(Tc=Tc) * (Tc - refTempC)\n\n    def meanCoefficientThermalExpansion(self, Tk=None, Tc=None):\n        r\"\"\"\n        Mean coefficient of thermal expansion for Hastelloy N.\n        Second order polynomial fit of data from [Haynes]_.\n\n        Parameters\n        ----------\n        Tk : float\n            temperature in (K)\n        Tc : float\n            Temperature in (C)\n\n        Returns\n        -------\n        mean coefficient of thermal expansion in m/m/C\n        \"\"\"\n        Tc = getTc(Tc, Tk)\n        Tk = getTk(Tc=Tc)\n        self.checkPropertyTempRange(\"thermal expansion\", Tk)\n        return 2.60282e-12 * Tc**2 + 7.69859e-10 * Tc + 1.21036e-05\n"
  },
  {
    "path": "armi/materials/ht9.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nSimple/academic/incomplete HT9 ferritic-martensitic stainless steel material.\n\nThis is a famous SFR cladding/duct material because it doesn't void swell that much.\n\nThe data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to\nthis file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data\ncontained in this file should not be used in production simulations.\n\"\"\"\n\nfrom armi import materials\nfrom armi.utils import units\n\n\nclass HT9(materials.Material):\n    \"\"\"\n    Simplified HT9 stainless steel.\n\n    .. warning:: This is an academic-quality material.\n        When more detail is desired, a custom material should be implemented via a\n        user-provided plugin.\n\n    .. [MFH] Metallic Fuels Handbook\n            Hofman, G. L., Billone, M. C., Koenig, J. F., Kramer, J. M., Lambert, J. D. B., Leibowitz, L.,\n            Orechwa, Y., Pedersen, D. R., Porter, D. L., Tsai, H., and Wright, A. E. Metallic Fuels Handbook.\n            United States: N. p., 2019. Web. 
doi:10.2172/1506477.\n            https://www.osti.gov/biblio/1506477-metallic-fuels-handbook\n    \"\"\"\n\n    propertyValidTemperature = {\"linear expansion\": ((293, 1050), \"K\")}\n\n    def setDefaultMassFracs(self):\n        \"\"\"\n        HT9 mass fractions.\n\n        From E.2-1 of [MFH]_.\n        https://www.osti.gov/biblio/1506477-metallic-fuels-handbook\n        \"\"\"\n        self.setMassFrac(\"C\", 0.002)\n        self.setMassFrac(\"MN\", 0.005)\n        self.setMassFrac(\"SI\", 0.0025)\n        self.setMassFrac(\"NI\", 0.0055)\n        self.setMassFrac(\"CR\", 0.1175)\n        self.setMassFrac(\"MO\", 0.01)\n        self.setMassFrac(\"W\", 0.0055)\n        self.setMassFrac(\"V\", 0.0030)\n        self.setMassFrac(\"FE\", 1.0 - sum(self.massFrac.values()))\n\n        self.refDens = 7.778\n\n    def linearExpansionPercent(self, Tk=None, Tc=None):\n        \"\"\"\n        Gets the linear expansion from E.2.2.2 in [MFH]_ for HT9.\n\n        The ref gives dL/L0 in percent and is valid from 293 - 1050 K.\n        \"\"\"\n        tk = units.getTk(Tc, Tk)\n        self.checkPropertyTempRange(\"linear expansion\", tk)\n        return -0.16256 + 1.62307e-4 * tk + 1.42357e-6 * tk**2 - 5.50344e-10 * tk**3\n\n    def thermalConductivity(self, Tk=None, Tc=None):\n        \"\"\"\n        Thermal conductivity in W/m-K).\n\n        From [MFH]_, E.2.2.3, eq 5.\n\n        .. tip:: This can probably be sped up with a polynomial evaluator.\n        \"\"\"\n        Tk = units.getTk(Tc, Tk)\n        return 29.65 - 6.668e-2 * Tk + 2.184e-4 * Tk**2 - 2.527e-7 * Tk**3 + 9.621e-11 * Tk**4\n"
  },
  {
    "path": "armi/materials/inconel.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Inconel is a austenitic nickel-chromium superalloy.\n\nThe data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to\nthis file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data\ncontained in this file should not be used in production simulations.\n\"\"\"\n\nfrom armi.materials.material import SimpleSolid\n\n\nclass Inconel(SimpleSolid):\n    references = {\n        \"mass fractions\": \"https://www.specialmetals.com/documents/technical-bulletins/inconel/inconel-alloy-617.pdf\",\n        \"density\": \"https://www.specialmetals.com/documents/technical-bulletins/inconel/inconel-alloy-617.pdf\",\n    }\n\n    def setDefaultMassFracs(self):\n        self.setMassFrac(\"NI\", 0.52197)\n        self.setMassFrac(\"CR\", 0.22)\n        self.setMassFrac(\"CO59\", 0.125)\n        self.setMassFrac(\"MO\", 0.09)\n        self.setMassFrac(\"AL27\", 0.0115)\n        self.setMassFrac(\"C\", 0.001)\n        self.setMassFrac(\"FE\", 0.015)\n        self.setMassFrac(\"MN55\", 0.005)\n        self.setMassFrac(\"SI\", 0.005)\n        self.setMassFrac(\"TI\", 0.003)\n        self.setMassFrac(\"CU\", 0.0025)\n        self.setMassFrac(\"B10\", 0.00003 * 0.1997)\n        self.setMassFrac(\"B11\", 0.00003 * (1.0 - 0.1997))\n\n    def density(self, Tk=None, Tc=None):\n    
    return 8.3600\n\n\nclass Inconel617(Inconel):\n    \"\"\"\n    Note: historically the 'Inconel' material represented the high-nickel alloy\n    Inconel 617. This material enables the user to know with certainty that\n    this material represents Inconel 617 and doesn't break any older models.\n    \"\"\"\n"
  },
  {
    "path": "armi/materials/inconel600.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Inconel600.\n\nThe data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to\nthis file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data\ncontained in this file should not be used in production simulations.\n\"\"\"\n\nfrom armi.materials.material import Material\nfrom armi.utils.units import getTc\n\n\nclass Inconel600(Material):\n    propertyValidTemperature = {\n        \"heat capacity\": ((20, 900), \"C\"),\n        \"linear expansion\": ((21.0, 900.0), \"C\"),\n        \"linear expansion percent\": ((21.0, 900.0), \"C\"),\n        \"thermal conductivity\": ((20.0, 800.0), \"C\"),\n    }\n    references = {\n        \"mass fractions\": \"http://www.specialmetals.com/documents/Inconel%20alloy%20600.pdf\",\n        \"density\": \"http://www.specialmetals.com/documents/Inconel%20alloy%20600.pdf\",\n        \"thermalConductivity\": \"http://www.specialmetals.com/documents/Inconel%20alloy%20600.pdf\",\n        \"specific heat\": \"http://www.specialmetals.com/documents/Inconel%20alloy%20600.pdf\",\n        \"linear expansion percent\": \"http://www.specialmetals.com/documents/Inconel%20alloy%20600.pdf\",\n        \"linear expansion\": \"http://www.specialmetals.com/documents/Inconel%20alloy%20600.pdf\",\n    }\n    refTempK = 294.15\n\n    def 
__init__(self):\n        Material.__init__(self)\n        self.refDens = 8.47  # g/cc\n        # Only density measurement presented in the reference. Presumed to be performed at 21C since\n        # this was the reference temperature for linear expansion measurements.\n\n    def setDefaultMassFracs(self):\n        massFracs = {\n            \"NI\": 0.7541,\n            \"CR\": 0.1550,\n            \"FE\": 0.0800,\n            \"C\": 0.0008,\n            \"MN55\": 0.0050,\n            \"S\": 0.0001,\n            \"SI\": 0.0025,\n            \"CU\": 0.0025,\n        }\n        for element, massFrac in massFracs.items():\n            self.setMassFrac(element, massFrac)\n\n    def thermalConductivity(self, Tk=None, Tc=None):\n        r\"\"\"\n        Returns the thermal conductivity of Inconel600.\n\n        Parameters\n        ----------\n        Tk : float, optional\n            temperature in (K)\n        Tc : float, optional\n            Temperature in (C)\n\n        Returns\n        -------\n        thermalCond : float\n            thermal conductivity in W/m/C\n        \"\"\"\n        Tc = getTc(Tc, Tk)\n        self.checkPropertyTempRange(\"thermal conductivity\", Tc)\n        thermalCond = 3.4938e-6 * Tc**2 + 1.3403e-2 * Tc + 14.572\n        return thermalCond  # W/m-C\n\n    def heatCapacity(self, Tk=None, Tc=None):\n        r\"\"\"\n        Returns the specific heat capacity of Inconel600.\n\n        Parameters\n        ----------\n        Tk : float, optional\n            Temperature in Kelvin.\n        Tc : float, optional\n            Temperature in degrees Celsius.\n\n        Returns\n        -------\n        heatCapacity : float\n            heat capacity in J/kg/C\n        \"\"\"\n        Tc = getTc(Tc, Tk)\n        self.checkPropertyTempRange(\"heat capacity\", Tc)\n        heatCapacity = 7.4021e-6 * Tc**2 + 0.20573 * Tc + 441.3\n        return heatCapacity  # J/kg-C\n\n    def linearExpansionPercent(self, Tk=None, Tc=None):\n        r\"\"\"\n        
Returns percent linear expansion of Inconel600.\n\n        Parameters\n        ----------\n        Tk : float\n            temperature in (K)\n        Tc : float\n            Temperature in (C)\n\n        Returns\n        -------\n        linExpPercent in %-m/m/C\n        \"\"\"\n        Tc = getTc(Tc, Tk)\n        self.checkPropertyTempRange(\"linear expansion percent\", Tc)\n        linExpPercent = 3.722e-7 * Tc**2 + 1.303e-3 * Tc - 2.863e-2\n        return linExpPercent\n\n    def linearExpansion(self, Tk=None, Tc=None):\n        r\"\"\"\n        From http://www.specialmetals.com/documents/Inconel%20alloy%20600.pdf.\n\n        Using the correlation for linearExpansionPercent, the 2nd order polynomial is divided by 100\n        to convert from percent strain to strain, then differentiated with respect to temperature to\n        find the correlation for instantaneous linear expansion.\n\n        i.e. for a linearExpansionPercent correlation of a*Tc**2 + b*Tc + c, the linearExpansion\n        correlation is 2*a/100*Tc + b/100\n\n        2*(3.722e-7/100.0)*Tc + 1.303e-3/100.0\n\n        Parameters\n        ----------\n        Tk : float\n            temperature in (K)\n        Tc : float\n            Temperature in (C)\n\n        Returns\n        -------\n        linExp in m/m/C\n        \"\"\"\n        Tc = getTc(Tc, Tk)\n        self.checkPropertyTempRange(\"linear expansion\", Tc)\n        linExp = 7.444e-9 * Tc + 1.303e-5\n        return linExp\n"
  },
  {
    "path": "armi/materials/inconel625.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Inconel625.\n\nThe data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to\nthis file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data\ncontained in this file should not be used in production simulations.\n\"\"\"\n\nfrom armi.materials.material import Material\nfrom armi.utils.units import getTc\n\n\nclass Inconel625(Material):\n    propertyValidTemperature = {\n        \"heat capacity\": ((221.0, 1093.0), \"C\"),\n        \"linear expansion\": ((21.0, 927.0), \"C\"),\n        \"linear expansion percent\": ((21.0, 927.0), \"C\"),\n        \"thermal conductivity\": ((21.0, 982.0), \"C\"),\n    }\n    references = {\n        \"mass fractions\": \"http://www.specialmetals.com/assets/documents/alloys/inconel/inconel-alloy-625.pdf\",\n        \"density\": \"http://www.specialmetals.com/assets/documents/alloys/inconel/inconel-alloy-625.pdf\",\n        \"linearExpansionPercent\": \"http://www.specialmetals.com/assets/documents/alloys/inconel/inconel-alloy-625.pdf\",\n        \"linearExpansion\": \"http://www.specialmetals.com/assets/documents/alloys/inconel/inconel-alloy-625.pdf\",\n        \"thermalConductivity\": \"http://www.specialmetals.com/assets/documents/alloys/inconel/inconel-alloy-625.pdf\",\n        \"specific heat\": 
\"http://www.specialmetals.com/assets/documents/alloys/inconel/inconel-alloy-625.pdf\",\n    }\n    refTempK = 294.15\n\n    def __init__(self):\n        Material.__init__(self)\n        self.refDens = 8.44  # g/cc\n        # Only density measurement presented in the reference.\n        # Presumed to be performed at 21C since this was the reference temperature for linear expansion measurements.\n\n    def setDefaultMassFracs(self):\n        massFracs = {\n            \"NI\": 0.6188,\n            \"CR\": 0.2150,\n            \"FE\": 0.0250,\n            \"MO\": 0.0900,\n            \"TA181\": 0.0365,\n            \"C\": 0.0005,\n            \"MN55\": 0.0025,\n            \"SI\": 0.0025,\n            \"P31\": 0.0001,\n            \"S\": 0.0001,\n            \"AL27\": 0.0020,\n            \"TI\": 0.0020,\n            \"CO59\": 0.0050,\n        }\n        for element, massFrac in massFracs.items():\n            self.setMassFrac(element, massFrac)\n\n    def thermalConductivity(self, Tk=None, Tc=None):\n        r\"\"\"\n        Returns the thermal conductivity of Inconel625.\n\n        Parameters\n        ----------\n        Tk : float, optional\n            Temperature in Kelvin.\n        Tc : float, optional\n            Temperature in degrees Celsius.\n\n        Returns\n        -------\n        thermalCond : float\n            thermal conductivity in W/m/C\n        \"\"\"\n        Tc = getTc(Tc, Tk)\n        self.checkPropertyTempRange(\"thermal conductivity\", Tc)\n        thermalCond = 2.7474e-6 * Tc**2 + 0.012907 * Tc + 9.62532\n        return thermalCond  # W/m-C\n\n    def heatCapacity(self, Tk=None, Tc=None):\n        \"\"\"\n        Returns the specific heat capacity of Inconel625.\n\n        Parameters\n        ----------\n        Tk : float, optional\n            Temperature in Kelvin.\n        Tc : float, optional\n            Temperature in degrees Celsius.\n\n        Returns\n        -------\n        heatCapacity : float\n            heat capacity in 
J/kg/C\n        \"\"\"\n        Tc = getTc(Tc, Tk)\n        self.checkPropertyTempRange(\"heat capacity\", Tc)\n        heatCapacity = -5.3777e-6 * Tc**2 + 0.25 * Tc + 404.26\n        return heatCapacity  # J/kg-C\n\n    def linearExpansionPercent(self, Tk=None, Tc=None):\n        \"\"\"\n        Returns percent linear expansion of Inconel625.\n\n        Parameters\n        ----------\n        Tk : float\n            temperature in (K)\n        Tc : float\n            Temperature in (C)\n\n        Returns\n        -------\n        linExpPercent in %-m/m/C\n        \"\"\"\n        Tc = getTc(Tc, Tk)\n        self.checkPropertyTempRange(\"linear expansion percent\", Tc)\n        linExpPercent = 5.083e-7 * Tc**2 + 1.125e-3 * Tc - 1.804e-2\n        return linExpPercent\n\n    def linearExpansion(self, Tk=None, Tc=None):\n        r\"\"\"\n        From http://www.specialmetals.com/assets/documents/alloys/inconel/inconel-alloy-625.pdf.\n\n        Using the correlation for linearExpansionPercent, the 2nd order polynomial is divided by 100\n        to convert from percent strain to strain, then differentiated with respect to temperature to\n        find the correlation for instantaneous linear expansion.\n\n        i.e. for a linearExpansionPercent correlation of a*Tc**2 + b*Tc + c, the linearExpansion\n        correlation is 2*a/100*Tc + b/100\n\n        2*(5.083e-7/100.0)*Tc + 1.125e-3/100.0\n\n        Parameters\n        ----------\n        Tk : float\n            temperature in (K)\n        Tc : float\n            Temperature in (C)\n\n        Returns\n        -------\n        linExp in m/m/C\n        \"\"\"\n        Tc = getTc(Tc, Tk)\n        self.checkPropertyTempRange(\"linear expansion\", Tc)\n        linExp = 1.0166e-8 * Tc + 1.125e-5\n        return linExp\n"
  },
  {
    "path": "armi/materials/inconel800.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Incoloy 800.\n\nThe data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to\nthis file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data\ncontained in this file should not be used in production simulations.\n\"\"\"\n\nfrom armi.materials.material import Material\nfrom armi.utils.units import getTc\n\n\nclass Inconel800(Material):\n    r\"\"\"\n    Incoloy 800/800H (UNS N08800/N08810).\n\n    .. [SM] Special Metals - Incoloy alloy 800\n        (https://www.specialmetals.com/assets/smc/documents/alloys/incoloy/incoloy-alloy-800.pdf)\n    \"\"\"\n\n    propertyValidTemperature = {\"thermal expansion\": ((20.0, 800.0), \"C\")}\n    refTempK = 294.15\n\n    def setDefaultMassFracs(self):\n        \"\"\"\n        Incoloy 800H mass fractions.\n\n        From [SM]_.\n        \"\"\"\n        self.setMassFrac(\"NI\", 0.325)  # ave.\n        self.setMassFrac(\"CR\", 0.21)  # ave.\n        self.setMassFrac(\"C\", 0.00075)  # ave. 
800H\n        self.setMassFrac(\"MN\", 0.015)  # max.\n        self.setMassFrac(\"S\", 0.00015)  # max.\n        self.setMassFrac(\"SI\", 0.01)  # max.\n        self.setMassFrac(\"CU\", 0.0075)  # max.\n        self.setMassFrac(\"AL\", 0.00375)  # ave.\n        self.setMassFrac(\"TI\", 0.00375)  # ave.\n        self.setMassFrac(\"FE\", 1.0 - sum(self.massFrac.values()))  # balance, 0.395 min.\n\n        self.refDens = 7.94\n\n    def linearExpansionPercent(self, Tk=None, Tc=None):\n        \"\"\"\n        Average thermal expansion dL/L. Used for computing hot dimensions.\n\n        Parameters\n        ----------\n        Tk : float\n            temperature in (K)\n        Tc : float\n            Temperature in (C)\n\n        Returns\n        -------\n        %dLL(T) in m/m/K\n        \"\"\"\n        Tc = getTc(Tc, Tk)\n        refTempC = getTc(Tk=self.refTempK)\n        return 100.0 * self.meanCoefficientThermalExpansion(Tc=Tc) * (Tc - refTempC)\n\n    def meanCoefficientThermalExpansion(self, Tk=None, Tc=None):\n        \"\"\"\n        Mean coefficient of thermal expansion for Incoloy 800.\n        Third order polynomial fit of table 5 from [SM]_.\n\n        Parameters\n        ----------\n        Tk : float\n            temperature in (K)\n        Tc : float\n            Temperature in (C)\n\n        Returns\n        -------\n        mean coefficient of thermal expansion in m/m/C\n        \"\"\"\n        Tc = getTc(Tc, Tk)\n        self.checkPropertyTempRange(\"thermal expansion\", Tc)\n        return 2.52525e-14 * Tc**3 - 3.77814e-11 * Tc**2 + 2.06360e-08 * Tc + 1.28071e-05\n"
  },
  {
    "path": "armi/materials/inconelPE16.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Inconel PE16.\n\nThe data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to\nthis file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data\ncontained in this file should not be used in production simulations.\n\"\"\"\n\nfrom armi import runLog\nfrom armi.materials.material import SimpleSolid\n\n\nclass InconelPE16(SimpleSolid):\n    references = {\n        \"mass fractions\": r\"http://www.specialmetals.com/assets/documents/alloys/nimonic/nimonic-alloy-pe16.pdf\",\n        \"density\": r\"http://www.specialmetals.com/assets/documents/alloys/nimonic/nimonic-alloy-pe16.pdf\",\n    }\n\n    def setDefaultMassFracs(self):\n        nb = self.parent.nuclideBases if self.parent else None\n        if nb is None:\n            ag107abundance = 0.51839001\n            ag109abundance = 0.48160999\n            b10abundance = 0.19799999\n            b11abundance = 0.80199997\n        else:\n            ag107abundance = nb.byName[\"AG107\"].abundance\n            ag109abundance = nb.byName[\"AG109\"].abundance\n            b10abundance = nb.byName[\"B10\"].abundance\n            b11abundance = nb.byName[\"B11\"].abundance\n\n        massFracs = {\n            \"C\": 0.0006,\n            \"SI\": 0.0025,\n            \"MN55\": 0.001,\n            \"S\": 
0.000075,\n            \"AG107\": 0.0000025 * ag107abundance,\n            \"AG109\": 0.0000025 * ag109abundance,\n            \"AL27\": 0.012,\n            \"B10\": 0.000025 * b10abundance,\n            \"B11\": 0.000025 * b11abundance,\n            \"BI209\": 0.0000005,\n            \"CO59\": 0.01,\n            \"CR\": 0.165,\n            \"CU\": 0.0025,\n            \"MO\": 0.033,\n            \"NI\": 0.425,\n            \"PB\": 0.0000075,\n            \"TI\": 0.012,\n            \"ZR\": 0.0003,\n        }\n        massFracs[\"FE\"] = 1 - sum(massFracs.values())  # balance*\n\n        # *Reference to the 'balance' of a composition does not guarantee this is exclusively of the element mentioned\n        # but that it predominates and others are present only in minimal quantities.\n\n        for element, massFrac in massFracs.items():\n            self.setMassFrac(element, massFrac)\n\n    def density(self, Tk=None, Tc=None):\n        runLog.warning(\n            \"PE16 mass density is not temperature dependent, using room temperature value\",\n            single=True,\n            label=\"InconelPE16 density\",\n        )\n        return 8.00\n"
  },
  {
    "path": "armi/materials/inconelX750.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Inconel X750.\n\nThe data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to\nthis file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data\ncontained in this file should not be used in production simulations.\n\"\"\"\n\nfrom armi.materials.material import Material\nfrom armi.utils.units import getTc\n\n\nclass InconelX750(Material):\n    propertyValidTemperature = {\n        \"heat capacity\": ((-18.0, 1093.0), \"C\"),\n        \"linear expansion\": ((21.1, 982.2), \"C\"),\n        \"linear expansion percent\": ((21.1, 982.2), \"C\"),\n        \"thermal conductivity\": ((-156.7, 871.1), \"C\"),\n    }\n    references = {\n        \"mass fractions\": \"http://www.specialmetals.com/documents/Inconel%20alloy%20X-750.pdf\",\n        \"density\": \"http://www.specialmetals.com/documents/Inconel%20alloy%20X-750.pdf\",\n        \"thermalConductivity\": \"http://www.specialmetals.com/documents/Inconel%20alloy%20X-750.pdf\",\n        \"specific heat\": \"http://www.specialmetals.com/documents/Inconel%20alloy%20X-750.pdf\",\n        \"linearExpansionPercent\": \"http://www.specialmetals.com/documents/Inconel%20alloy%20X-750.pdf\",\n        \"linearExpansion\": \"http://www.specialmetals.com/documents/Inconel%20alloy%20X-750.pdf\",\n    }\n    refTempK = 
294.15\n\n    def __init__(self):\n        Material.__init__(self)\n        self.refDens = 8.28  # g/cc\n        # Only density measurement presented in the reference.\n        # Presumed to be performed at 21C since this was the reference temperature for linear\n        # expansion measurements.\n\n    def setDefaultMassFracs(self):\n        massFracs = {\n            \"NI\": 0.7180,\n            \"CR\": 0.1550,\n            \"FE\": 0.0700,\n            \"TI\": 0.0250,\n            \"AL27\": 0.0070,\n            \"NB93\": 0.0095,\n            \"MN55\": 0.0050,\n            \"SI\": 0.0025,\n            \"S\": 0.0001,\n            \"CU\": 0.0025,\n            \"C\": 0.0004,\n            \"CO59\": 0.0050,\n        }\n        for element, massFrac in massFracs.items():\n            self.setMassFrac(element, massFrac)\n\n    def thermalConductivity(self, Tk=None, Tc=None):\n        r\"\"\"\n        Returns the thermal conductivity of InconelX750.\n\n        Parameters\n        ----------\n        Tk : float, optional\n            Temperature in Kelvin.\n        Tc : float, optional\n            Temperature in degrees Celsius.\n\n        Returns\n        -------\n        thermalCond : float\n            thermal conductivity in W/m/C\n        \"\"\"\n        Tc = getTc(Tc, Tk)\n        self.checkPropertyTempRange(\"thermal conductivity\", Tc)\n        thermalCond = 1.4835e-6 * Tc**2 + 1.2668e-2 * Tc + 11.632\n        return thermalCond  # W/m-C\n\n    def heatCapacity(self, Tk=None, Tc=None):\n        r\"\"\"\n        Returns the specific heat capacity of InconelX750.\n\n        Parameters\n        ----------\n        Tk : float, optional\n            Temperature in Kelvin.\n        Tc : float, optional\n            Temperature in degrees Celsius.\n\n        Returns\n        -------\n        heatCapacity : float\n            heat capacity in J/kg/C\n        \"\"\"\n        Tc = getTc(Tc, Tk)\n        self.checkPropertyTempRange(\"heat capacity\", Tc)\n        
heatCapacity = 9.2261e-7 * Tc**3 - 9.6368e-4 * Tc**2 + 4.7778e-1 * Tc + 420.55\n        return heatCapacity  # J/kg-C\n\n    def linearExpansionPercent(self, Tk=None, Tc=None):\n        r\"\"\"\n        Returns percent linear expansion of InconelX750.\n\n        Parameters\n        ----------\n        Tk : float\n            temperature in (K)\n        Tc : float\n            Temperature in (C)\n\n        Returns\n        -------\n        linExpPercent in %-m/m/C\n        \"\"\"\n        Tc = getTc(Tc, Tk)\n        self.checkPropertyTempRange(\"linear expansion percent\", Tc)\n        linExpPercent = 6.8378e-7 * Tc**2 + 1.056e-3 * Tc - 1.3161e-2\n        return linExpPercent\n\n    def linearExpansion(self, Tk=None, Tc=None):\n        r\"\"\"\n        From http://www.specialmetals.com/documents/Inconel%20alloy%20X-750.pdf.\n\n        Using the correlation for linearExpansionPercent, the 2nd order polynomial is divided by 100\n        to convert from percent strain to strain, then differentiated with respect to temperature to\n        find the correlation for instantaneous linear expansion.\n\n        i.e. for a linearExpansionPercent correlation of a*Tc**2 + b*Tc + c, the linearExpansion\n        correlation is 2*a/100*Tc + b/100\n\n        2*(6.8378e-7/100.0)*Tc + 1.056e-3/100.0\n\n        Parameters\n        ----------\n        Tk : float\n            temperature in (K)\n        Tc : float\n            Temperature in (C)\n\n        Returns\n        -------\n        linExp in m/m/C\n        \"\"\"\n        Tc = getTc(Tc, Tk)\n        self.checkPropertyTempRange(\"linear expansion\", Tc)\n        linExp = 1.36756e-8 * Tc + 1.056e-5\n        return linExp\n"
  },
  {
    "path": "armi/materials/lead.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Lead.\n\nThe data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to\nthis file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data\ncontained in this file should not be used in production simulations.\n\"\"\"\n\nfrom armi.materials import material\nfrom armi.utils.units import getTk\n\n\nclass Lead(material.Fluid):\n    \"\"\"Natural lead.\"\"\"\n\n    propertyValidTemperature = {\n        \"density\": ((600, 1700), \"K\"),\n        \"heat capacity\": ((600, 1500), \"K\"),\n        \"volumetric expansion\": ((600, 1700), \"K\"),\n    }\n\n    def volumetricExpansion(self, Tk=None, Tc=None):\n        r\"\"\"Volumetric expansion inferred from density.\n\n        NOT BASED ON MEASUREMENT.\n        Done by V. sobolev/ J Nucl Mat 362 (2007) 235-247\n        \"\"\"\n        Tk = getTk(Tc, Tk)\n        self.checkPropertyTempRange(\"volumetric expansion\", Tk)\n\n        return 1.0 / (9516.9 - Tk)\n\n    def setDefaultMassFracs(self):\n        \"\"\"Mass fractions.\"\"\"\n        self.setMassFrac(\"PB\", 1)\n\n    def pseudoDensity(self, Tk=None, Tc=None):\n        \"\"\"Density in g/cc from V. 
Sobolev/ J Nucl Mat 362 (2007) 235-247.\"\"\"\n        Tk = getTk(Tc, Tk)\n        self.checkPropertyTempRange(\"density\", Tk)\n\n        return 11.367 - 0.0011944 * Tk  # pre-converted from kg/m^3 to g/cc\n\n    def heatCapacity(self, Tk=None, Tc=None):\n        \"\"\"Heat capacity in J/kg/K from Sobolev.\"\"\"\n        Tk = getTk(Tc, Tk)\n        self.checkPropertyTempRange(\"heat capacity\", Tk)\n\n        return 162.9 - 3.022e-2 * Tk + 8.341e-6 * Tk**2\n"
  },
  {
    "path": "armi/materials/leadBismuth.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nLead-Bismuth eutectic.\n\nThis is a great coolant for superfast neutron reactors. It's heavy though.\n\nThe data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to\nthis file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data\ncontained in this file should not be used in production simulations.\n\"\"\"\n\nimport math\n\nfrom armi.materials import material\nfrom armi.utils.units import getTk\n\n\nclass LeadBismuth(material.Fluid):\n    \"\"\"Lead bismuth eutectic.\"\"\"\n\n    propertyValidTemperature = {\n        \"density\": ((400, 1300), \"K\"),\n        \"dynamic visc\": ((400, 1100), \"K\"),\n        \"heat capacity\": ((400, 1100), \"K\"),\n        \"thermal conductivity\": ((400, 1100), \"K\"),\n        \"volumetric expansion\": ((400, 1300), \"K\"),\n    }\n\n    def setDefaultMassFracs(self):\n        r\"\"\"Mass fractions.\"\"\"\n        self.setMassFrac(\"PB\", 0.445)\n        self.setMassFrac(\"BI209\", 0.555)\n\n    def pseudoDensity(self, Tk=None, Tc=None):\n        r\"\"\"Density in g/cc from V. 
Sobolev/ J Nucl Mat 362 (2007) 235-247.\"\"\"\n        Tk = getTk(Tc, Tk)\n        self.checkPropertyTempRange(\"density\", Tk)\n\n        return 11.096 - 0.0013236 * Tk  # pre-converted from kg/m^3 to g/cc\n\n    def dynamicVisc(self, Tk=None, Tc=None):\n        r\"\"\"Dynamic viscosity in Pa-s from Sobolev.\n\n        Accessed online at:\n        http://www.oecd-nea.org/science/reports/2007/nea6195-handbook.html on 11/9/12\n        \"\"\"\n        Tk = getTk(Tc, Tk)\n        self.checkPropertyTempRange(\"dynamic visc\", Tk)\n\n        return 4.94e-4 * math.exp(754.1 / Tk)\n\n    def heatCapacity(self, Tk=None, Tc=None):\n        r\"\"\"Heat capacity in J/kg/K from Sobolev. Expected accuracy 5%.\"\"\"\n        Tk = getTk(Tc, Tk)\n        self.checkPropertyTempRange(\"heat capacity\", Tk)\n\n        return 159 - 2.72e-2 * Tk + 7.12e-6 * Tk**2\n\n    def thermalConductivity(self, Tk=None, Tc=None):\n        r\"\"\"Thermal conductivity in W/m/K from Sobolev.\n\n        Accessed online at:\n        http://www.oecd-nea.org/science/reports/2007/nea6195-handbook.html on 11/9/12\n        \"\"\"\n        Tk = getTk(Tc, Tk)\n        self.checkPropertyTempRange(\"thermal conductivity\", Tk)\n\n        return 2.45 * Tk / (86.334 + 0.0511 * Tk)\n\n    def volumetricExpansion(self, Tk=None, Tc=None):\n        r\"\"\"Volumetric expansion inferred from density.\n\n        NOT BASED ON MEASUREMENT.\n        Done by V. Sobolev/ J Nucl Mat 362 (2007) 235-247\n        \"\"\"\n        Tk = getTk(Tc, Tk)\n        self.checkPropertyTempRange(\"volumetric expansion\", Tk)\n\n        return 1.0 / (8383.2 - Tk)\n"
  },
  {
    "path": "armi/materials/lithium.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nLithium.\n\nThe data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to\nthis file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data\ncontained in this file should not be used in production simulations.\n\nWarning\n-------\nWhenever you irradiate lithium you will get tritium.\n\"\"\"\n\nfrom armi import runLog\nfrom armi.materials import material\nfrom armi.utils.mathematics import getFloat\n\n\nclass Lithium(material.Fluid):\n    references = {\"density\": \"Wikipedia\"}\n    enrichedNuclide = \"LI6\"\n\n    def applyInputParams(self, LI_wt_frac=None, LI6_wt_frac=None, *args, **kwargs):\n        if LI_wt_frac is not None:\n            runLog.warning(\n                \"The 'LI_wt_frac' material modification for Lithium will be deprecated\"\n                \" Update your inputs to use 'LI6_wt_frac' instead.\",\n                single=True,\n                label=\"Lithium applyInputParams 1\",\n            )\n            if LI6_wt_frac is not None:\n                runLog.warning(\n                    f\"Both 'LI_wt_frac' and 'LI6_wt_frac' are specified for {self}. 
'LI6_wt_frac' will be used.\",\n                    single=True,\n                    label=\"Lithium applyInputParams 2\",\n                )\n\n        LI6_wt_frac = LI6_wt_frac or LI_wt_frac\n\n        enrich = getFloat(LI6_wt_frac)\n        # allow 0.0 to pass in!\n        if enrich is not None:\n            self.adjustMassEnrichment(LI6_wt_frac)\n\n    def pseudoDensity(self, Tk=None, Tc=None):\n        r\"\"\"Density (g/cc) from Wikipedia.\n\n        Will be liquid above 180C.\n\n        Notes\n        -----\n        In ARMI, we define pseudoDensity() and density() as the same for Fluids.\n        \"\"\"\n        return 0.512\n\n    def setDefaultMassFracs(self):\n        nb = self.parent.nuclideBases if self.parent else None\n        if nb is None:\n            li6abundance = 0.0759\n            li7abundance = 0.92410004\n        else:\n            li6abundance = nb.byName[\"LI6\"].abundance\n            li7abundance = nb.byName[\"LI7\"].abundance\n\n        self.setMassFrac(\"LI6\", li6abundance)\n        self.setMassFrac(\"LI7\", li7abundance)\n\n    def meltingPoint(self):\n        return 453.69  # K\n\n    def boilingPoint(self):\n        return 1615.0  # K\n\n    def thermalConductivity(self, Tk=None, Tc=None):\n        \"\"\"Wikipedia.\"\"\"\n        return 84.8  # W/m-K\n\n    def heatCapacity(self, Tk=None, Tc=None):\n        return 3570.0\n"
  },
  {
    "path": "armi/materials/magnesium.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Magnesium.\n\nThe data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to\nthis file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data\ncontained in this file should not be used in production simulations.\n\"\"\"\n\nfrom armi.materials import material\nfrom armi.utils.units import getTk\n\n\nclass Magnesium(material.Fluid):\n    propertyValidTemperature = {\"density\": ((923, 1390), \"K\")}\n\n    def setDefaultMassFracs(self):\n        self.setMassFrac(\"MG\", 1.0)\n\n    def pseudoDensity(self, Tk=None, Tc=None):\n        \"\"\"Returns mass density of magnesium in g/cm3.\n\n        The Liquid Temperature Range, Density and Constants of Magnesium. P.J. McGonigal. Temple University 1961.\n\n        Notes\n        -----\n        For Fluids, ARMI defines this 2D pseudodensity is the same as the usual 3D physical density.\n        \"\"\"\n        Tk = getTk(Tc, Tk)\n        self.checkPropertyTempRange(\"density\", Tk)\n\n        return 1.834 - 2.647e-4 * Tk\n"
  },
  {
    "path": "armi/materials/material.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nBase Material classes.\n\nMost temperatures may be specified in either K or C and the functions will convert for you.\n\"\"\"\n\nimport functools\nimport traceback\nimport warnings\n\nimport numpy as np\nfrom scipy.optimize import fsolve\n\nfrom armi import runLog\nfrom armi.nucDirectory import nuclideBases\nfrom armi.reactor.flags import TypeSpec\nfrom armi.utils import densityTools\nfrom armi.utils.units import getTc, getTk\n\n# globals\nFAIL_ON_RANGE = True\n\n\ndef parentAwareDensityRedirect(f):\n    \"\"\"Wrap Material.density to warn people about potential problems.\n\n    If a Material is linked to a Component, ``Material.density`` may produce\n    different results from ``Component.density``. The component's density\n    is considered the source of truth because it incorporates changes in volume,\n    composition, and temperature in concert with the state of the reactor.\n    \"\"\"\n\n    @functools.wraps(f)\n    def inner(self: \"Material\", *args, **kwargs) -> float:\n        if self.parent is not None:\n            stack = traceback.extract_stack()\n            # last entry is here, second to last is what called this\n            caller = stack[-2]\n            label = f\"Found call to Material.density in {caller.filename} at line {caller.lineno}\"\n            runLog.warning(\n                f\"{label}. 
Calls to Material.density when attached to a component have the potential to induce \"\n                \"subtle differences as Component.density and Material.density can diverge.\",\n                single=True,\n                label=label,\n            )\n        return f(self, *args, **kwargs)\n\n    return inner\n\n\nclass Material:\n    r\"\"\"\n    A material is made up of elements or isotopes. It has bulk properties like density.\n\n    .. impl:: The abstract material class.\n        :id: I_ARMI_MAT_PROPERTIES\n        :implements: R_ARMI_MAT_PROPERTIES\n\n        The ARMI Materials library is based on the Object-Oriented Programming design approach, and\n        uses this generic ``Material`` base class. In this class we define a large number of\n        material properties like density, heat capacity, or linear expansion coefficient. Specific\n        materials then subclass this base class to assign particular values to those properties.\n\n    .. impl:: Materials generate nuclide mass fractions at instantiation.\n        :id: I_ARMI_MAT_FRACS\n        :implements: R_ARMI_MAT_FRACS\n\n        An ARMI material is meant to be able to represent real world materials that might be used in\n        the construction of a nuclear reactor. As such, they are not just individual nuclides, but\n        practical materials like a particular concrete, steel, or water. One of the main things that\n        will be needed to describe such a material is the exact nuclide fractions. 
As such, the\n        constructor of every Material subclass attempts to set these mass fractions.\n\n    Attributes\n    ----------\n    parent : Component\n        The component to which this material belongs\n    massFrac : dict\n        Mass fractions for all nuclides in the material keyed on the nuclide symbols\n    refDens : float\n        A reference density used by some materials, for instance `SimpleSolid`\\ s, during thermal\n        expansion\n    theoreticalDensityFrac : float\n        Fraction of the material's density in reality, which is commonly different from 1.0 in solid\n        materials due to the manufacturing process. Can often be set from the blueprints input via\n        the TD_frac material modification. For programmatic setting, use `adjustTD()`.\n\n    Notes\n    -----\n    Specific material classes may have many more attributes specific to the implementation\n    for that material.\n    \"\"\"\n\n    def __init_subclass__(cls) -> None:\n        # Apply the density decorator to every subclass\n        if not hasattr(cls.density, \"__wrapped__\"):\n            cls.density = parentAwareDensityRedirect(cls.density)\n\n    DATA_SOURCE = \"ARMI\"\n    \"\"\"Indication of where the material is loaded from (may be plugin name)\"\"\"\n\n    references = {}\n    \"\"\"The literature references {property : citation}\"\"\"\n\n    enrichedNuclide = None\n    \"\"\"Name of enriched nuclide to be interpreted by enrichment modification methods\"\"\"\n\n    modelConst = {}\n    \"\"\"Constants that may be used in interpolation functions for property lookups\"\"\"\n\n    propertyValidTemperature = {}\n    \"\"\"Dictionary of valid temperatures over which the property models are valid in the format\n    'Property Name': ((Temperature_Lower_Limit, Temperature_Upper_Limit), Temperature_Units)\"\"\"\n\n    thermalScatteringLaws = ()\n    \"\"\"A tuple of :py:class:`~armi.nucDirectory.thermalScattering.ThermalScatteringLabels` instances with information\n    
about thermal scattering.\"\"\"\n\n    def __init__(self):\n        self.parent = None\n        self.massFrac = {}\n        self.refDens = 0.0\n        self.theoreticalDensityFrac = 1.0\n        self.cached = {}\n        self._backupCache = None\n        self._name = self.__class__.__name__\n\n        # call subclass implementations\n        self.setDefaultMassFracs()\n\n    def __repr__(self):\n        return f\"<Material: {self._name}>\"\n\n    @property\n    def name(self):\n        \"\"\"Getter for the private name attribute of this Material.\"\"\"\n        return self._name\n\n    @name.setter\n    def name(self, nomen):\n        \"\"\"Setter for the private name attribute of this Material.\n\n        Warning\n        -------\n        Some code in ARMI expects the \"name\" of a material matches its class name. So you use this\n        method at your own risk.\n\n        See Also\n        --------\n        armi.materials.resolveMaterialClassByName\n        \"\"\"\n        self._name = nomen\n\n    def getName(self):\n        \"\"\"Duplicate of name property, kept for backwards compatibility.\"\"\"\n        return self._name\n\n    def getChildren(self, deep=False, generationNum=1, includeMaterials=False, predicate=None):\n        \"\"\"Return empty list, representing that materials have no children.\"\"\"\n        return []\n\n    def getChildrenWithFlags(self, typeSpec: TypeSpec, exactMatch=True):\n        \"\"\"Return empty list, representing that this object has no children.\"\"\"\n        return []\n\n    def backUp(self):\n        \"\"\"Create and store a backup of the state.\"\"\"\n        self._backupCache = (self.cached, self._backupCache)\n        self.cached = {}  # don't .clear(), using reference above!\n\n    def restoreBackup(self, paramsToApply):\n        \"\"\"Restore the parameters from previously created backup.\"\"\"\n        self.cached, self._backupCache = self._backupCache\n\n    def clearCache(self):\n        \"\"\"Clear the cache so all 
new values are recomputed.\"\"\"\n        self.cached = {}\n\n    def _getCached(self, name):\n        \"\"\"Obtain a value from the cache.\"\"\"\n        return self.cached.get(name, None)\n\n    def _setCache(self, name, val):\n        \"\"\"\n        Set a value in the cache.\n\n        See Also\n        --------\n        _getCached : returns a previously-cached value\n        \"\"\"\n        self.cached[name] = val\n\n    def duplicate(self):\n        \"\"\"Copy without needing a deepcopy.\"\"\"\n        m = self.__class__()\n\n        m.massFrac = {}\n        for key, val in self.massFrac.items():\n            m.massFrac[key] = val\n\n        m.parent = self.parent\n        m.refDens = self.refDens\n        m.theoreticalDensityFrac = self.theoreticalDensityFrac\n\n        return m\n\n    def linearExpansion(self, Tk: float = None, Tc: float = None) -> float:\n        \"\"\"\n        The instantaneous linear expansion coefficient (dL/L)/dT.\n\n        This is used for reactivity coefficients, etc. but will not affect density or dimensions.\n\n        See Also\n        --------\n        linearExpansionPercent : average linear thermal expansion to affect dimensions and density\n        \"\"\"\n        raise NotImplementedError(f\"{self} does not have a linear expansion property defined\")\n\n    def linearExpansionPercent(self, Tk: float = None, Tc: float = None) -> float:\n        \"\"\"\n        Average thermal expansion dL/L. 
Used for computing hot dimensions and density.\n\n        Defaults to 0.0 for materials that don't expand.\n\n        Parameters\n        ----------\n        Tk : float\n            temperature in (K)\n        Tc : float\n            Temperature in (C)\n\n        Returns\n        -------\n        dLL(T) in % m/m/K\n\n        See Also\n        --------\n        linearExpansion : handle instantaneous thermal expansion coefficients\n        \"\"\"\n        return 0.0\n\n    def linearExpansionFactor(self, Tc: float, T0: float) -> float:\n        \"\"\"\n        Return a dL/L factor relative to T0 instead of the material-dependent reference temperature.\n\n        Notes\n        -----\n        For a detailed description of the linear expansion methodology, see \"thermalExpansion\" in the documentation.\n\n        Parameters\n        ----------\n        Tc : float\n            Current (hot) temperature in C\n        T0 : float\n            Cold temperature in C\n\n        Returns\n        -------\n        dLL: float\n            The average thermal expansion between Tc and T0. 
If there is no dLL, it should return 0.0.\n\n        See Also\n        --------\n        linearExpansionPercent\n        \"\"\"\n        dLLhot = self.linearExpansionPercent(Tc=Tc)\n        dLLcold = self.linearExpansionPercent(Tc=T0)\n\n        return (dLLhot - dLLcold) / (100.0 + dLLcold)\n\n    def getThermalExpansionDensityReduction(self, prevTempInC: float, newTempInC: float) -> float:\n        \"\"\"Return the factor required to update thermal expansion going from temperatureInC to temperatureInCNew.\"\"\"\n        dLL = self.linearExpansionFactor(Tc=newTempInC, T0=prevTempInC)\n        return 1.0 / (1 + dLL) ** 2\n\n    def setDefaultMassFracs(self):\n        \"\"\"Mass fractions.\"\"\"\n        pass\n\n    def setMassFrac(self, nucName: str, massFrac: float) -> None:\n        \"\"\"\n        Assigns the mass fraction of a nuclide within the material.\n\n        Notes\n        -----\n        This will try to convert the provided ``massFrac`` into a float for assignment. If the\n        conversion cannot occur then an error will be thrown.\n        \"\"\"\n        try:\n            massFrac = float(massFrac)\n        except Exception as ee:\n            raise TypeError(\n                f\"Error in converting the mass fraction of {massFrac} \"\n                f\"for nuclide {nucName} in {self} to a float. 
\"\n                f\"Exception: {ee}\"\n            )\n\n        if massFrac < 0.0 or massFrac > 1.0:\n            raise ValueError(f\"Mass fraction of {massFrac} for {nucName} is not between 0 and 1.\")\n\n        self.massFrac[nucName] = massFrac\n\n    def applyInputParams(self):\n        \"\"\"Apply material-specific material input parameters.\"\"\"\n        pass\n\n    def adjustMassEnrichment(self, massEnrichment: float) -> None:\n        \"\"\"\n        Adjust the enrichment of the material.\n\n        See Also\n        --------\n        adjustMassFrac\n        \"\"\"\n        self.adjustMassFrac(self.enrichedNuclide, massEnrichment)\n\n    def adjustMassFrac(self, nuclideName: str, massFraction: float) -> None:\n        \"\"\"\n        Change the mass fraction of the specified nuclide.\n\n        This adjusts the mass fraction of a specified nuclide relative to other nuclides of the same\n        element. If there are no other nuclides within the element, then it is enriched relative to\n        the entire material. For example, enriching U235 in UZr would enrich U235 relative to U238\n        and other naturally occurring uranium isotopes. 
Likewise, enriching ZR in UZr would enrich\n        ZR relative to uranium.\n\n        The method maintains a constant number of atoms, and adjusts ``refDens`` accordingly.\n\n        Parameters\n        ----------\n        nuclideName : str\n            Name of nuclide to enrich.\n\n        massFraction : float\n            New mass fraction to achieve.\n        \"\"\"\n        if massFraction > 1.0 or massFraction < 0.0:\n            raise ValueError(f\"Cannot enrich to massFraction of {massFraction}, must be between 0 and 1\")\n\n        nucsNames = list(self.massFrac)\n\n        # refDens could be zero, but cannot normalize to zero.\n        density = self.refDens or 1.0\n        massDensities = np.array([self.massFrac[nuc] for nuc in nucsNames]) * density\n        atomicMasses = np.array([nuclideBases.byName[nuc].weight for nuc in nucsNames])  # in AMU\n        molesPerCC = massDensities / atomicMasses  # item-wise division\n\n        enrichedIndex = nucsNames.index(nuclideName)\n        isoAndEles = nuclideBases.byName[nuclideName].element.nuclides\n        allIndicesUpdated = [nucsNames.index(nuc.name) for nuc in isoAndEles if nuc.name in self.massFrac]\n\n        if len(allIndicesUpdated) == 1:\n            if isinstance(\n                nuclideBases.byName[nuclideName], nuclideBases.NaturalNuclideBase\n            ) or nuclideBases.isMonoIsotopicElement(nuclideName):\n                # If there are not any other nuclides, assume we are enriching an entire element.\n                # Consequently, allIndicesUpdated is no longer the element's indices, but the materials indices\n                allIndicesUpdated = range(len(nucsNames))\n            else:\n                raise ValueError(  # could be warning if problematic\n                    f\"Nuclide {nuclideName} was to be enriched in material {self}, but there were no other isotopes of \"\n                    \"that element. 
Could not assume the enrichment of the entire element as there were other possible \"\n                    \"isotopes that did not exist in this material.\"\n                )\n\n        if massFraction == 1.0:\n            massDensities[allIndicesUpdated] = 0.0\n            massDensities[enrichedIndex] = 1.0\n        else:\n            balanceWeight = massDensities[allIndicesUpdated].sum() - massDensities[enrichedIndex]\n            if balanceWeight == 0.0:\n                onlyOneOtherFracToDetermine = len(allIndicesUpdated) == 2\n                if not onlyOneOtherFracToDetermine:\n                    raise ValueError(\n                        f\"Material {self} has too many masses set to zero. cannot enrich {nuclideName} to \"\n                        f\"{massFraction}. Current mass fractions: {self.massFrac}\"\n                    )\n                # massDensities get normalized later when conserving atoms; these are just ratios\n                massDensities[allIndicesUpdated] = 1 - massFraction  # there is only one other.\n                massDensities[enrichedIndex] = massFraction\n            else:\n                # derived from solving the following equation for enrchedWeight:\n                # massFraction = enrichedWeight / (enrichedWeight + balanceWeight)\n                massDensities[enrichedIndex] = massFraction * balanceWeight / (1 - massFraction)\n        # ratio is set by here but atoms not conserved yet\n\n        updatedNucsMolesPerCC = massDensities[allIndicesUpdated] / atomicMasses[allIndicesUpdated]\n        updatedNucsMolesPerCC *= molesPerCC[allIndicesUpdated].sum() / updatedNucsMolesPerCC.sum()  # conserve atoms\n        molesPerCC[allIndicesUpdated] = updatedNucsMolesPerCC\n\n        updatedMassDensities = molesPerCC * atomicMasses\n        updatedDensity = updatedMassDensities.sum()\n        massFracs = updatedMassDensities / updatedDensity\n\n        if not np.isclose(sum(massFracs), 1.0, atol=1e-10):\n            raise 
RuntimeError(f\"The mass fractions {massFracs} in {self} do not sum to 1.0.\")\n\n        self.massFrac = {nuc: weight for nuc, weight in zip(nucsNames, massFracs)}\n        if self.refDens != 0.0:  # don't update density if not assigned\n            self.refDens = updatedDensity\n\n    def volumetricExpansion(self, Tk=None, Tc=None):\n        pass\n\n    def getTemperatureAtDensity(self, targetDensity: float, tempGuessInC: float) -> float:\n        \"\"\"Get the temperature at which the perturbed density occurs (in Celsius).\"\"\"\n        # 0 at tempertature of targetDensity\n        densFunc = lambda temp: self.density(Tc=temp) - targetDensity\n        # is a numpy array if fsolve is called\n        tAtTargetDensity = float(fsolve(densFunc, tempGuessInC)[0])\n        return tAtTargetDensity\n\n    @property\n    def liquidPorosity(self) -> float:\n        \"\"\"Fraction of the material that is liquid void (unitless).\"\"\"\n        return 0.0 if self.parent is None else self.parent.liquidPorosity\n\n    @property\n    def gasPorosity(self) -> float:\n        \"\"\"Fraction of the material that is gas void (unitless).\"\"\"\n        return 0.0 if self.parent is None else self.parent.gasPorosity\n\n    def pseudoDensity(self, Tk: float = None, Tc: float = None) -> float:\n        \"\"\"\n        Return density that preserves mass when thermally expanded in 2D (in g/cm^3).\n\n        Warning\n        -------\n        This will not typically agree with ``Material.density()`` or ``Component.density()``\n        since this method only expands in 2 dimensions. 
Depending on your use of\n        ``inputHeightsConsideredHot`` and ``Component.temperatureInC``, ``Material.psuedoDensity()``\n        may be a factor of (1+dLL) different than ``Material.density()`` or ``Component.density()``.\n\n        In the case of fluids, density and pseudoDensity are the same as density is not driven by\n        linear expansion, but  rather an explicit density function dependent on temperature.\n        ``Material.linearExpansionPercent()`` is zero for a fluid.\n\n        See Also\n        --------\n        density\n        armi.reactor.components.component.Component.density\n        \"\"\"\n        Tk = getTk(Tc, Tk)\n        dLL = self.linearExpansionPercent(Tk=Tk)\n        if self.refDens is None:\n            runLog.warning(\n                f\"{self} has no reference density\",\n                single=True,\n                label=\"No refD \" + self.getName(),\n            )\n            self.refDens = 0.0\n\n        f = (1.0 + dLL / 100.0) ** 2\n        return self.refDens / f\n\n    def pseudoDensityKgM3(self, Tk: float = None, Tc: float = None) -> float:\n        \"\"\"\n        Return density that preserves mass when thermally expanded in 2D in units of kg/m^3.\n\n        See Also\n        --------\n        density:\n            Arguments are forwarded to the g/cc version\n        \"\"\"\n        return self.pseudoDensity(Tk, Tc) * 1000.0\n\n    def density(self, Tk: float = None, Tc: float = None) -> float:\n        \"\"\"\n        Return density that preserves mass when thermally expanded in 3D (in g/cm^3).\n\n        Notes\n        -----\n        Since refDens is specified at the material-dep reference case, we don't need to specify the\n        reference temperature. 
It is already consistent with linearExpansion Percent.\n        - p*(dp/p(T) + 1) =p*( p + dp(T) )/p = p + dp(T) = p(T)\n        - dp/p = (1-(1 + dL/L)**3)/(1 + dL/L)**3\n        \"\"\"\n        Tk = getTk(Tc, Tk)\n        dLL = self.linearExpansionPercent(Tk=Tk)\n        refD = self.refDens\n        if refD is None:\n            runLog.warning(\n                \"{0} has no reference density\".format(self),\n                single=True,\n                label=\"No refD \" + self.getName(),\n            )\n            return None\n        f = (1.0 + dLL / 100.0) ** 3\n        return refD / f\n\n    def densityKgM3(self, Tk: float = None, Tc: float = None) -> float:\n        \"\"\"Return density that preserves mass when thermally expanded in 3D in units of kg/m^3.\n\n        See Also\n        --------\n        density:\n            Arguments are forwarded to the g/cc version\n        \"\"\"\n        return self.density(Tk, Tc) * 1000.0\n\n    def getCorrosionRate(self, Tk: float = None, Tc: float = None) -> float:\n        \"\"\"Given a temperature, get the corrosion rate of the material (in microns/year).\"\"\"\n        return 0.0\n\n    def yieldStrength(self, Tk: float = None, Tc: float = None) -> float:\n        \"\"\"Returns yield strength at given T in MPa.\"\"\"\n        pass\n\n    def thermalConductivity(self, Tk: float = None, Tc: float = None) -> float:\n        \"\"\"Thermal conductivity for given T (in units of W/m/K).\"\"\"\n        pass\n\n    def getProperty(self, propName: str, Tk: float = None, Tc: float = None, **kwargs) -> float:\n        \"\"\"Gets properties in a way that caches them.\"\"\"\n        Tk = getTk(Tc, Tk)\n\n        cached = self._getCached(propName)\n        if cached and cached[0] == Tk:\n            # only use cached value if the temperature at which it is cached is the same.\n            return cached[1]\n        else:\n            # go look it up from material properties.\n            val = getattr(self, propName)(Tk=Tk, 
**kwargs)\n            # cache only one value for each property. Prevents unbounded cache explosion.\n            self._setCache(propName, (Tk, val))\n            return val\n\n    def getMassFrac(\n        self,\n        nucName=None,\n        normalized=True,\n        expandFissionProducts=False,\n    ):\n        \"\"\"\n        Return mass fraction of nucName.\n\n        Parameters\n        ----------\n        nucName : str, optional\n            Nuclide name to return ('ZR','PU239',etc.)\n\n        normalized : bool, optional\n            Return the mass fraction such that the sum of all nuclides is sum to 1.0. Default True\n\n        Notes\n        -----\n        self.massFrac are modified mass fractions that may not add up to 1.0 (for instance, after a\n        axial expansion, the modified mass fracs will sum to less than one. The alternative is to\n        put a multiplier on the density. They're mathematically equivalent.\n\n        This function returns the normalized mass fraction (they will add to 1.0) as long as the\n        mass fracs are modified only by get and setMassFrac\n\n        This is a performance-critical method as it is called millions of times in a typical ARMI\n        run.\n\n        See Also\n        --------\n        setMassFrac\n        \"\"\"\n        return self.massFrac.get(nucName, 0.0)\n\n    def clearMassFrac(self) -> None:\n        \"\"\"Zero out all nuclide mass fractions.\"\"\"\n        self.massFrac.clear()\n\n    def removeNucMassFrac(self, nuc: str) -> None:\n        self.setMassFrac(nuc, 0)\n        try:\n            del self.massFrac[nuc]\n        except KeyError:\n            # the nuc isn't in the mass Frac vector\n            pass\n\n    def checkPropertyTempRange(self, label, val):\n        \"\"\"Checks if the given property / value combination fall between the min and max valid\n        temperatures provided in the propertyValidTemperature object.\n\n        Parameters\n        ----------\n        label : str\n     
       The name of the function or property that is being checked.\n\n        val : float\n            The value to check whether it is between minT and maxT.\n\n        Notes\n        -----\n        This was designed as a convenience method for ``checkTempRange``.\n        \"\"\"\n        (minT, maxT) = self.propertyValidTemperature[label][0]\n        self.checkTempRange(minT, maxT, val, label)\n\n    def checkTempRange(self, minT, maxT, val, label=\"\"):\n        \"\"\"\n        Checks if the given temperature (val) is between the minT and maxT temperature limits\n        supplied.\n\n        Label identifies what material type or element is being evaluated in the check.\n\n        Parameters\n        ----------\n        minT, maxT : float\n            The minimum and maximum values that val is allowed to have.\n\n        val : float\n            The value to check whether it is between minT and maxT.\n\n        label : str\n            The name of the function or property that is being checked.\n        \"\"\"\n        if not minT <= val <= maxT:\n            msg = \"Temperature {0} out of range ({1} to {2}) for {3} {4}\".format(val, minT, maxT, self.name, label)\n            if FAIL_ON_RANGE or np.isnan(val):\n                runLog.error(msg)\n                raise ValueError(msg)\n            else:\n                runLog.warning(\n                    msg,\n                    single=True,\n                    label=f\"T out of bounds for {self.name} {label}\",\n                )\n\n    def densityTimesHeatCapacity(self, Tk: float = None, Tc: float = None) -> float:\n        \"\"\"\n        Return heat capacity * density at a temperature.\n\n        Parameters\n        ----------\n        Tk : float, optional\n            Temperature in Kelvin.\n        Tc : float, optional\n            Temperature in degrees Celsius\n\n        Returns\n        -------\n        rhoCP : float\n            Calculated value for the HT9 density* heat capacity\n            unit 
(J/m^3-K)\n        \"\"\"\n        Tc = getTc(Tc, Tk)\n\n        rhoCp = self.density(Tc=Tc) * 1000.0 * self.heatCapacity(Tc=Tc)\n\n        return rhoCp\n\n    def getNuclides(self):\n        \"\"\"\n        Return nuclides in the component that contains this Material.\n\n        Notes\n        -----\n        This method is the only reason Materials still have self.parent. Essentially, we want to\n        change that, but right now the logic for finding nuclides in the Reactor is recursive and\n        considers Materials first. The bulk of the work in finally removing this method will come in\n        downstream repos, where users have fully embraced this method and call it directly in many,\n        many places. Please do not use this method, as it is being deprecated.\n        \"\"\"\n        warnings.warn(\"Material.getNuclides is being deprecated.\", DeprecationWarning)\n        return self.parent.getNuclides()\n\n    def getTempChangeForDensityChange(self, Tc: float, densityFrac: float, quiet: bool = True) -> float:\n        \"\"\"Return a temperature difference for a given density perturbation.\"\"\"\n        linearExpansion = self.linearExpansion(Tc=Tc)\n        linearChange = densityFrac ** (-1.0 / 3.0) - 1.0\n        deltaT = linearChange / linearExpansion\n        if not quiet:\n            runLog.info(\n                f\"The linear expansion for {self.getName()} at initial temperature of {Tc} C is \"\n                f\"{linearExpansion}.\\nA change in density of {(densityFrac - 1.0) * 100.0} percent \"\n                \"at would require a change in temperature of {deltaT} C.\",\n                single=True,\n            )\n        return deltaT\n\n    def heatCapacity(self, Tk=None, Tc=None):\n        \"\"\"Returns heat capacity in units of J/kg/C.\"\"\"\n        raise NotImplementedError(f\"Material {type(self).__name__} does not implement heatCapacity\")\n\n    def getTD(self):\n        \"\"\"Get the fraction of theoretical density for this 
material.\"\"\"\n        return self.theoreticalDensityFrac\n\n    def adjustTD(self, val):\n        \"\"\"Set or change the fraction of theoretical density for this material.\"\"\"\n        self.theoreticalDensityFrac = val\n        self.clearCache()\n\n\nclass Fluid(Material):\n    \"\"\"A material that fills its container. Could also be a gas.\"\"\"\n\n    def __init_subclass__(cls):\n        # Undo the parent-aware density wrapping. Fluids do not expand in the same way solids, so\n        # Fluid.density(T) is correct. This does not hold for solids because they thermally expand.\n        if hasattr(cls.density, \"__wrapped__\"):\n            cls.density = cls.density.__wrapped__\n\n    def getThermalExpansionDensityReduction(self, prevTempInC, newTempInC):\n        \"\"\"Return the factor required to update thermal expansion going from one temperature (in\n        Celsius) to a new temperature.\n        \"\"\"\n        rho0 = self.pseudoDensity(Tc=prevTempInC)\n        if not rho0:\n            return 1.0\n        rho1 = self.pseudoDensity(Tc=newTempInC)\n        return rho1 / rho0\n\n    def linearExpansion(self, Tk=None, Tc=None):\n        \"\"\"For void, lets just not allow temperature changes to change dimensions\n        since it is a liquid it will fill its space.\n\n        .. impl:: Fluid materials are not thermally expandable.\n            :id: I_ARMI_MAT_FLUID\n            :implements: R_ARMI_MAT_FLUID\n\n            ARMI does not model thermal expansion of fluids. The ``Fluid`` superclass therefore sets\n            the thermal expansion coefficient to zero. 
All fluids subclassing  the ``Fluid``\n            material will inherit this method which sets the linear expansion coefficient to zero at\n            all temperatures.\n        \"\"\"\n        return 0.0\n\n    def getTempChangeForDensityChange(self, Tc: float, densityFrac: float, quiet: bool = True) -> float:\n        \"\"\"Return a temperature difference for a given density perturbation.\"\"\"\n        currentDensity = self.pseudoDensity(Tc=Tc)\n        perturbedDensity = currentDensity * densityFrac\n        tAtPerturbedDensity = self.getTemperatureAtDensity(perturbedDensity, Tc)\n        deltaT = tAtPerturbedDensity - Tc\n        if not quiet:\n            runLog.info(\n                \"A change in density of {} percent in {} at an initial temperature of {} C would \"\n                \"require a change in temperature of {} C.\".format(\n                    (densityFrac - 1.0) * 100.0, self.getName(), Tc, deltaT\n                ),\n                single=True,\n            )\n        return deltaT\n\n    def density(self, Tk=None, Tc=None):\n        \"\"\"\n        Return the density at the specified temperature for 3D expansion (in g/cm^3).\n\n        Notes\n        -----\n        For fluids, there is no such thing as 2D expansion so pseudoDensity() is already 3D.\n        \"\"\"\n        return self.pseudoDensity(Tk=Tk, Tc=Tc)\n\n\nclass SimpleSolid(Material):\n    \"\"\"\n    Base material for a simple material that primarily defines density.\n\n    See Also\n    --------\n    armi.materials.pseudoDensity:\n    armi.materials.density:\n    \"\"\"\n\n    refTempK = 300\n\n    def __init__(self):\n        Material.__init__(self)\n        self.refDens = self.density(Tk=self.refTempK)\n\n    def linearExpansionPercent(self, Tk: float = None, Tc: float = None) -> float:\n        \"\"\"\n        Average thermal expansion dL/L. 
Used for computing hot dimensions and density.\n\n        Defaults to 0.0 for materials that don't expand.\n\n        Parameters\n        ----------\n        Tk : float\n            temperature in (K)\n        Tc : float\n            Temperature in (C)\n\n        Returns\n        -------\n        dLL(T) in % m/m/K\n\n        Notes\n        -----\n        This only method only works for Simple Solid Materials which assumes the density function\n        returns 'free expansion' density as a function temperature\n        \"\"\"\n        density1 = self.density(Tk=self.refTempK)\n        density2 = self.density(Tk=Tk, Tc=Tc)\n\n        if density1 == density2:\n            return 0\n        else:\n            return 100 * ((density1 / density2) ** (1.0 / 3.0) - 1)\n\n    def density(self, Tk: float = None, Tc: float = None) -> float:\n        \"\"\"Material density (in g/cm^3).\"\"\"\n        return 0.0\n\n    def pseudoDensity(self, Tk: float = None, Tc: float = None) -> float:\n        \"\"\"\n        The same method as the parent class, but with the ability to apply a\n        non-unity theoretical density (in g/cm^3).\n        \"\"\"\n        return Material.pseudoDensity(self, Tk=Tk, Tc=Tc) * self.getTD()\n\n\nclass FuelMaterial(Material):\n    \"\"\"\n    Material that is considered a nuclear fuel.\n\n    All this really does is enable the special class 1/class 2 isotopics input option.\n    \"\"\"\n\n    class1_wt_frac = None\n    class1_custom_isotopics = None\n    class2_custom_isotopics = None\n\n    def applyInputParams(\n        self,\n        class1_custom_isotopics=None,\n        class2_custom_isotopics=None,\n        class1_wt_frac=None,\n        customIsotopics=None,\n    ):\n        \"\"\"Apply optional class 1/class 2 custom enrichment input.\n\n        Notes\n        -----\n        This is often overridden to insert customized material modification parameters but then this\n        parent should always be called at the end in case users want to use 
this style of custom\n        input.\n\n        This is only applied to materials considered fuel so we don't apply these kinds of\n        parameters to coolants and structural material, which are often not parameterized with any\n        kind of enrichment.\n        \"\"\"\n        if class1_wt_frac:\n            if not 0 <= class1_wt_frac <= 1:\n                raise ValueError(\n                    f\"class1_wt_frac must be between 0 and 1 (inclusive). Right now it is {class1_wt_frac}.\"\n                )\n\n            validIsotopics = customIsotopics.keys()\n            errMsg = \"{} '{}' not found in the defined custom isotopics.\"\n            if class1_custom_isotopics not in validIsotopics:\n                raise KeyError(errMsg.format(\"class1_custom_isotopics\", class1_custom_isotopics))\n            if class2_custom_isotopics not in validIsotopics:\n                raise KeyError(errMsg.format(\"class2_custom_isotopics\", class2_custom_isotopics))\n            if class1_custom_isotopics == class2_custom_isotopics:\n                runLog.warning(\n                    \"The custom isotopics specified for the class1/class2 materials are both \"\n                    f\"'{class1_custom_isotopics}'. 
You are not actually blending anything!\"\n                )\n\n            self.class1_wt_frac = class1_wt_frac\n            self.class1_custom_isotopics = class1_custom_isotopics\n            self.class2_custom_isotopics = class2_custom_isotopics\n\n            self._applyIsotopicsMixFromCustomIsotopicsInput(customIsotopics)\n\n    def _applyIsotopicsMixFromCustomIsotopicsInput(self, customIsotopics):\n        \"\"\"\n        Apply a Class 1/Class 2 mixture of custom isotopics at input.\n\n        Only adjust heavy metal.\n\n        This may also be needed for building charge assemblies during reprocessing, but will take\n        input from the SFP rather than from the input external feeds.\n        \"\"\"\n        class1Isotopics = customIsotopics[self.class1_custom_isotopics]\n        class2Isotopics = customIsotopics[self.class2_custom_isotopics]\n        densityTools.applyIsotopicsMix(self, class1Isotopics, class2Isotopics)\n\n    def duplicate(self):\n        \"\"\"Copy without needing a deepcopy.\"\"\"\n        m = self.__class__()\n\n        m.massFrac = {}\n        for key, val in self.massFrac.items():\n            m.massFrac[key] = val\n\n        m.parent = self.parent\n        m.refDens = self.refDens\n        m.theoreticalDensityFrac = self.theoreticalDensityFrac\n\n        m.class1_wt_frac = self.class1_wt_frac\n        m.class1_custom_isotopics = self.class1_custom_isotopics\n        m.class2_custom_isotopics = self.class2_custom_isotopics\n\n        return m\n"
  },
  {
    "path": "armi/materials/mgO.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Magnesium Oxide.\n\nThe data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to\nthis file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data\ncontained in this file should not be used in production simulations.\n\"\"\"\n\nfrom armi.materials.material import Material\nfrom armi.utils.units import getTc, getTk\n\n\nclass MgO(Material):\n    \"\"\"MagnesiumOxide.\"\"\"\n\n    propertyValidTemperature = {\n        \"density\": ((273, 1273), \"K\"),\n        \"linear expansion percent\": ((273.15, 1273.15), \"K\"),\n    }\n\n    def __init__(self):\n        Material.__init__(self)\n        \"\"\"Same reference as linear expansion. Table II.\n\n        Reference density is from Wolfram Alpha At STP (273 K)\n        \"\"\"\n        self.refDens = 3.58\n\n    def setDefaultMassFracs(self):\n        \"\"\"Mass fractions.\"\"\"\n        self.setMassFrac(\"MG\", 0.603035897)\n        self.setMassFrac(\"O16\", 0.396964103)\n\n    def linearExpansionPercent(self, Tk=None, Tc=None):\n        \"\"\"The coefficient of expansion of magnesium oxide.\n\n        Milo A. 
Durand\n        Journal of Applied Physics 7, 297 (1936); doi: 10.1063/1.1745396\n\n        This is based on a 3rd order polynomial fit of the data in Table I.\n        \"\"\"\n        Tc = getTc(Tc, Tk)\n        Tk = getTk(Tc=Tc)\n        self.checkPropertyTempRange(\"linear expansion percent\", Tk)\n        return 1.0489e-5 * Tc + 6.0458e-9 * Tc**2 - 2.6875e-12 * Tc**3\n"
  },
  {
    "path": "armi/materials/mixture.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Homogenized mixture material.\"\"\"\n\nfrom armi import materials\n\n\nclass _Mixture(materials.Material):\n    \"\"\"\n    Homogenized mixture of materials.\n\n    :meta public:\n\n    .. warning:: This class is meant to be used for homogenized block models for neutronics and other\n       physics solvers.\n\n    Notes\n    -----\n    This material class can be used to represent a homognized mixture of materials within a block.\n    This would be done for performance reasons. It allows ARMI to avoid copying and carrying around\n    the detailed, explicit representation of components within a block to be used in a physics solver\n    when that solver only needs to know the homogenized number density within a block.\n\n    See Also\n    --------\n    armi.reactor.blocks.HexBlock.createHomogenizedCopy\n    \"\"\"\n"
  },
  {
    "path": "armi/materials/molybdenum.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Molybdenum.\n\nThe data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to\nthis file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data\ncontained in this file should not be used in production simulations.\n\"\"\"\n\nfrom armi.materials.material import SimpleSolid\n\n\nclass Molybdenum(SimpleSolid):\n    def setDefaultMassFracs(self):\n        \"\"\"Moly mass fractions.\"\"\"\n        self.setMassFrac(\"MO\", 1.0)\n\n    def density(self, Tk=None, Tc=None):\n        return 10.28  # g/cc\n"
  },
  {
    "path": "armi/materials/mox.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nMixed-oxide (MOX) ceramic fuel.\n\nA definitive source for these properties is [#ornltm20002]_.\n\n.. [#ornltm20002] Thermophysical Properties of MOX and UO2 Fuels Including the Effects of\n    Irradiation. S.G. Popov, et.al.  Oak Ridge National Laboratory.\n    ORNL/TM-2000/351 https://rsicc.ornl.gov/fmdp/tm2000-351.pdf\n\nThe data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to\nthis file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data\ncontained in this file should not be used in production simulations.\n\"\"\"\n\nfrom armi import runLog\nfrom armi.materials import material\nfrom armi.materials.uraniumOxide import UraniumOxide\nfrom armi.nucDirectory import nucDir\n\n\nclass MOX(UraniumOxide):\n    \"\"\"\n    MOX fuel.\n\n    Some parameters (density, thermal conductivity, etc) are inherited from UraniumOxide. 
These\n    parameters are sufficiently equivalent to pure UO2 in the literature to leave them unchanged.\n\n    Specific MOX mixtures may be defined in blueprints under custom isotopics.\n    \"\"\"\n\n    enrichedNuclide = \"U235\"\n\n    def __init__(self):\n        UraniumOxide.__init__(self)\n\n    def applyInputParams(self, U235_wt_frac=None, TD_frac=None, mass_frac_PU02=None, *args, **kwargs):\n        if U235_wt_frac is not None:\n            self.adjustMassEnrichment(U235_wt_frac)\n\n        td = TD_frac\n        if td is not None:\n            if td > 1.0:\n                runLog.warning(\n                    \"Theoretical density frac for {0} is {1}, which is >1\".format(self, td),\n                    single=True,\n                    label=\"Large theoretical density\",\n                )\n            elif td == 0:\n                runLog.warning(\n                    f\"Theoretical density frac for {self} is zero!\",\n                    single=True,\n                    label=\"Zero theoretical density\",\n                )\n            self.adjustTD(td)\n\n        if mass_frac_PU02 is not None:\n            self.setMassFracPuO2(mass_frac_PU02)\n        material.FuelMaterial.applyInputParams(self, *args, **kwargs)\n\n    def getMassFracPuO2(self):\n        massFracPu = sum([self.getMassFrac(n) for n in nucDir.getNuclideNames(elementSymbol=\"PU\")])\n        massFracU = sum([self.getMassFrac(n) for n in nucDir.getNuclideNames(elementSymbol=\"U\")])\n        return massFracPu / (massFracPu + massFracU)\n\n    def setMassFracPuO2(self, massFracPuO2):\n        massFracPu = sum([self.getMassFrac(n) for n in nucDir.getNuclideNames(elementSymbol=\"PU\")])\n        massFracU = sum([self.getMassFrac(n) for n in nucDir.getNuclideNames(elementSymbol=\"U\")])\n        total = massFracU + massFracPu\n\n        for Pu in nucDir.getNuclideNames(\"PU\"):\n            self.setMassFrac(Pu, self.getMassFrac(Pu) / massFracPu * massFracPuO2 * total)\n\n        for U in 
nucDir.getNuclideNames(\"U\"):\n            self.setMassFrac(U, self.getMassFrac(U) / massFracU * (1 - massFracPuO2) * total)\n\n    def getMolFracPuO2(self):\n        molweightUO2 = 270.02771  # Approximation, does not include variance due to isotopes\n        molweightPuO2 = 275.9988  # Approximation, does not include variance due to isotopes\n\n        massFracPuO2 = self.getMassFracPuO2()\n        massFracUO2 = 1 - massFracPuO2\n        return massFracPuO2 * molweightUO2 / massFracUO2 / molweightPuO2\n\n    def setDefaultMassFracs(self):\n        r\"\"\"UO2 + PuO2 mixture mass fractions.\n\n        Pu238: 238.0495599 g/mol\n        Pu239: 239.0521634 g/mol\n        Pu240: 240.0538135 g/mol\n        Pu241: 241.0568515 g/mol\n        Pu242: 242.0587426 g/mol\n        Am241: 241.0568291 g/mol\n        U-235: 235.0439299 g/mol\n        U-238: 238.0507882 g/mol\n        Oxygen: 15.9994 g/mol\n\n        JOYO MOX mass fraction calculation:\n        Pu mixture: 0.1% Pu238 + 76.82% Pu239 + 19.23% Pu240 + 2.66% Pu241 + 0.55% Pu242 + 0.64% Am241\n        Pu atomic mass: 239.326469 g/mol\n\n        U mixture: 22.99% U-235 + 77.01% U-238\n        U atomic mass: 237.359511 g/mol\n\n        UPu mixture: 17.7% Pu mixture + 82.3% U mixture\n        UPu atomic mass: 237.70766 g/mol\n\n        2 moles of oxygen/1 mole of UPu\n\n        grams of UPu = 237.70766 g/mol* 1 mol  = 237.70766 g\n        grams of oxygen= 15.9994 g/mol * 2 mol =  31.9988 g\n\n        total= 269.70646 g.\n\n        Mass fraction UPu : 237.70766/269.70646 = 0.881357\n        Mass fraction Pu mixture: 0.177*237.70766/269.70646 = 0.156000\n        Mass fraction U mixture: 0.823*237.70766/269.70646 = 0.725356\n\n        Mass fraction Pu238: 0.001*42.074256/269.70646   = 0.000156\n        Mass fraction Pu239: 0.7682*42.074256/269.70646  = 0.119839\n        Mass fraction Pu240: 0.1923*42.074256/269.70646  = 0.029999\n        Mass fraction Pu241: 0.0266*42.074256/269.70646  = 0.004150\n        Mass fraction 
Pu242: 0.0055*42.074256/269.70646  = 0.000858\n        Mass fraction Am241: 0.0064*42.074256/269.70646  = 0.000998\n        Mass fraction U-235: 0.2299*195.633404/269.70646 = 0.166759\n        Mass fraction U-238: 0.7701*195.633404/269.70646 = 0.558597\n        Mass fraction O:     31.9988/269.70646           = 0.118643\n        \"\"\"\n        self.setMassFrac(\"PU238\", 0.000156)\n        self.setMassFrac(\"PU239\", 0.119839)\n        self.setMassFrac(\"PU240\", 0.029999)\n        self.setMassFrac(\"PU241\", 0.004150)\n        self.setMassFrac(\"PU242\", 0.000858)\n        self.setMassFrac(\"AM241\", 0.000998)\n        self.setMassFrac(\"U235\", 0.166759)\n        self.setMassFrac(\"U238\", 0.558597)\n        self.setMassFrac(\"O16\", 0.118643)\n\n    def meltingPoint(self):\n        \"\"\"\n        Melting point in K - ORNL/TM-2000/351.\n\n        Melting point is a function of PuO2 mol fraction.\n        The liquidus Tl and solidus Ts temperatures in K are given by:\n        Tl(y) = 3120.0 - 388.1*y - 30.4*y^2\n        Ts(y) = 3120.0 - 655.3*y + 336.4*y^2 - 99.9*y^3\n        where y is the mole fraction of PuO2\n        This function returns the solidus temperature.\n        Does not take into account changes in the melting temp due to burnup.\n        \"\"\"\n        molFracPuO2 = self.getMolFracPuO2()\n        return 3120.0 - 655.3 * molFracPuO2 + 336.4 * molFracPuO2**2 - 99.9 * molFracPuO2**3\n"
  },
  {
    "path": "armi/materials/nZ.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Niobium Zirconium Alloy.\n\nThe data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to\nthis file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data\ncontained in this file should not be used in production simulations.\n\"\"\"\n\nfrom armi.materials.material import SimpleSolid\n\n\nclass NZ(SimpleSolid):\n    def setDefaultMassFracs(self):\n        self.setMassFrac(\"NB93\", 0.99)\n        self.setMassFrac(\"ZR\", 0.01)\n\n    def density(self, Tk=None, Tc=None):\n        return 8.66  # g/cc\n"
  },
  {
    "path": "armi/materials/potassium.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Potassium.\n\nThe data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to\nthis file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data\ncontained in this file should not be used in production simulations.\n\"\"\"\n\nfrom armi.materials import material\nfrom armi.utils.units import getTc, getTk\n\n\nclass Potassium(material.Fluid):\n    \"\"\"\n    Molten pure Potassium.\n\n    From Foust, O.J. Sodium-NaK Engineering Handbook Vol. 1. New York: Gordon and Breach, 1972.\n    \"\"\"\n\n    propertyValidTemperature = {\"density\": ((63.2, 1250), \"C\")}\n\n    def pseudoDensity(self, Tk=None, Tc=None):\n        r\"\"\"\n        Calculates the density of molten Potassium in g/cc.\n\n        From Foust, O.J. Sodium-NaK Engineering Handbook Vol. 1. New York: Gordon and Breach, 1972.\n        Page 18.\n\n        Notes\n        -----\n        In ARMI, we define pseudoDensity() and density() as the same for Fluids.\n        \"\"\"\n        Tc = getTc(Tc, Tk)\n        Tk = getTk(Tc=Tc)\n        self.checkPropertyTempRange(\"density\", Tc)\n        return 0.8415 - 2.172e-4 * Tc - 2.70e-8 * Tc**2 + 4.77e-12 * Tc**3\n"
  },
  {
    "path": "armi/materials/scandiumOxide.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Scandium Oxide.\n\nThe data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to\nthis file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data\ncontained in this file should not be used in production simulations.\n\"\"\"\n\nfrom armi.materials.material import Material\nfrom armi.utils.units import getTk\n\n\nclass Sc2O3(Material):\n    propertyValidTemperature = {\"linear expansion percent\": ((273.15, 1573.15), \"K\")}\n\n    def __init__(self):\n        Material.__init__(self)\n        \"\"\"\n        https://en.wikipedia.org/wiki/Scandium_oxide\n        \"\"\"\n        self.refDens = 3.86\n\n    def setDefaultMassFracs(self):\n        self.setMassFrac(\"SC45\", 0.6520)\n        self.setMassFrac(\"O16\", 0.3480)\n\n    def linearExpansionPercent(self, Tk=None, Tc=None):\n        \"\"\"\n        Return the linear expansion percent for Scandium Oxide (Scandia).\n\n        Notes\n        -----\n        From Table 4 of \"Thermal Expansion and Phase Inversion of Rare-Earth Oxides.\n        \"\"\"\n        Tk = getTk(Tc, Tk)\n        self.checkPropertyTempRange(\"linear expansion percent\", Tk)\n        return 2.6045e-07 * Tk**2 + 4.6374e-04 * Tk - 1.4696e-01\n"
  },
  {
    "path": "armi/materials/siC.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Silicon Carbide.\n\nThe data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to\nthis file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data\ncontained in this file should not be used in production simulations.\n\"\"\"\n\nimport math\n\nfrom armi.materials.material import Material\nfrom armi.nucDirectory import thermalScattering as tsl\nfrom armi.utils.units import getTc\n\n\nclass SiC(Material):\n    \"\"\"Silicon Carbide.\"\"\"\n\n    thermalScatteringLaws = (tsl.fromNameAndCompound(\"C\", tsl.SIC), tsl.fromNameAndCompound(\"SI\", tsl.SIC))\n    references = {\n        \"heat capacity\": [\"Munro, Material Properties of a-SiC, J. Phys. Chem. Ref. Data, Vol. 26, No. 5, 1997\"],\n        \"cumulative linear expansion\": [\n            \"Munro, Material Properties of a-SiC, J. Phys. Chem. Ref. Data, Vol. 26, No. 5, 1997\"\n        ],\n        \"density\": [\"Munro, Material Properties of a-SiC, J. Phys. Chem. Ref. Data, Vol. 26, No. 5, 1997\"],\n        \"thermal conductivity\": [\"Munro, Material Properties of a-SiC, J. Phys. Chem. Ref. Data, Vol. 26, No. 
5, 1997\"],\n    }\n\n    propertyEquation = {\n        \"heat capacity\": \"1110 + 0.15*Tc - 425*math.exp(-0.003*Tc)\",\n        \"cumulative linear expansion\": \"(4.22 + 8.33E-4*Tc-3.51*math.exp(-0.00527*Tc))*1.0E-6\",\n        \"density\": \"(rho0*(1 + cA*(Tc - Tc0))**(-3))*1.0E3\",\n        \"thermal conductivity\": \"(52000*math.exp(-1.24E-5*Tc))/(Tc+437)\",\n    }\n\n    propertyUnits = {\n        \"melting point\": \"K\",\n        \"heat capacity\": \"J kg^-1 K^-1\",\n        \"cumulative linear expansion\": \"K^-1\",\n        \"density\": \"kg m^-3\",\n        \"thermal conductivity\": \"W m^-1 K^-1\",\n    }\n\n    propertyNotes = {}\n\n    propertyValidTemperature = {\n        \"cumulative linear expansion\": ((0, 1500), \"C\"),\n        \"density\": ((0, 1500), \"C\"),\n        \"heat capacity\": ((0, 2000), \"C\"),\n        \"thermal conductivity\": ((0, 2000), \"C\"),\n    }\n\n    refTempK = 298.15\n\n    def setDefaultMassFracs(self):\n        self.setMassFrac(\"C\", 0.299547726)\n        self.setMassFrac(\"SI\", 0.700452274)\n\n        self.refDens = 3.21\n\n    def meltingPoint(self):\n        return 3003.0\n\n    def heatCapacity(self, Tc=None, Tk=None):\n        Tc = getTc(Tc, Tk)\n        self.checkPropertyTempRange(\"heat capacity\", Tc)\n        return 1110 + 0.15 * Tc - 425 * math.exp(-0.003 * Tc)\n\n    def cumulativeLinearExpansion(self, Tk=None, Tc=None):\n        Tc = getTc(Tc, Tk)\n        self.checkPropertyTempRange(\"cumulative linear expansion\", Tc)\n        return (4.22 + 8.33e-4 * Tc - 3.51 * math.exp(-0.00527 * Tc)) * 1.0e-6\n\n    def pseudoDensity(self, Tc=None, Tk=None):\n        Tc = getTc(Tc, Tk)\n        self.checkPropertyTempRange(\"density\", Tc)\n        rho0 = 3.16\n        Tc0 = 0.0\n        cA = self.cumulativeLinearExpansion(Tc=Tc)\n        return rho0 * (1 + cA * (Tc - Tc0)) ** (-3)\n\n    def thermalConductivity(self, Tc=None, Tk=None):\n        Tc = getTc(Tc, Tk)\n        self.checkPropertyTempRange(\"thermal 
conductivity\", Tc)\n        return (52000 * math.exp(-1.24e-5 * Tc)) / (Tc + 437)\n"
  },
  {
    "path": "armi/materials/sodium.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Simple sodium material.\n\nThe data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to\nthis file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data\ncontained in this file should not be used in production simulations.\n\"\"\"\n\nfrom armi import runLog\nfrom armi.materials import material\nfrom armi.utils.units import getTc, getTk\n\n\nclass Sodium(material.Fluid):\n    \"\"\"\n    Simplified sodium material.\n\n    .. warning:: This is an academic-quality material. Bring in user-provided material\n        properties through plugins as necessary.\n\n    Most info from  [ANL-RE-95-2]_\n\n    .. [ANL-RE-95-2] Fink, J.K., and Leibowitz, L. Thermodynamic and transport properties of sodium\n        liquid and vapor. United States: N. p., 1995. Web. 
doi:10.2172/94649.\n        https://www.osti.gov/biblio/94649-gXNdLI/webviewable/\n    \"\"\"\n\n    propertyValidTemperature = {\n        \"density\": ((97.85, 2230.55), \"C\"),\n        \"enthalpy\": ((371.0, 2000.0), \"K\"),\n        \"thermal conductivity\": ((371.5, 1500), \"K\"),\n    }\n\n    def setDefaultMassFracs(self):\n        \"\"\"It's just sodium.\"\"\"\n        self.setMassFrac(\"NA\", 1.0)\n        self.refDens = 0.968\n\n    def pseudoDensity(self, Tk=None, Tc=None):\n        \"\"\"\n        Returns density of Sodium in g/cc.\n\n        This is from 1.3.1 in [ANL-RE-95-2]_.\n\n        Parameters\n        ----------\n        Tk : float, optional\n            temperature in degrees Kelvin\n        Tc : float, optional\n            temperature in degrees Celsius\n\n        Returns\n        -------\n        density : float\n            mass density in g/cc\n        \"\"\"\n        Tc = getTc(Tc, Tk)\n        self.checkPropertyTempRange(\"density\", Tc)\n\n        if (Tc is not None) and (Tc <= 97.72):\n            runLog.warning(\n                \"Sodium frozen at Tc: {0}\".format(Tc),\n                label=\"Sodium frozen at Tc={0}\".format(Tc),\n                single=True,\n            )\n\n        critDens = 219  # critical density\n        f = 275.32  #\n        g = 511.58\n        h = 0.5\n        Tcrit = 2503.7  # critical temperature\n        return (\n            critDens + f * (1 - (Tc + 273.15) / Tcrit) + g * (1 - (Tc + 273.15) / Tcrit) ** h\n        ) / 1000.0  # convert from kg/m^3 to g/cc.\n\n    def specificVolumeLiquid(self, Tk=None, Tc=None):\n        \"\"\"Returns the liquid specific volume in m^3/kg of this material given Tk in K or Tc in C.\"\"\"\n        return 1 / (1000.0 * self.pseudoDensity(Tk, Tc))\n\n    def enthalpy(self, Tk=None, Tc=None):\n        \"\"\"\n        Return enthalpy in J/kg.\n\n        From [ANL-RE-95-2]_, Table 1.1-2.\n        \"\"\"\n        Tk = getTk(Tc, Tk)\n        
self.checkPropertyTempRange(\"enthalpy\", Tk)\n        enthalpy = -365.77 + 1.6582 * Tk - 4.2395e-4 * Tk**2 + 1.4847e-7 * Tk**3 + 2992.6 / Tk\n        enthalpy = enthalpy * 1000  # convert from kJ/kg to J/kg\n        return enthalpy\n\n    def thermalConductivity(self, Tk=None, Tc=None):\n        \"\"\"\n        Returns thermal conductivity of Sodium.\n\n        From [ANL-RE-95-2]_, Table 2.1-2\n\n        Parameters\n        ----------\n        Tk : float, optional\n            temperature in degrees Kelvin\n        Tc : float, optional\n            temperature in degrees Celsius\n\n        Returns\n        -------\n        thermalConductivity : float\n            thermal conductivity of Sodium (W/m-K)\n        \"\"\"\n        Tk = getTk(Tc, Tk)\n        self.checkPropertyTempRange(\"thermal conductivity\", Tk)\n        thermalConductivity = 124.67 - 0.11381 * Tk + 5.5226e-5 * Tk**2 - 1.1842e-8 * Tk**3\n        return thermalConductivity\n"
  },
  {
    "path": "armi/materials/sodiumChloride.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nSodium Chloride salt.\n\nThe data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to\nthis file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data\ncontained in this file should not be used in production simulations.\n\nNotes\n-----\nThis is a very simple description of this material.\n\"\"\"\n\nfrom armi.materials.material import SimpleSolid\nfrom armi.utils.units import getTk\n\n\nclass NaCl(SimpleSolid):\n    def setDefaultMassFracs(self):\n        self.setMassFrac(\"NA23\", 0.3934)\n        self.setMassFrac(\"CL35\", 0.4596)\n        self.setMassFrac(\"CL37\", 0.1470)\n\n    def density(self, Tk=None, Tc=None):\n        \"\"\"\n        Return the density of Sodium Chloride.\n\n        Notes\n        -----\n        From equation 10 of Thermophysical Properties of NaCl,\n        NaBr and NaF by gamma-ray attenuation technique\n        \"\"\"\n        Tk = getTk(Tc, Tk)\n        return -3.130e-04 * Tk + 2.23\n"
  },
  {
    "path": "armi/materials/sulfur.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Sulfur.\n\nThe data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to\nthis file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data\ncontained in this file should not be used in production simulations.\n\"\"\"\n\nfrom armi import runLog\nfrom armi.materials import material\nfrom armi.utils.mathematics import linearInterpolation\nfrom armi.utils.units import getTk\n\n\nclass Sulfur(material.Fluid):\n    propertyValidTemperature = {\n        \"density\": ((334, 430), \"K\"),\n        \"volumetric expansion\": ((334, 430), \"K\"),\n    }\n\n    def applyInputParams(self, sulfur_density_frac=None, TD_frac=None):\n        if sulfur_density_frac is not None:\n            runLog.warning(\n                \"The 'sulfur_density_frac' material modification for Sulfur \"\n                \"will be deprecated. Update your inputs to use 'TD_frac' instead.\",\n                single=True,\n            )\n            if TD_frac is not None:\n                runLog.warning(\n                    f\"Both 'sulfur_density_frac' and 'TD_frac' are specified for {self}. 
'TD_frac' will be used.\"\n                )\n            else:\n                self.updateTD(sulfur_density_frac)\n        if TD_frac is not None:\n            self.updateTD(TD_frac)\n\n    def updateTD(self, TD):\n        self.fullDensFrac = float(TD)\n\n    def setDefaultMassFracs(self):\n        \"\"\"Mass fractions.\"\"\"\n        self.fullDensFrac = 1.0\n        self.setMassFrac(\"S32\", 0.9493)\n        self.setMassFrac(\"S33\", 0.0076)\n        self.setMassFrac(\"S34\", 0.0429)\n        self.setMassFrac(\"S36\", 0.002)\n\n    def pseudoDensity(self, Tk=None, Tc=None):\n        \"\"\"Density of Liquid Sulfur.\n\n        Ref: P. Espeau, R. Ceolin \"density of molten sulfur in the 334-508K range\"\n\n        Notes\n        -----\n        In ARMI, we define pseudoDensity() and density() as the same for Fluids.\n        \"\"\"\n        Tk = getTk(Tc, Tk)\n        self.checkPropertyTempRange(\"density\", Tk)\n\n        return (2.18835 - 0.00098187 * Tk) * (self.fullDensFrac)\n\n    def volumetricExpansion(self, Tk=None, Tc=None):\n        \"\"\"\n        This is just a two-point interpolation.\n\n        P. Espeau, R. Ceolin \"density of molten sulfur in the 334-508K range\"\n        \"\"\"\n        Tk = getTk(Tc, Tk)\n        (Tmin, Tmax) = self.propertyValidTemperature[\"volumetric expansion\"][0]\n        self.checkPropertyTempRange(\"volumetric expansion\", Tk)\n\n        return linearInterpolation(x0=334, y0=5.28e-4, x1=430, y1=5.56e-4, targetX=Tk)\n"
  },
  {
    "path": "armi/materials/tZM.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"TZM.\n\nThe data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to\nthis file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data\ncontained in this file should not be used in production simulations.\n\"\"\"\n\nfrom numpy import interp\n\nfrom armi.materials.material import Material\nfrom armi.utils.units import getTc\n\n\nclass TZM(Material):\n    propertyValidTemperature = {\"linear expansion percent\": ((21.11, 1382.22), \"C\")}\n    references = {\n        \"linear expansion percent\": \"Report on the Mechanical and Thermal Properties of Tungsten \\\n            and TZM Sheet Produced in the Refractory Metal Sheet Rolling Program, Part 1 to Bureau \\\n            of Naval Weapons Contract No. 
N600(19)-59530, Southern Research Institute\"\n    }\n\n    temperatureC = [\n        21.11,\n        456.11,\n        574.44,\n        702.22,\n        840.56,\n        846.11,\n        948.89,\n        1023.89,\n        1146.11,\n        1287.78,\n        1382.22,\n    ]\n\n    percentThermalExpansion = [\n        0,\n        1.60e-01,\n        2.03e-01,\n        2.53e-01,\n        3.03e-01,\n        3.03e-01,\n        3.42e-01,\n        3.66e-01,\n        4.21e-01,\n        4.68e-01,\n        5.04e-01,\n    ]\n\n    def __init__(self):\n        Material.__init__(self)\n        self.refDens = 10.16\n\n    def setDefaultMassFracs(self):\n        self.setMassFrac(\"C\", 2.50749e-05)\n        self.setMassFrac(\"TI\", 0.002502504)\n        self.setMassFrac(\"ZR\", 0.000761199)\n        self.setMassFrac(\"MO\", 0.996711222)\n\n    def linearExpansionPercent(self, Tk=None, Tc=None):\n        \"\"\"\n        Return linear expansion in %dL/L from interpolation of tabular data.\n\n        This function is used to expand a material from its reference temperature (21C)\n        to a particular hot temperature.\n\n        Parameters\n        ----------\n        Tk : float\n            temperature in K\n        Tc : float\n            temperature in C\n\n        Source: Report on the Mechanical and Thermal Properties of Tungsten and TZM Sheet Produced\n                in the Refractory Metal Sheet Rolling Program, Part 1 to Bureau of Naval Weapons\n                Contract No. N600(19)-59530, 1966 Southern Research Institute.\n\n        See Table viii-b, Appendix B, page 181.\n        \"\"\"\n        Tc = getTc(Tc, Tk)\n        self.checkPropertyTempRange(\"linear expansion percent\", Tc)\n\n        return interp(Tc, self.temperatureC, self.percentThermalExpansion)\n"
  },
  {
    "path": "armi/materials/tantalum.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tantalum.\n\nThe data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to\nthis file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data\ncontained in this file should not be used in production simulations.\n\"\"\"\n\nfrom armi.materials.material import SimpleSolid\n\n\nclass Tantalum(SimpleSolid):\n    def setDefaultMassFracs(self):\n        self.setMassFrac(\"TA181\", 1)\n\n    def density(self, Tk=None, Tc=None):\n        return 16.6  # g/cc\n"
  },
  {
    "path": "armi/materials/tests/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"
  },
  {
    "path": "armi/materials/tests/test__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This module tests the __init__.py file since it has rather unique behavior.\"\"\"\n\nimport unittest\n\nfrom armi import materials\n\n\ndef betterSubClassCheck(item, superClass):\n    try:\n        return issubclass(item, superClass)\n    except TypeError:\n        return False\n\n\nclass Materials__init__Tests(unittest.TestCase):\n    def test_canAccessClassesFromPackage(self):\n        klasses = [kk for _, kk in vars(materials).items() if betterSubClassCheck(kk, materials.material.Material)]\n        self.assertGreater(len(klasses), 10)\n\n    def test_packageClassesEqualModuleClasses(self):\n        self.assertEqual(materials.UraniumOxide, materials.uraniumOxide.UraniumOxide)\n"
  },
  {
    "path": "armi/materials/tests/test_air.py",
    "content": "# Copyright 2022 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Unit tests for air materials.\"\"\"\n\nimport math\nimport unittest\n\nfrom armi.materials.air import Air\nfrom armi.utils import densityTools\nfrom armi.utils.units import getTc\n\n\"\"\"\nReference thermal physical properties from Table A.4 in Incropera, Frank P., et al. Fundamentals of\nheat and mass transfer. Vol. 5. New York: Wiley, 2002.\n\"\"\"\n\nREFERENCE_Tk = [\n    100,\n    150,\n    200,\n    250,\n    300,\n    350,\n    400,\n    450,\n    500,\n    550,\n    600,\n    650,\n    700,\n    750,\n    800,\n    850,\n    900,\n    950,\n    1000,\n    1100,\n    1200,\n    1300,\n    1400,\n    1500,\n    1600,\n    1700,\n    1800,\n    1900,\n    2000,\n    2100,\n    2200,\n    2300,\n    2400,\n    2500,\n    3000,\n]\n\nREFERENCE_DENSITY_KG_PER_M3 = [\n    3.5562,\n    2.3364,\n    1.7458,\n    1.3947,\n    1.1614,\n    0.995,\n    0.8711,\n    0.774,\n    0.6964,\n    0.6329,\n    0.5804,\n    0.5356,\n    0.4972,\n    0.4643,\n    0.4354,\n    0.4097,\n    0.3868,\n    0.3666,\n    0.3482,\n    0.3166,\n    0.2902,\n    0.2679,\n    0.2488,\n    0.2322,\n    0.2177,\n    0.2049,\n    0.1935,\n    0.1833,\n    0.1741,\n    0.1658,\n    0.1582,\n    0.1513,\n    0.1448,\n    0.1389,\n    0.1135,\n]\n\nREFERENCE_HEAT_CAPACITY_kJ_PER_KG_K = [\n    1.032,\n    1.012,\n    1.007,\n    1.006,\n    1.007,\n    1.009,\n    1.014,\n    1.021,\n    
1.03,\n    1.04,\n    1.051,\n    1.063,\n    1.075,\n    1.087,\n    1.099,\n    1.11,\n    1.121,\n    1.131,\n    1.141,\n    1.159,\n    1.175,\n    1.189,\n    1.207,\n    1.23,\n    1.248,\n    1.267,\n    1.286,\n    1.307,\n    1.337,\n    1.372,\n    1.417,\n    1.478,\n    1.558,\n    1.665,\n    2.726,\n]\n\nREFERENCE_THERMAL_CONDUCTIVITY_mJ_PER_M_K = [\n    9.34,\n    13.8,\n    18.1,\n    22.3,\n    26.3,\n    30,\n    33.8,\n    37.3,\n    40.7,\n    43.9,\n    46.9,\n    49.7,\n    52.4,\n    54.9,\n    57.3,\n    59.6,\n    62,\n    64.3,\n    66.7,\n    71.5,\n    76.3,\n    82,\n    91,\n    100,\n    106,\n    113,\n    120,\n    128,\n    137,\n    147,\n    160,\n    175,\n    196,\n    222,\n]\n\n\nclass TestAir(unittest.TestCase):\n    \"\"\"unit tests for air materials.\n\n    .. test:: There is a base class for fluid materials.\n        :id: T_ARMI_MAT_FLUID1\n        :tests: R_ARMI_MAT_FLUID\n    \"\"\"\n\n    def test_pseudoDensity(self):\n        \"\"\"\n        Reproduce verification results at 300K from Incropera, Frank P., et al.\n        Fundamentals of heat and mass transfer. Vol. 5. New York: Wiley, 2002.\n        \"\"\"\n        air = Air()\n\n        for Tk, densKgPerM3 in zip(REFERENCE_Tk, REFERENCE_DENSITY_KG_PER_M3):\n            if Tk < 2400:\n                error = math.fabs((air.pseudoDensityKgM3(Tk=Tk) - densKgPerM3) / densKgPerM3)\n                self.assertLess(error, 1e-2)\n                error = math.fabs((air.pseudoDensityKgM3(Tc=getTc(Tk=Tk)) - densKgPerM3) / densKgPerM3)\n                self.assertLess(error, 1e-2)\n\n    def test_heatCapacity(self):\n        \"\"\"\n        Reproduce verification results at 300K from Incropera, Frank P., et al.\n        Fundamentals of heat and mass transfer. Vol. 5. 
New York: Wiley, 2002.\n        \"\"\"\n        air = Air()\n\n        for Tk, heatCapacity in zip(REFERENCE_Tk, REFERENCE_HEAT_CAPACITY_kJ_PER_KG_K):\n            if Tk < 1300:\n                error = math.fabs((air.heatCapacity(Tk=Tk) - heatCapacity * 1e3) / (heatCapacity * 1e3))\n                self.assertLess(error, 1e-2)\n\n    def test_thermalConductivity(self):\n        \"\"\"\n        Reproduce verification results at 300K from Incropera, Frank P., et al.\n        Fundamentals of heat and mass transfer. Vol. 5. New York: Wiley, 2002.\n        \"\"\"\n        air = Air()\n\n        for Tk, thermalConductivity in zip(REFERENCE_Tk, REFERENCE_THERMAL_CONDUCTIVITY_mJ_PER_M_K):\n            if Tk > 200 and Tk < 850:\n                error = math.fabs(\n                    (air.thermalConductivity(Tk=Tk) - thermalConductivity * 1e-3) / (thermalConductivity * 1e-3)\n                )\n                self.assertLess(error, 1e-2)\n\n    def test_massFrac(self):\n        \"\"\"Reproduce the number ratios results to PNNL-15870 Rev 1.\"\"\"\n        air = Air()\n\n        refC = 0.000150\n        refN = 0.784431\n        refO = 0.210748\n        refAR = 0.004671\n\n        nuclides, nDens = densityTools.getNDensFromMasses(air.pseudoDensity(Tk=300), air.massFrac)\n\n        diff = 1e-4\n        error = abs(nDens[0] / sum(nDens) - refC)\n        self.assertLess(error, diff)\n        error = abs(nDens[1] / sum(nDens) - refN)\n        self.assertLess(error, diff)\n        error = abs(nDens[2] / sum(nDens) - refO)\n        self.assertLess(error, diff)\n        error = abs(nDens[3] / sum(nDens) - refAR)\n        self.assertLess(error, diff)\n\n        self.assertEqual(nuclides[0].decode(), \"C\")\n        self.assertEqual(nuclides[1].decode(), \"N\")\n        self.assertEqual(nuclides[2].decode(), \"O\")\n        self.assertEqual(nuclides[3].decode(), \"AR\")\n\n    def test_validRanges(self):\n        air = Air()\n\n        den0 = air.density(Tk=101)\n        denf = 
air.density(Tk=2399)\n        self.assertLess(denf, den0)\n\n        hc0 = air.heatCapacity(Tk=101)\n        hcf = air.heatCapacity(Tk=1299)\n        self.assertGreater(hcf, hc0)\n\n        tc0 = air.thermalConductivity(Tk=201)\n        tcf = air.thermalConductivity(Tk=849)\n        self.assertGreater(tcf, tc0)\n"
  },
  {
    "path": "armi/materials/tests/test_b4c.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for boron carbide.\"\"\"\n\nimport unittest\n\nfrom armi.materials.b4c import B4C\nfrom armi.materials.tests.test_materials import AbstractMaterialTest\n\n\nclass B4C_TestCase(AbstractMaterialTest, unittest.TestCase):\n    MAT_CLASS = B4C\n\n    def setUp(self):\n        AbstractMaterialTest.setUp(self)\n        self.mat = B4C()\n\n        self.B4C_theoretical_density = B4C()\n        self.B4C_theoretical_density.applyInputParams(theoretical_density=0.5)\n\n        self.B4C_TD_frac = B4C()\n        self.B4C_TD_frac.applyInputParams(TD_frac=0.4)\n\n        self.B4C_both = B4C()\n        self.B4C_both.applyInputParams(theoretical_density=0.5, TD_frac=0.4)\n\n    def test_theoretical_pseudoDensity(self):\n        ref = self.mat.pseudoDensity(500)\n\n        reduced = self.B4C_theoretical_density.pseudoDensity(500)\n        self.assertAlmostEqual(ref * 0.5 / B4C.DEFAULT_THEORETICAL_DENSITY_FRAC, reduced)\n\n        reduced = self.B4C_TD_frac.pseudoDensity(500)\n        self.assertAlmostEqual(ref * 0.4 / B4C.DEFAULT_THEORETICAL_DENSITY_FRAC, reduced)\n\n        reduced = self.B4C_both.pseudoDensity(500)\n        self.assertAlmostEqual(ref * 0.4 / B4C.DEFAULT_THEORETICAL_DENSITY_FRAC, reduced)\n\n    def test_propertyValidTemperature(self):\n        self.assertGreater(len(self.mat.propertyValidTemperature), 0)\n\n    def test_variousEdgeCases(self):\n        
with self.assertRaises(ValueError):\n            self.mat.setNewMassFracsFromMassEnrich(-0.001)\n\n        with self.assertRaises(ValueError):\n            self.mat.setNewMassFracsFromMassEnrich(1.001)\n"
  },
  {
    "path": "armi/materials/tests/test_be9.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Unit test for Beryllium.\"\"\"\n\nimport unittest\n\nfrom armi.materials.be9 import Be9\nfrom armi.materials.tests import test_materials\n\n\nclass TestBe9(test_materials.AbstractMaterialTest, unittest.TestCase):\n    \"\"\"Beryllium tests.\"\"\"\n\n    MAT_CLASS = Be9\n\n    def test_pseudoDensity(self):\n        cur = self.mat.pseudoDensity(Tc=25)\n        ref = 1.85\n        delta = ref * 0.001\n        self.assertAlmostEqual(cur, ref, delta=delta)\n\n    def test_propertyValidTemperature(self):\n        self.assertGreater(len(self.mat.propertyValidTemperature), 0)\n"
  },
  {
    "path": "armi/materials/tests/test_fluids.py",
    "content": "# Copyright 2025 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Unit tests for fluid-specific behaviors.\n\nThe ARMI framework has a lot of thermal expansion machinery that applies to all components\nbut doesn't make sense for fluids. The tests here help show fluid materials still\nplay nice with the rest of the framework.\n\"\"\"\n\nfrom unittest import TestCase\n\nfrom armi.materials.material import Fluid, Material\nfrom armi.reactor.components import Circle\nfrom armi.tests import mockRunLogs\n\n\nclass TestFluids(TestCase):\n    class MyFluid(Fluid):\n        \"\"\"Stand-in fluid that doesn't provide lots of functionality.\"\"\"\n\n    class MySolid(Material):\n        \"\"\"Stand-in solid that doesn't provide lots of functionality.\"\"\"\n\n    def test_fluidDensityWrapperNoWarning(self):\n        \"\"\"Test that Component.material.density does not raise a warning for fluids.\n\n        The ARMI Framework contains a mechanism to warn users if they ask for the density of a\n        material attached to a component. But the component is the source of truth for volume and\n        composition. And can be thermally expanded during operation. Much of the framework operates\n        on ``Component.density`` and other ``Component`` methods for mass accounting. 
However,\n        ``comp.material.density`` does not know about the new composition or volumes and can diverge\n        from ``component.density``.\n\n        Additionally, the framework does not do any thermal expansion on fluids. So the above calls\n        to ``component.material.density`` are warranted for fluids.\n        \"\"\"\n        self._checkCompDensityLogs(\n            mat=self.MySolid(),\n            nExpectedWarnings=1,\n            msg=\"Solids should have the density warning logged.\",\n        )\n        self._checkCompDensityLogs(\n            mat=self.MyFluid(),\n            nExpectedWarnings=0,\n            msg=\"Fluids should not have the density warning logged.\",\n        )\n\n    def _checkCompDensityLogs(self, mat: Material, nExpectedWarnings: int, msg: str):\n        comp = Circle(name=\"test\", material=mat, Tinput=20, Thot=20, id=0, od=1, mult=1)\n        with mockRunLogs.LogCounter() as logs:\n            comp.material.density(Tc=comp.temperatureInC)\n        self.assertEqual(logs.messageCounts[\"warning\"], nExpectedWarnings, msg=msg)\n"
  },
  {
    "path": "armi/materials/tests/test_graphite.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for graphite material.\"\"\"\n\nimport math\nimport unittest\n\nfrom armi.materials.graphite import Graphite\n\n\nclass Graphite_TestCase(unittest.TestCase):\n    MAT_CLASS = Graphite\n\n    def setUp(self):\n        self.mat = self.MAT_CLASS()\n\n    def test_linearExpansionPercent(self):\n        accuracy = 2\n\n        cur = self.mat.linearExpansionPercent(330)\n        ref = 0.013186\n        self.assertAlmostEqual(cur, ref, accuracy)\n\n        cur = self.mat.linearExpansionPercent(1500)\n        ref = 0.748161\n        self.assertAlmostEqual(cur, ref, accuracy)\n\n        cur = self.mat.linearExpansionPercent(3000)\n        ref = 2.149009\n        self.assertAlmostEqual(cur, ref, accuracy)\n\n    def test_propertyValidTemperature(self):\n        self.assertEqual(len(self.mat.propertyValidTemperature), 0)\n\n    def test_density(self):\n        \"\"\"Test to reproduce density measurements results in table 2 from [INL-EXT-16-38241].\"\"\"\n        uncertainty = 0.01\n\n        for Tc, ref_rho in [\n            # sample G-348-1\n            (22.6, 1.8885),\n            (401.6, 1.8772),\n            (801.3, 1.8634),\n            # sample G-348-2\n            (23.5, 1.9001),\n            (401.0, 1.8888),\n            (800.9, 1.8748),\n        ]:\n            test_rho = self.mat.density(Tc=Tc)\n            error = math.fabs((ref_rho - test_rho) / 
ref_rho)\n\n            self.assertLess(error, uncertainty)\n"
  },
  {
    "path": "armi/materials/tests/test_lithium.py",
    "content": "# Copyright 2022 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for lithium.\"\"\"\n\nimport unittest\n\nfrom armi.materials.lithium import Lithium\nfrom armi.materials.tests.test_materials import AbstractMaterialTest\n\n\nclass Lithium_TestCase(AbstractMaterialTest, unittest.TestCase):\n    MAT_CLASS = Lithium\n\n    def setUp(self):\n        AbstractMaterialTest.setUp(self)\n        self.mat = Lithium()\n\n        self.Lithium_LI_wt_frac = Lithium()\n        self.Lithium_LI_wt_frac.applyInputParams(LI6_wt_frac=0.5)\n\n        self.Lithium_LI6_wt_frac = Lithium()\n        self.Lithium_LI6_wt_frac.applyInputParams(LI6_wt_frac=0.6)\n\n        self.Lithium_both = Lithium()\n        self.Lithium_both.applyInputParams(LI6_wt_frac=0.8)\n\n    def test_Lithium_material_modifications(self):\n        self.assertEqual(self.mat.getMassFrac(\"LI6\"), 0.0759)\n        self.assertAlmostEqual(self.Lithium_LI_wt_frac.getMassFrac(\"LI6\"), 0.5, places=10)\n        self.assertAlmostEqual(self.Lithium_LI6_wt_frac.getMassFrac(\"LI6\"), 0.6, places=10)\n        self.assertAlmostEqual(self.Lithium_both.getMassFrac(\"LI6\"), 0.8, places=10)\n\n    def test_pseudoDensity(self):\n        ref = self.mat.pseudoDensity(Tc=100)\n        self.assertAlmostEqual(ref, 0.512, delta=abs(ref * 0.001))\n\n        ref = self.mat.pseudoDensity(Tc=200)\n        self.assertAlmostEqual(ref, 0.512, delta=abs(ref * 0.001))\n\n    def 
test_meltingPoint(self):\n        ref = self.mat.meltingPoint()\n        cur = 453.69\n        self.assertAlmostEqual(ref, cur, delta=abs(ref * 0.001))\n\n    def test_boilingPoint(self):\n        ref = self.mat.boilingPoint()\n        cur = 1615.0\n        self.assertAlmostEqual(ref, cur, delta=abs(ref * 0.001))\n\n    def test_heatCapacity(self):\n        ref = self.mat.heatCapacity(Tc=100)\n        cur = 3570.0\n        self.assertAlmostEqual(ref, cur, delta=abs(ref * 0.001))\n\n        ref = self.mat.heatCapacity(Tc=200)\n        cur = 3570.0\n        self.assertAlmostEqual(ref, cur, delta=abs(ref * 0.001))\n\n    def test_propertyValidTemperature(self):\n        self.assertEqual(len(self.mat.propertyValidTemperature), 0)\n"
  },
  {
    "path": "armi/materials/tests/test_materials.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests materials.py.\"\"\"\n\nimport math\nimport pickle\nimport unittest\nfrom copy import deepcopy\n\nfrom numpy import testing\n\nfrom armi import context, materials, settings\nfrom armi.materials import _MATERIAL_NAMESPACE_ORDER, setMaterialNamespaceOrder\nfrom armi.reactor import blueprints\nfrom armi.utils import units\n\n\nclass AbstractMaterialTest:\n    \"\"\"Base for material tests.\"\"\"\n\n    MAT_CLASS = None\n    VALID_TEMP_K = 500\n\n    def setUp(self):\n        self.mat = self.MAT_CLASS()\n\n    def test_isPicklable(self):\n        \"\"\"Test that all materials are picklable so we can do MPI communication of state.\"\"\"\n        stream = pickle.dumps(self.mat)\n        mat = pickle.loads(stream)\n\n        # check a property that is sometimes interpolated.\n        self.assertEqual(self.mat.thermalConductivity(self.VALID_TEMP_K), mat.thermalConductivity(self.VALID_TEMP_K))\n\n    def test_density(self):\n        \"\"\"Test that all materials produce a non-zero density from density.\"\"\"\n        self.assertNotEqual(self.mat.density(self.VALID_TEMP_K), 0)\n\n    def test_TD(self):\n        \"\"\"Test the material density.\"\"\"\n        self.assertEqual(self.mat.getTD(), self.mat.theoreticalDensityFrac)\n\n        self.mat.clearCache()\n        self.mat._setCache(\"dummy\", 666)\n        self.assertEqual(self.mat.cached, {\"dummy\": 666})\n   
     self.mat.adjustTD(0.5)\n        self.assertEqual(0.5, self.mat.theoreticalDensityFrac)\n        self.assertEqual(self.mat.cached, {})\n\n    def test_duplicate(self):\n        \"\"\"Test the material duplication.\"\"\"\n        mat = self.mat.duplicate()\n\n        self.assertEqual(len(mat.massFrac), len(self.mat.massFrac))\n        for key in self.mat.massFrac:\n            self.assertEqual(mat.massFrac[key], self.mat.massFrac[key])\n\n        self.assertEqual(mat.parent, self.mat.parent)\n        self.assertEqual(mat.refDens, self.mat.refDens)\n        self.assertEqual(mat.theoreticalDensityFrac, self.mat.theoreticalDensityFrac)\n\n    def test_cache(self):\n        \"\"\"Test the material cache.\"\"\"\n        self.mat.clearCache()\n        self.assertEqual(len(self.mat.cached), 0)\n\n        self.mat._setCache(\"Emmy\", \"Noether\")\n        self.assertEqual(len(self.mat.cached), 1)\n\n        val = self.mat._getCached(\"Emmy\")\n        self.assertEqual(val, \"Noether\")\n\n    def test_densityKgM3(self):\n        \"\"\"Test the density for kg/m^3.\"\"\"\n        dens = self.mat.density(self.VALID_TEMP_K)\n        densKgM3 = self.mat.densityKgM3(self.VALID_TEMP_K)\n        self.assertEqual(dens * 1000.0, densKgM3)\n\n    def test_pseudoDensityKgM3(self):\n        \"\"\"Test the pseudo density for kg/m^3.\"\"\"\n        dens = self.mat.pseudoDensity(self.VALID_TEMP_K)\n        densKgM3 = self.mat.pseudoDensityKgM3(self.VALID_TEMP_K)\n        self.assertEqual(dens * 1000.0, densKgM3)\n\n    def test_wrappedDensity(self):\n        \"\"\"Test that the density decorator is applied to non-fluids.\"\"\"\n        self.assertEqual(\n            hasattr(self.mat.density, \"__wrapped__\"),\n            not isinstance(self.mat, materials.Fluid),\n            msg=self.mat,\n        )\n\n\nclass MaterialConstructionTests(unittest.TestCase):\n    def test_material_initialization(self):\n        \"\"\"Make sure all materials can be instantiated without error.\"\"\"\n     
   for matClass in materials.iterAllMaterialClassesInNamespace(materials):\n            matClass()\n\n\nclass MaterialFindingTests(unittest.TestCase):\n    \"\"\"Make sure materials are discoverable as designed.\"\"\"\n\n    def test_findMaterial(self):\n        \"\"\"Test resolveMaterialClassByName() function.\n\n        .. test:: Materials can be grabbed from a list of namespaces.\n            :id: T_ARMI_MAT_NAMESPACE0\n            :tests: R_ARMI_MAT_NAMESPACE\n        \"\"\"\n        self.assertIs(\n            materials.resolveMaterialClassByName(\"Void\", namespaceOrder=[\"armi.materials\"]),\n            materials.Void,\n        )\n        self.assertIs(\n            materials.resolveMaterialClassByName(\"Void\", namespaceOrder=[\"armi.materials.void\"]),\n            materials.Void,\n        )\n        self.assertIs(\n            materials.resolveMaterialClassByName(\"Void\", namespaceOrder=[\"armi.materials.mox\", \"armi.materials.void\"]),\n            materials.Void,\n        )\n        with self.assertRaises(ModuleNotFoundError):\n            materials.resolveMaterialClassByName(\"Void\", namespaceOrder=[\"invalid.namespace\", \"armi.materials.void\"])\n        with self.assertRaises(KeyError):\n            materials.resolveMaterialClassByName(\"Unobtanium\", namespaceOrder=[\"armi.materials\"])\n\n    def __validateMaterialNamespace(self):\n        \"\"\"Helper method to validate the material namespace a little.\"\"\"\n        self.assertTrue(isinstance(_MATERIAL_NAMESPACE_ORDER, list))\n        self.assertGreater(len(_MATERIAL_NAMESPACE_ORDER), 0)\n        for nameSpace in _MATERIAL_NAMESPACE_ORDER:\n            self.assertTrue(isinstance(nameSpace, str))\n\n    @unittest.skipUnless(context.MPI_RANK == 0, \"test only on root node\")\n    def test_namespacing(self):\n        \"\"\"Test loading materials with different material namespaces, to cover how they work.\n\n        .. 
test:: Material can be found in defined packages.\n            :id: T_ARMI_MAT_NAMESPACE1\n            :tests: R_ARMI_MAT_NAMESPACE\n\n        .. test:: Material namespaces register materials with an order of priority.\n            :id: T_ARMI_MAT_ORDER\n            :tests: R_ARMI_MAT_ORDER\n        \"\"\"\n        # let's do a quick test of getting a material from the default namespace\n        setMaterialNamespaceOrder([\"armi.materials\"])\n        uraniumOxide = materials.resolveMaterialClassByName(\"UraniumOxide\", namespaceOrder=[\"armi.materials\"])\n        self.assertGreater(uraniumOxide().density(500), 0)\n\n        # validate the default namespace in ARMI\n        self.__validateMaterialNamespace()\n\n        # show you can add a material namespace\n        newMats = \"armi.utils.tests.test_densityTools\"\n        setMaterialNamespaceOrder([\"armi.materials\", newMats])\n        self.__validateMaterialNamespace()\n\n        # in the case of duplicate materials, show that the material namespace determines\n        # which material is chosen\n        uraniumOxideTest = materials.resolveMaterialClassByName(\n            \"UraniumOxide\", namespaceOrder=[newMats, \"armi.materials\"]\n        )\n        for t in range(200, 600):\n            self.assertEqual(uraniumOxideTest().density(t), 0)\n            self.assertEqual(uraniumOxideTest().pseudoDensity(t), 0)\n\n        # for safety, reset the material namespace list and order\n        setMaterialNamespaceOrder([\"armi.materials\"])\n\n\nclass Californium_TestCase(AbstractMaterialTest, unittest.TestCase):\n    MAT_CLASS = materials.Californium\n\n    def test_pseudoDensity(self):\n        ref = 15.1\n\n        cur = self.mat.pseudoDensity(923)\n        self.assertEqual(cur, ref)\n\n        cur = self.mat.pseudoDensity(1390)\n        self.assertEqual(cur, ref)\n\n    def test_propertyValidTemperature(self):\n        self.assertEqual(len(self.mat.propertyValidTemperature), 0)\n\n    def 
test_porosities(self):\n        self.mat.parent = None\n        self.assertEqual(self.mat.liquidPorosity, 0.0)\n        self.assertEqual(self.mat.gasPorosity, 0.0)\n\n    def test_getCorrosionRate(self):\n        self.assertEqual(self.mat.getCorrosionRate(500), 0.0)\n\n\nclass Cesium_TestCase(AbstractMaterialTest, unittest.TestCase):\n    MAT_CLASS = materials.Cs\n\n    def test_pseudoDensity(self):\n        cur = self.mat.pseudoDensity(250)\n        ref = 1.93\n        self.assertAlmostEqual(cur, ref, delta=ref * 0.05)\n\n        cur = self.mat.pseudoDensity(450)\n        ref = 1.843\n        self.assertAlmostEqual(cur, ref, delta=ref * 0.05)\n\n    def test_propertyValidTemperature(self):\n        self.assertEqual(len(self.mat.propertyValidTemperature), 0)\n\n\nclass Magnesium_TestCase(AbstractMaterialTest, unittest.TestCase):\n    MAT_CLASS = materials.Magnesium\n    VALID_TEMP_K = 1000\n\n    def test_pseudoDensity(self):\n        cur = self.mat.pseudoDensity(923)\n        ref = 1.5897\n        delta = ref * 0.0001\n        self.assertAlmostEqual(cur, ref, delta=delta)\n\n        cur = self.mat.pseudoDensity(1390)\n        ref = 1.4661\n        delta = ref * 0.0001\n        self.assertAlmostEqual(cur, ref, delta=delta)\n\n    def test_propertyValidTemperature(self):\n        self.assertGreater(len(self.mat.propertyValidTemperature), 0)\n\n\nclass MagnesiumOxide_TestCase(AbstractMaterialTest, unittest.TestCase):\n    MAT_CLASS = materials.MgO\n\n    def test_pseudoDensity(self):\n        cur = self.mat.pseudoDensity(923)\n        ref = 3.48887\n        delta = ref * 0.05\n        self.assertAlmostEqual(cur, ref, delta=delta)\n\n        cur = self.mat.pseudoDensity(1250)\n        ref = 3.418434\n        delta = ref * 0.05\n        self.assertAlmostEqual(cur, ref, delta=delta)\n\n    def test_linearExpansionPercent(self):\n        cur = self.mat.linearExpansionPercent(Tc=100)\n        ref = 0.00110667\n        self.assertAlmostEqual(cur, ref, delta=abs(ref * 
0.001))\n\n        cur = self.mat.linearExpansionPercent(Tc=400)\n        ref = 0.0049909\n        self.assertAlmostEqual(cur, ref, delta=abs(ref * 0.001))\n\n    def test_propertyValidTemperature(self):\n        self.assertGreater(len(self.mat.propertyValidTemperature), 0)\n\n\nclass Molybdenum_TestCase(AbstractMaterialTest, unittest.TestCase):\n    MAT_CLASS = materials.Molybdenum\n\n    def test_pseudoDensity(self):\n        cur = self.mat.pseudoDensity(333)\n        ref = 10.28\n        delta = ref * 0.0001\n        self.assertAlmostEqual(cur, ref, delta=delta)\n\n        cur = self.mat.pseudoDensity(1390)\n        ref = 10.28\n        delta = ref * 0.0001\n        self.assertAlmostEqual(cur, ref, delta=delta)\n\n    def test_propertyValidTemperature(self):\n        self.assertEqual(len(self.mat.propertyValidTemperature), 0)\n\n\nclass MOX_TestCase(AbstractMaterialTest, unittest.TestCase):\n    MAT_CLASS = materials.MOX\n\n    def test_density(self):\n        cur = self.mat.density(333)\n        ref = 10.926\n        delta = ref * 0.0001\n        self.assertAlmostEqual(cur, ref, delta=delta)\n\n    def test_getMassFracPuO2(self):\n        ref = 0.176067\n        self.assertAlmostEqual(self.mat.getMassFracPuO2(), ref, delta=ref * 0.001)\n\n    def test_getMolFracPuO2(self):\n        ref = 0.209\n        self.assertAlmostEqual(self.mat.getMolFracPuO2(), ref, delta=ref * 0.001)\n\n    def test_getMeltingPoint(self):\n        ref = 2996.788765\n        self.assertAlmostEqual(self.mat.meltingPoint(), ref, delta=ref * 0.001)\n\n    def test_applyInputParams(self):\n        massFracNameList = [\n            \"AM241\",\n            \"O16\",\n            \"PU238\",\n            \"PU239\",\n            \"PU240\",\n            \"PU241\",\n            \"PU242\",\n            \"U235\",\n            \"U238\",\n        ]\n        massFracRefValList = [\n            0.000998,\n            0.118643,\n            0.000156,\n            0.119839,\n            0.029999,\n          
  0.00415,\n            0.000858,\n            0.166759,\n            0.558597,\n        ]\n\n        self.mat.applyInputParams()\n\n        for name, frac in zip(massFracNameList, massFracRefValList):\n            cur = self.mat.massFrac[name]\n            self.assertEqual(cur, frac)\n\n        # bonus code coverage for clearMassFrac()\n        self.mat.clearMassFrac()\n        self.assertEqual(len(self.mat.massFrac), 0)\n\n        # bonus coverage for removeNucMassFrac\n        self.mat.removeNucMassFrac(\"PassWithoutWarning\")\n        self.assertEqual(len(self.mat.massFrac), 0)\n\n\nclass NaCl_TestCase(AbstractMaterialTest, unittest.TestCase):\n    MAT_CLASS = materials.NaCl\n\n    def test_density(self):\n        cur = self.mat.density(Tc=100)\n        ref = 2.113204\n        self.assertAlmostEqual(cur, ref, delta=abs(ref * 0.001))\n\n        cur = self.mat.density(Tc=300)\n        ref = 2.050604\n        self.assertAlmostEqual(cur, ref, delta=abs(ref * 0.001))\n\n    def test_propertyValidTemperature(self):\n        self.assertEqual(len(self.mat.propertyValidTemperature), 0)\n\n\nclass NiobiumZirconium_TestCase(AbstractMaterialTest, unittest.TestCase):\n    MAT_CLASS = materials.NZ\n\n    def test_pseudoDensity(self):\n        cur = self.mat.pseudoDensity(Tk=100)\n        ref = 8.66\n        self.assertAlmostEqual(cur, ref, delta=abs(ref * 0.001))\n\n        cur = self.mat.pseudoDensity(Tk=1390)\n        ref = 8.66\n        self.assertAlmostEqual(cur, ref, delta=abs(ref * 0.001))\n\n    def test_propertyValidTemperature(self):\n        self.assertEqual(len(self.mat.propertyValidTemperature), 0)\n\n\nclass Potassium_TestCase(AbstractMaterialTest, unittest.TestCase):\n    MAT_CLASS = materials.Potassium\n\n    def test_pseudoDensity(self):\n        cur = self.mat.pseudoDensity(Tc=100)\n        ref = 0.8195\n        delta = ref * 0.001\n        self.assertAlmostEqual(cur, ref, delta=delta)\n\n        cur = self.mat.pseudoDensity(Tc=333)\n        ref = 0.7664\n   
     delta = ref * 0.001\n        self.assertAlmostEqual(cur, ref, delta=delta)\n\n        cur = self.mat.pseudoDensity(Tc=500)\n        ref = 0.7267\n        delta = ref * 0.001\n        self.assertAlmostEqual(cur, ref, delta=delta)\n\n        cur = self.mat.pseudoDensity(Tc=750)\n        ref = 0.6654\n        delta = ref * 0.001\n        self.assertAlmostEqual(cur, ref, delta=delta)\n\n        cur = self.mat.pseudoDensity(Tc=1200)\n        ref = 0.5502\n        delta = ref * 0.001\n        self.assertAlmostEqual(cur, ref, delta=delta)\n\n    def test_propertyValidTemperature(self):\n        self.assertGreater(len(self.mat.propertyValidTemperature), 0)\n\n\nclass ScandiumOxide_TestCase(AbstractMaterialTest, unittest.TestCase):\n    MAT_CLASS = materials.Sc2O3\n\n    def test_pseudoDensity(self):\n        cur = self.mat.pseudoDensity(Tc=25)\n        ref = 3.86\n        self.assertAlmostEqual(cur, ref, delta=abs(ref * 0.001))\n\n    def test_linearExpansionPercent(self):\n        cur = self.mat.linearExpansionPercent(Tc=100)\n        ref = 0.0623499\n        self.assertAlmostEqual(cur, ref, delta=abs(ref * 0.001))\n\n        cur = self.mat.linearExpansionPercent(Tc=400)\n        ref = 0.28322\n        self.assertAlmostEqual(cur, ref, delta=abs(ref * 0.001))\n\n    def test_propertyValidTemperature(self):\n        self.assertGreater(len(self.mat.propertyValidTemperature), 0)\n\n\nclass Sodium_TestCase(AbstractMaterialTest, unittest.TestCase):\n    MAT_CLASS = materials.Sodium\n\n    def test_pseudoDensity(self):\n        cur = self.mat.pseudoDensity(372)\n        ref = 0.92546\n        delta = ref * 0.001\n        self.assertAlmostEqual(cur, ref, delta=delta)\n\n        cur = self.mat.pseudoDensity(1700)\n        ref = 0.597\n        delta = ref * 0.001\n        self.assertAlmostEqual(cur, ref, delta=delta)\n\n    def test_specificVolumeLiquid(self):\n        cur = self.mat.specificVolumeLiquid(372)\n        ref = 0.0010805\n        delta = ref * 0.001\n        
self.assertAlmostEqual(cur, ref, delta=delta)\n\n        cur = self.mat.specificVolumeLiquid(1700)\n        ref = 0.001674\n        delta = ref * 0.001\n        self.assertAlmostEqual(cur, ref, delta=delta)\n\n    def test_enthalpy(self):\n        cur = self.mat.enthalpy(372)\n        ref = 208100.1914\n        delta = ref * 0.001\n        self.assertAlmostEqual(cur, ref, delta=delta)\n\n        cur = self.mat.enthalpy(1700)\n        ref = 1959147.963\n        delta = ref * 0.001\n        self.assertAlmostEqual(cur, ref, delta=delta)\n\n    def test_thermalConductivity(self):\n        cur = self.mat.thermalConductivity(372)\n        ref = 89.36546\n        delta = ref * 0.001\n        self.assertAlmostEqual(cur, ref, delta=delta)\n\n        cur = self.mat.thermalConductivity(1500)\n        ref = 38.24675\n        delta = ref * 0.001\n        self.assertAlmostEqual(cur, ref, delta=delta)\n\n    def test_propertyValidTemperature(self):\n        self.assertGreater(len(self.mat.propertyValidTemperature), 0)\n\n\nclass Tantalum_TestCase(AbstractMaterialTest, unittest.TestCase):\n    MAT_CLASS = materials.Tantalum\n\n    def test_pseudoDensity(self):\n        cur = self.mat.pseudoDensity(Tc=100)\n        ref = 16.6\n        self.assertAlmostEqual(cur, ref, delta=abs(ref * 0.001))\n\n        cur = self.mat.pseudoDensity(Tc=300)\n        ref = 16.6\n        self.assertAlmostEqual(cur, ref, delta=abs(ref * 0.001))\n\n    def test_propertyValidTemperature(self):\n        self.assertEqual(len(self.mat.propertyValidTemperature), 0)\n\n\nclass ThoriumUraniumMetal_TestCase(AbstractMaterialTest, unittest.TestCase):\n    MAT_CLASS = materials.ThU\n\n    def test_pseudoDensity(self):\n        cur = self.mat.pseudoDensity(Tc=100)\n        ref = 11.68\n        self.assertAlmostEqual(cur, ref, delta=abs(ref * 0.001))\n\n        cur = self.mat.pseudoDensity(Tc=300)\n        ref = 11.68\n        self.assertAlmostEqual(cur, ref, delta=abs(ref * 0.001))\n\n    def 
test_meltingPoint(self):\n        cur = self.mat.meltingPoint()\n        ref = 2025.0\n        self.assertAlmostEqual(cur, ref, delta=abs(ref * 0.001))\n\n    def test_thermalConductivity(self):\n        cur = self.mat.thermalConductivity(Tc=100)\n        ref = 43.1\n        self.assertAlmostEqual(cur, ref, delta=abs(ref * 0.001))\n\n        cur = self.mat.thermalConductivity(Tc=300)\n        ref = 43.1\n        self.assertAlmostEqual(cur, ref, delta=abs(ref * 0.001))\n\n    def test_linearExpansion(self):\n        cur = self.mat.linearExpansion(Tc=100)\n        ref = 11.9e-6\n        self.assertAlmostEqual(cur, ref, delta=abs(ref * 0.001))\n\n        cur = self.mat.linearExpansion(Tc=300)\n        ref = 11.9e-6\n        self.assertAlmostEqual(cur, ref, delta=abs(ref * 0.001))\n\n    def test_propertyValidTemperature(self):\n        self.assertEqual(len(self.mat.propertyValidTemperature), 1)\n\n\nclass Uranium_TestCase(AbstractMaterialTest, unittest.TestCase):\n    MAT_CLASS = materials.Uranium\n\n    def test_applyInputParams(self):\n        # check the defaults when applyInputParams is applied without arguments\n        U235_wt_frac_default = 0.0071136523\n        self.mat.applyInputParams()\n        self.assertAlmostEqual(self.mat.massFrac[\"U235\"], U235_wt_frac_default)\n        densityTemp = materials.Uranium._densityTableK[0]\n        density0 = self.mat.density(Tk=materials.Uranium._densityTableK[0])\n        expectedDensity = materials.Uranium._densityTable[0]\n        self.assertEqual(density0, expectedDensity)\n\n        newWtFrac = 1.0\n        newTDFrac = 0.5\n        self.mat.applyInputParams(U235_wt_frac=newWtFrac, TD_frac=newTDFrac)\n        self.assertEqual(self.mat.massFrac[\"U235\"], newWtFrac)\n        self.assertEqual(self.mat.density(Tk=densityTemp), expectedDensity * newTDFrac)\n        self.assertAlmostEqual(self.mat.pseudoDensity(Tk=densityTemp), 9.415418593432646)\n\n    def test_thermalConductivity(self):\n        cur = 
self.mat.thermalConductivity(Tc=100)\n        ref = 28.489312629207500293659904855\n        self.assertAlmostEqual(cur, ref, delta=10e-10)\n\n        cur = self.mat.thermalConductivity(Tc=300)\n        ref = 32.789271449207497255429188954\n        self.assertAlmostEqual(cur, ref, delta=10e-10)\n\n        cur = self.mat.thermalConductivity(Tc=500)\n        ref = 37.561790269207499193271360127\n        self.assertAlmostEqual(cur, ref, delta=10e-10)\n\n        cur = self.mat.thermalConductivity(Tc=700)\n        ref = 42.806869089207502554472739575\n        self.assertAlmostEqual(cur, ref, delta=10e-10)\n\n        cur = self.mat.thermalConductivity(Tc=900)\n        ref = 48.524507909207507339033327298\n        self.assertAlmostEqual(cur, ref, delta=10e-10)\n\n    def test_propertyValidTemperature(self):\n        self.assertGreater(len(self.mat.propertyValidTemperature), 0)\n\n        # ensure that material properties check the bounds and that the bounds\n        # align with what is expected\n        for propName, methodName in zip(\n            [\n                \"thermal conductivity\",\n                \"heat capacity\",\n                \"density\",\n                \"linear expansion\",\n                \"linear expansion percent\",\n            ],\n            [\n                \"thermalConductivity\",\n                \"heatCapacity\",\n                \"density\",\n                \"linearExpansion\",\n                \"linearExpansionPercent\",\n            ],\n        ):\n            lowerBound = self.mat.propertyValidTemperature[propName][0][0]\n            upperBound = self.mat.propertyValidTemperature[propName][0][1]\n\n            with self.assertRaises(ValueError):\n                getattr(self.mat, methodName)(lowerBound - 1)\n\n            with self.assertRaises(ValueError):\n                getattr(self.mat, methodName)(upperBound + 1)\n\n    def test_pseudoDensity(self):\n        cur = self.mat.pseudoDensity(Tc=500)\n        ref = 
18.74504534852846\n        self.assertAlmostEqual(cur, ref, delta=abs(ref * 0.001))\n\n        cur = self.mat.pseudoDensity(Tc=1000)\n        ref = 18.1280492780791\n        self.assertAlmostEqual(cur, ref, delta=abs(ref * 0.001))\n\n\nclass UraniumOxide_TestCase(AbstractMaterialTest, unittest.TestCase):\n    MAT_CLASS = materials.UraniumOxide\n\n    def test_adjustMassEnrichment(self):\n        o16 = 15.999304875697801\n        u235 = 235.043929425\n        u238 = 238.050788298\n        self.mat.adjustMassEnrichment(0.02)\n\n        gPerMol = 2 * o16 + 0.02 * u235 + 0.98 * u238\n        massFracs = self.mat.massFrac\n\n        testing.assert_allclose(massFracs[\"O\"], 2 * o16 / gPerMol, rtol=5e-4)\n        testing.assert_allclose(massFracs[\"U235\"], 0.02 * (u235 * 0.02 + u238 * 0.98) / gPerMol, rtol=5e-4)\n        testing.assert_allclose(massFracs[\"U238\"], 0.98 * (u235 * 0.02 + u238 * 0.98) / gPerMol, rtol=5e-4)\n\n        self.mat.adjustMassEnrichment(0.2)\n        massFracs = self.mat.massFrac\n        gPerMol = 2 * o16 + 0.8 * u238 + 0.2 * u235\n\n        testing.assert_allclose(massFracs[\"O\"], 2 * o16 / gPerMol, rtol=5e-4)\n        testing.assert_allclose(massFracs[\"U235\"], 0.2 * (u235 * 0.2 + u238 * 0.8) / gPerMol, rtol=5e-4)\n        testing.assert_allclose(massFracs[\"U238\"], 0.8 * (u235 * 0.2 + u238 * 0.8) / gPerMol, rtol=5e-4)\n\n    def test_meltingPoint(self):\n        cur = self.mat.meltingPoint()\n        self.assertEqual(cur, 3123.0)\n\n    def test_density(self):\n        # Reference data taken from ORNL/TM-2000/351. \"Thermophysical Properties of MOX and UO2\n        # Fuels Including the Effects of Irradiation.\", Popov, et al.  
Table 3.2 \"Parameters of\n        # thermal expansion of stoichiometric MOX fuel and density of UO2 as a function of\n        # temperature\"\n        cur = self.mat.density(Tk=700)\n        ref = 1.0832e4 * 0.001  # Convert to grams/cc\n        delta = ref * 0.02\n        self.assertAlmostEqual(cur, ref, delta=delta)\n\n        cur = self.mat.density(Tk=2600)\n        ref = 9.9698e3 * 0.001  # Convert to grams/cc\n        delta = ref * 0.02\n        self.assertAlmostEqual(cur, ref, delta=delta)\n\n    def test_thermalConductivity(self):\n        cur = self.mat.thermalConductivity(600)\n        ref = 4.864\n        accuracy = 3\n        self.assertAlmostEqual(cur, ref, accuracy)\n\n        cur = self.mat.thermalConductivity(1800)\n        ref = 2.294\n        accuracy = 3\n        self.assertAlmostEqual(cur, ref, accuracy)\n\n        cur = self.mat.thermalConductivity(2700)\n        ref = 1.847\n        accuracy = 3\n        self.assertAlmostEqual(cur, ref, accuracy)\n\n    def test_linearExpansion(self):\n        cur = self.mat.linearExpansion(300)\n        ref = 9.93e-6\n        accuracy = 2\n        self.assertAlmostEqual(cur, ref, accuracy)\n\n        cur = self.mat.linearExpansion(1500)\n        ref = 1.0639e-5\n        accuracy = 2\n        self.assertAlmostEqual(cur, ref, accuracy)\n\n        cur = self.mat.linearExpansion(3000)\n        ref = 1.5821e-5\n        accuracy = 2\n        self.assertAlmostEqual(cur, ref, accuracy)\n\n    def test_linearExpansionPercent(self):\n        cur = self.mat.linearExpansionPercent(Tk=500)\n        ref = 0.222826\n        self.assertAlmostEqual(cur, ref, delta=abs(ref * 0.001))\n\n        cur = self.mat.linearExpansionPercent(Tk=950)\n        ref = 0.677347\n        self.assertAlmostEqual(cur, ref, delta=abs(ref * 0.001))\n\n    def test_heatCapacity(self):\n        \"\"\"Check against Figure 4.2 from ORNL 2000-1723 EFG.\"\"\"\n        self.assertAlmostEqual(self.mat.heatCapacity(300), 230.0, delta=20)\n        
self.assertAlmostEqual(self.mat.heatCapacity(1000), 320.0, delta=20)\n        self.assertAlmostEqual(self.mat.heatCapacity(2000), 380.0, delta=20)\n\n    def test_getTemperatureAtDensity(self):\n        expectedTemperature = 100.0\n        tAtTargetDensity = self.mat.getTemperatureAtDensity(self.mat.density(Tc=expectedTemperature), 30.0)\n        self.assertAlmostEqual(expectedTemperature, tAtTargetDensity)\n\n    def test_getDensityExpansion3D(self):\n        expectedTemperature = 100.0\n\n        ref_density = 10.86792660463439e3\n        test_density = self.mat.densityKgM3(Tc=expectedTemperature)\n        error = math.fabs((ref_density - test_density) / ref_density)\n        self.assertLess(error, 0.005)\n\n    def test_removeNucMassFrac(self):\n        self.mat.removeNucMassFrac(\"O\")\n        massFracs = [str(k) for k in self.mat.massFrac.keys()]\n        self.assertListEqual([\"U235\", \"U238\"], massFracs)\n\n    def test_densityTimesHeatCapactiy(self):\n        Tc = 500.0\n        expectedRhoCp = self.mat.density(Tc=Tc) * 1000.0 * self.mat.heatCapacity(Tc=Tc)\n        self.assertAlmostEqual(expectedRhoCp, self.mat.densityTimesHeatCapacity(Tc=Tc))\n\n    def test_getTempChangeForDensityChange(self):\n        Tc = 500.0\n        linearExpansion = self.mat.linearExpansion(Tc=Tc)\n        densityFrac = 1.001\n        linearChange = densityFrac ** (-1.0 / 3.0) - 1.0\n        expectedDeltaT = linearChange / linearExpansion\n        actualDeltaT = self.mat.getTempChangeForDensityChange(Tc, densityFrac, quiet=False)\n        self.assertAlmostEqual(expectedDeltaT, actualDeltaT)\n\n    def test_duplicate(self):\n        \"\"\"Test the material duplication.\n\n        .. 
test:: Materials shall calc mass fracs at init.\n            :id: T_ARMI_MAT_FRACS4\n            :tests: R_ARMI_MAT_FRACS\n        \"\"\"\n        duplicateU = self.mat.duplicate()\n\n        for key in self.mat.massFrac:\n            self.assertEqual(duplicateU.massFrac[key], self.mat.massFrac[key])\n\n        duplicateMassFrac = deepcopy(self.mat.massFrac)\n        for key in self.mat.massFrac.keys():\n            self.assertEqual(duplicateMassFrac[key], self.mat.massFrac[key])\n\n    def test_propertyValidTemperature(self):\n        self.assertGreater(len(self.mat.propertyValidTemperature), 0)\n\n    def test_applyInputParams(self):\n        UO2_TD = materials.UraniumOxide()\n        original = UO2_TD.density(500)\n        UO2_TD.applyInputParams(TD_frac=0.1)\n        new = UO2_TD.density(500)\n        ratio = new / original\n        self.assertAlmostEqual(ratio, 0.1)\n\n        UO2_TD = materials.UraniumOxide()\n        original = UO2_TD.pseudoDensity(500)\n        UO2_TD.applyInputParams(TD_frac=0.1)\n        new = UO2_TD.pseudoDensity(500)\n        ratio = new / original\n        self.assertAlmostEqual(ratio, 0.1)\n\n\nclass Thorium_TestCase(AbstractMaterialTest, unittest.TestCase):\n    MAT_CLASS = materials.Thorium\n\n    def test_setDefaultMassFracs(self):\n        \"\"\"\n        Test default mass fractions.\n\n        .. 
test:: The materials generate nuclide mass fractions.\n            :id: T_ARMI_MAT_FRACS0\n            :tests: R_ARMI_MAT_FRACS\n        \"\"\"\n        self.mat.setDefaultMassFracs()\n        cur = self.mat.massFrac\n        ref = {\"TH232\": 1.0}\n        self.assertEqual(cur, ref)\n\n    def test_pseudoDensity(self):\n        cur = self.mat.pseudoDensity(30)\n        ref = 11.68\n        accuracy = 4\n        self.assertAlmostEqual(cur, ref, accuracy)\n\n    def test_linearExpansion(self):\n        cur = self.mat.linearExpansion(400)\n        ref = 11.9e-6\n        accuracy = 4\n        self.assertAlmostEqual(cur, ref, accuracy)\n\n    def test_thermalConductivity(self):\n        cur = self.mat.thermalConductivity(400)\n        ref = 43.1\n        accuracy = 4\n        self.assertAlmostEqual(cur, ref, accuracy)\n\n    def test_meltingPoint(self):\n        cur = self.mat.meltingPoint()\n        ref = 2025.0\n        accuracy = 4\n        self.assertAlmostEqual(cur, ref, accuracy)\n\n    def test_propertyValidTemperature(self):\n        self.assertGreater(len(self.mat.propertyValidTemperature), 0)\n\n\nclass ThoriumOxide_TestCase(AbstractMaterialTest, unittest.TestCase):\n    MAT_CLASS = materials.ThoriumOxide\n\n    def test_density(self):\n        cur = self.mat.density(Tc=25)\n        ref = 10.00\n        accuracy = 4\n        self.assertAlmostEqual(cur, ref, accuracy)\n\n        # make sure that material modifications are correctly applied\n        self.mat.applyInputParams(TD_frac=0.1)\n        cur = self.mat.density(Tc=25)\n        self.assertAlmostEqual(cur, ref * 0.1, accuracy)\n\n    def test_linearExpansion(self):\n        cur = self.mat.linearExpansion(400)\n        ref = 9.67e-6\n        accuracy = 4\n        self.assertAlmostEqual(cur, ref, accuracy)\n\n    def test_thermalConductivity(self):\n        cur = self.mat.thermalConductivity(400)\n        ref = 6.20\n        accuracy = 4\n        self.assertAlmostEqual(cur, ref, accuracy)\n\n    def 
test_meltingPoint(self):\n        cur = self.mat.meltingPoint()\n        ref = 3643.0\n        accuracy = 4\n        self.assertAlmostEqual(cur, ref, accuracy)\n\n    def test_propertyValidTemperature(self):\n        self.assertGreater(len(self.mat.propertyValidTemperature), 0)\n\n\nclass Void_TestCase(AbstractMaterialTest, unittest.TestCase):\n    MAT_CLASS = materials.Void\n\n    def test_pseudoDensity(self):\n        \"\"\"This material has a no pseudo-density.\"\"\"\n        self.mat.setDefaultMassFracs()\n        cur = self.mat.pseudoDensity()\n        self.assertEqual(cur, 0.0)\n\n    def test_density(self):\n        \"\"\"This material has no density.\"\"\"\n        self.assertEqual(self.mat.density(500), 0)\n\n        self.mat.setDefaultMassFracs()\n        cur = self.mat.density()\n        self.assertEqual(cur, 0.0)\n\n    def test_linearExpansion(self):\n        \"\"\"This material does not expand linearly.\"\"\"\n        cur = self.mat.linearExpansion(400)\n        ref = 0.0\n        self.assertEqual(cur, ref)\n\n    def test_propertyValidTemperature(self):\n        \"\"\"This material has no valid temperatures.\"\"\"\n        self.assertEqual(len(self.mat.propertyValidTemperature), 0)\n\n\nclass Mixture_TestCase(AbstractMaterialTest, unittest.TestCase):\n    MAT_CLASS = materials._Mixture\n\n    def test_density(self):\n        \"\"\"This material has no density function.\"\"\"\n        self.assertEqual(self.mat.density(500), 0)\n\n    def test_setDefaultMassFracs(self):\n        \"\"\"\n        Test default mass fractions.\n\n        .. 
test:: The materials generate nuclide mass fractions.\n            :id: T_ARMI_MAT_FRACS1\n            :tests: R_ARMI_MAT_FRACS\n        \"\"\"\n        self.mat.setDefaultMassFracs()\n        cur = self.mat.pseudoDensity(500)\n        self.assertEqual(cur, 0.0)\n\n    def test_linearExpansion(self):\n        with self.assertRaises(NotImplementedError):\n            _cur = self.mat.linearExpansion(400)\n\n    def test_propertyValidTemperature(self):\n        self.assertEqual(len(self.mat.propertyValidTemperature), 0)\n\n\nclass Lead_TestCase(AbstractMaterialTest, unittest.TestCase):\n    MAT_CLASS = materials.Lead\n    VALID_TEMP_K = 600\n\n    def test_volumetricExpansion(self):\n        self.assertAlmostEqual(\n            self.mat.volumetricExpansion(800),\n            1.1472e-4,\n            4,\n            msg=\"\\n\\nIncorrect Lead volumetricExpansion(Tk=None,Tc=None)\\nReceived:{}\\nExpected:{}\\n\".format(\n                self.mat.volumetricExpansion(800), 1.1472e-4\n            ),\n        )\n        self.assertAlmostEqual(\n            self.mat.volumetricExpansion(1200),\n            1.20237e-4,\n            4,\n            msg=\"\\n\\nIncorrect Lead volumetricExpansion(Tk=None,Tc=None)\\nReceived:{}\\nExpected:{}\\n\".format(\n                self.mat.volumetricExpansion(1200), 1.20237e-4\n            ),\n        )\n\n    def test_linearExpansion(self):\n        \"\"\"Unit tests for lead materials linear expansion.\n\n        .. test:: Fluid materials do not linearly expand, at any temperature.\n            :id: T_ARMI_MAT_FLUID2\n            :tests: R_ARMI_MAT_FLUID\n        \"\"\"\n        for t in range(300, 901, 25):\n            cur = self.mat.linearExpansion(t)\n            self.assertEqual(cur, 0)\n\n    def test_setDefaultMassFracs(self):\n        \"\"\"\n        Test default mass fractions.\n\n        .. 
test:: The materials generate nuclide mass fractions.\n            :id: T_ARMI_MAT_FRACS2\n            :tests: R_ARMI_MAT_FRACS\n        \"\"\"\n        self.mat.setDefaultMassFracs()\n        cur = self.mat.massFrac\n        ref = {\"PB\": 1}\n        self.assertEqual(cur, ref)\n\n    def test_pseudoDensity(self):\n        cur = self.mat.pseudoDensity(634.39)\n        ref = 10.6120\n        delta = ref * 0.05\n        self.assertAlmostEqual(cur, ref, delta=delta)\n\n        cur = self.mat.pseudoDensity(1673.25)\n        ref = 9.4231\n        delta = ref * 0.05\n        self.assertAlmostEqual(cur, ref, delta=delta)\n\n    def test_heatCapacity(self):\n        cur = self.mat.heatCapacity(1200)\n        ref = 138.647\n        delta = ref * 0.05\n        self.assertAlmostEqual(cur, ref, delta=delta)\n\n    def test_propertyValidTemperature(self):\n        self.assertGreater(len(self.mat.propertyValidTemperature), 0)\n\n\nclass LeadBismuth_TestCase(AbstractMaterialTest, unittest.TestCase):\n    MAT_CLASS = materials.LeadBismuth\n\n    def test_setDefaultMassFracs(self):\n        \"\"\"\n        Test default mass fractions.\n\n        .. 
test:: The materials generate nuclide mass fractions.\n            :id: T_ARMI_MAT_FRACS3\n            :tests: R_ARMI_MAT_FRACS\n        \"\"\"\n        self.mat.setDefaultMassFracs()\n        cur = self.mat.massFrac\n        ref = {\"BI209\": 0.555, \"PB\": 0.445}\n        self.assertEqual(cur, ref)\n\n    def test_pseudoDensity(self):\n        cur = self.mat.pseudoDensity(404.77)\n        ref = 10.5617\n        delta = ref * 0.05\n        self.assertAlmostEqual(cur, ref, delta=delta)\n\n        cur = self.mat.pseudoDensity(1274.20)\n        ref = 9.3627\n        delta = ref * 0.05\n        self.assertAlmostEqual(cur, ref, delta=delta)\n\n    def test_volumetricExpansion(self):\n        cur = self.mat.volumetricExpansion(400)\n        ref = 1.2526e-4\n        accuracy = 4\n        self.assertAlmostEqual(cur, ref, accuracy)\n\n        cur = self.mat.volumetricExpansion(800)\n        ref = 1.3187e-4\n        accuracy = 4\n        self.assertAlmostEqual(cur, ref, accuracy)\n\n    def test_heatCapacity(self):\n        cur = self.mat.heatCapacity(400)\n        ref = 149.2592\n        delta = ref * 0.05\n        self.assertAlmostEqual(cur, ref, delta=delta)\n\n        cur = self.mat.heatCapacity(800)\n        ref = 141.7968\n        delta = ref * 0.05\n        self.assertAlmostEqual(cur, ref, delta=delta)\n\n    def test_getTempChangeForDensityChange(self):\n        Tc = 800.0\n        densityFrac = 1.001\n        currentDensity = self.mat.pseudoDensity(Tc=Tc)\n        perturbedDensity = currentDensity * densityFrac\n        tAtPerturbedDensity = self.mat.getTemperatureAtDensity(perturbedDensity, Tc)\n        expectedDeltaT = tAtPerturbedDensity - Tc\n        actualDeltaT = self.mat.getTempChangeForDensityChange(Tc, densityFrac, quiet=False)\n        self.assertAlmostEqual(expectedDeltaT, actualDeltaT)\n\n    def test_dynamicVisc(self):\n        ref = self.mat.dynamicVisc(Tc=150)\n        cur = 0.0029355\n        self.assertAlmostEqual(ref, cur, delta=ref * 0.001)\n\n   
     ref = self.mat.dynamicVisc(Tc=200)\n        cur = 0.0024316\n        self.assertAlmostEqual(ref, cur, delta=ref * 0.001)\n\n    def test_propertyValidTemperature(self):\n        self.assertGreater(len(self.mat.propertyValidTemperature), 0)\n\n\nclass Copper_TestCase(AbstractMaterialTest, unittest.TestCase):\n    MAT_CLASS = materials.Cu\n\n    def test_setDefaultMassFracs(self):\n        cur = self.mat.massFrac\n        ref = {\"CU63\": 0.6915, \"CU65\": 0.3085}\n        self.assertEqual(cur, ref)\n\n    def test_densityNeverChanges(self):\n        for tk in [200.0, 400.0, 800.0, 1111.1]:\n            cur = self.mat.density(tk)\n            self.assertAlmostEqual(cur, 8.913, 4)\n\n    def test_linearExpansionPercent(self):\n        temps = [100.0, 200.0, 600.0]\n        expansions = [-0.2955, -0.1500, 0.5326]\n        for i, temp in enumerate(temps):\n            cur = self.mat.linearExpansionPercent(Tk=temp)\n            self.assertAlmostEqual(cur, expansions[i], 4)\n\n    def test_getChildren(self):\n        self.assertEqual(len(self.mat.getChildren()), 0)\n\n    def test_getChildrenWithFlags(self):\n        self.assertEqual(len(self.mat.getChildrenWithFlags(\"anything\")), 0)\n\n\nclass Sulfur_TestCase(AbstractMaterialTest, unittest.TestCase):\n    MAT_CLASS = materials.Sulfur\n    VALID_TEMP_K = 400\n\n    def test_setDefaultMassFracs(self):\n        cur = self.mat.massFrac\n        ref = {\"S34\": 0.0429, \"S36\": 0.002, \"S33\": 0.0076, \"S32\": 0.9493}\n        self.assertEqual(cur, ref)\n\n    def test_pseudoDensity(self):\n        cur = self.mat.pseudoDensity(400)\n        ref = 1.7956\n        accuracy = 4\n        self.assertAlmostEqual(cur, ref, accuracy)\n\n    def test_volumetricExpansion(self):\n        cur = self.mat.volumetricExpansion(334)\n        ref = 5.28e-4\n        accuracy = 4\n        self.assertAlmostEqual(cur, ref, accuracy)\n\n    def test_propertyValidTemperature(self):\n        
self.assertGreater(len(self.mat.propertyValidTemperature), 0)\n\n\nclass Zr_TestCase(AbstractMaterialTest, unittest.TestCase):\n    MAT_CLASS = materials.Zr\n\n    def test_thermalConductivity(self):\n        cur = self.mat.thermalConductivity(372.7273)\n        ref = 19.8718698709447\n        self.assertAlmostEqual(cur, ref)\n\n        cur = self.mat.thermalConductivity(1172.727)\n        ref = 23.193177102455\n        self.assertAlmostEqual(cur, ref)\n\n    def test_linearExpansion(self):\n        cur = self.mat.linearExpansion(400)\n        ref = 5.9e-6\n        delta = ref * 0.05\n        self.assertAlmostEqual(cur, ref, delta=delta)\n\n        cur = self.mat.linearExpansion(800)\n        ref = 7.9e-6\n        delta = ref * 0.05\n        self.assertAlmostEqual(cur, ref, delta=delta)\n\n    def test_linearExpansionPercent(self):\n        testTemperaturesInK = [\n            293,\n            400,\n            500,\n            600,\n            700,\n            800,\n            900,\n            1000,\n            1100,\n            1137,\n            1200,\n            1400,\n            1600,\n            1800,\n        ]\n        expectedLinearExpansionValues = [\n            0.0007078312624,\n            0.0602048,\n            0.123025,\n            0.1917312,\n            0.2652626,\n            0.3425584,\n            0.4225578,\n            0.5042,\n            0.5864242,\n            0.481608769233,\n            0.5390352,\n            0.7249496,\n            0.9221264,\n            1.1380488,\n        ]\n        for i, temp in enumerate(testTemperaturesInK):\n            Tk = temp\n            Tc = temp - units.C_TO_K\n            self.assertAlmostEqual(self.mat.linearExpansionPercent(Tc=Tc), expectedLinearExpansionValues[i])\n            self.assertAlmostEqual(self.mat.linearExpansionPercent(Tk=Tk), expectedLinearExpansionValues[i])\n\n    def test_pseudoDensity(self):\n        testTemperaturesInK = [\n            293,\n            298.15,\n         
   400,\n            500,\n            600,\n            700,\n            800,\n            900,\n            1000,\n            1100,\n            1137,\n            1200,\n            1400,\n            1600,\n            1800,\n        ]\n        expectedDensityValues = [\n            6.56990469455,\n            6.56955491852,\n            6.56209393299,\n            6.55386200572,\n            6.54487650252,\n            6.53528040809,\n            6.52521578203,\n            6.51482358662,\n            6.50424356114,\n            6.49361414192,\n            6.50716858169,\n            6.49973710507,\n            6.47576529821,\n            6.45048593916,\n            6.4229727005,\n        ]\n        for i, temp in enumerate(testTemperaturesInK):\n            Tk = temp\n            Tc = temp - units.C_TO_K\n            self.assertAlmostEqual(self.mat.pseudoDensity(Tc=Tc), expectedDensityValues[i])\n            self.assertAlmostEqual(self.mat.pseudoDensity(Tk=Tk), expectedDensityValues[i])\n\n    def test_propertyValidTemperature(self):\n        self.assertGreater(len(self.mat.propertyValidTemperature), 0)\n\n\nclass Inconel_TestCase(AbstractMaterialTest, unittest.TestCase):\n    def setUp(self):\n        self.Inconel = materials.Inconel()\n        self.Inconel800 = materials.Inconel800()\n        self.InconelPE16 = materials.InconelPE16()\n        self.mat = self.Inconel\n\n    def tearDown(self):\n        self.Inconel = None\n        self.Inconel800 = None\n        self.InconelPE16 = None\n\n    def test_setDefaultMassFracs(self):\n        self.Inconel.setDefaultMassFracs()\n        self.Inconel800.setDefaultMassFracs()\n        self.InconelPE16.setDefaultMassFracs()\n\n        self.assertAlmostEqual(self.Inconel.getMassFrac(\"MO\"), 0.09)\n        self.assertAlmostEqual(self.Inconel800.getMassFrac(\"AL\"), 0.00375)\n        self.assertAlmostEqual(self.InconelPE16.getMassFrac(\"CR\"), 0.165)\n\n    def test_pseudoDensity(self):\n        
self.assertEqual(self.Inconel.pseudoDensity(Tc=25), 8.3600)\n        self.assertEqual(self.Inconel800.pseudoDensity(Tc=21.0), 7.94)\n        self.assertEqual(self.InconelPE16.pseudoDensity(Tc=25), 8.00)\n\n    def test_Iconel800_linearExpansion(self):\n        TcList = [100, 200, 300, 400, 500, 600, 700, 800]\n        refList = [\n            0.11469329415,\n            0.27968864560,\n            0.454195022850,\n            0.63037690440,\n            0.80645936875,\n            0.98672809440,\n            1.18152935985,\n            1.4072700436,\n        ]\n\n        for Tc, val in zip(TcList, refList):\n            cur = self.Inconel800.linearExpansionPercent(Tc=Tc)\n            ref = val\n            errorMsg = \"\\n\\nIncorrect Inconel 800 linearExpansionPercent()\\nReceived:{}\\nExpected:{}\\n\".format(cur, ref)\n            self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg)\n\n    def test_propertyValidTemperature(self):\n        self.assertEqual(len(self.Inconel.propertyValidTemperature), 0)\n        self.assertGreater(len(self.Inconel800.propertyValidTemperature), 0)\n        self.assertEqual(len(self.InconelPE16.propertyValidTemperature), 0)\n        self.assertEqual(len(self.mat.propertyValidTemperature), 0)\n\n\nclass Inconel600_TestCase(AbstractMaterialTest, unittest.TestCase):\n    MAT_CLASS = materials.Inconel600\n\n    def test_00_setDefaultMassFracs(self):\n        massFracNameList = [\"NI\", \"CR\", \"FE\", \"C\", \"MN55\", \"S\", \"SI\", \"CU\"]\n        massFracRefValList = [\n            0.7541,\n            0.1550,\n            0.0800,\n            0.0008,\n            0.0050,\n            0.0001,\n            0.0025,\n            0.0025,\n        ]\n\n        for name, frac in zip(massFracNameList, massFracRefValList):\n            cur = self.mat.getMassFrac(name)\n            ref = frac\n            self.assertAlmostEqual(cur, ref)\n\n    def test_01_linearExpansionPercent(self):\n        TcList = [100, 200, 300, 400, 500, 600, 
700, 800]\n        refList = [\n            0.105392,\n            0.24685800000000002,\n            0.39576799999999995,\n            0.552122,\n            0.7159199999999999,\n            0.8871619999999999,\n            1.065848,\n            1.251978,\n        ]\n\n        for Tc, val in zip(TcList, refList):\n            cur = self.mat.linearExpansionPercent(Tc=Tc)\n            ref = val\n            errorMsg = (\n                \"\\n\\nIncorrect Inconel 600 linearExpansionPercent(Tk=None,Tc=None)\\nReceived:{}\\nExpected:{}\\n\".format(\n                    cur, ref\n                )\n            )\n            self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg)\n\n    def test_02_linearExpansion(self):\n        TcList = [100, 200, 300, 400, 500, 600, 700, 800]\n        refList = [\n            1.3774400000000001e-05,\n            1.45188e-05,\n            1.52632e-05,\n            1.60076e-05,\n            1.6752e-05,\n            1.74964e-05,\n            1.82408e-05,\n            1.8985200000000002e-05,\n        ]\n\n        for Tc, val in zip(TcList, refList):\n            cur = self.mat.linearExpansion(Tc=Tc)\n            ref = val\n            errorMsg = \"\\n\\nIncorrect Inconel 600 linearExpansion(Tk=None,Tc=None)\\nReceived:{}\\nExpected:{}\\n\".format(\n                cur, ref\n            )\n            self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg)\n\n    def test_03_pseudoDensity(self):\n        TcList = [100, 200, 300, 400, 500, 600, 700, 800]\n        refList = [\n            8.452174779681522,\n            8.428336592376965,\n            8.40335281361706,\n            8.377239465159116,\n            8.35001319823814,\n            8.321691270531865,\n            8.292291522488402,\n            8.261832353071625,\n        ]\n\n        for Tc, val in zip(TcList, refList):\n            cur = self.mat.pseudoDensity(Tc=Tc)\n            ref = val\n            errorMsg = \"\\n\\nIncorrect Inconel 600 
pseudoDensity(Tk=None,Tc=None)\\nReceived:{}\\nExpected:{}\\n\".format(\n                cur, ref\n            )\n            self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg)\n\n    def test_heatCapacity(self):\n        ref = self.mat.heatCapacity(Tc=100)\n        cur = 461.947021\n        self.assertAlmostEqual(ref, cur, delta=cur * 0.001)\n\n        ref = self.mat.heatCapacity(Tc=200)\n        cur = 482.742084\n        self.assertAlmostEqual(ref, cur, delta=cur * 0.001)\n\n    def test_propertyValidTemperature(self):\n        self.assertGreater(len(self.mat.propertyValidTemperature), 0)\n\n\nclass Inconel625_TestCase(AbstractMaterialTest, unittest.TestCase):\n    MAT_CLASS = materials.Inconel625\n\n    def test_00_setDefaultMassFracs(self):\n        massFracNameList = [\n            \"NI\",\n            \"CR\",\n            \"FE\",\n            \"MO\",\n            \"TA181\",\n            \"C\",\n            \"MN55\",\n            \"SI\",\n            \"P31\",\n            \"S\",\n            \"AL27\",\n            \"TI\",\n            \"CO59\",\n        ]\n        massFracRefValList = [\n            0.6188,\n            0.2150,\n            0.0250,\n            0.0900,\n            0.0365,\n            0.0005,\n            0.0025,\n            0.0025,\n            0.0001,\n            0.0001,\n            0.0020,\n            0.0020,\n            0.0050,\n        ]\n\n        for name, frac in zip(massFracNameList, massFracRefValList):\n            cur = self.mat.getMassFrac(name)\n            ref = frac\n            self.assertAlmostEqual(cur, ref)\n\n    def test_01_linearExpansionPercent(self):\n        TcList = [100, 200, 300, 400, 500, 600, 700, 800]\n        refList = [\n            0.09954299999999999,\n            0.22729199999999997,\n            0.36520699999999995,\n            0.513288,\n            0.671535,\n            0.8399479999999999,\n            1.018527,\n            1.207272,\n        ]\n\n        for Tc, val in zip(TcList, 
refList):\n            cur = self.mat.linearExpansionPercent(Tc=Tc)\n            ref = val\n            errorMsg = (\n                \"\\n\\nIncorrect Inconel 625 linearExpansionPercent(Tk=None,Tc=None)\\nReceived:{}\\nExpected:{}\\n\".format(\n                    cur, ref\n                )\n            )\n            self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg)\n\n    def test_02_linearExpansion(self):\n        TcList = [100, 200, 300, 400, 500, 600, 700, 800]\n        refList = [\n            1.22666e-05,\n            1.32832e-05,\n            1.4299800000000002e-05,\n            1.53164e-05,\n            1.6333e-05,\n            1.73496e-05,\n            1.83662e-05,\n            1.93828e-05,\n        ]\n\n        for Tc, val in zip(TcList, refList):\n            cur = self.mat.linearExpansion(Tc=Tc)\n            ref = val\n            errorMsg = \"\\n\\nIncorrect Inconel 625 linearExpansion(Tk=None,Tc=None)\\nReceived:{}\\nExpected:{}\\n\".format(\n                cur, ref\n            )\n            self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg)\n\n    def test_03_pseudoDensity(self):\n        TcList = [100, 200, 300, 400, 500, 600, 700, 800]\n        refList = [\n            8.423222197446128,\n            8.401763522409897,\n            8.378689129846913,\n            8.354019541533887,\n            8.327776582263244,\n            8.299983337593213,\n            8.270664109510587,\n            8.239844370152333,\n        ]\n\n        for Tc, val in zip(TcList, refList):\n            cur = self.mat.pseudoDensity(Tc=Tc)\n            ref = val\n            errorMsg = \"\\n\\nIncorrect Inconel 625 pseudoDensity(Tk=None,Tc=None)\\nReceived:{}\\nExpected:{}\\n\".format(\n                cur, ref\n            )\n            self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg)\n\n    def test_heatCapacity(self):\n        ref = self.mat.heatCapacity(Tc=300)\n        cur = 478.776007\n        self.assertAlmostEqual(ref, cur, 
delta=cur * 0.001)\n\n        ref = self.mat.heatCapacity(Tc=400)\n        cur = 503.399568\n        self.assertAlmostEqual(ref, cur, delta=cur * 0.001)\n\n    def test_propertyValidTemperature(self):\n        self.assertGreater(len(self.mat.propertyValidTemperature), 0)\n\n\nclass InconelX750_TestCase(AbstractMaterialTest, unittest.TestCase):\n    MAT_CLASS = materials.InconelX750\n\n    def test_00_setDefaultMassFracs(self):\n        massFracNameList = [\n            \"NI\",\n            \"CR\",\n            \"FE\",\n            \"TI\",\n            \"AL27\",\n            \"NB93\",\n            \"MN55\",\n            \"SI\",\n            \"S\",\n            \"CU\",\n            \"C\",\n            \"CO59\",\n        ]\n        massFracRefValList = [\n            0.7180,\n            0.1550,\n            0.0700,\n            0.0250,\n            0.0070,\n            0.0095,\n            0.0050,\n            0.0025,\n            0.0001,\n            0.0025,\n            0.0004,\n            0.0050,\n        ]\n\n        for name, frac in zip(massFracNameList, massFracRefValList):\n            cur = self.mat.getMassFrac(name)\n            ref = frac\n            self.assertAlmostEqual(cur, ref)\n\n    def test_01_linearExpansionPercent(self):\n        TcList = [100, 200, 300, 400, 500, 600, 700, 800]\n        refList = [\n            0.09927680000000001,\n            0.2253902,\n            0.36517920000000004,\n            0.5186438000000001,\n            0.6857840000000001,\n            0.8665998000000001,\n            1.0610912000000001,\n            1.2692582000000001,\n        ]\n\n        for Tc, val in zip(TcList, refList):\n            cur = self.mat.linearExpansionPercent(Tc=Tc)\n            ref = val\n            errorMsg = (\n                \"\\n\\nIncorrect Inconel X750 linearExpansionPercent(Tk=None,Tc=None)\\nReceived:{}\\nExpected:{}\\n\".format(\n                    cur, ref\n                )\n            )\n            self.assertAlmostEqual(cur, 
ref, delta=10e-7, msg=errorMsg)\n\n    def test_02_linearExpansion(self):\n        TcList = [100, 200, 300, 400, 500, 600, 700, 800]\n        refList = [\n            1.1927560000000001e-05,\n            1.329512e-05,\n            1.466268e-05,\n            1.603024e-05,\n            1.73978e-05,\n            1.876536e-05,\n            2.013292e-05,\n            2.150048e-05,\n        ]\n\n        for Tc, val in zip(TcList, refList):\n            cur = self.mat.linearExpansion(Tc=Tc)\n            ref = val\n            errorMsg = \"\\n\\nIncorrect Inconel X750 linearExpansion(Tk=None,Tc=None)\\nReceived:{}\\nExpected:{}\\n\".format(\n                cur, ref\n            )\n            self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg)\n\n    def test_03_pseudoDensity(self):\n        TcList = [100, 200, 300, 400, 500, 600, 700, 800]\n        refList = [\n            8.263584211566972,\n            8.242801193765645,\n            8.219855974833411,\n            8.194776170511199,\n            8.167591802868142,\n            8.138335221416156,\n            8.107041018806447,\n            8.073745941486463,\n        ]\n\n        for Tc, val in zip(TcList, refList):\n            cur = self.mat.pseudoDensity(Tc=Tc)\n            ref = val\n            errorMsg = \"\\n\\nIncorrect Inconel X750 pseudoDensity(Tk=None,Tc=None)\\nReceived:{}\\nExpected:{}\\n\".format(\n                cur, ref\n            )\n            self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg)\n\n    def test_heatCapacity(self):\n        ref = self.mat.heatCapacity(Tc=100)\n        cur = 459.61381\n        self.assertAlmostEqual(ref, cur, delta=cur * 0.001)\n\n        ref = self.mat.heatCapacity(Tc=200)\n        cur = 484.93968\n        self.assertAlmostEqual(ref, cur, delta=cur * 0.001)\n\n    def test_propertyValidTemperature(self):\n        self.assertGreater(len(self.mat.propertyValidTemperature), 0)\n\n\nclass Alloy200_TestCase(AbstractMaterialTest, unittest.TestCase):\n    
MAT_CLASS = materials.Alloy200\n\n    def test_nickleContent(self):\n        \"\"\"Assert alloy 200 has more than 99% nickel per its spec.\"\"\"\n        self.assertGreater(self.mat.massFrac[\"NI\"], 0.99)\n\n    def test_linearExpansion(self):\n        ref = self.mat.linearExpansion(Tc=100)\n        cur = 13.3e-6\n        self.assertAlmostEqual(ref, cur, delta=abs(ref * 0.001))\n\n    def test_linearExpansionHotter(self):\n        ref = self.mat.linearExpansion(Tk=873.15)\n        cur = 15.6e-6\n        self.assertAlmostEqual(ref, cur, delta=abs(ref * 0.001))\n\n    def test_propertyValidTemperature(self):\n        self.assertGreater(len(self.mat.propertyValidTemperature), 0)\n\n\nclass CaH2_TestCase(AbstractMaterialTest, unittest.TestCase):\n    MAT_CLASS = materials.CaH2\n\n    def test_pseudoDensity(self):\n        cur = 1.7\n\n        ref = self.mat.pseudoDensity(Tc=100)\n        self.assertAlmostEqual(cur, ref, ref * 0.01)\n\n        ref = self.mat.pseudoDensity(Tc=300)\n        self.assertAlmostEqual(cur, ref, ref * 0.01)\n\n    def test_propertyValidTemperature(self):\n        self.assertEqual(len(self.mat.propertyValidTemperature), 0)\n\n\nclass Hafnium_TestCase(AbstractMaterialTest, unittest.TestCase):\n    MAT_CLASS = materials.Hafnium\n\n    def test_pseudoDensity(self):\n        cur = 13.07\n\n        ref = self.mat.pseudoDensity(Tc=100)\n        self.assertAlmostEqual(cur, ref, ref * 0.01)\n\n        ref = self.mat.pseudoDensity(Tc=300)\n        self.assertAlmostEqual(cur, ref, ref * 0.01)\n\n    def test_propertyValidTemperature(self):\n        self.assertEqual(len(self.mat.propertyValidTemperature), 0)\n\n\nclass HastelloyN_TestCase(AbstractMaterialTest, unittest.TestCase):\n    MAT_CLASS = materials.HastelloyN\n\n    def test_thermalConductivity(self):\n        TcList = [200, 300, 400, 500, 600, 700]\n        refList = [\n            13.171442,\n            14.448584,\n            16.11144,\n            18.16001,\n            20.594294,\n           
 23.414292,\n        ]\n\n        for Tc, val in zip(TcList, refList):\n            cur = self.mat.thermalConductivity(Tc=Tc)\n            ref = val\n            errorMsg = \"\\n\\nIncorrect Hastelloy N thermalConductivity()\\nReceived:{}\\nExpected:{}\\n\".format(cur, ref)\n            self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg)\n\n    def test_heatCapacity(self):\n        TcList = [100, 200, 300, 400, 500, 600, 700]\n        refList = [\n            419.183138,\n            438.728472,\n            459.630622,\n            464.218088,\n            480.092250,\n            556.547128,\n            573.450902,\n        ]\n\n        for Tc, val in zip(TcList, refList):\n            cur = self.mat.heatCapacity(Tc=Tc)\n            ref = val\n            errorMsg = \"\\n\\nIncorrect Hastelloy N heatCapacity()\\nReceived:{}\\nExpected:{}\\n\".format(cur, ref)\n            self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg)\n\n    def test_linearExpansionPercent(self):\n        TcList = [100, 200, 300, 400, 500, 600, 700, 800]\n        refList = [\n            0.0976529128,\n            0.2225103228,\n            0.351926722,\n            0.4874638024,\n            0.630683256,\n            0.7831467748,\n            0.9464160508,\n            1.122052776,\n        ]\n\n        for Tc, val in zip(TcList, refList):\n            cur = self.mat.linearExpansionPercent(Tc=Tc)\n            ref = val\n            errorMsg = \"\\n\\nIncorrect Hastelloy N linearExpansionPercent()\\nReceived:{}\\nExpected:{}\\n\".format(cur, ref)\n            self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg)\n\n    def test_meanCoefficientThermalExpansion(self):\n        TcList = [100, 200, 300, 400, 500, 600, 700, 800]\n        refList = [\n            1.22066141e-05,\n            1.23616846e-05,\n            1.25688115e-05,\n            1.28279948e-05,\n            1.31392345e-05,\n            1.35025306e-05,\n            1.39178831e-05,\n            
1.4385292e-05,\n        ]\n\n        for Tc, val in zip(TcList, refList):\n            cur = self.mat.meanCoefficientThermalExpansion(Tc=Tc)\n            ref = val\n            errorMsg = \"\\n\\nIncorrect Hastelloy N meanCoefficientThermalExpansion()\\nReceived:{}\\nExpected:{}\\n\".format(\n                cur, ref\n            )\n            self.assertAlmostEqual(cur, ref, delta=10e-7, msg=errorMsg)\n\n    def test_propertyValidTemperature(self):\n        self.assertGreater(len(self.mat.propertyValidTemperature), 0)\n\n\nclass TZM_TestCase(AbstractMaterialTest, unittest.TestCase):\n    MAT_CLASS = materials.TZM\n\n    def test_00_applyInputParams(self):\n        massFracNameList = [\"C\", \"TI\", \"ZR\", \"MO\"]\n        massFracRefValList = [2.50749e-05, 0.002502504, 0.000761199, 0.996711222]\n\n        self.mat.applyInputParams()\n\n        for name, frac in zip(massFracNameList, massFracRefValList):\n            cur = self.mat.massFrac[name]\n            ref = frac\n            self.assertEqual(cur, ref)\n\n    def test_01_pseudoDensity(self):\n        ref = 10.16  # g/cc\n        cur = self.mat.pseudoDensity(Tc=21.11)\n        self.assertEqual(cur, ref)\n\n    def test_02_linearExpansionPercent(self):\n        TcList = [\n            21.11,\n            456.11,\n            574.44,\n            702.22,\n            840.56,\n            846.11,\n            948.89,\n            1023.89,\n            1146.11,\n            1287.78,\n            1382.22,\n        ]\n        refList = [\n            0.0,\n            1.60e-01,\n            2.03e-01,\n            2.53e-01,\n            3.03e-01,\n            3.03e-01,\n            3.42e-01,\n            3.66e-01,\n            4.21e-01,\n            4.68e-01,\n            5.04e-01,\n        ]\n\n        for Tc, val in zip(TcList, refList):\n            cur = self.mat.linearExpansionPercent(Tc=Tc)\n            ref = val\n            errorMsg = \"\\n\\nIncorrect TZM 
linearExpansionPercent(Tk=None,Tc=None)\\nReceived:{}\\nExpected:{}\\n\".format(\n                cur, ref\n            )\n            self.assertAlmostEqual(cur, ref, delta=10e-3, msg=errorMsg)\n\n    def test_propertyValidTemperature(self):\n        self.assertGreater(len(self.mat.propertyValidTemperature), 0)\n\n\nclass YttriumOxide_TestCase(AbstractMaterialTest, unittest.TestCase):\n    MAT_CLASS = materials.Y2O3\n\n    def test_pseudoDensity(self):\n        cur = 5.03\n\n        ref = self.mat.pseudoDensity(Tc=25)\n        self.assertAlmostEqual(cur, ref, 2)\n\n    def test_linearExpansionPercent(self):\n        ref = self.mat.linearExpansionPercent(Tc=100)\n        cur = 0.069662\n        self.assertAlmostEqual(ref, cur, delta=abs(ref * 0.001))\n\n        ref = self.mat.linearExpansionPercent(Tc=100)\n        cur = 0.0696622\n        self.assertAlmostEqual(ref, cur, delta=abs(ref * 0.001))\n\n    def test_propertyValidTemperature(self):\n        self.assertGreater(len(self.mat.propertyValidTemperature), 0)\n\n\nclass ZincOxide_TestCase(AbstractMaterialTest, unittest.TestCase):\n    MAT_CLASS = materials.ZnO\n\n    def test_density(self):\n        cur = 5.61\n\n        ref = self.mat.density(Tk=10.12)\n        self.assertAlmostEqual(cur, ref, 2)\n\n    def test_linearExpansionPercent(self):\n        ref = self.mat.linearExpansionPercent(Tc=100)\n        cur = 0.04899694350661124\n        self.assertAlmostEqual(ref, cur, delta=abs(ref * 0.001))\n\n        ref = self.mat.linearExpansionPercent(Tc=300)\n        cur = 0.15825020246870625\n        self.assertAlmostEqual(ref, cur, delta=abs(ref * 0.001))\n\n    def test_propertyValidTemperature(self):\n        self.assertGreater(len(self.mat.propertyValidTemperature), 0)\n\n\nclass FuelMaterial_TestCase(unittest.TestCase):\n    baseInput = r\"\"\"\nnuclide flags:\n    U: {burn: false, xs: true}\n    ZR: {burn: false, xs: true}\ncustom isotopics:\n    customIsotopic1:\n        input format: mass fractions\n        
density: 1\n        U: 1\n    customIsotopic2:\n        input format: mass fractions\n        density: 1\n        ZR: 1\nblocks:\n    fuel: &block_fuel\n        fuel1: &component_fuel_fuel1\n            shape: Hexagon\n            material: UZr\n            Tinput: 600.0\n            Thot: 600.0\n            ip: 0.0\n            mult: 1\n            op: 10.0\n        fuel2: &component_fuel_fuel2\n            shape: Hexagon\n            material: UZr\n            Tinput: 600.0\n            Thot: 600.0\n            ip: 0.0\n            mult: 1\n            op: 10.0\nassemblies:\n    fuel a: &assembly_a\n        specifier: IC\n        blocks: [*block_fuel]\n        height: [1.0]\n        axial mesh points: [1]\n        xs types: [A]\n\"\"\"\n\n    def loadAssembly(self, materialModifications):\n        yamlString = self.baseInput + \"\\n\" + materialModifications\n        design = blueprints.Blueprints.load(yamlString)\n        design._prepConstruction(settings.Settings())\n        return design.assemblies[\"fuel a\"]\n\n    def test_class1Class2_class1_wt_frac(self):\n        # should error because class1_wt_frac not in (0,1)\n        with self.assertRaises(ValueError):\n            _a = self.loadAssembly(\n                \"\"\"\n        material modifications:\n            class1_wt_frac: [2.0]\n            class1_custom_isotopics: [customIsotopic1]\n            class2_custom_isotopics: [customIsotopic2]\n        \"\"\"\n            )\n\n    def test_class1Class2_classX_custom_isotopics(self):\n        # should error because class1_custom_isotopics doesn't exist\n        with self.assertRaises(KeyError):\n            _a = self.loadAssembly(\n                \"\"\"\n        material modifications:\n            class1_wt_frac: [0.5]\n            class1_custom_isotopics: [fakeIsotopic]\n            class2_custom_isotopics: [customIsotopic2]\n        \"\"\"\n            )\n\n        # should error because class2_custom_isotopics doesn't exist\n        with 
self.assertRaises(KeyError):\n            _a = self.loadAssembly(\n                \"\"\"\n        material modifications:\n            class1_wt_frac: [0.5]\n            class1_custom_isotopics: [customIsotopic1]\n            class2_custom_isotopics: [fakeIsotopic]\n        \"\"\"\n            )\n"
  },
  {
    "path": "armi/materials/tests/test_sic.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Test for SiC.\"\"\"\n\nimport unittest\n\nfrom armi.materials.siC import SiC\nfrom armi.materials.tests import test_materials\n\n\nclass TestSiC(test_materials.AbstractMaterialTest, unittest.TestCase):\n    \"\"\"SiC tests.\"\"\"\n\n    MAT_CLASS = SiC\n\n    def test_pseudoDensity(self):\n        cur = self.mat.pseudoDensity(Tc=25)\n        ref = 3.159\n        delta = ref * 0.001\n        self.assertAlmostEqual(cur, ref, delta=delta)\n\n    def test_meltingPoint(self):\n        cur = self.mat.meltingPoint()\n        ref = 3003\n        delta = ref * 0.0001\n        self.assertAlmostEqual(cur, ref, delta=delta)\n\n    def test_heatCapacity(self):\n        delta = 0.0001\n\n        cur = self.mat.heatCapacity(300)\n        ref = 982.20789\n        self.assertAlmostEqual(cur, ref, delta=delta)\n\n        cur = self.mat.heatCapacity(1500)\n        ref = 1330.27867\n        self.assertAlmostEqual(cur, ref, delta=delta)\n\n    def test_propertyValidTemperature(self):\n        self.assertGreater(len(self.mat.propertyValidTemperature), 0)\n"
  },
  {
    "path": "armi/materials/tests/test_sulfur.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for sulfur.\"\"\"\n\nimport unittest\n\nfrom armi.materials.sulfur import Sulfur\nfrom armi.materials.tests.test_materials import AbstractMaterialTest\n\n\nclass Sulfur_TestCase(AbstractMaterialTest, unittest.TestCase):\n    MAT_CLASS = Sulfur\n    VALID_TEMP_K = 400\n\n    def setUp(self):\n        AbstractMaterialTest.setUp(self)\n        self.mat = Sulfur()\n\n        self.Sulfur_sulfur_density_frac = Sulfur()\n        self.Sulfur_sulfur_density_frac.applyInputParams(sulfur_density_frac=0.5)\n\n        self.Sulfur_TD_frac = Sulfur()\n        self.Sulfur_TD_frac.applyInputParams(TD_frac=0.4)\n\n        self.Sulfur_both = Sulfur()\n        self.Sulfur_both.applyInputParams(sulfur_density_frac=0.5, TD_frac=0.4)\n\n    def test_sulfur_density_frac(self):\n        tk = 410\n        ref = self.mat.pseudoDensity(tk)\n\n        reduced = self.Sulfur_sulfur_density_frac.pseudoDensity(tk)\n        self.assertAlmostEqual(ref * 0.5, reduced)\n\n        reduced = self.Sulfur_TD_frac.pseudoDensity(tk)\n        self.assertAlmostEqual(ref * 0.4, reduced)\n\n        reduced = self.Sulfur_both.pseudoDensity(tk)\n        self.assertAlmostEqual(ref * 0.4, reduced)\n\n    def test_propertyValidTemperature(self):\n        self.assertGreater(len(self.mat.propertyValidTemperature), 0)\n"
  },
  {
    "path": "armi/materials/tests/test_thoriumOxide.py",
    "content": "# Copyright 2022 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for ThO2.\"\"\"\n\nimport unittest\n\nfrom armi.materials.tests.test_materials import AbstractMaterialTest\nfrom armi.materials.thoriumOxide import ThoriumOxide\n\n\nclass ThoriumOxide_TestCase(AbstractMaterialTest, unittest.TestCase):\n    MAT_CLASS = ThoriumOxide\n\n    def setUp(self):\n        AbstractMaterialTest.setUp(self)\n        self.mat = ThoriumOxide()\n\n        self.ThoriumOxide_TD_frac = ThoriumOxide()\n        self.ThoriumOxide_TD_frac.applyInputParams(TD_frac=0.4)\n\n    def test_theoretical_pseudoDensity(self):\n        ref = self.mat.pseudoDensity(500)\n\n        reduced = self.ThoriumOxide_TD_frac.pseudoDensity(500)\n        self.assertAlmostEqual(ref * 0.4, reduced)\n\n    def test_linearExpansionPercent(self):\n        self.assertAlmostEqual(self.mat.linearExpansionPercent(Tk=500), 0.195334)\n\n    def test_propertyValidTemperature(self):\n        self.assertGreater(len(self.mat.propertyValidTemperature), 0)\n"
  },
  {
    "path": "armi/materials/tests/test_uZr.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for simplified UZr material.\"\"\"\n\nimport pickle\nfrom unittest import TestCase\n\nfrom armi.materials.uZr import UZr\n\n\nclass UZR_TestCase(TestCase):\n    MAT_CLASS = UZr\n\n    def setUp(self):\n        self.mat = self.MAT_CLASS()\n\n    def test_isPicklable(self):\n        \"\"\"Test that materials are picklable so we can do MPI communication of state.\n\n        .. test:: Test the material base class has temp-dependent thermal conductivity curves.\n            :id: T_ARMI_MAT_PROPERTIES0\n            :tests: R_ARMI_MAT_PROPERTIES\n        \"\"\"\n        stream = pickle.dumps(self.mat)\n        mat = pickle.loads(stream)\n\n        # check a property that is sometimes interpolated.\n        self.assertEqual(self.mat.thermalConductivity(500), mat.thermalConductivity(500))\n\n    def test_TD(self):\n        \"\"\"Test the material theoretical density.\"\"\"\n        self.assertEqual(self.mat.getTD(), self.mat.theoreticalDensityFrac)\n\n        self.mat.clearCache()\n        self.mat._setCache(\"dummy\", 666)\n        self.assertEqual(self.mat.cached, {\"dummy\": 666})\n        self.mat.adjustTD(0.5)\n        self.assertEqual(0.5, self.mat.theoreticalDensityFrac)\n        self.assertEqual(self.mat.cached, {})\n\n    def test_duplicate(self):\n        \"\"\"Test the material duplication.\n\n        .. 
test:: Materials shall calc mass fracs at init.\n            :id: T_ARMI_MAT_FRACS5\n            :tests: R_ARMI_MAT_FRACS\n        \"\"\"\n        mat = self.mat.duplicate()\n\n        self.assertEqual(len(mat.massFrac), len(self.mat.massFrac))\n        for key in self.mat.massFrac:\n            self.assertEqual(mat.massFrac[key], self.mat.massFrac[key])\n\n        self.assertEqual(mat.parent, self.mat.parent)\n        self.assertEqual(mat.refDens, self.mat.refDens)\n        self.assertEqual(mat.theoreticalDensityFrac, self.mat.theoreticalDensityFrac)\n\n    def test_cache(self):\n        \"\"\"Test the material cache.\"\"\"\n        self.mat.clearCache()\n        self.assertEqual(len(self.mat.cached), 0)\n\n        self.mat._setCache(\"Emmy\", \"Noether\")\n        self.assertEqual(len(self.mat.cached), 1)\n\n        val = self.mat._getCached(\"Emmy\")\n        self.assertEqual(val, \"Noether\")\n\n    def test_densityKgM3(self):\n        \"\"\"Test the density for kg/m^3.\n\n        .. test:: Test the material base class has temp-dependent density.\n            :id: T_ARMI_MAT_PROPERTIES2\n            :tests: R_ARMI_MAT_PROPERTIES\n        \"\"\"\n        dens = self.mat.density(500)\n        densKgM3 = self.mat.densityKgM3(500)\n        self.assertEqual(dens * 1000.0, densKgM3)\n\n    def test_pseudoDensityKgM3(self):\n        \"\"\"Test the pseudo density for kg/m^3.\n\n        .. test:: Test the material base class has temp-dependent 2D density.\n            :id: T_ARMI_MAT_PROPERTIES3\n            :tests: R_ARMI_MAT_PROPERTIES\n        \"\"\"\n        dens = self.mat.pseudoDensity(500)\n        densKgM3 = self.mat.pseudoDensityKgM3(500)\n        self.assertEqual(dens * 1000.0, densKgM3)\n\n    def test_density(self):\n        \"\"\"Test that all materials produce a zero density from density.\n\n        .. 
test:: Test the material base class has temp-dependent density.\n            :id: T_ARMI_MAT_PROPERTIES1\n            :tests: R_ARMI_MAT_PROPERTIES\n        \"\"\"\n        self.assertNotEqual(self.mat.density(500), 0)\n\n        cur = self.mat.density(400)\n        ref = 15.94\n        delta = ref * 0.01\n        self.assertAlmostEqual(cur, ref, delta=delta)\n\n    def test_propertyValidTemperature(self):\n        self.assertEqual(len(self.mat.propertyValidTemperature), 0)\n"
  },
  {
    "path": "armi/materials/tests/test_water.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Unit tests for water materials.\"\"\"\n\nimport unittest\n\nfrom armi.materials.water import SaturatedSteam, SaturatedWater, Water\n\n\nclass TestWater(unittest.TestCase):\n    \"\"\"Unit tests for water materials.\"\"\"\n\n    def test_waterAtFreezing(self):\n        \"\"\"\n        Reproduce verification results from IAPWS-IF97 for water at 0C.\n\n        http://www.iapws.org/relguide/supsat.pdf\n\n        .. 
test:: There is a base class for fluid materials.\n            :id: T_ARMI_MAT_FLUID0\n            :tests: R_ARMI_MAT_FLUID\n        \"\"\"\n        water = SaturatedWater()\n        steam = SaturatedSteam()\n\n        Tk = 273.16\n        ref_vapor_pressure = 611.657\n        ref_dp_dT = 44.436693\n        ref_saturated_water_rho = 999.789\n        ref_saturated_steam_rho = 0.00485426\n        ref_alpha = -11.529101\n        ref_saturated_water_enthalpy = 0.611786\n        ref_saturated_steam_enthalpy = 2500.5e3\n        ref_phi = -0.04\n        ref_saturated_water_entropy = 0\n        ref_saturated_steam_entropy = 9.154e3\n\n        self.assertAlmostEqual(ref_vapor_pressure, water.vaporPressure(Tk=Tk), 3)\n        self.assertAlmostEqual(ref_vapor_pressure, steam.vaporPressure(Tk=Tk), 3)\n\n        self.assertAlmostEqual(ref_dp_dT, water.vaporPressurePrime(Tk=Tk), 3)\n        self.assertAlmostEqual(ref_dp_dT, steam.vaporPressurePrime(Tk=Tk), 3)\n\n        self.assertAlmostEqual(ref_saturated_water_rho, water.pseudoDensityKgM3(Tk=Tk), 0)\n        self.assertAlmostEqual(ref_saturated_steam_rho, steam.pseudoDensityKgM3(Tk=Tk), 0)\n\n        self.assertAlmostEqual(ref_alpha, water.auxiliaryQuantitySpecificEnthalpy(Tk=Tk), 3)\n        self.assertAlmostEqual(ref_alpha, steam.auxiliaryQuantitySpecificEnthalpy(Tk=Tk), 3)\n\n        self.assertAlmostEqual(ref_saturated_water_enthalpy, water.enthalpy(Tk=Tk), 2)\n        self.assertAlmostEqual(ref_saturated_steam_enthalpy / steam.enthalpy(Tk=Tk), 1, 2)\n\n        self.assertAlmostEqual(ref_phi, water.auxiliaryQuantitySpecificEntropy(Tk=Tk), 2)\n        self.assertAlmostEqual(ref_phi, steam.auxiliaryQuantitySpecificEntropy(Tk=Tk), 2)\n\n        self.assertAlmostEqual(ref_saturated_water_entropy, water.entropy(Tk=Tk), 3)\n        self.assertAlmostEqual(ref_saturated_steam_entropy / steam.entropy(Tk=Tk), 1, 3)\n\n    def test_waterAtBoiling(self):\n        \"\"\"\n        Reproduce verification results from IAPWS-IF97 for water 
at 100C.\n\n        http://www.iapws.org/relguide/supsat.pdf\n        \"\"\"\n        water = SaturatedWater()\n        steam = SaturatedSteam()\n\n        Tk = 373.1243\n        ref_vapor_pressure = 0.101325e6\n        ref_dp_dT = 3.616e3\n        ref_saturated_water_rho = 958.365\n        ref_saturated_steam_rho = 0.597586\n        ref_alpha = 417.65e3\n        ref_saturated_water_enthalpy = 417.05e3\n        ref_saturated_steam_enthalpy = 2675.7e3\n        ref_phi = 1.303e3\n        ref_saturated_water_entropy = 1.307e3\n        ref_saturated_steam_entropy = 7.355e3\n\n        self.assertAlmostEqual(ref_vapor_pressure / water.vaporPressure(Tk=Tk), 1, 3)\n        self.assertAlmostEqual(ref_vapor_pressure / steam.vaporPressure(Tk=Tk), 1, 3)\n\n        self.assertAlmostEqual(ref_dp_dT / water.vaporPressurePrime(Tk=Tk), 1, 3)\n        self.assertAlmostEqual(ref_dp_dT / steam.vaporPressurePrime(Tk=Tk), 1, 3)\n\n        self.assertAlmostEqual(ref_saturated_water_rho, water.pseudoDensityKgM3(Tk=Tk), 0)\n        self.assertAlmostEqual(ref_saturated_steam_rho, steam.pseudoDensityKgM3(Tk=Tk), 0)\n\n        self.assertAlmostEqual(ref_alpha / water.auxiliaryQuantitySpecificEnthalpy(Tk=Tk), 1, 3)\n        self.assertAlmostEqual(ref_alpha / steam.auxiliaryQuantitySpecificEnthalpy(Tk=Tk), 1, 3)\n\n        self.assertAlmostEqual(ref_saturated_water_enthalpy / water.enthalpy(Tk=Tk), 1, 2)\n        self.assertAlmostEqual(ref_saturated_steam_enthalpy / steam.enthalpy(Tk=Tk), 1, 2)\n\n        self.assertAlmostEqual(ref_phi / water.auxiliaryQuantitySpecificEntropy(Tk=Tk), 1, 3)\n        self.assertAlmostEqual(ref_phi / steam.auxiliaryQuantitySpecificEntropy(Tk=Tk), 1, 3)\n\n        self.assertAlmostEqual(ref_saturated_water_entropy / water.entropy(Tk=Tk), 1, 3)\n        self.assertAlmostEqual(ref_saturated_steam_entropy / steam.entropy(Tk=Tk), 1, 3)\n\n    def test_waterAtCritcalPoint(self):\n        \"\"\"\n        Reproduce verification results from IAPWS-IF97 for water at 
647.096K.\n\n        http://www.iapws.org/relguide/supsat.pdf\n        \"\"\"\n        water = SaturatedWater()\n        steam = SaturatedSteam()\n\n        Tk = 647.096\n        ref_vapor_pressure = 22.064e6\n        ref_dp_dT = 268e3\n        ref_saturated_water_rho = 322\n        ref_saturated_steam_rho = 322\n        ref_alpha = 1548e3\n        ref_saturated_water_enthalpy = 2086.6e3\n        ref_saturated_steam_enthalpy = 2086.6e3\n        ref_phi = 3.578e3\n        ref_saturated_water_entropy = 4.410e3\n        ref_saturated_steam_entropy = 4.410e3\n\n        self.assertAlmostEqual(ref_vapor_pressure / water.vaporPressure(Tk=Tk), 1, 3)\n        self.assertAlmostEqual(ref_vapor_pressure / steam.vaporPressure(Tk=Tk), 1, 3)\n\n        self.assertAlmostEqual(ref_dp_dT / water.vaporPressurePrime(Tk=Tk), 1, 3)\n        self.assertAlmostEqual(ref_dp_dT / steam.vaporPressurePrime(Tk=Tk), 1, 3)\n\n        self.assertAlmostEqual(ref_saturated_water_rho, water.pseudoDensityKgM3(Tk=Tk), 0)\n        self.assertAlmostEqual(ref_saturated_steam_rho, steam.pseudoDensityKgM3(Tk=Tk), 0)\n\n        self.assertAlmostEqual(ref_alpha / water.auxiliaryQuantitySpecificEnthalpy(Tk=Tk), 1, 3)\n        self.assertAlmostEqual(ref_alpha / steam.auxiliaryQuantitySpecificEnthalpy(Tk=Tk), 1, 3)\n\n        self.assertAlmostEqual(ref_saturated_water_enthalpy / water.enthalpy(Tk=Tk), 1, 2)\n        self.assertAlmostEqual(ref_saturated_steam_enthalpy / steam.enthalpy(Tk=Tk), 1, 2)\n\n        self.assertAlmostEqual(ref_phi / water.auxiliaryQuantitySpecificEntropy(Tk=Tk), 1, 3)\n        self.assertAlmostEqual(ref_phi / steam.auxiliaryQuantitySpecificEntropy(Tk=Tk), 1, 3)\n\n        self.assertAlmostEqual(ref_saturated_water_entropy / water.entropy(Tk=Tk), 1, 3)\n        self.assertAlmostEqual(ref_saturated_steam_entropy / steam.entropy(Tk=Tk), 1, 3)\n\n    def test_massFrac(self):\n        for water in [SaturatedWater(), SaturatedSteam()]:\n            massFracO = water.getMassFrac(\"O\")\n        
    massFracH = water.getMassFrac(\"H\")\n            self.assertAlmostEqual(massFracO, 0.888, places=3)\n            self.assertAlmostEqual(massFracO + massFracH, 1.0)\n\n    def test_propertyValidTemperature(self):\n        water = SaturatedWater()\n        self.assertEqual(len(water.propertyValidTemperature), 0)\n\n        steam = SaturatedSteam()\n        self.assertEqual(len(steam.propertyValidTemperature), 0)\n\n    def test_validateNames(self):\n        water = Water()\n        self.assertEqual(water.name, \"Water\")\n\n        sat = SaturatedWater()\n        self.assertEqual(sat.name, \"SaturatedWater\")\n\n        steam = SaturatedSteam()\n        self.assertEqual(steam.name, \"SaturatedSteam\")\n"
  },
  {
    "path": "armi/materials/thU.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThorium Uranium metal.\n\nData is from [IAEA-TECDOC-1450]_.\n\nThe data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to\nthis file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data\ncontained in this file should not be used in production simulations.\n\"\"\"\n\nfrom armi import runLog\nfrom armi.materials.material import FuelMaterial\nfrom armi.utils.units import getTk\n\n\nclass ThU(FuelMaterial):\n    enrichedNuclide = \"U233\"\n    propertyValidTemperature = {\"linear expansion\": ((30, 600), \"K\")}\n\n    def __init__(self):\n        FuelMaterial.__init__(self)\n        # density in g/cc from IAEA TE 1450\n        self.refDens = 11.68\n\n    def getEnrichment(self):\n        return self.getMassFrac(\"U233\") / (self.getMassFrac(\"U233\") + self.getMassFrac(\"TH232\"))\n\n    def applyInputParams(self, U233_wt_frac=None, *args, **kwargs):\n        runLog.warning(\n            \"Material {} has not yet been tested for accuracy\".format(\"ThU\"),\n            single=True,\n            label=\"ThU applyInputParams\",\n        )\n\n        if U233_wt_frac is not None:\n            self.adjustMassEnrichment(U233_wt_frac)\n\n        FuelMaterial.applyInputParams(self, *args, **kwargs)\n\n    def setDefaultMassFracs(self):\n        
self.setMassFrac(\"TH232\", 1.0)\n        self.setMassFrac(\"U233\", 0.0)\n\n    def linearExpansion(self, Tk=None, Tc=None):\n        \"\"\"Linear expansion in m/m/K from IAEA TE 1450.\"\"\"\n        Tk = getTk(Tc, Tk)\n        self.checkPropertyTempRange(\"linear expansion\", Tk)\n        return 11.9e-6\n\n    def thermalConductivity(self, Tk=None, Tc=None):\n        \"\"\"Thermal conductivity in W/m-K from IAEA TE 1450.\"\"\"\n        Tk = getTk(Tc, Tk)\n        return 43.1\n\n    def meltingPoint(self):\n        \"\"\"Melting point in K from IAEA TE 1450.\"\"\"\n        return 2025.0\n"
  },
  {
    "path": "armi/materials/thorium.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThorium Metal.\n\nData is from [IAEA-TECDOC-1450]_.\n\nThe data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to\nthis file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data\ncontained in this file should not be used in production simulations.\n\"\"\"\n\nfrom armi.materials.material import FuelMaterial\nfrom armi.utils.units import getTk\n\n\nclass Thorium(FuelMaterial):\n    propertyValidTemperature = {\"linear expansion\": ((30, 600), \"K\")}\n\n    def __init__(self):\n        FuelMaterial.__init__(self)\n        self.refDens = 11.68\n\n    def setDefaultMassFracs(self):\n        self.setMassFrac(\"TH232\", 1.0)\n\n    def linearExpansion(self, Tk=None, Tc=None):\n        r\"\"\"Linear Expansion in m/m/K from IAEA TECDOC 1450.\"\"\"\n        Tk = getTk(Tc, Tk)\n        self.checkPropertyTempRange(\"linear expansion\", Tk)\n\n        return 11.9e-6\n\n    def thermalConductivity(self, Tk=None, Tc=None):\n        r\"\"\"W/m-K from IAEA TE 1450.\"\"\"\n        return 43.1\n\n    def meltingPoint(self):\n        \"\"\"Melting point in K from IAEA TE 1450.\"\"\"\n        return 2025.0\n"
  },
  {
    "path": "armi/materials/thoriumOxide.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThorium Oxide solid ceramic.\n\nData is from [IAEA-TECDOC-1450]_.\n\n.. [IAEA-TECDOC-1450] Thorium fuel cycle -- Potential benefits and challenges, IAEA-TECDOC-1450 (2005).\n    https://www-pub.iaea.org/mtcd/publications/pdf/te_1450_web.pdf\n\nThe data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to\nthis file for a fully worked example of an ARMI material. And this material has proven useful for testing. 
The data\ncontained in this file should not be used in production simulations.\n\"\"\"\n\nfrom armi import runLog\nfrom armi.materials.material import FuelMaterial, Material, SimpleSolid\nfrom armi.utils.units import getTk\n\n\nclass ThoriumOxide(FuelMaterial, SimpleSolid):\n    propertyValidTemperature = {\"linear expansion\": ((298, 1223), \"K\")}\n\n    def __init__(self):\n        Material.__init__(self)\n        self.refDens = 10.00\n\n    def applyInputParams(self, TD_frac=None, *args, **kwargs):\n        if TD_frac is not None:\n            if TD_frac > 1.0:\n                runLog.warning(\n                    f\"Theoretical density frac for {self} is {TD_frac}, which is >1\",\n                    single=True,\n                    label=\"Large theoretical density\",\n                )\n            elif TD_frac == 0:\n                runLog.warning(\n                    f\"Theoretical density frac for {self} is zero!\",\n                    single=True,\n                    label=\"Zero theoretical density\",\n                )\n            elif TD_frac < 0:\n                runLog.error(\n                    \"TD_frac is entered as negative. This is not allowed!\",\n                    single=True,\n                    label=\"Negative TD_frac\",\n                )\n            self.adjustTD(TD_frac)\n\n        FuelMaterial.applyInputParams(self, *args, **kwargs)\n\n    def setDefaultMassFracs(self):\n        r\"\"\"ThO2 mass fractions. Using Pure Th-232. 
100% 232.\n\n        Thorium: 232.030806 g/mol\n        Oxygen:  15.9994 g/mol\n\n        2 moles of oxygen/1 mole of Thorium\n\n        grams of Th-232 = 232.030806 g/mol* 1 mol  =  232.030806 g\n        grams of Oxygen = 15.9994 g/mol* 2 mol = 31.9988 g\n        total=264.029606 g.\n        Mass fractions are computed from this.\n        \"\"\"\n        self.setMassFrac(\"TH232\", 0.8788)\n        self.setMassFrac(\"O16\", 0.1212)\n\n    def linearExpansion(self, Tk=None, Tc=None):\n        r\"\"\"Linear expansion in m/m/K from IAEA TE 1450.\"\"\"\n        Tk = getTk(Tc, Tk)\n        self.checkPropertyTempRange(\"linear expansion\", Tk)\n\n        return 9.67e-6\n\n    def linearExpansionPercent(self, Tk=None, Tc=None):\n        \"\"\"\n        Approximate the linear thermal expansion percent from the linear expansion\n        coefficient, taking 298K as the reference temperature.\n        \"\"\"\n        Tk = getTk(Tc=Tc, Tk=Tk)\n        linearExpansionCoef = self.linearExpansion(Tk=Tk)\n\n        return 100 * (linearExpansionCoef * (Tk - 298))\n\n    def thermalConductivity(self, Tk=None, Tc=None):\n        r\"\"\"Thermal conductivity in W/m-K from IAEA TE 1450.\"\"\"\n        return 6.20\n\n    def meltingPoint(self):\n        r\"\"\"Melting point in K from IAEA TE 1450.\"\"\"\n        return 3643.0\n\n    def density(self, Tk=None, Tc=None):\n        return Material.density(self, Tk, Tc) * self.getTD()\n\n\nclass ThO2(ThoriumOxide):\n    \"\"\"Another name for ThoriumOxide.\"\"\"\n\n    pass\n"
  },
  {
    "path": "armi/materials/uZr.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nSimplified UZr alloy.\n\nThis is a notional U-10Zr material based on [Chandrabhanu]_.\n\nThe data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to\nthis file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data\ncontained in this file should not be used in production simulations.\n\"\"\"\n\nfrom armi.materials import material\nfrom armi.utils import units\n\n\nclass UZr(material.FuelMaterial):\n    \"\"\"\n    Simplified UZr fuel alloy.\n\n    .. warning:: This is an academic-quality material.\n        Only the 10% Zr-frac properties are present.\n        If you use a Zr-frac other than 10%, these properties will be incorrect. Bring\n        in user-provided materials via plugins when necessary.\n\n    .. [Chandrabhanu] Chandrabhanu Basak, G.J. Prasad, H.S. Kamath, N. 
Prabhu,\n        An evaluation of the properties of As-cast U-rich UZr alloys,\n        Journal of Alloys and Compounds,\n        Volume 480, Issue 2,\n        2009,\n        Pages 857-862,\n        ISSN 0925-8388,\n        https://doi.org/10.1016/j.jallcom.2009.02.077.\n    \"\"\"\n\n    enrichedNuclide = \"U235\"\n    zrFracDefault = 0.10\n    uFracDefault = 1.0 - zrFracDefault\n\n    def __init__(self):\n        material.Material.__init__(self)\n\n    def setDefaultMassFracs(self):\n        \"\"\"U-Pu-Zr mass fractions.\"\"\"\n        u235Enrichment = 0.1\n        self.setMassFrac(\"ZR\", self.zrFracDefault)\n        self.setMassFrac(\"U235\", u235Enrichment * self.uFracDefault)\n        self.setMassFrac(\"U238\", (1.0 - u235Enrichment) * self.uFracDefault)\n        self._calculateReferenceDensity(self.zrFracDefault, self.uFracDefault)\n\n    def applyInputParams(self, U235_wt_frac=None, ZR_wt_frac=None, *args, **kwargs):\n        \"\"\"Apply user input.\"\"\"\n        ZR_wt_frac = self.zrFracDefault if ZR_wt_frac is None else ZR_wt_frac\n        U235_wt_frac = 0.1 if U235_wt_frac is None else U235_wt_frac\n\n        uFrac = 1.0 - ZR_wt_frac\n        self.setMassFrac(\"ZR\", ZR_wt_frac)\n        self.setMassFrac(\"U235\", U235_wt_frac * uFrac)\n        self.setMassFrac(\"U238\", (1.0 - U235_wt_frac) * uFrac)\n        self._calculateReferenceDensity(ZR_wt_frac, uFrac)\n\n        material.FuelMaterial.applyInputParams(self, *args, **kwargs)\n\n    def _calculateReferenceDensity(self, zrFrac, uFrac):\n        \"\"\"Calculates the reference mass density in g/cc of a U-Pu-Zr alloy at 293K with Vergard's law.\"\"\"\n        # use Vergard's law to mix densities by weight fraction at 293K\n        u0 = 19.1\n        zr0 = 6.52\n        specificVolume = uFrac / u0 + zrFrac / zr0\n        self.refDens = 1.0 / specificVolume\n\n    def linearExpansionPercent(self, Tk=None, Tc=None):\n        \"\"\"Gets the linear expansion from eq. 
3 in [Chandrabhanu]_ for U-10Zr.\"\"\"\n        tk = units.getTk(Tc, Tk)\n        tk2 = tk * tk\n        tk3 = tk2 * tk\n        return -0.73 + 3.489e-3 * tk - 5.154e-6 * tk2 + 4.39e-9 * tk3\n"
  },
  {
    "path": "armi/materials/uranium.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nUranium metal.\n\nMuch info is from [AAAFuels]_.\n\n.. [AAAFuels]  Kim, Y S, and Hofman, G L. AAA fuels handbook.. United States: N. p., 2003. Web. doi:10.2172/822554.\n\nThe data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to\nthis file for a fully worked example of an ARMI material. And this material has proven useful for testing. 
The data\ncontained in this file should not be used in production simulations.\n\"\"\"\n\nfrom numpy import interp\n\nfrom armi import runLog\nfrom armi.materials.material import FuelMaterial\nfrom armi.utils.units import getTk\n\n\nclass Uranium(FuelMaterial):\n    enrichedNuclide = \"U235\"\n\n    materialIntro = \"\"\n\n    propertyNotes = {\"thermal conductivity\": \"\"}\n\n    propertyRawData = {\"thermal conductivity\": \"\"}\n\n    propertyUnits = {\"thermal conductivity\": \"W/m-K\", \"heat capacity\": \"J/kg-K\"}\n\n    propertyEquation = {\"thermal conductivity\": \"21.73 + 0.01591T + 5.907&#215;10<super>-6</super>T<super>2</super>\"}\n\n    _heatCapacityTableK = [\n        298,\n        300,\n        400,\n        500,\n        600,\n        700,\n        800,\n        900,\n        941.9,\n        942,\n        1000,\n        1048.9,\n        1049,\n        1100,\n        1200,\n        1300,\n        1400,\n        1407.9,\n        1408,\n        1500,\n        1600,\n        1700,\n        1800,\n        1900,\n        2000,\n        2100,\n        2200,\n        2400,\n    ]\n\n    _heatCapacityTable = [\n        27.665,\n        27.700,\n        29.684,\n        31.997,\n        34.762,\n        38.021,\n        41.791,\n        46.081,\n        48.038,\n        42.928,\n        42.928,\n        42.928,\n        38.284,\n        38.284,\n        38.284,\n        38.284,\n        38.284,\n        38.284,\n        48.660,\n        48.660,\n        48.660,\n        48.660,\n        48.660,\n        48.660,\n        48.660,\n        48.660,\n        48.660,\n        48.660,\n    ]  # J/K/mol\n\n    _densityTableK = [\n        293,\n        400,\n        500,\n        600,\n        700,\n        800,\n        900,\n        940.9,\n        941,\n        1000,\n        1047.9,\n        1048,\n        1100,\n        1200,\n        1400,\n        1407.9,\n        1408,\n        1500,\n        1600,\n    ]\n\n    _densityTable = [\n        19.07,\n        
18.98,\n        18.89,\n        18.79,\n        18.68,\n        18.55,\n        18.41,\n        18.39,\n        18.16,\n        18.11,\n        18.07,\n        17.94,\n        17.88,\n        17.76,\n        17.53,\n        17.52,\n        16.95,\n        16.84,\n        16.71,\n    ]  # g/cc\n\n    _linearExpansionPercent = [\n        0.000,\n        0.157,\n        0.315,\n        0.494,\n        0.697,\n        0.924,\n        1.186,\n        1.300,\n        1.635,\n        1.737,\n        1.820,\n        2.050,\n        2.168,\n        2.398,\n        2.855,\n        2.866,\n        4.006,\n        4.232,\n        4.502,\n    ]  # %\n\n    _linearExpansionTable = [\n        13.9,\n        15.2,\n        16.9,\n        19.0,\n        21.4,\n        24.3,\n        27.7,\n        29.1,\n        17.3,\n        17.3,\n        17.3,\n        22.9,\n        22.9,\n        22.9,\n        22.9,\n        22.9,\n        25.5,\n        25.5,\n        25.5,\n    ]  # 1e6/K\n\n    propertyValidTemperature = {\n        \"thermal conductivity\": ((255.4, 1173.2), \"K\"),\n        \"heat capacity\": ((_heatCapacityTableK[0], _heatCapacityTableK[-1]), \"K\"),\n        \"density\": ((_densityTableK[0], _densityTableK[-1]), \"K\"),\n        \"linear expansion\": ((_densityTableK[0], _densityTableK[-1]), \"K\"),\n        \"linear expansion percent\": ((_densityTableK[0], _densityTableK[-1]), \"K\"),\n    }\n\n    references = {\n        \"thermal conductivity\": [\"AAA Fuels Handbook by YS Kim and G.L. 
Hofman, ANL, Section 6.1.1\"],\n        \"heat capacity\": [\"AAA Fuels Handbook by YS Kim and GL Hofman, Table 2-14\"],\n        \"melting point\": [\"AAA Fuels Handbook by YS Kim and GL Hofman, Table 2-13\"],\n        \"density\": [\"Metallic Fuels Handbook, ANL-NSE-3, Table B.3.3-1\"],\n        \"linear expansion\": [\"Metallic Fuels Handbook, ANL-NSE-3, Table B.3.3-1\"],\n        \"linear expansion percent\": [\"Metallic Fuels Handbook, ANL-NSE-3, Table B.3.3-1\"],\n    }\n\n    def thermalConductivity(self, Tk: float = None, Tc: float = None) -> float:\n        \"\"\"The thermal conductivity of pure U in W-m/K.\"\"\"\n        Tk = getTk(Tc, Tk)\n        self.checkPropertyTempRange(\"thermal conductivity\", Tk)\n\n        kU = 21.73 + (0.01591 * Tk) + (0.000005907 * Tk**2)\n        return kU\n\n    def heatCapacity(self, Tk: float = None, Tc: float = None) -> float:\n        \"\"\"Heat capacity in J/kg-K.\"\"\"\n        Tk = getTk(Tc, Tk)\n        self.checkPropertyTempRange(\"heat capacity\", Tk)\n\n        return interp(Tk, self._heatCapacityTableK, self._heatCapacityTable)\n\n    def setDefaultMassFracs(self) -> None:\n        nb = self.parent.nuclideBases if self.parent else None\n        if nb is None:\n            u235Weight = 235.043929425\n            u238Weight = 238.050788298\n            u235Abundance = 0.007204\n        else:\n            u235Weight = nb.byLabel[\"U235\"].weight\n            u238Weight = nb.byLabel[\"U238\"].weight\n            u235Abundance = nb.byLabel[\"U235\"].abundance\n\n        u238Abundance = 1.0 - u235Abundance  # neglect U234 and keep U235 at natural level\n        gramsIn1Mol = u235Abundance * u235Weight + u238Abundance * u238Weight\n\n        self.setMassFrac(\"U235\", u235Weight * u235Abundance / gramsIn1Mol)\n        self.setMassFrac(\"U238\", u238Weight * u238Abundance / gramsIn1Mol)\n\n        self.refDens = 19.07\n\n    def applyInputParams(self, U235_wt_frac: float = None, TD_frac: float = None, *args, **kwargs):\n 
       if U235_wt_frac is not None:\n            self.adjustMassEnrichment(U235_wt_frac)\n\n        td = TD_frac\n        if td is not None:\n            if td > 1.0:\n                runLog.warning(\n                    f\"Theoretical density frac for {self} is {td}, which is >1\",\n                    single=True,\n                    label=\"Large theoretical density\",\n                )\n            elif td == 0:\n                runLog.warning(\n                    f\"Theoretical density frac for {self} is zero!\",\n                    single=True,\n                    label=\"Zero theoretical density\",\n                )\n            self.adjustTD(td)\n\n        FuelMaterial.applyInputParams(self, *args, **kwargs)\n\n    def meltingPoint(self):\n        \"\"\"Melting point in K.\"\"\"\n        return 1408\n\n    def density(self, Tk: float = None, Tc: float = None) -> float:\n        \"\"\"Density in g/cc.\"\"\"\n        Tk = getTk(Tc, Tk)\n        self.checkPropertyTempRange(\"density\", Tk)\n\n        return interp(Tk, self._densityTableK, self._densityTable) * self.getTD()\n\n    def pseudoDensity(self, Tk: float = None, Tc: float = None) -> float:\n        \"\"\"2D-expanded density in g/cc.\"\"\"\n        return super().pseudoDensity(Tk=Tk, Tc=Tc) * self.getTD()\n\n    def linearExpansion(self, Tk: float = None, Tc: float = None) -> float:\n        \"\"\"Linear expansion coefficient in 1/K.\"\"\"\n        Tk = getTk(Tc, Tk)\n        self.checkPropertyTempRange(\"linear expansion\", Tk)\n\n        return interp(Tk, self._densityTableK, self._linearExpansionTable) / 1e6\n\n    def linearExpansionPercent(self, Tk: float = None, Tc: float = None) -> float:\n        \"\"\"Linear expansion percent.\"\"\"\n        Tk = getTk(Tc, Tk)\n        self.checkPropertyTempRange(\"linear expansion percent\", Tk)\n\n        return interp(Tk, self._densityTableK, self._linearExpansionPercent)\n"
  },
  {
    "path": "armi/materials/uraniumOxide.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nUranium Oxide properties.\n\nUO2 is a common ceramic nuclear fuel form. It's properties are well known. This mostly\nuses data from [#ornltm2000]_.\n\n.. [#ornltm2000] Thermophysical Properties of MOX and UO2 Fuels Including the Effects of Irradiation. S.G. Popov,\n    et.al. Oak Ridge National Laboratory. ORNL/TM-2000/351 https://rsicc.ornl.gov/fmdp/tm2000-351.pdf\n\nThe data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to\nthis file for a fully worked example of an ARMI material. And this material has proven useful for testing. 
The data\ncontained in this file should not be used in production simulations.\n\"\"\"\n\nimport collections\nimport math\n\nfrom numpy import interp\n\nfrom armi import runLog\nfrom armi.materials import material\nfrom armi.nucDirectory import thermalScattering as tsl\nfrom armi.utils.units import getTk\n\nHeatCapacityConstants = collections.namedtuple(\"HeatCapacityConstants\", [\"c1\", \"c2\", \"c3\", \"theta\", \"Ea\"])\n\n\nclass UraniumOxide(material.FuelMaterial, material.SimpleSolid):\n    enrichedNuclide = \"U235\"\n\n    REFERENCE_TEMPERATURE = 27\n\n    # ORNL/TM-2000/351 section 4.3\n    heatCapacityConstants = HeatCapacityConstants(c1=302.27, c2=8.463e-3, c3=8.741e7, theta=548.68, Ea=18531.7)\n\n    __meltingPoint = 3123.0\n\n    propertyUnits = {\"heat capacity\": \"J/mol-K\"}\n\n    propertyValidTemperature = {\n        \"density\": ((293.15, 3100), \"K\"),\n        \"heat capacity\": ((298.15, 3120), \"K\"),\n        \"linear expansion\": ((273, 3120), \"K\"),\n        \"linear expansion percent\": ((273, __meltingPoint), \"K\"),\n        \"thermal conductivity\": ((300, 3000), \"K\"),\n    }\n\n    references = {\n        \"thermal conductivity\": \"Thermal conductivity of uranium dioxide by nonequilibrium molecular dynamics \"\n        + \"simulation. S. Motoyama. Physical Review B, Volume 60, Number 1, July 1999\",\n        \"linear expansion\": \"Thermophysical Properties of MOX and UO2 Fuels Including the Effects of Irradiation. \"\n        + \"S.G. Popov, et.al. Oak Ridge National Laboratory. ORNL/TM-2000/351\",\n        \"heat capacity\": \"ORNL/TM-2000/351\",\n    }\n\n    thermalScatteringLaws = (tsl.fromNameAndCompound(\"U\", tsl.UO2), tsl.fromNameAndCompound(\"O\", tsl.UO2))\n\n    # Thermal conductivity values taken from:\n    # Thermal conductivity of uranium dioxide by nonequilibrium molecular dynamics simulation. S. 
Motoyama.\n    #    Physical Review B, Volume 60, Number 1, July 1999\n    thermalConductivityTableK = [\n        300,\n        600,\n        900,\n        1200,\n        1500,\n        1800,\n        2100,\n        2400,\n        2700,\n        3000,\n    ]\n\n    thermalConductivityTable = [\n        7.991,\n        4.864,\n        3.640,\n        2.768,\n        2.567,\n        2.294,\n        2.073,\n        1.891,\n        1.847,\n        1.718,\n    ]\n\n    def __init__(self):\n        material.FuelMaterial.__init__(self)\n        self.refDens = self.density(Tk=self.refTempK)\n\n    def applyInputParams(self, U235_wt_frac: float = None, TD_frac: float = None, *args, **kwargs) -> None:\n        if U235_wt_frac is not None:\n            self.adjustMassEnrichment(U235_wt_frac)\n\n        td = TD_frac\n        if td is not None:\n            if td > 1.0:\n                runLog.warning(\n                    \"Theoretical density frac for {0} is {1}, which is >1\".format(self, td),\n                    single=True,\n                    label=\"Large theoretical density\",\n                )\n            elif td == 0:\n                runLog.warning(\n                    f\"Theoretical density frac for {self} is zero!\",\n                    single=True,\n                    label=\"Zero theoretical density\",\n                )\n            self.adjustTD(td)\n\n        material.FuelMaterial.applyInputParams(self, *args, **kwargs)\n\n    def setDefaultMassFracs(self) -> None:\n        \"\"\"UO2 mass fractions. 
Using Natural Uranium without U234.\"\"\"\n        nb = self.parent.nuclideBases if self.parent else None\n        if nb is None:\n            u235Weight = 235.043929425\n            u238Weight = 238.050788298\n            oxygenWeight = 15.999304875697801\n            u235Abundance = 0.007204\n        else:\n            u235Weight = nb.byName[\"U235\"].weight\n            u238Weight = nb.byName[\"U238\"].weight\n            oxygenWeight = nb.byName[\"O\"].weight\n            u235Abundance = nb.byName[\"U235\"].abundance\n\n        u238Abundance = 1.0 - u235Abundance  # neglect U234 and keep U235 at natural level\n        gramsIn1Mol = 2 * oxygenWeight + u235Abundance * u235Weight + u238Abundance * u238Weight\n\n        self.setMassFrac(\"U235\", u235Weight * u235Abundance / gramsIn1Mol)\n        self.setMassFrac(\"U238\", u238Weight * u238Abundance / gramsIn1Mol)\n        self.setMassFrac(\"O\", 2 * oxygenWeight / gramsIn1Mol)\n\n    def meltingPoint(self):\n        \"\"\"\n        Melting point in K.\n\n        From [#ornltm2000]_.\n        \"\"\"\n        return self.__meltingPoint\n\n    def density(self, Tk: float = None, Tc: float = None) -> float:\n        \"\"\"\n        Density in (g/cc).\n\n        Polynomial line fit to data from [#ornltm2000]_ on page 11.\n        \"\"\"\n        Tk = getTk(Tc, Tk)\n        self.checkPropertyTempRange(\"density\", Tk)\n\n        return (-1.01147e-7 * Tk**2 - 1.29933e-4 * Tk + 1.09805e1) * self.getTD()\n\n    def heatCapacity(self, Tk: float = None, Tc: float = None) -> float:\n        \"\"\"\n        Heat capacity in J/kg-K.\n\n        From Section 4.3 in  [#ornltm2000]_\n        \"\"\"\n        Tk = getTk(Tc, Tk)\n        self.checkPropertyTempRange(\"heat capacity\", Tk)\n\n        hcc = self.heatCapacityConstants\n        # eq 4.2\n        specificHeatCapacity = (\n            hcc.c1 * (hcc.theta / Tk) ** 2 * math.exp(hcc.theta / Tk) / (math.exp(hcc.theta / Tk) - 1.0) ** 2\n            + 2 * hcc.c2 * Tk\n            
+ hcc.c3 * hcc.Ea * math.exp(-hcc.Ea / Tk) / Tk**2\n        )\n        return specificHeatCapacity\n\n    def linearExpansion(self, Tk: float = None, Tc: float = None) -> float:\n        \"\"\"\n        Linear expansion coefficient.\n\n        Curve fit from data in [#ornltm2000]_\n        \"\"\"\n        Tk = getTk(Tc, Tk)\n        self.checkPropertyTempRange(\"linear expansion\", Tk)\n\n        return 1.06817e-12 * Tk**2 - 1.37322e-9 * Tk + 1.02863e-5\n\n    def linearExpansionPercent(self, Tk: float = None, Tc: float = None) -> float:\n        \"\"\"\n        Return dL/L.\n\n        From Section 3.3 of [#ornltm2000]_\n        \"\"\"\n        Tk = getTk(Tc, Tk)\n        self.checkPropertyTempRange(\"linear expansion percent\", Tk)\n\n        if Tk >= 273.0 and Tk < 923.0:\n            return (-2.66e-03 + 9.802e-06 * Tk - 2.705e-10 * Tk**2 + 4.391e-13 * Tk**3) * 100.0\n        else:\n            return (-3.28e-03 + 1.179e-05 * Tk - 2.429e-09 * Tk**2 + 1.219e-12 * Tk**3) * 100.0\n\n    def thermalConductivity(self, Tk: float = None, Tc: float = None) -> float:\n        \"\"\"\n        Thermal conductivity.\n\n        Ref: Thermal conductivity of uranium dioxide by nonequilibrium molecular dynamics\n        simulation. S. Motoyama. Physical Review B, Volume 60, Number 1, July 1999\n        \"\"\"\n        Tk = getTk(Tc, Tk)\n        self.checkPropertyTempRange(\"thermal conductivity\", Tk)\n\n        return interp(Tk, self.thermalConductivityTableK, self.thermalConductivityTable)\n\n\nclass UO2(UraniumOxide):\n    \"\"\"Another name for UraniumOxide.\"\"\"\n\n    def __init__(self):\n        UraniumOxide.__init__(self)\n        self._name = \"UraniumOxide\"\n"
  },
  {
    "path": "armi/materials/void.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nVoid material.\n\nUse this to fill empty spaces while maintaining proper volume fractions.\n\"\"\"\n\nfrom armi.materials import material\n\n\nclass Void(material.Fluid):\n    \"\"\"A Void material is a bookkeeping material with zero density.\"\"\"\n\n    def pseudoDensity(self, Tk: float = None, Tc: float = None) -> float:\n        return 0.0\n\n    def density(self, Tk: float = None, Tc: float = None) -> float:\n        return 0.0\n"
  },
  {
    "path": "armi/materials/water.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Basic water material.\n\nThe data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to\nthis file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data\ncontained in this file should not be used in production simulations.\n\"\"\"\n\nimport math\n\nfrom armi.materials.material import Fluid\nfrom armi.nucDirectory import elements\nfrom armi.nucDirectory import thermalScattering as tsl\nfrom armi.utils import units\nfrom armi.utils.units import getTk\n\n_REF_SR1_86 = \"IAPWS SR1-86 Revised Supplementary Release on Saturation Properties of Ordinary Water and Steam\"\n\n\nclass Water(Fluid):\n    \"\"\"\n    Water.\n\n    This is a good faith implementation of the Revised Supplementary Properties of Ordinary Water Substance (1992) by\n    IAPWS -- International Association for the Properties of Water and Steam .\n\n    This is an abstract class implemented on the Saturated Water Material  and the Saturated Steam Material Class, which\n    should be good enough for\n    most uses.\n\n    http://www.iapws.org/relguide/supsat.pdf\n    IAPWS-IF97 is now the international standard for calculations in the steam power industry\n    \"\"\"\n\n    thermalScatteringLaws = (tsl.fromNameAndCompound(\"H\", tsl.H2O),)\n    references = {\n        \"vapor pressure\": 
_REF_SR1_86,\n        \"enthalpy (saturated water)\": _REF_SR1_86,\n        \"enthalpy (saturated steam)\": _REF_SR1_86,\n        \"entropy (saturated water)\": _REF_SR1_86,\n        \"entropy (saturated steam)\": _REF_SR1_86,\n        \"density (saturated water)\": _REF_SR1_86,\n        \"density (saturated steam)\": _REF_SR1_86,\n    }\n\n    TEMPERATURE_CRITICAL_K = 647.096\n\n    DENSITY_CRITICAL_KGPERCUBICMETER = 322.0\n    DENSITY_CRITICAL_GPERCUBICCENTIMETER = DENSITY_CRITICAL_KGPERCUBICMETER * units.G_PER_KG / units.CM3_PER_M3\n    VAPOR_PRESSURE_CRITICAL_MPA = 22.064\n    VAPOR_PRESSURE_CRITICAL_PA = VAPOR_PRESSURE_CRITICAL_MPA * 1e6\n    ALPHA_0 = 1000\n    PHI_0 = ALPHA_0 / TEMPERATURE_CRITICAL_K\n\n    # coefficients for auxiliary quantity for enthalpy and entropy kept as d to match original source\n    d = {\n        1: -5.65134998e-08,\n        2: 2690.66631,\n        3: 127.287297,\n        4: -135.003439,\n        5: 0.981825814,\n        \"alpha\": -1135.905627715,\n        \"phi\": 2319.5246,\n    }\n\n    def setDefaultMassFracs(self) -> None:\n        nb = self.parent.nuclideBases if self.parent else None\n        if nb is None:\n            massHydrogen = 1.007976004510346\n            massOxygen = 15.999304715704756\n        else:\n            massHydrogen = elements.bySymbol[\"H\"].standardWeight\n            massOxygen = elements.bySymbol[\"O\"].standardWeight\n\n        totalMass = 2 * massHydrogen + massOxygen\n        massFrac = {\"H\": 2.0 * massHydrogen / totalMass, \"O\": massOxygen / totalMass}\n        for nucName, mfrac in massFrac.items():\n            self.setMassFrac(nucName, mfrac)\n\n    def theta(self, Tk: float = None, Tc: float = None) -> float:\n        \"\"\"Returns temperature normalized to the critical temperature.\"\"\"\n        return getTk(Tc=Tc, Tk=Tk) / self.TEMPERATURE_CRITICAL_K\n\n    def tau(self, Tc: float = None, Tk: float = None) -> float:\n        \"\"\"\n        Returns 1 - temperature normalized to the 
critical temperature.\n\n        Notes\n        -----\n        thermophysical correlations are give in Tau rather than Tk or Tc\n        \"\"\"\n        return 1.0 - self.theta(Tc=Tc, Tk=Tk)\n\n    def vaporPressure(self, Tk: float = None, Tc: float = None) -> float:\n        \"\"\"\n        Returns vapor pressure in (Pa).\n\n        Parameters\n        ----------\n        Tk: float\n            temperature in Kelvin\n        Tc: float\n            temperature in Celsius\n\n        Returns\n        -------\n        vaporPressure: float\n            vapor pressure in Pa\n\n        Notes\n        -----\n        IAPWS-IF97\n        http://www.iapws.org/relguide/supsat.pdf\n        IAPWS-IF97 is now the international standard for calculations in the\n        steam power industry\n        \"\"\"\n        tau = self.tau(Tc=Tc, Tk=Tk)\n        T_ratio = self.TEMPERATURE_CRITICAL_K / getTk(Tc=Tc, Tk=Tk)\n\n        a1 = -7.85951783\n        a2 = 1.84408259\n        a3 = -11.7866497\n        a4 = 22.6807411\n        a5 = -15.9618719\n        a6 = 1.80122502\n\n        sum_coefficients = a1 * tau + a2 * tau**1.5 + a3 * tau**3 + a4 * tau**3.5 + a5 * tau**4 + a6 * tau**7.5\n        log_vapor_pressure = T_ratio * sum_coefficients\n        vapor_pressure = self.VAPOR_PRESSURE_CRITICAL_PA * math.e ** (log_vapor_pressure)\n        # past the supercritical point tau's raised to .5 cause complex #'s\n        return vapor_pressure.real\n\n    def vaporPressurePrime(self, Tk: float = None, Tc: float = None, dT: float = 1e-6) -> float:\n        \"\"\"\n        Approximation of derivative of vapor pressure wrt temperature.\n\n        Parameters\n        ----------\n        Tk: float\n            temperature in Kelvin\n        Tc: float\n            temperature in Celsius\n\n        Note\n        ----\n        This uses a numerical approximation\n        \"\"\"\n        Tcold = getTk(Tc=Tc, Tk=Tk) - dT / 2.0\n        Thot = Tcold + dT\n\n        dp = self.vaporPressure(Tk=Thot) - 
self.vaporPressure(Tk=Tcold)\n        return dp / dT\n\n    def auxiliaryQuantitySpecificEnthalpy(self, Tk: float = None, Tc: float = None) -> float:\n        \"\"\"\n        Returns the auxiliary quantity for specific enthalpy.\n\n        Parameters\n        ----------\n        Tk: float\n            temperature in Kelvin\n        Tc: float\n            temperature in Celsius\n\n        Returns\n        -------\n        alpha: float\n            specific quantity for enthalpy in J/kg\n\n        Notes\n        -----\n        IAPWS-IF97\n        http://www.iapws.org/relguide/supsat.pdf\n        IAPWS-IF97 is now the international standard for calculations in the\n        steam power industry\n\n        alpha is used in the relations for enthalpy\n        h = alpha + T/pressure*dp/dT\n        \"\"\"\n        theta = self.theta(Tc=Tc, Tk=Tk)\n\n        normalized_alpha = (\n            self.d[\"alpha\"]\n            + self.d[1] * theta**-19\n            + self.d[2] * theta\n            + self.d[3] * theta**4.5\n            + self.d[4] * theta**5.0\n            + self.d[5] * theta**54.5\n        )\n\n        # past the supercritical point tau's raised to .5 cause complex #'s\n        return normalized_alpha.real * self.ALPHA_0\n\n    def auxiliaryQuantitySpecificEntropy(self, Tk: float = None, Tc: float = None) -> float:\n        \"\"\"\n        Returns the auxiliary quantity for specific entropy.\n\n        Parameters\n        ----------\n        Tk: float\n            temperature in Kelvin\n        Tc: float\n            temperature in Celsius\n\n        Returns\n        -------\n        phi: float\n            specific quantity for entropy in J/(kgK)\n\n        Notes\n        -----\n        IAPWS-IF97\n        http://www.iapws.org/relguide/supsat.pdf\n        IAPWS-IF97 is now the international standard for calculations in the\n        steam power industry\n\n        alpha is used in the relations for enthalpy\n        s = phi + 1/pressure*dp/dT\n        \"\"\"\n    
    theta = self.theta(Tc=Tc, Tk=Tk)\n\n        normalized_phi = (\n            self.d[\"phi\"]\n            + 19.0 / 20.0 * self.d[1] * theta**-20.0\n            + self.d[2] * math.log(theta)\n            + 9.0 / 7.0 * self.d[3] * theta**3.5\n            + 5.0 / 4.0 * self.d[4] * theta**4.0\n            + 109.0 / 107.0 * self.d[5] * theta**53.5\n        )\n\n        # past the supercritical point tau's raised to .5 cause complex #'s\n        return normalized_phi.real * self.PHI_0\n\n    def enthalpy(self, Tk: float = None, Tc: float = None) -> float:\n        \"\"\"\n        Returns enthalpy of saturated water.\n\n        Parameters\n        ----------\n        Tk: float\n            temperature in Kelvin\n        Tc: float\n            temperature in Celsius\n\n        Returns\n        -------\n        enthalpy: float\n            vapor pressure in J/kg\n\n        Notes\n        -----\n        IAPWS-IF97\n        http://www.iapws.org/relguide/supsat.pdf\n        IAPWS-IF97 is now the international standard for calculations in the\n        steam power industry\n        \"\"\"\n        alpha = self.auxiliaryQuantitySpecificEnthalpy(Tc=Tc, Tk=Tk)\n        T = getTk(Tc=Tc, Tk=Tk)\n        rho = self.pseudoDensityKgM3(Tc=Tc, Tk=Tk)\n        dp_dT = self.vaporPressurePrime(Tc=Tc, Tk=Tk)\n\n        return alpha + T / rho * dp_dT\n\n    def entropy(self, Tk: float = None, Tc: float = None) -> float:\n        \"\"\"\n        Returns entropy of saturated water.\n\n        Parameters\n        ----------\n        Tk: float\n            temperature in Kelvin\n        Tc: float\n            temperature in Celsius\n\n        Returns\n        -------\n        entropy: float\n            entropy in J/(kgK)\n\n        Notes\n        -----\n        IAPWS-IF97\n        http://www.iapws.org/relguide/supsat.pdf\n        IAPWS-IF97 is now the international standard for calculations in the\n        steam power industry\n        \"\"\"\n        phi = 
self.auxiliaryQuantitySpecificEntropy(Tc=Tc, Tk=Tk)\n        rho = self.pseudoDensityKgM3(Tc=Tc, Tk=Tk)\n        dp_dT = self.vaporPressurePrime(Tc=Tc, Tk=Tk)\n\n        return phi + 1.0 / rho * dp_dT\n\n    def pseudoDensity(self, Tk=None, Tc=None):\n        \"\"\"\n        Density for arbitrary forms of water.\n\n        Notes\n        -----\n        In ARMI, we define pseudoDensity() and density() as the same for Fluids.\n        \"\"\"\n        raise NotImplementedError(\"Please use a concrete instance: SaturatedWater or SaturatedSteam.\")\n\n\nclass SaturatedWater(Water):\n    \"\"\"\n    Saturated Water.\n\n    This is a good faith implementation of the Revised Supplementary Properties\n    of Ordinary Water Substance (1992) by IAPWS -- International Association for\n    the Properties of Water and Steam .\n\n    This is the Saturated Liquid Water Material Class. For steam look to the\n    Saturated  Steam Material Class.\n    \"\"\"\n\n    def pseudoDensity(self, Tk: float = None, Tc: float = None) -> float:\n        \"\"\"\n        Returns density in g/cc.\n\n        Parameters\n        ----------\n        Tk: float\n            temperature in Kelvin\n        Tc: float\n            temperature in Celsius\n\n        Returns\n        -------\n        density: float\n            density in g/cc\n\n        Note\n        ----\n        In ARMI, we define pseudoDensity() and density() as the same for Fluids.\n        IAPWS-IF97\n        http://www.iapws.org/relguide/supsat.pdf\n        IAPWS-IF97 is now the international standard for calculations in the steam power industry\n        \"\"\"\n        tau = self.tau(Tc=Tc, Tk=Tk)\n\n        b1 = 1.99274064\n        b2 = 1.09965342\n        b3 = -0.510839303\n        b4 = -1.75493479\n        b5 = -45.5170352\n        b6 = -6.74694450e5\n\n        normalized_rho = (\n            1\n            + b1 * tau ** (1.0 / 3.0)\n            + b2 * tau ** (2.0 / 3.0)\n            + b3 * tau ** (5.0 / 3.0)\n            + b4 * 
tau ** (16.0 / 3.0)\n            + b5 * tau ** (43.0 / 3.0)\n            + b6 * tau ** (111.0 / 3.0)\n        )\n\n        # past the supercritical point tau's raised to .5 cause complex #'s\n        return normalized_rho.real * self.DENSITY_CRITICAL_GPERCUBICCENTIMETER\n\n\nclass SaturatedSteam(Water):\n    \"\"\"\n    Saturated Steam.\n\n    This is a good faith implementation of the Revised Supplementary Properties\n    of Ordinary Water Substance (1992) by IAPWS -- International Association for\n    the Properties of Water and Steam .\n\n    This is the Saturated Liquid Water Material Class. For steam look to the\n    Saturated  Steam Material Class.\n    \"\"\"\n\n    def pseudoDensity(self, Tk: float = None, Tc: float = None) -> float:\n        \"\"\"\n        Returns density in g/cc.\n\n        Parameters\n        ----------\n        Tk: float\n            temperature in Kelvin\n        Tc: float\n            temperature in Celsius\n\n        Returns\n        -------\n        density: float\n            density in g/cc\n\n        Notes\n        -----\n        In ARMI, we define pseudoDensity() and density() as the same for Fluids.\n        IAPWS-IF97\n        http://www.iapws.org/relguide/supsat.pdf\n        IAPWS-IF97 is now the international standard for calculations in the steam power industry\n        \"\"\"\n        tau = self.tau(Tc=Tc, Tk=Tk)\n\n        c1 = -2.03150240\n        c2 = -2.68302940\n        c3 = -5.38626492\n        c4 = -17.2991605\n        c5 = -44.7586581\n        c6 = -63.9201063\n\n        log_normalized_rho = (\n            c1 * tau ** (2.0 / 6.0)\n            + c2 * tau ** (4.0 / 6.0)\n            + c3 * tau ** (8.0 / 6.0)\n            + c4 * tau ** (18.0 / 6.0)\n            + c5 * tau ** (37.0 / 6.0)\n            + c6 * tau ** (71.0 / 6.0)\n        )\n\n        # past the supercritical point tau's raised to .5 cause complex #'s\n        return math.e**log_normalized_rho.real * self.DENSITY_CRITICAL_GPERCUBICCENTIMETER\n"
  },
  {
    "path": "armi/materials/yttriumOxide.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Yttrium Oxide.\n\nThe data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to\nthis file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data\ncontained in this file should not be used in production simulations.\n\"\"\"\n\nfrom armi.materials.material import Material\nfrom armi.utils.units import getTk\n\n\nclass Y2O3(Material):\n    propertyValidTemperature = {\"linear expansion percent\": ((273.15, 1573.15), \"K\")}\n\n    def __init__(self):\n        Material.__init__(self)\n        self.refDens = 5.03\n\n    def setDefaultMassFracs(self):\n        self.setMassFrac(\"Y89\", 0.7875)\n        self.setMassFrac(\"O16\", 0.2125)\n\n    def linearExpansionPercent(self, Tk=None, Tc=None):\n        \"\"\"\n        Return the linear expansion percent for Yttrium Oxide (Yttria).\n\n        Notes\n        -----\n        From Table 5 of \"Thermal Expansion and Phase Inversion of Rare-Earth Oxides.\n        \"\"\"\n        Tk = getTk(Tc, Tk)\n        self.checkPropertyTempRange(\"linear expansion percent\", Tk)\n\n        return 1.4922e-07 * Tk**2 + 6.2448e-04 * Tk - 1.8414e-01\n"
  },
  {
    "path": "armi/materials/zincOxide.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Zinc Oxide.\n\nThe data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to\nthis file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data\ncontained in this file should not be used in production simulations.\n\"\"\"\n\nfrom armi.materials.material import Material\nfrom armi.utils.units import getTk\n\n\nclass ZnO(Material):\n    propertyValidTemperature = {\"linear expansion percent\": ((10.12, 1491.28), \"K\")}\n\n    def setDefaultMassFracs(self):\n        self.setMassFrac(\"ZN\", 0.8034)\n        self.setMassFrac(\"O16\", 0.1966)\n\n    def density(self, Tk=None, Tc=None):\n        return 5.61\n\n    def linearExpansionPercent(self, Tk=None, Tc=None):\n        \"\"\"\n        Return the linear expansion percent for Polycrystalline ZnO.\n\n        Notes\n        -----\n        Digitized from Figure 1.24 from\n        Zinc Oxide: Fundamentals, Materials and Device Technology\n        \"\"\"\n        Tk = getTk(Tc, Tk)\n        self.checkPropertyTempRange(\"linear expansion percent\", Tk)\n\n        return -1.9183e-10 * Tk**3 + 6.5944e-07 * Tk**2 + 5.2992e-05 * Tk - 5.2631e-02\n"
  },
  {
    "path": "armi/materials/zr.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Zirconium metal.\n\nThe data in this file exists for testing and demonstration purposes only. Developers of ARMI applications can refer to\nthis file for a fully worked example of an ARMI material. And this material has proven useful for testing. The data\ncontained in this file should not be used in production simulations.\n\"\"\"\n\nfrom numpy import interp\n\nfrom armi.materials.material import Material\nfrom armi.utils.units import getTk\n\n\nclass Zr(Material):\n    \"\"\"Metallic zirconium.\"\"\"\n\n    propertyValidTemperature = {\n        \"density\": ((293, 1800), \"K\"),\n        \"linear expansion\": ((293, 1800), \"K\"),\n        \"linear expansion percent\": ((293, 1800), \"K\"),\n        \"thermal conductivity\": ((298, 2000), \"K\"),\n    }\n\n    references = {\n        \"density\": \"AAA Materials Handbook 45803\",\n        \"thermal conductivity\": \"AAA Fuels handbook. ANL\",\n        \"linear expansion\": \"Y.S. Touloukian, R.K. Kirby, R.E. Taylor and P.D. Desai, Thermal Expansion, \"\n        + \"Thermophysical Properties of Matter, Vol. 12, IFI/Plenum, New York-Washington (1975)\",\n        \"linear expansion percent\": \"Y.S. Touloukian, R.K. Kirby, R.E. Taylor and P.D. Desai, Thermal Expansion, \"\n        + \"Thermophysical Properties of Matter, Vol. 
12, IFI/Plenum, New York-Washington (1975)\",\n    }\n\n    linearExpansionTableK = [\n        293,\n        400,\n        500,\n        600,\n        700,\n        800,\n        900,\n        1000,\n        1100,\n        1136.99999,\n        1137,\n        1200,\n        1400,\n        1600,\n        1800,\n    ]\n\n    linearExpansionTable = [\n        5.70e-6,\n        5.90e-6,\n        6.60e-6,\n        7.10e-6,\n        7.60e-6,\n        7.90e-6,\n        8.00e-6,\n        8.20e-6,\n        8.20e-6,\n        8.20e-6,\n        9.00e-6,\n        9.10e-6,\n        9.50e-6,\n        1.03e-5,\n        1.13e-5,\n    ]\n\n    refTempK = 298.15\n\n    def __init__(self):\n        Material.__init__(self)\n        self.refDens = self._computeReferenceDensity(Tk=self.refTempK)\n\n    def setDefaultMassFracs(self):\n        self.setMassFrac(\"ZR\", 1.0)\n\n    def _computeReferenceDensity(self, Tk=None, Tc=None):\n        r\"\"\"AAA Materials Handbook 45803.\"\"\"\n        Tk = getTk(Tc, Tk)\n        self.checkPropertyTempRange(\"density\", Tk)\n\n        if Tk < 1135:\n            return -3.29256e-8 * Tk**2 - 9.67145e-5 * Tk + 6.60176\n        else:\n            return -2.61683e-8 * Tk**2 - 1.11331e-4 * Tk + 6.63616\n\n    def thermalConductivity(self, Tk=None, Tc=None):\n        \"\"\"\n        Thermal conductivity in W/mK.\n\n        Reference: AAA Fuels handbook. ANL.\n        \"\"\"\n        Tk = getTk(Tc, Tk)\n        self.checkPropertyTempRange(\"thermal conductivity\", Tk)\n        return 8.853 + (0.007082 * Tk) + (0.000002533 * Tk**2) + (2992.0 / Tk)\n\n    def linearExpansion(self, Tk=None, Tc=None):\n        r\"\"\"Linear expansion in m/mK.\n\n        Reference: Y.S. Touloukian, R.K. Kirby, R.E. Taylor and P.D. Desai, Thermal Expansion,\n                   Thermophysical Properties of Matter, Vol. 
12, IFI/Plenum, New York-Washington (1975)\n\n        See page 400\n        \"\"\"\n        Tk = getTk(Tc, Tk)\n        self.checkPropertyTempRange(\"linear expansion\", Tk)\n        return interp(Tk, self.linearExpansionTableK, self.linearExpansionTable)\n\n    def linearExpansionPercent(self, Tk=None, Tc=None):\n        r\"\"\"Linear expansion in dL/L.\n\n        Reference: Y.S. Touloukian, R.K. Kirby, R.E. Taylor and P.D. Desai, Thermal Expansion,\n                   Thermophysical Properties of Matter, Vol. 12, IFI/Plenum, New York-Washington (1975)\n\n        See page 400\n        \"\"\"\n        Tk = getTk(Tc, Tk)\n        self.checkPropertyTempRange(\"linear expansion percent\", Tk)\n\n        # NOTE: checkPropertyTempRange takes care of lower/upper limits\n        if Tk < 1137:\n            return -0.111 + (2.325e-4 * Tk) + (5.595e-7 * Tk**2) - (1.768e-10 * Tk**3)\n        else:\n            return -0.759 + (1.474e-3 * Tk) - (5.140e-7 * Tk**2) + (1.559e-10 * Tk**3)\n"
  },
  {
    "path": "armi/meta.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Metadata describing an ARMI distribution.\"\"\"\n\ntry:\n    # Python >= 3.8\n    from importlib import metadata\nexcept ImportError:\n    # Python 3.x < 3.8\n    import importlib_metadata as metadata\n\n__version__ = metadata.version(\"armi\")\n"
  },
  {
    "path": "armi/migration/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nMigrate input/output from one version of ARMI to another.\n\nUsers want to be able to upgrade to the latest version of the code without having to\ninvest a bunch of time in updating their previous input and output files. Users have up\nto thousands of inputs that they want to keep working. Even more serious, follow-on\nanalysts who got an output database (including associated inputs) from an ARMI\npower-user strongly prefer to be able to migrate old cases. Oftentimes, an output\ndatabase can be many GB large and be the result of many CPU-weeks, so there's monetary\nand temporal value to be preserved.\n\nMeanwhile, developers want to be able to make upgrades to the input and/or output to fix\nbugs, ease the training and cognitive burden of new users, and so on.\n\nMigrations are key to getting both of these big needs.\n\nMigrations should generally happen in the background from the user's perspective, just\nlike happens in mainstream applications like word processors and spreadsheets.\n\"\"\"\n\nfrom armi.migration import (\n    m0_1_3,\n    m0_1_6,\n)\n\nACTIVE_MIGRATIONS = [\n    m0_1_3.RemoveCentersFromBlueprints,\n    m0_1_3.UpdateElementalNuclides,\n    m0_1_6.ConvertAlphanumLocationSettingsToNum,\n]\n"
  },
  {
    "path": "armi/migration/base.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nBase migration classes.\n\nA classic migration takes a file name, read the files, migrates the\ndata, and re-writes the file. Some migrations need to happen live\non a stream. For example, if an old/invalid input file is being read\nin from an old database. The migration class defined here\nchooses this behavior based on whether the ``stream`` or ``path``\nvariables are given in the constructor.\n\"\"\"\n\nimport os\nimport shutil\n\nfrom armi import runLog\nfrom armi.settings import caseSettings\n\n\nclass Migration:\n    \"\"\"Generic migration.\n\n    To implement a concrete Migration, one must often only\n    implement the ``_applyToStream`` method.\n    \"\"\"\n\n    fromVersion = \"x.x.x\"\n    toVersion = \"x.x.x\"\n\n    def __init__(self, stream=None, path=None):\n        if not (bool(stream) ^ bool(path)):\n            # XOR\n            raise RuntimeError(\"Stream and path inputs to migration are mutually exclusive. 
Choose one or the other.\")\n        self.stream = stream\n        self.path = path\n\n    def __repr__(self):\n        return f\"<Migration from {self.fromVersion}: {self.__doc__[:40]}...\"\n\n    def apply(self):\n        \"\"\"\n        Apply migration.\n\n        This is generally called from a subclass.\n        \"\"\"\n        runLog.info(f\"Applying {self}\")\n        if self.path:\n            self._loadStreamFromPath()\n        newStream = self._applyToStream()\n        if self.path:\n            self._backupOriginal()\n            self._writeNewFile(newStream)\n        return newStream\n\n    def _loadStreamFromPath(self):\n        \"\"\"Common stream-loading code. Must be extended to actually load.\n\n        The operative subclasses implementing this method are below.\n        \"\"\"\n        if not os.path.exists(self.path):\n            raise ValueError(f\"File {self.path} does not exist\")\n\n    def _applyToStream(self):\n        \"\"\"Add actual migration code here in a subclass.\"\"\"\n        raise NotImplementedError()\n\n    def _backupOriginal(self):\n        # must be called after _loadStreamFromPath\n        self.stream.close()\n        shutil.move(self.path, self.path + \"-migrated\")\n\n    def _writeNewFile(self, newStream):\n        i = 0\n        while os.path.exists(self.path):\n            # don't overwrite files (could be blueprints)\n            name, ext = os.path.splitext(self.path)\n            self.path = name + f\"{i}\" + ext\n            i += 1\n\n        with open(self.path, \"w\") as f:\n            f.write(newStream.read())\n\n\nclass BlueprintsMigration(Migration):\n    \"\"\"Migration for blueprints input.\"\"\"\n\n    def _loadStreamFromPath(self):\n        from armi.physics.neutronics.settings import CONF_LOADING_FILE\n\n        Migration._loadStreamFromPath(self)\n        cs = caseSettings.Settings(fName=self.path)\n        self.path = cs[CONF_LOADING_FILE]\n        self.stream = open(self.path)\n\n\nclass 
SettingsMigration(Migration):\n    \"\"\"Migration for settings input.\"\"\"\n\n    def _loadStreamFromPath(self):\n        Migration._loadStreamFromPath(self)\n        self.stream = open(self.path)\n\n\nclass DatabaseMigration(Migration):\n    \"\"\"Migration for db output.\"\"\"\n\n    pass\n"
  },
  {
    "path": "armi/migration/m0_1_3.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Cleans up blueprints.\"\"\"\n\nimport io\nimport re\n\nfrom armi import runLog\nfrom armi.migration.base import BlueprintsMigration\n\n\nclass RemoveCentersFromBlueprints(BlueprintsMigration):\n    \"\"\"Removes now-invalid `centers:` lines from auto-generated component inputs.\"\"\"\n\n    fromVersion = \"0.1.2\"\n    toVersion = \"0.1.3\"\n\n    def _applyToStream(self):\n        runLog.info(\"Removing `centers:` sections.\")\n        migrated = []\n        for line in self.stream.read().split(\"\\n\"):\n            if re.search(r\"^\\s*centers:\\s*$\", line):\n                continue\n            migrated.append(line)\n        result = \"\\n\".join(migrated)\n        return io.StringIO(result)\n\n\nclass UpdateElementalNuclides(BlueprintsMigration):\n    \"\"\"Update elemental nuclide flags.\"\"\"\n\n    fromVersion = \"0.1.2\"\n    toVersion = \"0.1.3\"\n\n    swaps = (\n        (\"NA23\", \"NA\"),\n        (\"MN55\", \"MN\"),\n        (\"HE4\", \"HE\"),\n        (\"W182\", \"W\"),\n        (\"O16\", \"O\"),\n        (\"AL27\", \"AL\"),\n        (\"N14\", \"N\"),\n    )\n    # these get absorbed into W\n    deletions = (\"W183\", \"W184\", \"W186\")\n\n    def _applyToStream(self):\n        # Change both nuclide flags as well as custom isotopics\n        # Custom isotopics: `        MN: 0.0015135`\n        # Nuclide flags: `    MN55: {burn: false, xs: 
true}`\n        migrated = []\n        for line in self.stream.read().split(\"\\n\"):\n            for deletion in self.deletions:\n                if re.search(r\"^\\s*{0}: \".format(deletion), line):\n                    continue\n            for swapFrom, swapTo in self.swaps:\n                line = re.sub(\n                    r\"^(\\s+)({0})(:.+)\".format(swapFrom),\n                    r\"\\1{0}\\3\".format(swapTo),\n                    line,\n                )\n            migrated.append(line)\n        result = \"\\n\".join(migrated)\n        return io.StringIO(result)\n"
  },
  {
    "path": "armi/migration/m0_1_6.py",
    "content": "# Copyright 2021 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Migrate ARMI settings that have alphanumeric location labels to new numeric mode.\"\"\"\n\nimport io\nimport re\n\nfrom armi import runLog\nfrom armi.migration.base import SettingsMigration\nfrom armi.settings import caseSettings, settingsIO\nfrom armi.utils.units import ASCII_LETTER_A, ASCII_ZERO\n\nAXIAL_CHARS = [\n    chr(asciiCode)\n    for asciiCode in (\n        list(range(ASCII_LETTER_A, ASCII_LETTER_A + 26))\n        + list(range(ASCII_ZERO, ASCII_ZERO + 10))\n        + list(range(ASCII_LETTER_A + 26, ASCII_LETTER_A + 32 + 26))\n    )\n]\n\n\nclass ConvertAlphanumLocationSettingsToNum(SettingsMigration):\n    \"\"\"Convert old location label values to new style.\"\"\"\n\n    fromVersion = \"0.1.6\"\n    toVersion = \"0.1.7\"\n\n    def _applyToStream(self):\n        cs = caseSettings.Settings()\n        reader = settingsIO.SettingsReader(cs)\n        reader.readFromStream(self.stream)\n\n        if reader.invalidSettings:\n            runLog.info(\n                \"The following deprecated settings will be deleted:\\n  * {}\".format(\n                    \"\\n  * \".join(list(reader.invalidSettings))\n                )\n            )\n\n        cs = _modify_settings(cs)\n        writer = settingsIO.SettingsWriter(cs)\n        newStream = io.StringIO()\n        writer.writeYaml(newStream)\n        newStream.seek(0)\n        return newStream\n\n\ndef 
_modify_settings(cs):\n    if cs[\"detailAssemLocationsBOL\"]:\n        newLocs = []\n        for loc in cs[\"detailAssemLocationsBOL\"]:\n            if \"-\" not in loc:\n                # assume it is old style assem location.\n                i, j, _k = getIndicesFromDIF3DStyleLocatorLabel(loc)\n                newLoc = f\"{i:03d}-{j:03d}\"\n                runLog.info(f\"Converting old-style location label `{loc}` to `{newLoc}`, assuming hex geom\")\n                loc = newLoc\n            newLocs.append(loc)\n\n        cs = cs.modified(newSettings={\"detailAssemLocationsBOL\": newLocs})\n\n    return cs\n\n\ndef getIndicesFromDIF3DStyleLocatorLabel(label):\n    \"\"\"Convert a ring-based label like A2003B into 1-based ring, location indices.\"\"\"\n    locMatch = re.search(r\"([A-Z]\\d)(\\d\\d\\d)([A-Z]?)\", label)\n    if locMatch:\n        # we have a valid location label. Process it and set parameters\n        # convert A4 to 04, B2 to 12, etc.\n        ring = locMatch.group(1)\n        posLabel = locMatch.group(2)\n        axLabel = locMatch.group(3)\n        firstDigit = ord(ring[0]) - ASCII_LETTER_A\n        if firstDigit < 10:\n            i = int(\"{0}{1}\".format(firstDigit, ring[1]))\n        else:\n            raise RuntimeError(\"invalid label {0}. 1st character too large.\".format(label))\n        j = int(posLabel)\n        if axLabel:\n            k = AXIAL_CHARS.index(axLabel)\n        else:\n            k = None\n        return i, j, k\n\n    raise RuntimeError(\"No Indices found for DIF3D-style label: {0}\".format(label))\n"
  },
  {
    "path": "armi/migration/tests/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"
  },
  {
    "path": "armi/migration/tests/test_m0_1_6.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Test Locationlabel migration.\"\"\"\n\nimport io\nimport unittest\n\nfrom armi.migration.m0_1_6 import ConvertAlphanumLocationSettingsToNum\nfrom armi.settings import caseSettings\nfrom armi.settings.settingsIO import SettingsReader, SettingsWriter\n\n\nclass TestMigration(unittest.TestCase):\n    def test_locationLabelMigration(self):\n        \"\"\"Make a setting with an old value and make sure it migrates to expected new value.\"\"\"\n        cs = caseSettings.Settings()\n        newSettings = {\"detailAssemLocationsBOL\": [\"B1012\"]}\n        cs = cs.modified(newSettings=newSettings)\n\n        writer = SettingsWriter(cs)\n        stream = io.StringIO()\n        writer.writeYaml(stream)\n        stream.seek(0)\n\n        converter = ConvertAlphanumLocationSettingsToNum(stream=stream)\n        newCs = caseSettings.Settings()\n        reader = SettingsReader(newCs)\n        reader.readFromStream(converter.apply())\n        self.assertEqual(newCs[\"detailAssemLocationsBOL\"][0], \"011-012\")\n"
  },
  {
    "path": "armi/migration/tests/test_migration_base.py",
    "content": "# Copyright 2023 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Test base migration classes.\"\"\"\n\nimport os\nimport unittest\n\nfrom armi.migration.base import Migration, SettingsMigration\nfrom armi.tests import TEST_ROOT\n\n\nclass TestMigrationBases(unittest.TestCase):\n    def test_basic_validation(self):\n        with self.assertRaises(RuntimeError):\n            _m = Migration(None, None)\n\n        with self.assertRaises(RuntimeError):\n            _m = Migration(\"fake_stream\", \"fake_path\")\n\n        Migration(\"fake_stream\", None)\n        m = Migration(None, \"fake_path\")\n        with self.assertRaises(ValueError):\n            m._loadStreamFromPath()\n\n\nclass TestSettingsMigration(unittest.TestCase):\n    def test_loadStreamFromPath(self):\n        file_path = os.path.join(TEST_ROOT, \"armiRun.yaml\")\n        m = SettingsMigration(None, file_path)\n        m._loadStreamFromPath()\n        self.assertIsNotNone(m.stream)\n"
  },
  {
    "path": "armi/mpiActions.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis module provides an abstract class to be used to implement \"MPI actions.\".\n\nMPI actions are tasks, activities, or work that can be executed on the worker nodes. The standard\nworkflow is essentially that the primary node creates an :py:class:`~armi.mpiActions.MpiAction`,\nsends it to the workers, and then both the primary and the workers\n:py:meth:`invoke() <armi.mpiActions.MpiAction.invoke>` together. For example:\n\n.. 
list-table:: Sample MPI Action Workflow\n   :widths: 5 60 35\n   :header-rows: 1\n\n   * - Step\n     - Code\n     - Notes\n   * - 1\n     - **primary**: :py:class:`distributeState = DistributeStateAction() <armi.mpiActions.MpiAction>`\n\n       **worker**: :code:`action = context.MPI_COMM.bcast(None, root=0)`\n     - **primary**: Initializing a distribute state action.\n\n       **worker**: Waiting for something to do, as determined by the primary, this happens within the\n       worker's :py:meth:`~armi.operators.MpiOperator.workerOperate`.\n   * - 2\n     - **primary**: :code:`context.MPI_COMM.bcast(distributeState, root=0)`\n\n       **worker**: :code:`action = context.MPI_COMM.bcast(None, root=0)`\n     - **primary**: Broadcasts a distribute state action to all the worker nodes\n\n       **worker**: Receives the action from the primary, which is a\n       :py:class:`~armi.mpiActions.DistributeStateAction`.\n   * - 3\n     - **primary**: :code:`distributeState.invoke(self.o, self.r, self.cs)`\n\n       **worker**: :code:`action.invoke(self.o, self.r, self.cs)`\n     - Both invoke the action, and are in sync. Any broadcast or receive within the action should\n       also be synced up.\n\nIn order to create a new, custom MPI Action, inherit from :py:class:`~armi.mpiActions.MpiAction`,\nand override the :py:meth:`~armi.mpiActions.MpiAction.invokeHook` method.\n\"\"\"\n\nimport collections\nimport gc\nimport math\nimport pickle\nimport timeit\n\nfrom armi import context, interfaces, runLog, settings, utils\nfrom armi.reactor import reactors\nfrom armi.reactor.parameters import parameterDefinitions\nfrom armi.utils import iterables, tabulate\n\n\nclass MpiAction:\n    \"\"\"Base of all MPI actions.\n\n    MPI Actions are tasks that can be executed without needing lots of other\n    information. 
When a worker node sits in its main loop, and receives an MPI Action, it will\n    simply call :py:meth:`~armi.mpiActions.MpiAction.invoke`.\n    \"\"\"\n\n    def __init__(self):\n        self.o = None\n        self.r = None\n        self.cs = None\n        self.serial = False\n        # items can be set to exclusive if they will take considerably longer\n        # they will be queued first, and the CPUs for this action will not\n        # be used for any other purpose (except when number of exclusive actions > num CPU groups)\n        self.runActionExclusive = False\n        # lower number is higher; halfway between 1-10.. probably dont need more\n        # than 10 priorities but negative nums work too...\n        self.priority = 5\n\n    @property\n    def parallel(self):\n        return not self.serial\n\n    @classmethod\n    def invokeAsMaster(cls, o, r, cs):\n        \"\"\"Simplified method to call from the primary process.\n\n        This can be used in place of:\n\n            someInstance = MpiAction()\n            someInstance = COMM_WORLD.bcast(someInstance, root=0)\n            someInstance.invoke(o, r, cs)\n\n        Interestingly, the code above can be used in two ways:\n\n        1. Both the primary and worker can call the above code at the same time, or\n        2. the primary can run the above code, which will be handled by the worker's main loop.\n\n        Option number 2 is the most common usage.\n\n        .. warning:: This method will not work if the constructor (i.e. :code:`__init__`) requires\n            additional arguments. 
Since the method body is so simple, it is strong discouraged to\n            add a :code:`*args` or :code:`**kwargs` arguments to this method.\n\n        Parameters\n        ----------\n        o : :py:class:`armi.operators.Operator`\n            If an operator is not necessary, supply :code:`None`.\n        r : :py:class:`armi.operators.Reactor`\n            If a reactor is not necessary, supply :code:`None`.\n        \"\"\"\n        instance = cls()\n        instance.broadcast()\n        return instance.invoke(o, r, cs)\n\n    def _mpiOperationHelper(self, obj, mpiFunction):\n        \"\"\"Strips off the operator, reactor, cs from the mpiAction before.\"\"\"\n        if obj is None or obj is self:\n            # prevent sending o, r, and cs, they should be handled appropriately by the other nodes\n            # reattach with finally\n            obj = self\n            o, r, cs = self.o, self.r, self.cs\n            self.o = self.r = self.cs = None\n        try:\n            return mpiFunction(obj, root=0)\n        except pickle.PicklingError as error:\n            runLog.error(\"Failed to {} {}.\".format(mpiFunction.__name__, obj))\n            runLog.error(error)\n            raise\n        finally:\n            if obj is self:\n                self.o, self.r, self.cs = o, r, cs\n\n    def broadcast(self, obj=None):\n        \"\"\"\n        A wrapper around ``bcast``, on the primary node can be run with an equals sign, so that it\n        can be consistent within both primary and worker nodes.\n\n        Parameters\n        ----------\n        obj :\n            This is any object that can be broadcast, if it is None, then it will broadcast itself,\n            which triggers it to run on the workers (assuming the workers are in the worker main loop.\n\n        See Also\n        --------\n        armi.operators.operator.OperatorMPI.workerOperate : receives this on the workers and calls ``invoke``\n\n        Notes\n        -----\n        The standard ``bcast`` 
method creates a new instance even for the root process. Consequently,\n        when passing an object, references can be broken to the original object. Therefore, this\n        method, returns the original object when called by the primary node, or the broadcasted\n        object when called on the worker nodes.\n        \"\"\"\n        if self.serial:\n            return obj if obj is not None else self\n        if context.MPI_SIZE > 1:\n            result = self._mpiOperationHelper(obj, context.MPI_COMM.bcast)\n        # the following if-branch prevents the creation of duplicate objects on the primary node\n        # if the object is large with lots of links, it is prudent to call gc.collect()\n        if obj is None and context.MPI_RANK == 0:\n            return self\n        elif context.MPI_RANK == 0:\n            return obj\n        else:\n            return result\n\n    def gather(self, obj=None):\n        \"\"\"A wrapper around ``MPI_COMM.gather``.\n\n        Parameters\n        ----------\n        obj :\n            This is any object that can be gathered, if it is None, then it will gather itself.\n\n        Notes\n        -----\n        The returned list will contain a reference to the original gathered object, without making a copy of it.\n        \"\"\"\n        if self.serial:\n            return [obj if obj is not None else self]\n        if context.MPI_SIZE > 1:\n            result = self._mpiOperationHelper(obj, context.MPI_COMM.gather)\n            if context.MPI_RANK == 0:\n                # this cannot be result[0] = obj or self, because 0.0, 0, [] all eval to False\n                if obj is None:\n                    result[0] = self\n                else:\n                    result[0] = obj\n            else:\n                result = []\n        else:\n            result = [obj if obj is not None else self]\n        return result\n\n    def invoke(self, o, r, cs):\n        \"\"\"\n        This method is called by worker nodes, and passed 
the worker node's operator, reactor and\n        settings file.\n\n        Parameters\n        ----------\n        o : :py:class:`armi.operators.operator.Operator`\n            the operator for this process\n        r : :py:class:`armi.reactor.reactors.Reactor`\n            the reactor represented in this process\n        cs : :py:class:`armi.settings.caseSettings.Settings`\n            the case settings\n\n        Returns\n        -------\n        result : object\n            result from invokeHook\n        \"\"\"\n        self.o = o\n        self.r = r\n        self.cs = cs\n        return self.invokeHook()\n\n    @staticmethod\n    def mpiFlatten(allCPUResults):\n        \"\"\"\n        Flatten results to the same order they were in before making a list of mpiIter results.\n\n        See Also\n        --------\n        mpiIter : used for distributing objects/tasks\n        \"\"\"\n        return iterables.flatten(allCPUResults)\n\n    @staticmethod\n    def mpiIter(objectsForAllCoresToIter):\n        \"\"\"\n        Generate the subset of objects one node is responsible for in MPI.\n\n        Notes\n        -----\n        Each CPU will get similar number of objects. E.G. 
if there are 12 objects and 5\n        CPUs, the first 2 CPUs will get 3 objects and the last 3 CPUs will get 2.\n\n        Parameters\n        ----------\n        objectsForAllCoresToIter: list\n            List of all objects that need to have an MPI calculation performed on.\n            Note, that since len() is needed this method cannot accept a generator.\n\n        See Also\n        --------\n        mpiFlatten : used for collecting results\n        \"\"\"\n        ntasks = len(objectsForAllCoresToIter)\n        numLocalObjects, deficit = divmod(ntasks, context.MPI_SIZE)\n        if deficit > context.MPI_RANK:\n            numLocalObjects += 1\n            first = context.MPI_RANK * numLocalObjects\n        else:\n            first = context.MPI_RANK * numLocalObjects + deficit\n\n        for objIndex in range(first, first + numLocalObjects):\n            yield objectsForAllCoresToIter[objIndex]\n\n    def invokeHook(self):\n        \"\"\"This method must be overridden in subclasses.\n\n        This method is called by worker nodes, and has access to the worker node's operator, reactor, and settings\n        (through :code:`self.o`, :code:`self.r`, and :code:`self.cs`). It must return a boolean value of :code:`True` or\n        :code:`False`, otherwise the worker node will raise an exception and terminate execution.\n\n        Returns\n        -------\n        result : object\n            Dependent on implementation\n        \"\"\"\n        raise NotImplementedError()\n\n\ndef runActions(o, r, cs, actions, numPerNode=None, serial=False):\n    \"\"\"Run a series of MpiActions in parallel, or in series if :code:`serial=True`.\n\n    Notes\n    -----\n    The number of actions DOES NOT need to match :code:`context.MPI_SIZE`.\n\n    Calling this method may invoke MPI Split which will change the MPI_SIZE during the action. 
This allows someone to\n    call MPI operations without being blocked by tasks which are not doing the same thing.\n    \"\"\"\n    if not context.MPI_DISTRIBUTABLE or serial:\n        return runActionsInSerial(o, r, cs, actions)\n\n    useForComputation = [True] * context.MPI_SIZE\n    if numPerNode is not None:\n        if numPerNode < 1:\n            raise ValueError(\"numPerNode must be >= 1\")\n        numThisNode = {nodeName: 0 for nodeName in context.MPI_NODENAMES}\n        for rank, nodeName in enumerate(context.MPI_NODENAMES):\n            # if we have more processors than tasks, disable the extra\n            useForComputation[rank] = numThisNode[nodeName] < numPerNode\n            numThisNode[nodeName] += 1\n\n    queue, numBatches = _makeQueue(actions, useForComputation)\n    runLog.extra(f\"Running {len(actions)} MPI actions in parallel over {numBatches} batches\")\n    results = []\n    batchNum = 0\n    while queue:\n        actionsThisRound = []\n        batchNum += 1\n        runLog.extra(f\"MPI actions, batch {batchNum} of {numBatches}:\\n\")\n        for useRank in useForComputation:\n            actionsThisRound.append(queue.pop(0) if useRank and queue else None)\n        distrib = distributeActions(actionsThisRound, useForComputation)\n        distrib.broadcast()\n        results.append(distrib.invoke(o, r, cs))\n    return results\n\n\ndef runBatchedActions(o, r, cs, actionsByNode, serial=False):\n    \"\"\"Run a series of MpiActions in parallel, or in series if :code:`serial=True`.\n\n    Notes\n    -----\n    This method takes a set of actions that have been batched by the user beforehand.\n\n    This is useful for heterogeneous work packages where some tasks have significantly larger\n    or smaller memory requirements. 
The user can place an appropriate amount of work on each node.\n    \"\"\"\n    if not context.MPI_DISTRIBUTABLE or serial:\n        actions = []\n        for _node, nodeActions in actionsByNode.items():\n            actions.extend(nodeActions)\n        return runActionsInSerial(o, r, cs, actions)\n\n    # count how many actions will run on each node\n    nodes = set(context.MPI_NODENAMES)\n    numToRunOnThisNode = {nodeName: 0 for nodeName in context.MPI_NODENAMES}\n    for nodeName in nodes:\n        numToRunOnThisNode[nodeName] = len(actionsByNode.get(nodeName, []))\n\n    # determine which ranks will run the actions\n    numAssigned = {nodeName: 0 for nodeName in nodes}\n    useForComputation = [True] * len(context.MPI_NODENAMES)\n    for rank, nodeName in enumerate(context.MPI_NODENAMES):\n        # if we have more processors than tasks, disable the extra\n        useForComputation[rank] = numAssigned[nodeName] < numToRunOnThisNode[nodeName]\n        if useForComputation[rank]:\n            numAssigned[nodeName] += 1\n\n    # check that we do not request more tasks than processors on a node\n    for nodeName in nodes:\n        if numToRunOnThisNode[nodeName] > numAssigned[nodeName]:\n            msg = (\n                f\"There are more actions ({numToRunOnThisNode[nodeName]}) than ranks available \"\n                f\"({numAssigned[nodeName]}) on {nodeName}!\"\n            )\n            runLog.error(msg)\n            raise ValueError(msg)\n\n    totalActions = sum(len(actions) for node, actions in actionsByNode.items())\n    runLog.extra(f\"Running {totalActions} MPI actions in parallel over {len(actionsByNode)} nodes.\")\n\n    results = []\n    actionsThisRound = []\n    for rank, nodeName in enumerate(context.MPI_NODENAMES):\n        queue = actionsByNode.get(nodeName, [])\n        actionsThisRound.append(queue.pop(0) if useForComputation[rank] and queue else None)\n\n    distrib = distributeActions(actionsThisRound, useForComputation)\n    
distrib.broadcast()\n    results.append(distrib.invoke(o, r, cs))\n\n    return results\n\n\ndef distributeActions(actionsThisRound, useForComputation):\n    useForComputation = _disableForExclusiveTasks(actionsThisRound, useForComputation)\n    realActions = [\n        (context.MPI_NODENAMES[rank], rank, act) for rank, act in enumerate(actionsThisRound) if act is not None\n    ]\n    tableText = tabulate.tabulate(realActions, headers=[\"Nodename\", \"Rank\", \"Action\"])\n    runLog.extra(f\"Distributing {len(realActions)} MPI actions for parallel processing:\\n{tableText}\")\n    return DistributionAction(actionsThisRound)\n\n\ndef _disableForExclusiveTasks(actionsThisRound, useForComputation):\n    # disable processors that are exclusive for next\n    indicesToDisable = [\n        i for i, action in enumerate(actionsThisRound) if action is not None and action.runActionExclusive\n    ]\n    for i in indicesToDisable:\n        useForComputation[i] = False\n    return useForComputation\n\n\ndef _makeQueue(actions, useForComputation):\n    \"\"\"\n    Sort actions by priority in a queue, if more exclusive than CPUs makes all non-exclusive.\n\n    Notes\n    -----\n    All exclusive actions will occur first regardless of the priority.\n    All non-exclusive actions will be after all exclusive actions regardless of the priority.\n    Within these 2 bins, priority matters.\n    In the event that more exclusive actions are requested than CPUs - 1, all actions will\n    be changed to non-exclusive but previously evaluated order will remain.\n    CPUs - 1 is to reserve at least 1 CPU for non-exclusive actions.\n    \"\"\"\n\n    def sortActionPriority(action):\n        # exclusive actions first and those groups of CPUs only get 1 action\n        exclusivePriority = 1 if action.runActionExclusive else 2\n        return (exclusivePriority, action.priority)\n\n    queue = list(sorted(actions, key=sortActionPriority))\n    minCPUsForRemainingTasks = 1\n    nExclusiveCPUs = 
len([action for action in queue if action.runActionExclusive])\n    nCPUsAvailable = len([rank for rank in useForComputation if rank])\n    if nExclusiveCPUs + minCPUsForRemainingTasks > nCPUsAvailable:\n        # there are more exclusive tasks than sets of CPUs, so just make them all\n        # non-exclusive and evenly balance them\n        for action in queue:\n            action.runActionExclusive = False\n        numBatches = int(math.ceil(len(actions) / float(nCPUsAvailable)))\n    else:\n        nLeftoverCPUs = nCPUsAvailable - nExclusiveCPUs\n        nLeftoverActions = len(actions) - nExclusiveCPUs\n        numBatches = int(math.ceil(nLeftoverActions / nLeftoverCPUs))\n    return queue, numBatches\n\n\ndef runActionsInSerial(o, r, cs, actions):\n    \"\"\"Run a series of MpiActions in serial.\n\n    Notes\n    -----\n    This will set the `MpiAction.serial` attribute to :code:`True`, and the `MpiAction.broadcast` and `MpiAction.gather`\n    methods will basically just return the value being supplied.\n    \"\"\"\n    results = []\n    runLog.extra(\"Running {} MPI actions in serial\".format(len(actions)))\n    numActions = len(actions)\n    for aa, action in enumerate(actions):\n        canDistribute = context.MPI_DISTRIBUTABLE\n        action.serial = True\n        context.MPI_DISTRIBUTABLE = False\n        runLog.extra(\"Running action {} of {}: {}\".format(aa + 1, numActions, action))\n        results.append(action.invoke(o, r, cs))\n        action.serial = False  # return to original state\n        context.MPI_DISTRIBUTABLE = canDistribute\n    return results\n\n\nclass DistributionAction(MpiAction):\n    \"\"\"\n    This MpiAction scatters the workload of multiple actions to available resources.\n\n    Notes\n    -----\n    This currently only works from the root (of COMM_WORLD). 
Eventually, it would be nice to make\n    it possible for sub-tasks to manage their own communicators and spawn their own work within some\n    sub-communicator.\n\n    This performs an MPI Split operation and takes over the context.MPI_COMM and associated variables.\n    For this reason, it is possible that when someone thinks they have distributed information to all\n    nodes, it may only be a subset that was necessary to perform the number of actions needed by this\n    DistributionAction.\n    \"\"\"\n\n    def __init__(self, actions):\n        MpiAction.__init__(self)\n        self._actions = actions\n\n    def __reduce__(self):\n        \"\"\"Reduce prevents sending unnecessary actions to others; after all, we only want to scatter.\n\n        Consequently, the worker nodes' _actions will be None.\n        \"\"\"\n        return DistributionAction, (None,)\n\n    def invokeHook(self):\n        \"\"\"\n        Overrides invokeHook to distribute work amongst available resources as requested.\n\n        Notes\n        -----\n        Two things about this method make it non-recursive.\n        \"\"\"\n        canDistribute = context.MPI_DISTRIBUTABLE\n        mpiComm = context.MPI_COMM\n        mpiRank = context.MPI_RANK\n        mpiSize = context.MPI_SIZE\n        mpiNodeNames = context.MPI_NODENAMES\n\n        if self.cs[\"verbosity\"] == \"debug\" and mpiRank == 0:\n            runLog.debug(\"Printing diagnostics for MPI actions!\")\n            objectCountDict = collections.defaultdict(int)\n            for debugAction in self._actions:\n                utils.classesInHierarchy(debugAction, objectCountDict)\n                for objekt, count in objectCountDict.items():\n                    runLog.debug(\"There are {} {} in MPI action {}\".format(count, objekt, debugAction))\n\n        actionResult = None\n        try:\n            action = mpiComm.scatter(self._actions, root=0)\n            # create a new communicator that only has these specific processes 
running\n            hasAction = action is not None\n            context.MPI_COMM = mpiComm.Split(int(hasAction))\n            context.MPI_RANK = context.MPI_COMM.Get_rank()\n            context.MPI_SIZE = context.MPI_COMM.Get_size()\n            context.MPI_DISTRIBUTABLE = context.MPI_SIZE > 1\n            context.MPI_NODENAMES = context.MPI_COMM.allgather(context.MPI_NODENAME)\n            if hasAction:\n                actionResult = action.invoke(self.o, self.r, self.cs)\n        finally:\n            # restore the global variables\n            context.MPI_DISTRIBUTABLE = canDistribute\n            context.MPI_COMM = mpiComm\n            context.MPI_RANK = mpiRank\n            context.MPI_SIZE = mpiSize\n            context.MPI_NODENAMES = mpiNodeNames\n\n        return actionResult\n\n\nclass MpiActionError(Exception):\n    \"\"\"Exception class raised when error conditions occur during an MpiAction.\"\"\"\n\n\nclass DistributeStateAction(MpiAction):\n    def __init__(self, skipInterfaces=False):\n        MpiAction.__init__(self)\n        self._skipInterfaces = skipInterfaces\n\n    def invokeHook(self):\n        \"\"\"Sync up all nodes with the reactor, the cs, and the interfaces.\n\n        Notes\n        -----\n        This is run by all workers and the primary any time the code needs to sync all processors.\n        \"\"\"\n        if context.MPI_SIZE <= 1:\n            runLog.extra(\"Not distributing state because there is only one processor\")\n            return\n\n        # Detach phase:\n        # The Reactor and the interfaces have links to the Operator, which contains Un-MPI-able objects\n        # like the MPI Comm and the SQL database connections.\n        runLog.info(\"Distributing State\")\n        start = timeit.default_timer()\n        try:\n            cs = self._distributeSettings()\n\n            self._distributeReactor(cs)\n            DistributeStateAction._distributeParamAssignments()\n\n            if self._skipInterfaces:\n             
   self.o.reattach(self.r, cs)\n            else:\n                self._distributeInterfaces()\n\n            # Lastly, make sure the reactor knows it is up to date. The operator/interface\n            # attachment may invalidate some of the cache, but since all the underlying data is the\n            # same, ultimately all state should be (initially) the same.\n            self.r._markSynchronized()\n\n        except (pickle.PicklingError, TypeError) as error:\n            runLog.error(\"Failed to transmit on distribute state root MPI bcast\")\n            runLog.error(error)\n            # workers are still waiting for a reactor object\n            if context.MPI_RANK == 0:\n                context.MPI_COMM.bcast(\"quit\")  # try to get the workers to quit\n\n            raise\n\n        if context.MPI_RANK != 0:\n            self.r.core.regenAssemblyLists()\n\n        # check to make sure that everything has been properly reattached\n        if self.r.core.getFirstBlock().core.r is not self.r:\n            raise RuntimeError(\"Block.core.r is not self.r. 
Reattach the blocks!\")\n\n        beforeCollection = timeit.default_timer()\n\n        # force collection; we've just created a bunch of objects that don't need to be used again.\n        runLog.debug(\"Forcing garbage collection.\")\n        gc.collect()\n\n        stop = timeit.default_timer()\n        runLog.extra(\n            \"Distributed state in {}s, garbage collection took {}s\".format(\n                beforeCollection - start, stop - beforeCollection\n            )\n        )\n\n    def _distributeSettings(self):\n        if context.MPI_RANK == 0:\n            runLog.debug(\"Sending the settings object\")\n        self.cs = cs = self.broadcast(self.o.cs)\n        if isinstance(cs, settings.Settings):\n            runLog.setVerbosity(cs[\"verbosity\"] if context.MPI_RANK == 0 else cs[\"branchVerbosity\"])\n            runLog.debug(\"Received settings object\")\n        else:\n            raise RuntimeError(\"Failed to transmit settings, received: {}\".format(cs))\n\n        if context.MPI_RANK != 0:\n            self.o.cs = cs\n        return cs\n\n    def _distributeReactor(self, cs):\n        runLog.debug(\"Sending the Reactor object\")\n        r = self.broadcast(self.r)\n\n        if isinstance(r, reactors.Reactor):\n            runLog.debug(\"Received reactor\")\n        else:\n            raise RuntimeError(\"Failed to transmit reactor, received: {}\".format(r))\n\n        if context.MPI_RANK == 0:\n            # on the primary node this unfortunately created a __deepcopy__ of the reactor, delete it\n            del r\n        else:\n            # maintain original reactor object on primary\n            self.r = r\n            self.o.r = r\n\n        self.r.o = self.o\n\n        runLog.debug(f\"The reactor has {len(self.r.core)} assemblies\")\n        # attach here so any interface actions use a properly-setup reactor.\n        self.o.reattach(self.r, cs)  # sets r and cs\n\n    @staticmethod\n    def _distributeParamAssignments():\n        data = 
dict()\n        if context.MPI_RANK == 0:\n            data = {\n                (pName, pdType.__name__): pDef.assigned\n                for (\n                    pName,\n                    pdType,\n                ), pDef in parameterDefinitions.ALL_DEFINITIONS.items()\n            }\n\n        data = context.MPI_COMM.bcast(data, root=0)\n\n        if context.MPI_RANK != 0:\n            for (pName, pdType), pDef in parameterDefinitions.ALL_DEFINITIONS.items():\n                pDef.assigned = data[pName, pdType.__name__]\n\n    def _distributeInterfaces(self):\n        \"\"\"\n        Distribute the interfaces to all MPI nodes.\n\n        Interface copy description\n        Since interfaces store information that can influence a calculation, it is important\n        in branch searches to make sure that no information is carried forward from these\n        runs on either the primary node or the workers.  However, there are interfaces that\n        cannot be distributed, making this a challenge.  To solve this problem, any interface\n        that cannot be distributed is simply re-initialized.  If any information needs to be\n        given to the worker nodes on a non-distributable interface, additional function definitions\n        (and likely soul searching as to why needed distributable information is on a\n        non-distributable interface) are required to pass the information around.\n\n        See Also\n        --------\n        armi.interfaces.Interface.preDistributeState : runs on primary before DS\n        armi.interfaces.Interface.postDistributeState : runs on primary after DS\n        armi.interfaces.Interface.interactDistributeState : runs on workers after DS\n        \"\"\"\n        if context.MPI_RANK == 0:\n            # These run on the primary node. 
(Worker nodes run synchronized code below)\n            toRestore = {}\n            for i in self.o.getInterfaces():\n                if i.distributable() == interfaces.Interface.Distribute.DUPLICATE:\n                    runLog.debug(\"detaching interface {0}\".format(i.name))\n                    i.detachReactor()\n                    toRestore[i] = i.preDistributeState()\n\n            # Verify that the interface stacks are identical.\n            runLog.debug(\"Sending the interface names and flags\")\n            _dumIList = self.broadcast([(i.name, i.distributable()) for i in self.o.getInterfaces()])\n\n            # transmit interfaces\n            for i in self.o.getInterfaces():\n                # avoid sending things that don't pickle, like the database.\n                if i.distributable() == interfaces.Interface.Distribute.DUPLICATE:\n                    runLog.debug(\"Sending the interface {0}\".format(i))\n                    _idum = self.broadcast(i)  # don't send the reactor or operator\n                    i.postDistributeState(toRestore[i])\n                    i.attachReactor(self.o, self.r)\n        else:\n            # These run on the worker nodes.\n            # verify identical interface stack\n            # This list is (interfaceName, distributable) tuples)\n            interfaceList = self.broadcast(None)\n            for iName, distributable in interfaceList:\n                iOld = self.o.getInterface(iName)\n                if distributable == interfaces.Interface.Distribute.DUPLICATE:\n                    # expect a transmission of the interface as a whole.\n                    runLog.debug(\"Receiving new {0}\".format(iName))\n                    iNew = self.broadcast(None)\n                    runLog.debug(\"Received {0}\".format(iNew))\n                    if iNew == \"quit\":\n                        return\n                    self.o.removeInterface(iOld)\n                    self.o.addInterface(iNew)\n                    
iNew.interactDistributeState()\n                elif distributable == interfaces.Interface.Distribute.NEW:\n                    runLog.debug(\"Initializing new interface {0}\".format(iName))\n                    # make a fresh instance of the non-transmittable interface.\n                    self.o.removeInterface(iOld)\n                    iNew = iOld.__class__(self.r, self.cs)\n                    if not iNew:\n                        for i in self.o.getInterfaces():\n                            runLog.warning(i)\n                        raise RuntimeError(\n                            \"Non-distributable interface {0} exists on the primary MPI process \"\n                            \"but not on the workers. \"\n                            \"Cannot distribute state.\".format(iName)\n                        )\n                    self.o.addInterface(iNew)\n                    iNew.interactInit()\n                    iNew.interactBOL()\n                else:\n                    runLog.debug(\"Skipping broadcast of interface {0}\".format(iName))\n                    if iOld:\n                        iOld.interactDistributeState()\n"
  },
  {
    "path": "armi/nucDirectory/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"\nThe nucDirectory module contains tools to access nuclide information in the :py:mod:`~armi.nucDirectory.nuclideBases`\nmodule, and information for :py:mod:`~armi.nucDirectory.nuclide` module.\n\n#. :ref:`Element data <doc-elements>` - name, symbol, atomic number (Z).\n#. :ref:`Generic nuclide data <doc-nuclide-bases>` - this includes  mass, atomic number, natural abundance and various\n   names and labels that are used in ARMI for the nuclide. It also includes decay and transmutation modes.\n\n.. _doc-elements:\n\nElements\n========\n\n:py:class:`Elements <armi.nucDirectory.elements.Element>` are simple objects containing minimal information about\natomic elements. This information is loaded from a data file within ARMI; elements.dat.\n\n:py:class:`Elements <armi.nucDirectory.elements.Element>` are mainly used as a building block of the nuclide objects\n, as discussed below. 
If you need to grab an element there are three available dictionaries provided for rapid access.::\n\n    >>> r = Reactor(\"ExampleReactor\", bp)\n    >>> elements = r.nuclideBases.elements\n    >>> uranium = elements.byZ[92]\n    >>> uranium.name\n    'uranium'\n    >>> uranium.z\n    92\n\nLikewise, elements can be retrieved by their name or symbol.::\n\n    >>> ironFromZ = elements.byZ[26]\n    >>> ironFromName = elements.byName['iron']\n    >>> ironFromSymbol = elements.bySymbol['FE']\n    >>> ironFromZ == ironFromName == ironFromSymbol\n    True\n\n.. note::\n    The :py:attr:`~armi.nucDirectory.elements.Elements.byName` and\n    :py:attr:`~armi.nucDirectory.elements.Elements.bySymbol` are case specific; names are *lower case* and symbols are\n    *UPPER CASE*.\n\nThe elements are truly the *same* :py:class:`~armi.nucDirectory.elements.Element` object. The\n:py:mod:`~armi.nucDirectory` makes efficient use of the memory being used by elements and will only ever contain ~118\n:py:class:`Elements <armi.nucDirectory.elements.Element>`.::\n\n    >>> id(ironFromZ) == id(ironFromName) == id(ironFromSymbol)\n    True\n\n.. _doc-nuclide-bases:\n\nNuclide Bases\n=============\n\nThe :py:mod:`~armi.nucDirectory` allows ARMI to get information about various nuclides, like U235 or FE56. Often times\nyou need to look up cross section or densities for nuclides, or you might need the atomic weight or the natural isotopic\ndistribution. The :py:mod:`~armi.nucDirectory` is here to help.\n\nThe fundamental object of nuclide management in ARMI is the :py:class:`~armi.nucDirectory.nuclideBases.INuclide` object.\nAfter construction, they contain basic information, such as Z, A, and atomic weight (if known). Similar to\n:py:class:`Elements <armi.nucDirectory.elements.Element>`, the information is loaded from a series of data files within\nARMI. 
The data is originally from [NIST]_::\n\n    >>> r = Reactor(\"ExampleReactor\", bp)\n    >>> u235 = r.nuclideBases.byName['U235']\n    >>> u235.z\n    92\n    >>> u235.weight\n    235.0439299\n    >>> u235.a\n    235\n\n.. [NIST] http://physics.nist.gov/cgi-bin/Compositions/stand_alone.pl\n\nUpon creating a Reactor, a fully fledged ``NuclideBases`` object will be created. Inside that object there will be a\nfully instantiated ``Elements`` object and a list called\n:py:data:`nuclideBases.instances <armi.nucDirectory.nuclideBases.instances>`. The ``instances`` will\nbe filled with nuclide base objects. Nuclide bases contain a lot of basic information about a nuclide, such as the\natomic mass, atomic number (Z), the mass number (A), and the natural abundance.\n\nNuclide names, labels, and IDs\n------------------------------\nNuclides have names, labels and IDs.\n\n:py:attr:`INuclide.name <armi.nucDirectory.nuclideBases.INuclide.name>`\n    The nuclide name is what *should* be used within ARMI and ARMI-based applications. This is a human readable name such\n    as, ``U235`` or ``FE``. The names contain **only** capital letters and numbers, made up from the corresponding\n    element symbol and mass number (A).\n\n:py:attr:`INuclide.label <armi.nucDirectory.nuclideBases.INuclide.label>`\n    The nuclide label is a unique 4 character name which identifies the nuclide from all others. The label is fixed to\n    4 characters to conform with the CCCC standard files, which traditionally only allow for a maximum of 6 character\n    labels in legacy nuclear codes. Of the 6 allowable characters, 4 are reserved for the unique identifier of the\n    nuclide and 2 characters are reserved for cross section labels (i.e., AA, AB, ZA, etc.). The cross section labels\n    are based on the cross section group manager implementation within the framework. 
These labels are not necessarily\n    human readable/interpretable, but are generally the nuclide symbol followed by the last two digits of the mass\n    number (A), so the nuclide for U235 has the label ``U235``, but PU239 has the label ``PU39``.\n\nFor reference, the data used to build the nuclide bases in ARMI comes from a file called ``nuclides.dat``.\n\nIndices - rapid access\n----------------------\n\nThere are three main ways to retrieve a nuclide, which are provided depending on what information you have about a\nnuclide. For example, if you know a nuclide name, use ``NuclideBases.byName`` dictionary. There are also dictionaries\navailable for retrieving by the label, ``NuclideBases.byLabel``, and by other software-specific IDs (i.e., MCNP,\nMC2-2, and MC2-3). The software-specific labels are incorporated into the framework to support plugin developments and\nmay be extended as needed by end-users as needs arise.\n\n    >>> r = Reactor(\"testReactor\", bp)\n    >>> pu239 = r.nuclideBases.byName[\"PU239\"]\n    >>> pu239.z\n    94\n\nJust like with elements, the item retrieved from the various dictionaries are the same object.\n\n    >>> tinFromName = r.nuclideBases.byName[\"SN112\"]\n    >>> tinFromLabel = r.nuclideBases.byLabel[\"SN112\"]\n    >>> tinFromMcc2Id = r.nuclideBases.byName[\"SN1125\"]\n    >>> tinFromMcc3Id = r.nuclideBases.byLabel[\"SN1127\"]\n    >>> tinFromName == tinFromLabel == tinFromMcc2Id == tinFromMcc3Id\n    True\n    >>> id(tinFromName) == id(tinFromLabel) == id(tinFromMcc2Id) == id(tinFromMcc3Id)\n    True\n\n\"\"\"\n"
  },
  {
    "path": "armi/nucDirectory/elements.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis module provides fundamental element information to be used throughout the framework and applications.\n\n.. impl:: A tool for querying basic data for elements of the periodic table.\n    :id: I_ARMI_ND_ELEMENTS0\n    :implements: R_ARMI_ND_ELEMENTS\n\n    The :py:mod:`elements <armi.nucDirectory.elements>` module defines the\n    :py:class:`Element <armi.nucDirectory.elements.Element>` class which acts as a data structure for organizing\n    information about an individual element, including number of protons, name, chemical symbol, phase (at STP),\n    periodic table group, standard weight, and a list of isotope\n    :py:class:`nuclideBase <armi.nucDirectory.nuclideBases.NuclideBase>` instances. The module includes a factory that\n    generates the :py:class:`Element <armi.nucDirectory.elements.Element>` instances by reading from the\n    ``elements.dat`` file stored in the ARMI resources folder. When an\n    :py:class:`Element <armi.nucDirectory.elements.Element>` instance is initialized, it is added to a set of global\n    dictionaries that are keyed by number of protons, element name, and element symbol. The module includes several\n    helper functions for querying these global dictionaries.\n\nThe element class structure is outlined :ref:`here <elements-class-diagram>`.\n\n.. _elements-class-diagram:\n\n.. 
pyreverse:: armi.nucDirectory.elements\n    :align: center\n    :width: 75%\n\nExamples\n--------\n>>> elements.byZ[92]\n<Element   U (Z=92), Uranium, ChemicalGroup.ACTINIDE, ChemicalPhase.SOLID>\n\n>>> elements.bySymbol[\"U\"]\n<Element   U (Z=92), Uranium, ChemicalGroup.ACTINIDE, ChemicalPhase.SOLID>\n\n>>> elements.byName[\"Uranium\"]\n<Element   U (Z=92), Uranium, ChemicalGroup.ACTINIDE, ChemicalPhase.SOLID>\n\nRetrieve gaseous elements at Standard Temperature and Pressure (STP):\n\n>>> elements.getElementsByChemicalPhase(elements.ChemicalPhase.GAS)\n    [<Element   H (Z=1), Hydrogen, ChemicalGroup.NONMETAL, ChemicalPhase.GAS>,\n     <Element  HE (Z=2), Helium, ChemicalGroup.NOBLE_GAS, ChemicalPhase.GAS>,\n     <Element   N (Z=7), Nitrogen, ChemicalGroup.NONMETAL, ChemicalPhase.GAS>,\n     <Element   O (Z=8), Oxygen, ChemicalGroup.NONMETAL, ChemicalPhase.GAS>,\n     <Element   F (Z=9), Fluorine, ChemicalGroup.HALOGEN, ChemicalPhase.GAS>,\n     <Element  NE (Z=10), Neon, ChemicalGroup.NOBLE_GAS, ChemicalPhase.GAS>,\n     <Element  CL (Z=17), Chlorine, ChemicalGroup.HALOGEN, ChemicalPhase.GAS>,\n     <Element  AR (Z=18), Argon, ChemicalGroup.NOBLE_GAS, ChemicalPhase.GAS>,\n     <Element  KR (Z=36), Krypton, ChemicalGroup.NOBLE_GAS, ChemicalPhase.GAS>,\n     <Element  XE (Z=54), Xenon, ChemicalGroup.NOBLE_GAS, ChemicalPhase.GAS>,\n     <Element  RN (Z=86), Radon, ChemicalGroup.NOBLE_GAS, ChemicalPhase.GAS>,\n     <Element  OG (Z=118), Oganesson, ChemicalGroup.NOBLE_GAS, ChemicalPhase.GAS>]\n\n\nRetrieve elements that are classified as actinides:\n\n >>> elements.getElementsByChemicalGroup(elements.ChemicalGroup.ACTINIDE)\n    [<Element  AC (Z=89), Actinium, ChemicalGroup.ACTINIDE, ChemicalPhase.SOLID>,\n     <Element  TH (Z=90), Thorium, ChemicalGroup.ACTINIDE, ChemicalPhase.SOLID>,\n     <Element  PA (Z=91), Protactinium, ChemicalGroup.ACTINIDE, ChemicalPhase.SOLID>,\n     <Element   U (Z=92), Uranium, ChemicalGroup.ACTINIDE, ChemicalPhase.SOLID>,\n     <Element  
NP (Z=93), Neptunium, ChemicalGroup.ACTINIDE, ChemicalPhase.SOLID>,\n     <Element  PU (Z=94), Plutonium, ChemicalGroup.ACTINIDE, ChemicalPhase.SOLID>,\n     <Element  AM (Z=95), Americium, ChemicalGroup.ACTINIDE, ChemicalPhase.SOLID>,\n     <Element  CM (Z=96), Curium, ChemicalGroup.ACTINIDE, ChemicalPhase.SOLID>,\n     <Element  BK (Z=97), Berkelium, ChemicalGroup.ACTINIDE, ChemicalPhase.SOLID>,\n     <Element  CF (Z=98), Californium, ChemicalGroup.ACTINIDE, ChemicalPhase.SOLID>,\n     <Element  ES (Z=99), Einsteinium, ChemicalGroup.ACTINIDE, ChemicalPhase.SOLID>,\n     <Element  FM (Z=100), Fermium, ChemicalGroup.ACTINIDE, ChemicalPhase.SOLID>,\n     <Element  MD (Z=101), Mendelevium, ChemicalGroup.ACTINIDE, ChemicalPhase.SOLID>,\n     <Element  NO (Z=102), Nobelium, ChemicalGroup.ACTINIDE, ChemicalPhase.SOLID>,\n     <Element  LR (Z=103), Lawrencium, ChemicalGroup.ACTINIDE, ChemicalPhase.SOLID>]\n\n\n.. only:: html\n\n    For specific data on nuclides within each element, refer to the\n    :ref:`nuclide bases summary table <nuclide-bases-table>`.\n\n    .. exec::\n        from armi.nucDirectory.elements import Elements\n        from armi.utils.tabulate import tabulate\n        from dochelpers import createTable\n\n        attributes = ['z',\n                    'name',\n                    'symbol',\n                    'phase',\n                    'group',\n                    'is naturally occurring?',\n                    'is heavy metal?',\n                    'num. 
nuclides',]\n\n        def getAttributes(element):\n            return [\n                f'``{element.z}``',\n                f'``{element.name}``',\n                f'``{element.symbol}``',\n                f'``{element.phase}``',\n                f'``{element.group}``',\n                f'``{element.isNaturallyOccurring()}``',\n                f'``{element.isHeavyMetal()}``',\n                f'``{len(element.nuclides)}``',\n            ]\n\n        elements = Elements()\n        elements.factory()\n        sortedElements = sorted(elements.byZ.values())\n        return createTable(tabulate(data=[getAttributes(elem) for elem in sortedElements],\n                                    headers=attributes,\n                                    tableFmt='rst'),\n                           caption='List of elements',\n                           label='nuclide-bases-table')\n\nNotes\n-----\nCurrently, this module contains a lot of data in the global scope. But ARMI is in the process of encapsulating this\ndata, moving it out of the global scope, making it part of the reactor data model, and making it configurable via\nSettings. Pardon the mess during this transition.\n\"\"\"\n\nimport os\nfrom enum import Enum\nfrom typing import List\n\nfrom armi import context\nfrom armi.utils.units import HEAVY_METAL_CUTOFF_Z\n\nelements = None\nbyZ = None\nbyName = None\nbySymbol = None\n\n\nclass ChemicalPhase(Enum):\n    GAS = 1\n    LIQUID = 2\n    SOLID = 3\n    UNKNOWN = 4\n\n\nclass ChemicalGroup(Enum):\n    ALKALI_METAL = 1\n    ALKALINE_EARTH_METAL = 2\n    NONMETAL = 3\n    TRANSITION_METAL = 4\n    POST_TRANSITION_METAL = 5\n    METALLOID = 6\n    HALOGEN = 7\n    NOBLE_GAS = 8\n    LANTHANIDE = 9\n    ACTINIDE = 10\n    UNKNOWN = 11\n\n\nclass Element:\n    \"\"\"Represents an element defined on the Periodic Table.\"\"\"\n\n    def __init__(self, z, symbol, name, phase=\"UNKNOWN\", group=\"UNKNOWN\"):\n        \"\"\"\n        Creates an instance of an Element.\n\n        .. 
impl:: An element of the periodic table.\n            :id: I_ARMI_ND_ELEMENTS1\n            :implements: R_ARMI_ND_ELEMENTS\n\n            The :py:class:`Element <armi.nucDirectory.elements.Element>` class acts as a data structure for organizing\n            information about an individual element, including number of protons, name, chemical symbol, phase (at STP),\n            periodic table group, standard weight, and a list of isotope\n            :py:class:`nuclideBase <armi.nucDirectory.nuclideBases.NuclideBase>` instances.\n\n            The :py:class:`Element <armi.nucDirectory.elements.Element>` class has a few methods for appending\n            additional isotopes, checking whether an isotope is naturally occurring, retrieving the natural isotopic\n            abundance, or whether the element is a heavy metal.\n\n        Parameters\n        ----------\n        z : int\n            atomic number, number of protons\n        symbol : str\n            element symbol\n        name: str\n            element name\n        phase : str\n            Chemical phase of the element at standard temperature and pressure (e.g., gas, liquid, solid).\n        group : str\n            Chemical group of the element.\n        \"\"\"\n        self.z = z\n        self.symbol = symbol\n        self.name = name\n        self.phase = ChemicalPhase[phase]\n        self.group = ChemicalGroup[group]\n        self.standardWeight = None\n        self.nuclides = []\n\n    def __repr__(self):\n        return f\"<Element {self.symbol:>3s} (Z={self.z}), {self.name}, {self.group}, {self.phase}>\"\n\n    def __hash__(self):\n        return hash((self.name, self.z, self.symbol, self.phase, self.group, len(self.nuclides)))\n\n    def __lt__(self, other):\n        return self.z < other.z\n\n    def __eq__(self, other):\n        return hash(self) == hash(other)\n\n    def __iter__(self):\n        for nuc in sorted(self.nuclides):\n            yield nuc\n\n    def append(self, nuclide):\n        
\"\"\"Assigns and sorts the nuclide to the element and ensures no duplicates.\"\"\"\n        if nuclide in self.nuclides:\n            return\n        self.nuclides.append(nuclide)\n        self.nuclides = sorted(self.nuclides)\n\n    def isNaturallyOccurring(self):\n        \"\"\"Return True if the element occurs in nature.\"\"\"\n        return any([nuc.abundance > 0.0 for nuc in self.nuclides])\n\n    def getNaturalIsotopics(self):\n        \"\"\"\n        Return a list of nuclides that are naturally occurring for this element.\n\n        Notes\n        -----\n        This method will filter out any NaturalNuclideBases from the `nuclides` attribute.\n        \"\"\"\n        return [nuc for nuc in self.nuclides if nuc.abundance > 0.0 and nuc.a > 0]\n\n    def isHeavyMetal(self):\n        \"\"\"\n        Return True if all nuclides belonging to the element are heavy metals.\n\n        Notes\n        -----\n        Heavy metal in this instance is not related to an exact weight or density cut-off, but rather is designated for\n        nuclear fuel burn-up evaluations, where the initial heavy metal mass within a component should be tracked. 
It is\n        typical to include any element/nuclide above Actinium.\n        \"\"\"\n        return self.z > HEAVY_METAL_CUTOFF_Z\n\n\ndef getElementsByChemicalPhase(phase: ChemicalPhase) -> List[Element]:\n    \"\"\"Pass through to Elements.getElementsByChemicalPhase() for the global Elements object.\"\"\"\n    global elements\n    return elements.getElementsByChemicalPhase(phase)\n\n\ndef getElementsByChemicalGroup(group: ChemicalGroup) -> List[Element]:\n    \"\"\"Pass through to Elements.getElementsByChemicalGroup() for the global Elements object.\"\"\"\n    global elements\n    return elements.getElementsByChemicalGroup(group)\n\n\ndef getName(z: int = None, symbol: str = None) -> str:\n    \"\"\"Pass through to Elements.getName() for the global Elements object.\"\"\"\n    global elements\n    return elements.getName(z, symbol)\n\n\ndef getSymbol(z: int = None, name: str = None) -> str:\n    \"\"\"Pass through to Elements.getSymbol() for the global Elements object.\"\"\"\n    global elements\n    return elements.getSymbol(z, name)\n\n\ndef getElementZ(symbol: str = None, name: str = None) -> int:\n    \"\"\"Pass through to Elements.getElementZ() for the global Elements object.\"\"\"\n    global elements\n    return elements.getElementZ(symbol, name)\n\n\ndef factory(elementsFile: str = None):\n    \"\"\"Pass through to Elements.factory() for the global Elements object.\"\"\"\n    global elements\n    global byZ\n    global byName\n    global bySymbol\n\n    elements = Elements()\n    elements.factory(elementsFile)\n\n    byZ = elements.byZ\n    byName = elements.byName\n    bySymbol = elements.bySymbol\n\n\ndef addGlobalElement(element: Element):\n    \"\"\"Pass through to Elements.addElement() for the global Elements object.\"\"\"\n    global elements\n    elements.addElement(element)\n\n\ndef destroyGlobalElements():\n    \"\"\"Pass through to Elements.clear() for the global Elements object.\"\"\"\n    global elements\n    elements.clear()\n\n\nclass 
Elements:\n    \"\"\"\n    A container for all the atomic elements information in the simulation.\n\n    By design, you would only expect to have one instance of this object in memory during a simulation.\n\n    Attributes\n    ----------\n    byZ: dict[int, Element]\n        A dictionary to find Element objects by atomic number (integer Z).\n    byName: dict[str, Element]\n        A dictionary to find Element objects by unique string identifier (\"C\", \"PU239\", \"U235\", etc).\n    bySymbol: dict[str, Element]\n        A dictionary to find Element objects by atomic symbol (\"C\", \"N\", \"PU\", etc).\n    elementsFile: str\n        File path to the custom ARMI \"elements.dat\" file.\n    \"\"\"\n\n    DEFAULT_ELEMENTS_FILE = os.path.join(context.RES, \"elements.dat\")\n\n    def __init__(self, elementsFile: str = None):\n        self.byZ: dict[int, Element] = {}\n        self.byName: dict[str, Element] = {}\n        self.bySymbol: dict[str, Element] = {}\n        self.elementsFile: str = elementsFile if elementsFile else self.DEFAULT_ELEMENTS_FILE\n\n    def clear(self):\n        \"\"\"Empty all the data in this collection.\"\"\"\n        self.byZ.clear()\n        self.byName.clear()\n        self.bySymbol.clear()\n\n    def addElement(self, element: Element):\n        \"\"\"Add an element to this collection.\n\n        Raises\n        ------\n        ValueError\n            If the element already exists in the collection.\n        \"\"\"\n        if element.z in self.byZ or element.name in self.byName or element.symbol in self.bySymbol:\n            raise ValueError(f\"{element} has already been added and cannot be duplicated.\")\n\n        self.byZ[element.z] = element\n        self.byName[element.name] = element\n        self.bySymbol[element.symbol] = element\n\n    def factory(self, elementsFile: str = None):\n        \"\"\"Generate the :class:`Elements <Element>` instances.\"\"\"\n        self.clear()\n\n        # If an input file is provided, use it, 
otherwise there is a class default.\n        if elementsFile:\n            self.elementsFile = elementsFile\n\n        with open(self.elementsFile, \"r\") as f:\n            for line in f:\n                # Skip header lines\n                if line.startswith(\"#\") or line.startswith(\"Z\"):\n                    continue\n                # read z, symbol, name, phase, and chemical group\n                lineData = line.split()\n                z = int(lineData[0])\n                sym = lineData[1].upper()\n                name = lineData[2]\n                phase = lineData[3]\n                group = lineData[4]\n                standardWeight = lineData[5]\n                e = Element(z, sym, name, phase, group)\n                if standardWeight != \"Derived\":\n                    e.standardWeight = float(standardWeight)\n                self.addElement(e)\n\n    def getElementsByChemicalPhase(self, phase: ChemicalPhase) -> List[Element]:\n        \"\"\"\n        Returns all elements that are of the given chemical phase.\n\n        Parameters\n        ----------\n        phase: ChemicalPhase\n            This should be one of the valid options from the `ChemicalPhase` class.\n\n        Returns\n        -------\n        elems : List[Element]\n            A list of elements that are associated with the given chemical phase.\n        \"\"\"\n        elems = []\n        if not isinstance(phase, ChemicalPhase):\n            raise TypeError(f\"{phase} is not an instance of {ChemicalPhase}\")\n\n        for element in self.byName.values():\n            if element.phase == phase:\n                elems.append(element)\n\n        return elems\n\n    def getElementsByChemicalGroup(self, group: ChemicalGroup) -> List[Element]:\n        \"\"\"\n        Returns all elements that are of the given chemical group.\n\n        Parameters\n        ----------\n        group: ChemicalGroup\n            This should be one of the valid options from the `ChemicalGroup` class.\n\n  
      Returns\n        -------\n        elems : List[Element]\n            A list of elements that are associated with the given chemical group.\n        \"\"\"\n        elems = []\n        if not isinstance(group, ChemicalGroup):\n            raise ValueError(f\"{group} is not an instance of {ChemicalGroup}\")\n\n        for element in self.byName.values():\n            if element.group == group:\n                elems.append(element)\n\n        return elems\n\n    def getName(self, z: int = None, symbol: str = None) -> str:\n        r\"\"\"\n        Returns element name.\n\n        Parameters\n        ----------\n        z : int\n            Atomic number\n        symbol : str\n            Element abbreviation e.g. 'Zr'\n\n        Examples\n        --------\n        >>> elements.getName(10)\n        'Neon'\n        >>> elements.getName(symbol=\"Ne\")\n        'Neon'\n        \"\"\"\n        element = None\n        if z:\n            element = self.byZ[z]\n        else:\n            element = self.byName[symbol.upper()]\n\n        return element.name\n\n    def getSymbol(self, z: int = None, name: str = None) -> str:\n        r\"\"\"\n        Returns element abbreviation given atomic number Z.\n\n        Parameters\n        ----------\n        z : int\n            Atomic number\n        name : str\n            Element name E.g. Zirconium\n\n        Examples\n        --------\n        >>> elements.getSymbol(10)\n        'Ne'\n        >>> elements.getSymbol(name=\"Neon\")\n        'Ne'\n\n        \"\"\"\n        element = None\n        if z:\n            element = self.byZ[z]\n        else:\n            element = self.byName[name.lower()]\n\n        return element.symbol\n\n    def getElementZ(self, symbol: str = None, name: str = None) -> int:\n        \"\"\"\n        Get element atomic number given a symbol or name.\n\n        Parameters\n        ----------\n        symbol : str\n            Element symbol e.g. 
'Zr'\n        name : str\n            Element name e.g. 'Zirconium'\n\n        Examples\n        --------\n        >>> elements.getElementZ(\"Zr\")\n        40\n        >>> elements.getElementZ(name=\"Zirconium\")\n        40\n\n        Notes\n        -----\n        Element Z is looked up via the ``bySymbol`` dictionary, indexed by upper-case element symbol.\n        \"\"\"\n        if not symbol and not name:\n            return None\n\n        element = None\n        if symbol:\n            element = self.bySymbol[symbol.upper()]\n        else:\n            element = self.byName[name.lower()]\n\n        return element.z\n\n\nfactory()\n"
  },
  {
    "path": "armi/nucDirectory/nucDir.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nSome original nuclide directory code.\n\nNotes\n-----\nThis may be deprecated. Consider using the appropriate instance methods available through the\n:py:class:`armi.nucDirectory.nuclideBases.INuclide` objects and/or the\n:py:mod:`armi.nucDirectory.nuclideBases` module.\n\"\"\"\n\nimport re\n\nfrom armi.nucDirectory import elements, nuclideBases\n\nnuclidePattern = re.compile(r\"([A-Za-z]+)-?(\\d{0,3})(\\d*)(\\S*)\")\nzaPat = re.compile(r\"([A-Za-z]+)-?([0-9]+)\")\n\n# Partially from table 2.2 in Was\n# See also: Table 2.4 in Primary Radiation Damage in Materials\n# https://www.oecd-nea.org/science/docs/2015/nsc-doc2015-9.pdf\neDisplacement = {\n    \"H\": 10.0,\n    \"C\": 31.0,\n    \"N\": 30.0,\n    \"NA\": 25.0,\n    \"SI\": 25.0,\n    \"V\": 40.0,\n    \"CR\": 40.0,\n    \"MN\": 40.0,\n    \"NI\": 40.0,\n    \"MO\": 60.0,\n    \"FE\": 40.0,\n    \"W\": 90.0,\n    \"TI\": 30.0,\n    \"NB\": 60.0,\n    \"ZR\": 40.0,\n    \"CU\": 30.0,\n    \"CO\": 40.0,\n    \"AL\": 25.0,\n    \"PB\": 25.0,\n    \"TA\": 90.0,\n}\n\n\ndef getNuclideFromName(name):\n    actualName = name\n    if \"-\" in name:\n        actualName = name.replace(\"-\", \"\")\n    if \"_\" in name:\n        actualName = name.replace(\"_\", \"\")\n\n    return nuclideBases.byName[actualName]\n\n\ndef getNaturalIsotopics(elementSymbol=None, z=None):\n    \"\"\"\n    Determines the atom 
fractions of all natural isotopes.\n\n    Parameters\n    ----------\n    elementSymbol : str, optional\n        The element symbol, e.g. Zr, U\n    z : int, optional\n        The atomic number of the element\n\n    Returns\n    -------\n    abundances : list\n        A list of (A,fraction) tuples where A is the mass number of the isotopes\n    \"\"\"\n    element = None\n    if z:\n        element = elements.byZ[z]\n    else:\n        element = elements.bySymbol[elementSymbol]\n    return [(nn.a, nn.abundance) for nn in element.getNaturalIsotopics()]\n\n\ndef getNaturalMassIsotopics(elementSymbol=None, z=None):\n    \"\"\"Return mass fractions of all natural isotopes.\n\n    To convert number fractions to mass fractions, we multiply by A.\n    \"\"\"\n    numIso = getNaturalIsotopics(elementSymbol, z)\n    terms = []\n    for a, frac in numIso:\n        terms.append(a * frac)\n    s = sum(terms)\n\n    massIso = []\n    for i, (a, frac) in enumerate(numIso):\n        massIso.append((a, terms[i] / s))\n\n    return massIso\n\n\ndef getMc2Label(name):\n    \"\"\"\n    Return a MC2 prefix label without a xstype suffix.\n\n    MC**2 has labels and library names. The labels are like\n    U235IA, ZIRCFB, etc. and the library names are references\n    to specific data sets on the MC**2 libraries (e.g. U-2355, etc.)\n\n    This method returns the labels without the xstype suffixes (IA, FB).\n    Rather than maintaining a lookup table, this simply converts\n    the ARMI nuclide names to MC**2 names.\n\n    Parameters\n    ----------\n    name : str\n        ARMI nuclide name of the nuclide\n\n    Returns\n    -------\n    mc2LibLabel : str\n        The MC**2 prefix for this nuclide.\n\n    Examples\n    --------\n    >>> nucDir.getMc2Label(\"U235\")\n    'U235'\n    >> nucDir.getMc2Label('FE')\n    'FE'\n    >>> nucDir.getMc2Label(\"IRON\")\n    'FE'\n    >>> nucDir.getMc2Label(\"AM242\")\n    A242\n\n    \"\"\"\n    # First translate to the proper nuclide. 
CARB->C\n    nuc = getNuclide(name)\n    return nuc.label\n\n\ndef getElementName(z=None, symbol=None):\n    \"\"\"\n    Returns element name.\n\n    Parameters\n    ----------\n    z : int\n        Atomic number\n    symbol : str\n        Element abbreviation e.g. 'Zr'\n\n    Examples\n    --------\n    >>> nucDir.getElementName(10)\n    'Neon'\n    >>> nucDir.getElementName(symbol=\"Zr\")\n    'Neon'\n\n    \"\"\"\n    element = None\n    if z:\n        element = elements.byZ[z]\n    else:\n        element = elements.byName[symbol.upper()]\n    return element.name\n\n\ndef getElementSymbol(z=None, name=None):\n    \"\"\"\n    Returns element abbreviation given atomic number Z.\n\n    Parameters\n    ----------\n    z : int\n        Atomic number\n    name : str\n        Element name E.g. Zirconium\n\n    Examples\n    --------\n    >>> nucDir.getElementSymbol(10)\n    'Ne'\n    >>> nucDir.getElementSymbol(name=\"Neon\")\n    'Ne'\n\n    \"\"\"\n    element = None\n    if z:\n        element = elements.byZ[z]\n    else:\n        element = elements.byName[name.lower()]\n    return element.symbol\n\n\ndef getNuclide(nucName):\n    \"\"\"\n    Looks up the ARMI nuclide object that has this name.\n\n    Parameters\n    ----------\n    nucName : str\n        A nuclide name like U-235 or AM241, AM242M, AM242M\n\n    Returns\n    -------\n    nuc : Nuclide\n        An armi nuclide object.\n    \"\"\"\n    nuc = nuclideBases.byName.get(nucName, None)\n    if nucName and not nuc:\n        nuc = getNuclideFromName(nucName)\n    if not nuc:\n        raise KeyError(f\"Nuclide name {nucName} is invalid.\")\n    return nuc\n\n\ndef getNuclides(nucName=None, elementSymbol=None):\n    \"\"\"\n    Returns a list of nuclide names in a particular nuclide or element.\n\n    If no arguments, returns all nuclideBases in the directory\n\n    Used to convert things to DB name, to adjustNuclides, etc.\n\n    Parameters\n    ----------\n    nucName : str\n        ARMI nuclide label\n    
elementSymbol : str\n        Element symbol e.g. 'Zr'\n    \"\"\"\n    if nucName:\n        # just spit back the nuclide if it's in here. Useful when iterating over the result.\n        nucList = [getNuclide(nucName)]\n    elif elementSymbol:\n        nucList = elements.bySymbol[elementSymbol].nuclides\n    else:\n        # all nuclideBases, including shortcut nuclideBases ('CARB')\n        nucList = [nuc for nuc in nuclideBases.instances if nuc.getMcc2Id() is not None]\n\n    return nucList\n\n\ndef getNuclideNames(nucName=None, elementSymbol=None):\n    \"\"\"\n    Returns a list of nuclide names in a particular nuclide or element.\n\n    If no arguments, returns all nuclideBases in the directory.\n\n    .. warning:: You will get both isotopes and NaturalNuclideBases for each element.\n\n    Parameters\n    ----------\n    nucName : str\n        ARMI nuclide label\n    elementSymbol : str\n        Element symbol e.g. 'Zr'\n    \"\"\"\n    nucList = getNuclides(nucName, elementSymbol)\n    return [nn.name for nn in nucList]\n\n\ndef getAtomicWeight(lab=None, z=None, a=None):\n    \"\"\"\n    Returns atomic weight in g/mole.\n\n    Parameters\n    ----------\n    lab : str, optional\n        nuclide label, like U235\n    z : int, optional\n        atomic number\n    a : int, optional\n        mass number\n\n    Returns\n    -------\n    aMass : float\n        Atomic weight in grams /mole from NIST, or just mass number if not in library (U239 gives 239)\n\n    Examples\n    --------\n    >>> from armi.nucDirectory import nucDir\n    >>> nucDir.getAtomicWeight(\"U235\")\n    235.0439299\n\n    >>> nucDir.getAtomicWeight(\"U239\")\n    239\n\n    >>> nucDir.getAtomicWeight(\"U238\")\n    238.0507882\n\n    >>> nucDir.getAtomicWeight(z=94, a=239)\n    239.0521634\n    \"\"\"\n    if lab:\n        nuclide = None\n        if lab in nuclideBases.byLabel:\n            nuclide = nuclideBases.byLabel[lab]\n        elif lab in nuclideBases.byMcc3Id:\n            nuclide = 
nuclideBases.byMcc3Id[lab]\n        else:\n            nuclide = getNuclideFromName(lab)\n        return nuclide.weight\n    elif z == 0 and a == 0:\n        return 0.0\n    if a == 0 and z:\n        element = elements.byZ[z]\n        return element.standardWeight\n    else:\n        nuclide = nuclideBases.single(lambda nn: nn.a == a and nn.z == z)\n        return nuclide.weight\n\n\ndef isHeavyMetal(name):\n    try:\n        return getNuclide(name).isHeavyMetal()\n    except AttributeError:\n        raise AttributeError(\"The nuclide {0} is not found in the nuclide directory\".format(name))\n\n\ndef isFissile(name):\n    try:\n        return getNuclide(name).isFissile()\n    except AttributeError:\n        raise AttributeError(\"The nuclide {0} is not found in the nuclide directory\".format(name))\n\n\ndef getThresholdDisplacementEnergy(nuc):\n    \"\"\"\n    Return the Lindhard cutoff; the energy required to displace an atom.\n\n    From SPECTER.pdf Table II\n    Greenwood, \"SPECTER: Neutron Damage Calculations for Materials Irradiations\",\n    ANL.FPP/TM-197, Argonne National Lab., (1985).\n\n    Parameters\n    ----------\n    nuc : str\n        nuclide name\n\n    Returns\n    -------\n    Ed : float\n        The cutoff energy in eV\n    \"\"\"\n    nuc = getNuclide(nuc)\n    el = elements.byZ[nuc.z]\n    try:\n        ed = eDisplacement[el.symbol]\n    except KeyError:\n        print(\n            \"The element {0} of nuclide {1} does not have a displacement energy in the library. Please add one.\".format(\n                el, nuc\n            )\n        )\n        raise\n\n    return ed\n"
  },
  {
    "path": "armi/nucDirectory/nuclideBases.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nr\"\"\"\nThis module provides access to fundamental nuclide information to be used throughout the framework and applications.\n\n.. impl:: Isotopes and isomers can be queried by name, label, MC2-3 ID, MCNP ID, and AAAZZZS ID.\n    :id: I_ARMI_ND_ISOTOPES0\n    :implements: R_ARMI_ND_ISOTOPES\n\n    The :py:mod:`nuclideBases <armi.nucDirectory.nuclideBases>` module defines the\n    :py:class:`NuclideBase <armi.nucDirectory.nuclideBases.NuclideBase>` class which is used to organize and store\n    metadata about each nuclide. The metadata is read from a provided ``nuclides.dat`` file, which contains metadata for\n    thousands of isotopes. 
The module also contains classes for special types of nuclides, including\n    :py:class:`DummyNuclideBase <armi.nucDirectory.nuclideBases.DummyNuclideBase>` for dummy nuclides,\n    :py:class:`LumpNuclideBase <armi.nucDirectory.nuclideBases.LumpNuclideBase>`, for lumped fission product nuclides,\n    and :py:class:`NaturalNuclideBase <armi.nucDirectory.nuclideBases.NaturalNuclideBase>` for when data is given\n    collectively for an element at natural abundance rather than for individual isotopes.\n\n    The :py:class:`NuclideBase <armi.nucDirectory.nuclideBases.NuclideBase>` provides a data structure for information\n    about a single nuclide, including the atom number, atomic weight, element, isomeric state, half-life, and name.\n\n    The :py:mod:`nuclideBases <armi.nucDirectory.nuclideBases>` module provides a factory and associated functions for\n    instantiating the :py:class:`NuclideBase <armi.nucDirectory.nuclideBases.NuclideBase>` objects. It is expected that\n    during a simulation, the ``Reactor`` will contain an instance of ``NuclideBases`` to handle building the nuclide\n    data dictionaries, including:\n\n    * ``elements`` (collection of Element objects)\n    * ``instances`` (list of INuclide objects)\n    * ``byName`` (keyed by name, e.g., ``U235``)\n    * ``byDBName`` (keyed by database name, e.g., ``nU235``)\n    * ``byLabel`` (keyed by label, e.g., ``U235``)\n    * ``byMcc2Id`` (keyed by MC\\ :sup:`2`-2 ID, e.g., ``U-2355``)\n    * ``byMcc3Id`` (keyed by MC\\ :sup:`2`-3 ID, e.g., ``U235_7``)\n    * ``byMcc3IdEndfbVII0`` (keyed by MC\\ :sup:`2`-3 ID, e.g., ``U235_7``)\n    * ``byMcc3IdEndfbVII1`` (keyed by MC\\ :sup:`2`-3 ID, e.g., ``U235_7``)\n    * ``byMcnpId`` (keyed by MCNP ID, e.g., ``92235``)\n    * ``byAAAZZZSId`` (keyed by AAAZZZS, e.g., ``2350920``)\n\nThe nuclide class structure is outlined :ref:`here <nuclide-bases-class-diagram>`.\n\n.. _nuclide-bases-class-diagram:\n\n.. 
pyreverse:: armi.nucDirectory.nuclideBases\n    :align: center\n    :width: 75%\n\n    Class inheritance diagram for :py:class:`INuclide`.\n\nExamples\n--------\n>>> r = Reactor(\"ExampleReactor\", bp)\n>>> r.nuclideBases.byName[\"U235\"]\n<NuclideBase U235:  Z:92, A:235, S:0, W:2.350439e+02, Label:U235>, HL:2.22160758861e+16, Abund:7.204000e-03>\n\n>>> r.nuclideBases.byLabel[\"U235\"]\n<NuclideBase U235:  Z:92, A:235, S:0, W:2.350439e+02, Label:U235>, HL:2.22160758861e+16, Abund:7.204000e-03>\n\nRetrieve U-235 by the MC2-2 ID:\n\n>>> r.nuclideBases.byMcc2Id[\"U-2355\"]\n<NuclideBase U235:  Z:92, A:235, S:0, W:2.350439e+02, Label:U235>, HL:2.22160758861e+16, Abund:7.204000e-03>\n\nRetrieve U-235 by the MC2-3 ID:\n\n>>> r.nuclideBases.byMcc3IdEndfVII0[\"U235_7\"]\n<NuclideBase U235:  Z:92, A:235, S:0, W:2.350439e+02, Label:U235>, HL:2.22160758861e+16, Abund:7.204000e-03>\n\nRetrieve U-235 by the MCNP ID:\n\n>>> r.nuclideBases.byMcnpId[\"92235\"]\n<NuclideBase U235:  Z:92, A:235, S:0, W:2.350439e+02, Label:U235>, HL:2.22160758861e+16, Abund:7.204000e-03>\n\nRetrieve U-235 by the AAAZZZS ID:\n\n>>> r.nuclideBases.byAAAZZZSId[\"2350920\"]\n<NuclideBase U235:  Z:92, A:235, S:0, W:2.350439e+02, Label:U235>, HL:2.22160758861e+16, Abund:7.204000e-03>\n\nNotes\n-----\nCurrently, this module contains a lot of data in the global scope. But ARMI is in the process of encapsulating this\ndata, moving it out of the global scope, making it part of the reactor data model, and making it configurable via\nSettings. Pardon the mess during this transition.\n\"\"\"\n\nimport os\n\nimport numpy as np\nfrom ruamel.yaml import YAML\n\nfrom armi import context, runLog\nfrom armi.nucDirectory import elements, transmutations\nfrom armi.utils.units import HEAVY_METAL_CUTOFF_Z\n\n# Global nuclide and nuclideBases data\nnuclideBases = None\ninstances = []\nburnChainImposed = False\nbyName = None\nbyDBName = None\nbyLabel = None\nbyMcc2Id = None\nbyMcc3Id = None  # for backwards compatibility. 
Identical to byMcc3IdEndfbVII1\nbyMcc3IdEndfbVII0 = None\nbyMcc3IdEndfbVII1 = None\nbyMcnpId = None\nbyAAAZZZSId = None\n\n# lookup table from https://t2.lanl.gov/nis/data/endf/endfvii-n.html\nBASE_ENDFB7_MAT_NUM = {\n    \"PM\": 139,\n    \"RA\": 223,\n    \"AC\": 225,\n    \"TH\": 227,\n    \"PA\": 229,\n    \"NP\": 230,\n    \"PU\": 235,\n    \"AM\": 235,\n    \"CM\": 240,\n    \"BK\": 240,\n    \"CF\": 240,\n    \"TC\": 99,\n}\n\n\nclass NuclideInterface:\n    \"\"\"An abstract nuclide implementation which defining various methods required for a nuclide object.\"\"\"\n\n    def getDatabaseName(self):\n        \"\"\"Return the the nuclide label for the ARMI database (i.e. \"nPu239\").\"\"\"\n        raise NotImplementedError\n\n    def getDecay(self, decayType):\n        \"\"\"\n        Return a :py:class:`~armi.nucDirectory.transmutations.DecayMode` object.\n\n        Parameters\n        ----------\n        decType: str\n            Name of decay mode, e.g. 'sf', 'alpha'\n\n        Returns\n        -------\n        decay : :py:class:`DecayModes <armi.nucDirectory.transmutations.DecayMode>`\n        \"\"\"\n        raise NotImplementedError\n\n    def getMcc2Id(self):\n        \"\"\"Return the MC2-2 nuclide identification label based on the ENDF/B-V.2 cross section library.\"\"\"\n        raise NotImplementedError\n\n    def getMcc3Id(self):\n        \"\"\"Return the MC2-3 nuclide identification label based on the ENDF/B-VII.1 cross section library.\"\"\"\n        raise NotImplementedError\n\n    def getMcc3IdEndfbVII0(self):\n        \"\"\"Return the MC2-3 nuclide identification label based on the ENDF/B-VII.0 cross section library.\"\"\"\n        raise NotImplementedError\n\n    def getMcc3IdEndfbVII1(self):\n        \"\"\"Return the MC2-3 nuclide identification label based on the ENDF/B-VII.1 cross section library.\"\"\"\n        raise NotImplementedError\n\n    def getSerpentId(self):\n        \"\"\"Get the Serpent nuclide identification label.\"\"\"\n       
 raise NotImplementedError\n\n    def getNaturalIsotopics(self):\n        \"\"\"Return the natural isotopics root :py:class:`~elements.Element`.\"\"\"\n        raise NotImplementedError\n\n    def isFissile(self):\n        \"\"\"Return boolean value indicating whether this nuclide is fissile.\"\"\"\n        raise NotImplementedError\n\n    def isHeavyMetal(self):\n        \"\"\"Return boolean value indicating whether this nuclide is a heavy metal.\"\"\"\n        raise NotImplementedError\n\n\nclass NuclideWrapper(NuclideInterface):\n    \"\"\"A nuclide wrapper class, used as a base class for nuclear data file nuclides.\"\"\"\n\n    def __init__(self, container, key):\n        self._base = None\n        self.container = container\n        self.containerKey = key\n        self.nucLabel = key[:-2]\n\n    def __repr__(self):\n        return f\"<{self.__class__.__name__} {self.containerKey}>\"\n\n    def __format__(self, format_spec):\n        return format_spec.format(repr(self))\n\n    @property\n    def name(self):\n        \"\"\"\n        Return the underlying nuclide's name (i.e. \"PU239\").\n\n        Notes\n        -----\n        The nuclide name consists of the capitalized 2 character element symbol and atomic mass number.\n        \"\"\"\n        return self._base.name\n\n    @property\n    def weight(self):\n        \"\"\"Get the underlying nuclide's weight.\"\"\"\n        return self._base.weight\n\n    def getDatabaseName(self):\n        \"\"\"Get the database name of the underlying nuclide (i.e. \"nPu239\").\"\"\"\n        return self._base.getDatabaseName()\n\n    def getDecay(self, decayType):\n        \"\"\"\n        Return a :py:class:`~armi.nucDirectory.transmutations.DecayMode` object.\n\n        Parameters\n        ----------\n        decType: str\n            Name of decay mode, e.g. 
'sf', 'alpha'\n\n        Returns\n        -------\n        decay : :py:class:`DecayModes <armi.nucDirectory.transmutations.DecayMode>`\n        \"\"\"\n        return self._base.getDecay(decayType)\n\n    def getMcc2Id(self):\n        \"\"\"Return the MC2-2 nuclide based on the ENDF/B-V.2 cross section library.\"\"\"\n        return self._base.getMcc2Id()\n\n    def getMcc3Id(self):\n        \"\"\"Return the MC2-3 nuclide based on the ENDF/B-VII.1 cross section library.\"\"\"\n        return self.getMcc3IdEndfbVII1()\n\n    def getMcc3IdEndfbVII0(self):\n        \"\"\"Return the MC2-3 nuclide based on the ENDF/B-VII.0 cross section library.\"\"\"\n        return self._base.getMcc3IdEndfbVII0()\n\n    def getMcc3IdEndfbVII1(self):\n        \"\"\"Return the MC2-3 nuclide based on the ENDF/B-VII.1 cross section library.\"\"\"\n        return self._base.getMcc3IdEndfbVII1()\n\n    def getNaturalIsotopics(self):\n        \"\"\"Return the natural isotopics root :py:class:`~elements.Element`.\"\"\"\n        return self._base.getNaturalIsotopics()\n\n    def isFissile(self):\n        \"\"\"Return boolean indicating whether or not the underlying nuclide is fissle.\"\"\"\n        return self._base.isFissile()\n\n    def isHeavyMetal(self):\n        \"\"\"Return boolean indicating whether or not the underlying nuclide is a heavy metal.\"\"\"\n        return self._base.isHeavyMetal()\n\n\nclass INuclide(NuclideInterface):\n    \"\"\"\n    Nuclide interface, the base of all nuclide objects.\n\n    Attributes\n    ----------\n    z : int\n        Number of protons.\n    a : int\n        Number of nucleons.\n    state : int\n        Indicates excitement, 1 is more excited than 0.\n    abundance : float\n        Isotopic fraction of a naturally occurring nuclide. The sum of all nuclide abundances for a naturally occurring\n        element should be 1.0. 
This is atom fraction, not mass fraction.\n    name : str\n        ARMI's unique name for the given nuclide.\n    label : str\n        ARMI's unique 4 character label for the nuclide. These are not human readable, but do not lose any information.\n        The label is effectively the :py:attr:`Element.symbol <armi.nucDirectory.elements.Element.symbol>` padded to two\n        characters, plus the mass number (A) in base-26 (0-9, A-Z). Additional support for meta-states is provided by\n        adding 100 * the state to the mass number (A).\n    nuSF : float\n        Neutrons released per spontaneous fission. This should probably be moved at some point.\n    \"\"\"\n\n    fissile = [\"U235\", \"PU239\", \"PU241\", \"AM242M\", \"CM244\", \"U233\"]\n    TRANSMUTATION = \"transmutation\"\n    DECAY = \"decay\"\n    SPONTANEOUS_FISSION = \"nuSF\"\n\n    def __init__(\n        self,\n        element,\n        a,\n        state,\n        weight,\n        abundance,\n        halflife,\n        name,\n        label,\n        mcc2id=None,\n        mcc3idEndfbVII0=None,\n        mcc3idEndfbVII1=None,\n    ):\n        \"\"\"\n        Create an instance of an INuclide.\n\n        Warning\n        -------\n        Do not call this constructor directly; use the factory instead.\n        \"\"\"\n        if state < 0:\n            raise ValueError(\n                f\"Error in initializing nuclide {name}. An invalid state {state} is provided. The state must be a \"\n                \"positive integer.\"\n            )\n        if halflife < 0.0:\n            raise ValueError(f\"Error in initializing nuclide {name}. 
The halflife must be a positive value.\")\n\n        self.element = element\n        self.z = element.z\n        self.a = a\n        self.state = state\n        self.decays = []\n        self.trans = []\n        self.weight = weight\n        self.abundance = abundance\n        self.halflife = halflife\n        self.name = name\n        self.label = label\n        self.nuSF = 0.0\n        self.mcc2id = mcc2id or \"\"\n        self.mcc3idEndfbVII0 = mcc3idEndfbVII0 or \"\"\n        self.mcc3idEndfbVII1 = mcc3idEndfbVII1 or \"\"\n        self.element.append(self)\n\n    def __hash__(self):\n        return hash((self.a, self.z, self.state))\n\n    def __reduce__(self):\n        return fromName, (self.name,)\n\n    def __lt__(self, other):\n        return (self.z, self.a, self.state) < (other.z, other.a, other.state)\n\n    def __eq__(self, other):\n        return hash(self) == hash(other)\n\n    def _processBurnData(self, burnInfo):\n        \"\"\"\n        Process YAML burn transmutation, decay, and spontaneous fission data for this nuclide.\n\n        This clears out any existing transmutation/decay information before processing.\n\n        Parameters\n        ----------\n        burnInfo: list\n            List of dictionaries containing burn information for the current nuclide\n        \"\"\"\n        self.decays = []\n        self.trans = []\n        for nuclideBurnCategory in burnInfo:\n            # Check that the burn category has only one defined burn type\n            if len(nuclideBurnCategory) > 1:\n                raise ValueError(\n                    f\"Improperly defined ``burn-chain`` of {self}. 
{nuclideBurnCategory.keys()} should be a single \"\n                    \"burn type.\"\n                )\n            nuclideBurnType = list(nuclideBurnCategory.keys())[0]\n            if nuclideBurnType == self.TRANSMUTATION:\n                self.trans.append(transmutations.Transmutation(self, nuclideBurnCategory[nuclideBurnType]))\n            elif nuclideBurnType == self.DECAY:\n                self.decays.append(transmutations.DecayMode(self, nuclideBurnCategory[nuclideBurnType]))\n            elif nuclideBurnType == self.SPONTANEOUS_FISSION:\n                userSpontaneousFissionYield = nuclideBurnCategory.get(nuclideBurnType, None)\n\n                # Check for user-defined value of nuSF within the burn-chain data. If this is updated then prefer the\n                # user change and then note this to the user. Otherwise, maintain the default loaded from the nuclide\n                # bases.\n                if userSpontaneousFissionYield:\n                    if userSpontaneousFissionYield != self.nuSF:\n                        runLog.info(\n                            f\"nuSF provided for {self} will be updated from {self.nuSF:<8.6e} to \"\n                            f\"{userSpontaneousFissionYield:<8.6e} based on user provided burn-chain data.\"\n                        )\n                        self.nuSF = userSpontaneousFissionYield\n            else:\n                raise Exception(\n                    f\"Undefined Burn Data {nuclideBurnType} for {self}. Expected {self.TRANSMUTATION}, {self.DECAY}, \"\n                    f\"or {self.SPONTANEOUS_FISSION}.\"\n                )\n\n    def getDecay(self, decayType):\n        \"\"\"Get a :py:class:`~armi.nucDirectory.transmutations.DecayMode`.\n\n        Retrieve the first :py:class:`~armi.nucDirectory.transmutations.DecayMode` matching the specified decType.\n\n        Parameters\n        ----------\n        decType: str\n            Name of decay mode e.g. 
'sf', 'alpha'\n\n        Returns\n        -------\n        decay : :py:class:`DecayModes <armi.nucDirectory.transmutations.DecayMode>`\n        \"\"\"\n        for d in self.decays:\n            if d.type == decayType:\n                return d\n\n        return None\n\n    def isFissile(self):\n        \"\"\"Determine if the nuclide is fissile.\n\n        Returns\n        -------\n        answer: bool\n            True if the :py:class:`INuclide` is fissile, otherwise False.\n        \"\"\"\n        return self.name in self.fissile\n\n    def getNaturalIsotopics(self):\n        r\"\"\"Gets the naturally occurring nuclides for this nuclide.\n\n        Abstract method, see concrete types for implementation.\n\n        Returns\n        -------\n        nuclides: list\n            List of :py:class:`INuclides <INuclide>`\n\n        See Also\n        --------\n        :meth:`NuclideBase.getNaturalIsotopics`\n        :meth:`NaturalNuclideBase.getNaturalIsotopics`\n        :meth:`LumpNuclideBase.getNaturalIsotopics`\n        :meth:`DummyNuclideBase.getNaturalIsotopics`\n        \"\"\"\n        raise NotImplementedError\n\n    def getDatabaseName(self):\n        \"\"\"Get the name of the nuclide used in the database (i.e. \"nPu239\").\"\"\"\n        return f\"n{self.name.capitalize()}\"\n\n    def isHeavyMetal(self):\n        return self.z > HEAVY_METAL_CUTOFF_Z\n\n\nclass IMcnpNuclide:\n    \"\"\"Abstract class for retrieving nuclide identifiers for the MCNP software.\"\"\"\n\n    def getMcnpId(self):\n        \"\"\"Return a string that represents a nuclide label for a material card in MCNP.\"\"\"\n        raise NotImplementedError\n\n    def getAAAZZZSId(self):\n        \"\"\"Return a string that is ordered by the mass number, A, the atomic number, Z, and the isomeric state, S.\"\"\"\n        raise NotImplementedError\n\n\nclass NuclideBase(INuclide, IMcnpNuclide):\n    r\"\"\"Represents an individual nuclide/isotope.\n\n    .. 
impl:: Isotopes and isomers can be queried by name and label.\n        :id: I_ARMI_ND_ISOTOPES1\n        :implements: R_ARMI_ND_ISOTOPES\n\n        The :py:class:`NuclideBase <armi.nucDirectory.nuclideBases.NuclideBase>` class provides a data structure for\n        information about a single nuclide, including the atom number, atomic weight, element, isomeric state,\n        half-life, and name. The class contains static methods for creating an internal ARMI name or label for a\n        nuclide. There are instance methods for generating the nuclide ID for external codes, e.g. MCNP or Serpent, and\n        retrieving the nuclide ID for MC\\ :sup:`2`-2 or MC\\ :sup:`2`-3. There are also instance methods for generating\n        an AAAZZZS ID and an ENDF MAT number.\n    \"\"\"\n\n    def __init__(self, element, a, weight, abundance, state, halflife):\n        IMcnpNuclide.__init__(self)\n        INuclide.__init__(\n            self,\n            element=element,\n            a=a,\n            state=state,\n            weight=weight,\n            abundance=abundance,\n            halflife=halflife,\n            name=NuclideBase._createName(element, a, state),\n            label=NuclideBase._createLabel(element, a, state),\n        )\n\n    def __repr__(self):\n        return (\n            f\"<{self.__class__.__name__} {self.name}:  Z:{self.z}, A:{self.a}, S:{self.state}, \"\n            + f\"W:{self.weight:<12.6e}, Label:{self.label}>, HL:{self.halflife:<15.11e}, \"\n            + f\"Abund:{self.abundance:<8.6e}>\"\n        )\n\n    @staticmethod\n    def _createName(element, a, state):\n        metaChar = [\"\", \"M\", \"M2\", \"M3\"]\n        if state > len(metaChar):\n            raise ValueError(f\"The state of NuclideBase is not valid and must not be larger than {len(metaChar)}.\")\n\n        return f\"{element.symbol}{a}{metaChar[state]}\"\n\n    @staticmethod\n    def _createLabel(element, a, state):\n        \"\"\"\n        Make label for nuclide base.\n\n     
   The logic causes labels for things with A<10 to be zero padded like H03 or tritium instead of H3. This avoids\n        the metastable tritium collision which would look like elemental HE. It also allows things like MO100 to be held\n        within 4 characters, which is a constraint of the ISOTXS format if we append 2 characters for XS type.\n        \"\"\"\n        # len(e.symbol) is 1 or 2 => a % (either 1000 or 100)\n        #                         => gives exact a, or last two digits.\n        # the division by 10 removes the last digit.\n        firstTwoDigits = (a % (10 ** (4 - len(element.symbol)))) // 10\n        # the last digit is either 0-9 if state=0, or A-J if state=1, or K-T if state=2, or U-d if state=3\n        lastDigit = \"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcd\"[(a % 10) + state * 10]\n        return f\"{element.symbol}{firstTwoDigits}{lastDigit}\"\n\n    def getNaturalIsotopics(self):\n        \"\"\"Gets the natural isotopics root :py:class:`~elements.Element`.\n\n        Gets the naturally occurring nuclides for this nuclide.\n\n        Returns\n        -------\n        nuclides: list\n            List of :py:class:`INuclides <INuclide>`\n\n        See Also\n        --------\n        :meth:`INuclide.getNaturalIsotopics`\n        \"\"\"\n        return self.element.getNaturalIsotopics()\n\n    def getMcc2Id(self):\n        \"\"\"Return the MC2-2 nuclide identification label based on the ENDF/B-V.2 cross section library.\n\n        .. impl:: Isotopes and isomers can be queried by MC2-2 ID.\n            :id: I_ARMI_ND_ISOTOPES2\n            :implements: R_ARMI_ND_ISOTOPES\n\n            This method returns the ``mcc2id`` attribute of a\n            :py:class:`NuclideBase <armi.nucDirectory.nuclideBases.NuclideBase>` instance. 
This attribute is initially\n            populated by reading from the mcc-nuclides.yaml file in the ARMI resources folder.\n        \"\"\"\n        return self.mcc2id\n\n    def getMcc3Id(self):\n        \"\"\"Return the MC2-3 nuclide identification label based on the ENDF/B-VII.1 cross section library.\"\"\"\n        return self.getMcc3IdEndfbVII1()\n\n    def getMcc3IdEndfbVII0(self):\n        \"\"\"Return the MC2-3 nuclide identification label based on the ENDF/B-VII.0 cross section library.\n\n        .. impl:: Isotopes and isomers can be queried by MC2-3 ENDF/B-VII.0 ID.\n            :id: I_ARMI_ND_ISOTOPES3\n            :implements: R_ARMI_ND_ISOTOPES\n\n            This method returns the ``mcc3idEndfbVII0`` attribute of a\n            :py:class:`NuclideBase <armi.nucDirectory.nuclideBases.NuclideBase>`\n            instance. This attribute is initially populated by reading from the\n            mcc-nuclides.yaml file in the ARMI resources folder.\n        \"\"\"\n        return self.mcc3idEndfbVII0\n\n    def getMcc3IdEndfbVII1(self):\n        \"\"\"Return the MC2-3 nuclide identification label based on the ENDF/B-VII.1 cross section library.\n\n        .. impl:: Isotopes and isomers can be queried by MC2-3 ENDF/B-VII.1 ID.\n            :id: I_ARMI_ND_ISOTOPES7\n            :implements: R_ARMI_ND_ISOTOPES\n\n            This method returns the ``mcc3idEndfbVII1`` attribute of a\n            :py:class:`NuclideBase <armi.nucDirectory.nuclideBases.NuclideBase>`\n            instance. This attribute is initially populated by reading from the\n            mcc-nuclides.yaml file in the ARMI resources folder.\n        \"\"\"\n        return self.mcc3idEndfbVII1\n\n    def getMcnpId(self):\n        \"\"\"\n        Gets the MCNP label for this nuclide.\n\n        .. 
impl:: Isotopes and isomers can be queried by MCNP ID.\n            :id: I_ARMI_ND_ISOTOPES4\n            :implements: R_ARMI_ND_ISOTOPES\n\n            This method generates the MCNP ID for an isotope using the standard\n            MCNP format based on the atomic number A, number of protons Z, and\n            excited state. The implementation includes the special rule for\n            Am-242m, which is 95242. 95642 is used for the less common ground\n            state Am-242.\n\n        Returns\n        -------\n        id : str\n            The MCNP ID e.g. ``92235``, ``94239``, ``6000``\n        \"\"\"\n        z, a = self.z, self.a\n\n        if z == 95 and a == 242:\n            # Am242 has special rules\n            if self.state != 1:\n                # MCNP uses base state for the common metastable state AM242M, so AM242M is just 95242\n                # AM242 base state is called 95642 (+400) in mcnp.\n                # see https://mcnp.lanl.gov/pdf_files/la-ur-08-1999.pdf\n                # New ACE-Formatted Neutron and Proton Libraries Based on ENDF/B-VII.0\n                a += 300 + 100 * max(self.state, 1)\n        elif self.state > 0:\n            # in general mcnp adds 300 + 100*m to the Z number for metastables. see above source\n            a += 300 + 100 * self.state\n\n        return \"{z:d}{a:03d}\".format(z=z, a=a)\n\n    def getAAAZZZSId(self):\n        \"\"\"\n        Return a string that is ordered by the mass number, A, the atomic number, Z, and the isomeric state, S.\n\n        .. impl:: Isotopes and isomers can be queried by AAAZZZS ID.\n            :id: I_ARMI_ND_ISOTOPES5\n            :implements: R_ARMI_ND_ISOTOPES\n\n            This method generates the AAAZZZS format ID for an isotope. Where\n            AAA is the mass number, ZZZ is the atomic number, and S is the\n            isomeric state. 
This is a general format independent of any code that\n            precisely defines an isotope or isomer.\n\n        Notes\n        -----\n        An example would be for U235, where A=235, Z=92, and S=0, returning ``2350920``.\n        \"\"\"\n        return f\"{self.a}{self.z:>03d}{self.state}\"\n\n    def getSerpentId(self):\n        \"\"\"\n        Returns the SERPENT style ID for this nuclide.\n\n        Returns\n        -------\n        id: str\n            The ID of this nuclide based on it's elemental name, weight,\n            and state, eg ``U-235``, ``Te-129m``.\n        \"\"\"\n        symbol = self.element.symbol.capitalize()\n        return f\"{symbol}-{self.a}{'m' if self.state else ''}\"\n\n    def getEndfMatNum(self):\n        \"\"\"\n        Gets the ENDF MAT number.\n\n        MAT numbers are defined as described in section 0.4.1 of the NJOY manual. Basically, it's Z * 100 + I where I is\n        an isotope number. I=25 is defined as the lightest known stable isotope of element Z, so for Uranium, Z=92 and\n        I=25 refers to U234. The values of I go up by 3 for each mass number, so U235 is 9228. This leaves room for\n        three isomeric states of each nuclide.\n\n        Returns\n        -------\n        id : str\n            The MAT number e.g. ``9237`` for U238\n        \"\"\"\n        z, a = self.z, self.a\n        if self.element.symbol in BASE_ENDFB7_MAT_NUM:\n            # no stable isotopes (or other special case). 
Use lookup table\n            smallestStableA = BASE_ENDFB7_MAT_NUM[self.element.symbol]\n        else:\n            naturalIsotopes = self.getNaturalIsotopics()\n            if naturalIsotopes:\n                smallestStableA = min(ni.a for ni in naturalIsotopes)  # no guarantee they were sorted\n            else:\n                raise KeyError(f\"Nuclide {self} is unknown in the MAT number lookup\")\n\n        isotopeNum = (a - smallestStableA) * 3 + self.state + 25\n        mat = z * 100 + isotopeNum\n        return str(mat)\n\n\nclass NaturalNuclideBase(INuclide, IMcnpNuclide):\n    \"\"\"\n    Represents an individual nuclide/isotope that is naturally occurring.\n\n    Notes\n    -----\n    This is meant to represent the combination of all naturally occurring nuclides within an element. The abundance is\n    forced to zero here so that it does not have any interactions with the NuclideBase objects.\n    \"\"\"\n\n    def __init__(self, name, element):\n        INuclide.__init__(\n            self,\n            element=element,\n            a=0,\n            state=0,\n            weight=sum([nn.weight * nn.abundance for nn in element.getNaturalIsotopics()]),\n            abundance=0.0,\n            halflife=np.inf,\n            name=name,\n            label=name,\n        )\n\n    def __repr__(self):\n        return f\"<{self.__class__.__name__} {self.name}:  Z:{self.z}, W:{self.weight:<12.6e}, Label:{self.label}>\"\n\n    def getNaturalIsotopics(self):\n        \"\"\"Gets the natural isotopics root :py:class:`~elements.Element`.\n\n        Gets the naturally occurring nuclides for this nuclide.\n\n        Returns\n        -------\n        nuclides: list\n            List of :py:class:`INuclides <INuclide>`.\n\n        See Also\n        --------\n        :meth:`INuclide.getNaturalIsotopics`\n        \"\"\"\n        return self.element.getNaturalIsotopics()\n\n    def getMcnpId(self):\n        \"\"\"Gets the MCNP ID for this element.\n\n        Returns\n       
 -------\n        id : str\n            The MCNP ID e.g. ``1000``, ``92000``. Not zero-padded on the left.\n        \"\"\"\n        return \"{0:d}000\".format(self.z)\n\n    def getMcc2Id(self):\n        \"\"\"Return the MC2-2 nuclide identification label based on the ENDF/B-V.2 cross section library.\"\"\"\n        return self.mcc2id\n\n    def getMcc3Id(self):\n        \"\"\"Return the MC2-3 nuclide identification label based on the ENDF/B-VII.1 cross section library.\"\"\"\n        return self.getMcc3IdEndfbVII1()\n\n    def getMcc3IdEndfbVII0(self):\n        \"\"\"Return the MC2-3 nuclide identification label based on the ENDF/B-VII.0 cross section library.\"\"\"\n        return self.mcc3idEndfbVII0\n\n    def getMcc3IdEndfbVII1(self):\n        \"\"\"Return the MC2-3 nuclide identification label based on the ENDF/B-VII.1 cross section library.\"\"\"\n        return self.mcc3idEndfbVII1\n\n    def getSerpentId(self):\n        \"\"\"Gets the SERPENT ID for this natural nuclide.\n\n        Returns\n        -------\n        id: str\n            SERPENT ID: ``C-nat``, `Fe-nat``\n        \"\"\"\n        return f\"{self.element.symbol.capitalize()}-nat\"\n\n    def getEndfMatNum(self):\n        \"\"\"Get the ENDF mat number for this element.\"\"\"\n        if self.z != 6:\n            runLog.warning(\n                f\"The only elemental in ENDF/B VII.1 is carbon. ENDF mat num was requested for the elemental {self} and\"\n                \"will not be helpful for working with ENDF/B VII.1. 
Try to expandElementalsToIsotopics\"\n            )\n\n        return str(self.z * 100)\n\n\nclass DummyNuclideBase(INuclide):\n    \"\"\"\n    Represents a dummy/placeholder nuclide within the system.\n\n    Notes\n    -----\n    This may be used to store mass from a depletion calculation, specifically in the instances where the burn chain is\n    truncated.\n    \"\"\"\n\n    def __init__(self, element, name, weight):\n        INuclide.__init__(\n            self,\n            element=element,\n            a=0,\n            state=0,\n            weight=weight,\n            abundance=0.0,\n            halflife=np.inf,\n            name=name,\n            label=\"DMP\" + name[4],\n        )\n\n    def __repr__(self):\n        return f\"<{self.__class__.__name__} {self.name}:  W:{self.weight:<12.6e}, Label:{self.label}>\"\n\n    def __hash__(self):\n        return hash((self.a, self.z, self.state, self.weight))\n\n    def __lt__(self, other):\n        return (self.z, self.a, self.state, self.weight) < (\n            other.z,\n            other.a,\n            other.state,\n            other.weight,\n        )\n\n    def getNaturalIsotopics(self):\n        \"\"\"Gets the natural isotopics, an empty iterator.\n\n        Gets the naturally occurring nuclides for this nuclide.\n\n        Returns\n        -------\n        empty: iterator\n            An empty generator\n\n        See Also\n        --------\n        :meth:`INuclide.getNaturalIsotopics`\n        \"\"\"\n        return\n        yield\n\n    def isHeavyMetal(self):\n        return False\n\n    def getMcc2Id(self):\n        \"\"\"Return the MC2-2 nuclide identification label based on the ENDF/B-V.2 cross section library.\"\"\"\n        return self.mcc2id\n\n    def getMcc3Id(self):\n        \"\"\"Return the MC2-3 nuclide identification label based on the ENDF/B-VII.1 cross section library.\"\"\"\n        return self.getMcc3IdEndfbVII1()\n\n    def getMcc3IdEndfbVII0(self):\n        \"\"\"Return the MC2-3 
nuclide identification label based on the ENDF/B-VII.0 cross section library.\"\"\"\n        return self.mcc3idEndfbVII0\n\n    def getMcc3IdEndfbVII1(self):\n        \"\"\"Return the MC2-3 nuclide identification label based on the ENDF/B-VII.1 cross section library.\"\"\"\n        return self.mcc3idEndfbVII1\n\n\nclass LumpNuclideBase(INuclide):\n    \"\"\"\n    Represents a combination of many nuclides from `NuclideBases` into a single lumped nuclide.\n\n    See Also\n    --------\n    armi.physics.neutronics.fissionProduct model:\n        Describes what nuclides LumpNuclideBase is expend to.\n    \"\"\"\n\n    def __init__(self, element, name, weight):\n        INuclide.__init__(\n            self,\n            element=element,\n            a=0,\n            state=0,\n            weight=weight,\n            abundance=0.0,\n            halflife=np.inf,\n            name=name,\n            label=name[1:],\n        )\n\n    def __repr__(self):\n        return f\"<{self.__class__.__name__} {self.name}:  W:{self.weight:<12.6e}, Label:{self.label}>\"\n\n    def __hash__(self):\n        return hash((self.a, self.z, self.state, self.weight))\n\n    def __lt__(self, other):\n        return (self.z, self.a, self.state, self.weight) < (\n            other.z,\n            other.a,\n            other.state,\n            other.weight,\n        )\n\n    def getNaturalIsotopics(self):\n        \"\"\"Gets the natural isotopics, an empty iterator.\n\n        Gets the naturally occurring nuclides for this nuclide.\n\n        Returns\n        -------\n        empty: iterator\n            An empty generator\n\n        See Also\n        --------\n        :meth:`INuclide.getNaturalIsotopics`\n        \"\"\"\n        return\n        yield\n\n    def isHeavyMetal(self):\n        return False\n\n    def getMcc2Id(self):\n        \"\"\"Return the MC2-2 nuclide identification label based on the ENDF/B-V.2 cross section library.\"\"\"\n        return self.mcc2id\n\n    def 
getMcc3Id(self):\n        \"\"\"Return the MC2-3 nuclide identification label based on the ENDF/B-VII.1 cross section library.\"\"\"\n        return self.getMcc3IdEndfbVII1()\n\n    def getMcc3IdEndfbVII0(self):\n        \"\"\"Return the MC2-3 nuclide identification label based on the ENDF/B-VII.0 cross section library.\"\"\"\n        return self.mcc3idEndfbVII0\n\n    def getMcc3IdEndfbVII1(self):\n        \"\"\"Return the MC2-3 nuclide identification label based on the ENDF/B-VII.1 cross section library.\"\"\"\n        return self.mcc3idEndfbVII1\n\n\ndef initReachableActiveNuclidesThroughBurnChain(nuclides, numberDensities, activeNuclides):\n    \"\"\"Pass through to NuclideBases.initReachableActiveNuclidesThroughBurnChain() for the global NuclideBases object.\"\"\"\n    global nuclideBases\n    return nuclideBases.initReachableActiveNuclidesThroughBurnChain(nuclides, numberDensities, activeNuclides)\n\n\ndef getIsotopics(nucName):\n    \"\"\"Pass through to NuclideBases.getIsotopics() for the global NuclideBases object.\"\"\"\n    global nuclideBases\n    return nuclideBases.getIsotopics(nucName)\n\n\ndef fromName(name):\n    \"\"\"Pass through to NuclideBases.fromName() for the global NuclideBases object.\"\"\"\n    global nuclideBases\n    return nuclideBases.fromName(name)\n\n\ndef isMonoIsotopicElement(name):\n    \"\"\"Pass through to NuclideBases.isMonoIsotopicElement() for the global NuclideBases object.\"\"\"\n    global nuclideBases\n    return nuclideBases.isMonoIsotopicElement(name)\n\n\ndef where(predicate):\n    \"\"\"Pass through to NuclideBases.where() for the global NuclideBases object.\"\"\"\n    global nuclideBases\n    return nuclideBases.where(predicate)\n\n\ndef single(predicate):\n    \"\"\"Pass through to NuclideBases.single() for the global NuclideBases object.\"\"\"\n    global nuclideBases\n    return nuclideBases.single(predicate)\n\n\ndef changeLabel(nuclideBase, newLabel):\n    \"\"\"Pass through to NuclideBases.changeLabel() for 
the global NuclideBases object.\"\"\"\n    global nuclideBases\n    nuclideBases.changeLabel(nuclideBase, newLabel)\n\n\ndef getDepletableNuclides(activeNuclides, obj):\n    \"\"\"Get nuclides in this object that are in the burn chain.\"\"\"\n    return sorted(set(activeNuclides) & set(obj.getNuclides()))\n\n\ndef imposeBurnChain(burnChainStream):\n    \"\"\"Pass through to NuclideBases.imposeBurnChain() for the global NuclideBases object.\"\"\"\n    global nuclideBases\n    nuclideBases.imposeBurnChain(burnChainStream)\n\n\ndef factory():\n    \"\"\"Pass through to NuclideBases.factory() for the global NuclideBases object.\"\"\"\n    global nuclideBases\n    global burnChainImposed\n    global instances\n    global byName\n    global byDBName\n    global byLabel\n    global byMcc2Id\n    global byMcc3Id\n    global byMcc3IdEndfbVII0\n    global byMcc3IdEndfbVII1\n    global byMcnpId\n    global byAAAZZZSId\n\n    nuclideBases = NuclideBases()\n\n    instances = nuclideBases.instances\n    burnChainImposed = nuclideBases.burnChainImposed\n    byName = nuclideBases.byName\n    byDBName = nuclideBases.byDBName\n    byLabel = nuclideBases.byLabel\n    byMcc2Id = nuclideBases.byMcc2Id\n    byMcc3Id = nuclideBases.byMcc3Id  # for backwards compatibility. 
Identical to byMcc3IdEndfbVII1\n    byMcc3IdEndfbVII0 = nuclideBases.byMcc3IdEndfbVII0\n    byMcc3IdEndfbVII1 = nuclideBases.byMcc3IdEndfbVII1\n    byMcnpId = nuclideBases.byMcnpId\n    byAAAZZZSId = nuclideBases.byAAAZZZSId\n\n\ndef addNuclideBases():\n    \"\"\"Pass through to NuclideBases.addNuclideBases() for the global NuclideBases object.\"\"\"\n    global nuclideBases\n    nuclideBases.addNuclideBases()\n\n\ndef readMCCNuclideData():\n    \"\"\"Pass through to NuclideBases.readMCCNuclideData() for the global NuclideBases object.\"\"\"\n    global nuclideBases\n    nuclideBases.readMCCNuclideData()\n\n\ndef updateNuclideBasesForSpecialCases():\n    \"\"\"Pass through to NuclideBases.updateNuclideBasesForSpecialCases() for the global NuclideBases object.\"\"\"\n    global nuclideBases\n    nuclideBases.updateNuclideBasesForSpecialCases()\n\n\ndef addGlobalNuclide(nuclide: NuclideBase):\n    \"\"\"Pass through to NuclideBases.addNuclide() for the global NuclideBases object.\"\"\"\n    global nuclideBases\n    nuclideBases.addNuclide(nuclide)\n\n\ndef destroyGlobalNuclides():\n    \"\"\"Pass through to NuclideBases.clear() for the global NuclideBases object.\"\"\"\n    global nuclideBases\n    nuclideBases.clear()\n\n\nclass NuclideBases:\n    \"\"\"\n    A container for all the nuclide information in the simulation.\n\n    By design, you would only expect to have one instance of this object in memory during a simulation.\n\n    Attributes\n    ----------\n    burnChainImposed: bool\n        Have we applied transmutation and decay data to each nuclide?\n    instances: list[INuclide]\n        A simple list of the nuclides in this class.\n    byName: dict[str, INuclide]\n        A dictionary of the nuclides in this class, keyed by name, e.g., \"U235\".\n    byDBName: dict[str, INuclide]\n        A dictionary of the nuclides in this class, keyed by database name, e.g., \"nU235\".\n    byLabel: dict[str, INuclide]\n        A dictionary of the nuclides in this class, 
keyed by label, e.g., \"U235\".\n    byMcc2Id: dict[str, INuclide]\n        A dictionary of the nuclides in this class, keyed by MC2-2 ID, e.g., \"U-2355\".\n    byMcc3Id: dict[str, INuclide]\n        A dictionary of the nuclides in this class, keyed by MC2-3 ID, e.g., \"U235_7\".\n        (This exists for backwards compat. Identical to byMcc3IdEndfbVII1.)\n    byMcc3IdEndfbVII0: dict[str, INuclide]\n        A dictionary of the nuclides in this class, keyed by MC2-3 ID, e.g., \"U235_7\".\n    byMcc3IdEndfbVII1: dict[str, INuclide]\n        A dictionary of the nuclides in this class, keyed by MC2-3 ID, e.g., \"U235_7\".\n    byMcnpId: dict[str, INuclide]\n        A dictionary of the nuclides in this class, keyed by MCNP ID, e.g., 92235.\n    byAAAZZZSId: dict[int, INuclide]\n        A dictionary of the nuclides in this class, keyed by AAAZZZS, e.g., 2350920.\n    elements: Elements\n        A container for all the atomics elements information in the simulation.\n    nuclidesFile: str\n        File path to the custom ARMI \"nuclides.dat\" file, containing a plain text description of all the nuclides to be\n        modeled including: Z, number of neutrons, mass number, amu, natural abundance, half life and nu-bar and more.\n    mccNuclidesFile: str\n        File path to the \"mcc-nuclides.yaml\" file, containing nuclides defined by the MC2-2 and MC2-3 codes, with\n        various ENDF/B-V mappings.\n    \"\"\"\n\n    DEFAULT_NUCLIDES_FILE = os.path.join(context.RES, \"nuclides.dat\")\n    DEFAULT_MCC_NUCLIDES_FILE = os.path.join(context.RES, \"mcc-nuclides.yaml\")\n\n    def __init__(self, nuclidesFile=None, mccNuclidesFile=None):\n        self.burnChainImposed: bool = False\n        self.elements = None\n        self.instances: list[INuclide] = []\n        self.byName: dict[str, INuclide] = {}\n        self.byDBName: dict[str, INuclide] = {}\n        self.byLabel: dict[str, INuclide] = {}\n        self.byMcc2Id: dict[str, INuclide] = {}\n        self.byMcc3Id: 
dict[str, INuclide] = {}\n        self.byMcc3IdEndfbVII0: dict[str, INuclide] = {}\n        self.byMcc3IdEndfbVII1: dict[str, INuclide] = {}\n        self.byMcnpId: dict[str, INuclide] = {}\n        self.byAAAZZZSId: dict[int, INuclide] = {}\n        self.nuclidesFile: str = nuclidesFile if nuclidesFile else self.DEFAULT_NUCLIDES_FILE\n        self.mccNuclidesFile: str = mccNuclidesFile if mccNuclidesFile else self.DEFAULT_MCC_NUCLIDES_FILE\n        self.factory()\n\n    def clear(self):\n        \"\"\"Empty all the data containers in this object.\"\"\"\n        # grab all the globals\n        global burnChainImposed\n        global instances\n        global byName\n        global byDBName\n        global byLabel\n        global byMcc2Id\n        global byMcc3Id\n        global byMcc3IdEndfbVII0\n        global byMcc3IdEndfbVII1\n        global byMcnpId\n        global byAAAZZZSId\n\n        # reset the class attributes\n        self.burnChainImposed = False\n        self.elements = None\n        self.instances = []\n        self.byName = {}\n        self.byDBName = {}\n        self.byLabel = {}\n        self.byMcc2Id = {}\n        self.byMcc3Id = {}\n        self.byMcc3IdEndfbVII0 = {}\n        self.byMcc3IdEndfbVII1 = {}\n        self.byMcnpId = {}\n        self.byAAAZZZSId = {}\n\n        # reset the globals\n        instances = self.instances\n        burnChainImposed = self.burnChainImposed\n        byName = self.byName\n        byDBName = self.byDBName\n        byLabel = self.byLabel\n        byMcc2Id = self.byMcc2Id\n        byMcc3Id = self.byMcc3Id\n        byMcc3IdEndfbVII0 = self.byMcc3IdEndfbVII0\n        byMcc3IdEndfbVII1 = self.byMcc3IdEndfbVII1\n        byMcnpId = self.byMcnpId\n        byAAAZZZSId = self.byAAAZZZSId\n\n    def addNuclide(self, nuclide: INuclide):\n        \"\"\"Add an element to the dictionaries in this class.\"\"\"\n        if nuclide.name in self.byName or nuclide.getDatabaseName() in self.byDBName or nuclide.label in 
self.byLabel:\n            raise ValueError(f\"{nuclide} has already been added.\")\n\n        self.instances.append(nuclide)\n        self.byName[nuclide.name] = nuclide\n        self.byDBName[nuclide.getDatabaseName()] = nuclide\n        self.byLabel[nuclide.label] = nuclide\n\n        # Add look-up based on the MCNP nuclide ID\n        if isinstance(nuclide, IMcnpNuclide):\n            if nuclide.getMcnpId() in self.byMcnpId:\n                raise ValueError(f\"{nuclide} with McnpId {nuclide.getMcnpId()} has already been added.\")\n            self.byMcnpId[nuclide.getMcnpId()] = nuclide\n\n        if not isinstance(nuclide, (NaturalNuclideBase, LumpNuclideBase, DummyNuclideBase)):\n            self.byAAAZZZSId[nuclide.getAAAZZZSId()] = nuclide\n\n    def factory(self, nuclidesFile: str = None, mccNuclidesFile: str = None, elementsFile: str = None):\n        \"\"\"\n        Reads data files to instantiate the :py:class:`INuclides <INuclide>`.\n\n        Reads NIST, MC**2 and burn chain data files to instantiate the :py:class:`INuclides <INuclide>`. Also clears and\n        fills in the class attibues: instances, byName, byLabel, byMcc3IdEndfbVII0, and byMcc3IdEndfbVII1. 
This method\n        is automatically run upon initializing the class, hence it is not usually necessary to re-run it unless there is\n        a change to the data files, which should not happen during run time, or a *bad* :py:class`INuclide` is created.\n\n        Parameters\n        ----------\n        nuclidesFile: str\n            File path to the custom ARMI \"nuclides.dat\" file, containing a plain text description of all nuclides to be\n            modeled including: Z, number of neutrons, mass number, AMU, natural abundance, half life, nu-bar and more.\n        mccNuclidesFile: str\n            File path to the \"mcc-nuclides.yaml\" file, containing nuclides defined by the MC2-2 and MC2-3 codes, with\n            various ENDF/B-V mappings.\n        elementsFile: str\n            File path to the custom ARMI \"elements.dat\" file.\n\n        Notes\n        -----\n        This cannot be run more than once. NuclideBase instances are used throughout the ARMI ecosystem and are even\n        class attributes in some cases. 
Re-instantiating them would orphan any existing ones and break everything.\n        \"\"\"\n        if len(self.instances) != 0:\n            raise RuntimeError(\n                \"Nuclides are already initialized and cannot be re-initialized unless `nuclideBases.clear()` is called \"\n                \"first.\"\n            )\n\n        # If an input file is provided, use it, otherwise there is a class default.\n        if nuclidesFile:\n            self.nuclidesFile = nuclidesFile\n\n        if mccNuclidesFile:\n            self.mccNuclidesFile = mccNuclidesFile\n\n        # load the fundamental elements library\n        elements.factory(elementsFile)\n        self.elements = elements.elements\n\n        # load the isotopes and isomers library\n        self.addNuclideBases(self.nuclidesFile)\n        self.__addNaturalNuclideBases()\n        self.__addDummyNuclideBases()\n        self.__addLumpedFissionProductNuclideBases()\n        self.updateNuclideBasesForSpecialCases()\n        self.readMCCNuclideData(self.mccNuclidesFile)\n        self.__renormalizeNuclideToElementRelationship()\n        self.__deriveElementalWeightsByNaturalNuclideAbundances()\n\n    def initReachableActiveNuclidesThroughBurnChain(self, nuclides, numberDensities, activeNuclides):\n        \"\"\"\n        March through the depletion chain and find all nuclides that can be reached by depleting nuclides passed in.\n\n        This limits depletion to the smallest set of nuclides that matters.\n\n        Parameters\n        ----------\n        nuclides : np.array, dtype=\"S6\"\n            Starting array of nuclide names\n        numberDensities : np.array, dtype=np.float64\n            Starting array of number densities\n        activeNuclides : OrderedSet\n            Active nuclides defined on the reactor blueprints object. 
See: armi.reactor.blueprints.py\n        \"\"\"\n        if not self.burnChainImposed:\n            return nuclides, numberDensities\n\n        missingActiveNuclides = set()\n        memo = set()\n        nucNames = [nucName.decode() for nucName in nuclides]\n        difference = set(nucNames).difference(memo)\n        while any(difference):\n            newNucs = set()\n            nuclide = difference.pop()\n            memo.add(nuclide)\n            # Skip the nuclide if it is not `active` in the burn-chain\n            if nuclide not in activeNuclides:\n                continue\n\n            nuclideObj = self.byName[nuclide]\n\n            for interaction in nuclideObj.trans + nuclideObj.decays:\n                try:\n                    # Interaction nuclides can only be added to the number density dictionary if they are a part of the\n                    # user-defined active nuclides\n                    productNuclide = interaction.getPreferredProduct(activeNuclides)\n                    if productNuclide not in nucNames:\n                        newNucs.add(productNuclide.encode())\n                except KeyError:\n                    # Keep track of the first production nuclide\n                    missingActiveNuclides.add(interaction.productNuclides)\n\n            # add the new nuclides to the number density arrays\n            newNDens = np.zeros(len(newNucs), dtype=np.float64)\n            nuclides = np.append(nuclides, list(newNucs))\n            numberDensities = np.append(numberDensities, newNDens)\n\n            nucNames = [nucName.decode() for nucName in nuclides]\n            difference = set(nucNames).difference(memo)\n\n        if self.burnChainImposed and missingActiveNuclides:\n            self._failOnMissingActiveNuclides(missingActiveNuclides)\n\n        return nuclides, numberDensities\n\n    def _failOnMissingActiveNuclides(self, missingActiveNuclides):\n        \"\"\"Raise ValueError with notification of which nuclides to include in 
the burn-chain.\"\"\"\n        msg = \"Missing active nuclides in loading file. Add the following nuclides:\"\n        for i, nucList in enumerate(missingActiveNuclides, 1):\n            msg += f\"\\n {i} - \"  # Index of\n            for j, nuc in enumerate(nucList, 1):\n                delimiter = \" or \" if j < len(nucList) else \"\"\n                msg += f\"{nuc}{delimiter}\"\n\n        raise ValueError(msg)\n\n    def getIsotopics(self, nucName):\n        \"\"\"Expand elemental nuc name to isotopic nuc bases.\"\"\"\n        nb = self.byName[nucName]\n        if isinstance(nb, (LumpNuclideBase, DummyNuclideBase)):\n            # skip lumped fission products or dumps\n            return []\n        elif isinstance(nb, NaturalNuclideBase):\n            isotopics = nb.getNaturalIsotopics()\n        else:\n            isotopics = [nb]\n\n        return isotopics\n\n    def fromName(self, name):\n        \"\"\"Return a nuclide from its name.\"\"\"\n        matches = [nn for nn in self.instances if nn.name == name]\n        if len(matches) != 1:\n            raise Exception(f\"Too many or too few ({len(matches)}) matches for {name}\")\n\n        return matches[0]\n\n    def isMonoIsotopicElement(self, name):\n        \"\"\"Return true if this is the only naturally occurring isotope of its element.\"\"\"\n        base = self.byName[name]\n        return base.abundance > 0 and len([e for e in base.element.nuclides if e.abundance > 0]) == 1\n\n    def where(self, predicate):\n        \"\"\"\n        Return all :py:class:`INuclides <INuclide>` objects matching a condition.\n\n        Returns an iterator of :py:class:`INuclides <INuclide>` matching the specified condition.\n\n        Parameters\n        ----------\n        predicate: lambda\n            A lambda, or function, accepting a :py:class:`INuclide` as a parameter\n\n        Examples\n        --------\n        >>> from armi.nucDirectory.nuclideBases import NuclideBases\n        >>> nuclideBases = 
NuclideBases()\n        >>> [nn.name for nn in nuclideBases.where(lambda nb: \"Z\" in nb.name)]\n        ['ZN64', 'ZN66', 'ZN67', 'ZN68', 'ZN70', 'ZR90', 'ZR91', 'ZR92', 'ZR94', 'ZR96', 'ZR93', 'ZR95', 'ZR']\n        >>> # in order to get length, convert to list\n        >>> isomers90 = list(nuclideBases.where(lambda nb: nb.a == 95))\n        >>> len(isomers90)\n        3\n        >>> for iso in isomers:\n        ...     print(iso)\n        <NuclideBase MO95: Z:42, A:95, S:0, label:MO2N>\n        <NuclideBase NB95: Z:41, A:95, S:0, label:NB2N>\n        <NuclideBase ZR95: Z:40, A:95, S:0, label:ZR2N>\n        \"\"\"\n        return filter(predicate, self.instances)\n\n    def single(self, predicate):\n        \"\"\"\n        Return a single :py:class:`INuclide` object meeting the specified condition.\n\n        Similar to :py:func:`where`, this function uses a lambda input to filter the\n        :py:attr:`INuclide instances <instances>`. If there is not 1 and only 1 match for the specified condition, an\n        exception is raised.\n\n        Examples\n        --------\n        >>> from armi.nucDirectory import nuclideBases\n        >>> nuclideBases.single(lambda nb: nb.name == \"C\")\n        <NaturalNuclideBase C: Z:6, w:12.0107358968, label:C>\n        >>> nuclideBases.single(lambda nb: nb.z == 95 and nb.a == 242 and nb.state == 1)\n        <NuclideBase AM242M: Z:95, A:242, S:1, label:AM4C>\n        \"\"\"\n        matches = [nuc for nuc in self.instances if predicate(nuc)]\n        if len(matches) != 1:\n            raise IndexError(\n                \"Expected single match, but got {} matches:\\n  {}\".format(\n                    len(matches), \"\\n  \".join(str(mo) for mo in matches)\n                )\n            )\n\n        return matches[0]\n\n    def changeLabel(self, nuclideBase, newLabel):\n        \"\"\"\n        Updates a nuclide label and modifies the ``byLabel`` look-up dictionary.\n\n        Notes\n        -----\n        Since nuclide objects 
are defined and stored globally, any change to the attributes will be maintained.\n        \"\"\"\n        nuclideBase.label = newLabel\n        self.byLabel[newLabel] = nuclideBase\n\n    def imposeBurnChain(self, burnChainStream):\n        \"\"\"\n        Apply transmutation and decay information to each nuclide.\n\n        Notes\n        -----\n        You cannot impose a burn chain twice. Doing so would require that you clean out the transmutations and decays\n        from all the module-level nuclide bases, which generally requires that you rebuild them. But rebuilding those is\n        not an option because some of them get set as class-level attributes and would be orphaned. If a need to change\n        burn chains mid-run re-arises, then a better nuclideBase-level burnchain cleanup should be implemented so the\n        objects don't have to change identity.\n\n        See Also\n        --------\n        armi.nucDirectory.transmutations : describes file format\n        \"\"\"\n        if self.burnChainImposed:\n            # The only time this should happen is if in a unit test that has already processed conftest.py and is now\n            # building a Case that also imposes this.\n            runLog.warning(\"Burn chain already imposed. 
Skipping reimposition.\")\n            return\n\n        self.burnChainImposed = True\n        global burnChainImposed\n        burnChainImposed = True\n        yaml = YAML(typ=\"rt\")\n        yaml.allow_duplicate_keys = False\n        burnData = yaml.load(burnChainStream)\n\n        for nucName, burnInfo in burnData.items():\n            nuclide = self.byName[nucName]\n            # think of this protected stuff as \"module level protection\" rather than class.\n            nuclide._processBurnData(burnInfo)\n\n    def addNuclideBases(self, nuclidesFile: str):\n        \"\"\"\n        Read natural abundances of any natural nuclides.\n\n        This adjusts already-existing NuclideBases and Elements with the new information.\n\n        .. impl:: Separating natural abundance data from code.\n            :id: I_ARMI_ND_DATA0\n            :implements: R_ARMI_ND_DATA\n\n            This function reads the ``nuclides.dat`` file from the ARMI resources folder. This file contains metadata\n            for 4,614 nuclides, including number of protons, number of neutrons, atomic number, excited state, element\n            symbol, atomic mass, natural abundance, half-life, and spontaneous fission yield. 
The data in\n            ``nuclides.dat`` have been collected from multiple different sources; the references are given in comments\n            at the top of that file.\n\n        Parameters\n        ----------\n        nuclidesFile: str\n            File path to the custom ARMI \"nuclides.dat\" file, containing a plain text description of all nuclides to be\n            modeled including: Z, number of neutrons, mass number, AMU, natural abundance, half life, nu-bar and more.\n        \"\"\"\n        with open(nuclidesFile, \"r\") as f:\n            for line in f:\n                # Skip header lines\n                if line.startswith(\"#\") or line.startswith(\"Z\"):\n                    continue\n                lineData = line.split()\n                _z = int(lineData[0])\n                _n = int(lineData[1])\n                a = int(lineData[2])\n                state = int(lineData[3])\n                sym = lineData[4].upper()\n                mass = float(lineData[5])\n                abun = float(lineData[6])\n                halflife = lineData[7]\n                if halflife == \"inf\":\n                    halflife = np.inf\n                else:\n                    halflife = float(halflife)\n                nuSF = float(lineData[8])\n\n                element = self.elements.bySymbol[sym]\n                nb = NuclideBase(element, a, mass, abun, state, halflife)\n                nb.nuSF = nuSF\n                self.addNuclide(nb)\n\n    def __addNaturalNuclideBases(self):\n        \"\"\"Generates a complete set of nuclide bases for each naturally occurring element.\"\"\"\n        for element in self.elements.byZ.values():\n            if element.symbol not in self.byName:\n                if element.isNaturallyOccurring():\n                    self.addNuclide(NaturalNuclideBase(element.symbol, element))\n\n    def __addDummyNuclideBases(self):\n        \"\"\"\n        Generates a set of dummy nuclides.\n\n        Notes\n        -----\n        
These nuclides can be used to truncate a depletion / burn-up chain within the MC2 program.\n        \"\"\"\n        self.addNuclide(DummyNuclideBase(element=self.elements.byName[\"Dummy\"], name=\"DUMP1\", weight=10.0))\n        self.addNuclide(DummyNuclideBase(element=self.elements.byName[\"Dummy\"], name=\"DUMP2\", weight=240.0))\n\n    def __addLumpedFissionProductNuclideBases(self):\n        \"\"\"Generates a set of nuclides for use as lumped fission products.\"\"\"\n        self.addNuclide(\n            LumpNuclideBase(element=self.elements.byName[\"LumpedFissionProduct\"], name=\"LFP35\", weight=233.273)\n        )\n        self.addNuclide(\n            LumpNuclideBase(element=self.elements.byName[\"LumpedFissionProduct\"], name=\"LFP38\", weight=235.78)\n        )\n        self.addNuclide(\n            LumpNuclideBase(element=self.elements.byName[\"LumpedFissionProduct\"], name=\"LFP39\", weight=236.898)\n        )\n        self.addNuclide(\n            LumpNuclideBase(element=self.elements.byName[\"LumpedFissionProduct\"], name=\"LFP40\", weight=237.7)\n        )\n        self.addNuclide(\n            LumpNuclideBase(element=self.elements.byName[\"LumpedFissionProduct\"], name=\"LFP41\", weight=238.812)\n        )\n        self.addNuclide(LumpNuclideBase(element=self.elements.byName[\"LumpedFissionProduct\"], name=\"LREGN\", weight=1.0))\n\n    def readMCCNuclideData(self, mccNuclidesFile):\n        r\"\"\"Read in the label data for the MC2-2 and MC2-3 cross section codes to the nuclide bases.\n\n        .. impl:: Separating MCC data from code.\n            :id: I_ARMI_ND_DATA1\n            :implements: R_ARMI_ND_DATA\n\n            This function reads the mcc-nuclides.yaml file from the ARMI resources folder. This file contains the\n            MC\\ :sup:`2`-2 ID (from ENDF/B-V.2) and MC\\ :sup:`2`-3 ID (from ENDF/B-VII.0) for all nuclides in\n            MC\\ :sup:`2`. 
The ``mcc2id``, ``mcc3idEndfVII0``, and  ``mcc3idEndfVII1`` attributes of each\n            :py:class:`NuclideBase <armi.nucDirectory.nuclideBases.NuclideBase>` instance are updated as the data is\n            read, and the global dictionaries ``byMcc2Id`` ``byMcc3IdEndfVII0`` and ``byMcc3IdEndfVII1`` are populated\n            with the nuclide bases keyed by their corresponding ID for each code.\n        \"\"\"\n        with open(mccNuclidesFile, \"r\") as f:\n            yaml = YAML(typ=\"rt\")\n            nuclides = yaml.load(f)\n\n        for n in nuclides:\n            nb = self.byName[n]\n            mcc2id = nuclides[n][\"ENDF/B-V.2\"]\n            mcc3idEndfbVII0 = nuclides[n][\"ENDF/B-VII.0\"]\n            mcc3idEndfbVII1 = nuclides[n][\"ENDF/B-VII.1\"]\n            if mcc2id is not None:\n                nb.mcc2id = mcc2id\n                self.byMcc2Id[nb.getMcc2Id()] = nb\n            if mcc3idEndfbVII0 is not None:\n                nb.mcc3idEndfbVII0 = mcc3idEndfbVII0\n                self.byMcc3IdEndfbVII0[nb.getMcc3IdEndfbVII0()] = nb\n            if mcc3idEndfbVII1 is not None:\n                nb.mcc3idEndfbVII1 = mcc3idEndfbVII1\n                self.byMcc3IdEndfbVII1[nb.getMcc3IdEndfbVII1()] = nb\n\n        # Have the byMcc3Id dictionary be VII.1 IDs.\n        self.byMcc3Id = self.byMcc3IdEndfbVII1\n\n    def updateNuclideBasesForSpecialCases(self):\n        \"\"\"\n        Update the nuclide bases for special case name changes.\n\n        .. impl:: The special case name Am242g is supported.\n            :id: I_ARMI_ND_ISOTOPES6\n            :implements: R_ARMI_ND_ISOTOPES\n\n            This function updates the keys for the :py:class:`NuclideBase <armi.nucDirectory.nuclideBases.NuclideBase>`\n            instances for Am-242m and Am-242 in the ``byName`` and ``byDBName`` global dictionaries. 
This function\n            associates the more common isomer Am-242m with the name \"AM242\", and uses \"AM242G\" to denote the ground\n            state.\n\n        Notes\n        -----\n        This function is specifically added to change the definition of `AM242` to refer to its metastable isomer,\n        `AM242M` by default. `AM242M` is most common isomer of `AM242` and is typically the desired isomer when being\n        requested rather than than the ground state (i.e., S=0) of `AM242`.\n        \"\"\"\n        # Change the name of `AM242` to specific represent its ground state.\n        am242g = self.byName[\"AM242\"]\n        am242g.name = \"AM242G\"\n        self.byName[\"AM242G\"] = am242g\n        self.byDBName[self.byName[\"AM242G\"].getDatabaseName()] = am242g\n\n        # Update the pointer of `AM242` to refer to `AM242M`.\n        am242m = self.byName[\"AM242M\"]\n        self.byName[\"AM242\"] = am242m\n        self.byDBName[\"nAm242\"] = am242m\n        self.byDBName[self.byName[\"AM242\"].getDatabaseName()] = am242m\n\n    def __renormalizeNuclideToElementRelationship(self):\n        \"\"\"Fill in the missing element data for each nuclide.\"\"\"\n        for nuc in self.instances:\n            if nuc.element is None:\n                nuc.element = self.elements.byZ[nuc.z]\n                nuc.element.append(nuc)\n\n    def __deriveElementalWeightsByNaturalNuclideAbundances(self):\n        \"\"\"Derives and sets the standard atomic weights for each element that has naturally occurring nuclides.\"\"\"\n        for element in self.elements.byName.values():\n            numer = 0.0\n            denom = 0.0\n            for nb in element.getNaturalIsotopics():\n                numer += nb.weight * nb.abundance\n                denom += nb.abundance\n\n            if denom:\n                element.standardWeight = numer / denom\n\n\nfactory()\n"
  },
  {
    "path": "armi/nucDirectory/tests/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom os import path\n\nNUCDIRECTORY_TESTS_DEFAULT_DIR_PATH = path.dirname(__file__)\n"
  },
  {
    "path": "armi/nucDirectory/tests/test_elements.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for Elements.\"\"\"\n\nimport unittest\n\nfrom armi.nucDirectory.elements import Element, Elements\n\n\nclass TestElements(unittest.TestCase):\n    def setUp(self):\n        self.elements = Elements()\n\n    def test_elements_elementBulkProperties(self):\n        numElements = len(self.elements.byZ)\n        self.assertEqual(numElements, len(self.elements.byZ.values()))\n        self.assertEqual(numElements, len(self.elements.byName))\n        self.assertEqual(numElements, len(self.elements.bySymbol))\n\n    def test_element_elementByNameReturnsElement(self):\n        \"\"\"Get elements by name.\n\n        .. test:: Get elements by name.\n            :id: T_ARMI_ND_ELEMENTS0\n            :tests: R_ARMI_ND_ELEMENTS\n        \"\"\"\n        for ee in self.elements.byZ.values():\n            self.assertIs(ee, self.elements.byName[ee.name])\n\n    def test_element_elementByZReturnsElement(self):\n        \"\"\"Get elements by Z.\n\n        .. test:: Get elements by Z.\n            :id: T_ARMI_ND_ELEMENTS1\n            :tests: R_ARMI_ND_ELEMENTS\n        \"\"\"\n        for ee in self.elements.byZ.values():\n            self.assertIs(ee, self.elements.byZ[ee.z])\n\n    def test_element_elementBySymbolReturnsElement(self):\n        \"\"\"Get elements by symbol.\n\n        .. 
test:: Get elements by symbol.\n            :id: T_ARMI_ND_ELEMENTS2\n            :tests: R_ARMI_ND_ELEMENTS\n        \"\"\"\n        for ee in self.elements.byZ.values():\n            self.assertIs(ee, self.elements.bySymbol[ee.symbol])\n\n    def test_element_addExistingElementFails(self):\n        for ee in self.elements.byZ.values():\n            with self.assertRaises(ValueError):\n                self.elements.Element(ee.z, ee.symbol, ee.name)\n\n    def test_addedElementAppearsInElementList(self):\n        self.assertNotIn(\"bacon\", self.elements.byName)\n        self.assertNotIn(999, self.elements.byZ)\n        self.assertNotIn(\"BZ\", self.elements.bySymbol)\n        self.elements.addElement(Element(999, \"BZ\", \"bacon\"))\n        self.assertIn(\"bacon\", self.elements.byName)\n        self.assertIn(999, self.elements.byZ)\n        self.assertIn(\"BZ\", self.elements.bySymbol)\n\n    def test_elementGetNatIsosOnlyRetrievesAbund(self):\n        for ee in self.elements.byZ.values():\n            if not ee.isNaturallyOccurring():\n                continue\n\n            for nuc in ee.getNaturalIsotopics():\n                self.assertGreater(nuc.abundance, 0.0)\n                self.assertGreater(nuc.a, 0)\n\n    def test_elementIsNatOccurring(self):\n        \"\"\"\n        Test isNaturallyOccurring method by manually testing all elements.\n\n        Uses RIPL definitions of naturally occurring. Protactinium is debated as naturally occurring. Yeah it exists as\n        a U235 decay product but it's kind of pseudo-natural.\n\n        .. 
test:: Get elements by Z to show if they are naturally occurring.\n            :id: T_ARMI_ND_ELEMENTS3\n            :tests: R_ARMI_ND_ELEMENTS\n        \"\"\"\n        for ee in self.elements.byZ.values():\n            if ee.z == 43 or ee.z == 61 or 84 <= ee.z <= 89 or ee.z >= 93:\n                self.assertFalse(ee.isNaturallyOccurring())\n            else:\n                nat = ee.isNaturallyOccurring()\n                self.assertTrue(nat)\n\n    def test_abundancesAddToOne(self):\n        for ee in self.elements.byZ.values():\n            if not ee.isNaturallyOccurring():\n                continue\n\n            totAbund = sum([iso.abundance for iso in ee.nuclides])\n            self.assertAlmostEqual(\n                totAbund,\n                1.0,\n                places=4,\n            )\n\n    def test_isHeavyMetal(self):\n        \"\"\"Get elements by Z.\n\n        .. test:: Get elements by Z to show if they are heavy metals.\n            :id: T_ARMI_ND_ELEMENTS4\n            :tests: R_ARMI_ND_ELEMENTS\n        \"\"\"\n        for ee in self.elements.byZ.values():\n            if ee.z > 89:\n                self.assertTrue(ee.isHeavyMetal())\n            else:\n                self.assertFalse(ee.isHeavyMetal())\n"
  },
  {
    "path": "armi/nucDirectory/tests/test_nucDirectory.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests nuclide directory.\"\"\"\n\nimport unittest\n\nfrom armi.nucDirectory import nucDir\nfrom armi.nucDirectory.nuclideBases import NuclideBases\n\n\nclass TestNucDirectory(unittest.TestCase):\n    def test_nucDir_getNameForOldDashedNames(self):\n        oldNames = [\n            \"U-232\",\n            \"U-233\",\n            \"U-234\",\n            \"U-235\",\n            \"U-236\",\n            \"U-238\",\n            \"B-10\",\n            \"B-11\",\n            \"BE-9\",\n            \"F-19\",\n            \"LI-6\",\n            \"LI-7\",\n            \"W-182\",\n            \"W-183\",\n            \"W-184\",\n            \"W-186\",\n            \"S-32\",\n            \"O-16\",\n        ]\n        for oldName in oldNames:\n            self.assertIsNotNone(nucDir.getNuclideFromName(oldName))\n\n    def test_nucDir_getNucFromNucNameReturnsNuc(self):\n        nb = NuclideBases()\n        for nuc in nb.instances:\n            self.assertEqual(nuc, nucDir.getNuclideFromName(nuc.name))\n\n    def test_nucDir_getNuclidesFromForBadName(self):\n        with self.assertRaises(Exception):\n            nucDir.getNuclideFromName(\"Charlie\")\n\n    def test_getDisplacementEnergy(self):\n        \"\"\"Test getting the displacement energy for a given nuclide.\"\"\"\n        ed = nucDir.getThresholdDisplacementEnergy(\"H1\")\n        self.assertEqual(ed, 10.0)\n\n   
     with self.assertRaises(KeyError):\n            nucDir.getThresholdDisplacementEnergy(\"fail\")\n"
  },
  {
    "path": "armi/nucDirectory/tests/test_nuclideBases.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for NuclideBases.\"\"\"\n\nimport math\nimport os\nimport random\nimport unittest\n\nfrom ruamel.yaml import YAML\n\nfrom armi.context import RES\nfrom armi.nucDirectory.nuclideBases import (\n    DummyNuclideBase,\n    LumpNuclideBase,\n    NaturalNuclideBase,\n    NuclideBases,\n)\nfrom armi.nucDirectory.tests import NUCDIRECTORY_TESTS_DEFAULT_DIR_PATH\nfrom armi.utils.units import AVOGADROS_NUMBER, CURIE_PER_BECQUEREL, SECONDS_PER_HOUR\n\n\nclass TestNuclideBases(unittest.TestCase):\n    @classmethod\n    def setUpClass(cls):\n        cls.nucDirectoryTestsPath = NUCDIRECTORY_TESTS_DEFAULT_DIR_PATH\n        cls.nuclideBases = NuclideBases()\n\n        # Ensure that the burn chain data is initialized before running these tests.\n        cls.nuclideBases.burnChainImposed = False\n        with open(os.path.join(RES, \"burn-chain.yaml\"), \"r\") as burnChainStream:\n            cls.nuclideBases.imposeBurnChain(burnChainStream)\n\n    def test_nucBases_fromNameBadNameRaisesException(self):\n        with self.assertRaises(KeyError):\n            self.nuclideBases.byName[\"Cat\"]\n\n    def test_nucBase_AllAbundancesAddToOne(self):\n        for zz in range(1, 102):\n            nuclides = self.nuclideBases.elements.byZ[zz].nuclides\n            # We only process nuclides with measured masses. 
Some are purely theoretical, mostly over z=100\n            self.assertGreater(len(nuclides), 0, msg=f\"z={zz} unexpectedly has no nuclides\")\n            total = sum([nn.abundance for nn in nuclides if nn.a > 0])\n            self.assertAlmostEqual(\n                any([nn.abundance > 0 for nn in nuclides]),\n                total,\n                delta=1e-4,\n                msg=\"Abundance ({}) not 1.0 for nuclideBases:\\n  {}\".format(\n                    total, \"\\n  \".join(repr(nn) for nn in nuclides)\n                ),\n            )\n\n    def test_nucBases_AllLabelsAreUnique(self):\n        labels = []\n        for nn in self.nuclideBases.instances:\n            self.assertNotIn(nn.label, labels, f\"Label already exists: {nn.label}\")\n            labels.append(nn.label)\n\n    def test_nucBases_NegativeZRaisesException(self):\n        for _ in range(0, 5):\n            with self.assertRaises(Exception):\n                self.nuclideBases.isotopes(random.randint(-1000, -1))\n\n    def test_nucBases_Z295RaisesException(self):\n        with self.assertRaises(Exception):\n            self.nuclideBases.isotopes(295)\n\n    def test_nucBases_Mc2Elementals(self):\n        notElemental = [\n            \"LFP35\",\n            \"LFP38\",\n            \"LFP39\",\n            \"LFP40\",\n            \"LFP41\",\n            \"DUMMY\",\n            \"DUMP1\",\n            \"DUMP2\",\n            \"LREGN\",\n        ]\n        for lump in self.nuclideBases.where(lambda nn: isinstance(nn, LumpNuclideBase)):\n            if lump.name in notElemental:\n                self.assertIsInstance(lump, LumpNuclideBase)\n            else:\n                self.assertIsInstance(lump, NaturalNuclideBase)\n\n    def test_LumpNucBaseGetNatIsotopDoesNotFail(self):\n        for nuc in self.nuclideBases.where(lambda nn: isinstance(nn, LumpNuclideBase) and nn.z == 0):\n            self.assertEqual(0, len(list(nuc.getNaturalIsotopics())), nuc)\n\n    def 
test_NaturalNuclideBase_getNatrualIsotpics(self):\n        for nuc in self.nuclideBases.where(lambda nn: isinstance(nn, NaturalNuclideBase)):\n            numNaturals = len(list(nuc.getNaturalIsotopics()))\n            self.assertGreaterEqual(len(nuc.element.nuclides) - 1, numNaturals)\n\n    def test_nucBases_singleFailsWithMultipleMatches(self):\n        with self.assertRaises(Exception):\n            self.nuclideBases.single(lambda nuc: nuc.z == 92)\n\n    def test_nucBases_singleFailsWithNoMatches(self):\n        with self.assertRaises(Exception):\n            self.nuclideBases.single(lambda nuc: nuc.z == 1000)\n\n    def test_nucBases_singleIsPrettySpecific(self):\n        u235 = self.nuclideBases.single(lambda nuc: nuc.name == \"U235\")\n        self.assertEqual(235, u235.a)\n        self.assertEqual(92, u235.z)\n\n    def test_natNucStomicWgtIsAvgOfNatIsotopes(self):\n        for natNuk in self.nuclideBases.where(lambda nn: isinstance(nn, NaturalNuclideBase)):\n            atomicMass = 0.0\n            for natIso in natNuk.getNaturalIsotopics():\n                atomicMass += natIso.abundance * natIso.weight\n            self.assertAlmostEqual(atomicMass, natNuk.weight, delta=0.000001)\n\n    def test_nucBasesLabelAndNameCollsAreForSameNuc(self):\n        \"\"\"The name and labels for correct for nuclides.\n\n        .. 
test:: Validate the name, label, and DB name are accessible for nuclides.\n            :id: T_ARMI_ND_ISOTOPES0\n            :tests: R_ARMI_ND_ISOTOPES\n        \"\"\"\n        count = 0\n        for nuc in self.nuclideBases.where(lambda nn: nn.name == nn.label):\n            count += 1\n            self.assertIs(nuc, self.nuclideBases.byName[nuc.name])\n            self.assertIs(nuc, self.nuclideBases.byDBName[nuc.getDatabaseName()])\n            self.assertIs(nuc, self.nuclideBases.byLabel[nuc.label])\n        self.assertGreater(count, 10)\n\n    def test_nucBases_imposeBurnChainDecayBulkStats(self):\n        \"\"\"Test must be updated manually when burn chain is modified.\"\"\"\n        decayers = list(self.nuclideBases.where(lambda nn: len(nn.decays) > 0))\n        self.assertTrue(decayers)\n        for nuc in decayers:\n            if nuc.name in [\n                \"U238\",\n                \"PU240\",\n                \"PU242\",\n                \"CM242\",\n                \"CM244\",\n                \"CM246\",\n                \"CF250\",\n                \"CF252\",\n            ]:\n                continue\n            self.assertAlmostEqual(1.0, sum(dd.branch for dd in nuc.decays))\n\n    def test_nucBasesImposeBurnChainTransmBulkStats(self):\n        \"\"\"\n        Make sure all branches are equal to 1 for every transmutation type.\n\n        Exception: We allow 3e-4 threshold to account for ternary fissions, which are usually < 2e-4 per fission.\n        \"\"\"\n        trasmuters = self.nuclideBases.where(lambda nn: len(nn.trans) > 0)\n        self.assertTrue(trasmuters)\n        for nuc in trasmuters:\n            expected = len(set(tt.type for tt in nuc.trans))\n            self.assertTrue(all(0.0 <= tt.branch <= 1.0 for tt in nuc.trans))\n            actual = sum(tt.branch for tt in nuc.trans)\n            # ternary fission\n            self.assertAlmostEqual(\n                expected,\n                actual,\n                msg=f\"{nuc} has 
{expected} transmutation but the branches add up to {actual}\",\n                delta=3e-4,\n            )\n\n    def test_nucBases_imposeBurn_nuSF(self):\n        \"\"\"Test the nuclide data from file (specifically neutrons / sponaneous fission).\n\n        .. test:: Test that nuclide data was read from file instead of code.\n            :id: T_ARMI_ND_DATA0\n            :tests: R_ARMI_ND_DATA\n        \"\"\"\n        actual = {nn.name: nn.nuSF for nn in self.nuclideBases.where(lambda nn: nn.nuSF > 0.0)}\n        expected = {\n            \"CM248\": 3.1610,\n            \"BK249\": 3.4000,\n            \"CF249\": 3.4000,\n            \"CF250\": 3.5200,\n            \"CF252\": 3.7676,\n            \"U232\": 1.710000,\n            \"U234\": 1.8000,\n            \"U235\": 1.8700,\n            \"U236\": 1.900,\n            \"U238\": 2.000,\n            \"PU236\": 2.1200,\n            \"PU238\": 2.2100,\n            \"PU239\": 2.3200,\n            \"PU240\": 2.1510,\n            \"PU242\": 2.1410,\n            \"CM242\": 2.5280,\n            \"CM243\": 0.0000,\n            \"CM244\": 2.6875,\n            \"CM245\": 0.0000,\n            \"CM246\": 2.9480,\n            \"TH230\": 1.390000,\n            \"TH232\": 1.5,\n            \"NP237\": 2.05,\n            \"PA231\": 1.710000,\n            \"PU241\": 2.25,\n            \"PU244\": 2.290000,\n            \"U233\": 1.76,\n            \"AM241\": 2.5,\n            \"AM242M\": 2.56,\n            \"AM243\": 2.61,\n            \"ES253\": 4.700000,\n        }\n        for key, val in actual.items():\n            self.assertEqual(val, expected[key])\n\n    def test_nucBases_databaseNamesStartWith_n(self):\n        for nb in self.nuclideBases.instances:\n            self.assertEqual(\"n\", nb.getDatabaseName()[0])\n\n    def test_nucBases_AllDatabaseNamesAreUnique(self):\n        self.assertEqual(\n            len(self.nuclideBases.instances),\n            len(set(nb.getDatabaseName() for nb in self.nuclideBases.instances)),\n  
      )\n\n    def test_nucBases_Am242m(self):\n        \"\"\"Test the correct am242g and am242m abbreviations are supported.\n\n        .. test:: Specifically test for Am242 and Am242g because it is a special case.\n            :id: T_ARMI_ND_ISOTOPES1\n            :tests: R_ARMI_ND_ISOTOPES\n        \"\"\"\n        am242m = self.nuclideBases.byName[\"AM242\"]\n        self.assertIs(am242m, self.nuclideBases.byName[\"AM242M\"])\n        self.assertEqual(\"nAm242m\", am242m.getDatabaseName())\n        self.assertIs(am242m, self.nuclideBases.byDBName[\"nAm242\"])\n        self.assertAlmostEqual(am242m.weight, 242.059601666)\n\n        am242g = self.nuclideBases.byName[\"AM242G\"]\n        self.assertIs(am242g, self.nuclideBases.byName[\"AM242G\"])\n        self.assertEqual(\"nAm242g\", am242g.getDatabaseName())\n        self.assertIs(am242g, self.nuclideBases.byDBName[\"nAm242g\"])\n\n    def test_nucBases_isHeavyMetal(self):\n        for nb in self.nuclideBases.where(lambda nn: nn.z <= 89):\n            self.assertFalse(nb.isHeavyMetal())\n        for nb in self.nuclideBases.where(lambda nn: nn.z > 89):\n            if isinstance(nb, (DummyNuclideBase, LumpNuclideBase)):\n                self.assertFalse(nb.isHeavyMetal())\n            else:\n                self.assertTrue(nb.isHeavyMetal())\n\n    def test_getDecay(self):\n        nb = list(self.nuclideBases.where(lambda nn: nn.z == 89))[0]\n        # This test is a bit boring, because the test nuclide library is a bit boring.\n        self.assertIsNone(nb.getDecay(\"sf\"))\n\n    def test_getEndfMatNum(self):\n        \"\"\"Test get nuclides by name.\n\n        .. 
test:: Test get nuclides by name.\n            :id: T_ARMI_ND_ISOTOPES2\n            :tests: R_ARMI_ND_ISOTOPES\n        \"\"\"\n        self.assertEqual(self.nuclideBases.byName[\"U235\"].getEndfMatNum(), \"9228\")\n        self.assertEqual(self.nuclideBases.byName[\"U238\"].getEndfMatNum(), \"9237\")\n        self.assertEqual(self.nuclideBases.byName[\"PU239\"].getEndfMatNum(), \"9437\")\n        self.assertEqual(self.nuclideBases.byName[\"TC99\"].getEndfMatNum(), \"4325\")\n        self.assertEqual(self.nuclideBases.byName[\"AM242\"].getEndfMatNum(), \"9547\")  # meta 1\n        self.assertEqual(self.nuclideBases.byName[\"CF252\"].getEndfMatNum(), \"9861\")\n        self.assertEqual(self.nuclideBases.byName[\"NP237\"].getEndfMatNum(), \"9346\")\n        self.assertEqual(self.nuclideBases.byName[\"PM151\"].getEndfMatNum(), \"6161\")\n        self.assertEqual(self.nuclideBases.byName[\"PA231\"].getEndfMatNum(), \"9131\")\n\n    def test_NonMc2Nuclide(self):\n        \"\"\"Make sure nuclides that aren't in MC2 still get nuclide bases.\"\"\"\n        nuc = self.nuclideBases.byName[\"YB154\"]\n        self.assertEqual(nuc.a, 154)\n\n    def test_kryptonDecayConstants(self):\n        \"\"\"Tests that the nuclides data contains the expected decay constants.\"\"\"\n        # hand calculated reference data includes stable isotopes, radioactive\n        # isotopes, metastable isotopes and exercises metastable minimum halflife\n        REF_KR_DECAY_CONSTANTS = [\n            (\"KR69\", 24.755256448569472),\n            (\"KR70\", 17.3286795139986),\n            (\"KR71\", 6.93147180559945),\n            (\"KR72\", 0.04053492283976288),\n            (\"KR73\", 0.0253900066139174),\n            (\"KR74\", 0.0010045611312463),\n            (\"KR75\", 0.00251140282811574),\n            (\"KR76\", 0.0000130095191546536),\n            (\"KR77\", 0.000162139691359051),\n            (\"KR78\", 0),\n            (\"KR79\", 5.49488822742219e-06),\n            (\"KR79M\", 
0.0138629436111989),\n            (\"KR80\", 0),\n            (\"KR81\", 9.591693391393433e-14),\n            (\"KR81M\", 0.0529119985160263),\n            (\"KR82\", 0),\n            (\"KR83\", 0),\n            (\"KR83M\", math.log(2) / (1.83 * SECONDS_PER_HOUR)),\n            (\"KR84\", 0),\n            (\"KR85\", 2.0453466678736843e-09),\n            (\"KR85M\", 4.29725468419061e-05),\n            (\"KR86\", 0),\n            (\"KR87\", 0.000151408296321526),\n            (\"KR88\", 0.0000681560649518136),\n            (\"KR89\", 0.00366744539978807),\n            (\"KR90\", 0.021446385537127),\n            (\"KR91\", 0.0808806511738559),\n            (\"KR92\", 0.376710424217362),\n            (\"KR93\", 0.538994697169475),\n            (\"KR94\", 3.26956217245257),\n            (\"KR95\", 6.08023842596443),\n            (\"KR96\", 8.66433975699932),\n            (\"KR97\", 11.0023361993642),\n            (\"KR98\", 16.1197018734871),\n            (\"KR99\", 53.3190138892265),\n            (\"KR100\", 99.0210257942778),\n            (\"KR101\", 1091570.36308652),\n        ]\n\n        for nucName, refDecayConstant in REF_KR_DECAY_CONSTANTS:\n            refNb = self.nuclideBases.byName[nucName]\n            decayConstantNb = math.log(2) / refNb.halflife\n            try:\n                self.assertAlmostEqual((refDecayConstant - decayConstantNb) / refDecayConstant, 0, 6)\n            except ZeroDivisionError:\n                self.assertEqual(refDecayConstant, decayConstantNb)\n            except AssertionError:\n                errorMessage = (\n                    f\"{nucName} reference decay constant {refDecayConstant} ARMI decay constant {decayConstantNb}\"\n                )\n                raise AssertionError(errorMessage)\n\n        for nucName in [\"XE134\", \"XE136\", \"EU151\"]:\n            nb = self.nuclideBases.byName[nucName]\n            decayConstantNb = math.log(2) / nb.halflife\n            self.assertAlmostEqual(decayConstantNb, 0, 
places=3)\n\n    def test_curieDefinitionWithRa226(self):\n        \"\"\"\n        Tests that the decay constant of Ra-226 is close to 1 Ci.\n\n        Notes\n        -----\n        The original definition of 1 Ci was based on the half-life of Ra-226 for 1 gram. The latest evaluations show\n        that 1 gram is defined as 0.988 Ci.\n        \"\"\"\n        ra226 = self.nuclideBases.byName[\"RA226\"]\n        decayConstantRa226 = math.log(2) / ra226.halflife\n        weight = ra226.weight\n        mass = 1  # gram\n        activity = mass * AVOGADROS_NUMBER / weight * decayConstantRa226  # 1 gram\n        activity = activity * CURIE_PER_BECQUEREL\n        self.assertAlmostEqual(activity, 0.9885593, places=6)\n\n    def test_loadMcc2Data(self):\n        \"\"\"Tests consistency with the `mcc-nuclides.yaml` input and the ENDF/B-V.2 nuclides in the data model.\n\n        .. test:: Test that MCC v2 ENDF/B-V.2 IDs can be queried by nuclides.\n            :id: T_ARMI_ND_ISOTOPES3\n            :tests: R_ARMI_ND_ISOTOPES\n        \"\"\"\n        with open(os.path.join(RES, \"mcc-nuclides.yaml\")) as f:\n            yaml = YAML(typ=\"rt\")\n            data = yaml.load(f)\n            expectedNuclides = set([nuc for nuc in data.keys() if data[nuc][\"ENDF/B-V.2\"] is not None])\n\n        for nuc, nb in self.nuclideBases.byMcc2Id.items():\n            self.assertIn(nb.name, expectedNuclides)\n            self.assertEqual(nb.getMcc2Id(), nb.mcc2id)\n            self.assertEqual(nb.getMcc2Id(), nuc)\n\n        self.assertEqual(len(self.nuclideBases.byMcc2Id), len(expectedNuclides))\n\n    def test_loadMcc3EndfVII0Data(self):\n        \"\"\"Tests consistency with the `mcc-nuclides.yaml` input and the ENDF/B-VII.0 nuclides in the data model.\n\n        .. test:: Test that MCC v3 ENDF/B-VII.0 IDs can be queried by nuclides.\n            :id: T_ARMI_ND_ISOTOPES4\n            :tests: R_ARMI_ND_ISOTOPES\n\n        .. 
test:: Test the MCC ENDF/B-VII.0 nuclide data that was read from file instead of code.\n            :id: T_ARMI_ND_DATA1\n            :tests: R_ARMI_ND_DATA\n        \"\"\"\n        with open(os.path.join(RES, \"mcc-nuclides.yaml\")) as f:\n            yaml = YAML(typ=\"rt\")\n            data = yaml.load(f)\n            expectedNuclides = set([nuc for nuc in data.keys() if data[nuc][\"ENDF/B-VII.0\"] is not None])\n\n        for nuc, nb in self.nuclideBases.byMcc3IdEndfbVII0.items():\n            self.assertIn(nb.name, expectedNuclides)\n            self.assertEqual(nb.getMcc3IdEndfbVII0(), nb.mcc3idEndfbVII0)\n            self.assertEqual(nb.getMcc3IdEndfbVII0(), nuc)\n\n        # Subtract 1 nuclide due to DUMP2.\n        self.assertEqual(len(self.nuclideBases.byMcc3IdEndfbVII0), len(expectedNuclides) - 1)\n\n    def test_loadMcc3EndfVII1Data(self):\n        \"\"\"Tests consistency with the `mcc-nuclides.yaml` input and the ENDF/B-VII.1 nuclides in the data model.\n\n        .. test:: Test that MCC v3 ENDF/B-VII.1 IDs can be queried by nuclides.\n            :id: T_ARMI_ND_ISOTOPES6\n            :tests: R_ARMI_ND_ISOTOPES\n\n        .. 
test:: Test the MCC ENDF/B-VII.1 nuclide data that was read from file instead of code.\n            :id: T_ARMI_ND_DATA2\n            :tests: R_ARMI_ND_DATA\n        \"\"\"\n        with open(os.path.join(RES, \"mcc-nuclides.yaml\")) as f:\n            yaml = YAML(typ=\"rt\")\n            data = yaml.load(f)\n            expectedNuclides = set([nuc for nuc in data.keys() if data[nuc][\"ENDF/B-VII.1\"] is not None])\n\n        for nuc, nb in self.nuclideBases.byMcc3IdEndfbVII1.items():\n            self.assertIn(nb.name, expectedNuclides)\n            self.assertEqual(nb.getMcc3IdEndfbVII1(), nb.mcc3idEndfbVII1)\n            self.assertEqual(nb.getMcc3IdEndfbVII1(), nuc)\n            self.assertEqual(nb.getMcc3Id(), nb.mcc3idEndfbVII1)\n            self.assertEqual(nb.getMcc3Id(), nuc)\n\n        # Subtract 1 nuclide due to DUMP2\n        self.assertEqual(len(self.nuclideBases.byMcc3IdEndfbVII1), len(expectedNuclides) - 1)\n\n\nclass TestAAAZZZSId(unittest.TestCase):\n    def test_AAAZZZSNameGenerator(self):\n        \"\"\"Test that AAAZZS ID name generator.\n\n        .. test:: Query the AAAZZS IDs can be retrieved for nuclides.\n            :id: T_ARMI_ND_ISOTOPES5\n            :tests: R_ARMI_ND_ISOTOPES\n        \"\"\"\n        referenceNucNames = [(\"C12\", \"120060\"), (\"U235\", \"2350920\"), (\"AM242M\", \"2420951\")]\n\n        nuclideBases = NuclideBases()\n        for nucName, refAaazzzs in referenceNucNames:\n            nb = nuclideBases.byName[nucName]\n            if refAaazzzs:\n                self.assertEqual(refAaazzzs, nb.getAAAZZZSId())\n"
  },
  {
    "path": "armi/nucDirectory/tests/test_thermalScattering.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for the composite pattern.\"\"\"\n\nimport unittest\n\nfrom armi.nucDirectory import thermalScattering as ts\n\n\nclass TestThermalScattering(unittest.TestCase):\n    def test_dataValidity(self):\n        \"\"\"Ensure that over time the raw thermal scattering data in ARMI remains valid.\"\"\"\n        for key, val in ts.BY_NAME_AND_COMPOUND.items():\n            # nuclide name must be a non-empty string\n            self.assertIsInstance(key[0], str)\n            self.assertGreater(len(key[0]), 0)\n\n            if key[1] is not None:\n                # compound CAN be None, but otherwise must be a non-empty string\n                self.assertIsInstance(key[1], str)\n                self.assertGreater(len(key[1]), 0)\n\n            # ENDF/B-VIII label must be a non-empty string\n            self.assertIsInstance(val[0], str)\n            self.assertGreater(len(val[0]), 0)\n\n            # ACE label must be a non-empty string\n            self.assertIsInstance(val[1], str)\n            self.assertGreater(len(val[1]), 0)\n\n    def test_fromNameCompInvalid(self):\n        \"\"\"If the name/compound inputs aren't valid, we should get a ValueError.\"\"\"\n        with self.assertRaises(ValueError):\n            ts.fromNameAndCompound(\"hi\", \"mom\")\n\n        with self.assertRaises(ValueError):\n            ts.fromNameAndCompound(\"C\", None)\n\n        
with self.assertRaises(ValueError):\n            ts.fromNameAndCompound(\"O\", None)\n\n        with self.assertRaises(ValueError):\n            ts.fromNameAndCompound(\"FE56\", \"FE56\")\n\n    def test_fromNameCompSpotCheck(self):\n        \"\"\"Spot check some examples that should work.\"\"\"\n        tsl = ts.fromNameAndCompound(\"FE56\", None)\n        self.assertIsInstance(tsl, ts.ThermalScatteringLabels)\n        self.assertEqual(tsl.endf8Label, \"tsl-026_Fe_056.endf\")\n        self.assertEqual(tsl.aceLabel, \"fe-56\")\n\n        tsl = ts.fromNameAndCompound(\"H\", ts.H2O)\n        self.assertIsInstance(tsl, ts.ThermalScatteringLabels)\n        self.assertEqual(tsl.endf8Label, \"tsl-HinH2O.endf\")\n        self.assertEqual(tsl.aceLabel, \"h-h2o\")\n\n        tsl = ts.fromNameAndCompound(\"O\", ts.D2O)\n        self.assertIsInstance(tsl, ts.ThermalScatteringLabels)\n        self.assertEqual(tsl.endf8Label, f\"tsl-Oin{ts.D2O}.endf\")\n        self.assertEqual(tsl.aceLabel, \"o-d2o\")\n\n        tsl = ts.fromNameAndCompound(\"U\", ts.UO2)\n        self.assertIsInstance(tsl, ts.ThermalScatteringLabels)\n        self.assertEqual(tsl.endf8Label, \"tsl-UinUO2.endf\")\n        self.assertEqual(tsl.aceLabel, \"u-uo2\")\n"
  },
  {
    "path": "armi/nucDirectory/tests/test_transmutations.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Unit tests for transmutations.\"\"\"\n\nimport random\nimport string\nimport unittest\n\nfrom armi.nucDirectory import transmutations\nfrom armi.nucDirectory.nuclideBases import NuclideBases\n\n\ndef randomString(length):\n    return \"\".join(random.choice(string.ascii_lowercase) for _ in range(length))\n\n\nclass TransmutationTests(unittest.TestCase):\n    @classmethod\n    def setUpClass(cls):\n        cls.nuclideBases = NuclideBases()\n\n    def test_Transmutation_validReactionTypes(self):\n        data = {\"products\": [\"\"]}\n        for rxn in transmutations.TRANSMUTATION_TYPES:\n            data[\"type\"] = rxn\n            temp = transmutations.Transmutation(self.nuclideBases.byName[\"AM242M\"], data)\n            self.assertEqual(temp.type, rxn)\n            self.assertEqual(temp.productParticle, transmutations.PRODUCT_PARTICLES.get(temp.type))\n\n    def test_Transmutation_productParticle(self):\n        temp = transmutations.Transmutation(self.nuclideBases.byName[\"AM242M\"], {\"products\": [\"\"], \"type\": \"nalph\"})\n        self.assertEqual(temp.productParticle, \"HE4\")\n\n    def test_Transmutation_invalidReactionTypes(self):\n        data = {\"products\": [\"\"], \"branch\": 1.0}\n        errorCount = 0\n        for _ in range(0, 5):\n            rxn = randomString(3)\n            data[\"type\"] = rxn\n            if rxn in 
transmutations.TRANSMUTATION_TYPES:\n                self.assertIsNotNone(transmutations.Transmutation(self.nuclideBases.byName[\"AM242M\"], data))\n            else:\n                with self.assertRaises(KeyError):\n                    errorCount += 1\n                    transmutations.Transmutation(self.nuclideBases.byName[\"AM242M\"], data)\n        self.assertGreater(errorCount, 2)\n\n\nclass DecayModeTests(unittest.TestCase):\n    @classmethod\n    def setUpClass(cls):\n        cls.nuclideBases = NuclideBases()\n\n    def test_DecayMode_validReactionTypes(self):\n        data = {\"products\": [\"\"], \"branch\": 1.0, \"halfLifeInSeconds\": 1.0}\n        for rxn in transmutations.DECAY_MODES:\n            data[\"type\"] = rxn\n            decay = transmutations.DecayMode(self.nuclideBases.byName[\"AM242M\"], data)\n            self.assertEqual(decay.type, rxn)\n\n    def test_DecayMode_invalidReactionTypes(self):\n        data = {\"products\": [\"\"], \"branch\": 1.0, \"halfLifeInSeconds\": 1.0}\n        for _ in range(0, 25):\n            rxn = randomString(3)\n            data[\"type\"] = rxn\n            if rxn in transmutations.DECAY_MODES:\n                self.assertIsNotNone(transmutations.DecayMode(self.nuclideBases.byName[\"AM242M\"], data))\n            else:\n                with self.assertRaises(KeyError):\n                    transmutations.DecayMode(self.nuclideBases.byName[\"AM242M\"], data)\n"
  },
  {
    "path": "armi/nucDirectory/thermalScattering.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nHandle awareness of Thermal Scattering labels for ENDF/B-VIII and ACE.\n\nThe information below is based on Parsons, LA-UR-18-25096, https://mcnp.lanl.gov/pdf_files/la-ur-18-25096.pdf\n\nScattering law labels are currently available for a variety of classifications:\n\n* Element in Compound (H in H2O, Be in BeO)\n* Element in structure (C in Graphite, Be in metal)\n\n    * Can be separated as crystalline, 30% porous, 10% porous, etc.\n\n* Element in spin isomer (para H, ortho H, para D, ortho D, etc.)\n* Compound in phase (solid CH4, liquid CH4, SiO2-alpha, SiO2-beta).\n* Just compound (benzene)\n* Just isotope (Fe56, Al27)\n\nThe labels for these vary across evaluations (e.g. ENDF/B-VII, ENDF/B-VIII, etc.). We provide ENDF/B-VIII.0 and ACE\nlabels. 
Other physics kernels will have to derive their own labels as appropriate in client code.\n\"\"\"\n\nfrom dataclasses import dataclass\n\n# strings that users might want to reference downstream\nBE_METAL = \"Be-metal\"\nBEO = \"BeO\"\nCRYSTALLINE_GRAPHITE = \"crystalline-graphite\"\nD2O = \"D2O\"\nGRAPHITE_10P = \"reactor-graphite-10P\"\nGRAPHITE_30P = \"reactor-graphite-30P\"\nH2O = \"H2O\"\nSIC = \"SiC\"\nUN = \"UN\"\nUO2 = \"UO2\"\nZRH = \"ZrH\"\n\n# thermal scattering label data\nBY_NAME_AND_COMPOUND = {\n    (\"AL27\", None): (\"tsl-013_Al_027.endf\", \"al-27\"),\n    (\"BE\", BE_METAL): (f\"tsl-{BE_METAL}.endf\", \"be-met\"),\n    (\"BE\", BEO): (BEO, \"be-beo\"),\n    (\"C\", CRYSTALLINE_GRAPHITE): (f\"tsl-{CRYSTALLINE_GRAPHITE}.endf\", \"grph\"),\n    (\"C\", GRAPHITE_10P): (f\"tsl-{GRAPHITE_10P}.endf\", \"grph10\"),\n    (\"C\", GRAPHITE_30P): (f\"tsl-{GRAPHITE_30P}.endf\", \"grph30\"),\n    (\"C\", SIC): (\"tsl-CinSiC.endf\", \"c-sic\"),\n    (\"FE56\", None): (\"tsl-026_Fe_056.endf\", \"fe-56\"),\n    (\"H\", H2O): (\"tsl-HinH2O.endf\", \"h-h2o\"),\n    (\"H\", ZRH): (\"tsl-HinZrH.endf\", \"h-zrh\"),\n    (\"H2\", D2O): (f\"tsl-Din{D2O}.endf\", \"d-d2o\"),\n    (\"N\", UN): (\"tsl-NinUN.endf\", \"n-un\"),\n    (\"O\", BEO): (\"tsl-OinBeO.endf\", \"o-beo\"),\n    (\"O\", D2O): (f\"tsl-Oin{D2O}.endf\", \"o-d2o\"),\n    (\"O\", UO2): (\"tsl-OinUO2.endf\", \"o-uo2\"),\n    (\"SI\", SIC): (\"tsl-SIinSiC.endf\", \"si-sic\"),\n    (\"U\", UN): (\"tsl-UinUN.endf\", \"u-un\"),\n    (\"U\", UO2): (\"tsl-UinUO2.endf\", \"u-uo2\"),\n    (\"ZR\", ZRH): (\"tsl-ZRinZrH.endf\", \"zr-zrh\"),\n}\n\n\n@dataclass(frozen=True)\nclass ThermalScatteringLabels:\n    \"\"\"Container for the labels for a particular nuclide/compound combination.\n\n    Attributes\n    ----------\n    name: str\n        Name of the nuclide. This should match the string in the \"byName\" field in nuclideBases.\n    compound: str\n        Label indicating what the subjects are in (e.g. 
``\"Graphite\"`` or ``\"H2O\"``. Can be left off for, e.g. Fe56.\n    endf8Label: str\n        Label for ENDF/B-VIII evaluation.\n    aceLabel: str\n        Label for ACE.\n    \"\"\"\n\n    name: str\n    compound: str\n    endf8Label: str\n    aceLabel: str\n\n\ndef fromNameAndCompound(name: str, compound: str):\n    \"\"\"The standard interface for getting ENDF/B-VIII and ACE labels for a given nuclide.\n\n    Parameters\n    ----------\n    name: str\n        Name of the nuclide.\n    compound: str\n        Name of the compound (can be None).\n\n    Returns\n    -------\n    ThermalScatteringLabels\n        An instance of the data class used to contain the ENDF/ACE labels for this nuclide/compound combination.\n\n    Raises\n    ------\n    ValueError\n        ARMI does not store a large data set of labels. If the user requests one ARMI does not have, they get an error.\n    \"\"\"\n    if (name, compound) in BY_NAME_AND_COMPOUND:\n        endf, ace = BY_NAME_AND_COMPOUND[(name, compound)]\n        return ThermalScatteringLabels(name, compound, endf, ace)\n    else:\n        raise ValueError(f\"No thermal scattering labels are known for name/compound: {name}/{compound}\")\n"
  },
  {
    "path": "armi/nucDirectory/transmutations.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis module contains the definition of :py:class:`~Transmutation` and :py:class:`~Decay` classes.\n\n.. inheritance-diagram::\n    Transmutation DecayMode\n\nThe mappings between active nuclides during transmutation and decay are described in a\n``burn-chain.yaml`` file pointed to by the ``burnChainFileName``\nsetting. This file contains one entry per nuclide that can transmute or decay that\nlook similar to the example below::\n\n    U238:\n    - nuSF: 2.0000\n    - transmutation:\n        branch: 1.0\n        products:\n        - NP237\n        type: n2n\n    - transmutation:\n        branch: 1.0\n        products:\n        - LFP38\n        type: fission\n    - transmutation:\n        branch: 1.0\n        products:\n        - NP239\n        - PU239\n        type: nGamma\n    - decay:\n        branch: 5.45000e-07\n        halfLifeInSeconds: 1.4099935680e+17\n        products:\n        - LFP38\n        type: sf\n\nThis example defines 3 transmutations (an ``(n,2n)`` reaction, an ``(n,fission)`` reaction, an\n``(n,gamma``)`` reaction), and a spontaneous fission decay reaction with a very low branching\nratio. 
Valid reaction ``type`` values are listed in :py:class:`~armi.nucDirectory.transmutations.Transmutation`\nand :py:class:`~armi.nucDirectory.transmutations.DecayMode`.\n\nThe ``branch`` entry determines the fraction of the products of a given reaction that will end up\nin a particular product. The branches must never sum up to anything other than 1.0.\n\nThe ``products`` entry is a list, but only one entry will be the actual product. The list defines\na preference order. For example, if ``NP239`` is being tracked as an active nuclide in the problem\nit will be the product of the ``nGamma`` reaction above. Otherwise, ``U238`` will transmute directly\nto the alternate product, ``PU239``.\n\n.. warning:: If you track very short-lived decays explicitly then the burn matrix becomes very\n             ill-conditioned and numerical solver issues can result. Specialized matrix\n             exponential solvers (e.g. CRAM [1]) are required to get adequate solutions in these cases [2].\n\nThe example above also defines a ``nuSF`` item, which is how many neutrons are emitted per spontaneous\nfission. This is used for intrinsic source term calculations.\n\n[1] Pusa, Maria, and Jaakko Leppanen. \"Computing the matrix exponential in burnup calculations.\"\n    Nuclear science and engineering 164.2 (2010): 140-150.\n\n[2] Moler, Cleve, and Charles Van Loan. 
\"Nineteen dubious ways to compute the exponential of a matrix.\"\n    SIAM review 20.4 (1978): 801-836.\n\"\"\"\n\nimport math\n\nfrom armi import runLog\nfrom armi.utils import iterables\n\nLN2 = math.log(2)\n\nTRANSMUTATION_TYPES = [\"n2n\", \"fission\", \"nGamma\", \"nalph\", \"np\", \"nd\", \"nt\"]\n\nDECAY_MODES = [\n    \"bmd\",  # beta minus\n    \"bpd\",  # beta plus\n    \"ad\",  # alpha decay\n    \"ec\",  # electron capture\n    \"sf\",\n]  # spontaneous-fission\n\nPRODUCT_PARTICLES = {\"nalph\": \"HE4\", \"np\": \"H1\", \"nd\": \"H2\", \"nt\": \"H3\", \"ad\": \"HE4\"}\n\n\nclass Transmutable:\n    \"\"\"\n    Transmutable base class.\n\n    Attributes\n    ----------\n    parent : NuclideBase\n        The parent nuclide in this reaction.\n    type : str\n        The type name of reaction (e.g. ``n2n``, ``fission``, etc.)\n    productNuclides : list\n        The names of potential product nuclides of this reaction, in order of preference.\n        Multiple options exist to allow the library to specify a transmutation\n        to one nuclide if the user is modeling that nuclide, and other ones\n        as fallbacks in case the user is not tracking the preferred product.\n        Only one of these products will be created.\n    productParticle : str\n        The outgoing particle of this reaction. Could be HE4 for n,alpha, etc.\n        Default is None.\n    branch : float\n        The fraction of the time that this transmutation occurs. Should be between\n        0 and 1. 
Less than 1 when a decay or reaction can branch between multiple productNuclides.\n        Do not make this >1 to get more than one product because it scales the reaction cross section\n        which will double-deplete the parent.\n\n    Notes\n    -----\n    These are used to link two :py:class:`~armi.nucDirectory.nuclideBases.NuclideBase` objects through transmutation or\n    decay.\n\n    See Also\n    --------\n    Transmutation\n    DecayMode\n    \"\"\"\n\n    def __init__(self, parent, dataDict):\n        self.parent = parent\n        self.type = dataDict[\"type\"]\n        self.productNuclides = tuple(dataDict[\"products\"])\n        self.productParticle = dataDict.get(\"productParticle\", PRODUCT_PARTICLES.get(self.type))\n        self.branch = dataDict.get(\"branch\", None)\n        if self.branch is None:\n            self.branch = 1.0\n            runLog.info(f\"The branching ratio for {self} was not defined and is assumed to be 1.0.\")\n\n    def getPreferredProduct(self, libraryNucNames):\n        \"\"\"\n        Get the index of the most preferred transmutation product/decay daughter.\n\n        Notes\n        -----\n        The ARMI burn chain is not a full burn chain. It short circuits shorter half-lives, and uses lumped nuclides\n        as catch-all objects for things that just sit around. 
Consequently, the \"preferred\" product/daughter\n        may not be actual physical product/daughter.\n        \"\"\"\n        for product in self.productNuclides:\n            if product in libraryNucNames:\n                return product\n        groupedNames = iterables.split(libraryNucNames, max(1, int(len(libraryNucNames) / 10)))\n        msg = \"Could not find suitable product/daughter for {}.\\nThe available options were:\\n  {}\".format(\n            self, \",\\n  \".join(\", \".join(chunk) for chunk in groupedNames)\n        )\n        raise KeyError(msg)\n\n\nclass Transmutation(Transmutable):\n    r\"\"\"\n    A transmutation from one nuclide to another.\n\n    Notes\n    -----\n    The supported transmutation types include:\n\n    * :math:`n,2n`\n    * :math:`n,fission`\n    * :math:`n,\\gamma` (``nGamma``)\n    * :math:`n,\\alpha` (``nalph``)\n    * :math:`n,p` (proton) (``np``)\n    * :math:`n,d` (deuteron) (``nd``)\n    * :math:`n,t` (triton) (``nt``)\n    \"\"\"\n\n    def __init__(self, parent, dataDict):\n        Transmutable.__init__(self, parent, dataDict)\n        if self.type not in TRANSMUTATION_TYPES:\n            raise KeyError(\"{} not in {}\".format(self.type, TRANSMUTATION_TYPES))\n\n    def __repr__(self):\n        return \"<Transmutation by {} from {:7s} to {} with branching ratio of {:12.5E}>\".format(\n            self.type, self.parent.name, self.productNuclides, self.branch\n        )\n\n\nclass DecayMode(Transmutable):\n    r\"\"\"Defines a decay from one nuclide to another.\n\n    Notes\n    -----\n    The supported decay types are also all transmutations, and include:\n\n    * :math:`\\beta^-` (``bmd``)\n    * :math:`\\beta^+` (``bpd``)\n    * :math:`\\alpha` (``ad``)\n    * Electron capture (``ec``)\n    * Spontaneous fission (``sf``)\n\n    Of note, the following are not supported:\n\n    * Internal conversion\n    * Gamma decay\n    \"\"\"\n\n    def __init__(self, parent, dataDict):\n        Transmutable.__init__(self, 
parent, dataDict)\n        self.halfLifeInSeconds = parent.halflife\n\n        # Check for user-defined value of half-life within the burn-chain data. If this is\n        # updated then prefer the user change and then note this to the user. Otherwise,\n        # maintain the default loaded from the nuclide bases.\n        userHalfLife = dataDict.get(\"halfLifeInSeconds\", None)\n        if userHalfLife:\n            if userHalfLife != parent.halflife:\n                runLog.info(\n                    f\"Half-life provided for {self} will be updated from \"\n                    f\"{parent.halflife:<15.11e} to {userHalfLife:<15.11e} seconds based on \"\n                    \"user provided burn-chain data.\"\n                )\n\n                self.halfLifeInSeconds = userHalfLife\n        self.decay = LN2 / self.halfLifeInSeconds * self.branch  # decay constant, reduced by branch to make it accurate\n\n        if self.type not in DECAY_MODES:\n            raise KeyError(\"{} is not in {}\".format(self.type, DECAY_MODES))\n\n    def __repr__(self):\n        return \"<DecayMode by {} from {:7s} to {} with a half-life of {:12.5E} s>\".format(\n            self.type,\n            self.parent.name,\n            self.productNuclides,\n            self.halfLifeInSeconds,\n        )\n"
  },
  {
    "path": "armi/nuclearDataIO/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Read and/or write data files associated with nuclear data and reactor physics data.\"\"\"\n# ruff: noqa: F401\n\n# Export the cccc modules here for backward compatibility, though prefer full imports in new code.\nfrom armi.nuclearDataIO.cccc import (\n    compxs,\n    dif3d,\n    dlayxs,\n    fixsrc,\n    gamiso,\n    geodst,\n    isotxs,\n    labels,\n    nhflux,\n    pmatrx,\n    pwdint,\n    rtflux,\n    rzflux,\n)\nfrom armi.physics import neutronics\n\n\ndef getExpectedISOTXSFileName(cycle=None, node=None, suffix=None, xsID=None):\n    \"\"\"\n    Return the ISOTXS file that matches either the current cycle or xsID with a suffix.\n\n    See Also\n    --------\n    getExpectedCOMPXSFileName\n    getExpectedGAMISOFileName\n    getExpectedPMATRXFileName\n    \"\"\"\n    if xsID is not None and cycle is not None:\n        raise ValueError(\"Both `xsID` and `cycle` cannot be specified together.\")\n\n    if suffix is not None and cycle is not None:\n        raise ValueError(\"Both `suffix` and ``cycle cannot be specified together.\")\n\n    if xsID is not None:\n        neutronFileName = neutronics.ISOTXS[:3]\n    else:\n        neutronFileName = neutronics.ISOTXS\n    return _findExpectedNeutronFileName(neutronFileName, _getNeutronKeywords(cycle, node, suffix, xsID))\n\n\ndef getExpectedCOMPXSFileName(cycle=None, node=None):\n    \"\"\"\n    Return the 
COMPXS file that matches either the current cycle.\n\n    See Also\n    --------\n    getExpectedISOTXSFileName\n    getExpectedGAMISOFileName\n    getExpectedPMATRXFileName\n    \"\"\"\n    return _findExpectedNeutronFileName(neutronics.COMPXS, _getNeutronKeywords(cycle, node, suffix=None, xsID=None))\n\n\ndef _findExpectedNeutronFileName(fileType, fileNameKeywords):\n    return fileType + \"\".join(fileNameKeywords)\n\n\ndef _getNeutronKeywords(cycle, node, suffix, xsID):\n    if cycle is not None and xsID is not None:\n        raise ValueError(\"Keywords are over-specified. Choose `cycle` or `xsID` only\")\n\n    # If neither cycle or xsID are provided there are no additional keywords to add to the file name\n    if cycle is None and xsID is None:\n        keywords = []\n    else:\n        # example: ISOTXS-c0\n        if cycle is not None:\n            keywords = [f\"-c{cycle}n{node}\"] if node is not None else [\"-c\", str(cycle)]\n        # example: ISOAA-test\n        elif xsID is not None:\n            keywords = [xsID]\n            if suffix not in [None, \"\"]:\n                keywords.append(\"-\" + suffix)\n\n    return keywords\n\n\ndef getExpectedGAMISOFileName(cycle=None, node=None, suffix=None, xsID=None):\n    \"\"\"\n    Return the GAMISO file that matches either the ``cycle`` or ``xsID`` and ``suffix``.\n\n    For example:\n        If ``cycle`` is set to 0, then ``cycle0.gamiso`` will be returned.\n        If ``xsID`` is set to ``AA`` with a ``suffix`` of ``test``, then\n        ``AA-test.gamiso`` will be returned.\n\n    See Also\n    --------\n    getExpectedISOTXSFileName\n    getExpectedCOMPXSFileName\n    getExpectedPMATRXFileName\n    \"\"\"\n    if any(i is not None for i in (cycle, suffix, xsID)):\n        # file path extensions are lower case\n        gamiso0 = neutronics.GAMISO_EXT\n    else:\n        # GAMISO as a file is upper case\n        gamiso0 = neutronics.GAMISO\n\n    return _findExpectedGammaFileName(gamiso0, 
_getGammaKeywords(cycle, node, suffix, xsID))\n\n\ndef getExpectedPMATRXFileName(cycle=None, node=None, suffix=None, xsID=None):\n    \"\"\"\n    Return the PMATRX file that matches either the ``cycle`` or ``xsID`` and ``suffix``.\n\n    For example:\n        If ``cycle`` is set to 0 d, then ``cycle0.pmatrx`` will be returned.\n        If ``xsID`` is set to ``AA`` with a ``suffix`` of ``test``, then\n        ``AA-test.pmatrx`` will be returned.\n\n    See Also\n    --------\n    getExpectedISOTXSFileName\n    getExpectedCOMPXSFileName\n    getExpectedGAMISOFileName\n    \"\"\"\n    if any(i is not None for i in (cycle, suffix, xsID)):\n        # file path extensions are lower case\n        pmatrx0 = neutronics.PMATRX_EXT\n    else:\n        # PMATRX as a file is upper case\n        pmatrx0 = neutronics.PMATRX\n\n    return _findExpectedGammaFileName(pmatrx0, _getGammaKeywords(cycle, node, suffix, xsID))\n\n\ndef _findExpectedGammaFileName(fileType, fileNameKeywords):\n    return \"\".join(fileNameKeywords) + fileType\n\n\ndef _getGammaKeywords(cycle, node, suffix, xsID):\n    if cycle is not None and xsID is not None:\n        raise ValueError(\"Keywords are over-specified. Choose `cycle` or `xsID` only\")\n\n    # If neither cycle or xsID are provided there are no additional keywords to add\n    # to the file name\n    if cycle is None and xsID is None:\n        keywords = []\n    else:\n        # example: cycle0.gamiso\n        if cycle is not None:\n            keywords = [f\"cycle{cycle}node{node}\"] if node is not None else [f\"cycle{cycle}\"]\n\n        elif xsID is not None:\n            keywords = [xsID]\n            if suffix not in [None, \"\"]:\n                if not suffix.startswith(\"-\"):\n                    suffix = \"-\" + suffix\n                keywords.append(suffix)\n        else:\n            raise ValueError(\"The cycle or XS ID must be specified.\")\n        keywords.append(\".\")\n    return keywords\n"
  },
  {
    "path": "armi/nuclearDataIO/cccc/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis subpackage reads and writes CCCC standard interface files for reactor physics codes.\n\nStarting in the late 1960s, the computational nuclear analysis community recognized a need to\nestablish some standard file formats to exchange reactor descriptions and reactor physics\nquantities. They formed the Committee on Computer Code Coordination (CCCC) and issued\nseveral versions of their standards. The latest was issued in 1977 as [CCCC-IV]_. Many\nreactor codes to this day use these files. This package provides a Python abstraction to\nread many (though not necessarily all) of these files, manipulate the data, and\nwrite them back out to disk.\n\nSection IV of [CCCC-IV]_ defines the standard interface files that were created by the\nCCCC. In addition to the standard files listed in this document, software like DIF3D,\nPARTISN, and other reactor physics codes may have their own code-dependent interface files.\nIn most cases, they follow a similar structure and definition as the standardized formats,\nbut were not general enough to be used and implemented across all codes. 
The following\nare listed as the standard interface files:\n\n* ISOTXS (:py:mod:`armi.nuclearDataIO.cccc.isotxs`) - Nuclide (isotope) - ordered, multigroup\n  neutron cross section data\n* GRUPXS - Group-ordered, isotopic, multigroup neutron cross section data.\n* BRKOXS - Bondarenko (Russian format) self-shielding data\n* DLAYXS (:py:mod:`armi.nuclearDataIO.cccc.dlayxs`) - Delayed neutron precursor data\n* ISOGXS (:py:mod:`armi.nuclearDataIO.cccc.gamiso`) - Nuclide (isotope) - ordered, multigroup\n  gamma cross section data\n* GEODST (:py:mod:`armi.nuclearDataIO.cccc.geodst`) - Geometry description\n* NDXSRF - Nuclear density and cross section referencing data\n* ZNATDN - Zone and subzone atomic densities\n* SEARCH - Criticality search data\n* SNCON - Sn (Discrete Ordinates) constants\n* FIXSRC (:py:mod:`armi.nuclearDataIO.cccc.fixsrc`) - Distributed and surface fixed sources\n* RTFLUX (:py:mod:`armi.nuclearDataIO.cccc.rtflux`) - Regular total (scalar) neutron flux\n* ATFLUX (:py:mod:`armi.nuclearDataIO.cccc.rtflux`) - Adjoint total (scalar) neutron flux\n* RCURNT - Regular neutron current\n* ACURNT - Adjoint neutron current\n* RAFLUX - Regular angular neutron flux\n* AAFLUX - Adjoint angular neutron flux\n* RZFLUX (:py:mod:`armi.nuclearDataIO.cccc.rzflux`) - Regular, zone-avearged flux by neutron group\n* PWDINT (:py:mod:`armi.nuclearDataIO.cccc.pwdint`) - Power densitiy by mesh interval\n* WORTHS - Reactivity (per cc) by mesh interval\n\nOther code-dependent interface files may also be included in this package but should be\ndocumented which software they are created from and used for. The file structures should\nalso be provided in the module-level docstrings.\n\n.. [CCCC-IV] R. Douglas O'Dell, \"Standard Interface Files and Procedures for Reactor Physics\n             Codes, Version IV,\" LA-6941-MS, Los Alamos National Laboratory (September 1977).\n             Web. doi:10.2172/5369298. 
(`OSTI <https://www.osti.gov/biblio/5369298>`__)\n\nUsing the system\n----------------\nMost supported files are in their own module. Each has their own :py:class:`cccc.DataContainer` to\nhold the data and one or more :py:class:`cccc.Stream` objects representing different I/O formats.\nThe general pattern is to use any of the following methods on a ``Stream`` object:\n\n* :py:meth:`cccc.Stream.readBinary`\n* :py:meth:`cccc.Stream.readAscii`\n* :py:meth:`cccc.Stream.writeBinary`\n* :py:meth:`cccc.Stream.writeAscii`\n\nFor example, to get an RTFLUX data structure from a binary file named ``RTFLUX``, you run::\n\n>>> from armi.nuclearDataIO.cccc import rtflux\n>>> rtfluxData = rtflux.RtfluxStream.readBinary(\"RTFLUX\")\n\nThen if you want to write that data to an ASCII file named ``rtflux.ascii``, you run:\n\n>>> rtflux.RtfluxStream.writeAscii(rtfluxData, \"rtflux.ascii\")\n\nImplementation details\n----------------------\nWe have come up with a powerful but somewhat confusing-at-first implementation that allows\nus to define the structure of the files in code just once, in a way that can both read and write\nthe files. Many methods start with the prefix ``rw`` to indicate that they are used\nduring both reading and writing.\n\nNormal users of this code do not need to know the implementation details.\n\nDiscussion\n----------\nWhile loading from stream classmethods is explicit and nice and all, there has been some\ntalk about moving the read/write ascii/binary methods to the data classes for\nimplementations that use data structures. This would hide the Stream subclasses from\nusers, which may be appropriate. On the other hand, logic to select which stream\nsubclass to user (e.g. adjoint vs. real) will have to be moved into the\ndata classes.\n\nNotes\n-----\nA CCCC record consists of a leading and ending integer, which indicates the size of the record in\nbytes. 
(This is actually just FORTRAN unformatted sequential files are written, see e.g.\nhttps://gcc.gnu.org/onlinedocs/gfortran/File-format-of-unformatted-sequential-files.html)\nAs a result, it is possible to perform a check when reading in a record to determine if it\nwas read correctly, by making sure the record size at the beginning and ending of a record are\nalways equal.\n\nThere are similarities between this code and that in the PyNE cccc subpackage.\nThis is the original source of the code. TerraPower authorized the publication\nof some of the CCCC code to the PyNE project way back in the 2011 era. This code\nhas since been updated significantly to both read and write the files.\n\nThis was originally inspired by Prof. James Paul Holloway's alpha\nrelease of ccccutils written in c++ from 2001.\n\"\"\"\n\nfrom armi.nuclearDataIO.cccc.cccc import *  # noqa: F403\n"
  },
  {
    "path": "armi/nuclearDataIO/cccc/cccc.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nDefines containers for the reading and writing standard interface files for reactor physics codes.\n\n.. impl:: Generic tool for reading and writing Committee on Computer Code Coordination (CCCC) format\n    files for reactor physics codes\n    :id: I_ARMI_NUCDATA\n    :implements: R_ARMI_NUCDATA_ISOTXS,\n                 R_ARMI_NUCDATA_GAMISO,\n                 R_ARMI_NUCDATA_GEODST,\n                 R_ARMI_NUCDATA_DIF3D,\n                 R_ARMI_NUCDATA_PMATRX,\n                 R_ARMI_NUCDATA_DLAYXS\n\n    This module provides a number of base classes that implement general capabilities for binary and\n    ASCII file I/O. The :py:class:`IORecord` serves as an abstract base class that instantiates a\n    number of methods that the binary and ASCII children classes are meant to implement. These\n    methods, prefixed with ``rw``, are meant to convert literal data types, e.g. float or int, to\n    either binary or ASCII. This base class does its own conversion for container data types, e.g.\n    list or matrix, relying on the child implementation of the literal types that the container\n    possesses. The binary conversion is implemented in :py:class:`BinaryRecordReader` and\n    :py:class:`BinaryRecordWriter`. 
The ASCII conversion is implemented in\n    :py:class:`AsciiRecordReader` and :py:class:`AsciiRecordWriter`.\n\n    These :py:class:`IORecord` classes are used within :py:class:`Stream` objects for the data\n    conversion. :py:class:`Stream` is a context manager that opens a file for reading or writing on\n    the ``__enter__`` and closes that file upon ``__exit__``. :py:class:`Stream` is an abstract base\n    class that is subclassed for each CCCC file. It is subclassed directly for the CCCC files that\n    contain cross-section data:\n\n      * :py:class:`ISOTXS <armi.nuclearDataIO.cccc.isotxs.IsotxsIO>`\n      * :py:mod:`GAMISO <armi.nuclearDataIO.cccc.gamiso>`\n      * :py:class:`PMATRX <armi.nuclearDataIO.cccc.pmatrx.PmatrxIO>`\n      * :py:class:`DLAYXS <armi.nuclearDataIO.cccc.dlayxs.DlayxsIO>`\n      * :py:mod:`COMPXS <armi.nuclearDataIO.cccc.compxs>`\n\n    For the CCCC file types that are outputs from a flux solver such as DIF3D (e.g., GEODST, DIF3D,\n    NHFLUX) the streams are subclassed from :py:class:`StreamWithDataContainer`, which is a special\n    abstract subclass of :py:class:`Stream` that implements a common pattern used for these file\n    types. In a :py:class:`StreamWithDataContainer`, the data is directly read to or written from a\n    specialized data container.\n\n    The data container structure for each type of CCCC file is implemented in the module for that\n    file, as a subclass of :py:class:`DataContainer`. The subclasses for each CCCC file type define\n    standard attribute names for the data that will be read from or written to the CCCC file. 
CCCC\n    file types that follow this pattern include:\n\n      * :py:class:`GEODST <armi.nuclearDataIO.cccc.geodst.GeodstData>`\n      * :py:class:`DIF3D <armi.nuclearDataIO.cccc.dif3d.Dif3dData>`\n      * :py:class:`NHFLUX <armi.nuclearDataIO.cccc.nhflux.NHFLUX>` (and multiple sub-classes)\n      * :py:class:`LABELS <armi.nuclearDataIO.cccc.labels.LabelsData>`\n      * :py:class:`PWDINT <armi.nuclearDataIO.cccc.pwdint.PwdintData>`\n      * :py:class:`RTFLUX <armi.nuclearDataIO.cccc.rtflux.RtfluxData>`\n      * :py:class:`RZFLUX <armi.nuclearDataIO.cccc.rzflux.RzfluxData>`\n      * :py:class:`RTFLUX <armi.nuclearDataIO.cccc.rtflux.RtfluxData>`\n\n    The logic to parse or write each specific file format is contained within the\n    :py:meth:`Stream.readWrite` implementations of the respective subclasses.\n\"\"\"\n\nimport io\nimport itertools\nimport os\nimport struct\nfrom copy import deepcopy\nfrom typing import List\n\nimport numpy as np\n\nfrom armi import runLog\nfrom armi.nuclearDataIO import nuclearFileMetadata\n\nIMPLICIT_INT = \"IJKLMN\"\n\"\"\"Letters that trigger implicit integer types in old FORTRAN 77 codes.\"\"\"\n\n\nclass IORecord:\n    \"\"\"\n    A single CCCC record.\n\n    Reads or writes information to or from a stream.\n\n    Parameters\n    ----------\n    stream\n        A collection of data to be read or written\n\n    hasRecordBoundaries : bool\n        A True value means the fortran file was written using access='sequential' and contains\n        a 4 byte int count at the beginning and end of each record. Otherwise, if False the\n        fortran file was written using access='direct'.\n\n    Notes\n    -----\n    The methods in this object often have `rw` prefixes, meaning the same method\n    can be used for both reading and writing. We consider this a significant\n    achievement that enforces consistency between the code for reading and writing\n    CCCC records. 
The tradeoff is that it's a bit challenging to comprehend at first.\n    \"\"\"\n\n    _intSize = struct.calcsize(\"i\")\n    _longSize = struct.calcsize(\"q\")\n    maxsize = len(str(2**31 - 1))  # limit to max short even though Python3 can go bigger.\n    _intFormat = \" {{:>+{}}}\".format(maxsize)\n    _intLength = maxsize + 1\n\n    _floatSize = struct.calcsize(\"f\")\n    _floatFormat = \" {:+.16E}\"\n    _floatLength = 2 + 2 + 16 + 4\n\n    _characterSize = struct.calcsize(\"c\")\n    count = 0\n\n    def __init__(self, stream, hasRecordBoundaries=True):\n        IORecord.count += 1\n        self._stream = stream\n        self.numBytes = 0\n        self.byteCount = 0\n        self._hasRecordBoundaries = hasRecordBoundaries\n\n    def __enter__(self):\n        \"\"\"Open the stream for reading/writing and return :code:`self`.\n\n        See Also\n        --------\n        armi.nuclearDataIO.cccc.IORecord.open\n        \"\"\"\n        self.open()\n        return self\n\n    def __exit__(self, exc_type, exc_value, traceback):\n        if exc_type is not None:\n            return\n        try:\n            self.close()\n        except Exception as ee:\n            runLog.error(\"Failed to close CCCC record.\")\n            runLog.error(ee)\n            raise BufferError(\n                \"Failed to close record, {}.\\n{}\\n\"\n                \"It is possible too much data was read from the \"\n                \"record, and the end of the stream was reached.\\n\"\n                \"\".format(self, ee)\n            )\n\n    def open(self):\n        \"\"\"Abstract method for opening the stream.\"\"\"\n        raise NotImplementedError()\n\n    def close(self):\n        \"\"\"Abstract method for closing the stream.\"\"\"\n        raise NotImplementedError()\n\n    def rwInt(self, val):\n        \"\"\"Abstract method for reading or writing an integer.\n\n        Notes\n        -----\n        The method has a seemingly odd signature, because it is used for both 
reading and writing.\n        When writing, the :code:`val` should have value, but when the record is being read,\n        :code:`val` can be :code:`None` or anything else; it is ignored.\n        \"\"\"\n        raise NotImplementedError()\n\n    def rwBool(self, val):\n        \"\"\"Read or write a boolean value from an integer.\"\"\"\n        val = False if not isinstance(val, bool) else val\n        return bool(self.rwInt(int(val)))\n\n    def rwFloat(self, val):\n        \"\"\"Abstract method for reading or writing a floating point (single precision) value.\n\n        Notes\n        -----\n        The method has a seemingly odd signature, because it is used for both reading and writing.\n        When writing, the :code:`val` should have value, but when the record is being read,\n        :code:`val` can be :code:`None` or anything else; it is ignored.\n        \"\"\"\n        raise NotImplementedError()\n\n    def rwDouble(self, val):\n        \"\"\"Abstract method for reading or writing a floating point (double precision) value.\n\n        Notes\n        -----\n        The method has a seemingly odd signature, because it is used for both reading and writing.\n        When writing, the :code:`val` should have value, but when the record is being read,\n        :code:`val` can be :code:`None` or anything else; it is ignored.\n        \"\"\"\n        raise NotImplementedError()\n\n    def rwString(self, val, length):\n        \"\"\"Abstract method for reading or writing a string.\n\n        Notes\n        -----\n        The method has a seemingly odd signature, because it is used for both reading and writing.\n        When writing, the :code:`val` should have value, but when the record is being read,\n        :code:`val` can be :code:`None` or anything else; it is ignored.\n        \"\"\"\n        raise NotImplementedError()\n\n    def rwList(self, contents, containedType, length, strLength=0):\n        \"\"\"\n        A method for reading and writing a (array) of 
items of a specific type.\n\n        Notes\n        -----\n        The method has a seemingly odd signature, because it is used for both reading and writing.\n        When writing, the :code:`contents` should have value, but when the record is being read,\n        :code:`contents` can be :code:`None` or anything else; it is ignored.\n\n        Warning\n        -------\n        If a :code:`contents` evaluates to :code:`True`, the array must be the same size as\n        :code:`length`.\n        \"\"\"\n        actions = {\n            \"int\": self.rwInt,\n            \"float\": self.rwFloat,\n            \"string\": lambda val: self.rwString(val, strLength),\n            \"double\": self.rwDouble,\n        }\n        action = actions.get(containedType)\n        if action is None:\n            raise Exception('Cannot pack or unpack the type \"{}\".'.format(containedType))\n        # this little trick will make this work for both reading and writing, yay!\n        if contents is None or len(contents) == 0:\n            contents = [None for _ in range(length)]\n        return np.array([action(contents[ii]) for ii in range(length)])\n\n    def rwMatrix(self, contents, *shape):\n        \"\"\"A method for reading and writing a matrix of floating point values.\n\n        Notes\n        -----\n        The method has a seemingly odd signature, because it is used for both reading and writing.\n        When writing, the :code:`contents` should have value, but when the record is being read,\n        :code:`contents` can be :code:`None` or anything else; it is ignored.\n\n        Warning\n        -------\n        If a :code:`contents` is not :code:`None`, the array must be the same shape as\n        :code:`*shape`.\n        \"\"\"\n        return self._rwMatrix(contents, self.rwFloat, *shape)\n\n    def rwDoubleMatrix(self, contents, *shape):\n        \"\"\"Read or write a matrix of floating point values.\n\n        Notes\n        -----\n        The method has a seemingly odd 
signature, because it is used for both reading and writing.\n        When writing, the :code:`contents` should have value, but when the record is being read,\n        :code:`contents` can be :code:`None` or anything else; it is ignored.\n\n        Warning\n        -------\n        If a :code:`contents` is not :code:`None`, the array must be the same shape as\n        :code:`*shape`.\n        \"\"\"\n        return self._rwMatrix(contents, self.rwDouble, *shape)\n\n    def rwIntMatrix(self, contents, *shape):\n        \"\"\"Read or write a matrix of int values.\"\"\"\n        return self._rwMatrix(contents, self.rwInt, *shape)\n\n    @staticmethod\n    def _rwMatrix(contents, func, *shape):\n        \"\"\"\n        Read/write a matrix.\n\n        Notes\n        -----\n        This can be important for performance when reading large matrices (e.g. scatter\n        matrices). It may be worth investigating ``np.frombuffer`` on read and\n        something similar on write.\n\n        With shape, the first shape argument should be the outermost loop because\n        these are stored in column major order (the FORTRAN way).\n\n        Note that np.ndarrays can be built with ``order=\"F\"`` to have column-major ordering.\n\n        So if you have ``((MR(I,J),I=1,NCINTI),J=1,NCINTJ)`` you would pass in\n        the shape as (NCINTJ, NCINTI).\n        \"\"\"\n        fortranShape = list(reversed(shape))\n        if contents is None or contents.size == 0:\n            contents = np.empty(fortranShape)\n        for index in itertools.product(*[range(ii) for ii in shape]):\n            fortranIndex = tuple(reversed(index))\n            contents[fortranIndex] = func(contents[fortranIndex])\n        return contents\n\n    def rwImplicitlyTypedMap(self, keys: List[str], contents) -> dict:\n        \"\"\"\n        Read a dict of floats and/or ints with FORTRAN77-style implicit typing.\n\n        Length of list is determined by length of list of keys passed in.\n        \"\"\"\n     
   for key in keys:\n            # ready for some implicit madness from the FORTRAN 77 days?\n            if key[0].upper() in IMPLICIT_INT:\n                contents[key] = self.rwInt(contents[key])\n            else:\n                contents[key] = self.rwFloat(contents[key])\n        return contents\n\n\nclass BinaryRecordReader(IORecord):\n    \"\"\"\n    Writes a single CCCC record in binary format.\n\n    Notes\n    -----\n    This class reads a single CCCC record in binary format. A CCCC record consists of a leading and\n    ending integer indicating how many bytes the record is. The data contained within the record may\n    be integer, float, double, or string.\n    \"\"\"\n\n    def open(self):\n        \"\"\"Open the record by reading the number of bytes in the record, this value will be used\n        to ensure the entire record was read.\n        \"\"\"\n        if not self._hasRecordBoundaries:\n            return\n        self.numBytes = self.rwInt(None)\n        self.byteCount -= 4\n\n    def close(self):\n        \"\"\"Closes the record by reading the number of bytes from then end of the record, if it\n        does not match the initial value, an exception will be raised.\n        \"\"\"\n        if not self._hasRecordBoundaries:\n            return\n        # now read end of record\n        numBytes2 = self.rwInt(None)\n        self.byteCount -= 4\n        if numBytes2 != self.numBytes:\n            raise BufferError(\n                \"Number of bytes specified at end the of record, {}, \"\n                \"does not match the originally specified number, {}.\\n\"\n                \"Read {} bytes.\".format(numBytes2, self.numBytes, self.byteCount)\n            )\n\n    def rwInt(self, val):\n        \"\"\"Reads an integer value from the binary stream.\"\"\"\n        self.byteCount += self._intSize\n        (i,) = struct.unpack(\"i\", self._stream.read(self._intSize))\n        return i\n\n    def rwBool(self, val):\n        \"\"\"Read or write a 
boolean value from an integer.\"\"\"\n        return IORecord.rwBool(self, val)\n\n    def rwLong(self, val):\n        \"\"\"Reads an integer value from the binary stream.\"\"\"\n        self.byteCount += self._longSize\n        (ll,) = struct.unpack(\"q\", self._stream.read(self._longSize))\n        return ll\n\n    def rwFloat(self, val):\n        \"\"\"Reads a single precision floating point value from the binary stream.\"\"\"\n        self.byteCount += self._floatSize\n        (f,) = struct.unpack(\"f\", self._stream.read(self._floatSize))\n        return f\n\n    def rwDouble(self, val):\n        \"\"\"Reads a double precision floating point value from the binary stream.\"\"\"\n        self.byteCount += self._floatSize * 2\n        (d,) = struct.unpack(\"d\", self._stream.read(self._floatSize * 2))\n        return d\n\n    def rwString(self, val, length):\n        \"\"\"Reads a string of specified length from the binary stream.\"\"\"\n        self.byteCount += length\n        (s,) = struct.unpack(\"%ds\" % length, self._stream.read(length))\n        return s.rstrip().decode()  # convert bytes to string on reading.\n\n\nclass BinaryRecordWriter(IORecord):\n    \"\"\"\n    Reads a single CCCC record in binary format.\n\n    Reads binary information sequentially.\n    \"\"\"\n\n    def __init__(self, stream, hasRecordBoundaries=True):\n        IORecord.__init__(self, stream, hasRecordBoundaries)\n        self.data = None\n\n    def open(self):\n        self.data = []\n\n    def close(self):\n        if self._hasRecordBoundaries:\n            packedNumBytes = self._getPackedNumBytes()\n            self._stream.write(packedNumBytes)\n        for i in range(0, len(self.data) + 1, io.DEFAULT_BUFFER_SIZE):\n            self._write_buffer_to_stream(i)\n\n        if self._hasRecordBoundaries:\n            self._stream.write(packedNumBytes)\n        self.data = None\n\n    def _getPackedNumBytes(self):\n        return struct.pack(\"i\", self.numBytes)\n\n    def 
_write_buffer_to_stream(self, i):\n        self._stream.write(b\"\".join(self.data[i : i + io.DEFAULT_BUFFER_SIZE]))\n\n    def rwInt(self, val):\n        self.numBytes += self._intSize\n        self.data.append(struct.pack(\"i\", val))\n        return val\n\n    def rwBool(self, val):\n        \"\"\"Read or write a boolean value from an integer.\"\"\"\n        return IORecord.rwBool(self, val)\n\n    def rwLong(self, val):\n        \"\"\"Reads an integer value from the binary stream.\"\"\"\n        self.byteCount += self._longSize\n        self.data.append(struct.pack(\"q\", val))\n        return val\n\n    def rwFloat(self, val):\n        self.numBytes += self._floatSize\n        self.data.append(struct.pack(\"f\", val))\n        return val\n\n    def rwDouble(self, val):\n        self.numBytes += self._floatSize * 2\n        self.data.append(struct.pack(\"d\", val))\n        return val\n\n    def rwString(self, val, length):\n        self.numBytes += length * self._characterSize\n        self.data.append(struct.pack(\"%ds\" % length, val.ljust(length).encode(\"utf-8\")))\n        return val\n\n\nclass AsciiRecordReader(BinaryRecordReader):\n    \"\"\"\n    Reads a single CCCC record in ASCII format.\n\n    See Also\n    --------\n    AsciiRecordWriter\n    \"\"\"\n\n    def close(self):\n        BinaryRecordReader.close(self)\n        # read one extra character for the new line \\n... python somehow correctly figures out\n        # that on windows \\r\\n is really just a \\n... 
no idea how.\n        self._stream.read(1)\n\n    def _getPackedNumBytes(self):\n        return self.numBytes\n\n    def _write_buffer_to_stream(self, i):\n        self._stream.write(\"\".join(self.data[i : i + io.DEFAULT_BUFFER_SIZE]))\n\n    def rwInt(self, val):\n        return int(self._stream.read(self._intLength))\n\n    def rwFloat(self, val):\n        return float(self._stream.read(self._floatLength))\n\n    def rwDouble(self, val):\n        return self.rwFloat(val)\n\n    def rwString(self, val, length):\n        # read one space\n        self._stream.read(1)\n        return self._stream.read(length).rstrip()\n\n\nclass AsciiRecordWriter(IORecord):\n    r\"\"\"\n    Writes a single CCCC record in ASCII format.\n\n    Since there is no specific format of an ASCII CCCC record, the format is roughly the same as\n    the :py:class:`BinaryRecordWriter`, except that the :class:`AsciiRecordReader` puts a space in\n    front of all values (ints, floats, and strings), and puts a newline character :code:`\\\\n` at the\n    end of all records.\n    \"\"\"\n\n    def __init__(self, stream, hasRecordBoundaries=True):\n        IORecord.__init__(self, stream, hasRecordBoundaries)\n        self.data = None\n        self.numBytes = 0\n\n    def open(self):\n        self.data = []\n\n    def close(self):\n        self._stream.write(self._intFormat.format(self.numBytes))\n        self._stream.write(\"\".join(self.data))\n        self._stream.write(self._intFormat.format(self.numBytes))\n        self._stream.write(\"\\n\")\n        self.data = None\n\n    def rwInt(self, val):\n        self.numBytes += self._intSize\n        self.data.append(self._intFormat.format(val))\n        return val\n\n    def rwFloat(self, val):\n        self.numBytes += self._floatSize\n        self.data.append(self._floatFormat.format(val))\n        return val\n\n    def rwDouble(self, val):\n        self.numBytes += self._floatSize * 2\n        self.data.append(self._floatFormat.format(val))\n      
  return val\n\n    def rwString(self, val, length):\n        self.numBytes += length * self._characterSize\n        self.data.append(\" {value:<{length}}\".format(length=length, value=val))\n        return val\n\n\nclass DataContainer:\n    \"\"\"\n    Data representation that can be read/written to/from with a cccc.Stream.\n\n    This is an optional convenience class expected to be used in\n    concert with :py:class:`StreamWithDataStructure`.\n    \"\"\"\n\n    def __init__(self):\n        # Need Metadata subclass for default keys\n        self.metadata = nuclearFileMetadata._Metadata()\n\n\nclass Stream:\n    \"\"\"\n    An abstract CCCC IO stream.\n\n    Warning\n    -------\n    This is more of a stream Parser/Serializer than an actual stream.\n\n    Notes\n    -----\n    A concrete instance of this class should implement the\n    :py:meth:`~armi.nuclearDataIO.cccc.Stream.readWrite` method.\n    \"\"\"\n\n    _fileModes = {\n        \"rb\": BinaryRecordReader,\n        \"wb\": BinaryRecordWriter,\n        \"r\": AsciiRecordReader,\n        \"w\": AsciiRecordWriter,\n    }\n\n    def __init__(self, fileName, fileMode):\n        \"\"\"\n        Create an instance of a :py:class:`~armi.nuclearDataIO.cccc.Stream`.\n\n        Parameters\n        ----------\n        fileName : str\n            name of the file to be read\n        fileMode : str\n            the file mode, i.e. 
'w' for writing ASCII, 'r' for reading ASCII, 'wb' for writing\n            binary, and 'rb' for reading binary.\n        \"\"\"\n        self._fileName = fileName\n        self._fileMode = fileMode\n        self._stream = None\n\n        if fileMode not in self._fileModes:\n            raise KeyError(\"{} not in {}\".format(\"fileMode\", list(self._fileModes.keys())))\n\n    def __deepcopy__(self, memo):\n        \"\"\"Open file objects can't be deepcopied so we clear them before copying.\"\"\"\n        cls = self.__class__\n        result = cls.__new__(cls)\n        result._stream = None\n        memo[id(self)] = result\n        for k, v in self.__dict__.items():\n            if k != \"_stream\":\n                setattr(result, k, deepcopy(v, memo))\n        return result\n\n    def __repr__(self):\n        return \"<{} {}>\".format(self.__class__.__name__, self._fileName)\n\n    def __enter__(self):\n        \"\"\"At the inception of a with command, open up the file for a read/write.\"\"\"\n        try:\n            self._stream = open(self._fileName, self._fileMode)\n        except IOError:\n            runLog.error(\"Cannot find {} in {}\".format(self._fileName, os.getcwd()))\n            raise\n        return self\n\n    def __exit__(self, exc_type, exc_value, traceback):\n        \"\"\"At the termination of a with command, close the file.\"\"\"\n        self._stream.close()\n\n    def readWrite(self):\n        \"\"\"This method should be implemented on any sub-classes to specify the order of records.\"\"\"\n        raise NotImplementedError()\n\n    def createRecord(self, hasRecordBoundaries=True):\n        recordClass = self._fileModes[self._fileMode]\n        return recordClass(self._stream, hasRecordBoundaries)\n\n    @classmethod\n    def readBinary(cls, fileName: str):\n        \"\"\"Read data from a binary file into a data structure.\"\"\"\n        return cls._read(fileName, \"rb\")\n\n    @classmethod\n    def readAscii(cls, fileName: str):\n        
\"\"\"Read data from an ASCII file into a data structure.\"\"\"\n        return cls._read(fileName, \"r\")\n\n    @classmethod\n    def _read(cls, fileName, fileMode):\n        raise NotImplementedError()\n\n    @classmethod\n    def writeBinary(cls, data: DataContainer, fileName: str):\n        \"\"\"Write the contents of a data container to a binary file.\"\"\"\n        return cls._write(data, fileName, \"wb\")\n\n    @classmethod\n    def writeAscii(cls, data: DataContainer, fileName: str):\n        \"\"\"Write the contents of a data container to an ASCII file.\"\"\"\n        return cls._write(data, fileName, \"w\")\n\n    @classmethod\n    def _write(cls, lib, fileName, fileMode):\n        raise NotImplementedError()\n\n\nclass StreamWithDataContainer(Stream):\n    \"\"\"\n    A cccc.Stream that reads/writes to a specialized data container.\n\n    This is a relatively common pattern so some of the boilerplate\n    is handled here.\n\n    Warning\n    -------\n    This is more of a stream Parser/Serializer than an actual stream.\n\n    Notes\n    -----\n    It should be possible to fully merge this with ``Stream``, which may make\n    this a little less confusing.\n    \"\"\"\n\n    def __init__(self, data: DataContainer, fileName: str, fileMode: str):\n        Stream.__init__(self, fileName, fileMode)\n        self._data = data\n        self._metadata = self._data.metadata\n\n    @staticmethod\n    def _getDataContainer() -> DataContainer:\n        raise NotImplementedError()\n\n    @classmethod\n    def _read(cls, fileName: str, fileMode: str):\n        data = cls._getDataContainer()\n        return cls._readWrite(\n            data,\n            fileName,\n            fileMode,\n        )\n\n    @classmethod\n    def _write(cls, data: DataContainer, fileName: str, fileMode: str):\n        return cls._readWrite(data, fileName, fileMode)\n\n    @classmethod\n    def _readWrite(cls, data: DataContainer, fileName: str, fileMode: str):\n        with cls(data, 
fileName, fileMode) as rw:\n            rw.readWrite()\n        return data\n\n\ndef getBlockBandwidth(m, nintj, nblok):\n    \"\"\"\n    Return block bandwidth JL, JU from CCCC interface files.\n\n    It is common for CCCC files to block data in various records with\n    a description along the lines of::\n\n        WITH M AS THE BLOCK INDEX, JL=(M-1)*((NINTJ-1)/NBLOK +1)+1\n        AND JU=MIN0(NINTJ,JUP) WHERE JUP=M*((NINTJ-1)/NBLOK +1)\n\n    This function computes JL and JU for these purposes. It also converts\n    JL and JU to zero based indices rather than 1 based ones, as is almost\n    always wanted when dealing with python/numpy matrices.\n\n    The term *bandwidth* refers to a kind of sparse matrix representation.\n    Some rows only have columns JL to JH in them rather than 0 to JMAX.\n    The non-zero band from JL to JH is what we're talking about here.\n    \"\"\"\n    x = (nintj - 1) // nblok + 1\n    jLow = (m - 1) * x + 1\n    jHigh = min(nintj, m * x)\n    return jLow - 1, jHigh - 1\n"
  },
  {
    "path": "armi/nuclearDataIO/cccc/compxs.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nCOMPXS is a binary file that contains multigroup macroscopic cross sections for homogenized\nregions in a full core. The file format can be found in [DIF3D]_.\n\n.. [DIF3D] Derstine, K. L. DIF3D: A Code to Solve One-, Two-, and\n           Three-Dimensional Finite-Difference Diffusion Theory Problems,\n           report, April 1984; Argonne, Illinois.\n           (https://digital.library.unt.edu/ark:/67531/metadc283553/:\n           accessed October 17, 2019), University of North Texas Libraries,\n           Digital Library, https://digital.library.unt.edu; crediting UNT\n           Libraries Government Documents Department.\n\nThe file structure is listed here ::\n\n          RECORD TYPE                           PRESENT IF\n          ===================================   ==========\n          SPECIFICATIONS                        ALWAYS\n          COMPOSITION INDEPENDENT DATA          ALWAYS\n    ********* (REPEAT FOR ALL COMPOSITIONS)\n    *     COMPOSITION SPECIFICATIONS            ALWAYS\n    *  ****** (REPEAT FOR ALL ENERGY GROUPS\n    *  *       IN THE ORDER OF DECREASING\n    *  *       ENERGY)\n    *  *  COMPOSITION MACROSCOPIC GROUP         ALWAYS\n    *  *  CROSS SECTIONS\n    *********\n          POWER CONVERSION FACTORS              ALWAYS\n\nSee Also\n--------\n:py:mod:`armi.nuclearDataIO.cccc.isotxs`\n\nExamples\n--------\n    >>> from 
armi.nuclearDataIO import compxs\n    >>> lib = compxs.readBinary(\"COMPXS\")\n    >>> r0 = lib.regions[0]\n    >>> r0.macros.fission\n    # returns fission XS for this region\n    >>> r0.macros.higherOrderScatter[1]\n    # returns P1 scattering matrix\n    >>> r0.macros.higherOrderScatter[5] *= 0  # zero out P5 scattering matrix\n    >>> compxs.writeBinary(lib, \"COMPXS2\")\n\nNotes\n-----\nPower conversion factors are used by some codes to determine how to scale the flux\nin a region to a desired power based on either fissions/watt-second or\ncaptures/watt-second. If the user does not plan on using these values, the COMPXS\nformat indicates the values should be set to ``-1E+20``.\n\nThe value of ``powerConvMult`` \"times the group J integrated flux for the regions\ncontaining the current composition yields the total power in those regions and\nenergy group J due to fissions and non-fission absorptions.\"\n\nThe ``d<1,2,3>Multiplier`` values are the first, second, and third dimension\ndirectional diffusion coefficient multipliers, respectively. 
Similarly, the ``d<1,2,3>Additive``\nvalues are the first, second, and third dimension directional diffusion coefficient\nadditive terms, respectively.\n\"\"\"\n\nfrom traceback import format_exc\n\nimport numpy as np\nfrom scipy.sparse import csc_matrix\n\nfrom armi import runLog\nfrom armi.nuclearDataIO import cccc\nfrom armi.nuclearDataIO.nuclearFileMetadata import (\n    COMPXS_POWER_CONVERSION_FACTORS,\n    REGIONXS_POWER_CONVERT_DIRECTIONAL_DIFF,\n    RegionXSMetadata,\n)\nfrom armi.nuclearDataIO.xsCollections import XSCollection\nfrom armi.utils.properties import lockImmutableProperties, unlockImmutableProperties\n\n\ndef _getRegionIO():\n    return _CompxsRegionIO\n\n\ndef _flattenScatteringVector(colVector, group, numUpScatter, numDownScatter):\n    flatVector = colVector[group - numDownScatter : group + numUpScatter + 1].toarray().flatten()\n    return list(reversed(flatVector))\n\n\ndef compare(lib1, lib2, tolerance=0.0, verbose=False):\n    \"\"\"\n    Compare two COMPXS libraries and return True if equal, or False if not equal.\n\n    Parameters\n    ----------\n    lib1: XSLibrary\n        first library\n    lib2: XSLibrary\n        second library\n    tolerance: float\n        Disregard errors that are less than tolerance.\n    verbose: bool\n        show the macroscopic cross sections that are not equal\n\n    Returns\n    -------\n    equals: bool\n        True if libraries are equal, else false\n    \"\"\"\n    from armi.nuclearDataIO.xsLibraries import compareLibraryNeutronEnergies\n\n    equals = True\n    equals &= compareLibraryNeutronEnergies(lib1, lib2, tolerance)\n    equals &= lib1.compxsMetadata.compare(lib2.compxsMetadata, lib1, lib2, tolerance)\n    for regionName in set(lib1.regionLabels + lib2.regionLabels):\n        region1 = lib1[regionName]\n        region2 = lib2[regionName]\n        if region1 is None or region2 is None:\n            warning = \"Region {} is not in library {} and cannot be compared\"\n            if region1:\n    
            runLog.warning(warning.format(region1, 2))\n            if region2:\n                runLog.warning(warning.format(region2, 1))\n                equals = False\n                continue\n        equals &= _compareRegionXS(region1, region2, tolerance, verbose)\n    return equals\n\n\ndef _compareRegionXS(region1, region2, tolerance, verbose):\n    \"\"\"Compare the macroscopic cross sections between two homogenized regions.\"\"\"\n    return region1.macros.compare(region2.macros, None, tolerance, verbose)\n\n\nclass _CompxsIO(cccc.Stream):\n    \"\"\"Semi-abstract stream used for reading to/writing from a COMPXS file.\n\n    Parameters\n    ----------\n    fileName: str\n        path to compxs file\n    lib: armi.nuclearDataIO.xsLibrary.CompxsLibrary\n        Compxs library that is being written to or read from `fileName`\n    fileMode: str\n        string indicating if ``fileName`` is being read or written, and\n        in ascii or binary format\n    getRegionFunc: function\n        function that returns a :py:class:`CompxsRegion` object given the name of\n        the region.\n\n    See Also\n    --------\n    armi.nuclearDataIO.cccc.isotxs.IsotxsIO\n    \"\"\"\n\n    _METADATA_TAGS = (\n        \"numComps\",\n        \"numGroups\",\n        \"fileWideChiFlag\",\n        \"numFissComps\",\n        \"maxUpScatterGroups\",\n        \"maxDownScatterGroups\",\n        \"numDelayedFam\",\n        \"maxScatteringOrder\",\n    )\n\n    def __init__(self, fileName, lib, fileMode, getRegionFunc):\n        cccc.Stream.__init__(self, fileName, fileMode)\n        self._lib = lib\n        self._metadata = self._getFileMetadata()\n        self._metadata.fileNames.append(fileName)\n        self._getRegion = getRegionFunc\n        self._isReading = \"r\" in self._fileMode\n\n    def _getFileMetadata(self):\n        return self._lib.compxsMetadata\n\n    def isReadingCompxs(self):\n        return self._isReading\n\n    def fileMode(self):\n        return 
self._fileMode\n\n    @classmethod\n    def _read(cls, fileName, fileMode):\n        from armi.nuclearDataIO.xsLibraries import CompxsLibrary\n\n        lib = CompxsLibrary()\n        return cls._readWrite(\n            lib,\n            fileName,\n            fileMode,\n            lambda containerKey: CompxsRegion(lib, containerKey),\n        )\n\n    @classmethod\n    def _write(cls, lib, fileName, fileMode):\n        return cls._readWrite(lib, fileName, fileMode, lambda containerKey: lib[containerKey])\n\n    @classmethod\n    def _readWrite(cls, lib, fileName, fileMode, getRegionFunc):\n        with _CompxsIO(fileName, lib, fileMode, getRegionFunc) as rw:\n            rw.readWrite()\n        return lib\n\n    def readWrite(self):\n        \"\"\"\n        Read from or write to the COMPXS file.\n\n        See Also\n        --------\n        armi.nuclearDataIO.cccc.isotxs.IsotxsIO.readWrite : reading/writing ISOTXS files\n        \"\"\"\n        runLog.info(\"{} macroscopic cross library {}\".format(\"Reading\" if self._isReading else \"Writing\", self))\n        unlockImmutableProperties(self._lib)\n        try:\n            regNames = self._rw1DRecord(self._lib.regionLabels)\n            self._rw2DRecord()\n            for regLabel in regNames:\n                region = self._getRegion(regLabel)\n                regionIO = _getRegionIO()(region, self, self._lib)\n                regionIO.rwRegionData()\n            self._rw5DRecord()\n        except Exception:\n            raise OSError(\"Failed to {} {} \\n\\n\\n{}\".format(\"read\" if self._isReading else \"write\", self, format_exc()))\n        finally:\n            lockImmutableProperties(self._lib)\n\n    def _rw1DRecord(self, regNames):\n        \"\"\"Write the specifications block.\"\"\"\n        with self.createRecord() as record:\n            for datum in self._METADATA_TAGS:\n                self._metadata[datum] = record.rwInt(self._metadata[datum])\n            self._metadata[\"reservedFlag1\"] = 
record.rwInt(self._metadata[\"reservedFlag1\"])\n            self._metadata[\"reservedFlag2\"] = record.rwInt(self._metadata[\"reservedFlag2\"])\n            regNames = list(range(self._metadata[\"numComps\"]))\n        return regNames\n\n    def _rw2DRecord(self):\n        \"\"\"Write the composition independent data block.\"\"\"\n        with self.createRecord() as record:\n            if self._metadata[\"fileWideChiFlag\"]:\n                self._metadata[\"fileWideChi\"] = record.rwMatrix(\n                    self._metadata[\"fileWideChi\"],\n                    (self._metadata[\"fileWideChiFlag\"], self._metadata[\"numGroups\"]),\n                )\n            self._rwLibraryEnergies(record)\n            self._metadata[\"minimumNeutronEnergy\"] = record.rwDouble(self._metadata[\"minimumNeutronEnergy\"])\n            self._rwDelayedProperties(record, self._metadata[\"numDelayedFam\"])\n\n    def _rwLibraryEnergies(self, record):\n        self._lib.neutronVelocity = record.rwList(self._lib.neutronVelocity, \"double\", self._metadata[\"numGroups\"])\n        self._lib.neutronEnergyUpperBounds = record.rwList(\n            self._lib.neutronEnergyUpperBounds, \"double\", self._metadata[\"numGroups\"]\n        )\n\n    def _rwDelayedProperties(self, record, numDelayedFam):\n        if numDelayedFam:\n            self._metadata[\"delayedChi\"] = record.rwMatrix(\n                self._metadata[\"delayedChi\"],\n                (self._metadata[\"numGroups\"], numDelayedFam),\n            )\n\n            self._metadata[\"delayedDecayConstant\"] = record.rwList(\n                self._metadata[\"delayedDecayConstant\"], \"double\", numDelayedFam\n            )\n\n        self._metadata[\"compFamiliesWithPrecursors\"] = record.rwList(\n            self._metadata[\"compFamiliesWithPrecursors\"],\n            \"int\",\n            self._metadata[\"numComps\"],\n        )\n\n    def _rw5DRecord(self):\n        \"\"\"Write power conversion factors.\"\"\"\n        numComps 
= self._getFileMetadata()[\"numComps\"]\n        with self.createRecord() as record:\n            for factor in COMPXS_POWER_CONVERSION_FACTORS:\n                self._getFileMetadata()[factor] = record.rwList(self._getFileMetadata()[factor], \"double\", numComps)\n\n\nreadBinary = _CompxsIO.readBinary\nreadAscii = _CompxsIO.readAscii\nwriteBinary = _CompxsIO.writeBinary\nwriteAscii = _CompxsIO.writeAscii\n\n\nclass _CompxsRegionIO:\n    \"\"\"\n    Specific object assigned a single region to read/write composition information.\n\n    Used with _COMPXS object to read/write 3D and 4D records -\n    composition specifications and compsosition macroscopic cross sections.\n\n    Cross sections are read/written in order of decreasing energy.\n\n    This differs from the _COMPXS object, as this object acts on a single region, but\n    uses the file mode and file path from the _COMPXS region that instantiated this object.\n    \"\"\"\n\n    _ORDERED_PRIMARY_XS = (\"absorption\", \"total\", \"removal\", \"transport\")\n\n    def __init__(self, region, compxsIO, lib):\n        self._lib = lib\n        self._compxsIO = compxsIO\n        self._region = region\n        self._numGroups = self._getFileMetadata()[\"numGroups\"]\n        self._fileMode = compxsIO.fileMode()\n        self._isReading = compxsIO.isReadingCompxs()\n\n    def _getRegionMetadata(self):\n        return self._region.metadata\n\n    def _getFileMetadata(self):\n        return self._lib.compxsMetadata\n\n    def rwRegionData(self):\n        \"\"\"Read/write the region specific information for this composition.\"\"\"\n        self._rw3DRecord()\n        self._rw4DRecord()\n\n    def _rw3DRecord(self):\n        r\"\"\"Write the composition specifications block.\"\"\"\n        with self._compxsIO.createRecord() as record:\n            self._getRegionMetadata()[\"chiFlag\"] = record.rwInt(self._getRegionMetadata()[\"chiFlag\"])\n            self._getRegionMetadata()[\"numUpScatterGroups\"] = record.rwList(\n    
            self._getRegionMetadata()[\"numUpScatterGroups\"], \"int\", self._numGroups\n            )\n            self._getRegionMetadata()[\"numDownScatterGroups\"] = record.rwList(\n                self._getRegionMetadata()[\"numDownScatterGroups\"],\n                \"int\",\n                self._numGroups,\n            )\n            if self._getRegionMetadata()[\"numPrecursorFamilies\"]:\n                self._getRegionMetadata()[\"numFamI\"] = record.rwList(\n                    self._getRegionMetadata()[\"numFamI\"],\n                    \"int\",\n                    self._getRegionMetadata()[\"numPrecursorFamilies\"],\n                )\n\n    def _rw4DRecord(self):\n        r\"\"\"Write the composition macroscopic cross sections.\"\"\"\n        if self._isReading:\n            self._region.allocateXS(self._getFileMetadata()[\"numGroups\"])\n\n        for group in range(self._getFileMetadata()[\"numGroups\"]):\n            with self._compxsIO.createRecord() as record:\n                self._rwGroup4DRecord(record, group, self._region.macros)\n        if self._isReading:\n            self._region.makeScatteringMatrices()\n\n    def _rwGroup4DRecord(self, record, group, macros):\n        self._rwPrimaryXS(record, group, macros)\n        self._rwScatteringMatrix(record, group, macros, 0)\n\n        for datum in REGIONXS_POWER_CONVERT_DIRECTIONAL_DIFF:\n            self._getRegionMetadata()[datum][group] = record.rwDouble(self._getRegionMetadata()[datum][group])\n\n        if self._getRegionMetadata()[\"numPrecursorFamilies\"]:\n            self._getRegionMetadata()[\"numPrecursorsProduced\", group] = record.rwList(\n                self._getRegionMetadata()[\"numPrecursorsProduced\", group],\n                \"int\",\n                self._getRegionMetadata()[\"numPrecursorFamilies\"],\n            )\n\n        macros.n2n[group] = record.rwDouble(macros.n2n[group])\n        for higherOrder in range(1, self._getFileMetadata()[\"maxScatteringOrder\"] + 1):\n  
          self._rwScatteringMatrix(record, group, macros, higherOrder)\n\n    def _rwPrimaryXS(self, record, group, macros):\n        for xs in self._ORDERED_PRIMARY_XS:\n            macros[xs][group] = record.rwDouble(macros[xs][group])\n\n        if self._getRegionMetadata()[\"chiFlag\"]:\n            macros[\"fission\"][group] = record.rwDouble(macros[\"fission\"][group])\n            macros[\"nuSigF\"][group] = record.rwDouble(macros[\"nuSigF\"][group])\n            macros[\"chi\"][group] = record.rwList(macros[\"chi\"][group], \"double\", self._getRegionMetadata()[\"chiFlag\"])\n\n    def _rwScatteringMatrix(self, record, group, macros, order):\n        numUpScatter = self._getRegionMetadata()[\"numUpScatterGroups\"][group]\n        numDownScatter = self._getRegionMetadata()[\"numDownScatterGroups\"][group]\n\n        sparseMat = macros.higherOrderScatter[order] if order else macros.totalScatter\n\n        dataj = (\n            None\n            if self._isReading\n            else _flattenScatteringVector(sparseMat[:, group], group, numUpScatter, numDownScatter)\n        )\n\n        dataj = record.rwList(dataj, \"double\", numUpScatter + 1 + numDownScatter)\n        indicesj = list(reversed(range(group - numDownScatter, group + numUpScatter + 1)))\n\n        if self._isReading:\n            sparseMat.addColumnData(dataj, indicesj)\n\n\nclass _CompxsScatterMatrix:\n    \"\"\"When reading COMPXS scattering blocks, store the data here and then reconstruct after.\"\"\"\n\n    def __init__(self, shape):\n        self.data = []\n        self.indices = []\n        self.indptr = [0]\n        self.shape = shape\n\n    def addColumnData(self, dataj, indicesj):\n        self.data.extend(dataj)\n        self.indices.extend(indicesj)\n        self.indptr.append(len(dataj) + self.indptr[-1])\n\n    def makeSparse(self, sparseFunc=csc_matrix):\n        self.data = np.array(self.data, dtype=\"d\")\n        self.indices = np.array(self.indices, dtype=\"d\")\n        
self.indptr = np.array(self.indptr, dtype=\"d\")\n        return sparseFunc((self.data, self.indices, self.indptr), shape=self.shape)\n\n\nclass CompxsRegion:\n    \"\"\"\n    Class for creating/tracking homogenized region information.\n\n    Notes\n    -----\n    Region objects are created from reading COMPXS files through\n    :py:meth:`~_CompxsIO.readWrite` and connected to the resulting library,\n    similar to instances of :py:class:`~armi.nuclearDataIO.xsNuclides.XSNuclide`. This allows instances\n    of :py:class:`~armi.nuclearDataIO.xsLibraries.CompxsLibrary` to read from and write to ``COMPXS`` files,\n    access region information by name, and plot macroscopic cross sections from the homogenized regions.\n\n    The main attributes for an instance of `Region` are the macroscopic cross sections,\n    ``macros``, and the metadata. The metadata deals primarily with delayed neutron information\n    and use of the ``fileWideChi``, if that option is set.\n\n    See Also\n    --------\n    armi.nuclearDataIO.xsNuclides.XSNuclide\n\n    Examples\n    --------\n    >>> lib = compxs.readBinary(\"COMPXS\")\n    >>> lib.regions\n        <Region REG00>\n        <Region REG01>\n        <Region REG02>\n        ...\n        <Region RegNN>\n    >>> r0 = lib.regions[0]\n    >>> r10 = lib.regions[10]\n    >>> r0.isFissile\n        False\n    >>> r10.isFissile\n        True\n    >>> r10.macros.fission\n        array([0.01147095,  0.01006284,  0.0065597,  0.00660079,  0.005587,\n               ...\n               0.08920149,  0.13035864,  0.16192732]\n    \"\"\"\n\n    _primaryXS = (\"absorption\", \"total\", \"removal\", \"transport\", \"n2n\")\n\n    def __init__(self, lib, regionNumber):\n        self.container = lib\n        lib[regionNumber] = self\n        self.regionNumber = regionNumber\n        self.macros = XSCollection(parent=self)\n        self.metadata = self._getMetadata()\n\n    def __repr__(self):\n        return \"<{} {}>\".format(self.__class__.__name__, 
self.regionNumber)\n\n    def _getFileMetadata(self):\n        return self.container.compxsMetadata\n\n    def _getMetadata(self):\n        specs = RegionXSMetadata()\n        chiFlag = specs[\"fileWideChiFlag\"] = self._getFileMetadata()[\"fileWideChiFlag\"]\n        if chiFlag:\n            self.macros.chi = specs[\"fileWideChi\"] = self._getFileMetadata()[\"fileWideChi\"]\n        compFamiliesWithPrecursors = self._getFileMetadata()[\"compFamiliesWithPrecursors\"]\n        if compFamiliesWithPrecursors is not None and compFamiliesWithPrecursors.size:\n            specs[\"numPrecursorFamilies\"] = compFamiliesWithPrecursors[self.regionNumber]\n        else:\n            specs[\"numPrecursorFamilies\"] = 0\n\n        return specs\n\n    def initMetadata(self, groups):\n        \"\"\"Initialize the metadata for this region.\"\"\"\n        self.metadata = self._getMetadata()\n        for datum in REGIONXS_POWER_CONVERT_DIRECTIONAL_DIFF:\n            if \"Additive\" in datum:\n                quantity = 0.0\n            else:\n                quantity = 1.0\n            self.metadata[datum] = groups * [quantity]\n        for datum in COMPXS_POWER_CONVERSION_FACTORS:\n            self.metadata[datum] = 1.0\n\n    @property\n    def isFissile(self):\n        return self.macros.fission is not None\n\n    def allocateXS(self, numGroups):\n        r\"\"\"\n        Allocate the cross section arrays.\n\n        When reading in the cross sections from a COMPXS file, the cross sections are read\n        for each energy group, i.e. 
..math::\n\n            \\Sigma_{a,1},\\Sigma_{t,1},\\Sigma_{rem,1}, \\cdots,\n            \\Sigma_{a,2},\\Sigma_{t,2},\\Sigma_{rem,2}, \\cdots,\n            \\Sigma_{a,G},\\Sigma_{t,G{,\\Sigma_{rem,G}\n\n        Since the cross sections can not be read in with a single read command, the\n        arrays are allocated here to be populated later.\n\n        Scattering matrices are read in as columns of a sparse scattering matrix and\n        reconstructed after all energy groups have been read in.\n\n        See Also\n        --------\n        :py:meth:`makeScatteringMatrices`\n        \"\"\"\n        for xs in self._primaryXS:\n            self.macros[xs] = np.zeros(numGroups)\n\n        self.macros.totalScatter = _CompxsScatterMatrix((numGroups, numGroups))\n\n        if self.metadata[\"chiFlag\"]:\n            self.macros.fission = np.zeros(numGroups)\n            self.macros.nuSigF = np.zeros(numGroups)\n            self.macros.chi = np.zeros((numGroups, self.metadata[\"chiFlag\"]))\n\n        if self._getFileMetadata()[\"maxScatteringOrder\"]:\n            for scatterOrder in range(1, self._getFileMetadata()[\"maxScatteringOrder\"] + 1):\n                self.macros.higherOrderScatter[scatterOrder] = _CompxsScatterMatrix((numGroups, numGroups))\n\n        for datum in REGIONXS_POWER_CONVERT_DIRECTIONAL_DIFF:\n            self.metadata[datum] = (np.zeros(numGroups) if \"Additive\" in datum else np.ones(numGroups)).tolist()\n\n    def makeScatteringMatrices(self):\n        r\"\"\"\n        Create the sparse scattering matrix from components.\n\n        The scattering matrix :math:`S_{i,j}=\\Sigma_{s,i\\rightarrow j}` is read in\n        from the COMPXS as segments on each column in three parts: ..math::\n\n            XSCATU_J = \\lbrace S_{g', J}\\vert g'=J+NUP(J), J+NUP(J)-1, cdots, J+1\\rbrace\n\n            XSCATJ_J = S_{J,J}\n\n            XSCATD_J = \\lbrace S_{g', J}\\vert g'=J-1, J-2, \\cdots, J_NDN(J) \\rbrace\n\n        where :math:`NUP(J)` and 
:math:`NDN(J)` are the number of group that upscatter and\n        downscatter into energy group :math:`J`\n\n        See Also\n        --------\n        :py:class:`scipy.sparse.csc_matrix`\n        \"\"\"\n        self.macros.totalScatter = self.macros.totalScatter.makeSparse()\n        self.macros.totalScatter.eliminate_zeros()\n        if self._getFileMetadata()[\"maxScatteringOrder\"]:\n            for sctOrdr, sctObj in self.macros.higherOrderScatter.items():\n                self.macros.higherOrderScatter[sctOrdr] = sctObj.makeSparse()\n                self.macros.higherOrderScatter[sctOrdr].eliminate_zeros()\n\n    def getXS(self, interaction):\n        \"\"\"\n        Get the macroscopic cross sections for a specific interaction.\n\n        See Also\n        --------\n        :py:meth:`armi.nucDirectory.XSNuclide.getXS`\n        \"\"\"\n        return self.macros[interaction]\n\n    def merge(self, other):\n        \"\"\"Merge attributes of two homogenized Regions.\"\"\"\n        self.metadata = self.metadata.merge(other.metadata, self, other, \"COMPXS\", OSError)\n        self.macros.merge(other.macros)\n"
  },
  {
    "path": "armi/nuclearDataIO/cccc/dif3d.py",
    "content": "# Copyright 2023 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nModule for reading from and writing to DIF3D files, which are module dependent\nbinary inputs for the DIF3D code.\n\"\"\"\n\nfrom armi import runLog\nfrom armi.nuclearDataIO import cccc\n\nFILE_SPEC_2D_PARAMS = (\n    [\n        \"IPROBT\",\n        \"ISOLNT\",\n        \"IXTRAP\",\n        \"MINBSZ\",\n        \"NOUTMX\",\n        \"IRSTRT\",\n        \"LIMTIM\",\n        \"NUPMAX\",\n        \"IOSAVE\",\n        \"IOMEG1\",\n        \"INRMAX\",\n        \"NUMORP\",\n        \"IRETRN\",\n    ]\n    + [f\"IEDF{e}\" for e in range(1, 11)]\n    + [\n        \"NOUTBQ\",\n        \"I0FLUX\",\n        \"NOEDIT\",\n        \"NOD3ED\",\n        \"ISRHED\",\n        \"NSN\",\n        \"NSWMAX\",\n        \"NAPRX\",\n        \"NAPRXZ\",\n        \"NFMCMX\",\n        \"NXYSWP\",\n        \"NZSWP\",\n        \"ISYMF\",\n        \"NCMRZS\",\n        \"ISEXTR\",\n        \"NPNO\",\n        \"NXTR\",\n        \"IOMEG2\",\n        \"IFULL\",\n        \"NVFLAG\",\n        \"ISIMPL\",\n        \"IWNHFL\",\n        \"IPERT\",\n        \"IHARM\",\n    ]\n)\n\nFILE_SPEC_3D_PARAMS = [\n    \"EPS1\",\n    \"EPS2\",\n    \"EPS3\",\n    \"EFFK\",\n    \"FISMIN\",\n    \"PSINRM\",\n    \"POWIN\",\n    \"SIGBAR\",\n    \"EFFKQ\",\n    \"EPSWP\",\n] + [f\"DUM{e}\" for e in range(1, 21)]\n\nTITLE_RANGE = 11\n\n\nclass Dif3dData(cccc.DataContainer):\n    def __init__(self):\n        
cccc.DataContainer.__init__(self)\n\n        self.twoD = {e: None for e in FILE_SPEC_2D_PARAMS}\n        self.threeD = {e: None for e in FILE_SPEC_3D_PARAMS}\n        self.fourD = None\n        self.fiveD = None\n\n\nclass Dif3dStream(cccc.StreamWithDataContainer):\n    \"\"\"Tool to read and write DIF3D files.\"\"\"\n\n    @staticmethod\n    def _getDataContainer() -> Dif3dData:\n        return Dif3dData()\n\n    def _rwFileID(self) -> None:\n        \"\"\"\n        Record for file identification information.\n\n        The parameters are stored as a dictionary under the attribute `metadata`.\n        \"\"\"\n        with self.createRecord() as record:\n            for param in [\"HNAME\", \"HUSE1\", \"HUSE2\"]:\n                self._metadata[param] = record.rwString(self._metadata[param], 8)\n            self._metadata[\"VERSION\"] = record.rwInt(self._metadata[\"VERSION\"])\n\n    def _rw1DRecord(self) -> None:\n        \"\"\"\n        Record for problem title, storage, and dump specifications.\n\n        The parameters are stored as a dictionary under the attribute `metadata`.\n        \"\"\"\n        with self.createRecord() as record:\n            for i in range(TITLE_RANGE):\n                param = f\"TITLE{i}\"\n                self._metadata[param] = record.rwString(self._metadata[param], 8)\n            self._metadata[\"MAXSIZ\"] = record.rwInt(self._metadata[\"MAXSIZ\"])\n            self._metadata[\"MAXBLK\"] = record.rwInt(self._metadata[\"MAXBLK\"])\n            self._metadata[\"IPRINT\"] = record.rwInt(self._metadata[\"IPRINT\"])\n\n    def _rw2DRecord(self) -> None:\n        \"\"\"\n        Record for DIF3D integer control parameters.\n\n        The parameters are stored as a dictionary under the attribute `twoD`.\n        \"\"\"\n        with self.createRecord() as record:\n            for param in FILE_SPEC_2D_PARAMS:\n                self._data.twoD[param] = record.rwInt(self._data.twoD[param])\n\n    def _rw3DRecord(self) -> None:\n        
\"\"\"\n        Record for convergence criteria and other sundry floating point data (such as\n        k-effective).\n\n        The parameters are stored as a dictionary under the attribute `threeD`.\n        \"\"\"\n        with self.createRecord() as record:\n            for param in FILE_SPEC_3D_PARAMS:\n                self._data.threeD[param] = record.rwDouble(self._data.threeD[param])\n\n    def _rw4DRecord(self) -> None:\n        \"\"\"\n        Record for the optimum overrelaxation factors. This record is only present when\n        using DIF3D-FD and if `NUMORP` is greater than 0.\n\n        The parameters are stored as a dictionary under the attribute `fourD`. This\n        could be changed into a list in the future since this record represents groupwise\n        data.\n        \"\"\"\n        if self._data.twoD[\"NUMORP\"] != 0:\n            omegaParams = [f\"OMEGA{e}\" for e in range(1, self._data.twoD[\"NUMORP\"] + 1)]\n\n            with self.createRecord() as record:\n                # Initialize the record if we're reading\n                if self._data.fourD is None:\n                    self._data.fourD = {omegaParam: None for omegaParam in omegaParams}\n\n                for omegaParam in omegaParams:\n                    self._data.fourD[omegaParam] = record.rwDouble(self._data.fourD[omegaParam])\n\n    def _rw5DRecord(self) -> None:\n        \"\"\"\n        Record for the axial coarse mesh rebalancing boundaries. Coarse mesh balancing is\n        disabled in DIF3D-VARIANT, so this record is only relevant for DIF3D-Nodal. 
This\n        record is only present if `NCMRZS` is greater than 0.\n\n        The parameters are stored as a dictionary under the attribute `fiveD`.\n        \"\"\"\n        if self._data.twoD[\"NCMRZS\"] != 0:\n            zcmrcParams = [f\"ZCMRC{e}\" for e in range(1, self._data.twoD[\"NCMRZS\"] + 1)]\n            nzintsParams = [f\"NZINTS{e}\" for e in range(1, self._data.twoD[\"NCMRZS\"] + 1)]\n\n            with self.createRecord() as record:\n                # Initialize the record if we're reading\n                if self._data.fiveD is None:\n                    self._data.fiveD = {zcmrcParam: None for zcmrcParam in zcmrcParams}\n                    self._data.fiveD.update({nzintsParam: None for nzintsParam in nzintsParams})\n\n                for zcmrcParam in zcmrcParams:\n                    self._data.fiveD[zcmrcParam] = record.rwDouble(self._data.fiveD[zcmrcParam])\n                for nzintsParam in nzintsParams:\n                    self._data.fiveD[nzintsParam] = record.rwInt(self._data.fiveD[nzintsParam])\n\n    def readWrite(self):\n        \"\"\"Reads or writes metadata and data from the five records of the DIF3D binary file.\n\n        .. impl:: Tool to read and write DIF3D files.\n            :id: I_ARMI_NUCDATA_DIF3D\n            :implements: R_ARMI_NUCDATA_DIF3D\n\n            The reading and writing of the DIF3D binary file is performed using\n            :py:class:`StreamWithDataContainer <.cccc.StreamWithDataContainer>`\n            from the :py:mod:`~armi.nuclearDataIO.cccc` package. This class\n            allows for the reading and writing of CCCC binary files, processing\n            one record at a time using subclasses of the :py:class:`IORecord\n            <.cccc.IORecord>`. Each record in a CCCC binary file consists of\n            words that represent integers (short or long), floating-point\n            numbers (single or double precision), or strings of data. 
One or\n            more of these words are parsed one at a time by the reader. Multiple\n            words processed together have meaning, such as such as groupwise\n            overrelaxation factors. While reading, the data is stored in a\n            Python dictionary as an attribute on the object, one for each\n            record. The keys in each dictionary represent the parsed grouping of\n            words in the records; for example, for the 4D record (stored as the\n            attribute ``fourD``), each groupwise overrelaxation factor is stored\n            as the key ``OMEGA{i}``, where ``i`` is the group number. See\n            :need:`I_ARMI_NUCDATA` for more details on the general\n            implementation.\n\n            Each record is also embedded with the record size at the beginning\n            and end of the record (always assumed to be present), which is used\n            for error checking at the end of processing each record.\n\n            The DIF3D reader processes the file identification record (stored as\n            the attribute ``_metadata``) and the five data records for the DIF3D\n            file, as defined in the specification for the file distributed with\n            the DIF3D software.\n\n            This class can also read and write an ASCII version of the DIF3D\n            file. While this format is not used by the DIF3D software, it can be\n            a useful representation for users to access the file in a\n            human-readable format.\n        \"\"\"\n        msg = f\"{'Reading' if 'r' in self._fileMode else 'Writing'} DIF3D binary data {self}\"\n        runLog.info(msg)\n\n        self._rwFileID()\n        self._rw1DRecord()\n        self._rw2DRecord()\n        self._rw3DRecord()\n        self._rw4DRecord()\n        self._rw5DRecord()\n\n\nreadBinary = Dif3dStream.readBinary\nreadAscii = Dif3dStream.readAscii\nwriteBinary = Dif3dStream.writeBinary\nwriteAscii = Dif3dStream.writeAscii\n"
  },
  {
    "path": "armi/nuclearDataIO/cccc/fixsrc.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nFIXSRC is a CCCC standard data file for storing multigroup fixed sources on a triangular mesh.\n\nCurrently, the FIXSRC writing capability assumes a gamma (not neutron) fixed source problem.\nThis enables photon transport problems. [CCCC-IV]_\n\"\"\"\n\nimport collections\n\nimport numpy as np\n\nfrom armi import runLog\nfrom armi.nuclearDataIO import cccc\n\n\ndef readBinary(fileName):\n    \"\"\"Read a binary FIXSRC file.\"\"\"\n    with FIXSRC(fileName, \"rb\", np.zeros((0, 0, 0, 0))) as fs:\n        fs.readWrite()\n    return fs.fixSrc\n\n\ndef writeBinary(fileName, fixSrcArray):\n    \"\"\"Write fixed source data to a FIXSRC file.\"\"\"\n    with FIXSRC(fileName, \"wb\", fixSrcArray) as fs:\n        fs.readWrite()\n\n\nclass FIXSRC(cccc.Stream):\n    \"\"\"Read or write a binary FIXSRC file from DIF3D fixed source input.\"\"\"\n\n    def __init__(self, fileName, fileMode, fixSrc):\n        \"\"\"\n        Initialize a gamma FIXSRC class for reading or writing a binary FIXSRC file for DIF3D gamma\n        fixed source input.\n\n        If the intent is to write a gamma FIXSRC file, the variable FIXSRC.fixSrc, which contains\n        to-be-written core-wide multigroup gamma fixed source data, is constructed from an existing\n        neutron RTFLUX file.\n\n        Parameters\n        ----------\n        fileName : str, optional\n            The file 
name of the RTFLUX/ATFLUX binary file to be read.\n\n        fileMode : str, optional\n            If 'wb', this class writes a FIXSRC binary file.\n            If 'rb', this class reads a preexisting FIXSRC binary file.\n\n        fixSrc : np.ndarray\n            Core-wide multigroup gamma fixed-source data.\n        \"\"\"\n        cccc.Stream.__init__(self, fileName, fileMode)\n\n        # copied from a sample FIXSRC output from \"type 19\" DIF3D input\n        self.label = \"FIXSRC                  \"\n        self.fileId = 1\n        self.fixSrc = fixSrc\n\n        ni, nj, nz, ng = self.fixSrc.shape\n        self.fc = collections.OrderedDict(\n            [\n                (\"itype\", 0),\n                (\"ndim\", 3),\n                (\"ngroup\", ng),\n                (\"ninti\", ni),\n                (\"nintj\", nj),\n                (\"nintk\", nz),\n                (\"idists\", 1),\n                (\"ndcomp\", 1),\n                (\"nscomp\", 0),\n                (\"nedgi\", 0),\n                (\"nedgj\", 0),\n                (\"nedjk\", 0),\n                (\"nblok\", 1),\n            ]\n        )\n\n    def readWrite(self):\n        \"\"\"Read or write a binary FIXSRC file for DIF3D fixed source input.\"\"\"\n        runLog.info(\"{} gamma fixed source file {}\".format(\"Reading\" if \"r\" in self._fileMode else \"Writing\", self))\n\n        self._rwFileID()\n        self._rw1DRecord()\n\n        ng = self.fc[\"ngroup\"]\n        nz = self.fc[\"nintk\"]\n        for g in range(ng):\n            for z in range(nz):\n                self._rw3DRecord(g, z)\n\n    def _rwFileID(self):\n        \"\"\"Read file identification information.\"\"\"\n        with self.createRecord() as fileIdRecord:\n            self.label = fileIdRecord.rwString(self.label, 24)\n            self.fileId = fileIdRecord.rwInt(self.fileId)\n\n    def _rw1DRecord(self):\n        \"\"\"Read/write parameters from/to the FIXSRC 1D block (file control).\"\"\"\n        with 
self.createRecord() as record:\n            for var in self.fc.keys():\n                self.fc[var] = record.rwInt(self.fc[var])\n\n    def _rw3DRecord(self, g, z):\n        \"\"\"\n        Read/write fixed source data from 3D block records.\n\n        Parameters\n        ----------\n        g : int\n            The gamma energy group index.\n\n        z : int\n            The DIF3D axial node index.\n        \"\"\"\n        with self.createRecord() as record:\n            ni = self.fc[\"ninti\"]\n            nj = self.fc[\"nintj\"]\n\n            for j in range(nj):\n                for i in range(ni):\n                    self.fixSrc[i, j, z, g] = record.rwDouble(self.fixSrc[i, j, z, g])\n"
  },
  {
    "path": "armi/nuclearDataIO/cccc/gamiso.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nModule for reading GAMISO files which contains gamma cross section data.\n\nGAMISO is a binary file created by MC**2-v3 that contains multigroup microscopic gamma cross\nsections. GAMISO data is contained within a :py:class:`~armi.nuclearDataIO.xsLibraries.XSLibrary`.\n\n.. impl:: Tool to read and write GAMISO files.\n    :id: I_ARMI_NUCDATA_GAMISO\n    :implements: R_ARMI_NUCDATA_GAMISO\n\n    The majority of the functionality in this module is inherited from the\n    :py:mod:`~armi.nuclearDataIO.cccc.isotxs` module. See\n    :py:class:`~armi.nuclearDataIO.cccc.isotxs.IsotxsIO` and its associated\n    implementation :need:`I_ARMI_NUCDATA_ISOTXS` for more information. The only\n    difference from ISOTXS neutron data is a special treatment for gamma\n    velocities, which is done by overriding ``_rwLibraryEnergies``.\n\nSee [GAMSOR]_.\n\n.. [GAMSOR] Smith, M. A., Lee, C. H., and Hill, R. N. GAMSOR: Gamma Source Preparation and DIF3D\n            Flux Solution. United States: N. p., 2016. Web. doi:10.2172/1343095. 
`On OSTI\n            <https://www.osti.gov/biblio/1343095-gamsor-gamma-source-preparation-dif3d-flux-solution>`__\n\"\"\"\n\nfrom armi import runLog\nfrom armi.nuclearDataIO import xsLibraries, xsNuclides\nfrom armi.nuclearDataIO.cccc import isotxs\n\n\ndef compare(lib1, lib2):\n    \"\"\"Compare two XSLibraries, and return True if equal, or False if not.\"\"\"\n    equal = True\n    # first check the lib properties (also need to unlock to prevent from getting an exception).\n    equal &= xsLibraries.compareXSLibraryAttribute(lib1, lib2, \"gammaEnergyUpperBounds\")\n    # compare the meta data\n    equal &= lib1.gamisoMetadata.compare(lib2.gamisoMetadata, lib1, lib2)\n    # check the nuclides\n    for nucName in set(lib1.nuclideLabels + lib2.nuclideLabels):\n        nuc1 = lib1.get(nucName, None)\n        nuc2 = lib2.get(nucName, None)\n        if nuc1 is None or nuc2 is None:\n            continue\n        equal &= compareNuclideXS(nuc1, nuc2)\n    return equal\n\n\ndef compareNuclideXS(nuc1, nuc2):\n    equal = nuc1.gamisoMetadata.compare(nuc2.gamisoMetadata, nuc1.container, nuc2.container)\n    equal &= nuc1.gammaXS.compare(nuc2.gammaXS, [])\n    return equal\n\n\ndef addDummyNuclidesToLibrary(lib, dummyNuclides):\n    \"\"\"\n    This method adds DUMMY nuclides to the current GAMISO library.\n\n    Parameters\n    ----------\n    lib : obj\n        GAMISO library object\n\n    dummyNuclides: list\n        List of DUMMY nuclide objects that will be copied and added to the GAMISO file\n\n    Notes\n    -----\n    Since MC2-3 does not write DUMMY nuclide information for GAMISO files, this is necessary to\n    provide a consistent set of nuclide-level data across all the nuclides in a\n    :py:class:`~armi.nuclearDataIO.xsLibraries.XSLibrary`.\n    \"\"\"\n    if not dummyNuclides:\n        runLog.important(\"No dummy nuclide data provided to be added to {}\".format(lib))\n        return False\n    elif len(lib.xsIDs) > 1:\n        runLog.warning(\n            
\"Cannot add dummy nuclide data to GAMISO library {} containing data for more than 1 XS ID.\".format(lib)\n        )\n        return False\n\n    dummyNuclideKeysAddedToLibrary = []\n    for dummyNuclide in dummyNuclides:\n        dummyKey = dummyNuclide.nucLabel\n        if len(lib.xsIDs):\n            dummyKey += lib.xsIDs[0]\n        if dummyKey in lib:\n            continue\n\n        runLog.debug(\"Adding {} nuclide data to {}\".format(dummyKey, lib))\n        newDummy = xsNuclides.XSNuclide(lib, dummyKey)\n        # Copy gamiso metadata from the isotxs metadata of the given dummy nuclide\n        for kk, vv in dummyNuclide.isotxsMetadata.items():\n            if kk in [\"jj\", \"jband\"]:\n                # clear out data here before populating with gamma groups\n                newDummy.gamisoMetadata[kk] = {}\n                for gNum in range(lib.gamisoMetadata[\"numGroups\"]):\n                    for bNum in range(lib.gamisoMetadata[\"maxScatteringBlocks\"]):\n                        newDummy.gamisoMetadata[kk][(gNum, bNum)] = 1\n            else:\n                newDummy.gamisoMetadata[kk] = vv\n        lib[dummyKey] = newDummy\n        dummyNuclideKeysAddedToLibrary.append(dummyKey)\n\n    return any(dummyNuclideKeysAddedToLibrary)\n\n\nclass _GamisoIO(isotxs.IsotxsIO):\n    \"\"\"\n    A reader/writer for GAMISO data files.\n\n    Notes\n    -----\n    The GAMISO file format is identical to ISOTXS.\n    \"\"\"\n\n    def _getFileMetadata(self):\n        return self._lib.gamisoMetadata\n\n    def _getNuclideIO(self):\n        return _GamisoNuclideIO\n\n    def _rwMessage(self):\n        runLog.debug(\"{} GAMISO data {}\".format(\"Reading\" if \"r\" in self._fileMode else \"Writing\", self))\n\n    def _rwLibraryEnergies(self, record):\n        # neutron velocity (cm/s)\n        metadata = self._getFileMetadata()\n        metadata[\"gammaVelocity..NOT\"] = record.rwList(\n            metadata[\"gammaVelocity..NOT\"], \"float\", 
self._metadata[\"numGroups\"]\n        )\n        # read emax for each group in descending eV.\n        self._lib.gammaEnergyUpperBounds = record.rwMatrix(\n            self._lib.gammaEnergyUpperBounds, self._metadata[\"numGroups\"]\n        )\n\n\nreadBinary = _GamisoIO.readBinary\nreadAscii = _GamisoIO.readAscii\nwriteBinary = _GamisoIO.writeBinary\nwriteAscii = _GamisoIO.writeAscii\n\n\nclass _GamisoNuclideIO(isotxs._IsotxsNuclideIO):\n    \"\"\"\n    A reader/writer for GAMISO nuclides.\n\n    Notes\n    -----\n    The GAMISO file format is identical to ISOTXS.\n    \"\"\"\n\n    _FILE_LABEL = \"GAMISO\"\n\n    def _getFileMetadata(self):\n        return self._lib.gamisoMetadata\n\n    def _getNuclideMetadata(self):\n        return self._nuclide.gamisoMetadata\n\n    def _getMicros(self):\n        return self._nuclide.gammaXS\n"
  },
  {
    "path": "armi/nuclearDataIO/cccc/geodst.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nRead/write a CCCC GEODST geometry definition file.\n\nGEODST files define fine and coarse meshes and mappings between\nregion numbers and mesh indices. They also store some zone\ninformation.\n\nFile format definition is from [CCCC-IV]_.\n\nExamples\n--------\n>>> geo = geodst.readBinary(\"GEODST\")\n>>> print(geo.xmesh)\n>>> geo.zmesh[-1] *= 2  # make a modification to data\n>>> geodst.writeBinary(geo, \"GEODST2\")\n\n\"\"\"\n\nimport numpy as np\n\nfrom armi.nuclearDataIO import cccc\n\nGEODST = \"GEODST\"\n\n# See CCCC-IV documentation for definitions\nFILE_SPEC_1D_KEYS = (\n    \"IGOM\",\n    \"NZONE\",\n    \"NREG\",\n    \"NZCL\",\n    \"NCINTI\",\n    \"NCINTJ\",\n    \"NCINTK\",\n    \"NINTI\",\n    \"NINTJ\",\n    \"NINTK\",\n    \"IMB1\",\n    \"IMB2\",\n    \"JMB1\",\n    \"JMB2\",\n    \"KMB1\",\n    \"KMB2\",\n    \"NBS\",\n    \"NBCS\",\n    \"NIBCS\",\n    \"NZWBB\",\n    \"NTRIAG\",\n    \"NRASS\",\n    \"NTHPT\",\n    \"NGOP1\",\n    \"NGOP2\",\n    \"NGOP3\",\n    \"NGOP4\",\n)\n\n\nclass GeodstData(cccc.DataContainer):\n    \"\"\"\n    Data representation that can be read from or written to a GEODST file.\n\n    The region numbers in this data structure START AT 1, not zero! 
Thus\n    you must always remember the off-by-one conversion when comparing\n    with list or matrix indices.\n\n    Notes\n    -----\n    Analogous to a IsotxsLibrary for ISTOXS files.\n    \"\"\"\n\n    def __init__(self):\n        cccc.DataContainer.__init__(self)\n\n        # 4D data\n        self.xmesh = None\n        self.ymesh = None\n        self.zmesh = None\n        self.iintervals = None\n        self.jintervals = None\n        self.kintervals = None\n\n        # 5D data\n        self.regionVolumes = None\n        self.bucklings = None\n        self.boundaryConstants = None\n        self.internalBlackBoundaryConstants = None\n        self.zonesWithBlackAbs = None\n        self.zoneClassifications = None\n        self.regionZoneNumber = None\n\n        # 6d\n        self.coarseMeshRegions = None\n\n        # 7d\n        self.fineMeshRegions = None\n\n\nclass GeodstStream(cccc.StreamWithDataContainer):\n    \"\"\"\n    Stream for reading to/writing from with GEODST data.\n\n    Parameters\n    ----------\n    geom : GeodstData\n        Data structure\n    fileName: str\n        path to geodst file\n    fileMode: str\n        string indicating if ``fileName`` is being read or written, and\n        in ascii or binary format\n    \"\"\"\n\n    @staticmethod\n    def _getDataContainer() -> GeodstData:\n        return GeodstData()\n\n    def readWrite(self):\n        \"\"\"\n        Step through the structure of a GEODST file and read/write it.\n\n        Logic to control which records will be present is here, which\n        comes directly off the File specification.\n\n        .. impl:: Tool to read and write GEODST files.\n            :id: I_ARMI_NUCDATA_GEODST\n            :implements: R_ARMI_NUCDATA_GEODST\n\n            Reading and writing GEODST files is performed using the general\n            nuclear data I/O functionalities described in\n            :need:`I_ARMI_NUCDATA`. 
Reading/writing a GEODST file is performed\n            through the following steps:\n\n            #. Read/write file ID record\n\n            #. Read/write file specifications on 1D record.\n\n            #. Based on the geometry type (``IGOM``), one of following records\n               are read/written:\n\n                * Slab (1), cylinder (3), or sphere (3): Read/write 1-D coarse\n                  mesh boundaries and fine mesh intervals.\n                * X-Y (6), R-Z (7), Theta-R (8), uniform triangular (9),\n                  hexagonal (10), or R-Theta (11): Read/write 2-D coarse mesh\n                  boundaries and fine mesh intervals.\n                * R-Theta-Z (12, 15), R-Theta-Alpha (13, 16), X-Y-Z (14),\n                  uniform triangular-Z (17), hexagonal-Z(18): Read/write 3-D\n                  coarse mesh boundaries and fine mesh intervals.\n\n            #. If the geometry is not zero-dimensional (``IGOM`` > 0) and\n               buckling values are specified (``NBS`` > 0): Read/write geometry\n               data from 5D record.\n\n            #. If the geometry is not zero-dimensional (``IGOM`` > 0) and region\n               assignments are coarse-mesh-based (``NRASS`` = 0): Read/write\n               region assignments to coarse mesh interval.\n\n            #. 
If the geometry is not zero-dimensional (``IGOM`` > 0) and region\n               assignments are fine-mesh-based (``NRASS`` = 1): Read/write\n               region assignments to fine mesh interval.\n        \"\"\"\n        self._rwFileID()\n        self._rw1DRecord()\n        geomType = self._metadata[\"IGOM\"]\n        if 0 < geomType <= 3:\n            self._rw2DRecord()\n        elif 6 <= geomType <= 11:\n            self._rw3DRecord()\n        elif geomType >= 12:\n            self._rw4DRecord()\n\n        if geomType > 0 and self._metadata[\"NBS\"] > 0:\n            self._rw5DRecord()\n\n        if geomType > 0:\n            if self._metadata[\"NRASS\"] == 0:\n                self._rw6DRecord()\n            elif self._metadata[\"NRASS\"] == 1:\n                self._rw7DRecord()\n\n    def _rwFileID(self):\n        \"\"\"\n        Read/write file id record.\n\n        Notes\n        -----\n        The number 28 was actually obtained from\n        a hex editor and may be code specific.\n        \"\"\"\n        with self.createRecord() as record:\n            self._metadata[\"label\"] = record.rwString(self._metadata[\"label\"], 28)\n\n    def _rw1DRecord(self):\n        \"\"\"\n        Read/write File specifications on 1D record.\n\n        This record contains 27 integers.\n        \"\"\"\n        with self.createRecord() as record:\n            for key in FILE_SPEC_1D_KEYS:\n                self._metadata[key] = record.rwInt(self._metadata[key])\n\n    def _rw2DRecord(self):\n        \"\"\"Read/write 1-D coarse mesh boundaries and fine mesh intervals.\"\"\"\n        with self.createRecord() as record:\n            self._data.xmesh = record.rwList(self._data.xmesh, \"double\", self._metadata[\"NCINTI\"] + 1)\n            self._data.iintervals = record.rwList(self._data.iintervals, \"int\", self._metadata[\"NCINTI\"])\n\n    def _rw3DRecord(self):\n        \"\"\"Read/write 2-D coarse mesh boundaries and fine mesh intervals.\"\"\"\n        with 
self.createRecord() as record:\n            self._data.xmesh = record.rwList(self._data.xmesh, \"double\", self._metadata[\"NCINTI\"] + 1)\n            self._data.ymesh = record.rwList(self._data.ymesh, \"double\", self._metadata[\"NCINTJ\"] + 1)\n            self._data.iintervals = record.rwList(self._data.iintervals, \"int\", self._metadata[\"NCINTI\"])\n            self._data.jintervals = record.rwList(self._data.jintervals, \"int\", self._metadata[\"NCINTJ\"])\n\n    def _rw4DRecord(self):\n        \"\"\"Read/write 3-D coarse mesh boundaries and fine mesh intervals.\"\"\"\n        with self.createRecord() as record:\n            self._data.xmesh = record.rwList(self._data.xmesh, \"double\", self._metadata[\"NCINTI\"] + 1)\n            self._data.ymesh = record.rwList(self._data.ymesh, \"double\", self._metadata[\"NCINTJ\"] + 1)\n            self._data.zmesh = record.rwList(self._data.zmesh, \"double\", self._metadata[\"NCINTK\"] + 1)\n            self._data.iintervals = record.rwList(self._data.iintervals, \"int\", self._metadata[\"NCINTI\"])\n            self._data.jintervals = record.rwList(self._data.jintervals, \"int\", self._metadata[\"NCINTJ\"])\n            self._data.kintervals = record.rwList(self._data.kintervals, \"int\", self._metadata[\"NCINTK\"])\n\n    def _rw5DRecord(self):\n        \"\"\"Read/write Geometry data from 5D record.\"\"\"\n        with self.createRecord() as record:\n            self._data.regionVolumes = record.rwList(self._data.regionVolumes, \"float\", self._metadata[\"NREG\"])\n            self._data.bucklings = record.rwList(self._data.bucklings, \"float\", self._metadata[\"NBS\"])\n            self._data.boundaryConstants = record.rwList(self._data.boundaryConstants, \"float\", self._metadata[\"NBCS\"])\n            self._data.internalBlackBoundaryConstants = record.rwList(\n                self._data.internalBlackBoundaryConstants,\n                \"float\",\n                self._metadata[\"NIBCS\"],\n            )\n        
    self._data.zonesWithBlackAbs = record.rwList(self._data.zonesWithBlackAbs, \"int\", self._metadata[\"NZWBB\"])\n            self._data.zoneClassifications = record.rwList(\n                self._data.zoneClassifications, \"int\", self._metadata[\"NZONE\"]\n            )\n            self._data.regionZoneNumber = record.rwList(self._data.regionZoneNumber, \"int\", self._metadata[\"NREG\"])\n\n    def _rw6DRecord(self):\n        \"\"\"Read/write region assignments to coarse mesh interval.\"\"\"\n        if self._data.coarseMeshRegions is None:\n            # initialize all-zeros here before reading now that we\n            # have the matrix dimension metadata available.\n            self._data.coarseMeshRegions = np.zeros(\n                (\n                    self._metadata[\"NCINTI\"],\n                    self._metadata[\"NCINTJ\"],\n                    self._metadata[\"NCINTK\"],\n                ),\n                dtype=np.int32,\n            )\n        for ki in range(self._metadata[\"NCINTK\"]):\n            with self.createRecord() as record:\n                self._data.coarseMeshRegions[:, :, ki] = record.rwIntMatrix(\n                    self._data.coarseMeshRegions[:, :, ki],\n                    self._metadata[\"NCINTJ\"],\n                    self._metadata[\"NCINTI\"],\n                )\n\n    def _rw7DRecord(self):\n        \"\"\"Read/write region assignments to fine mesh interval.\"\"\"\n        if self._data.fineMeshRegions is None:\n            # initialize all-zeros here before reading now that we\n            # have the matrix dimension metadata available.\n            self._data.fineMeshRegions = np.zeros(\n                (\n                    self._metadata[\"NINTI\"],\n                    self._metadata[\"NINTJ\"],\n                    self._metadata[\"NINTK\"],\n                ),\n                dtype=np.int16,\n            )\n        for ki in range(self._metadata[\"NINTK\"]):\n            with self.createRecord() as record:\n     
           self._data.fineMeshRegions[:, :, ki] = record.rwIntMatrix(\n                    self._data.fineMeshRegions[:, :, ki],\n                    self._metadata[\"NINTJ\"],\n                    self._metadata[\"NINTI\"],\n                )\n\n\nreadBinary = GeodstStream.readBinary\nreadAscii = GeodstStream.readAscii\nwriteBinary = GeodstStream.writeBinary\nwriteAscii = GeodstStream.writeAscii\n"
  },
  {
    "path": "armi/nuclearDataIO/cccc/isotxs.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis module reads and writes ISOTXS files.\n\nISOTXS is a binary file that contains multigroup microscopic cross sections.\nISOTXS stands for  *Isotope Cross Sections*.\n\nISOTXS files are often created by a lattice physics code such as MC2 or DRAGON and\nused as input to a global flux solver such as DIF3D.\n\nThis module implements reading and writing of the\nISOTXS file format, consistent with [CCCC-IV]_.\n\nExamples\n--------\n>>> from armi.nuclearDataIO.cccc import isotxs\n>>> myLib = isotxs.readBinary(\"ISOTXS-ref\")\n>>> nuc = myLib.getNuclide(\"U235\", \"AA\")\n>>> fis5 = nuc.micros.fission[5]\n>>> scat = nuc.micros.scatter[(0, 5, 6, 1)]  # 1st order elastic scatter from group 5->6\n>>> nuc.micros.fission[7] = fis5 * 1.01  # you can modify the isotxs too.\n>>> captureEnergy = nuc.isotxsMetadata[\"ecapt\"]\n>>> isotxs.writeBinary(myLib, \"ISOTXS-modified\")\n\n\"\"\"\n\nimport itertools\nimport traceback\n\nimport numpy as np\nfrom scipy import sparse\n\nfrom armi import runLog\nfrom armi.nuclearDataIO import cccc, xsLibraries, xsNuclides\nfrom armi.utils import properties\n\n# scattering block definitions from ISOTXS\n# The definition is:\nTOTAL_SCATTER = 0  # 000 + NN = total scattering for Legendre Order NN\nELASTIC_SCATTER = 100  # 100 + NN, ELASTIC SCATTERING\nINELASTIC_SCATTER = 200  # 200 + NN, INELASTIC SCATTERING\nN2N_SCATTER = 300  # 300 + 
NN, (N,2N) SCATTERING\n\n\ndef compareSet(fileNames, tolerance=0.0, verbose=False):\n    \"\"\"\n    Takes a list of strings and reads all binaries with that name comparing them in all combinations.\n\n    Notes\n    -----\n    useful for finding mcc bugs when you want to compare a series of very similar isotxs outputs\n    Verbose gets VERY long\n    \"\"\"\n    comparisons = []\n\n    xsLibs = [readBinary(fileName) for fileName in fileNames]\n    for thisXSLib, thatXSLib in itertools.combinations(xsLibs, 2):\n        # all unique combinations with 2 items\n        runLog.info(\"\\n*****\\n*****comparing {} and {}\\n*****\".format(thisXSLib, thatXSLib))\n        comparisons.append((compare(thisXSLib, thatXSLib, tolerance, verbose), thisXSLib, thatXSLib))\n\n    sameFileNames = \"\\n\"\n    for comparison in comparisons:\n        if comparison[0]:\n            sameFileNames += \"\\t{} and {}\\n\".format(comparison[1], comparison[2])\n\n    sameFileNames = sameFileNames + \"None were the same\" if sameFileNames == \"\\n\" else sameFileNames\n    runLog.info(\"the following libraries are the same within the specified tolerance:{}\".format(sameFileNames))\n\n\ndef compare(lib1, lib2, tolerance=0.0, verbose=False):\n    \"\"\"\n    Compare two XSLibraries, and return True if equal, or False if not.\n\n    Notes\n    -----\n    Tolerance allows the user to ignore small changes that may be caused by\n    small library differences or floating point calculations\n    the closer to zero the more differences will be shown\n    10**-5 is a good tolerance to use if not using default.\n    Verbose shows the XS matrixes that are not equal\n    \"\"\"\n    equal = True\n    # first check the lib properties (also need to unlock to prevent from getting an exception).\n    equal &= xsLibraries.compareLibraryNeutronEnergies(lib1, lib2, tolerance)\n    # compare the meta data\n    equal &= lib1.isotxsMetadata.compare(lib2.isotxsMetadata, lib1, lib2)\n    # check the nuclides\n    for 
nucName in set(lib1.nuclideLabels + lib2.nuclideLabels):\n        nuc1 = lib1.get(nucName, None)\n        nuc2 = lib2.get(nucName, None)\n        if nuc1 is None or nuc2 is None:\n            warning = \"Nuclide {:>20} in library {} is not present in library {} and cannot be compared\"\n            if nuc1:\n                runLog.warning(warning.format(nuc1, 1, 2))\n            if nuc2:\n                runLog.warning(warning.format(nuc2, 2, 1))\n            equal = False\n            continue\n        nucEqual = compareNuclideXS(nuc1, nuc2, tolerance, verbose, nucName)\n        equal &= nucEqual\n    return equal\n\n\ndef compareNuclideXS(nuc1, nuc2, tolerance=0.0, verbose=False, nucName=\"\"):\n    equal = nuc1.isotxsMetadata.compare(nuc2.isotxsMetadata, nuc1, nuc2)\n    equal &= nuc1.micros.compare(nuc2.micros, [], tolerance, verbose, nucName=nucName)\n    return equal\n\n\ndef addDummyNuclidesToLibrary(lib, dummyNuclides):\n    \"\"\"\n    This method adds DUMMY nuclides to the current ISOTXS library.\n\n    Parameters\n    ----------\n    lib : obj\n        ISOTXS library object\n\n    dummyNuclides: list\n        List of DUMMY nuclide objects that will be copied and added to the GAMISO file\n\n    Notes\n    -----\n    Since MC2-3 does not write DUMMY nuclide information for GAMISO files, this is necessary to provide a\n    consistent set of nuclide-level data across all the nuclides in a\n    :py:class:`~armi.nuclearDataIO.xsLibraries.XSLibrary`.\n    \"\"\"\n    if not dummyNuclides:\n        runLog.important(\"No dummy nuclide data provided to be added to {}\".format(lib))\n        return False\n    elif len(lib.xsIDs) > 1:\n        runLog.warning(\n            \"Cannot add dummy nuclide data to ISOTXS library {} containing data for more than 1 XS ID.\".format(lib)\n        )\n        return False\n\n    dummyNuclideKeysAddedToLibrary = []\n    for dummyNuclide in dummyNuclides:\n        dummyKey = dummyNuclide.nucLabel\n        if len(lib.xsIDs):\n       
     dummyKey += lib.xsIDs[0]\n        if dummyKey in lib:\n            continue\n\n        newDummy = xsNuclides.XSNuclide(lib, dummyKey)\n        newDummy.micros = dummyNuclide.micros\n        # Copy isotxs metadata from the isotxs metadata of the given dummy nuclide\n        for kk, vv in dummyNuclide.isotxsMetadata.items():\n            if kk in [\"jj\", \"jband\"]:\n                newDummy.isotxsMetadata[kk] = {}\n                for mm in vv:\n                    newDummy.isotxsMetadata[kk][mm] = 1\n            else:\n                newDummy.isotxsMetadata[kk] = vv\n\n        lib[dummyKey] = newDummy\n        dummyNuclideKeysAddedToLibrary.append(dummyKey)\n\n    return any(dummyNuclideKeysAddedToLibrary)\n\n\nclass IsotxsIO(cccc.Stream):\n    \"\"\"\n    A semi-abstract stream for reading and writing to a :py:class:`~armi.nuclearDataIO.isotxs.Isotxs`.\n\n    Notes\n    -----\n    This is a bit of a special case compared to most other CCCC files because of the special\n    nuclide-level container in addition to the XSLibrary container.\n\n    The :py:meth:`~armi.nuclearDataIO.isotxs.IsotxsIO.readWrite` defines the ISOTXS file structure as\n    specified in http://t2.lanl.gov/codes/transx-hyper/isotxs.html.\n    \"\"\"\n\n    _FILE_LABEL = \"ISOTXS\"\n\n    def __init__(self, fileName, lib, fileMode, getNuclideFunc):\n        cccc.Stream.__init__(self, fileName, fileMode)\n        self._lib = lib\n        self._metadata = self._getFileMetadata()\n        self._metadata.fileNames.append(fileName)\n        self._getNuclide = getNuclideFunc\n\n    def _getFileMetadata(self):\n        return self._lib.isotxsMetadata\n\n    def _getNuclideIO(self):\n        return _IsotxsNuclideIO\n\n    @classmethod\n    def _read(cls, fileName, fileMode):\n        lib = xsLibraries.IsotxsLibrary()\n        return cls._readWrite(\n            lib,\n            fileName,\n            fileMode,\n            lambda containerKey: xsNuclides.XSNuclide(lib, containerKey),\n        
)\n\n    @classmethod\n    def _write(cls, lib, fileName, fileMode):\n        return cls._readWrite(lib, fileName, fileMode, lambda containerKey: lib[containerKey])\n\n    @classmethod\n    def _readWrite(cls, lib, fileName, fileMode, getNuclideFunc):\n        with cls(fileName, lib, fileMode, getNuclideFunc) as rw:\n            rw.readWrite()\n        return lib\n\n    def _rwMessage(self):\n        runLog.debug(\"{} ISOTXS data {}\".format(\"Reading\" if \"r\" in self._fileMode else \"Writing\", self))\n\n    def _updateFileLabel(self):\n        \"\"\"\n        Update the file label when reading in the ISOTXS-like file if it differs from its expected value.\n\n        Notes\n        -----\n        This occurs when MC2-3 is preparing GAMISO files.\n        The merging of ISOTXS-like files fail if the labels are not unique (i.e. merging ISOTXS into GAMISO with\n        each file having a file label of `ISOTXS`.\n        \"\"\"\n        if self._metadata[\"label\"] != self._FILE_LABEL:\n            runLog.debug(\n                \"File label in {} is not the expected type. Updating the label from {} to {}\".format(\n                    self, self._metadata[\"label\"], self._FILE_LABEL\n                )\n            )\n            self._metadata[\"label\"] = self._FILE_LABEL\n\n    def readWrite(self):\n        \"\"\"Read and write ISOTSX file.\n\n        .. impl:: Tool to read and write ISOTXS files.\n            :id: I_ARMI_NUCDATA_ISOTXS\n            :implements: R_ARMI_NUCDATA_ISOTXS\n\n            Reading and writing ISOTXS files is performed using the general\n            nuclear data I/O functionalities described in\n            :need:`I_ARMI_NUCDATA`. Reading/writing a ISOTXS file is performed\n            through the following steps:\n\n            #. Read/write file ID record\n            #. 
Read/write file 1D record, which includes:\n\n                * Number of energy groups (``NGROUP``)\n                * Maximum number of up-scatter groups (``MAXUP``)\n                * Maximum number of down-scatter groups (``MAXDN``)\n                * Maximum scattering order (``MAXORD``)\n                * File-wide specification on fission spectrum type, i.e. vector\n                  or matrix (``ICHIST``)\n                * Maximum number of blocks of scattering data (``MSCMAX``)\n                * Subblocking control for scatter matrices (``NSBLOK``)\n\n            #. Read/write file 2D record, which includes:\n\n                * Library IDs for each isotope (``HSETID(I)``)\n                * Isotope names (``HISONM(I)``)\n                * Global fission spectrum (``CHI(J)``) if file-wide spectrum is\n                  specified (``ICHIST`` = 1)\n                * Energy group structure (``EMAX(J)`` and ``EMIN``)\n                * Locations of each nuclide record in the file (``LOCA(I)``)\n\n                    .. note::\n\n                        The offset data is not read from the binary file because\n                        the ISOTXS reader can dynamically calculate the offset\n                        itself. Therefore, during a read operation, this data is\n                        ignored.\n\n            #. Read/write file 4D record for each nuclide, which includes\n               isotope-dependent, group-independent data.\n            #. Read/write file 5D record for each nuclide, which includes\n               principal cross sections.\n            #. Read/write file 6D record for each nuclide, which includes\n               fission spectrum if it is flagged as a matrix (``ICHI`` > 1).\n            #. 
Read/write file 7D record for each nuclide, which includes the\n               scattering matrices.\n        \"\"\"\n        self._rwMessage()\n        properties.unlockImmutableProperties(self._lib)\n        try:\n            self._fileID()\n            numNucs = self._rw1DRecord(len(self._lib))\n            nucNames = self._rw2DRecord(numNucs, self._lib.nuclideLabels)\n            if self._metadata[\"fileWideChiFlag\"] > 1:\n                self._rw3DRecord()\n            for nucLabel in nucNames:\n                # read nuclide name, other global stuff from the ISOTXS library\n                nuc = self._getNuclide(nucLabel)\n                if \"r\" in self._fileMode:\n                    # on add nuclides when reading\n                    self._lib[nucLabel] = nuc\n                nuclideIO = self._getNuclideIO()(nuc, self, self._lib)\n                nuclideIO.rwNuclide()\n        except Exception:\n            raise OSError(\"Failed to read/write {} \\n\\n\\n{}\".format(self, traceback.format_exc()))\n        finally:\n            properties.lockImmutableProperties(self._lib)\n\n    def _fileID(self):\n        with self.createRecord() as record:\n            self._metadata[\"label\"] = record.rwString(self._metadata[\"label\"], 24)\n            self._metadata[\"fileId\"] = record.rwInt(self._metadata[\"fileId\"])\n            self._updateFileLabel()\n\n    def _rw1DRecord(self, numNucs):\n        with self.createRecord() as record:\n            self._metadata[\"numGroups\"] = record.rwInt(self._metadata[\"numGroups\"])\n            numNucs = record.rwInt(numNucs)\n            self._metadata[\"maxUpScatterGroups\"] = record.rwInt(self._metadata[\"maxUpScatterGroups\"])\n            self._metadata[\"maxDownScatterGroups\"] = record.rwInt(self._metadata[\"maxDownScatterGroups\"])\n            self._metadata[\"maxScatteringOrder\"] = record.rwInt(self._metadata[\"maxScatteringOrder\"])\n            self._metadata[\"fileWideChiFlag\"] = 
record.rwInt(self._metadata[\"fileWideChiFlag\"])\n            self._metadata[\"maxScatteringBlocks\"] = record.rwInt(self._metadata[\"maxScatteringBlocks\"])\n            self._metadata[\"subblockingControl\"] = record.rwInt(self._metadata[\"subblockingControl\"])\n        return numNucs\n\n    def _rw2DRecord(self, numNucs, nucNames):\n        \"\"\"\n        Read 2D ISOTXS record.\n\n        Notes\n        -----\n        Contains isotope names, global chi distribution, energy group structure, and locations of\n        each nuclide record in the file\n        \"\"\"\n        with self.createRecord() as record:\n            # skip \"merger   test...\" string\n            self._metadata[\"libraryLabel\"] = record.rwString(self._metadata[\"libraryLabel\"], 12 * 8)\n            nucNames = record.rwList(nucNames, \"string\", numNucs, 8)\n            if self._metadata[\"fileWideChiFlag\"] == 1:\n                # file-wide chi distribution vector listed here.\n                self._metadata[\"chi\"] = record.rwMatrix(self._metadata[\"chi\"], self._metadata[\"numGroups\"])\n            self._rwLibraryEnergies(record)\n            self._metadata[\"minimumNeutronEnergy\"] = record.rwFloat(self._metadata[\"minimumNeutronEnergy\"])\n            record.rwList(self._computeNuclideRecordOffset(), \"int\", numNucs)\n        return nucNames\n\n    def _rwLibraryEnergies(self, record):\n        # neutron velocity (cm/s)\n        self._lib.neutronVelocity = record.rwMatrix(self._lib.neutronVelocity, self._metadata[\"numGroups\"])\n        # read emax for each group in descending eV.\n        self._lib.neutronEnergyUpperBounds = record.rwMatrix(\n            self._lib.neutronEnergyUpperBounds, self._metadata[\"numGroups\"]\n        )\n\n    def _rw3DRecord(self):\n        \"\"\"Read file-wide chi-distribution matrix.\"\"\"\n        raise NotImplementedError\n\n    def _computeNuclideRecordOffset(self):\n        \"\"\"\n        Compute the record offset of each nuclide.\n\n        
Notes\n        -----\n        The offset data is not read from the binary file because the ISOTXS\n        reader can dynamically calculate the offset itself. Therefore, during a\n        read operation, this data is ignored.\n        \"\"\"\n        recordsPerNuclide = [self._computeNumIsotxsRecords(nuc) for nuc in self._lib.nuclides]\n        return [sum(recordsPerNuclide[0:ii]) for ii in range(len(self._lib))]\n\n    def _computeNumIsotxsRecords(self, nuclide):\n        \"\"\"Compute the number of ISOTXS records for a specific nuclide.\"\"\"\n        numRecords = 2\n        metadata = self._getNuclideIO()(nuclide, self, self._lib)._getNuclideMetadata()\n        if metadata[\"chiFlag\"] > 1:\n            numRecords += 1\n        numRecords += sum(1 for _ord in metadata[\"ords\"] if _ord > 0)\n        return numRecords\n\n\nreadBinary = IsotxsIO.readBinary\nreadAscii = IsotxsIO.readAscii\nwriteBinary = IsotxsIO.writeBinary\nwriteAscii = IsotxsIO.writeAscii\n\n\nclass _IsotxsNuclideIO:\n    \"\"\"\n    A reader/writer class for ISOTXS nuclides.\n\n    Notes\n    -----\n    This is to be used in conjunction with an IsotxsIO object.\n    \"\"\"\n\n    def __init__(self, nuclide, isotxsIO, lib):\n        self._nuclide = nuclide\n        self._metadata = self._getNuclideMetadata()\n        self._isotxsIO = isotxsIO\n        self._lib = lib\n        self._fileWideChiFlag = self._getFileMetadata()[\"fileWideChiFlag\"]\n        self._fileWideChi = self._getFileMetadata()[\"chi\"]\n        self._numGroups = self._getFileMetadata()[\"numGroups\"]\n        self._maxScatteringBlocks = self._getFileMetadata()[\"maxScatteringBlocks\"]\n        self._subblockingControl = self._getFileMetadata()[\"subblockingControl\"]\n\n    def _getFileMetadata(self):\n        return self._lib.isotxsMetadata\n\n    def _getNuclideMetadata(self):\n        return self._nuclide.isotxsMetadata\n\n    def _getMicros(self):\n        return self._nuclide.micros\n\n    def rwNuclide(self):\n        
\"\"\"Read nuclide name, other global stuff from the ISOTXS library.\"\"\"\n        properties.unlockImmutableProperties(self._nuclide)\n        try:\n            self._rw4DRecord()\n            self._nuclide.updateBaseNuclide()\n            self._rw5DRecord()\n            if self._metadata[\"chiFlag\"] > 1:\n                self._rw6DRecord()\n\n            # get scatter matrix\n            for blockNumIndex in range(self._maxScatteringBlocks):\n                for subBlock in range(self._subblockingControl):\n                    if self._metadata[\"ords\"][blockNumIndex] > 0:\n                        # ords flag == 1 implies this scatter type of scattering exists on this nuclide.\n                        self._rw7DRecord(blockNumIndex, subBlock)\n        finally:\n            properties.lockImmutableProperties(self._nuclide)\n\n    def _rw4DRecord(self):\n        \"\"\"\n        Read 4D ISOTXS record.\n\n        Notes\n        -----\n        Read the following individual nuclide XS record. 
Load data into nuc.\n        This record contains non-mg data like atomic mass, temperature, and some flags.\n        \"\"\"\n        with self._isotxsIO.createRecord() as nucRecord:\n            # read string data\n            for datum in [\"nuclideId\", \"libName\", \"isoIdent\"]:\n                self._metadata[datum] = nucRecord.rwString(self._metadata[datum], 8)\n\n            # read float data\n            for datum in [\"amass\", \"efiss\", \"ecapt\", \"temp\", \"sigPot\", \"adens\"]:\n                self._metadata[datum] = nucRecord.rwFloat(self._metadata[datum])\n\n            # read integer data\n            for datum in [\n                \"classif\",\n                \"chiFlag\",\n                \"fisFlag\",\n                \"nalph\",\n                \"np\",\n                \"n2n\",\n                \"nd\",\n                \"nt\",\n                \"ltot\",\n                \"ltrn\",\n                \"strpd\",\n            ]:\n                self._metadata[datum] = nucRecord.rwInt(self._metadata[datum])\n\n            # defines what kind of scattering block each block is; total, inelastic, elastic, n2n\n            self._metadata[\"scatFlag\"] = nucRecord.rwList(self._metadata[\"scatFlag\"], \"int\", self._maxScatteringBlocks)\n\n            # number of scattering orders in this block. 
if 0, this block isn't present.\n            self._metadata[\"ords\"] = nucRecord.rwList(self._metadata[\"ords\"], \"int\", self._maxScatteringBlocks)\n            # bandwidth of this block: number of groups that scatter into this group, including this one.\n            jband = self._metadata[\"jband\"] or {}\n            for n in range(self._maxScatteringBlocks):\n                for j in range(self._numGroups):\n                    jband[j, n] = nucRecord.rwInt(jband.get((j, n), None))\n            self._metadata[\"jband\"] = jband\n\n            # position of in-group scattering for scattering data in group j\n            jj = self._metadata[\"jj\"] or {}\n            # Some mcc**2 cases seem to just have a bunch of 1's listed here.\n            # does this mean we never have upscatter? possibly.\n            for n in range(self._maxScatteringBlocks):\n                for j in range(self._numGroups):\n                    jj[j, n] = nucRecord.rwInt(jj.get((j, n), None))\n            self._metadata[\"jj\"] = jj\n\n    def _rw5DRecord(self):\n        \"\"\"Read principal microscopic MG XS data for a nuclide.\"\"\"\n        with self._isotxsIO.createRecord() as record:\n            micros = self._getMicros()\n            nuc = self._nuclide\n            numGroups = self._numGroups\n            micros.transport = record.rwMatrix(micros.transport, self._metadata[\"ltrn\"], numGroups)\n            micros.total = record.rwMatrix(micros.total, self._metadata[\"ltot\"], numGroups)\n            micros.nGamma = record.rwMatrix(micros.nGamma, numGroups)\n\n            if self._metadata[\"fisFlag\"] > 0:\n                micros.fission = record.rwMatrix(micros.fission, numGroups)\n                micros.neutronsPerFission = record.rwMatrix(micros.neutronsPerFission, numGroups)\n            else:\n                micros.fission = micros.getDefaultXs(numGroups)\n                micros.neutronsPerFission = micros.getDefaultXs(numGroups)\n\n            if 
self._metadata[\"chiFlag\"] == 1:\n                micros.chi = record.rwMatrix(micros.chi, numGroups)\n            elif self._metadata[\"fisFlag\"] > 0:\n                if self._fileWideChiFlag != 1:\n                    raise OSError(\"Fissile nuclide {} in library but no individual or global chi!\".format(nuc))\n                micros.chi = self._fileWideChi\n            else:\n                micros.chi = micros.getDefaultXs(numGroups)\n\n            # read some other important XS, if they exist\n            for xstype in [\"nalph\", \"np\", \"n2n\", \"nd\", \"nt\"]:\n                if self._metadata[xstype]:\n                    micros.__dict__[xstype] = record.rwMatrix(micros.__dict__[xstype], numGroups)\n                else:\n                    micros.__dict__[xstype] = micros.getDefaultXs(numGroups)\n\n            # coordinate direction transport cross section (for various coordinate directions)\n            if self._metadata[\"strpd\"] > 0:\n                micros.strpd = record.rwMatrix(micros.strpd, self._metadata[\"strpd\"], numGroups)\n            else:\n                micros.strpd = micros.getDefaultXs(numGroups)\n\n    def _rw6DRecord(self):\n        \"\"\"Reads nuclide-level chi dist.\"\"\"\n        raise NotImplementedError\n\n    def _rw7DRecord(self, blockNumIndex, subBlock):\n        \"\"\"\n        Read scatter matrix.\n\n        Parameters\n        ----------\n        blockNumIndex : int\n            Index of the scattering block (aka type of scattering) in this nuclide\n\n        subBlock : int\n            Index-tracking integer. Since neutrons don't scatter to and from all energies,\n            there is a bandwidth defined to save on storage.\n\n        Notes\n        -----\n        The data is stored as a giant array, and read in as a CSR matrix. The below matrix is\n        lower triangular, where periods are non-zero.\n\n            . 0 0 0 0 0\n            . . 0 0 0 0\n            . . . 0 0 0\n            . . . . 
0 0\n            . . . . . 0\n            . . . . . .\n\n        The data is read in rows starting at the top and going to the bottom.\n        Per row, there are JBAND non-zero entries. Per row, there are JJ non-zero entries on or\n        beyond the diagonal.\n\n            . 0 0 0 0 0\n            - - - - - -\n            - - - - - -\n            - - - - - -\n            - - - - - -\n            - - - - - -\n\n        Additionally, the data is reversed for whatever reason. So, let's say we are reading the\n        third row in our ficitious matrix. JBAND is 2, JJ is 1. We will read \"1\" first, and then\n        \"2\" from the ISOTXS. Since they are backwards, we need to reverse the numbers before\n        putting them into the matrix.\n\n            . 0 0 0 0 0\n            . . - - - -\n            . 2 1 - - -\n            - - - - - -\n            - - - - - -\n            - - - - - -\n\n        However, since we are reading a CSR, we can just add the indices in reverse (this is fast)\n        and read the data in as is (which is a bit slower). 
Then we will allow the CSR matrix to\n        fix the order later on, if necessary.\n        \"\"\"\n        scatter = self._getScatterMatrix(blockNumIndex)\n        if scatter is not None:\n            scatter = scatter.toarray()\n        with self._isotxsIO.createRecord() as record:\n            ng = self._numGroups\n            nsblok = self._subblockingControl\n            m = subBlock + 1  # fix starting at zero problem and use same indices as CCCC specification\n            # be careful with starting indices at 0 here!!\n            lordn = self._metadata[\"ords\"][blockNumIndex]\n            # this is basically how many scattering cross sections there are for this scatter type for this nuclide\n            jl = (m - 1) * ((ng - 1) // nsblok + 1) + 1\n            jup = m * ((ng - 1) // nsblok + 1)\n            ju = min(ng, jup)\n\n            metadata = self._metadata\n            indptr = [0]\n            indices = []\n            dataVals = []\n            for _scatterLoopOrder in range(lordn):\n                for g in range(jl - 1, ju):\n                    jup = g + metadata[\"jj\"][g, blockNumIndex]\n                    bandWidth = metadata[\"jband\"][g, blockNumIndex]\n                    jdown = jup - bandWidth\n                    if scatter is None:\n                        indptr.append(len(indices) + bandWidth)\n                        # add the indices in reverse\n                        indices.extend(range(jup - 1, jdown - 1, -1))\n                        # read the data as-is\n                        for _ in range(bandWidth):\n                            dataVals.append(record.rwFloat(0.0))\n                    else:\n                        for xs in reversed(scatter[g, jdown:jup].tolist()):\n                            record.rwFloat(xs)\n\n        if scatter is None:\n            # we're reading.\n            scatter = sparse.csr_matrix((np.array(dataVals), indices, indptr), shape=(ng, ng))\n            scatter.eliminate_zeros()\n          
  self._setScatterMatrix(blockNumIndex, scatter)\n\n    def _getScatterBlockNum(self, scatterType):\n        \"\"\"\n        Determine which scattering block is elastic scattering.\n\n        This information is stored in the scatFlab libparam and is\n        possibly different for each nuclide (e.g. C, B-10, etc.)\n\n        Parameters\n        ----------\n        scatterType : int\n            ISOTXS-defined special int flag for a scatter type (100 for elastic, etc.)\n\n        Returns\n        -------\n        blockNum : int\n            A index of the scatter matrix.\n        \"\"\"\n        try:\n            return np.where(self._metadata[\"scatFlag\"] == scatterType)[0][0]\n        except IndexError:\n            return None\n\n    def _getElasticScatterBlockNumIndex(self, legendreOrder=0):\n        return self._getScatterBlockNum(ELASTIC_SCATTER + legendreOrder)\n\n    def _getInelasticScatterBlockNumIndex(self):\n        return self._getScatterBlockNum(INELASTIC_SCATTER)\n\n    def _getN2nScatterBlockNumIndex(self):\n        return self._getScatterBlockNum(N2N_SCATTER)\n\n    def _getTotalScatterBlockNumIndex(self):\n        return self._getScatterBlockNum(TOTAL_SCATTER)\n\n    def _setScatterMatrix(self, blockNumIndex, scatterMatrix):\n        \"\"\"\n        Sets scatter matrix data to the proper ``scatterMatrix`` for this ``blockNum``.\n\n        blockNumIndex : int\n            Index of a scattering block.\n        \"\"\"\n        if blockNumIndex == self._getElasticScatterBlockNumIndex():\n            self._getMicros().elasticScatter = scatterMatrix\n        elif blockNumIndex == self._getInelasticScatterBlockNumIndex():\n            self._getMicros().inelasticScatter = scatterMatrix\n        elif blockNumIndex == self._getN2nScatterBlockNumIndex():\n            self._getMicros().n2nScatter = scatterMatrix\n        elif blockNumIndex == self._getTotalScatterBlockNumIndex():\n            self._getMicros().totalScatter = scatterMatrix\n        elif 
blockNumIndex == self._getElasticScatterBlockNumIndex(1):\n            self._getMicros().elasticScatter1stOrder = scatterMatrix\n        else:\n            self._getMicros().higherOrderScatter[blockNumIndex] = scatterMatrix\n\n    def _getScatterMatrix(self, blockNumIndex):\n        \"\"\"\n        Get the scatter matrix for a particular blockNum.\n\n        Notes\n        -----\n        This logic could be combined with _setScatterMatrix.\n        \"\"\"\n        if blockNumIndex == self._getElasticScatterBlockNumIndex():\n            scatterMatrix = self._getMicros().elasticScatter\n        elif blockNumIndex == self._getInelasticScatterBlockNumIndex():\n            scatterMatrix = self._getMicros().inelasticScatter\n        elif blockNumIndex == self._getN2nScatterBlockNumIndex():\n            scatterMatrix = self._getMicros().n2nScatter\n        elif blockNumIndex == self._getTotalScatterBlockNumIndex():\n            scatterMatrix = self._getMicros().totalScatter\n        elif blockNumIndex == self._getElasticScatterBlockNumIndex(1):\n            scatterMatrix = self._getMicros().elasticScatter1stOrder\n        else:\n            scatterMatrix = self._getMicros().higherOrderScatter.get(blockNumIndex, None)\n\n        return scatterMatrix\n"
  },
  {
    "path": "armi/nuclearDataIO/cccc/labels.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nReads and writes region and composition label data from a LABELS interface file.\n\nLABELS files are produced by DIF3D/VARIANT. They are very similar in structure\nand format to CCCC files but are not officially in the CCCC documents.\n\nThe file structure is listed here::\n\n        RECORD TYPE                        PRESENT IF\n        ===============================    ================\n        FILE IDENTIFICATION                ALWAYS\n        SPECIFICATIONS                     ALWAYS\n        LABEL AND AREA DATA                ALWAYS\n        FINITE-GEOMETRY TRANSVERSE         NHTS1.GT.0 OR\n          DISTANCES                         NGTS2.GT.0\n        NUCLIDE SET LABELS                 NSETS.GT.1\n        ALIAS ZONE LABELS                  NALIAS.GT.0\n        GENERAL CONTROL-ROD MODEL DATA     NBANKS.GT.0\n\n ***********(REPEAT FOR ALL BANKS)\n *      CONTROL-ROD BANK DATA              NBANKS.GT.0\n *\n *  *******(REPEAT FOR ALL RODS IN BANK)\n *  *   CONTROL-ROD CHANNEL DATA           (LLCHN+LLROD+MMESH).GT.0\n **********\n        BURNUP DEPENDENT CROSS SECTION     NVARY.GT.0\n          SPECIFICATIONS\n        BURNUP DEPENDENT GROUPS            MAXBRN.GT.0\n        BURNUP DEPENDENT FITTING           MAXORD.GT.0\n          COEFFICIENTS\n\n\nReference: [DIF3D]_.\n\nExamples\n--------\n>>> labelData = 
LabelStream.readBinary(\"LABELS\")\n\"\"\"\n\nfrom armi import runLog\nfrom armi.nuclearDataIO import cccc\n\nLABELS = \"LABELS\"\n\nFILE_SPEC_1D_KEYS = [\n    \"numZones\",\n    \"numRegions\",\n    \"numAreas\",\n    \"numRegionAreaAssignments\",\n    \"numHalfHeightsDirection1\",\n    \"numHalfHeightsDirection2\",\n    \"numNuclideSets\",\n    \"numZoneAliases\",\n    \"numTrianglesPerHex\",\n    \"numHexagonalRings\",\n    \"numControlRodChannels\",\n    \"numControlRodBanks\",\n    \"numAxialFineMeshBins\",\n    \"maxControlRodBankTimes\",\n    \"maxControlRodsPerBank\",\n    \"maxControlRodsMeshes\",\n    \"maxControlRodPieces\",\n    \"maxControlRodChannels\",\n    \"numBurnupDependentIsotopes\",\n    \"maxBurnupDependentGroups\",\n    \"maxBurnupPolynomialOrder\",\n    \"modelDimensions\",\n]\n\n\nclass LabelsData(cccc.DataContainer):\n    \"\"\"\n    Data structure containing various region, zone, area, nuclide labels.\n\n    This is the data structure that is read from or written to a LABELS file.\n    \"\"\"\n\n    def __init__(self):\n        cccc.DataContainer.__init__(self)\n        self.regionLabels = []\n        self.zoneLabels = []\n        self.areaLabels = []\n        self.regionAreaAssignments = []\n        self.halfHeightsDirection1 = []\n        self.halfHeightsDirection2 = []\n        self.extrapolationDistance1 = []\n        self.extrapolationDistance2 = []\n        self.nuclideSetLabels = []\n        self.aliasZoneLabels = []\n\n\nclass LabelsStream(cccc.StreamWithDataContainer):\n    \"\"\"\n    Class for reading and writing the LABELS interface file produced by DIF3D/VARIANT.\n\n    Notes\n    -----\n    Contains region and composition labels, area data, half heights, nuclide set labels, alias zone labels,\n    control-rod model data, and burnup dependent cross section data.\n\n    See Also\n    --------\n    armi.nuclearDataIO.cccc.compxs\n    \"\"\"\n\n    @staticmethod\n    def _getDataContainer() -> LabelsData:\n        return 
LabelsData()\n\n    def readWrite(self):\n        runLog.info(\"{} LABELS data {}\".format(\"Reading\" if \"r\" in self._fileMode else \"Writing\", self))\n        self._rwFileID()\n        self._rw1DRecord()\n        self._rw2DRecord()\n        if self._metadata[\"numHalfHeightsDirection1\"] > 0 or self._metadata[\"numHalfHeightsDirection2\"] > 0:\n            self._rw3DRecord()\n        if self._metadata[\"numNuclideSets\"] > 1:\n            self._rw4DRecord()\n        if self._metadata[\"numZoneAliases\"] > 0:\n            self._rw5DRecord()\n        if self._metadata[\"numControlRodBanks\"] > 0:\n            self._rw6DRecord()\n            self._rw7DRecord()\n            self._rw8DRecord()\n        if self._metadata[\"numBurnupDependentIsotopes\"] > 0:\n            self._rw9DRecord()\n        if self._metadata[\"maxBurnupDependentGroups\"] > 0:\n            self._rw10DRecord()\n        if self._metadata[\"maxBurnupPolynomialOrder\"] > 0:\n            self._rw11DRecord()\n\n    def _rwFileID(self):\n        with self.createRecord() as record:\n            for name in [\"hname\", \"huse\", \"huse2\"]:\n                self._metadata[name] = record.rwString(self._metadata[name], 8)\n            self._metadata[\"version\"] = record.rwInt(self._metadata[\"version\"])\n\n    def _rw1DRecord(self):\n        \"\"\"Read/write the file specification data.\"\"\"\n        with self.createRecord() as record:\n            for param in FILE_SPEC_1D_KEYS:\n                self._metadata[param] = record.rwInt(self._metadata[param])\n            self._metadata[\"dummy\"] = record.rwList(self._metadata[\"dummy\"], \"int\", 2)\n\n    def _rw2DRecord(self):\n        \"\"\"Read/write the label and area data.\"\"\"\n        with self.createRecord() as record:\n            self._data.zoneLabels = record.rwList(self._data.zoneLabels, \"string\", self._metadata[\"numZones\"], 8)\n            self._data.regionLabels = record.rwList(\n                self._data.regionLabels,\n             
   \"string\",\n                self._metadata[\"numRegions\"],\n                8,\n            )\n            self._data.areaLabels = record.rwList(self._data.areaLabels, \"string\", self._metadata[\"numAreas\"], 8)\n            self._data.regionAreaAssignments = record.rwList(\n                self._data.regionAreaAssignments,\n                \"string\",\n                self._metadata[\"numRegionAreaAssignments\"],\n                8,\n            )\n\n    def _rw3DRecord(self):\n        \"\"\"Read/write the finite-geometry transverse distances.\"\"\"\n        with self.createRecord() as record:\n            self._data.halfHeightsDirection1 = record.rwList(\n                self._data.halfHeightsDirection1,\n                \"float\",\n                self._metadata[\"numHalfHeightsDirection1\"],\n            )\n            self._data.extrapolationDistance1 = record.rwList(\n                self._data.extrapolationDistance1,\n                \"float\",\n                self._metadata[\"numHalfHeightsDirection1\"],\n            )\n            self._data.halfHeightsDirection2 = record.rwList(\n                self._data.halfHeightsDirection2,\n                \"float\",\n                self._metadata[\"numHalfHeightsDirection2\"],\n            )\n            self._data.extrapolationDistance2 = record.rwList(\n                self._data.extrapolationDistance2,\n                \"float\",\n                self._metadata[\"numHalfHeightsDirection2\"],\n            )\n\n    def _rw4DRecord(self):\n        \"\"\"Read/write the nuclide labels.\"\"\"\n        with self.createRecord() as record:\n            self._data.nuclideSetLabels = record.rwList(\n                self._data.nuclideSetLabels,\n                \"string\",\n                self._metadata[\"numNuclideSets\"],\n                8,\n            )\n\n    def _rw5DRecord(self):\n        \"\"\"Read/write the zone aliases.\"\"\"\n        with self.createRecord() as record:\n            
self._data.aliasZoneLabels = record.rwList(\n                self._data.aliasZoneLabels,\n                \"string\",\n                self._metadata[\"numZoneAliases\"],\n                8,\n            )\n\n    def _rw6DRecord(self):\n        \"\"\"Read/write the general control-rod model data.\"\"\"\n        raise NotImplementedError(\"Control rod data not implemented\")\n\n    def _rw7DRecord(self):\n        \"\"\"Read/write the control-rod bank data.\"\"\"\n        raise NotImplementedError(\"Control rod data not implemented\")\n\n    def _rw8DRecord(self):\n        \"\"\"Read/write the control-rod channel data.\"\"\"\n        raise NotImplementedError(\"Control rod data not implemented\")\n\n    def _rw9DRecord(self):\n        \"\"\"Read/write the burnup-dependent cross section specifications.\"\"\"\n        raise NotImplementedError(\"BU dependent XS data not implemented\")\n\n    def _rw10DRecord(self):\n        \"\"\"Read/write the burnup-dependent group data.\"\"\"\n        raise NotImplementedError(\"BU dependent XS data not implemented\")\n\n    def _rw11DRecord(self):\n        \"\"\"Read/write the burnup-dependent fitting coefficient data.\"\"\"\n        raise NotImplementedError(\"BU dependent XS data not implemented\")\n\n\nreadBinary = LabelsStream.readBinary\nreadAscii = LabelsStream.readAscii\nwriteBinary = LabelsStream.writeBinary\nwriteAscii = LabelsStream.writeAscii\n"
  },
  {
    "path": "armi/nuclearDataIO/cccc/nhflux.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nNHFLUX is a CCCC interface file that stores flux moments and partial currents from\nDIF3D-Nodal and DIF3D-VARIANT.\n\nExamples\n--------\n>>> nhfluxData = NfluxStream.readBinary(\"NHFLUX\")\n>>> NhfluxStream.writeAscii(nhfluxData, \"nhflux.ascii\")\n\n\"\"\"\n\nimport numpy as np\n\nfrom armi.nuclearDataIO import cccc\n\nFILE_SPEC_1D_KEYS = (\n    \"ndim\",\n    \"ngroup\",\n    \"ninti\",\n    \"nintj\",\n    \"nintk\",\n    \"iter\",\n    \"effk\",\n    \"power\",\n    \"nSurf\",\n    \"nMom\",\n    \"nintxy\",\n    \"npcxy\",\n    \"nscoef\",\n    \"itrord\",\n    \"iaprx\",\n    \"ileak\",\n    \"iaprxz\",\n    \"ileakz\",\n    \"iorder\",\n)\n\nFILE_SPEC_1D_KEYS_VARIANT11 = (\n    \"npcbdy\",\n    \"npcsym\",\n    \"npcsec\",\n    \"iwnhfl\",\n    \"nMoms\",\n)\n\n\nclass NHFLUX(cccc.DataContainer):\n    \"\"\"\n    An abstraction of a NHFLUX file. This format is defined in the DIF3D manual. Note that the\n    format for DIF3D-Nodal and DIF3D-VARIANT are not the same. The VARIANT NHFLUX format has\n    recently changed, so this reader is only compatible with files produced by v11.0 of the solver.\n\n    Attributes\n    ----------\n    metadata : file control\n        The NHFLUX file control info (sort of global for this library). 
This is the contents of the\n        1D data block on the file.\n\n    incomingPointersToAllAssemblies: 2-D list of floats\n        This is an index map for the \"internal surfaces\" between DIF3D nodal indexing and DIF3D\n        GEODST indexing. It can be used to process incoming partial currents. This uses the same\n        ordering as the geodstCoordMap attribute.\n\n    externalCurrentPointers : list of ints\n        This is an index map for the \"external surfaces\" between DIF3D nodal indexing and DIF3D\n        GEODST indexing. \"External surfaces\" are important because they contain the INCOMING partial\n        currents from the outer reactor boundary. This uses the same ordering as geodstCoordMap,\n        except that each assembly now has multiple subsequent indices. For example, for a hexagonal\n        core, if hex of index n (0 to N-1) has a surface of index k (0 to 5) that lies on the vacuum\n        boundary, then the index of that surface is N*6 + k + 1.\n\n    geodstCoordMap : list of ints\n        This is an index map between DIF3D nodal and DIF3D GEODST. It is necessary for interpreting\n        the ordering of flux and partial current data in the NHFLUX file. Note that this mapping\n        between DIF3D-Nodal and DIF3D-VARIANT is not the same.\n\n    outgoingPCSymSecPointers: list of ints\n        This is an index map for the outgoing partial currents on the symmetric and sector lateral\n        boundary. It is only present for DIF3D-VARIANT for hexagonal cores.\n\n    ingoingPCSymSecPointers: list of ints\n        This is an index map for the ingoing (or incoming) partial currents on the symmetric and\n        sector lateral boundary. It is only present for DIF3D-VARIANT for hexagonal cores.\n\n    fluxMomentsAll : 4-D list of floats\n        This contains all the flux moments for all core assemblies. The jth planar flux moment of\n        assembly i in group g in axial node k is fluxMoments[i][k][j][g]. 
The assemblies are ordered\n        according to the geodstCoordMap attribute. For DIF3D-VARIANT, this includes both even and\n        odd parity moments.\n\n    partialCurrentsHexAll : 5-D list of floats\n        This contains all the OUTGOING partial currents for all core assemblies. The OUTGOING\n        partial current on surface j in assembly i in axial node k in group g is\n        partialCurrentsHex[i][k][j][g][m], where m=0. The assemblies are ordered according to the\n        geodstCoordMap attribute. For DIF3D-VARIANT, higher-order data is available for the m axis.\n\n    partialCurrentsHex_extAll : 4-D list of floats\n        This contains all the INCOMING partial currents on \"external surfaces\", which are adjacent\n        to the reactor outer boundary (usually vacuum). Internal reflective surfaces are NOT\n        included in this! These \"external surfaces\" are ordered according to\n        externalCurrentPointers. For DIF3D-VARIANT, higher-order data is available for the last\n        axis.\n\n    partialCurrentsZAll : 5-D list of floats\n        This contains all the upward and downward partial currents for all core assemblies. The\n        assemblies are ordered according to the geodstCoordMap attribute. For DIF3D-VARIANT, higher-\n        order data is available for the last axis.\n\n    Warning\n    -------\n    DIF3D outputs NHFLUX at every time node, but REBUS outputs NHFLUX only at every cycle.\n\n    See Also\n    --------\n    [VARIANT-95]_ and [VARIANT-2014]_.\n\n    .. [VARIANT-95] G. Palmiotti, E. E. Lewis, and C. B. Carrico, VARIANT: VARIational Anisotropic\n       Nodal Transport for Multidimensional Cartesian and Hexagonal Geometry Calculation, ANL-95/40,\n       Argonne National Laboratory, Argonne, IL (October 1995).\n\n    .. [VARIANT-2014] Smith, M. A., Lewis, E. E., and Shemon, E. R. DIF3D-VARIANT 11.0: A Decade of\n       Updates. United States: N. p., 2014. Web. 
doi:10.2172/1127298.\n       https://publications.anl.gov/anlpubs/2014/04/78313.pdf\n    \"\"\"\n\n    def __init__(self, fName=\"NHFLUX\", variant=False, numDataSetsToRead=1):\n        \"\"\"\n        Initialize the NHFLUX or NAFLUX reader object.\n\n        Parameters\n        ----------\n        fName : str, optional\n            Filename of the NHFLUX binary file to be read.\n\n        variant : bool, optional\n            Whether or not this NHFLUX/NAFLUX file has the DIF3D-VARIANT output format, which is\n            different than the DIF3D-Nodal format.\n        \"\"\"\n        cccc.DataContainer.__init__(self)\n\n        self.metadata[\"variantFlag\"] = variant\n        self.metadata[\"numDataSetsToRead\"] = numDataSetsToRead\n\n        # Initialize instance array variables\n        self.incomingPointersToAllAssemblies: np.ndarray = np.array([])\n        self.externalCurrentPointers: np.ndarray = np.array([])\n        self.geodstCoordMap: np.ndarray = np.array([])\n        if self.metadata[\"variantFlag\"]:\n            self.outgoingPCSymSecPointers: np.ndarray = np.array([])\n            self.ingoingPCSymSecPointers: np.ndarray = np.array([])\n        self.fluxMomentsAll: np.ndarray = np.array([])\n        self.partialCurrentsHexAll: np.ndarray = np.array([])\n        self.partialCurrentsHex_extAll: np.ndarray = np.array([])\n        self.partialCurrentsZAll: np.ndarray = np.array([])\n\n    @property\n    def fluxMoments(self):\n        \"\"\"\n        For DIF3D-Nodal, this property is equivalent to the attribute `fluxMomentsAll`. For\n        DIF3D-VARIANT, this property represents the even-parity flux moments.\n\n        Read-only property (there is no setter).\n        \"\"\"\n        nMom = self.metadata[\"nMom\"]\n        return self.fluxMomentsAll[..., :nMom, :]\n\n    @property\n    def partialCurrentsHex(self):\n        \"\"\"\n        For DIF3D-Nodal, this property is almost always equivalent to the attribute\n        ``partialCurrentsHex``. 
For DIF3D-VARIANT, this property returns the zeroth-order moment of\n        the outgoing radial currents.\n\n        Read-only property (there is no setter).\n        \"\"\"\n        return self.partialCurrentsHexAll[..., 0]\n\n    @property\n    def partialCurrentsHex_ext(self):\n        \"\"\"\n        For DIF3D-Nodal, this property is almost always equivalent to the attribute\n        `partialCurrentsHex_ext`. For DIF3D-VARIANT, this property returns the zeroth-order\n        moment of the incoming/ingoing radial currents.\n\n        Read-only property (there is no setter).\n        \"\"\"\n        return self.partialCurrentsHex_extAll[..., 0]\n\n    @property\n    def partialCurrentsZ(self):\n        \"\"\"\n        For DIF3D-Nodal, this property is almost always equivalent to the attribute\n        `partialCurrentsZ`. For DIF3D-VARIANT, this property returns the zeroth-order\n        moment of the axial currents.\n\n        Read-only property (there is no setter).\n        \"\"\"\n        return self.partialCurrentsZAll[..., 0]\n\n\nclass NhfluxStream(cccc.StreamWithDataContainer):\n    @staticmethod\n    def _getDataContainer() -> NHFLUX:\n        return NHFLUX()\n\n    def readWrite(self):\n        \"\"\"\n        Read everything from the DIF3D binary file NHFLUX.\n\n        Read all surface-averaged partial currents, all planar moments, and the DIF3D nodal\n        coordinate mapping system.\n\n        Notes\n        -----\n        This method should be private but conflicts with ``_readWrite`` so we need a\n        better name.\n\n        Parameters\n        ----------\n        numDataSetsToRead : int, optional\n            The number of whole-core flux data sets included in this NHFLUX/NAFLUX file that one\n            wishes to be read. Some NHFLUX/NAFLUX files, such as NAFLUX files written by\n            SASSYS/DIF3D-K, contain more than one flux data set. 
Each data set overwrites the\n            previous one on the NHFLUX class object, which will contain only the\n            ``numDataSetsToRead-th`` data set. The first numDataSetsToRead-1 data sets are\n            essentially skipped over.\n        \"\"\"\n        self._rwFileID()\n        self._rwBasicFileData1D()\n\n        # This control info only exists for VARIANT. We can only process entries with 0 or 1.\n        if self._metadata[\"variantFlag\"] and self._metadata[\"iwnhfl\"] == 2:\n            msg = (\n                \"This reader can only read VARIANT NHFLUX files where 'iwnhfl'=0 (both \"\n                \"fluxes and currents are present) or 'iwnhfl'=1 (only fluxes are present). \"\n            )\n            raise ValueError(msg)\n\n        # Read the hex ordering map between DIF3D nodal and DIF3D GEODST. Also read index\n        # pointers to incoming partial currents on outer reactor surface (these don't\n        # belong to any assembly). Incoming partial currents are non-zero due to flux\n        # extrapolation\n        self._rwGeodstCoordMap2D()\n\n        # Number of energy groups\n        ng = self._metadata[\"ngroup\"]\n\n        # Number of axial nodes (same for each assembly in DIF3D)\n        nz = self._metadata[\"nintk\"]\n\n        # Number of XY partial currents on the boundary. Note that for the same model, this\n        # number is not the same between Nodal and VARIANT; VARIANT has more.\n        numPartialCurrentsHex_ext = self._metadata[\"npcxy\"] - self._metadata[\"nintxy\"] * self._metadata[\"nSurf\"]\n\n        # Typically, flux and current data has units of n/cm^2/s. 
However, when reading\n        # an NHFLUX file produced by VARPOW (where 'iwnhfl'=1), the flux-only data has units\n        # of W/cc (there is no current data written to the file).\n        if self._data.fluxMomentsAll.size == 0:\n            # Initialize using metadata info for reading\n            totalMoments = (\n                self._metadata[\"nMom\"]\n                if not self._metadata[\"variantFlag\"]\n                else (self._metadata[\"nMom\"] + self._metadata[\"nMoms\"])\n            )\n            self._data.fluxMomentsAll = np.zeros((self._metadata[\"nintxy\"], nz, totalMoments, ng))\n\n            if self._metadata[\"iwnhfl\"] != 1:\n                self._data.partialCurrentsHexAll = np.zeros(\n                    (\n                        self._metadata[\"nintxy\"],\n                        nz,\n                        self._metadata[\"nSurf\"],\n                        ng,\n                        self._metadata[\"nscoef\"],\n                    )\n                )\n                self._data.partialCurrentsHex_extAll = np.zeros(\n                    (numPartialCurrentsHex_ext, nz, ng, self._metadata[\"nscoef\"])\n                )\n                self._data.partialCurrentsZAll = np.zeros(\n                    (self._metadata[\"nintxy\"], nz + 1, 2, ng, self._metadata[\"nscoef\"])\n                )\n\n        for _n in range(self._metadata[\"numDataSetsToRead\"]):\n            # Each record contains nodal data for ONE energy group in ONE axial core slice.\n            # Must loop through all energy groups and all axial core slices.\n\n            # The axial surface partial currents are indexed by axial surface (NOT by axial node),\n            # so there are nz+1 records for z-surface currents\n\n            # Loop through all energy groups: high-to-low for forward flux, low-to-high for\n            # adjoint flux\n            for g in range(ng):\n                gEff = self._getEnergyGroupIndex(g)\n\n                # Loop through 
axial nodes\n                for z in range(nz):\n                    # Process flux moments\n                    self._data.fluxMomentsAll[:, z, :, gEff] = self._rwFluxMoments3D(\n                        self._data.fluxMomentsAll[:, z, :, gEff]\n                    )\n\n                # Process currents\n                if self._metadata[\"iwnhfl\"] != 1:\n                    # Loop through axial nodes\n                    for z in range(nz):\n                        (\n                            self._data.partialCurrentsHexAll[:, z, :, gEff, :],\n                            self._data.partialCurrentsHex_extAll[:, z, gEff, :],\n                        ) = self._rwHexPartialCurrents4D(\n                            self._data.partialCurrentsHexAll[:, z, :, gEff, :],\n                            self._data.partialCurrentsHex_extAll[:, z, gEff, :],\n                        )\n\n                    # Loop through axial surfaces (NOT axial nodes, because there is a \"+1\")\n                    for z in range(nz + 1):\n                        self._data.partialCurrentsZAll[:, z, :, gEff, :] = self._rwZPartialCurrents5D(\n                            self._data.partialCurrentsZAll[:, z, :, gEff, :]\n                        )\n\n    def _getNumOuterSurfacesHex(self):\n        \"\"\"\n        The word \"outer\" in the method name means along the outside of the core. Thus, this\n        is the number of lateral hex surfaces on the outer core boundary (usually vacuum...internal\n        reflective boundaries do NOT count).\n        \"\"\"\n        # Both Nodal and VARIANT files should return the same number, but they are calculated\n        # differently between the two codes\n        if self._metadata[\"variantFlag\"]:\n            numOuterSurfacesHex = self._metadata[\"npcbdy\"]\n        else:\n            # Nodal does not have an \"npcbdy\" metadata parameter, so numOuterSurfacesHex\n            # must be calculated differently. 
Performing the same calculation below in VARIANT,\n            # which is possible to do, can return a different number, so that is why\n            # we cannot use the same calculation for both codes.\n            numOuterSurfacesHex = self._metadata[\"npcxy\"] - self._metadata[\"nintxy\"] * self._metadata[\"nSurf\"]\n\n        return numOuterSurfacesHex\n\n    def _rwFileID(self):\n        \"\"\"\n        Read/write file id record.\n\n        Notes\n        -----\n        The username, version, etc are embedded in this string but it's\n        usually blank.\n        \"\"\"\n        with self.createRecord() as record:\n            self._metadata[\"label\"] = record.rwString(self._metadata[\"label\"], 28)\n\n    def _rwBasicFileData1D(self):\n        \"\"\"Read basic data parameters (number of energy groups, assemblies, axial nodes, etc.).\"\"\"\n        # Dummy values are stored because sometimes they get assigned\n        # unexpected values anyway, and so we still want to preserve those values anyway\n        if self._metadata[\"variantFlag\"]:\n            keys = FILE_SPEC_1D_KEYS + FILE_SPEC_1D_KEYS_VARIANT11 + tuple(f\"IDUM{e:>02}\" for e in range(1, 7))\n        else:\n            keys = FILE_SPEC_1D_KEYS + tuple(tuple(f\"IDUM{e:>02}\" for e in range(1, 12)))\n\n        with self.createRecord() as record:\n            self._metadata.update(record.rwImplicitlyTypedMap(keys, self._metadata))\n\n    def _rwGeodstCoordMap2D(self):\n        \"\"\"\n        Read/write core geometry indexing from the NHFLUX 2D block.\n\n        This reads the 2-D (x,y) indexing for assemblies. geodstCoordMap maps DIF3D\n        nodal hex indexing to DIF3D GEODST indexing.\n        This DIF3D GEODST indexing is different than (but similar to) the MCNP GEODST ordering.\n\n        For Nodal, let N be the number of assemblies. Let M be the number of\n        \"external hex surfaces\" exposed to the outer reactor boundary (usually vacuum). 
M\n        does NOT include reflective surfaces!\n\n        N = self._metadata['nintxy']\n        M = self._metadata['npcxy'] - self._metadata['nintxy']*6\n        N*6 + M = self._metadata['npcxy']\n\n        For VARIANT in hexagonal geometry, there are two additional datasets for outgoing\n        and ingoing partial currents on the symmetric and sector xy-plane boundary.\n\n        Examples\n        --------\n            geodstCoordMap[NodalIndex] = geodstIndex\n\n        See Also\n        --------\n        nuclearDataIO.NHFLUX.__init__\n        nuclearDataIO.NHFLUX._rwHexPartialCurrents4D\n        nuclearDataIO.ISOTXS.read2D\n        nuclearDataIO.SPECTR.read2D\n        \"\"\"\n        with self.createRecord() as record:\n            # Number of unique assemblies - this is N in the comments above\n            nAssem = self._metadata[\"nintxy\"]\n\n            # Number of lateral surfaces per assembly (this is 6 for hexagonal cores)\n            nSurf = self._metadata[\"nSurf\"]\n\n            numExternalSurfaces = self._getNumOuterSurfacesHex()\n\n            # Initialize np arrays to store all node ordering (and node surface ordering)\n            # data. 
We don't actually use incomingPointersToAllAssemblies (basically\n            # equivalent to nearest neighbors indices), but it's here in case someone\n            # needs it in the future.\n\n            # Initialize data size when reading\n            if self._data.incomingPointersToAllAssemblies.size == 0:\n                # Index pointers to INCOMING partial currents on assemblies\n                self._data.incomingPointersToAllAssemblies = np.zeros((nSurf, nAssem), dtype=int)\n                # Index pointers to OUTGOING partial currents on core outer boundary\n                self._data.externalCurrentPointers = np.zeros((numExternalSurfaces), dtype=int)\n                # Index pointers to DIF3D GEODST ordering of each assembly\n                self._data.geodstCoordMap = np.zeros(nAssem, dtype=int)\n\n            self._data.incomingPointersToAllAssemblies = record.rwIntMatrix(\n                self._data.incomingPointersToAllAssemblies, nAssem, nSurf\n            )\n\n            self._data.externalCurrentPointers = record.rwList(\n                self._data.externalCurrentPointers, \"int\", numExternalSurfaces\n            )\n\n            self._data.geodstCoordMap = record.rwList(self._data.geodstCoordMap, \"int\", nAssem)\n\n            # There is additional data to process for VARIANT\n            if self._metadata[\"variantFlag\"]:\n                # Number of symmetry and sector surface pointers\n                npcsto = self._metadata[\"npcsym\"] + self._metadata[\"npcsec\"]\n\n                if self._data.outgoingPCSymSecPointers.size == 0:\n                    self._data.outgoingPCSymSecPointers = np.zeros(npcsto, dtype=int)\n                    self._data.ingoingPCSymSecPointers = np.zeros(npcsto, dtype=int)\n\n                self._data.outgoingPCSymSecPointers = record.rwList(self._data.outgoingPCSymSecPointers, \"int\", npcsto)\n                self._data.ingoingPCSymSecPointers = record.rwList(self._data.ingoingPCSymSecPointers, \"int\", 
npcsto)\n\n    def _rwFluxMoments3D(self, contents):\n        r\"\"\"\n        Read/write multigroup flux moments from the NHFLUX 3D block.\n\n        This reads/writes the planar moments for each DIF3D node on ONE x,y plane. The\n        planar moments for DIF3D nodes on different x,y planes (different axial slices) are\n        in a different 3D record, so this method must be repeatedly executed in order to\n        process them all.\n\n        Format is ``((FLUX(I,J),I=1,NMOM),J=1,NINTXY)`` so we must pass in ``NINTXY`` as\n        the first item in the shape. However, the caller of this method wants the shape\n        to be (nintxy, nMom) so we actually have to transpose it on the way in/out.\n\n        nMom can also be nMoms when reading/writing for VARIANT.\n        \"\"\"\n        nMom = self._metadata[\"nMom\"]\n        with self.createRecord() as record:\n            result = record.rwDoubleMatrix(\n                contents[:, :nMom].T,\n                self._metadata[\"nintxy\"],\n                nMom,\n            )\n            contents[:, :nMom] = result.T\n\n            # If we have VARIANT data, then we also need to process the odd-parity moments.\n            if self._metadata[\"variantFlag\"] and self._metadata[\"nMoms\"] > 0:\n                result = record.rwDoubleMatrix(\n                    contents[:, nMom:].T,\n                    self._metadata[\"nintxy\"],\n                    self._metadata[\"nMoms\"],\n                )\n                contents[:, nMom:] = result.T\n\n        return contents\n\n    def _rwHexPartialCurrents4D(self, surfCurrents, externalSurfCurrents):\n        r\"\"\"\n        Read/write multigroup lateral partial OUTGOING currents from the NHFLUX 4D block.\n\n        This reads all OUTGOING partial currents for all assembly block lateral surfaces\n        at a fixed axial position. For a hexagonal core, there are 6 surfaces per assembly\n        axial block. 
The data for the 2 axial surfaces of each block are in the 5D records.\n\n        Each 4D record contains all the surface partial currents on ONE x,y plane. The\n        surface data on different x,y planes (different axial slices) are in a different\n        4D record, so this method must be repeatedly executed in order to process them all.\n\n        If the reactor contains N assemblies and M exterior surfaces (surfaces adjacent to\n        vacuum boundary), this record will contain N*6 + M partial currents. The N*6\n        assembly OUTGOING partial currents are listed first, followed by the M INCOMING\n        partial currents from the outer reactor edge.\n\n        N = self._metadata['nintxy']\n        M = self._metadata['npcxy'] - self._metadata['nintxy']*6\n        N*6 + M = self._metadata['npcxy']\n\n        Notes\n        -----\n        These data are harder to read with rwMatrix, though it could be done if we\n        discarded the unwanted data at another level if that is much faster.\n        \"\"\"\n        with self.createRecord() as record:\n            nAssem = self._metadata[\"nintxy\"]\n            nSurf = self._metadata[\"nSurf\"]\n\n            # This is equal to one for Nodal diffusion theory, but greater than one for\n            # VARIANT.\n            nscoef = self._metadata[\"nscoef\"]\n\n            numPartialCurrentsHex_ext = self._metadata[\"npcxy\"] - self._metadata[\"nintxy\"] * self._metadata[\"nSurf\"]\n\n            # Loop through all lateral surfaces of all assemblies\n            for i in range(nAssem):\n                for j in range(nSurf):\n                    for m in range(nscoef):\n                        # OUTGOING partial currents on each lateral surface in each assembly.\n                        # If m > 0, other NSCOEF options (i.e., half-angle integrated\n                        # flux when reading DIF3D-Nodal data, and higher current moments\n                        # when reading DIF3D-VARIANT data) are processed.\n   
                     surfCurrents[i, j, m] = record.rwDouble(surfCurrents[i, j, m])\n\n            for j in range(numPartialCurrentsHex_ext):\n                for m in range(nscoef):\n                    # INCOMING current at each surface of outer core boundary. If m > 0,\n                    # other NSCOEF options (i.e., half-angle integrated flux when\n                    # reading DIF3D-Nodal data, and higher current moments when reading\n                    # DIF3D-VARIANT data) are processed.\n                    externalSurfCurrents[j, m] = record.rwDouble(externalSurfCurrents[j, m])\n\n            return surfCurrents, externalSurfCurrents\n\n    def _rwZPartialCurrents5D(self, surfCurrents):\n        \"\"\"\n        Read/write multigroup axial partial currents from the NHFLUX 5D block.\n\n        Most other NHFLUX data is indexed by DIF3D node (each axial core slice in its own record).\n        HOWEVER, \"top\" and \"bottom\" surfaces of each DIF3D node are instead indexed by axial\n        surface. If there are Z axial nodes, then there are Z+1 axial surfaces. Thus, there\n        are Z+1 5D records, while there are only Z 3D and Z 4D records.\n\n        Each 5D record (each axial surface) contains two partial currents for each assembly position.\n        The first is the UPWARD partial current, while the second is the DOWNWARD partial current.\n\n        Returns\n        -------\n        surfCurrents : 3-D list of floats\n            This contains all the upward and downward partial currents in all assemblies\n            on ONE whole-core axial slice. 
The assemblies are ordered according to\n            self.geodstCoordMap.\n\n        See Also\n        --------\n        nuclearDataIO.NHFLUX._rwBasicFileData1D\n        nuclearDataIO.NHFLUX._rwGeodstCoordMap2D\n        \"\"\"\n        with self.createRecord() as record:\n            nAssem = self._metadata[\"nintxy\"]\n            nSurf = 2\n            nscoef = self._metadata[\"nscoef\"]\n\n            # Loop through all (up and down) partial currents on all hexes\n            # These loops are in a different order than in the 4D record above!!!\n            # Here we loop through surface FIRST and assemblies SECOND!!!\n            for j in range(nSurf):\n                for i in range(nAssem):\n                    for m in range(nscoef):\n                        # Outward partial current. For m > 0, other NSCOEF options\n                        # (i.e., half-angle integrated flux when reading DIF3D-Nodal\n                        # data, and higher current moments when reading DIF3D-VARIANT\n                        # data) are processed.\n                        surfCurrents[i, j, m] = record.rwDouble(surfCurrents[i, j, m])\n\n        return surfCurrents\n\n    def _getEnergyGroupIndex(self, g):\n        \"\"\"\n        Real fluxes stored in NHFLUX have \"normal\" (or \"forward\") energy groups. 
Also see the\n        subclass method NAFLUX.getEnergyGroupIndex().\n        \"\"\"\n        return g\n\n\nclass NafluxStream(NhfluxStream):\n    \"\"\"\n    NAFLUX is similar in format to the NHFLUX, but contains adjoint flux.\n\n    It has reversed energy group ordering.\n    \"\"\"\n\n    def _getEnergyGroupIndex(self, g):\n        \"\"\"Adjoint fluxes stored in NAFLUX have \"reversed\" (or \"backward\") energy groups.\"\"\"\n        ng = self._metadata[\"ngroup\"]\n        return ng - g - 1\n\n\nclass NhfluxStreamVariant(NhfluxStream):\n    \"\"\"\n    Stream for VARIANT version of NHFLUX.\n\n    Notes\n    -----\n    Can be deleted after the NHFLUX data container becomes the public interface.\n    \"\"\"\n\n    @staticmethod\n    def _getDataContainer() -> NHFLUX:\n        return NHFLUX(variant=True)\n\n\nclass NafluxStreamVariant(NafluxStream):\n    \"\"\"\n    Stream for VARIANT version of NAFLUX.\n\n    Notes\n    -----\n    Can be deleted after the NHFLUX data container becomes the public interface.\n    \"\"\"\n\n    @staticmethod\n    def _getDataContainer() -> NHFLUX:\n        return NHFLUX(variant=True)\n\n\ndef getNhfluxReader(adjointFlag, variantFlag):\n    \"\"\"\n    Returns the appropriate DIF3D nodal flux binary file reader class, either NHFLUX (real) or\n    NAFLUX (adjoint).\n    \"\"\"\n    if adjointFlag:\n        reader = NafluxStreamVariant if variantFlag else NafluxStream\n    else:\n        reader = NhfluxStreamVariant if variantFlag else NhfluxStream\n\n    return reader\n"
  },
  {
    "path": "armi/nuclearDataIO/cccc/pmatrx.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nModule for reading PMATRX files which contain gamma productions from fission reactions.\n\nSee  [GAMSOR]_ and [MC23]_.\n\n.. [MC23] Lee, Changho, Jung, Yeon Sang, and Yang, Won Sik. MC2-3: Multigroup Cross Section\n          Generation Code for Fast Reactor Analysis Nuclear. United States: N. p., 2018. Web.\n          doi:10.2172/1483949. (`OSTI\n          <https://www.osti.gov/biblio/1483949-mc2-multigroup-cross-section-generation-code-fast-reactor-analysis-nuclear>`__)\n\"\"\"\n\nimport traceback\n\nfrom armi import runLog\nfrom armi.nuclearDataIO import cccc, xsLibraries, xsNuclides\nfrom armi.utils import properties\n\n\ndef compare(lib1, lib2):\n    \"\"\"Compare two XSLibraries, and return True if equal, or False if not.\"\"\"\n    equal = True\n    # first check the lib properties (also need to unlock to prevent from getting an exception).\n    equal &= xsLibraries.compareXSLibraryAttribute(lib1, lib2, \"neutronEnergyUpperBounds\")\n    equal &= xsLibraries.compareXSLibraryAttribute(lib1, lib2, \"gammaEnergyUpperBounds\")\n    equal &= xsLibraries.compareXSLibraryAttribute(lib1, lib2, \"neutronDoseConversionFactors\")\n    equal &= xsLibraries.compareXSLibraryAttribute(lib1, lib2, \"gammaDoseConversionFactors\")\n    # compare the meta data\n    equal &= lib1.pmatrxMetadata.compare(lib2.pmatrxMetadata, lib1, lib2)\n    # check the nuclides\n    for 
nucName in set(lib1.nuclideLabels + lib2.nuclideLabels):\n        nuc1 = lib1.get(nucName, None)\n        nuc2 = lib2.get(nucName, None)\n        if nuc1 is None or nuc2 is None:\n            continue\n        equal &= compareNuclideXS(nuc1, nuc2)\n    return equal\n\n\ndef compareNuclideXS(nuc1, nuc2):\n    equal = nuc1.pmatrxMetadata.compare(nuc2.pmatrxMetadata, nuc1.container, nuc2.container)\n    for attrName in [\n        \"neutronHeating\",\n        \"neutronDamage\",\n        \"gammaHeating\",\n        \"isotropicProduction\",\n        \"linearAnisotropicProduction\",\n        \"nOrderProductionMatrix\",\n    ]:\n        val1 = getattr(nuc1, attrName)\n        val2 = getattr(nuc2, attrName)\n        if not properties.numpyHackForEqual(val1, val2):\n            runLog.important(\n                \"{} and {} have different `{}` attributes:\\n{}\\n{}\".format(nuc1, nuc2, attrName, val1, val2)\n            )\n            equal &= False\n    return equal\n\n\ndef addDummyNuclidesToLibrary(lib, dummyNuclides):\n    \"\"\"\n    This method adds DUMMY nuclides to the current PMATRX library.\n\n    Parameters\n    ----------\n    lib : obj\n        PMATRX  library object\n\n    dummyNuclides: list\n        List of DUMMY nuclide objects that will be copied and added to the PMATRX file\n\n    Notes\n    -----\n    Since MC2-3 does not write DUMMY nuclide information for PMATRX files, this is necessary to provide a\n    consistent set of nuclide-level data across all the nuclides in a\n    :py:class:`~armi.nuclearDataIO.xsLibraries.XSLibrary`.\n    \"\"\"\n    if not dummyNuclides:\n        runLog.important(\"No dummy nuclide data provided to be added to {}\".format(lib))\n        return False\n    if len(lib.xsIDs) > 1:\n        runLog.warning(\n            \"Cannot add dummy nuclide data to PMATRX library {} containing data for more than 1 XS ID.\".format(lib)\n        )\n        return False\n    dummyNuclideKeysAddedToLibrary = []\n    for dummy in dummyNuclides:\n  
      dummyKey = dummy.nucLabel + lib.xsIDs[0]\n        if dummyKey in lib:\n            continue\n        runLog.debug(\"Adding {} nuclide data to {}\".format(dummyKey, lib))\n        newDummy = xsNuclides.XSNuclide(lib, dummyKey)\n        newDummy.pmatrxMetadata[\"hasNeutronHeatingAndDamage\"] = False\n        newDummy.pmatrxMetadata[\"maxScatteringOrder\"] = 0\n        newDummy.pmatrxMetadata[\"hasGammaHeating\"] = False\n        newDummy.pmatrxMetadata[\"numberNeutronXS\"] = 0\n        newDummy.pmatrxMetadata[\"collapsingRegionNumber\"] = 0\n        lib[dummyKey] = newDummy\n        dummyNuclideKeysAddedToLibrary.append(dummyKey)\n\n    return any(dummyNuclideKeysAddedToLibrary)\n\n\ndef readBinary(fileName):\n    \"\"\"Read a binary PMATRX file into an :py:class:`~armi.nuclearDataIO.xsLibraries.IsotxsLibrary` object.\"\"\"\n    return _read(fileName, \"rb\")\n\n\ndef readAscii(fileName):\n    \"\"\"Read an ASCII PMATRX file into an :py:class:`~armi.nuclearDataIO.xsLibraries.IsotxsLibrary` object.\"\"\"\n    return _read(fileName, \"r\")\n\n\ndef _read(fileName, fileMode):\n    lib = xsLibraries.IsotxsLibrary()\n    return _readWrite(\n        lib,\n        fileName,\n        fileMode,\n        lambda containerKey: xsNuclides.XSNuclide(lib, containerKey),\n    )\n\n\ndef writeBinary(lib, fileName):\n    \"\"\"Write the PMATRX data from an :py:class:`~armi.nuclearDataIO.xsLibraries.IsotxsLibrary`\n    object to a binary file.\n    \"\"\"\n    return _write(lib, fileName, \"wb\")\n\n\ndef writeAscii(lib, fileName):\n    \"\"\"Write the PMATRX data from an :py:class:`~armi.nuclearDataIO.xsLibraries.IsotxsLibrary`\n    object to an ASCII file.\n    \"\"\"\n    return _write(lib, fileName, \"w\")\n\n\ndef _write(lib, fileName, fileMode):\n    return _readWrite(lib, fileName, fileMode, lambda containerKey: lib[containerKey])\n\n\ndef _readWrite(lib, fileName, fileMode, getNuclideFunc):\n    with PmatrxIO(fileName, lib, fileMode, getNuclideFunc) as rw:\n        
rw.readWrite()\n\n    return lib\n\n\nclass PmatrxIO(cccc.Stream):\n    def __init__(self, fileName, xsLib, fileMode, getNuclideFunc):\n        cccc.Stream.__init__(self, fileName, fileMode)\n        self._lib = xsLib\n        self._metadata = xsLib.pmatrxMetadata\n        self._metadata.fileNames.append(fileName)\n        self._getNuclide = getNuclideFunc\n        self._dummyNuclideKeysAddedToLibrary = []\n\n    def _rwMessage(self):\n        runLog.debug(\"{} PMATRX data {}\".format(\"Reading\" if \"r\" in self._fileMode else \"Writing\", self))\n\n    def readWrite(self):\n        \"\"\"Read and write PMATRX files.\n\n        .. impl:: Tool to read and write PMATRX files.\n            :id: I_ARMI_NUCDATA_PMATRX\n            :implements: R_ARMI_NUCDATA_PMATRX\n\n            Reading and writing PMATRX files is performed using the general\n            nuclear data I/O functionalities described in\n            :need:`I_ARMI_NUCDATA`. Reading/writing a PMATRX file is performed\n            through the following steps:\n\n            #. Read/write global information including:\n\n                * Number of gamma energy groups\n                * Number of neutron energy groups\n                * Maximum scattering order\n                * Maximum number of compositions\n                * Maximum number of materials\n                * Maximum number of regions\n\n            #. Read/write energy group structure for neutrons and gammas\n            #. Read/write dose conversion factors\n            #. 
Read/write gamma production matrices for each nuclide, as well as\n               other reaction constants related to neutron-gamma production.\n        \"\"\"\n        self._rwMessage()\n        properties.unlockImmutableProperties(self._lib)\n        try:\n            numNucs = self._rwFileID()\n            self._rwGroupStructure()\n            self._rwDoseConversionFactor()\n            self._rwIsotopes(numNucs)\n        except Exception:\n            runLog.error(traceback.format_exc())\n            raise OSError(\"Failed to read/write {}\".format(self))\n        finally:\n            properties.lockImmutableProperties(self._lib)\n\n    def _rwFileID(self):\n        with self.createRecord() as record:\n            for name in [\n                \"numberCollapsingSpatialRegions\",\n                \"numGammaGroups\",\n                \"numNeutronGroups\",\n            ]:\n                self._metadata[name] = record.rwInt(self._metadata[name])\n            self._metadata[\"hasInPlateData\"] = record.rwBool(self._metadata[\"hasInPlateData\"])\n            numNucs = record.rwInt(len(self._lib))\n            self._metadata[\"hasDoseConversionFactor\"] = record.rwBool(self._metadata[\"hasDoseConversionFactor\"])\n            for name in [\n                \"maxScatteringOrder\",\n                \"maxNumberOfCompositions\",\n                \"maxMaterials\",\n                \"maxNumberOfRegions\",\n                \"maxNumberOfCollapsingRegions\",\n                \"_dummy1\",\n                \"_dummy2\",\n            ]:\n                self._metadata[name] = record.rwInt(self._metadata[name])\n        return numNucs\n\n    def _rwGroupStructure(self):\n        with self.createRecord() as record:\n            self._lib.neutronEnergyUpperBounds = record.rwMatrix(\n                self._lib.neutronEnergyUpperBounds, self._metadata[\"numNeutronGroups\"]\n            )\n            self._metadata[\"minimumNeutronEnergy\"] = 
record.rwFloat(self._metadata[\"minimumNeutronEnergy\"])\n            # The lower bound energy is included in this list. We'll drop it to maintain consistency with other\n            # libs by holding only the upper bounds.\n            self._lib.gammaEnergyUpperBounds = record.rwMatrix(\n                self._lib.gammaEnergyUpperBounds, self._metadata[\"numGammaGroups\"]\n            )\n            self._metadata[\"minimumGammaEnergy\"] = record.rwFloat(self._metadata[\"minimumGammaEnergy\"])\n\n    def _rwDoseConversionFactor(self):\n        if self._metadata[\"hasDoseConversionFactor\"]:\n            with self.createRecord() as record:\n                self._lib.neutronDoseConversionFactors = record.rwList(\n                    self._lib.neutronDoseConversionFactors,\n                    \"float\",\n                    self._metadata[\"numNeutronGroups\"],\n                )\n                self._lib.gammaDoseConversionFactors = record.rwList(\n                    self._lib.gammaDoseConversionFactors,\n                    \"float\",\n                    self._metadata[\"numGammaGroups\"],\n                )\n\n    def _rwIsotopes(self, numNucs):\n        with self.createRecord() as record:\n            nuclideLabels = record.rwList(self._lib.nuclideLabels, \"string\", numNucs, 8)\n            record.rwList([1000] * numNucs, \"int\", numNucs)\n        numNeutronGroups = self._metadata[\"numNeutronGroups\"]\n        numGammaGroups = self._metadata[\"numGammaGroups\"]\n        for nucLabel in nuclideLabels:\n            nuclide = self._getNuclide(nucLabel)\n            nuclide.updateBaseNuclide()\n            nuclideReader = _PmatrxNuclideIO(nuclide, self, numNeutronGroups, numGammaGroups)\n            nuclideReader.rwNuclide()\n            if \"r\" in self._fileMode:\n                # only add nuclides when reading\n                self._lib[nucLabel] = nuclide\n\n    def _rwCompositions(self):\n        if self._metadata[\"hasInPlateData\"]:\n            raise 
NotImplementedError()\n\n\nclass _PmatrxNuclideIO:\n    def __init__(self, nuclide, pmatrixIO, numNeutronGroups, numGammaGroups):\n        self._nuclide = nuclide\n        self._metadata = nuclide.pmatrxMetadata\n        self._pmatrixIO = pmatrixIO\n        self._numNeutronGroups = numNeutronGroups\n        self._numGammaGroups = numGammaGroups\n\n    def rwNuclide(self):\n        self._rwNuclideHeading()\n        self._rwNeutronHeatingAndDamage()\n        self._rwReactionXS()\n        self._rwGammaHeating()\n        self._rwCellAveragedProductionMatrix()\n\n    def _rwNuclideHeading(self):\n        with self._pmatrixIO.createRecord() as record:\n            self._metadata[\"hasNeutronHeatingAndDamage\"] = record.rwBool(self._metadata[\"hasNeutronHeatingAndDamage\"])\n            self._metadata[\"maxScatteringOrder\"] = record.rwInt(self._metadata[\"maxScatteringOrder\"])\n            self._metadata[\"hasGammaHeating\"] = record.rwBool(self._metadata[\"hasGammaHeating\"])\n            self._metadata[\"numberNeutronXS\"] = record.rwInt(self._metadata[\"numberNeutronXS\"])\n            self._metadata[\"collapsingRegionNumber\"] = record.rwInt(self._metadata[\"collapsingRegionNumber\"])\n\n    def _rwNeutronHeatingAndDamage(self):\n        if not self._metadata[\"hasNeutronHeatingAndDamage\"]:\n            return\n        with self._pmatrixIO.createRecord() as record:\n            self._nuclide.neutronHeating = record.rwMatrix(self._nuclide.neutronHeating, self._numNeutronGroups)\n            self._nuclide.neutronDamage = record.rwMatrix(self._nuclide.neutronDamage, self._numNeutronGroups)\n\n    def _rwReactionXS(self):\n        numActivationXS = self._metadata[\"numberNeutronXS\"]\n        pmatrixParams = self._metadata\n        activationXS = self._metadata[\"activationXS\"] = self._metadata[\"activationXS\"] or [None] * numActivationXS\n        activationMT = self._metadata[\"activationMT\"] = self._metadata[\"activationMT\"] or [None] * numActivationXS\n        
activationMTU = self._metadata[\"activationMTU\"] = self._metadata[\"activationMTU\"] or [None] * numActivationXS\n        for xsNum in range(numActivationXS):\n            with self._pmatrixIO.createRecord() as record:\n                pmatrixParams[\"activationXS\"][xsNum] = record.rwList(activationXS[xsNum], self._numNeutronGroups)\n                pmatrixParams[\"activationMT\"][xsNum] = record.rwInt(activationMT[xsNum])\n                pmatrixParams[\"activationMTU\"][xsNum] = record.rwInt(activationMTU[xsNum])\n\n    def _rwGammaHeating(self):\n        if not self._metadata[\"hasGammaHeating\"]:\n            return\n        with self._pmatrixIO.createRecord() as record:\n            self._nuclide.gammaHeating = record.rwMatrix(self._nuclide.gammaHeating, self._numGammaGroups)\n\n    def _rwCellAveragedProductionMatrix(self):\n        for lrd in range(1, 1 + self._metadata[\"maxScatteringOrder\"]):\n            with self._pmatrixIO.createRecord() as record:\n                prodMatrix = self._getProductionMatrix(lrd)\n                prodMatrix = record.rwMatrix(prodMatrix, self._numNeutronGroups, self._numGammaGroups)\n                self._setProductionMatrix(lrd, prodMatrix)\n\n    def _getProductionMatrix(self, order):\n        if order == 1:\n            return self._nuclide.isotropicProduction\n        elif order == 2:\n            return self._nuclide.linearAnisotropicProduction\n        else:\n            return self._nuclide.nOrderProductionMatrix[order]\n\n    def _setProductionMatrix(self, order, matrix):\n        if order == 1:\n            self._nuclide.isotropicProduction = matrix\n        elif order == 2:\n            self._nuclide.linearAnisotropicProduction = matrix\n        else:\n            self._nuclide.nOrderProductionMatrix[order] = matrix\n"
  },
  {
    "path": "armi/nuclearDataIO/cccc/pwdint.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nRead/write a CCCC PWDINT power density definition file.\n\nPWDINT files power density at mesh intervals.\n\nFile format definition is from [CCCC-IV]_.\n\nExamples\n--------\n>>> pwr = pwdint.readBinary(\"PWDINT\")\n>>> pwdint.writeBinary(pwr, \"PWDINT2\")\n\n\"\"\"\n\nimport numpy as np\n\nfrom armi.nuclearDataIO import cccc\n\nPWDINT = \"PWDINT\"\n\n# See CCCC-IV documentation for definitions\nFILE_SPEC_1D_KEYS = (\n    \"TIME\",\n    \"POWER\",\n    \"VOL\",\n    \"NINTI\",\n    \"NINTJ\",\n    \"NINTK\",\n    \"NCY\",\n    \"NBLOK\",\n)\n\n\nclass PwdintData(cccc.DataContainer):\n    \"\"\"\n    Data representation that can be read from or written to a PWDINT file.\n\n    This contains a mapping from the i,j,k GEODST mesh to power density\n    in Watts/cm^3.\n    \"\"\"\n\n    def __init__(self):\n        cccc.DataContainer.__init__(self)\n        self.powerDensity = np.array([])\n\n\nclass PwdintStream(cccc.StreamWithDataContainer):\n    \"\"\"\n    Stream for reading to/writing from with PWDINT data.\n\n    Parameters\n    ----------\n    power : PwdintData\n        Data structure\n    fileName: str\n        path to pwdint file\n    fileMode: str\n        string indicating if ``fileName`` is being read or written, and\n        in ascii or binary format\n    \"\"\"\n\n    @staticmethod\n    def _getDataContainer() -> PwdintData:\n        return 
PwdintData()\n\n    def readWrite(self):\n        \"\"\"\n        Step through the structure of a PWDINT file and read/write it.\n\n        Logic to control which records will be present is here, which\n        comes directly off the File specification.\n        \"\"\"\n        self._rwFileID()\n        self._rw1DRecord()\n        self._rw2DRecord()\n\n    def _rwFileID(self):\n        with self.createRecord() as record:\n            self._metadata[\"hname\"] = record.rwString(self._metadata[\"hname\"], 8)\n            for name in [\"huse\", \"huse2\"]:\n                self._metadata[name] = record.rwString(self._metadata[name], 6)\n            self._metadata[\"version\"] = record.rwInt(self._metadata[\"version\"])\n            self._metadata[\"mult\"] = record.rwInt(self._metadata[\"mult\"])\n\n    def _rw1DRecord(self):\n        \"\"\"Read/write File specifications on 1D record.\"\"\"\n        with self.createRecord() as record:\n            self._metadata.update(record.rwImplicitlyTypedMap(FILE_SPEC_1D_KEYS, self._metadata))\n\n    def _rw2DRecord(self):\n        \"\"\"Read/write power density by mesh point.\"\"\"\n        imax = self._metadata[\"NINTI\"]\n        jmax = self._metadata[\"NINTJ\"]\n        kmax = self._metadata[\"NINTK\"]\n        nblck = self._metadata[\"NBLOK\"]\n        if self._data.powerDensity.size == 0:\n            # initialize all-zeros here before reading now that we\n            # have the matrix dimension metadata available.\n            self._data.powerDensity = np.zeros(\n                (imax, jmax, kmax),\n                dtype=np.float32,\n            )\n        for ki in range(kmax):\n            for bi in range(nblck):\n                jL, jU = cccc.getBlockBandwidth(bi + 1, jmax, nblck)\n                with self.createRecord() as record:\n                    self._data.powerDensity[:, jL : jU + 1, ki] = record.rwMatrix(\n                        self._data.powerDensity[:, jL : jU + 1, ki],\n                        jU - jL + 
1,\n                        imax,\n                    )\n\n\nreadBinary = PwdintStream.readBinary\nreadAscii = PwdintStream.readAscii\nwriteBinary = PwdintStream.writeBinary\nwriteAscii = PwdintStream.writeAscii\n"
  },
  {
    "path": "armi/nuclearDataIO/cccc/rtflux.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nRead and write the Regular Total flux from a RTFLUX CCCC interface file.\n\nRTFLUX is a CCCC standard data file for storing multigroup total flux on a mesh of any\ngeometry type. It is defined in [CCCC-IV]_.\n\nATFLUX is in the same format but holds adjoint flux rather than regular flux.\n\nExamples\n--------\n>>> flux = rtflux.RtfluxStream.readBinary(\"RTFLUX\")\n>>> rtflux.RtfluxStream.writeBinary(flux, \"RTFLUX2\")\n>>> adjointFlux = rtflux.AtfluxStream.readBinary(\"ATFLUX\")\n\nSee Also\n--------\nNHFLUX\n    Reads/write nodal hex flux moments\n\nRZFLUX\n    Reads/writes total fluxes from zones\n\"\"\"\n\nimport numpy as np\n\nfrom armi.nuclearDataIO import cccc\n\nRTFLUX = \"RTFLUX\"\nATFLUX = \"ATFLUX\"\n\n# See CCCC-IV documentation for definitions\nFILE_SPEC_1D_KEYS = (\n    \"NDIM\",\n    \"NGROUP\",\n    \"NINTI\",\n    \"NINTJ\",\n    \"NINTK\",\n    \"ITER\",\n    \"EFFK\",\n    \"POWER\",\n    \"NBLOK\",\n)\n\n\nclass RtfluxData(cccc.DataContainer):\n    \"\"\"\n    Multigroup flux as a function of i,j,k and g indices.\n\n    The metadata also contains the power and k-eff.\n\n    This is the data structure that is read from or written to a RTFLUX file.\n    \"\"\"\n\n    def __init__(self):\n        cccc.DataContainer.__init__(self)\n\n        self.groupFluxes: np.ndarray = np.array([])\n        \"\"\"Maps i,j,k,g indices to total real or 
adjoint flux in n/cm^2-s\"\"\"\n\n\nclass RtfluxStream(cccc.StreamWithDataContainer):\n    \"\"\"\n    Stream for reading/writing a RTFLUX or ATFLUX file.\n\n    Parameters\n    ----------\n    flux : RtfluxData\n        Data structure\n    fileName: str\n        path to RTFLUX file\n    fileMode: str\n        string indicating if ``fileName`` is being read or written, and\n        in ascii or binary format\n\n    \"\"\"\n\n    @staticmethod\n    def _getDataContainer() -> RtfluxData:\n        return RtfluxData()\n\n    def readWrite(self):\n        \"\"\"Step through the structure of the file and read/write it.\"\"\"\n        self._rwFileID()\n        self._rw1DRecord()\n        if self._metadata[\"NDIM\"] == 1:\n            self._rw2DRecord()\n        elif self._metadata[\"NDIM\"] >= 2:\n            self._rw3DRecord()\n        else:\n            raise ValueError(f\"Invalid NDIM value {self._metadata['NDIM']} in {self}.\")\n\n    def _rwFileID(self):\n        \"\"\"\n        Read/write file id record.\n\n        Notes\n        -----\n        The username, version, etc are embedded in this string but it's\n        usually blank.\n        \"\"\"\n        with self.createRecord() as record:\n            self._metadata[\"label\"] = record.rwString(self._metadata[\"label\"], 28)\n\n    def _rw1DRecord(self):\n        \"\"\"Read/write File specifications on 1D record.\"\"\"\n        with self.createRecord() as record:\n            self._metadata.update(record.rwImplicitlyTypedMap(FILE_SPEC_1D_KEYS, self._metadata))\n\n    def _rw2DRecord(self):\n        \"\"\"Read/write 1-dimensional regular total flux.\"\"\"\n        raise NotImplementedError(\"1-D RTFLUX files are not yet implemented.\")\n\n    def _rw3DRecord(self):\n        \"\"\"\n        Read/write multi-dimensional regular total flux.\n\n        The records contain blocks of values in the i-j planes.\n        \"\"\"\n        ng = self._metadata[\"NGROUP\"]\n        imax = self._metadata[\"NINTI\"]\n        jmax = 
self._metadata[\"NINTJ\"]\n        kmax = self._metadata[\"NINTK\"]\n        nblck = self._metadata[\"NBLOK\"]\n\n        if self._data.groupFluxes.size == 0:\n            self._data.groupFluxes = np.zeros((imax, jmax, kmax, ng))\n\n        for gi in range(ng):\n            gEff = self.getEnergyGroupIndex(gi)\n            for k in range(kmax):\n                # data in i-j plane may be blocked\n                for bi in range(nblck):\n                    # compute blocking parameters\n                    jLow, jUp = cccc.getBlockBandwidth(bi + 1, jmax, nblck)\n                    numZonesInBlock = jUp - jLow + 1\n                    with self.createRecord() as record:\n                        # pass in shape in fortran (read) order\n                        self._data.groupFluxes[:, jLow : jUp + 1, k, gEff] = record.rwDoubleMatrix(\n                            self._data.groupFluxes[:, jLow : jUp + 1, k, gEff],\n                            numZonesInBlock,\n                            imax,\n                        )\n\n    def getEnergyGroupIndex(self, g):\n        r\"\"\"\n        Real fluxes stored in RTFLUX have \"normal\" (or \"forward\") energy groups.\n        Also see the subclass method ATFLUX.getEnergyGroupIndex().\n\n        0 based, so if NG=33 and you want the third group, this return 2.\n        \"\"\"\n        return g\n\n\nclass AtfluxStream(RtfluxStream):\n    r\"\"\"\n    This is a subclass for the ATFLUX file, which is identical in format to the RTFLUX file except\n    that it contains the adjoint flux and has reversed energy group ordering.\n    \"\"\"\n\n    def getEnergyGroupIndex(self, g):\n        r\"\"\"\n        Adjoint fluxes stored in ATFLUX have \"reversed\" (or \"backward\") energy groups.\n\n        0 based, so if NG=33 and you want the third group (g=2), this returns 30.\n        \"\"\"\n        ng = self._metadata[\"NGROUP\"]\n        return ng - g - 1\n\n\ndef getFDFluxReader(adjointFlag):\n    r\"\"\"\n    Returns the appropriate 
DIF3D FD flux binary file reader class,\n    either RTFLUX (real) or ATFLUX (adjoint).\n    \"\"\"\n    if adjointFlag:\n        return AtfluxStream\n    else:\n        return RtfluxStream\n"
  },
  {
    "path": "armi/nuclearDataIO/cccc/rzflux.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nModule for reading/writing RZFLUX CCCC interface files.\n\nRZFLUX contains Regular Zone Flux, or multigroup flux by neutron energy group\nin each zone. It also can hold some convergence and neutron balance information.\n\nThe format is defined in [CCCC-IV]_.\n\nExamples\n--------\n>>> flux = rzflux.readBinary(\"RZFLUX\")\n>>> flux.groupFluxes[2, 0] *= 1.1\n>>> rzflux.writeBinary(flux, \"RZFLUX2\")\n>>> rzflux.writeAscii(flux, \"RZFLUX2.ascii\")\n\"\"\"\n\nfrom enum import Enum\n\nimport numpy as np\n\nfrom armi.nuclearDataIO import cccc\n\nRZFLUX = \"RZFLUX\"\n# See CCCC-IV documentation for definitions\nFILE_SPEC_1D_KEYS = (\n    \"TIME\",\n    \"POWER\",\n    \"VOL\",\n    \"EFFK\",\n    \"EIVS\",\n    \"DKDS\",\n    \"TNL\",\n    \"TNA\",\n    \"TNSL\",\n    \"TNBL\",\n    \"TNBAL\",\n    \"TNCRA\",\n    \"X1\",\n    \"X2\",\n    \"X3\",\n    \"NBLOK\",\n    \"ITPS\",\n    \"NZONE\",\n    \"NGROUP\",\n    \"NCY\",\n)\n\n\nclass Convergence(Enum):\n    \"\"\"Convergence behavior flags for ITPS from RZFLUX file.\"\"\"\n\n    NO_ITERATIONS = 0\n    CONVERGED = 1\n    CONVERGING = 2\n    DIVERGING = 3\n\n\nclass RzfluxData(cccc.DataContainer):\n    \"\"\"\n    Data representation that can be read from or written to a RZFLUX file.\n\n    Notes\n    -----\n    Analogous to a IsotxsLibrary for ISTOXS files.\n    \"\"\"\n\n    def __init__(self):\n        
cccc.DataContainer.__init__(self)\n        # 2D data\n        self.groupFluxes = None\n\n\nclass RzfluxStream(cccc.StreamWithDataContainer):\n    \"\"\"\n    Stream for reading to/writing from with RZFLUX data.\n\n    Parameters\n    ----------\n    flux : RzfluxData\n        Data structure\n    fileName: str\n        path to RZFLUX file\n    fileMode: str\n        string indicating if ``fileName`` is being read or written, and\n        in ascii or binary format\n    \"\"\"\n\n    @staticmethod\n    def _getDataContainer() -> RzfluxData:\n        return RzfluxData()\n\n    def readWrite(self):\n        \"\"\"Step through the structure of the file and read/write it.\"\"\"\n        self._rwFileID()\n        self._rw1DRecord()\n        self._rw2DRecord()\n\n    def _rwFileID(self):\n        \"\"\"\n        Read/write file id record.\n\n        Notes\n        -----\n        The username, version, etc are embedded in this string but it's\n        usually blank. The number 28 was actually obtained from\n        a hex editor and may be code specific.\n        \"\"\"\n        with self.createRecord() as record:\n            self._metadata[\"label\"] = record.rwString(self._metadata[\"label\"], 28)\n\n    def _rw1DRecord(self):\n        \"\"\"Read/write File specifications on 1D record.\"\"\"\n        with self.createRecord() as record:\n            vals = record.rwImplicitlyTypedMap(FILE_SPEC_1D_KEYS, self._metadata)\n            self._metadata.update(vals)\n\n    def _rw2DRecord(self):\n        \"\"\"\n        Read/write the multigroup fluxes (n/cm^2-s) into a NxG matrix.\n\n        Notes\n        -----\n        Zones are blocked into multiple records so we have to block or unblock\n        them.\n\n        rwMatrix reverses the indices into FORTRAN data order so be\n        very careful with the indices.\n        \"\"\"\n        nz = self._metadata[\"NZONE\"]\n        ng = self._metadata[\"NGROUP\"]\n        nb = self._metadata[\"NBLOK\"]\n        if 
self._data.groupFluxes is None:\n            # initialize all-zeros here before reading now that we\n            # have the matrix dimension metadata available.\n            self._data.groupFluxes = np.zeros(\n                (ng, nz),\n                dtype=np.float32,\n            )\n        for bi in range(nb):\n            jLow, jUp = cccc.getBlockBandwidth(bi + 1, nz, nb)\n            numZonesInBlock = jUp - jLow + 1\n            with self.createRecord() as record:\n                # pass in shape in fortran (read) order\n                self._data.groupFluxes[:, jLow : jUp + 1] = record.rwMatrix(\n                    self._data.groupFluxes[:, jLow : jUp + 1],\n                    numZonesInBlock,\n                    ng,\n                )\n\n\nreadBinary = RzfluxStream.readBinary\nreadAscii = RzfluxStream.readAscii\nwriteBinary = RzfluxStream.writeBinary\nwriteAscii = RzfluxStream.writeAscii\n"
  },
  {
    "path": "armi/nuclearDataIO/cccc/tests/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"
  },
  {
    "path": "armi/nuclearDataIO/cccc/tests/test_cccc.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Test CCCC.\"\"\"\n\nimport io\nimport unittest\n\nfrom armi.nuclearDataIO import cccc\n\n\nclass CcccIOStreamTests(unittest.TestCase):\n    def test_initWithFileMode(self):\n        self.assertIsInstance(cccc.Stream(\"some-file\", \"rb\"), cccc.Stream)\n        self.assertIsInstance(cccc.Stream(\"some-file\", \"wb\"), cccc.Stream)\n        self.assertIsInstance(cccc.Stream(\"some-file\", \"r\"), cccc.Stream)\n        self.assertIsInstance(cccc.Stream(\"some-file\", \"w\"), cccc.Stream)\n        with self.assertRaises(KeyError):\n            cccc.Stream(\"some-file\", \"bacon\")\n\n\nclass CcccBinaryRecordTests(unittest.TestCase):\n    @classmethod\n    def setUpClass(cls):\n        cls.writerClass = cccc.BinaryRecordWriter\n        cls.readerClass = cccc.BinaryRecordReader\n\n    def setUp(self):\n        self.streamCls = io.BytesIO\n\n    def test_writeAndReadSimpleIntegerRecord(self):\n        value = 42\n        stream = self.streamCls()\n        with self.writerClass(stream) as writer:\n            writer.rwInt(value)\n        with self.readerClass(self.streamCls(stream.getvalue())) as reader:\n            self.assertEqual(writer.numBytes, reader.numBytes)\n            self.assertEqual(value, reader.rwInt(None))\n        self.assertEqual(4, writer.numBytes)\n\n    def test_writeAndReadSimpleFloatRecord(self):\n        stream = self.streamCls()\n        
value = -33.322222\n        with self.writerClass(stream) as writer:\n            writer.rwFloat(value)\n        with self.readerClass(self.streamCls(stream.getvalue())) as reader:\n            self.assertEqual(writer.numBytes, reader.numBytes)\n            self.assertAlmostEqual(value, reader.rwFloat(None), 5)\n        self.assertEqual(4, writer.numBytes)\n\n    def test_writeAndReadSimpleStringRecord(self):\n        stream = self.streamCls()\n        value = \"Howdy, partner!\"\n        size = 8 * 8\n        with self.writerClass(stream) as writer:\n            writer.rwString(value, size)\n        with self.readerClass(self.streamCls(stream.getvalue())) as reader:\n            self.assertEqual(writer.numBytes, reader.numBytes)\n            self.assertEqual(value, reader.rwString(None, size))\n        self.assertEqual(size, writer.numBytes)\n\n    def test_readPartialRecord(self):\n        \"\"\"Not reading an entire record raises an exception.\"\"\"\n        # I'm going to create a record with two pieces of data, and only read one...\n        stream = self.streamCls()\n        value = 99\n        with self.writerClass(stream) as writer:\n            writer.rwInt(value)\n            writer.rwInt(value)\n\n        self.assertEqual(8, writer.numBytes)\n        with self.assertRaises(BufferError):\n            with self.readerClass(self.streamCls(stream.getvalue())) as reader:\n                self.assertEqual(value, reader.rwInt(None))\n\n    def test_readingBeyondRecordRaisesException(self):\n        # I'm going to create a record with two pieces of data, and only read one...\n        stream = self.streamCls()\n        value = 77\n        with self.writerClass(stream) as writer:\n            writer.rwInt(value)\n\n        self.assertEqual(4, writer.numBytes)\n        with self.assertRaises(BufferError):\n            with self.readerClass(self.streamCls(stream.getvalue())) as reader:\n                self.assertEqual(value, reader.rwInt(None))\n                
self.assertEqual(4, reader.rwInt(None))\n\n\nclass CcccAsciiRecordTests(CcccBinaryRecordTests):\n    \"\"\"Runs the same tests as CcccBinaryRecordTests, but using ASCII readers and writers.\"\"\"\n\n    @classmethod\n    def setUpClass(cls):\n        cls.writerClass = cccc.AsciiRecordWriter\n        cls.readerClass = cccc.AsciiRecordReader\n\n    def setUp(self):\n        self.streamCls = io.StringIO\n"
  },
  {
    "path": "armi/nuclearDataIO/cccc/tests/test_compxs.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Test the COMPXS reader/writer with a simple problem.\"\"\"\n\nimport os\nimport unittest\n\nimport numpy as np\nfrom scipy.sparse import csc_matrix\n\nfrom armi import nuclearDataIO\nfrom armi.nuclearDataIO.cccc import compxs\nfrom armi.nuclearDataIO.xsLibraries import CompxsLibrary\nfrom armi.tests import COMPXS_PATH\nfrom armi.utils.directoryChangers import TemporaryDirectoryChanger\n\n\nclass TestCompxs(unittest.TestCase):\n    \"\"\"Test the compxs reader/writer.\"\"\"\n\n    @property\n    def binaryWritePath(self):\n        return os.path.join(self._testMethodName + \"compxs-b\")\n\n    @property\n    def asciiWritePath(self):\n        return os.path.join(self._testMethodName + \"compxs-a.txt\")\n\n    @classmethod\n    def setUpClass(cls):\n        try:\n            cls.lib = compxs.readAscii(COMPXS_PATH)\n        except Exception as ee:\n            raise Exception(\"Failed to load COMPXS ascii.\\n{}\".format(ee))\n        cls.fissileRegion = cls.lib.regions[1]\n        cls.numGroups = cls.lib.compxsMetadata[\"numGroups\"]\n\n    def test_libraryData(self):\n        \"\"\"Test library data including energy group information and number of compositions.\"\"\"\n        self.assertEqual(11, self.numGroups)\n        self.assertEqual(14190675.0, max(self.lib.neutronEnergyUpperBounds))\n        self.assertAlmostEqual(0.41745778918, 
min(self.lib.neutronEnergyUpperBounds))\n\n    def test_regionPrimaryXS(self):\n        \"\"\"Test the primary cross sections for the second region - fissile.\"\"\"\n        expectedMacros = {\n            \"absorption\": [\n                0.00810444,\n                0.0049346,\n                0.00329084,\n                0.00500318,\n                0.00919719,\n                0.01548523,\n                0.02816499,\n                0.04592259,\n                0.09402685,\n                0.12743879,\n                0.20865865,\n            ],\n            \"fission\": [\n                0.00720288,\n                0.00398085,\n                0.00181345,\n                0.00236554,\n                0.00341723,\n                0.00564286,\n                0.0110835,\n                0.0211668,\n                0.04609869,\n                0.09673319,\n                0.16192732,\n            ],\n            \"total\": [\n                0.18858715,\n                0.18624092,\n                0.22960965,\n                0.27634201,\n                0.33255093,\n                0.61437815,\n                0.42582573,\n                0.48091191,\n                0.4931102,\n                0.49976887,\n                0.58214497,\n            ],\n            \"removal\": [\n                0.07268185,\n                0.03577923,\n                0.01127517,\n                0.01003666,\n                0.01254067,\n                0.02686466,\n                0.02881869,\n                0.04606618,\n                0.09605395,\n                0.13462841,\n                0.20865865,\n            ],\n            \"transport\": [\n                0.10812569,\n                0.13096095,\n                0.18227532,\n                0.24610402,\n                0.29647433,\n                0.55842311,\n                0.40818328,\n                0.45512788,\n                0.45669781,\n                0.49153138,\n                0.55067248,\n        
    ],\n            \"nuSigF\": [\n                0.02247946,\n                0.01047702,\n                0.00449566,\n                0.00576889,\n                0.00829842,\n                0.01373361,\n                0.02697533,\n                0.05151573,\n                0.11224934,\n                0.23570964,\n                0.39456832,\n            ],\n            \"chi\": [\n                [1.38001099e-01],\n                [6.28044390e-01],\n                [2.04412257e-01],\n                [2.63437497e-02],\n                [2.85959793e-03],\n                [3.03098935e-04],\n                [3.19825784e-05],\n                [3.42715844e-06],\n                [3.00034836e-07],\n                [3.87667231e-08],\n                [2.66151779e-13],\n            ],\n        }\n        for xsName, expectedXS in expectedMacros.items():\n            actualXS = self.fissileRegion.macros[xsName]\n            self.assertTrue(np.allclose(actualXS, expectedXS))\n\n    def test_totalScatterMatrix(self):\n        \"\"\"\n        Test the total scattering matrix by comparing the sparse components.\n\n        Sparse matrices can be constructed from three vectors: data, indices, and indptr.\n        For column matrix, the row indices for column ``j`` are stored in\n        ``indices[indptr[j]:indptr[j + 1]]`` and the corresponding data is stored in\n        ``data[indptr[j]:indptr[j + 1]]``.\n\n        See Also\n        --------\n        scipy.sparse.csc_matrix\n        \"\"\"\n        expectedSparseData = np.array(\n            [\n                1.15905297e-01,\n                1.50461698e-01,\n                4.19181830e-02,\n                2.18334481e-01,\n                2.66726391e-02,\n                2.06841438e-02,\n                2.66305350e-01,\n                7.93398724e-03,\n                3.74972053e-03,\n                2.82068371e-03,\n                3.20010257e-01,\n                4.98916288e-03,\n                4.64327778e-05,\n        
        3.62943322e-04,\n                2.33116653e-04,\n                5.87513494e-01,\n                3.33728477e-03,\n                4.05355062e-05,\n                3.40557886e-06,\n                5.05978110e-05,\n                2.44368007e-05,\n                3.97007043e-01,\n                1.13794357e-02,\n                5.81324838e-06,\n                3.57958695e-06,\n                4.21100811e-07,\n                6.02755319e-06,\n                3.70765519e-06,\n                4.34845744e-01,\n                6.53692627e-04,\n                3.65838392e-07,\n                1.91840932e-07,\n                6.47891881e-08,\n                4.70903065e-07,\n                7.53010883e-07,\n                3.97056267e-01,\n                1.43584939e-04,\n                1.69959524e-08,\n                7.63482393e-09,\n                1.07996799e-08,\n                7.79766262e-08,\n                1.42976480e-07,\n                3.65140459e-01,\n                2.02709238e-03,\n                1.62021799e-09,\n                1.25812112e-09,\n                3.39504415e-09,\n                2.13443401e-06,\n                7.75326455e-06,\n                3.73486301e-01,\n                7.18962870e-03,\n                4.72605255e-15,\n                5.11975260e-13,\n                1.25417930e-08,\n                4.57563838e-08,\n            ]\n        )\n\n        expectedSparseIndices = [\n            0,\n            1,\n            0,\n            2,\n            1,\n            0,\n            3,\n            2,\n            1,\n            0,\n            4,\n            3,\n            2,\n            1,\n            0,\n            5,\n            4,\n            3,\n            2,\n            1,\n            0,\n            6,\n            5,\n            4,\n            3,\n            2,\n            1,\n            0,\n            7,\n            6,\n            4,\n            3,\n            2,\n            1,\n            
0,\n            8,\n            7,\n            4,\n            3,\n            2,\n            1,\n            0,\n            9,\n            8,\n            4,\n            3,\n            2,\n            1,\n            0,\n            10,\n            9,\n            4,\n            2,\n            1,\n            0,\n        ]\n\n        expectedSparseIndptr = [0, 1, 3, 6, 10, 15, 21, 28, 35, 42, 49, 55]\n\n        actualTotalScatter = self.fissileRegion.macros.totalScatter.toarray()\n        expectedTotalScatter = csc_matrix(\n            (expectedSparseData, expectedSparseIndices, expectedSparseIndptr),\n            actualTotalScatter.shape,\n        ).toarray()\n\n        self.assertTrue(np.allclose(actualTotalScatter, expectedTotalScatter))\n\n    def test_binaryRW(self):\n        \"\"\"Test to make sure the binary read/writer reads/writes the exact same library.\"\"\"\n        with TemporaryDirectoryChanger():\n            compxs.writeBinary(self.lib, self.binaryWritePath)\n            self.assertTrue(compxs.compare(self.lib, compxs.readBinary(self.binaryWritePath)))\n\n    def test_asciiRW(self):\n        \"\"\"Test to make sure the ascii reader/writer reads/writes the exact same library.\"\"\"\n        with TemporaryDirectoryChanger():\n            compxs.writeAscii(self.lib, self.asciiWritePath)\n            self.assertTrue(compxs.compare(self.lib, compxs.readAscii(self.asciiWritePath)))\n\n    def test_mergeCompxsLibraries(self):\n        \"\"\"Test to verify the compxs merging returns a library with new regions.\"\"\"\n        someLib = CompxsLibrary()\n        someLib.merge(self.lib)\n        self.assertEqual(len(self.lib.regions), len(someLib.regions))\n        self.assertTrue(self.lib.compxsMetadata.compare(someLib.compxsMetadata, self.lib, someLib))\n\n    def test_getCOMPXSFileName(self):\n        self.assertEqual(nuclearDataIO.getExpectedCOMPXSFileName(cycle=0), \"COMPXS-c0\")\n        
self.assertEqual(nuclearDataIO.getExpectedCOMPXSFileName(cycle=1), \"COMPXS-c1\")\n        self.assertEqual(nuclearDataIO.getExpectedCOMPXSFileName(cycle=23), \"COMPXS-c23\")\n        self.assertEqual(nuclearDataIO.getExpectedCOMPXSFileName(), \"COMPXS\")\n"
  },
  {
    "path": "armi/nuclearDataIO/cccc/tests/test_dif3d.py",
    "content": "# Copyright 2023 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Test reading/writing of DIF3D binary input.\"\"\"\n\nimport os\nimport unittest\n\nfrom armi.nuclearDataIO.cccc import dif3d\nfrom armi.utils.directoryChangers import TemporaryDirectoryChanger\n\nTHIS_DIR = os.path.dirname(__file__)\n\nSIMPLE_HEXZ_INP = os.path.join(THIS_DIR, \"../../tests\", \"simple_hexz.inp\")\nSIMPLE_HEXZ_DIF3D = os.path.join(THIS_DIR, \"fixtures\", \"simple_hexz.dif3d\")\n\n\nclass TestDif3dSimpleHexz(unittest.TestCase):\n    @classmethod\n    def setUpClass(cls):\n        \"\"\"\n        Load DIF3D data from binary file. This binary file was generated by running\n        dif3d.exe v11.0r3284 on the SIMPLE_HEXZ_INP file above (and renaming the DIF3D\n        binary file to simple_hexz.dif3d).\n        \"\"\"\n        cls.df = dif3d.Dif3dStream.readBinary(SIMPLE_HEXZ_DIF3D)\n\n    def test__rwFileID(self):\n        \"\"\"Verify the file identification info.\n\n        .. 
test:: Test reading DIF3D files.\n            :id: T_ARMI_NUCDATA_DIF3D0\n            :tests: R_ARMI_NUCDATA_DIF3D\n        \"\"\"\n        self.assertEqual(self.df.metadata[\"HNAME\"], \"DIF3D\")\n        self.assertEqual(self.df.metadata[\"HUSE1\"], \"\")\n        self.assertEqual(self.df.metadata[\"HUSE2\"], \"\")\n        self.assertEqual(self.df.metadata[\"VERSION\"], 1)\n\n    def test__rwFile1DRecord(self):\n        \"\"\"Verify the rest of the metadata.\n\n        .. test:: Test reading DIF3D files.\n            :id: T_ARMI_NUCDATA_DIF3D1\n            :tests: R_ARMI_NUCDATA_DIF3D\n        \"\"\"\n        TITLE_A6 = [\"3D Hex\", \"-Z to\", \"genera\", \"te NHF\", \"LUX fi\", \"le\"]\n        EXPECTED_TITLE = TITLE_A6 + [\"\"] * 5\n        for i in range(dif3d.TITLE_RANGE):\n            self.assertEqual(self.df.metadata[f\"TITLE{i}\"], EXPECTED_TITLE[i])\n        self.assertEqual(self.df.metadata[\"MAXSIZ\"], 10000)\n        self.assertEqual(self.df.metadata[\"MAXBLK\"], 1800000)\n        self.assertEqual(self.df.metadata[\"IPRINT\"], 0)\n\n    def test__rw2DRecord(self):\n        \"\"\"Verify the control parameters.\"\"\"\n        EXPECTED_2D = [\n            0,\n            0,\n            0,\n            10000,\n            30,\n            0,\n            1000000000,\n            5,\n            0,\n            0,\n            50,\n            0,\n            1,\n            1,\n            0,\n            0,\n            0,\n            110,\n            10,\n            100,\n            1,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            10,\n            40,\n            32,\n            0,\n            0,\n            2,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n            0,\n        ]\n        for i, param in 
enumerate(dif3d.FILE_SPEC_2D_PARAMS):\n            self.assertEqual(self.df.twoD[param], EXPECTED_2D[i])\n\n    def test__rw3DRecord(self):\n        \"\"\"Verify the convergence criteria and other floating point data.\"\"\"\n        EXPECTED_3D = [\n            1e-7,\n            1e-5,\n            1e-5,\n            3.823807613470224e-01,\n            1e-3,\n            4e-2,\n            1e0,\n            0e0,\n            0e0,\n            9.999999747378752e-05,\n        ] + [0.0 for i in range(1, 21)]\n        for i, param in enumerate(dif3d.FILE_SPEC_3D_PARAMS):\n            self.assertEqual(self.df.threeD[param], EXPECTED_3D[i])\n\n    def test__rw4DRecord(self):\n        \"\"\"Verify the optimum overrelaxation factors.\"\"\"\n        self.assertEqual(self.df.fourD, None)\n\n    def test__rw5DRecord(self):\n        \"\"\"Verify the axial coarse-mesh rebalance boundaries.\"\"\"\n        self.assertEqual(self.df.fiveD, None)\n\n    def test_writeBinary(self):\n        \"\"\"Verify binary equivalence of written DIF3D file.\n\n        .. 
test:: Test writing DIF3D files.\n            :id: T_ARMI_NUCDATA_DIF3D2\n            :tests: R_ARMI_NUCDATA_DIF3D\n        \"\"\"\n        with TemporaryDirectoryChanger():\n            dif3d.Dif3dStream.writeBinary(self.df, \"DIF3D2\")\n            with open(SIMPLE_HEXZ_DIF3D, \"rb\") as f1, open(\"DIF3D2\", \"rb\") as f2:\n                expectedData = f1.read()\n                actualData = f2.read()\n            for expected, actual in zip(expectedData, actualData):\n                self.assertEqual(expected, actual)\n\n\nclass TestDif3dEmptyRecords(unittest.TestCase):\n    def test_empty4and5Records(self):\n        \"\"\"Since the inputs result in these being None, get test coverage another way.\"\"\"\n        df = dif3d.Dif3dStream.readBinary(SIMPLE_HEXZ_DIF3D)\n        # Hack some values that allow 4 and 5 records to be populated \\\n        # and then populate them\n        df.twoD[\"NUMORP\"] = 1\n        df.twoD[\"NCMRZS\"] = 1\n        df.fourD = {\"OMEGA1\": 1.0}\n        df.fiveD = {\"ZCMRC1\": 1.0, \"NZINTS1\": 10}\n        with TemporaryDirectoryChanger():\n            # Write then read a new one\n            dif3d.Dif3dStream.writeBinary(df, \"DIF3D2\")\n            df2 = dif3d.Dif3dStream.readBinary(\"DIF3D2\")\n            # Kind of a null test, but this coverage caught some code mistakes!\n            self.assertEqual(df2.fourD, df.fourD)\n            self.assertEqual(df2.fiveD, df.fiveD)\n"
  },
  {
    "path": "armi/nuclearDataIO/cccc/tests/test_fixsrc.py",
    "content": "# Copyright 2024 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Test the reading and writing of the DIF3D FIXSRC file format.\"\"\"\n\nimport os\nimport unittest\n\nimport numpy as np\n\nfrom armi.nuclearDataIO.cccc import fixsrc\nfrom armi.utils.directoryChangers import TemporaryDirectoryChanger\n\n# ruff: noqa: E501\nFIXSRC_ASCII = \"\"\"0 0 0 0 0 0 0.4008E+10 0.4210E+10 0.4822E+10 0.5154E+10 0.4926E+10 0.4621E+10\n0.4246E+10 0.3757E+10 0.3311E+10 0.3479E+10 0.357E+10 0.324E+10 0.2942E+10 0.2903E+10 0.2925E+10 0.2763E+10 0.2414E+10 0.2036E+10\n0.1656E+10 0.1477E+10 0.1455E+10 0.1434E+10 0.1297E+10 0.1153E+10 0.101E+10 0.8841E+9 0.7923E+9 0.7266E+9 0.6575E+9 0.589E+9\n0.5027E+9 0.4146E+9 0.3474E+9 0.3015E+9 0.2403E+9 0.2356E+9 0.1634E+9 0.1521E+9 0.1258E+9 0.9032E+8 0.6156E+8 0.3983E+8\n0.3134E+8 0.303E+8 0.2983E+8 0 0 0 0 0 0 0 0 0\n0 0 0 0 0 0 0 0 0 0 0 0\"\"\"\nFIXSRC_ARRAY = np.array(FIXSRC_ASCII.split(), dtype=np.float32).reshape((3, 3, 2, 4))\n\n\nclass TestFixsrc(unittest.TestCase):\n    def test_writeReadBinaryLoop(self):\n        with TemporaryDirectoryChanger() as newDir:\n            fileName = \"fixsrc_writeBinary.bin\"\n            binaryFilePath = os.path.join(newDir.destination, fileName)\n            fixsrc.writeBinary(binaryFilePath, FIXSRC_ARRAY)\n\n            self.assertIn(fileName, os.listdir(newDir.destination))\n            self.assertGreater(os.path.getsize(binaryFilePath), 0)\n"
  },
  {
    "path": "armi/nuclearDataIO/cccc/tests/test_gamiso.py",
    "content": "# Copyright 2022 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Test GAMISO reading and writing.\"\"\"\n\nimport os\nimport unittest\nfrom copy import deepcopy\n\nfrom armi.nuclearDataIO import xsLibraries\nfrom armi.nuclearDataIO.cccc import gamiso, isotxs\nfrom armi.nuclearDataIO.xsNuclides import XSNuclide\nfrom armi.utils.directoryChangers import TemporaryDirectoryChanger\n\nTHIS_DIR = os.path.dirname(__file__)\nFIXTURE_DIR = os.path.join(THIS_DIR, \"..\", \"..\", \"tests\", \"fixtures\")\nGAMISO_AA = os.path.join(FIXTURE_DIR, \"AA.gamiso\")\n\n\nclass TestGamiso(unittest.TestCase):\n    def setUp(self):\n        self.xsLib = xsLibraries.IsotxsLibrary()\n\n    def test_compare(self):\n        \"\"\"Compare the input binary GAMISO file.\n\n        .. test:: Test reading GAMISO files.\n            :id: T_ARMI_NUCDATA_GAMISO0\n            :tests: R_ARMI_NUCDATA_GAMISO\n        \"\"\"\n        gamisoAA = gamiso.readBinary(GAMISO_AA)\n        self.xsLib.merge(deepcopy(gamisoAA))\n        self.assertTrue(gamiso.compare(self.xsLib, gamisoAA))\n\n    def test_writeBinary(self):\n        \"\"\"Write a binary GAMISO file.\n\n        .. 
test:: Test writing GAMISO files.\n            :id: T_ARMI_NUCDATA_GAMISO1\n            :tests: R_ARMI_NUCDATA_GAMISO\n        \"\"\"\n        with TemporaryDirectoryChanger():\n            data = gamiso.readBinary(GAMISO_AA)\n            binData = gamiso.writeBinary(data, \"gamiso.out\")\n            self.assertTrue(gamiso.compare(data, binData))\n\n    def test_addDummyNuclidesToLibrary(self):\n        dummyNuclides = [XSNuclide(None, \"U238AA\")]\n        before = self.xsLib.getNuclides(\"\")\n        self.assertEqual(len(self.xsLib.xsIDs), 0)\n        self.assertTrue(gamiso.addDummyNuclidesToLibrary(self.xsLib, dummyNuclides))\n        self.assertEqual(len(self.xsLib.xsIDs), 1)\n        self.assertEqual(list(self.xsLib.xsIDs)[0], \"38\")\n\n        after = self.xsLib.getNuclides(\"\")\n        self.assertGreater(len(after), len(before))\n\n        diff = set(after).difference(set(before))\n        self.assertEqual(len(diff), 1)\n        self.assertEqual(list(diff)[0].xsId, \"38\")\n\n    def test_addDummyNuclidesToLibraryNumGroups(self):\n        isoLib = isotxs.readBinary(os.path.join(FIXTURE_DIR, \"ISOAA\"))\n        gamLib = gamiso.readBinary(GAMISO_AA)\n        gamLib.gamisoMetadata[\"numGroups\"] = 50\n        dummyNuc = XSNuclide(isoLib, \"DMP1AA\")\n        dummyNuc.isotxsMetadata = isoLib.getNuclides(\"AA\")[0].isotxsMetadata\n        gamiso.addDummyNuclidesToLibrary(gamLib, [dummyNuc])\n        self.assertEqual(gamLib[\"DMP1AA\"].nucLabel, \"DMP1\")\n        self.assertEqual(gamLib[\"DMP1AA\"].gamisoMetadata[\"jband\"][(49, 3)], 1)\n"
  },
  {
    "path": "armi/nuclearDataIO/cccc/tests/test_geodst.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Test GEODST reading and writing.\"\"\"\n\nimport os\nimport unittest\n\nfrom numpy.testing import assert_equal\n\nfrom armi.nuclearDataIO.cccc import geodst\nfrom armi.utils.directoryChangers import TemporaryDirectoryChanger\n\nTHIS_DIR = os.path.dirname(__file__)\nSIMPLE_GEODST = os.path.join(THIS_DIR, \"fixtures\", \"simple_hexz.geodst\")\n\n\nclass TestGeodst(unittest.TestCase):\n    \"\"\"\n    Tests the GEODST class.\n\n    This reads from a GEODST file that was created using DIF3D 11 on a small\n    test hex reactor in 1/3 geometry.\n    \"\"\"\n\n    def test_readGeodst(self):\n        \"\"\"Ensure we can read a GEODST file.\n\n        .. 
test:: Test reading GEODST files.\n            :id: T_ARMI_NUCDATA_GEODST0\n            :tests: R_ARMI_NUCDATA_GEODST\n        \"\"\"\n        geo = geodst.readBinary(SIMPLE_GEODST)\n        self.assertEqual(geo.metadata[\"IGOM\"], 18)\n        self.assertAlmostEqual(geo.xmesh[1], 16.79, places=5)  # hex pitch\n        self.assertAlmostEqual(geo.zmesh[-1], 448.0, places=5)  # top of reactor in cm\n        self.assertEqual(geo.coarseMeshRegions.shape, (10, 10, len(geo.zmesh) - 1))\n        self.assertEqual(geo.coarseMeshRegions.min(), 0)\n        self.assertEqual(geo.coarseMeshRegions.max(), geo.metadata[\"NREG\"])\n\n    def test_writeGeodst(self):\n        \"\"\"Ensure that we can write a modified GEODST.\n\n        .. test:: Test writing GEODST files.\n            :id: T_ARMI_NUCDATA_GEODST1\n            :tests: R_ARMI_NUCDATA_GEODST\n        \"\"\"\n        with TemporaryDirectoryChanger():\n            geo = geodst.readBinary(SIMPLE_GEODST)\n            geo.zmesh[-1] *= 2\n            geodst.writeBinary(geo, \"GEODST2\")\n            geo2 = geodst.readBinary(\"GEODST2\")\n            self.assertAlmostEqual(geo2.zmesh[-1], 448.0 * 2, places=5)\n            assert_equal(geo.kintervals, geo2.kintervals)\n"
  },
  {
    "path": "armi/nuclearDataIO/cccc/tests/test_isotxs.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests the workings of the library wrappers.\"\"\"\n\nimport unittest\n\nfrom armi import nuclearDataIO\nfrom armi.nucDirectory.nuclideBases import NuclideBases\nfrom armi.nuclearDataIO import xsLibraries\nfrom armi.nuclearDataIO.cccc import isotxs\nfrom armi.tests import ISOAA_PATH\nfrom armi.utils.directoryChangers import TemporaryDirectoryChanger\n\n\nclass TestIsotxs(unittest.TestCase):\n    \"\"\"Tests the ISOTXS class.\"\"\"\n\n    @classmethod\n    def setUpClass(cls):\n        # load a library that is in the ARMI tree. This should\n        # be a small library with LFPs, Actinides, structure, and coolant\n        cls.lib = isotxs.readBinary(ISOAA_PATH)\n\n    def test_writeBinary(self):\n        \"\"\"Test reading in an ISOTXS file, and then writing it back out again.\n\n        Now, the library here can't guarantee the output will be the same as the\n        input. But we can guarantee the  written file is still valid, by reading\n        it again.\n\n        .. 
test:: Write ISOTXS binary files.\n            :id: T_ARMI_NUCDATA_ISOTXS0\n            :tests: R_ARMI_NUCDATA_ISOTXS\n        \"\"\"\n        with TemporaryDirectoryChanger():\n            origLib = isotxs.readBinary(ISOAA_PATH)\n\n            fname = self._testMethodName + \"temp-aa.isotxs\"\n            isotxs.writeBinary(origLib, fname)\n            lib = isotxs.readBinary(fname)\n\n            # validate the written file is still valid\n            nucs = lib.nuclides\n            self.assertTrue(nucs)\n            self.assertIn(\"AA\", lib.xsIDs)\n            nuc = lib[\"U235AA\"]\n            self.assertIsNotNone(nuc)\n            with self.assertRaises(KeyError):\n                lib.getNuclide(\"nonexistent\", \"zz\")\n\n    def test_isotxsGeneralData(self):\n        nucs = self.lib.nuclides\n        self.assertTrue(nucs)\n        self.assertIn(\"AA\", self.lib.xsIDs)\n        nuc = self.lib[\"U235AA\"]\n        self.assertIsNotNone(nuc)\n        with self.assertRaises(KeyError):\n            self.lib.getNuclide(\"nonexistent\", \"zz\")\n\n    def test_isotxsDetailedData(self):\n        self.assertEqual(50, len(self.lib.nuclides))\n        groups = self.lib.neutronEnergyUpperBounds\n        self.assertEqual(33, len(groups))\n        self.assertEqual(14072911.0, max(groups))\n        self.assertEqual(0.4139941930770874, min(groups))\n        # file-wide chi\n        self.assertEqual(33, len(self.lib.isotxsMetadata[\"chi\"]))\n        self.assertEqual(1.0000016745038094, sum(self.lib.isotxsMetadata[\"chi\"]))\n\n    def test_getScatteringWeights(self):\n        self.assertEqual(1650, len(self.lib.getScatterWeights()))\n        refVector = [\n            0.0,\n            0.9924760291647134,\n            0.007523970835286507,\n            0.0,\n            0.0,\n            0.0,\n            0.0,\n            0.0,\n            0.0,\n            0.0,\n            0.0,\n            0.0,\n            0.0,\n            0.0,\n            0.0,\n            0.0,\n   
         0.0,\n            0.0,\n            0.0,\n            0.0,\n            0.0,\n            0.0,\n            0.0,\n            0.0,\n            0.0,\n            0.0,\n            0.0,\n            0.0,\n            0.0,\n            0.0,\n            0.0,\n            0.0,\n            0.0,\n        ]\n        for v1, v2 in zip(refVector, self.lib.getScatterWeights()[\"U235AA\", 1].todense().T.tolist()[0]):\n            self.assertAlmostEqual(v1, v2)\n\n    def test_getNuclide(self):\n        nuclideBases = NuclideBases()\n        self.assertEqual(nuclideBases.byName[\"U235\"], self.lib.getNuclide(\"U235\", \"AA\")._base)\n        self.assertEqual(nuclideBases.byName[\"PU239\"], self.lib.getNuclide(\"PU239\", \"AA\")._base)\n\n    def test_n2nIsReactionBased(self):\n        \"\"\"\n        ARMI assumes ISOTXS n2n reactions are all reaction-based. Test this.\n\n        The alternative is production based.\n        Previous studies show that MC**2-2 is reaction based.\n        \"\"\"\n        nuc = self.lib.getNuclide(\"U235\", \"AA\")\n        fromMatrix = nuc.micros.n2nScatter.sum(axis=0).getA1()  # convert to ndarray\n        for base, matrix in zip(fromMatrix, nuc.micros.n2n):\n            self.assertAlmostEqual(base, matrix)\n\n    def test_getScatterWeights(self):\n        scatWeights = self.lib.getScatterWeights()\n        vals = scatWeights[\"U235AA\", 4]\n        self.assertAlmostEqual(sum(vals), 1.0)\n\n    def test_getISOTXSFileName(self):\n        self.assertEqual(nuclearDataIO.getExpectedISOTXSFileName(cycle=0), \"ISOTXS-c0\")\n        self.assertEqual(nuclearDataIO.getExpectedISOTXSFileName(cycle=1), \"ISOTXS-c1\")\n        self.assertEqual(nuclearDataIO.getExpectedISOTXSFileName(cycle=0, node=1), \"ISOTXS-c0n1\")\n        self.assertEqual(nuclearDataIO.getExpectedISOTXSFileName(cycle=23), \"ISOTXS-c23\")\n        self.assertEqual(nuclearDataIO.getExpectedISOTXSFileName(xsID=\"AA\"), \"ISOAA\")\n        self.assertEqual(\n            
nuclearDataIO.getExpectedISOTXSFileName(xsID=\"AA\", suffix=\"test\"),\n            \"ISOAA-test\",\n        )\n        self.assertEqual(nuclearDataIO.getExpectedISOTXSFileName(), \"ISOTXS\")\n        with self.assertRaises(ValueError):\n            # Error when over specified\n            nuclearDataIO.getExpectedISOTXSFileName(cycle=10, xsID=\"AA\")\n\n    def test_getGAMISOFileName(self):\n        self.assertEqual(nuclearDataIO.getExpectedGAMISOFileName(cycle=0), \"cycle0.gamiso\")\n        self.assertEqual(nuclearDataIO.getExpectedGAMISOFileName(cycle=1), \"cycle1.gamiso\")\n        self.assertEqual(\n            nuclearDataIO.getExpectedGAMISOFileName(cycle=1, node=3),\n            \"cycle1node3.gamiso\",\n        )\n        self.assertEqual(nuclearDataIO.getExpectedGAMISOFileName(cycle=23), \"cycle23.gamiso\")\n        self.assertEqual(nuclearDataIO.getExpectedGAMISOFileName(xsID=\"AA\"), \"AA.gamiso\")\n        self.assertEqual(\n            nuclearDataIO.getExpectedGAMISOFileName(xsID=\"AA\", suffix=\"test\"),\n            \"AA-test.gamiso\",\n        )\n        self.assertEqual(nuclearDataIO.getExpectedGAMISOFileName(), \"GAMISO\")\n        with self.assertRaises(ValueError):\n            # Error when over specified\n            nuclearDataIO.getExpectedGAMISOFileName(cycle=10, xsID=\"AA\")\n\n\nclass Isotxs_merge_Tests(unittest.TestCase):\n    def test_mergeMccV2FilesRemovesTheFileWideChi(self):\n        \"\"\"Test merging ISOTXS files.\n\n        .. 
test:: Read ISOTXS files.\n            :id: T_ARMI_NUCDATA_ISOTXS1\n            :tests: R_ARMI_NUCDATA_ISOTXS\n        \"\"\"\n        isoaa = isotxs.readBinary(ISOAA_PATH)\n        self.assertAlmostEqual(1.0, sum(isoaa.isotxsMetadata[\"chi\"]), 5)\n        self.assertAlmostEqual(1, isoaa.isotxsMetadata[\"fileWideChiFlag\"])\n        someIsotxs = xsLibraries.IsotxsLibrary()\n        # semi-copy...\n        someIsotxs.merge(isoaa)\n        self.assertAlmostEqual(1.0, sum(someIsotxs.isotxsMetadata[\"chi\"]), 5)\n        self.assertEqual(1, someIsotxs.isotxsMetadata[\"fileWideChiFlag\"])\n        # OK, now I need to delete all the nuclides, so we can merge again.\n        for key in someIsotxs.nuclideLabels:\n            del someIsotxs[key]\n        someIsotxs.merge(isotxs.readBinary(ISOAA_PATH))\n        self.assertEqual(None, someIsotxs.isotxsMetadata[\"chi\"])\n"
  },
  {
    "path": "armi/nuclearDataIO/cccc/tests/test_labels.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Test the reading and writing of the DIF3D/VARIANT LABELS interface file.\"\"\"\n\nimport os\nimport unittest\n\nfrom armi.nuclearDataIO.cccc import labels\nfrom armi.utils.directoryChangers import TemporaryDirectoryChanger\n\nTHIS_DIR = os.path.dirname(__file__)\n\nLABELS_FILE_BIN = os.path.join(THIS_DIR, \"fixtures\", \"labels.binary\")\nLABELS_FILE_ASCII = os.path.join(THIS_DIR, \"fixtures\", \"labels.ascii\")\n\n\nclass TestLabels(unittest.TestCase):\n    \"\"\"Tests for labels.\"\"\"\n\n    def test_readLabelsBinary(self):\n        expectedName = \"LABELS\"\n        expectedTrianglesPerHex = 6\n        expectedNumZones = 5800\n        expectedNumRegions = 2900\n        expectedNumHexagonalRings = 13\n        labelsData = labels.readBinary(LABELS_FILE_BIN)\n        self.assertEqual(labelsData.metadata[\"hname\"], expectedName)\n        self.assertEqual(labelsData.metadata[\"numTrianglesPerHex\"], expectedTrianglesPerHex)\n        self.assertEqual(labelsData.metadata[\"numZones\"], expectedNumZones)\n        self.assertEqual(labelsData.metadata[\"numRegions\"], expectedNumRegions)\n        self.assertEqual(labelsData.metadata[\"numHexagonalRings\"], expectedNumHexagonalRings)\n        self.assertEqual(len(labelsData.regionLabels), expectedNumRegions)\n\n    def test_writeLabelsAscii(self):\n        with TemporaryDirectoryChanger():\n            labelsData 
= labels.readBinary(LABELS_FILE_BIN)\n            labels.writeAscii(labelsData, self._testMethodName + \"labels.ascii\")\n            with open(self._testMethodName + \"labels.ascii\", \"r\") as f:\n                actualData = f.read().splitlines()\n            with open(LABELS_FILE_ASCII) as f:\n                expectedData = f.read().splitlines()\n            for expected, actual in zip(expectedData, actualData):\n                self.assertEqual(expected, actual)\n"
  },
  {
    "path": "armi/nuclearDataIO/cccc/tests/test_nhflux.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Test reading/writing of NHFLUX dataset.\"\"\"\n\nimport os\nimport unittest\n\nimport numpy as np\n\nfrom armi.nuclearDataIO.cccc import nhflux\nfrom armi.utils.directoryChangers import TemporaryDirectoryChanger\n\nTHIS_DIR = os.path.dirname(__file__)\n\nSIMPLE_HEXZ_INP = os.path.join(THIS_DIR, \"../../tests\", \"simple_hexz.inp\")\nSIMPLE_HEXZ_NHFLUX = os.path.join(THIS_DIR, \"fixtures\", \"simple_hexz.nhflux\")\nSIMPLE_HEXZ_NHFLUX_VARIANT = os.path.join(THIS_DIR, \"fixtures\", \"simple_hexz.nhflux.variant\")\n\n\nclass TestNhflux(unittest.TestCase):\n    @classmethod\n    def setUpClass(cls):\n        \"\"\"Load NHFLUX data from binary file.\"\"\"\n        cls.nhf = nhflux.NhfluxStream.readBinary(SIMPLE_HEXZ_NHFLUX)\n\n    def test_fc(self):\n        \"\"\"Verify the file control info.\"\"\"\n        self.assertEqual(self.nhf.metadata[\"ndim\"], 3)\n        self.assertEqual(self.nhf.metadata[\"ngroup\"], 4)\n        self.assertEqual(self.nhf.metadata[\"ninti\"], 5)\n        self.assertEqual(self.nhf.metadata[\"nintj\"], 5)\n        self.assertEqual(self.nhf.metadata[\"nintk\"], 6)\n        self.assertEqual(self.nhf.metadata[\"nSurf\"], 6)\n        self.assertEqual(self.nhf.metadata[\"nMom\"], 5)\n        self.assertEqual(self.nhf.metadata[\"nintxy\"], 19)\n        self.assertEqual(self.nhf.metadata[\"npcxy\"], 144)\n        
self.assertEqual(self.nhf.metadata[\"iaprx\"], 4)\n        self.assertEqual(self.nhf.metadata[\"iaprxz\"], 3)\n\n        variantControlInfo = nhflux.FILE_SPEC_1D_KEYS_VARIANT11\n        for info in variantControlInfo:\n            self.assertTrue(info not in self.nhf.metadata)\n\n    def test_fluxMoments(self):\n        \"\"\"\n        Verify that the flux moments are properly read.\n\n        The 5 flux moments values are manually verified for two nodes. The indices\n        are converted to zero based from the original by subtracting one.\n        \"\"\"\n        # node 1 (ring=1, position=1), axial=3, group=2\n        i = 0  # first one in node map (ring=1, position=1)\n        # 13 = 2*5 + 2 + 1 => (i=2, j=2)\n        self.assertEqual(self.nhf.geodstCoordMap[i], 13)\n        iz, ig = 2, 1  # zero based\n        self.assertTrue(\n            np.allclose(\n                self.nhf.fluxMoments[i, iz, :, ig],\n                [1.424926e08, -2.018375e-01, 2.018375e-01, -2.018374e-01, 1.758205e06],\n            )\n        )\n\n        # node 8 (ring=3, position=2), axial=6, group=1\n        i = 7  # ring=3, position=2\n        self.assertEqual(self.nhf.geodstCoordMap[i], 20)  # 20 = 3*5 + 4 + 1 => (i=4, j=3)\n        iz, ig = 5, 0  # zero based\n        self.assertTrue(\n            np.allclose(\n                self.nhf.fluxMoments[i, iz, :, ig],\n                [7.277324e06, -1.453915e06, -1.453915e06, 2.362100e-02, -8.626439e05],\n            )\n        )\n\n    def test_xyPartialCurrents(self):\n        \"\"\"\n        Verify that the XY-directed partial currents can be read.\n\n        The surface partial currents can be used to reconstruct the surface\n        flux and corner flux values. 
This test shows that the outgoing current\n        in one hex is identical to the incoming current in the adjacent hex.\n        \"\"\"\n        # node 2 (ring=3, position=1), axial=4, group=2, surface=4, outgoing\n        iNode, iSurf, iz, ig = 1, 3, 3, 1  # zero based\n        self.assertEqual(self.nhf.geodstCoordMap[iNode], 15)\n        self.assertAlmostEqual(self.nhf.partialCurrentsHex[iNode, iz, iSurf, ig] / 1.5570424e07, 1.0)\n\n        # node 14 (ring=2, position=1), axial=4, group=2, surface=1, incoming\n        iNode, iSurf = 13, 0\n        ipcpnt = self.nhf.incomingPointersToAllAssemblies[iSurf, iNode]\n        iNode1, iSurf1 = divmod(ipcpnt - 1, self.nhf.metadata[\"nSurf\"])\n        self.assertEqual(iNode1, 1)  # node 2\n        self.assertEqual(iSurf1, 3)  # surface 4\n\n    def test_zPartialCurrents(self):\n        \"\"\"\n        Verify that the Z-directed partial currents can be read.\n\n        The Z-directed partial currents are manually checked for one node\n        surface.\n        \"\"\"\n        # node 15 (ring=2, position=3), axial=3, group=3, j=1 (z-plus)\n        iNode, iz, ig, j = 14, 2, 2, 0\n        self.assertAlmostEqual(self.nhf.partialCurrentsZ[iNode, iz, j, ig] / 1.6928521e06, 1.0)\n\n    def test_write(self):\n        \"\"\"Verify binary equivalence of written binary file.\"\"\"\n        with TemporaryDirectoryChanger():\n            nhflux.NhfluxStream.writeBinary(self.nhf, \"NHFLUX2\")\n            with open(SIMPLE_HEXZ_NHFLUX, \"rb\") as f1, open(\"NHFLUX2\", \"rb\") as f2:\n                expectedData = f1.read()\n                actualData = f2.read()\n            for expected, actual in zip(expectedData, actualData):\n                self.assertEqual(expected, actual)\n\n\nclass TestNhfluxVariant(unittest.TestCase):\n    @classmethod\n    def setUpClass(cls):\n        \"\"\"Load NHFLUX data from binary file. 
This file was produced using VARIANT v11.0.\"\"\"\n        cls.nhf = nhflux.NhfluxStreamVariant.readBinary(SIMPLE_HEXZ_NHFLUX_VARIANT)\n\n    def test_fc(self):\n        \"\"\"Verify the file control info.\"\"\"\n        # These entries exist for both Nodal and VARIANT, but have different values\n        # for the same model\n        print(self.nhf.metadata.items())\n        self.assertEqual(self.nhf.metadata[\"nMom\"], 35)\n        self.assertEqual(self.nhf.metadata[\"nscoef\"], 3)\n\n        # These entries are only for VARIANT\n        self.assertEqual(self.nhf.metadata[\"npcbdy\"], 30)\n        self.assertEqual(self.nhf.metadata[\"npcsym\"], 0)\n        self.assertEqual(self.nhf.metadata[\"npcsec\"], 0)\n        self.assertEqual(self.nhf.metadata[\"iwnhfl\"], 0)\n        self.assertEqual(self.nhf.metadata[\"nMoms\"], 0)\n\n    def test_fluxMoments(self):\n        # node 1 (ring=1, position=1), axial=3, group=2\n        i = 0\n        self.assertEqual(self.nhf.geodstCoordMap[i], 13)\n        iz, ig = 2, 1\n        fluxMoments = self.nhf.fluxMoments[i, iz, :, ig]\n        numZeroFluxMoments = fluxMoments[fluxMoments == 0.0].shape[0]\n        self.assertTrue(numZeroFluxMoments == 23)\n        actualNonzeroFluxMoments = fluxMoments[fluxMoments != 0.0]\n        expectedNonzeroFluxMoments = [\n            1.42816534e08,\n            -5.97642574e06,\n            -1.54354423e06,\n            -2.15736929e06,\n            -1.53415481e06,\n            5.54278533e04,\n            7.74699855e04,\n            2.38133712e04,\n            6.69907176e03,\n            5.49027950e03,\n            9.01170812e03,\n            1.05852790e04,\n        ]\n        self.assertTrue(np.allclose(actualNonzeroFluxMoments, expectedNonzeroFluxMoments))\n\n    def test_write(self):\n        \"\"\"Verify binary equivalence of written binary file.\"\"\"\n        with TemporaryDirectoryChanger():\n            nhflux.NhfluxStreamVariant.writeBinary(self.nhf, \"NHFLUX2\")\n            with 
open(SIMPLE_HEXZ_NHFLUX_VARIANT, \"rb\") as f1, open(\"NHFLUX2\", \"rb\") as f2:\n                expectedData = f1.read()\n                actualData = f2.read()\n            for expected, actual in zip(expectedData, actualData):\n                self.assertEqual(expected, actual)\n"
  },
  {
    "path": "armi/nuclearDataIO/cccc/tests/test_pmatrx.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests the workings of the library wrappers.\"\"\"\n\nimport filecmp\nimport unittest\n\nfrom armi import nuclearDataIO\nfrom armi.nuclearDataIO.cccc import pmatrx\nfrom armi.nuclearDataIO.tests import test_xsLibraries\nfrom armi.utils import properties\nfrom armi.utils.directoryChangers import TemporaryDirectoryChanger\n\n\nclass TestPmatrxNuclides(unittest.TestCase):\n    @classmethod\n    def setUpClass(cls):\n        # load a library that is in the ARMI tree. 
This should\n        # be a small library with LFPs, Actinides, structure, and coolant\n        cls.libAA = pmatrx.readBinary(test_xsLibraries.PMATRX_AA)\n        cls.libAB = pmatrx.readBinary(test_xsLibraries.PMATRX_AB)\n\n    def _nuclideGeneralHelper(self, u235):\n        self.assertEqual(0, len(u235.pmatrxMetadata[\"activationXS\"]))\n        self.assertEqual(0, len(u235.pmatrxMetadata[\"activationMT\"]))\n        self.assertEqual(0, len(u235.pmatrxMetadata[\"activationMTU\"]))\n        self.assertEqual(33, len(u235.neutronHeating))\n        self.assertEqual(33, len(u235.neutronDamage))\n        self.assertEqual(21, len(u235.gammaHeating))\n        # if there are more scattering orders, should add tests for them as well...\n        self.assertEqual(1, u235.pmatrxMetadata[\"maxScatteringOrder\"])\n        self.assertEqual((21, 33), u235.isotropicProduction.shape)\n\n    def test_pmatrxNuclideDataAA(self):\n        self._nuclideGeneralHelper(self.libAA[\"U235AA\"])\n\n    def test_pmatrxNuclideDataAB(self):\n        self._nuclideGeneralHelper(self.libAB[\"U235AB\"])\n\n    def test_nuclideDataIsDifferent(self):\n        aa = self.libAA[\"U235AA\"]\n        ab = self.libAB[\"U235AB\"]\n        self.assertFalse((aa.isotropicProduction == ab.isotropicProduction).all())\n\n    def test_getPMATRXFileName(self):\n        self.assertEqual(nuclearDataIO.getExpectedPMATRXFileName(cycle=0), \"cycle0.pmatrx\")\n        self.assertEqual(nuclearDataIO.getExpectedPMATRXFileName(cycle=1), \"cycle1.pmatrx\")\n        self.assertEqual(nuclearDataIO.getExpectedPMATRXFileName(cycle=23), \"cycle23.pmatrx\")\n        self.assertEqual(nuclearDataIO.getExpectedPMATRXFileName(xsID=\"AA\"), \"AA.pmatrx\")\n        self.assertEqual(\n            nuclearDataIO.getExpectedPMATRXFileName(xsID=\"AA\", suffix=\"test\"),\n            \"AA-test.pmatrx\",\n        )\n        self.assertEqual(nuclearDataIO.getExpectedPMATRXFileName(), \"PMATRX\")\n        with self.assertRaises(ValueError):\n      
      # Error when over specified\n            nuclearDataIO.getExpectedPMATRXFileName(cycle=10, xsID=\"AA\")\n\n\nclass TestPmatrx(unittest.TestCase):\n    \"\"\"Tests the Pmatrx gamma production matrix.\"\"\"\n\n    @classmethod\n    def setUpClass(cls):\n        # load a library that is in the ARMI tree. This should\n        # be a small library with LFPs, Actinides, structure, and coolant\n        cls.lib = pmatrx.readBinary(test_xsLibraries.PMATRX_AA)\n\n    def setUp(self):\n        self.td = TemporaryDirectoryChanger()\n        self.td.__enter__()\n\n    def tearDown(self):\n        self.td.__exit__(None, None, None)\n\n    def test_pmatrxGammaEnergies(self):\n        energies = [\n            20000000.0,\n            10000000.0,\n            8000000.0,\n            7000000.0,\n            6000000.0,\n            5000000.0,\n            4000000.0,\n            3000000.0,\n            2500000.0,\n            2000000.0,\n            1500000.0,\n            1000000.0,\n            700000.0,\n            450000.0,\n            300000.0,\n            150000.0,\n            100000.0,\n            74999.8984375,\n            45000.0,\n            30000.0,\n            20000.0,\n        ]\n        self.assertTrue((energies == self.lib.gammaEnergyUpperBounds).all())\n\n    def test_pmatrxNeutronEnergies(self):\n        energies = [\n            14190675.0,\n            10000000.0,\n            6065306.5,\n            3678794.75,\n            2231302.0,\n            1353353.125,\n            820850.0,\n            497870.625,\n            301973.75,\n            183156.34375,\n            111089.875,\n            67379.390625,\n            40867.66796875,\n            24787.498046875,\n            15034.3779296875,\n            9118.810546875,\n            5530.8388671875,\n            3354.624267578125,\n            2034.6827392578125,\n            1234.097412109375,\n            748.5178833007812,\n            453.9991149902344,\n            275.36444091796875,\n    
        167.01695251464844,\n            101.30089569091797,\n            61.44210433959961,\n            37.26651382446289,\n            22.6032772064209,\n            13.709582328796387,\n            8.31528091430664,\n            3.9278604984283447,\n            0.5315780639648438,\n            0.41745778918266296,\n        ]\n        self.assertTrue((energies == self.lib.neutronEnergyUpperBounds).all())\n\n    def test_pmatrxNuclideNames(self):\n        names = [\n            \"U235AA\",\n            \"U238AA\",\n            \"PU39AA\",\n            \"FE54AA\",\n            \"FE56AA\",\n            \"FE57AA\",\n            \"FE58AA\",\n            \"NA23AA\",\n            \"ZR90AA\",\n            \"ZR91AA\",\n            \"ZR92AA\",\n            \"ZR93AA\",\n            \"ZR94AA\",\n            \"ZR95AA\",\n            \"ZR96AA\",\n            \"XE28AA\",\n            \"XE29AA\",\n            \"XE30AA\",\n            \"XE31AA\",\n            \"XE32AA\",\n            \"XE33AA\",\n            \"XE34AA\",\n            \"XE35AA\",\n            \"XE36AA\",\n            \"FP40AA\",\n        ]\n        self.assertEqual(names, self.lib.nuclideLabels)\n\n    def test_pmatrxDoesntHaveDoseConversionFactors(self):\n        with self.assertRaises(properties.ImmutablePropertyError):\n            _bacon = self.lib.neutronDoseConversionFactors\n        with self.assertRaises(properties.ImmutablePropertyError):\n            _turkey = self.lib.gammaDoseConversionFactors\n        # bravo!\n\n\nclass TestProdMatrix(TestPmatrx):\n    \"\"\"\n    Tests related to reading a PMATRX that was written by ARMI.\n\n    Note that this runs all the tests from TestPmatrx.\n    \"\"\"\n\n    def test_writtenIsIdenticalToOriginal(self):\n        \"\"\"Make sure our writer produces something identical to the original.\n\n        .. 
test:: Test reading and writing PMATRIX files.\n            :id: T_ARMI_NUCDATA_PMATRX\n            :tests: R_ARMI_NUCDATA_PMATRX\n        \"\"\"\n        origLib = pmatrx.readBinary(test_xsLibraries.PMATRX_AA)\n\n        fname = self._testMethodName + \"temp-aa.pmatrx\"\n        pmatrx.writeBinary(origLib, fname)\n        _lib = pmatrx.readBinary(fname)\n\n        self.assertTrue(filecmp.cmp(test_xsLibraries.PMATRX_AA, fname))\n\n\nclass TestProdMatrixFromAscii(TestPmatrx):\n    \"\"\"\n    Tests that show you can read and write pmatrx files from ascii libraries.\n\n    Notes\n    -----\n    This runs all the tests from TestPmatrx.\n    \"\"\"\n\n    @classmethod\n    def setUpClass(cls):\n        cls.origLib = pmatrx.readBinary(test_xsLibraries.PMATRX_AA)\n\n    def setUp(self):\n        self.td = TemporaryDirectoryChanger()\n        self.td.__enter__()\n\n        self.fname = self._testMethodName + \"temp-aa.pmatrx.ascii\"\n        lib = pmatrx.readBinary(test_xsLibraries.PMATRX_AA)\n        pmatrx.writeAscii(lib, self.fname)\n        self.lib = pmatrx.readAscii(self.fname)\n\n    def tearDown(self):\n        self.td.__exit__(None, None, None)\n"
  },
  {
    "path": "armi/nuclearDataIO/cccc/tests/test_pwdint.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Test PWDINT reading and writing.\"\"\"\n\nimport os\nimport unittest\n\nfrom armi.nuclearDataIO.cccc import pwdint\nfrom armi.utils.directoryChangers import TemporaryDirectoryChanger\n\nTHIS_DIR = os.path.dirname(__file__)\nSIMPLE_PWDINT = os.path.join(THIS_DIR, \"fixtures\", \"simple_cartesian.pwdint\")\n\n\nclass TestGeodst(unittest.TestCase):\n    r\"\"\"\n    Tests the PWDINT class.\n\n    This reads from a PWDINT file that was created using DIF3D 11 on a small\n    test hex reactor in 1/3 geometry.\n    \"\"\"\n\n    def test_readGeodst(self):\n        \"\"\"Ensure we can read a PWDINT file.\"\"\"\n        pwr = pwdint.readBinary(SIMPLE_PWDINT)\n        self.assertGreater(pwr.powerDensity.min(), 0.0)\n\n    def test_writeGeodst(self):\n        \"\"\"Ensure that we can write a modified PWDINT.\"\"\"\n        with TemporaryDirectoryChanger():\n            pwr = pwdint.readBinary(SIMPLE_PWDINT)\n            pwdint.writeBinary(pwr, \"PWDINT2\")\n            pwr2 = pwdint.readBinary(\"PWDINT2\")\n            self.assertTrue((pwr2.powerDensity == pwr.powerDensity).all())\n"
  },
  {
    "path": "armi/nuclearDataIO/cccc/tests/test_rtflux.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Test rtflux reading and writing.\"\"\"\n\nimport os\nimport unittest\n\nfrom armi.nuclearDataIO.cccc import rtflux\nfrom armi.utils.directoryChangers import TemporaryDirectoryChanger\n\nTHIS_DIR = os.path.dirname(__file__)\n# This rtflux was made by DIF3D 11 in a Cartesian test case.\nSIMPLE_RTFLUX = os.path.join(THIS_DIR, \"fixtures\", \"simple_cartesian.rtflux\")\n\n\nclass Testrtflux(unittest.TestCase):\n    r\"\"\"Tests the rtflux class.\"\"\"\n\n    def test_readrtflux(self):\n        \"\"\"Ensure we can read a rtflux file.\"\"\"\n        flux = rtflux.RtfluxStream.readBinary(SIMPLE_RTFLUX)\n        self.assertEqual(\n            flux.groupFluxes.shape,\n            (\n                flux.metadata[\"NINTI\"],\n                flux.metadata[\"NINTJ\"],\n                flux.metadata[\"NINTK\"],\n                flux.metadata[\"NGROUP\"],\n            ),\n        )\n\n    def test_writertflux(self):\n        \"\"\"Ensure that we can write a modified rtflux file.\"\"\"\n        with TemporaryDirectoryChanger():\n            flux = rtflux.RtfluxStream.readBinary(SIMPLE_RTFLUX)\n            # perturb off-diag item to check row/col ordering\n            flux.groupFluxes[2, 1, 3, 5] *= 1.1\n            flux.groupFluxes[1, 2, 4, 6] *= 1.2\n            rtflux.RtfluxStream.writeBinary(flux, \"rtflux2\")\n            flux2 = 
rtflux.RtfluxStream.readBinary(\"rtflux2\")\n            self.assertAlmostEqual(flux2.groupFluxes[2, 1, 3, 5], flux.groupFluxes[2, 1, 3, 5])\n\n    def test_rwAscii(self):\n        \"\"\"Ensure that we can read/write in ascii format.\"\"\"\n        with TemporaryDirectoryChanger():\n            flux = rtflux.RtfluxStream.readBinary(SIMPLE_RTFLUX)\n            rtflux.RtfluxStream.writeAscii(flux, \"rtflux.ascii\")\n            flux2 = rtflux.RtfluxStream.readAscii(\"rtflux.ascii\")\n            self.assertTrue((flux2.groupFluxes == flux.groupFluxes).all())\n\n    def test_adjoint(self):\n        \"\"\"Ensure adjoint reads energy groups differently.\"\"\"\n        real = rtflux.RtfluxStream.readBinary(SIMPLE_RTFLUX)\n        adjoint = rtflux.AtfluxStream.readBinary(SIMPLE_RTFLUX)\n        self.assertFalse((real.groupFluxes == adjoint.groupFluxes).all())\n        g = 3\n        self.assertTrue(\n            (real.groupFluxes[:, :, :, g] == adjoint.groupFluxes[:, :, :, real.metadata[\"NGROUP\"] - g - 1]).all()\n        )\n"
  },
  {
    "path": "armi/nuclearDataIO/cccc/tests/test_rzflux.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Test rzflux reading and writing.\"\"\"\n\nimport os\nimport unittest\n\nfrom armi.nuclearDataIO.cccc import rzflux\nfrom armi.utils.directoryChangers import TemporaryDirectoryChanger\n\nTHIS_DIR = os.path.dirname(__file__)\n# This RZFLUX was made by DIF3D 11 in a Cartesian test case.\nSIMPLE_RZFLUX = os.path.join(THIS_DIR, \"fixtures\", \"simple_cartesian.rzflux\")\n\n\nclass TestRzflux(unittest.TestCase):\n    \"\"\"Tests the rzflux class.\"\"\"\n\n    def test_readRzflux(self):\n        \"\"\"Ensure we can read a RZFLUX file.\"\"\"\n        flux = rzflux.readBinary(SIMPLE_RZFLUX)\n        self.assertEqual(flux.groupFluxes.shape, (flux.metadata[\"NGROUP\"], flux.metadata[\"NZONE\"]))\n\n    def test_writeRzflux(self):\n        \"\"\"Ensure that we can write a modified RZFLUX file.\"\"\"\n        with TemporaryDirectoryChanger():\n            flux = rzflux.readBinary(SIMPLE_RZFLUX)\n            rzflux.writeBinary(flux, \"RZFLUX2\")\n            self.assertTrue(binaryFilesEqual(SIMPLE_RZFLUX, \"RZFLUX2\"))\n            # perturb off-diag item to check row/col ordering\n            flux.groupFluxes[2, 10] *= 1.1\n            flux.groupFluxes[12, 1] *= 1.2\n            rzflux.writeBinary(flux, \"RZFLUX3\")\n            flux2 = rzflux.readBinary(\"RZFLUX3\")\n            self.assertAlmostEqual(flux2.groupFluxes[12, 1], flux.groupFluxes[12, 1])\n\n    def 
test_rwAscii(self):\n        \"\"\"Ensure that we can read/write in ascii format.\"\"\"\n        with TemporaryDirectoryChanger():\n            flux = rzflux.readBinary(SIMPLE_RZFLUX)\n            rzflux.writeAscii(flux, \"RZFLUX.ascii\")\n            flux2 = rzflux.readAscii(\"RZFLUX.ascii\")\n            self.assertTrue((flux2.groupFluxes == flux.groupFluxes).all())\n\n\ndef binaryFilesEqual(fn1, fn2):\n    \"\"\"True if two files are bytewise identical.\"\"\"\n    with open(fn1, \"rb\") as f1, open(fn2, \"rb\") as f2:\n        for byte1, byte2 in zip(f1, f2):\n            if byte1 != byte2:\n                return False\n    return True\n"
  },
  {
    "path": "armi/nuclearDataIO/nuclearFileMetadata.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nAssists in reconstruction/rewriting nuclear data files.\n\nOne might\nrefer to the information stored in these files as the scaffolding or blueprints.\nSome of it can/could be derived based on data within the overall file; however, not all of it could be\nand it is always necessary to retain this type of data while reading the file.\n\"\"\"\n\nfrom armi import runLog\nfrom armi.utils import properties\n\nCOMPXS_POWER_CONVERSION_FACTORS = [\"fissionWattSeconds\", \"captureWattSeconds\"]\nREGIONXS_POWER_CONVERT_DIRECTIONAL_DIFF = [\n    \"powerConvMult\",\n    \"d1Multiplier\",\n    \"d1Additive\",\n    \"d1Multiplier\",\n    \"d2Additive\",\n    \"d3Multiplier\",\n    \"d3Additive\",\n]\n\n\nclass _Metadata:\n    \"\"\"Simple dictionary wrapper, that returns :code:`None` if the key does not exist.\n\n    Notes\n    -----\n    Cannot use a dictionary directly because it is difficult to subclass and broadcast them with MPI.\n    \"\"\"\n\n    def __init__(self):\n        self._data = {}\n\n    def __getitem__(self, key):\n        return self._data.get(key, None)\n\n    def __setitem__(self, key, value):\n        self._data[key] = value\n\n    def __iter__(self):\n        return iter(self._data)\n\n    def items(self):\n        \"\"\"Returns items similar to the dict implementation.\"\"\"\n        return self._data.items()\n\n    def __len__(self):\n        
return len(self._data)\n\n    def keys(self):\n        \"\"\"Returns keys similar to the dict implementation.\"\"\"\n        return self._data.keys()\n\n    def values(self):\n        return self._data.values()\n\n    def update(self, other):\n        \"\"\"Updates the underlying dictionary, similar to the dict implementation.\"\"\"\n        self._data.update(other._data)\n\n    def merge(self, other, selfContainer, otherContainer, fileType, exceptionClass):\n        \"\"\"\n        Merge the contents of two metadata instances.\n\n        Parameters\n        ----------\n        other: Similar Metadata class as self\n            Metadata to be compared against\n        selfContainer: class\n        otherContainer: class\n            Objects that hold the two metadata instances\n        fileType: str\n            File type that created this metadata. Examples: ``'ISOTXS', 'GAMISO', 'COMPXS'```\n        exceptionClass: Exception\n            Type of exception to raise in the event of dissimilar metadata values\n\n        Returns\n        -------\n        mergedData: Metadata\n            Returns a metadata instance of similar type as ``self`` and ``other``\n            containing the correctly merged data of the two\n        \"\"\"\n        mergedData = self.__class__()\n        if not (any(self.keys()) and any(other.keys())):\n            mergedData.update(self)\n            mergedData.update(other)\n            return mergedData\n        self._mergeLibrarySpecificData(other, selfContainer, otherContainer, mergedData)\n        skippedKeys = self._getSkippedKeys(other, selfContainer, otherContainer, mergedData)\n        for key in set(list(self.keys()) + list(other.keys())) - skippedKeys:\n            selfVal = self[key]\n            otherVal = other[key]\n            mergedVal = None\n            if not properties.numpyHackForEqual(selfVal, otherVal):\n                exceptionMsg = (\n                    \"{libType} {key} metadata differs between {lib1} and {lib2}; 
Cannot Merge\\n\"\n                    \"{key} has values of {val1} and {val2}\"\n                )\n                raise exceptionClass(\n                    exceptionMsg.format(\n                        libType=fileType,\n                        lib1=selfContainer,\n                        lib2=otherContainer,\n                        key=key,\n                        val1=selfVal,\n                        val2=otherVal,\n                    )\n                )\n            else:\n                mergedVal = selfVal\n            mergedData[key] = mergedVal\n        return mergedData\n\n    def _getSkippedKeys(self, other, selfContainer, otherContainer, mergedData):\n        return set()\n\n    def _mergeLibrarySpecificData(self, other, selfContainer, otherContainer, mergedData):\n        pass\n\n    def compare(self, other, selfContainer, otherContainer, tolerance=0.0):\n        \"\"\"\n        Compare the metadata for two libraries.\n\n        Parameters\n        ----------\n        other: Similar Metadata class as self\n            Metadata to be compared against\n        selfContainer: class\n        otherContainer: class\n            Objects that hold the two metadata instances\n        tolerance: float\n            Acceptable difference between two metadata values\n\n        Returns\n        -------\n        equal: bool\n            If the metadata are equal or not.\n        \"\"\"\n        equal = True\n        for propName in set(list(self.keys()) + list(other.keys())):\n            selfVal = self[propName]\n            otherVal = other[propName]\n            if not properties.areEqual(selfVal, otherVal, tolerance):\n                runLog.important(\n                    \"{} and {} {} have different {}:\\n{}\\n{}\".format(\n                        selfContainer,\n                        otherContainer,\n                        self.__class__.__name__,\n                        propName,\n                        selfVal,\n                        
otherVal,\n                    )\n                )\n                equal = False\n        return equal\n\n\nclass FileMetadata(_Metadata):\n    \"\"\"\n    Metadata description for a file.\n\n    Attributes\n    ----------\n    fileNames : list\n        string list of file names\n    \"\"\"\n\n    def __init__(self):\n        _Metadata.__init__(self)\n        self.fileNames = []\n\n    def update(self, other):\n        \"\"\"Update this metadata with metadata from another file.\"\"\"\n        _Metadata.update(self, other)\n        self.fileNames += other.fileNames\n\n    def _mergeLibrarySpecificData(self, other, selfContainer, otherContainer, mergedData):\n        mergedData.fileNames = self.fileNames + other.fileNames\n\n\nclass NuclideXSMetadata(FileMetadata):\n    \"\"\"Metadata for library files containing nuclide cross sections, e.g. ``ISOTXS``.\"\"\"\n\n    def _getSkippedKeys(self, other, selfContainer, otherContainer, mergedData):\n        skippedKeys = set([\"chi\", \"libraryLabel\"])\n        if self[\"chi\"] is not None or other[\"chi\"] is not None:\n            runLog.warning(\n                \"File-wide chi is removed merging libraries {lib1} and {lib2}.\\n\"\n                \"This should not impact the calculation, as the file-wide chi is used as\"\n                \" the nuclide-specific chi.\\n The nuclides in {lib2} may be modified as well.\".format(\n                    lib1=selfContainer, lib2=otherContainer\n                )\n            )\n            mergedData[\"fileWideChiFlag\"] = 0\n            skippedKeys.add(\"fileWideChiFlag\")\n            mergedData[\"chi\"] = None\n            for nuc in [nn for nn in selfContainer.nuclides + otherContainer.nuclides]:\n                if nuc.isotxsMetadata[\"fisFlag\"] > 0:\n                    nuc.isotxsMetadata[\"chiFlag\"] = 1\n        return skippedKeys\n\n    def _mergeLibrarySpecificData(self, other, selfContainer, otherContainer, mergedData):\n        
FileMetadata._mergeLibrarySpecificData(self, other, selfContainer, otherContainer, mergedData)\n        mergedData[\"libraryLabel\"] = self[\"libraryLabel\"] or other[\"libraryLabel\"]\n\n\nclass RegionXSMetadata(FileMetadata):\n    \"\"\"Metadata for library files containing region cross sections, e.g. ``COMPXS``.\"\"\"\n\n    def _mergeLibrarySpecificData(self, other, selfContainer, otherContainer, mergedData):\n        FileMetadata._mergeLibrarySpecificData(self, other, selfContainer, otherContainer, mergedData)\n        for datum in COMPXS_POWER_CONVERSION_FACTORS:\n            mergedData[datum] = self[datum] + other[datum]\n        mergedData[\"compFamiliesWithPrecursors\"] = (\n            self[\"compFamiliesWithPrecursors\"] + other[\"compFamiliesWithPrecursors\"]\n        )\n        mergedData[\"numFissComps\"] = self[\"numFissComps\"] + other[\"numFissComps\"]\n\n    def _getSkippedKeys(self, other, selfContainer, otherContainer, mergedData):\n        return set([\"numComps\", \"compFamiliesWithPrecursors\", \"numFissComps\"] + COMPXS_POWER_CONVERSION_FACTORS)\n\n\nclass NuclideMetadata(_Metadata):\n    \"\"\"Simple dictionary for providing metadata about how to read/write a nuclide to/from a file.\"\"\"\n"
  },
  {
    "path": "armi/nuclearDataIO/tests/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"
  },
  {
    "path": "armi/nuclearDataIO/tests/library-file-generation/combine-AA-AB.inp",
    "content": "$control\n     c_isotxs_conversion = bin2asc\n/\n$material\n    t_composition(:,1) = U235_7   \"U235AA\"    1.00000E-03   873.000  !   Fuel\n                         U238_7   \"U238AA\"    1.00000E-03   873.000  !   Fuel\n                         PU2397   \"PU39AA\"    1.00000E-04   873.000  !   Fuel\n                         FE54_7   \"FE54AA\"    1.00000E-03   743.000  !   Structure\n                         FE56_7   \"FE56AA\"    1.00000E-02   743.000  !   Structure\n                         FE57_7   \"FE57AA\"    1.00000E-04   743.000  !   Structure\n                         FE58_7   \"FE58AA\"    1.00000E-05   743.000  !   Structure\n                         NA23_7   \"NA23AA\"    1.00000E-03   738.000  !   Coolant\n                         ZR90_7   \"ZR90AA\"    1.00000E-03   873.000  !   Composite fission product\n                         ZR91_7   \"ZR91AA\"    1.00000E-04   873.000  !   Composite fission product\n                         ZR92_7   \"ZR92AA\"    1.00000E-04   873.000  !   Composite fission product\n                         ZR93_7   \"ZR93AA\"    1.00000E-15   873.000  !   Fission product\n                         ZR94_7   \"ZR94AA\"    1.00000E-04   873.000  !   Composite fission product\n                         ZR95_7   \"ZR95AA\"    1.00000E-15   873.000  !   Fission product\n                         ZR96_7   \"ZR96AA\"    1.00000E-05   873.000  !   Composite fission product\n                         XE1287   \"XE28AA\"    1.00000E-15   873.000  !   Fission product\n                         XE1297   \"XE29AA\"    1.00000E-15   873.000  !   Fission product\n                         XE1307   \"XE30AA\"    1.00000E-15   873.000  !   Fission product\n                         XE1317   \"XE31AA\"    1.00000E-15   873.000  !   Fission product\n                         XE1327   \"XE32AA\"    1.00000E-15   873.000  !   Fission product\n                         XE1337   \"XE33AA\"    1.00000E-15   873.000  !   
Fission product\n                         XE1347   \"XE34AA\"    1.00000E-15   873.000  !   Fission product\n                         XE1357   \"XE35AA\"    1.00000E-15   873.000  !   Fission product\n                         XE1367   \"XE36AA\"    1.00000E-15   873.000  !   Fission product\n                         FP40AA   FP40AA       1.0           873.0\n                         U235_7   \"U235AB\"    1.10000E-03   873.000  !   Fuel\n                         U238_7   \"U238AB\"    1.10000E-03   873.000  !   Fuel\n                         PU2397   \"PU39AB\"    1.10000E-04   873.000  !   Fuel\n                         FE54_7   \"FE54AB\"    1.10000E-03   743.000  !   Structure\n                         FE56_7   \"FE56AB\"    1.10000E-02   743.000  !   Structure\n                         FE57_7   \"FE57AB\"    1.10000E-04   743.000  !   Structure\n                         FE58_7   \"FE58AB\"    1.10000E-05   743.000  !   Structure\n                         NA23_7   \"NA23AB\"    1.10000E-03   738.000  !   Coolant\n                         ZR90_7   \"ZR90AB\"    1.10000E-03   873.000  !   Composite fission product\n                         ZR91_7   \"ZR91AB\"    1.10000E-04   873.000  !   Composite fission product\n                         ZR92_7   \"ZR92AB\"    1.10000E-04   873.000  !   Composite fission product\n                         ZR93_7   \"ZR93AB\"    1.10000E-15   873.000  !   Fission product\n                         ZR94_7   \"ZR94AB\"    1.10000E-04   873.000  !   Composite fission product\n                         ZR95_7   \"ZR95AB\"    1.10000E-15   873.000  !   Fission product\n                         ZR96_7   \"ZR96AB\"    1.10000E-05   873.000  !   Composite fission product\n                         XE1287   \"XE28AB\"    1.10000E-15   873.000  !   Fission product\n                         XE1297   \"XE29AB\"    1.10000E-15   873.000  !   Fission product\n                         XE1307   \"XE30AB\"    1.10000E-15   873.000  !   
Fission product\n                         XE1317   \"XE31AB\"    1.10000E-15   873.000  !   Fission product\n                         XE1327   \"XE32AB\"    1.10000E-15   873.000  !   Fission product\n                         XE1337   \"XE33AB\"    1.10000E-15   873.000  !   Fission product\n                         XE1347   \"XE34AB\"    1.10000E-15   873.000  !   Fission product\n                         XE1357   \"XE35AB\"    1.10000E-15   873.000  !   Fission product\n                         XE1367   \"XE36AB\"    1.10000E-15   873.000  !   Fission product\n                         FP40AB   FP40AB       1.0           873.0\n/\n$output\n  c_isotxs_file = \"../mc2v3-AA.isotxs\" \"../mc2v3-AB.isotxs\"\n/\n"
  },
  {
    "path": "armi/nuclearDataIO/tests/library-file-generation/combine-and-lump-AA-AB.inp",
    "content": "$control\n     c_isotxs_conversion = bin2asc\n/\n$material\n    t_composition(:,1) = U235_7   \"U235AA\"    1.00000E-03   873.000  !   Fuel\n                         U238_7   \"U238AA\"    1.00000E-03   873.000  !   Fuel\n                         PU2397   \"PU39AA\"    1.00000E-04   873.000  !   Fuel\n                         FE54_7   \"FE54AA\"    1.00000E-03   743.000  !   Structure\n                         FE56_7   \"FE56AA\"    1.00000E-02   743.000  !   Structure\n                         FE57_7   \"FE57AA\"    1.00000E-04   743.000  !   Structure\n                         FE58_7   \"FE58AA\"    1.00000E-05   743.000  !   Structure\n                         NA23_7   \"NA23AA\"    1.00000E-03   738.000  !   Coolant\n                         ZR90_7   \"ZR90AA\"    1.00000E-03   873.000  !   Composite fission product\n                         ZR91_7   \"ZR91AA\"    1.00000E-04   873.000  !   Composite fission product\n                         ZR92_7   \"ZR92AA\"    1.00000E-04   873.000  !   Composite fission product\n                         ZR94_7   \"ZR94AA\"    1.00000E-04   873.000  !   Composite fission product\n                         ZR96_7   \"ZR96AA\"    1.00000E-05   873.000  !   Composite fission product\n                         FP40AA   FP40AA      11.0           873.0\n                         U235_7   \"U235AB\"    1.10000E-03   873.000  !   Fuel\n                         U238_7   \"U238AB\"    1.10000E-03   873.000  !   Fuel\n                         PU2397   \"PU39AB\"    1.10000E-04   873.000  !   Fuel\n                         FE54_7   \"FE54AB\"    1.10000E-03   743.000  !   Structure\n                         FE56_7   \"FE56AB\"    1.10000E-02   743.000  !   Structure\n                         FE57_7   \"FE57AB\"    1.10000E-04   743.000  !   Structure\n                         FE58_7   \"FE58AB\"    1.10000E-05   743.000  !   Structure\n                         NA23_7   \"NA23AB\"    1.10000E-03   738.000  !   
Coolant\n                         ZR90_7   \"ZR90AB\"    1.10000E-03   873.000  !   Composite fission product\n                         ZR91_7   \"ZR91AB\"    1.10000E-04   873.000  !   Composite fission product\n                         ZR92_7   \"ZR92AB\"    1.10000E-04   873.000  !   Composite fission product\n                         ZR94_7   \"ZR94AB\"    1.10000E-04   873.000  !   Composite fission product\n                         ZR96_7   \"ZR96AB\"    1.10000E-05   873.000  !   Composite fission product\n                         FP40AB   FP40AB       1.0           873.0\n/\n$output\n  c_isotxs_file = \"../mc2v3-AA.isotxs\" \"../mc2v3-AB.isotxs\"\n  c_lump_name(      1) = FP35AA\n  t_lump_isotope(:, 1) = ZR90_7   1.00000E-03\n                         ZR91_7   1.00000E-02\n                         ZR92_7   1.00000E-02\n                         ZR94_7   1.00000E-02\n                         ZR96_7   1.00000E-02\n                         XE1287   1.00000E-05\n                         XE1297   1.00000E-07\n                         XE1307   1.00000E-04\n                         XE1317   1.00000E-02\n                         XE1327   1.00000E-02\n                         XE1347   1.00000E-02\n                         XE1367   1.00000E-02\n                         ZR93_7   1.00000E-02\n                         ZR95_7   1.00000E-03\n                         XE1357   1.00000E-05\n                         XE1337   1.00000E-04\n  c_lump_name(      2) = FP38AA\n  t_lump_isotope(:, 2) = ZR90_7   1.00000E-03\n                         ZR91_7   1.00000E-02\n                         ZR92_7   1.00000E-02\n                         ZR94_7   1.00000E-02\n                         ZR96_7   1.00000E-02\n                         XE1287   1.00000E-05\n                         XE1297   1.00000E-07\n                         XE1307   1.00000E-05\n                         XE1317   1.00000E-02\n                         XE1327   1.00000E-02\n                         XE1347   1.00000E-02\n 
                        XE1367   1.00000E-02\n                         ZR93_7   1.00000E-02\n                         ZR95_7   1.00000E-03\n                         XE1357   1.00000E-05\n                         XE1337   1.00000E-04\n  c_lump_name(      3) = FP39AA\n  t_lump_isotope(:, 3) = ZR90_7   1.00000E-04\n                         ZR91_7   1.00000E-02\n                         ZR92_7   1.00000E-02\n                         ZR94_7   1.00000E-02\n                         ZR96_7   1.00000E-02\n                         XE1287   1.00000E-04\n                         XE1297   1.00000E-07\n                         XE1307   1.00000E-04\n                         XE1317   1.00000E-02\n                         XE1327   1.00000E-02\n                         XE1347   1.00000E-02\n                         XE1367   1.00000E-02\n                         ZR93_7   1.00000E-02\n                         ZR95_7   1.00000E-03\n                         XE1357   1.00000E-05\n                         XE1337   1.00000E-04\n  c_lump_name(      4) = FP40AA\n  t_lump_isotope(:, 4) = ZR90_7   1.00000E-04\n                         ZR91_7   1.00000E-02\n                         ZR92_7   1.00000E-02\n                         ZR94_7   1.00000E-02\n                         ZR96_7   1.00000E-02\n                         XE1287   1.00000E-05\n                         XE1297   1.00000E-07\n                         XE1307   1.00000E-04\n                         XE1317   1.00000E-02\n                         XE1327   1.00000E-02\n                         XE1347   1.00000E-02\n                         XE1367   1.00000E-02\n                         ZR93_7   1.00000E-02\n                         ZR95_7   1.00000E-03\n                         XE1357   1.00000E-05\n                         XE1337   1.00000E-04\n  c_lump_name(      5) = FP41AA\n  t_lump_isotope(:, 5) = ZR90_7   1.00000E-04\n                         ZR91_7   1.00000E-02\n                         ZR92_7   1.00000E-02\n                      
   ZR94_7   1.00000E-02\n                         ZR96_7   1.00000E-02\n                         XE1287   1.00000E-05\n                         XE1297   1.00000E-07\n                         XE1307   1.00000E-04\n                         XE1317   1.00000E-02\n                         XE1327   1.00000E-02\n                         XE1347   1.00000E-02\n                         XE1367   1.00000E-02\n                         ZR93_7   1.00000E-02\n                         ZR95_7   1.00000E-03\n                         XE1357   1.00000E-05\n                         XE1337   1.00000E-04\n/\n"
  },
  {
    "path": "armi/nuclearDataIO/tests/library-file-generation/mc2v3-AA.inp",
    "content": "$control\n    c_group_structure       = ANL33\n    i_number_region         = 1\n    l_external_inelasticpn  = F\n    c_geometry_type         = mixture\n    l_buckling_search       = T\n    r_eps_buckling          = 0.00001\n    l_gamma                 = T \n/\n$library\n     c_mcclibdir  =\"\\\\path\\to\\mc2\\3.2.2\\libraries\\endfb-vii.0\\lib.mcc.e70\"\n     c_gammalibdir = \"\\\\path\\to\\mc2\\3.2.2\\libraries\\endfb-vii.0\\lib.gamma.e70\"\n/\n$material\n    t_composition(:,1) = U235_7   \"U235AA\"    1.00000E-03   873.000  !   Fuel\n                         U238_7   \"U238AA\"    1.00000E-03   873.000  !   Fuel\n                         PU2397   \"PU39AA\"    1.00000E-04   873.000  !   Fuel\n                         FE54_7   \"FE54AA\"    1.00000E-03   743.000  !   Structure\n                         FE56_7   \"FE56AA\"    1.00000E-02   743.000  !   Structure\n                         FE57_7   \"FE57AA\"    1.00000E-04   743.000  !   Structure\n                         FE58_7   \"FE58AA\"    1.00000E-05   743.000  !   Structure\n                         NA23_7   \"NA23AA\"    1.00000E-03   738.000  !   Coolant\n                         ZR90_7   \"ZR90AA\"    1.00000E-03   873.000  !   Composite fission product\n                         ZR91_7   \"ZR91AA\"    1.00000E-04   873.000  !   Composite fission product\n                         ZR92_7   \"ZR92AA\"    1.00000E-04   873.000  !   Composite fission product\n                         ZR93_7   \"ZR93AA\"    1.00000E-15   873.000  !   Fission product\n                         ZR94_7   \"ZR94AA\"    1.00000E-04   873.000  !   Composite fission product\n                         ZR95_7   \"ZR95AA\"    1.00000E-15   873.000  !   Fission product\n                         ZR96_7   \"ZR96AA\"    1.00000E-05   873.000  !   Composite fission product\n                         XE1287   \"XE28AA\"    1.00000E-15   873.000  !   
Fission product\n                         XE1297   \"XE29AA\"    1.00000E-15   873.000  !   Fission product\n                         XE1307   \"XE30AA\"    1.00000E-15   873.000  !   Fission product\n                         XE1317   \"XE31AA\"    1.00000E-15   873.000  !   Fission product\n                         XE1327   \"XE32AA\"    1.00000E-15   873.000  !   Fission product\n                         XE1337   \"XE33AA\"    1.00000E-15   873.000  !   Fission product\n                         XE1347   \"XE34AA\"    1.00000E-15   873.000  !   Fission product\n                         XE1357   \"XE35AA\"    1.00000E-15   873.000  !   Fission product\n                         XE1367   \"XE36AA\"    1.00000E-15   873.000  !   Fission product\n/\n$output\n    l_edit_flux          = T\n    c_check_memory       = \"long\"\n    c_lump_name(      1) = FP40AA\n    t_lump_isotope(:, 1) = ZR90_7   0.090\n                           ZR91_7   0.091\n                           ZR92_7   0.092\n                           ZR93_7   0.093\n                           ZR94_7   0.094\n                           ZR95_7   0.095\n                           ZR96_7   0.096\n                           XE1287   0.128\n                           XE1297   0.129\n                           XE1307   0.130\n                           XE1317   0.131\n                           XE1327   0.132\n                           XE1337   0.133\n                           XE1347   0.134\n                           XE1357   0.135\n                           XE1367   0.136\n/\n"
  },
  {
    "path": "armi/nuclearDataIO/tests/library-file-generation/mc2v3-AB.inp",
    "content": "$control\n    c_group_structure       = ANL33\n    i_number_region         = 1\n    l_external_inelasticpn  = F\n    c_geometry_type         = mixture\n    l_buckling_search       = T\n    r_eps_buckling          = 0.00001\n    l_gamma                 = T \n/\n$library\n     c_mcclibdir  =\"\\\\path\\to\\mc2\\3.2.2\\libraries\\endfb-vii.0\\lib.mcc.e70\"\n     c_gammalibdir = \"\\\\path\\to\\mc2\\3.2.2\\libraries\\endfb-vii.0\\lib.gamma.e70\"\n/\n$material\n    t_composition(:,1) = U235_7   \"U235AB\"    1.10000E-03   873.000  !   Fuel\n                         U238_7   \"U238AB\"    1.10000E-03   873.000  !   Fuel\n                         PU2397   \"PU39AB\"    1.10000E-04   873.000  !   Fuel\n                         FE54_7   \"FE54AB\"    1.10000E-03   743.000  !   Structure\n                         FE56_7   \"FE56AB\"    1.10000E-02   743.000  !   Structure\n                         FE57_7   \"FE57AB\"    1.10000E-04   743.000  !   Structure\n                         FE58_7   \"FE58AB\"    1.10000E-05   743.000  !   Structure\n                         NA23_7   \"NA23AB\"    1.10000E-03   738.000  !   Coolant\n                         ZR90_7   \"ZR90AB\"    1.10000E-03   873.000  !   Composite fission product\n                         ZR91_7   \"ZR91AB\"    1.10000E-04   873.000  !   Composite fission product\n                         ZR92_7   \"ZR92AB\"    1.10000E-04   873.000  !   Composite fission product\n                         ZR93_7   \"ZR93AB\"    1.10000E-15   873.000  !   Fission product\n                         ZR94_7   \"ZR94AB\"    1.10000E-04   873.000  !   Composite fission product\n                         ZR95_7   \"ZR95AB\"    1.10000E-15   873.000  !   Fission product\n                         ZR96_7   \"ZR96AB\"    1.10000E-05   873.000  !   Composite fission product\n                         XE1287   \"XE28AB\"    1.10000E-15   873.000  !   
Fission product\n                         XE1297   \"XE29AB\"    1.10000E-15   873.000  !   Fission product\n                         XE1307   \"XE30AB\"    1.10000E-15   873.000  !   Fission product\n                         XE1317   \"XE31AB\"    1.10000E-15   873.000  !   Fission product\n                         XE1327   \"XE32AB\"    1.10000E-15   873.000  !   Fission product\n                         XE1337   \"XE33AB\"    1.10000E-15   873.000  !   Fission product\n                         XE1347   \"XE34AB\"    1.10000E-15   873.000  !   Fission product\n                         XE1357   \"XE35AB\"    1.10000E-15   873.000  !   Fission product\n                         XE1367   \"XE36AB\"    1.10000E-15   873.000  !   Fission product\n/\n$output\n    c_check_memory       = \"long\"\n    c_lump_name(      1) = FP40AB\n    t_lump_isotope(:, 1) = ZR90_7   0.090\n                           ZR91_7   0.091\n                           ZR92_7   0.092\n                           ZR93_7   0.093\n                           ZR94_7   0.094\n                           ZR95_7   0.095\n                           ZR96_7   0.096\n                           XE1287   0.128\n                           XE1297   0.129\n                           XE1307   0.130\n                           XE1317   0.131\n                           XE1327   0.132\n                           XE1337   0.133\n                           XE1347   0.134\n                           XE1357   0.135\n                           XE1367   0.136\n/\n"
  },
  {
    "path": "armi/nuclearDataIO/tests/simple_hexz.inp",
    "content": "BLOCK=STP021,3\nUNFORM=A.DIF3D\n01    3D Hex-Z to generate NHFLUX file\n02    10000  1800000\n03    0  0\n04    1  0  0  00  110  10  100  1\n05    1.0E-7  1.0E-5  1.0E-5\n06    1.0  0.001  0.04  1.0\nUNFORM=A.NIP3\n01    3D Hex-Z core\n02    0  1\n03    120    $ full core in plane\n04    4 4  4 4  4 4\n09    Z   1   12.0\n09    Z   4   60.0\n09    Z   1   72.0\n14    M1    I1    1.0\n14    M4    I4    1.0\n15    M1    IC\n15    M4    AB\n29    12.0\n30    AB   1  0  0   0.0  72.0\n30    IC   1  0  0  12.0  60.0\n30    AB   2  0  0   0.0  72.0\n30    IC   2  0  0  12.0  60.0\n30    AB   3  0  0   0.0  72.0\n30    IC   3  0  0  12.0  60.0\nNOSORT=A.ISO\n 0V ISOTXS *GFK 3D BNCH *     1\n 1D      4     6     0     3     0     1     1     1\n 2D *NA COOLED FBR BENCHMARK FOUR GROUP CROSS SECTIONS                 *\n*      * I1     I2     I3     I4     I5     I6\n 0.768      0.232       0.0          0.0\n 1.72336E+09 4.02463E+08 7.97003E+07 3.15946E+07 1.05   E+07 8.00   E+05\n 10000.      1000.       0.0\n     0     3     6     9    12    15\n 4D  I1     GFK         1\n 100.       0.0         0.0         0.0         0.0         0.0\n     0     0     1     0     0     0     0     0     1     1     0   200\n     1     1     2     3     4     1     1     1     1\n 5D .11587      .21220      .46137      .34571      .11587\n.21220      .46137      .34571      .69059  E-03 1.83076E-03 .92948 E-02\n .17305 E-01 .39123 E-02 .18286 E-02 .36334 E-02 .92415 E-02 3.03607\n 2.91217    2.88187     2.87951\n 7D 0.0         0.0         .023597     0.0         .16153  E-02\n .40791 E-05 0.0         .46838 E-02 .42309 E-07 .44493 E-07\n 4D  I2     GFK         1\n 100.       
0.0         0.0         0.0         0.0         0.0\n     0     0     1     0     0     0     0     0     1     1     0   200\n     1     1     2     3     4     1     1     1     1\n 5D .11588      .21213      .46770      .35349      .11588\n .21213     .46770      .35349       .66221 E-031.83956 E-03 1.00354E-02\n .20476 E-01 .48531 E-02 .26377 E-02 .51332 E-02 .13238 E-01 3.07906\n 2.91493     2.88495     2.88254\n 7D 0.0         0.0         .023262     0.0         .15718  E-02\n .46451 E-05 0.0         .43414 E-02 .40724 E-07 .49968 E-07\n 4D  I3     GFK         1\n 100.       0.0         0.0         0.0         0.0         0.0\n     0     0     1     0     0     0     0     0     1     1     0   200\n     1     1     2     3     4     1     1     1     1\n 5D .14584      .28443      .52703      .40732      .14584\n.28443      .52703      .40732       1.11527E-03 3.06346E-03 1.00212E-02\n .129995E-01 .27688 E-02 .44347 E-04 .12274 E-03 .34952 E-03 2.796410\n 2.44098     2.42317     2.42295\n 7D 0.0         0.0         .032071     0.0         .27776  E-02\n .38880 E-05 0.0         .58971 E-02 .90018 E-07 .45039 E-07\n 4D  I4     GFK         1\n 100.       0.0         0.0         0.0         0.0         0.0\n     0     0     1     0     0     0     0     0     1     1     0   200\n     1     1     2     3     4     1     1     1     1\n 5D .12270      .23133      .46274      .33749      .12270\n .23133      .46274      .33749      8.2278 E-04 2.17087E-03 7.64083E-03\n .97185 E-02 .19453 E-02 .31065 E-04 .87566 E-04 .23769 E-03 2.79026\n 2.441880    2.42309     2.42299\n 7D 0.0         0.0         .026322     0.0         .22889  E-02\n .28907 E-05 0.0         .53536 E-02 .62133 E-07 .33248 E-07\n 4D  I5     GFK         1\n 100.       
0.0         0.0         0.0         0.0         0.0\n     0     0     0     0     0     0     0     0     1     1     0   200\n     1     1     2     3     4     1     1     1     1\n 5D .13317      .25355      .58044      .54168      .13317\n .25355      .58044      .54168      .186696E-02 .126433E-01 .634405E-01\n .16868\n 7D 0.0         0.0         .022946     0.0         .37687  E-02\n .10320 E-05 0.0         .86815 E-02 .70361 E-11 .10489 E-07\n 4D  I6     GFK         1\n 100.       0.0         0.0         0.0         0.0         0.0\n     0     0     0     0     0     0     0     0     1     1     0   200\n     1     1     2     3     4     1     1     1     1\n 5D .072206     .11487      .32642      .19272      .072206\n .11487      .32642      .19272      .216305E-03 .16880 E-03 .11468 E-02\n .78660 E-03\n 7D 0.0         0.0         .012942     0.0         .12871  E-02\n .68780 E-06 0.0         .34533 E-02 .43633 E-11 .69903 E-08\n"
  },
  {
    "path": "armi/nuclearDataIO/tests/test_xsCollections.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Module that tests methods within xsCollections.\"\"\"\n\nimport os\nimport unittest\n\nfrom armi import settings\nfrom armi.nuclearDataIO import isotxs, xsCollections\nfrom armi.reactor.blocks import HexBlock\nfrom armi.tests import ISOAA_PATH\nfrom armi.utils.directoryChangers import TemporaryDirectoryChanger\nfrom armi.utils.plotting import plotNucXs\n\n\nclass TestXsCollections(unittest.TestCase):\n    @classmethod\n    def setUpClass(cls):\n        cls.microLib = isotxs.readBinary(ISOAA_PATH)\n\n    def setUp(self):\n        self.mc = xsCollections.MacroscopicCrossSectionCreator(minimumNuclideDensity=1e-13)\n        self.block = MockBlock()\n        self.block.setNumberDensity(\"U235\", 0.02)\n        self.block.setNumberDensity(\"FE\", 0.01)\n\n    def test_genTotScatteringMatrix(self):\n        \"\"\"Generates the total scattering matrix by summing elastic, inelastic, and n2n scattering matrices.\"\"\"\n        nuc = self.microLib.nuclides[0]\n        totalScatter = nuc.micros.getTotalScatterMatrix()\n        self.assertAlmostEqual(\n            totalScatter[0, 0],\n            (nuc.micros.elasticScatter[0, 0] + nuc.micros.inelasticScatter[0, 0] + 2.0 * nuc.micros.n2nScatter[0, 0]),\n        )\n\n    def test_totalScatteringMatrixWithMissingData(self):\n        \"\"\"\n        Generates the total scattering matrix by summing elastic and n2n scattering 
matrices.\n\n        Notes\n        -----\n        This tests that the total scattering matrix can be produced when the inelastic scattering matrix is not defined.\n        \"\"\"\n        nuc = self.microLib.nuclides[0]\n        nuc.micros.inelasticScatter = None\n        totalScatter = nuc.micros.getTotalScatterMatrix()\n        self.assertAlmostEqual(\n            totalScatter[0, 0],\n            (nuc.micros.elasticScatter[0, 0] + 2.0 * nuc.micros.n2nScatter[0, 0]),\n        )\n\n    def test_plotNucXs(self):\n        \"\"\"Testing this plotting method here because we need a XS library to run the test.\"\"\"\n        fName = \"test_plotNucXs.png\"\n        with TemporaryDirectoryChanger():\n            plotNucXs(self.microLib, \"U235AA\", \"fission\", fName=fName)\n            self.assertTrue(os.path.exists(fName))\n\n    def test_createMacrosFromMicros(self):\n        \"\"\"Test calculating macroscopic cross sections from microscopic cross sections.\n\n        .. test:: Compute macroscopic cross sections from microscopic cross sections and number densities.\n            :id: T_ARMI_NUCDATA_MACRO\n            :tests: R_ARMI_NUCDATA_MACRO\n        \"\"\"\n        self.assertEqual(self.mc.minimumNuclideDensity, 1e-13)\n        self.mc.createMacrosFromMicros(self.microLib, self.block)\n        totalMacroFissionXs = 0.0\n        totalMacroAbsXs = 0.0\n        for nuc, density in self.mc.densities.items():\n            nuclideXS = self.mc.microLibrary.getNuclide(nuc, \"AA\")\n            for microXs in nuclideXS.micros.fission:\n                totalMacroFissionXs += microXs * density\n\n            for microXsName in xsCollections.ABSORPTION_XS:\n                for microXs in getattr(nuclideXS.micros, microXsName):\n                    totalMacroAbsXs += microXs * density\n\n        self.assertAlmostEqual(sum(self.mc.macros.fission), totalMacroFissionXs)\n        self.assertAlmostEqual(sum(self.mc.macros.absorption), totalMacroAbsXs)\n\n    def 
test_collapseCrossSection(self):\n        \"\"\"\n        Tests cross section collapsing.\n\n        Notes\n        -----\n        The expected 1 group cross section was generated by running the collapse cross section method. This tests\n        that this method has not been modified to produce a different result.\n        \"\"\"\n        expected1gXs = 2.35725262208\n        micros = self.microLib[\"U235AA\"].micros\n        flux = list(reversed(range(33)))\n        self.assertAlmostEqual(micros.collapseCrossSection(micros.nGamma, flux), expected1gXs)\n\n\nclass MockReactor:\n    def __init__(self):\n        self.blueprints = MockBlueprints()\n        self.spatialGrid = None\n\n\nclass MockBlueprints:\n    # this is only needed for allNuclidesInProblem and attributes were acting funky, so this was made.\n    def __getattribute__(self, *args, **kwargs):\n        return [\"U235\", \"U235\", \"FE\", \"NA23\"]\n\n\nclass MockBlock(HexBlock):\n    def __init__(self, name=None, cs=None):\n        self.density = {}\n        HexBlock.__init__(self, name or \"MockBlock\", cs or settings.Settings())\n        self.r = MockReactor()\n\n    @property\n    def r(self):\n        return self._r\n\n    @r.setter\n    def r(self, r):\n        self._r = r\n\n    def getVolume(self, *args, **kwargs):\n        \"\"\"Return the volume of a block.\"\"\"\n        return 1.0\n\n    def getNuclideNumberDensities(self, nucNames):\n        \"\"\"Return a list of number densities in atoms/barn-cm for the nuc names requested.\"\"\"\n        return [self.density.get(nucName, 0.0) for nucName in nucNames]\n\n    def _getNdensHelper(self):\n        return {nucName: density for nucName, density in self.density.items()}\n\n    def setNumberDensity(self, key, val, *args, **kwargs):\n        \"\"\"Set the number density of this nuclide to this value.\"\"\"\n        self.density[key] = val\n\n    def getNuclides(self):\n        \"\"\"Determine which nuclides are present in this armi block.\"\"\"\n     
   return self.density.keys()\n"
  },
  {
    "path": "armi/nuclearDataIO/tests/test_xsLibraries.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for xsLibraries.IsotxsLibrary.\"\"\"\n\nimport copy\nimport filecmp\nimport os\nimport pickle\nimport traceback\nimport unittest\nfrom time import sleep\n\nimport numpy as np\n\nfrom armi.nucDirectory.nuclideBases import NuclideBases\nfrom armi.nuclearDataIO import xsLibraries\nfrom armi.nuclearDataIO.cccc import gamiso, isotxs, pmatrx\nfrom armi.tests import mockRunLogs\nfrom armi.utils import properties\nfrom armi.utils.directoryChangers import TemporaryDirectoryChanger\n\n# test input pathing\nTHIS_DIR = os.path.dirname(__file__)\nRUN_DIR = os.path.join(THIS_DIR, \"library-file-generation\")\nFIXTURE_DIR = os.path.join(THIS_DIR, \"fixtures\")\n\n# specific tests files\nGAMISO_AA = os.path.join(FIXTURE_DIR, \"AA.gamiso\")\nGAMISO_AA_AB = os.path.join(FIXTURE_DIR, \"combined-AA-AB.gamiso\")\nGAMISO_AB = os.path.join(FIXTURE_DIR, \"AB.gamiso\")\nGAMISO_LUMPED = os.path.join(FIXTURE_DIR, \"combined-and-lumped-AA-AB.gamiso\")\nISOTXS_AA = os.path.join(FIXTURE_DIR, \"ISOAA\")\nISOTXS_AA_AB = os.path.join(FIXTURE_DIR, \"combined-AA-AB.isotxs\")\nISOTXS_AB = os.path.join(FIXTURE_DIR, \"ISOAB\")\nISOTXS_LUMPED = os.path.join(FIXTURE_DIR, \"combined-and-lumped-AA-AB.isotxs\")\nPMATRX_AA = os.path.join(FIXTURE_DIR, \"AA.pmatrx\")\nPMATRX_AA_AB = os.path.join(FIXTURE_DIR, \"combined-AA-AB.pmatrx\")\nPMATRX_AB = os.path.join(FIXTURE_DIR, 
\"AB.pmatrx\")\nPMATRX_LUMPED = os.path.join(FIXTURE_DIR, \"combined-and-lumped-AA-AB.pmatrx\")\nUFG_FLUX_EDIT = os.path.join(FIXTURE_DIR, \"mc2v3-AA.flux_ufg\")\n\n# CCCC fixtures are less fancy than these merging ones.\nFIXTURE_DIR_CCCC = os.path.join(os.path.dirname(isotxs.__file__), \"tests\", \"fixtures\")\nDLAYXS_MCC3 = os.path.join(FIXTURE_DIR_CCCC, \"mc2v3.dlayxs\")\n\n\nclass TempFileMixin:\n    \"\"\"A helpful test tooling; creating temporary directories and nucdata test file path.\"\"\"\n\n    def setUp(self):\n        self.td = TemporaryDirectoryChanger()\n        self.td.__enter__()\n\n    def tearDown(self):\n        self.td.__exit__(None, None, None)\n\n    @property\n    def testFileName(self):\n        return os.path.join(self.td.destination, f\"{self.__class__.__name__}-{self._testMethodName}.nucdata\")\n\n\nclass TestXSLibrary(TempFileMixin, unittest.TestCase):\n    @classmethod\n    def setUpClass(cls):\n        cls.isotxsAA = isotxs.readBinary(ISOTXS_AA)\n        cls.gamisoAA = gamiso.readBinary(GAMISO_AA)\n        cls.pmatrxAA = pmatrx.readBinary(PMATRX_AA)\n        cls.xsLib = xsLibraries.IsotxsLibrary()\n        cls.xsLibGenerationErrorStack = None\n        try:\n            cls.xsLib.merge(copy.deepcopy(cls.isotxsAA))\n            cls.xsLib.merge(copy.deepcopy(cls.gamisoAA))\n            cls.xsLib.merge(copy.deepcopy(cls.pmatrxAA))\n        except Exception:\n            cls.xsLibGenerationErrorStack = traceback.format_exc()\n\n    def test_canPickleAndUnpickleISOTXS(self):\n        pikAA = pickle.loads(pickle.dumps(self.isotxsAA))\n        self.assertTrue(xsLibraries.compare(pikAA, self.isotxsAA))\n\n    def test_canPickleAndUnpickleGAMISO(self):\n        pikAA = pickle.loads(pickle.dumps(self.gamisoAA))\n        self.assertTrue(xsLibraries.compare(pikAA, self.gamisoAA))\n\n    def test_canPickleAndUnpicklePMATRX(self):\n        pikAA = pickle.loads(pickle.dumps(self.pmatrxAA))\n        self.assertTrue(xsLibraries.compare(pikAA, 
self.pmatrxAA))\n\n    def test_compareWorks(self):\n        self.assertTrue(xsLibraries.compare(self.isotxsAA, self.isotxsAA))\n        self.assertTrue(xsLibraries.compare(self.pmatrxAA, self.pmatrxAA))\n        aa = isotxs.readBinary(ISOTXS_AA)\n        del aa[aa.nuclideLabels[0]]\n        self.assertFalse(xsLibraries.compare(aa, self.isotxsAA))\n\n    def test_compareComponentsOfXSLibrary(self):\n        \"\"\"Compare different components of a XS library.\"\"\"\n        self.assertTrue(xsLibraries.compare(self.isotxsAA, self.isotxsAA))\n        self.assertTrue(xsLibraries.compare(self.pmatrxAA, self.pmatrxAA))\n        aa = isotxs.readBinary(ISOTXS_AA)\n        del aa[aa.nuclideLabels[0]]\n        self.assertFalse(xsLibraries.compare(aa, self.isotxsAA))\n\n    def test_mergeFailsWithNonIsotxsFiles(self):\n        dummyFileName = \"ISOSOMEFILE\"\n        with open(dummyFileName, \"w\") as someFile:\n            someFile.write(\"hi\")\n\n        try:\n            with mockRunLogs.BufferLog() as log:\n                lib = xsLibraries.IsotxsLibrary()\n                with self.assertRaises(OSError):\n                    xsLibraries.mergeXSLibrariesInWorkingDirectory(lib, \"ISOTXS\", \"\")\n                self.assertIn(dummyFileName, log.getStdout())\n        finally:\n            os.remove(dummyFileName)\n\n        with TemporaryDirectoryChanger():\n            dummyFileName = \"ISO[]\"\n            with open(dummyFileName, \"w\") as file:\n                file.write(\n                    \"This is a file that starts with the letters 'ISO' but will break the regular expression search.\"\n                )\n\n            try:\n                with mockRunLogs.BufferLog() as log:\n                    lib = xsLibraries.IsotxsLibrary()\n                    xsLibraries.mergeXSLibrariesInWorkingDirectory(lib)\n                    self.assertIn(f\"{dummyFileName} in the merging of ISOXX files\", log.getStdout())\n            finally:\n                pass\n\n    def 
_xsLibraryAttributeHelper(\n        self,\n        lib,\n        neutronEnergyLength,\n        neutronVelLength,\n        gammaEnergyLength,\n        neutronDoseLength,\n        gammaDoseLength,\n    ):\n        for attrName, listLength in [\n            (\"neutronEnergyUpperBounds\", neutronEnergyLength),\n            (\"neutronVelocity\", neutronVelLength),\n            (\"gammaEnergyUpperBounds\", gammaEnergyLength),\n            (\"neutronDoseConversionFactors\", neutronDoseLength),\n            (\"gammaDoseConversionFactors\", gammaDoseLength),\n        ]:\n            if listLength > 0:\n                self.assertEqual(listLength, len(getattr(lib, attrName)))\n            else:\n                with self.assertRaises(properties.ImmutablePropertyError):\n                    print(f\"Getting the value {attrName}\")\n                    print(getattr(lib, attrName))\n\n    def test_isotxsLibraryAttributes(self):\n        self._xsLibraryAttributeHelper(\n            self.isotxsAA,\n            neutronEnergyLength=33,\n            neutronVelLength=33,\n            gammaEnergyLength=0,\n            neutronDoseLength=0,\n            gammaDoseLength=0,\n        )\n\n    def test_gamisoLibraryAttributes(self):\n        self._xsLibraryAttributeHelper(\n            self.gamisoAA,\n            neutronEnergyLength=0,\n            neutronVelLength=0,\n            gammaEnergyLength=21,\n            neutronDoseLength=0,\n            gammaDoseLength=0,\n        )\n\n    def test_pmatrxLibraryAttributes(self):\n        self._xsLibraryAttributeHelper(\n            self.pmatrxAA,\n            neutronEnergyLength=33,\n            neutronVelLength=0,\n            gammaEnergyLength=21,\n            neutronDoseLength=0,\n            gammaDoseLength=0,\n        )\n\n    def test_mergeXSLibrariesWithDifferentDataWorks(self):\n        if self.xsLibGenerationErrorStack is not None:\n            print(self.xsLibGenerationErrorStack)\n            raise Exception(\"see stdout for stack 
trace\")\n\n        # check to make sure they labels overlap, or are actually the same\n        labels = set(self.xsLib.nuclideLabels)\n        self.assertEqual(labels, set(self.isotxsAA.nuclideLabels))\n        self.assertEqual(labels, set(self.gamisoAA.nuclideLabels))\n        self.assertEqual(labels, set(self.pmatrxAA.nuclideLabels))\n        # the whole thing is different from the sum of its components\n        self.assertFalse(xsLibraries.compare(self.xsLib, self.isotxsAA))\n        self.assertFalse(xsLibraries.compare(self.xsLib, self.gamisoAA))\n        self.assertFalse(xsLibraries.compare(self.xsLib, self.pmatrxAA))\n        # individual components are the same\n        self.assertTrue(isotxs.compare(self.xsLib, self.isotxsAA))\n        self.assertTrue(gamiso.compare(self.xsLib, self.gamisoAA))\n        self.assertTrue(pmatrx.compare(self.xsLib, self.pmatrxAA))\n\n    def test_canWriteIsotxsFromCombinedXSLibrary(self):\n        self._canWritefromCombined(isotxs, ISOTXS_AA)\n\n    def test_canWriteGamisoFromCombinedXSLibrary(self):\n        self._canWritefromCombined(gamiso, GAMISO_AA)\n\n    def test_canWritePmatrxFromCombinedXSLibrary(self):\n        self._canWritefromCombined(pmatrx, PMATRX_AA)\n\n    def _canWritefromCombined(self, writer, refFile):\n        if self.xsLibGenerationErrorStack is not None:\n            print(self.xsLibGenerationErrorStack)\n            raise Exception(\"See stdout for stack trace\")\n\n        # check to make sure they labels overlap, or are actually the same\n        writer.writeBinary(self.xsLib, self.testFileName)\n        self.assertTrue(filecmp.cmp(refFile, self.testFileName))\n\n\nclass TestGetISOTXSFilesWorkDir(unittest.TestCase):\n    def test_getISOTXSFilesWithoutLibrarySuffix(self):\n        shouldBeThere = [\"ISOAA\", \"ISOBA\", os.path.join(\"file-path\", \"ISOCA\")]\n        shouldNotBeThere = [\n            \"ISOBA-n2\",\n            \"ISOTXS\",\n            \"ISOTXS-c2\",\n            \"dummyISOTXS\",\n      
      \"ISOTXS.BCD\",\n            \"ISOAA.BCD\",\n        ]\n        filesInDirectory = shouldBeThere + shouldNotBeThere\n        toMerge = xsLibraries.getISOTXSLibrariesToMerge(\"\", filesInDirectory)\n        self.assert_contains_only(toMerge, shouldBeThere, shouldNotBeThere)\n\n    def test_getISOTXSFilesWithLibrarySuffix(self):\n        shouldBeThere = [\n            \"ISOAA-n23\",\n            \"ISOAAF-n23\",\n            \"ISOBA-n23\",\n            \"ISODA\",\n            os.path.join(\"file-path\", \"ISOCA-n23\"),\n        ]\n        shouldNotBeThere = [\n            \"ISOAA\",\n            \"ISOAA-n24\",\n            \"ISOBA-ISO\",\n            \"ISOBA-n2\",\n            \"ISOBA\",\n            \"ISOTXS\",\n            \"ISOTXS-c2\",\n            \"dummyISOTXS\",\n            \"ISOTXS.BCD\",\n            \"ISOAA.BCD\",\n            \"ISOCA-doppler\",\n            \"ISOSA-void\",\n            os.path.join(\"file-path\", \"ISOCA\"),\n        ]\n        filesInDirectory = shouldBeThere + shouldNotBeThere\n        toMerge = xsLibraries.getISOTXSLibrariesToMerge(\"-n23\", filesInDirectory)\n        self.assert_contains_only(toMerge, shouldBeThere, shouldNotBeThere)\n\n    def assert_contains_only(self, container, shouldBeThere, shouldNotBeThere):\n        \"\"\"\n        Utility method for saying what things contain.\n\n        This could just check the contents and length, but the error produced from shouldNotBeThere is much nicer.\n        \"\"\"\n        container = set(container)\n        self.assertEqual(container, set(shouldBeThere))\n        self.assertEqual(set(), container & set(shouldNotBeThere))\n\n\nclass AbstractTestXSlibraryMerging(TempFileMixin):\n    \"\"\"\n    A shared class that defines tests that should be true for all IsotxsLibrary merging.\n\n    Notes\n    -----\n    This is a base class; it is not run directly.\n    \"\"\"\n\n    def _readFileAttempts(self, path):\n        \"\"\"Run the file read a few times, because sometimes GitHub CI 
is flaky with these tests.\"\"\"\n        maxAttempts = 5\n        for a in range(maxAttempts):\n            try:\n                return self.getReadFunc()(path)\n            except OSError as e:\n                if a >= (maxAttempts - 1):\n                    raise e\n                sleep(1)\n\n    def setUp(self):\n        TempFileMixin.setUp(self)\n        # Load a library in the ARMI tree. This should be a small library with LFPs, Actinides, structure, and coolant.\n        self.libAA = self._readFileAttempts(self.getLibAAPath())\n        self.libAB = self._readFileAttempts(self.getLibABPath())\n        self.libCombined = self._readFileAttempts(self.getLibAA_ABPath())\n        self.libLumped = self._readFileAttempts(self.getLibLumpedPath())\n        self.nuclideBases = NuclideBases()\n\n    def getErrorType(self):\n        raise NotImplementedError()\n\n    def getReadFunc(self):\n        raise NotImplementedError()\n\n    def getWriteFunc(self):\n        raise NotImplementedError()\n\n    def getLibAAPath(self):\n        raise NotImplementedError()\n\n    def getLibABPath(self):\n        raise NotImplementedError()\n\n    def getLibAA_ABPath(self):\n        raise NotImplementedError()\n\n    def getLibLumpedPath(self):\n        raise NotImplementedError()\n\n    def test_mergeXSLibSameNucNames(self):\n        \"\"\"Cannot merge XS libraries with the same nuclide names.\"\"\"\n        with self.assertRaises(AttributeError):\n            self.libAA.merge(self.libCombined)\n\n        with self.assertRaises(AttributeError):\n            self.libAA.merge(self.libAA)\n\n        with self.assertRaises(AttributeError):\n            self.libAA.merge(self.libCombined)\n\n        with self.assertRaises(AttributeError):\n            self.libCombined.merge(self.libAA)\n\n    def test_mergeXSLibxDiffGroupStructure(self):\n        \"\"\"Cannot merge XS libraries with different group structure.\"\"\"\n        dummyXsLib = xsLibraries.IsotxsLibrary()\n        
dummyXsLib.neutronEnergyUpperBounds = np.array([1, 2, 3])\n        dummyXsLib.gammaEnergyUpperBounds = np.array([1, 2, 3])\n        with self.assertRaises(properties.ImmutablePropertyError):\n            dummyXsLib.merge(self.libCombined)\n\n    def test_mergeEmptyXSLibWithClones(self):\n        \"\"\"Merge empty XS libraries with clones of others.\"\"\"\n        emptyXSLib = xsLibraries.IsotxsLibrary()\n        emptyXSLib.merge(self.libAA)\n        self.libAA = None\n        self.getWriteFunc()(emptyXSLib, self.testFileName)\n        sleep(1)\n        self.assertTrue(os.path.exists(self.testFileName))\n        self.assertGreater(os.path.getsize(self.testFileName), 0)\n        self.assertTrue(filecmp.cmp(self.getLibAAPath(), self.testFileName))\n\n    def test_mergeTwoXSLibFiles(self):\n        emptyXSLib = xsLibraries.IsotxsLibrary()\n        emptyXSLib.merge(self.libAA)\n        self.libAA = None\n        emptyXSLib.merge(self.libAB)\n        self.libAB = None\n        self.assertEqual(set(self.libCombined.nuclideLabels), set(emptyXSLib.nuclideLabels))\n        self.assertTrue(xsLibraries.compare(emptyXSLib, self.libCombined))\n        self.getWriteFunc()(emptyXSLib, self.testFileName)\n        sleep(1)\n        self.assertTrue(os.path.exists(self.testFileName))\n        self.assertGreater(os.path.getsize(self.testFileName), 0)\n        self.assertTrue(filecmp.cmp(self.getLibAA_ABPath(), self.testFileName))\n\n\nclass TestPmatrxMerge(AbstractTestXSlibraryMerging, unittest.TestCase):\n    def getErrorType(self):\n        raise OSError\n\n    def getReadFunc(self):\n        return pmatrx.readBinary\n\n    def getWriteFunc(self):\n        return pmatrx.writeBinary\n\n    def getLibAAPath(self):\n        return PMATRX_AA\n\n    def getLibABPath(self):\n        return PMATRX_AB\n\n    def getLibAA_ABPath(self):\n        return PMATRX_AA_AB\n\n    def getLibLumpedPath(self):\n        return PMATRX_LUMPED\n\n    def test_cannotMergeXSLibsWithDiffGammaGroups(self):\n     
   \"\"\"Test that we cannot merge XS Libs with different Gamma Group Structures.\"\"\"\n        dummyXsLib = xsLibraries.IsotxsLibrary()\n        dummyXsLib.gammaEnergyUpperBounds = np.array([1, 2, 3])\n        with self.assertRaises(properties.ImmutablePropertyError):\n            dummyXsLib.merge(self.libCombined)\n\n\nclass TestIsotxsMerge(AbstractTestXSlibraryMerging, unittest.TestCase):\n    def getErrorType(self):\n        raise OSError\n\n    def getReadFunc(self):\n        return isotxs.readBinary\n\n    def getWriteFunc(self):\n        return isotxs.writeBinary\n\n    def getLibAAPath(self):\n        return ISOTXS_AA\n\n    def getLibABPath(self):\n        return ISOTXS_AB\n\n    def getLibAA_ABPath(self):\n        return ISOTXS_AA_AB\n\n    def getLibLumpedPath(self):\n        return ISOTXS_LUMPED\n\n    def test_canRemoveIsotopes(self):\n        emptyXSLib = xsLibraries.IsotxsLibrary()\n        emptyXSLib.merge(self.libAA)\n        self.libAA = None\n        emptyXSLib.merge(self.libAB)\n        self.libAB = None\n        for nucId in [\n            \"ZR93_7\",\n            \"ZR95_7\",\n            \"XE1287\",\n            \"XE1297\",\n            \"XE1307\",\n            \"XE1317\",\n            \"XE1327\",\n            \"XE1337\",\n            \"XE1347\",\n            \"XE1357\",\n            \"XE1367\",\n        ]:\n            nucLabel = self.nuclideBases.byMcc3Id[nucId].label\n            del emptyXSLib[nucLabel + \"AA\"]\n            del emptyXSLib[nucLabel + \"AB\"]\n\n        self.assertEqual(set(self.libLumped.nuclideLabels), set(emptyXSLib.nuclideLabels))\n        self.getWriteFunc()(emptyXSLib, self.testFileName)\n        self.assertTrue(filecmp.cmp(self.getLibLumpedPath(), self.testFileName))\n\n\nclass TestGamisoMerge(AbstractTestXSlibraryMerging, unittest.TestCase):\n    def getErrorType(self):\n        raise OSError\n\n    def getReadFunc(self):\n        return gamiso.readBinary\n\n    def getWriteFunc(self):\n        return 
gamiso.writeBinary\n\n    def getLibAAPath(self):\n        return GAMISO_AA\n\n    def getLibABPath(self):\n        return GAMISO_AB\n\n    def getLibAA_ABPath(self):\n        return GAMISO_AA_AB\n\n    def getLibLumpedPath(self):\n        return GAMISO_LUMPED\n\n    def test_canRemoveIsotopes(self):\n        emptyXSLib = xsLibraries.IsotxsLibrary()\n        emptyXSLib.merge(self.libAA)\n        self.libAA = None\n        emptyXSLib.merge(self.libAB)\n        self.libAB = None\n        for nucId in [\n            \"ZR93_7\",\n            \"ZR95_7\",\n            \"XE1287\",\n            \"XE1297\",\n            \"XE1307\",\n            \"XE1317\",\n            \"XE1327\",\n            \"XE1337\",\n            \"XE1347\",\n            \"XE1357\",\n            \"XE1367\",\n        ]:\n            nucLabel = self.nuclideBases.byMcc3Id[nucId].label\n            del emptyXSLib[nucLabel + \"AA\"]\n            del emptyXSLib[nucLabel + \"AB\"]\n\n        self.assertEqual(set(self.libLumped.nuclideLabels), set(emptyXSLib.nuclideLabels))\n        self.getWriteFunc()(emptyXSLib, self.testFileName)\n        self.assertTrue(filecmp.cmp(self.getLibLumpedPath(), self.testFileName))\n\n\nclass TestCombinedMerge(unittest.TestCase):\n    def setUp(self):\n        # Load a library in the ARMI tree. 
This should be a small library with LFPs, Actinides, structure, and coolant.\n        self.isotxsAA = isotxs.readBinary(ISOTXS_AA)\n        self.gamisoAA = gamiso.readBinary(GAMISO_AA)\n        self.pmatrxAA = pmatrx.readBinary(PMATRX_AA)\n        self.isotxsAB = isotxs.readBinary(ISOTXS_AB)\n        self.gamisoAB = gamiso.readBinary(GAMISO_AB)\n        self.pmatrxAB = pmatrx.readBinary(PMATRX_AB)\n        self.libCombined = isotxs.readBinary(ISOTXS_AA_AB)\n\n    def test_mergeAllXSLibFiles(self):\n        lib = xsLibraries.IsotxsLibrary()\n        xsLibraries.mergeXSLibrariesInWorkingDirectory(\n            lib, xsLibrarySuffix=\"\", mergeGammaLibs=True, alternateDirectory=FIXTURE_DIR\n        )\n        self.assertEqual(set(lib.nuclideLabels), set(self.libCombined.nuclideLabels))\n"
  },
  {
    "path": "armi/nuclearDataIO/tests/test_xsNuclides.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Test for xs nuclides.\"\"\"\n\nimport unittest\n\nfrom armi.nucDirectory import nuclideBases\nfrom armi.nuclearDataIO import isotxs, xsLibraries, xsNuclides\nfrom armi.tests import ISOAA_PATH, mockRunLogs\n\n\nclass NuclideTests(unittest.TestCase):\n    @classmethod\n    def setUpClass(cls):\n        cls.lib = isotxs.readBinary(ISOAA_PATH)\n\n    def test_badNameFailure(self):\n        \"\"\"Creating nuclide from label fails on bad name.\"\"\"\n        nuc = xsNuclides.XSNuclide(None, \"BACONAA\")\n        nuc.isotxsMetadata[\"nuclideId\"] = \"BACN87\"\n        with self.assertRaises(OSError):\n            nuc.updateBaseNuclide()\n\n    def test_creatingNucNoSideEffects(self):\n        \"\"\"Creating nuclide does not mes with underlying nuclide dictionary.\"\"\"\n        nuc = nuclideBases.byName[\"U238\"]\n        self.assertFalse(hasattr(nuc, \"xsId\"))\n        nrAA = xsNuclides.XSNuclide(None, \"U238AA\")\n        nrAA.isotxsMetadata[\"nuclideId\"] = nuc.name\n        nrAA.updateBaseNuclide()\n        self.assertEqual(\"AA\", nrAA.xsId)\n        self.assertFalse(hasattr(nuc, \"xsId\"))\n\n    def test_odifyingNucAttrUpdatesIsotxs(self):\n        \"\"\"Modifying nuclide attribute updates the ISOTXS nuclide data.\"\"\"\n        lib = xsLibraries.IsotxsLibrary()\n        nuc = nuclideBases.byName[\"FE\"]\n        nrAA = xsNuclides.XSNuclide(lib, 
\"FEAA\")\n        lib[\"FEAA\"] = nrAA\n        nrAA.isotxsMetadata[\"nuclideId\"] = nuc.name\n        nrAA.updateBaseNuclide()\n        self.assertEqual(len(nuc.trans), len(nrAA.trans))\n        nuc.trans.append(\"whatever\")\n        self.assertEqual(len(nuc.trans), len(nrAA.trans))\n        self.assertEqual(\"whatever\", nuc.trans[-1])\n        self.assertEqual(\"whatever\", nrAA.trans[-1])\n        # We have modified the underlying nuclide; need to reset.\n        nuc.trans.pop()\n\n    def test_moLabelsNoWarnings(self):\n        \"\"\"New nuclide labels do not cause warnings.\"\"\"\n        with mockRunLogs.BufferLog() as logCapture:\n            self.assertEqual(\"\", logCapture.getStdout())\n            fe = nuclideBases.byName[\"FE\"]\n            feNuc = xsNuclides.XSNuclide(None, \"FEAA\")\n            feNuc.isotxsMetadata[\"nuclideId\"] = fe.name\n            feNuc.updateBaseNuclide()\n            self.assertEqual(fe, feNuc._base)\n            self.assertEqual(\"\", logCapture.getStdout())\n\n    def test_nuclide_oldLabelsCauseWarnings(self):\n        with mockRunLogs.BufferLog() as logCapture:\n            self.assertEqual(\"\", logCapture.getStdout())\n            pu = nuclideBases.byName[\"PU239\"]\n            puNuc = xsNuclides.XSNuclide(None, \"PLUTAA\")\n            puNuc.isotxsMetadata[\"nuclideId\"] = pu.name\n            puNuc.updateBaseNuclide()\n            self.assertEqual(pu, puNuc._base)\n            length = len(logCapture.getStdout())\n            self.assertGreater(length, 15)\n            # now get it with a legitimate same label, length should not change\n            puNuc = xsNuclides.XSNuclide(None, \"PLUTAB\")\n            puNuc.isotxsMetadata[\"nuclideId\"] = pu.name\n            puNuc.updateBaseNuclide()\n            self.assertEqual(pu, puNuc._base)\n            self.assertEqual(length, len(logCapture.getStdout()))\n\n    def test_nuclideBaseMethodsNoFail(self):\n        \"\"\"Nuclide base method should not fail.\"\"\"\n        
for nuc in self.lib.nuclides:\n            self.assertIsInstance(nuc.getDatabaseName(), str)\n            self.assertIsInstance(nuc.getMcc3Id(), str)\n\n    def test_nuclideIsoaaDetails(self):\n        nuc = self.lib[\"U235AA\"]\n        self.assertEqual(935.9793848991394, sum(nuc.micros.fission))\n        self.assertEqual(1.0000000956962505, sum(nuc.micros.chi))\n        nuc = self.lib[\"B10AA\"]\n        self.assertEqual(0.7499475518734471, sum(nuc.micros.nGamma))\n        nuc = self.lib[\"B11AA\"]\n        self.assertEqual(0.0008645406924188137, sum(nuc.micros.n2n))\n        self.assertEqual(0.008091875669521187, sum(nuc.micros.nGamma))\n\n    def test_2dDataCoords(self):\n        \"\"\"Manually compare some 2d XS data to ensure the correct coordinates.\"\"\"\n        u235 = self.lib[\"U235AA\"]\n        self.assertAlmostEqual(5.76494979858, u235.micros.total[0, 0])\n        self.assertAlmostEqual(6.5928812027, u235.micros.total[1, 0])\n        self.assertAlmostEqual(113.00479126, u235.micros.total[31, 0])\n        self.assertAlmostEqual(606.100097656, u235.micros.total[32, 0])\n        self.assertAlmostEqual(5.7647356987, u235.micros.total[0, 1])\n        self.assertAlmostEqual(6.58178663254, u235.micros.total[1, 1])\n        self.assertAlmostEqual(112.154449463, u235.micros.total[31, 1])\n        self.assertAlmostEqual(606.100097656, u235.micros.total[32, 1])\n        pu239 = self.lib[\"PU39AA\"]\n        self.assertAlmostEqual(5.83128976821, pu239.micros.total[0, 0])\n        self.assertAlmostEqual(6.64091205597, pu239.micros.total[1, 0])\n        self.assertAlmostEqual(394.632354736, pu239.micros.total[31, 0])\n        self.assertAlmostEqual(973.399902343, pu239.micros.total[32, 0])\n        self.assertAlmostEqual(5.83086299896, pu239.micros.total[0, 1])\n        self.assertAlmostEqual(6.63103675842, pu239.micros.total[1, 1])\n        self.assertAlmostEqual(383.891998291, pu239.micros.total[31, 1])\n        self.assertAlmostEqual(973.399902343, 
pu239.micros.total[32, 1])\n\n    def test_scatterXSdataCoords(self):\n        \"\"\"Manually compare scatter XS data to ensure the correct coordinates.\"\"\"\n        u235 = self.lib[\"U235AA\"]\n        elasticScatter = u235.micros.elasticScatter\n        n2nScatter = u235.micros.n2nScatter\n        inelasticScatter = u235.micros.inelasticScatter\n\n        self.assertAlmostEqual(0.0304658822715, elasticScatter[(2, 1)])\n        self.assertAlmostEqual(0.0331721678376, inelasticScatter[(2, 0)])\n        self.assertAlmostEqual(0.0310171917081, inelasticScatter[(2, 1)])\n        self.assertAlmostEqual(0.0893433615565, inelasticScatter[(2, 2)])\n        self.assertAlmostEqual(8.41606015456e-05, inelasticScatter[(16, 2)])\n        self.assertAlmostEqual(3.23279074621e-08, inelasticScatter[(17, 2)])\n        self.assertAlmostEqual(1.96078691062e-08, inelasticScatter[(18, 2)])\n        self.assertAlmostEqual(1.18927703241e-08, inelasticScatter[(19, 2)])\n        self.assertAlmostEqual(7.21333170972e-09, inelasticScatter[(20, 2)])\n        self.assertAlmostEqual(3.66581343059e-09, inelasticScatter[(21, 2)])\n        self.assertAlmostEqual(3.81337583732e-09, inelasticScatter[(22, 2)])\n        self.assertAlmostEqual(1.35068589646e-09, inelasticScatter[(23, 2)])\n        self.assertAlmostEqual(3.96180976914e-10, inelasticScatter[(24, 2)])\n        self.assertAlmostEqual(4.85626551381e-05, n2nScatter[(1, 0)])\n        self.assertAlmostEqual(4.61509245042e-07, n2nScatter[(1, 1)])\n        self.assertAlmostEqual(9.67319720075e-05, n2nScatter[(2, 1)])\n        self.assertAlmostEqual(3.39554608217e-05, n2nScatter[(16, 1)])\n        self.assertAlmostEqual(1.12633460958e-05, n2nScatter[(17, 1)])\n        self.assertAlmostEqual(6.964501722e-07, n2nScatter[(18, 1)])\n\n        pu239 = self.lib[\"PU39AA\"]\n        elasticScatter = pu239.micros.elasticScatter\n        inelasticScatter = pu239.micros.inelasticScatter\n        n2nScatter = pu239.micros.n2nScatter\n        
self.assertAlmostEqual(1.7445316189e-05, n2nScatter[(1, 0)])\n        self.assertAlmostEqual(4.12698773289e-06, n2nScatter[(17, 1)])\n        self.assertAlmostEqual(6.80282767007e-07, n2nScatter[(1, 1)])\n        self.assertAlmostEqual(1.56137302838e-05, n2nScatter[(16, 1)])\n        self.assertAlmostEqual(9.7953477507e-07, n2nScatter[(18, 1)])\n        self.assertAlmostEqual(0.000104939324956, n2nScatter[(2, 1)])\n        self.assertAlmostEqual(0.0206335708499, elasticScatter[(2, 1)])\n        self.assertAlmostEqual(0.000585122266784, inelasticScatter[(2, 0)])\n        self.assertAlmostEqual(0.0352461636066, inelasticScatter[(2, 1)])\n        self.assertAlmostEqual(0.457990020514, inelasticScatter[(2, 2)])\n        self.assertAlmostEqual(1.16550609164e-07, n2nScatter[(19, 1)])\n        self.assertAlmostEqual(5.22556074429e-05, inelasticScatter[(16, 2)])\n        # the code below is very useful for generating the above test information\n        \"\"\"\n        for key, xs in pu239Scatter.items():\n            mk = max(key[1:])\n            if len(key)  == 5 and 1 in key and 2 in key and (mk <= 2 or mk > 15):\n                print ('self.assertAlmostEqual({}, pu239.micros[{}])'\n                       .format(xs, key))\n        \"\"\"\n\n    def test_getMicroXS(self):\n        \"\"\"Check whether getMicroXS method returns the correct cross sections for the input nuclide.\"\"\"\n        u235Nuc = self.lib[\"U235AA\"]\n        for i in range(self.lib.numGroups):\n            refFissionXS = u235Nuc.micros.fission[i]\n            curFissionXS = u235Nuc.getMicroXS(\"fission\", i)\n            self.assertAlmostEqual(refFissionXS, curFissionXS)\n\n        # error raised if you attempt a bad group index\n        with self.assertRaises(IndexError):\n            u235Nuc.getMicroXS(\"fission\", -999)\n\n        # zero returned if you try to grab a non-existent interaction\n        self.assertEqual(u235Nuc.getMicroXS(\"fake\", 1), 0)\n\n    def test_getXS(self):\n        
u235Nuc = self.lib[\"U235AA\"]\n        refFission = u235Nuc.micros.fission\n        curFission = u235Nuc.getXS(\"fission\")\n        self.assertAlmostEqual(len(refFission), len(curFission))\n        self.assertAlmostEqual(refFission[0], curFission[0])\n        self.assertAlmostEqual(refFission[1], curFission[1])\n"
  },
  {
    "path": "armi/nuclearDataIO/xsCollections.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nCross section collections contain cross sections for a single nuclide or region.\n\nSpecifically, they are used as attributes of :py:class:`~armi.nuclearDataIO.xsNuclides.XSNuclide`,\nwhich then are combined as a :py:class:`~armi.nuclearDataIO.xsLibraries.XSLibrary`.\n\nThese may represent microscopic or macroscopic neutron or photon cross sections. When they are\nmacroscopic, they generally represent a whole region with many nuclides, though this is not\nrequired.\n\nSee Also\n--------\narmi.nuclearDataIO.xsCollection.XSCollection : object that gets created.\n\nExamples\n--------\n    # creating a MicroscopicXSCollection by loading one from ISOTXS.\n    microLib = armi.nuclearDataIO.ISOTXS('ISOTXS')\n    micros = myLib.nuclides['U235AA'].micros\n\n    # creating macroscopic XS:\n    mc = MacroscopicCrossSectionCreator()\n    macroCollection = mc.createMacrosFromMicros(microLib, block)\n    blocksWithMacros = mc.createMacrosOnBlocklist(microLib, blocks)\n\n\"\"\"\n\nimport numpy as np\nfrom scipy import sparse\n\nfrom armi import runLog\nfrom armi.utils import properties, units\n\n# Basic cross-section types that are represented by a 1-D vector in the multigroup approximation\n# No one is particularly proud of these names...we can claim\n# they have some origin in the ISOTXS file format card 04 definition\n# fmt: off\nNGAMMA = \"nGamma\"      # radiative 
capture\nNALPHA = \"nalph\"       # (n, alpha)\nNP = \"np\"              # (n, proton)\nND = \"nd\"              # (n, deuteron)\nNT = \"nt\"              # (n, triton)\nFISSION_XS = \"fission\" # (n, fission)\nN2N_XS = \"n2n\"         # (n,2n)\nNUSIGF = \"nuSigF\"\nNU = \"neutronsPerFission\"\n# fmt: on\nCAPTURE_XS = [NGAMMA, NALPHA, NP, ND, NT]\n\n# Cross section types that are represented by 2-D matrices in the multigroup approximation\nBASIC_SCAT_MATRIX = [\"elasticScatter\", \"inelasticScatter\", \"n2nScatter\"]\nOTHER_SCAT_MATRIX = [\"totalScatter\", \"elasticScatter1stOrder\"]\nHIGHORDER_SCATTER = \"higherOrderScatter\"\n\n# Subset of vector xs used to evaluate absorption cross-section\nABSORPTION_XS = CAPTURE_XS + [FISSION_XS, N2N_XS]\n\n# Subset of vector xs evaluated by _convertBasicXS\nBASIC_XS = ABSORPTION_XS + [NUSIGF]\n\n# Subset vector xs that are derived from basic cross sections\nDERIVED_XS = [\"absorption\", \"removal\"]\n\n# Total and transport are treated differently since they are 2D (can have multiple moments)\nTOTAL_XS = [\"total\", \"transport\"]\n\n# Subset of all basic cross sections that include removal and scattering\nALL_XS = BASIC_XS + BASIC_SCAT_MATRIX + OTHER_SCAT_MATRIX + DERIVED_XS + TOTAL_XS\n\n# All xs collection data\nALL_COLLECTION_DATA = ALL_XS + [\n    \"chi\",\n    NU,\n    \"strpd\",\n    HIGHORDER_SCATTER,\n    \"diffusionConstants\",\n]\n\nE_CAPTURE = \"ecapt\"\nE_FISSION = \"efiss\"\n\n\nclass XSCollection:\n    \"\"\"A cross section collection.\"\"\"\n\n    _zeroes = {}\n    \"\"\"\n    A dict of numpy arrays set to the size of XSLibrary.numGroups.\n\n    This is used to initialize cross sections which may not exist for the specific nuclide.\n    Consequently, there should never be a situation where a cross section does not exist.\n    In addition, they are all pointers to the same array, so we're not generating too much\n    unnecessary data.\n\n    Notes\n    -----\n    This is a dict so that it can store multiple 0_g 
\"matrices\", i.e. vectors. Realistically,\n    during any given run there will only be a set of groups, e.g. 33.\n    \"\"\"\n\n    @classmethod\n    def getDefaultXs(cls, numGroups):\n        default = cls._zeroes.get(numGroups, None)\n        if default is None:\n            default = np.zeros(numGroups)\n            cls._zeroes[numGroups] = default\n        return default\n\n    def __init__(self, parent):\n        \"\"\"\n        Construct a NuclideCollection.\n\n        Parameters\n        ----------\n        parent : object\n            The parent container, which may be a region, a nuclide, a block, etc.\n        \"\"\"\n        self.numGroups = None\n        self.transport = None\n        self.total = None\n        self.nGamma = None\n        self.fission = None\n        self.neutronsPerFission = None\n        self.chi = None\n        self.nalph = None\n        self.np = None\n        self.n2n = None\n        self.nd = None\n        self.nt = None\n        self.strpd = None\n        self.elasticScatter = None\n        self.inelasticScatter = None\n        self.n2nScatter = None\n        self.elasticScatter1stOrder = None\n        self.totalScatter = None\n        self.absorption = None\n        self.diffusionConstants = None\n        self.removal = None\n        self.nuSigF = None\n        self.higherOrderScatter = {}\n        self.source = \"{}\".format(parent)\n\n    def __getitem__(self, key):\n        \"\"\"\n        Access cross sections by key string (e.g. micros['fission'] = micros.fission.\n\n        Notes\n        -----\n        These containers were originally\n        dicts, but upgraded to objects with numpy values as specialization\n        was needed. 
This access method could/should be phased out.\n        \"\"\"\n        return self.__dict__[key]\n\n    def __setitem__(self, key, value):\n        self.__dict__[key] = value\n\n    def get(self, key, default):\n        try:\n            return self[key]\n        except (IndexError, KeyError, TypeError):\n            return default\n\n    def getAbsorptionXS(self):\n        \"\"\"Return total absorption XS, which is the sum of capture + fission + others.\"\"\"\n        absXS = [\n            self.nGamma,\n            self.fission,\n            self.nalph,\n            self.np,\n            self.nd,\n            self.nt,\n            self.n2n,\n        ]\n        return absXS\n\n    def getTotalScatterMatrix(self):\n        \"\"\"\n        Sum up scatter matrices to produce total scatter matrix.\n\n        Multiply reaction-based n2n scatter matrix by 2.0 to convert to production-based.\n\n        .. warning:: Not all lattice codes store (n,2n) matrices consistently. Some are\n                     production-based and some are absorption-based. If you use an\n                     absorption-based one, your scatter matrix will be off, generally\n                     leading to about a percent error in your neutron balance.\n\n        Notes\n        -----\n        The total scattering matrix is produced by summing the elastic, inelastic, and n2n scattering matrices. 
If a\n        specific scattering matrix does not exist for a composition (nuclide or region) then it is skipped and a\n        warning is displayed stating that the scattering reaction is not available and is not included in the total\n        scattering matrix.\n\n        Example: When producing macroscopic cross sections in MC2-3 the code internally merges the elastic and\n        inelastic scattering matrices into a single elastic scattering matrix.\n        \"\"\"\n        scatters = []\n        totalScatterComponents = {\n            \"elastic\": self.elasticScatter,\n            \"inelastic\": self.inelasticScatter,\n            \"n2n\": self.n2nScatter * 2.0,\n        }\n        for sType, sMatrix in totalScatterComponents.items():\n            if sMatrix is not None:\n                scatters.append(sMatrix)\n            else:\n                runLog.warning(\n                    \"{} scattering matrix in {} is not defined. Generating total scattering matrix\"\n                    \" without this data\".format(sType.title(), self),\n                    single=True,\n                )\n        return sum(scatters)\n\n    def clear(self):\n        \"\"\"Zero out all the cross sections; this is useful for creating dummy cross sections.\"\"\"\n        for xsAttr in ALL_XS:\n            value = getattr(self, xsAttr)\n            # it should either be a list, a numpy array, or a sparse matrix\n            if isinstance(value, list):\n                value = [0.0] * len(value)\n            elif isinstance(value, np.ndarray):\n                value = np.zeros(value.shape)\n            elif value is None:  # assume it is scipy.sparse\n                pass\n            elif value.nnz >= 0:\n                value = sparse.csr_matrix(value.shape)\n            setattr(self, xsAttr, value)\n        # need to do the same thing for the higherOrderScatter\n        for kk, currentMatrix in self.higherOrderScatter.items():\n            self.higherOrderScatter[kk] = 
sparse.csr_matrix(currentMatrix.shape)\n\n    @staticmethod\n    def collapseCrossSection(crossSection, weights):\n        r\"\"\"\n        Collapse a cross section into 1-group.\n\n        This is extremely useful for many analyses such as doing a shielding efficacy survey\n        or computing one-group reaction rates.\n\n        .. math::\n\n            \\bar{\\sigma} = \\frac{\\sum_g{\\sigma_g \\phi_g}}{\\sum_g{\\phi_g}}\n\n        Parameters\n        ----------\n        crossSection : list\n            Multigroup cross section values\n        weights : list\n            energy group weights to apply (usually the multigroup flux)\n\n        Returns\n        -------\n        oneGroupXS : float\n            The one group cross section in the same units as the input cross section.\n        \"\"\"\n        mult = np.array(crossSection) * np.array(weights)\n        return sum(mult) / sum(weights)\n\n    def compare(self, other, flux, relativeTolerance=0, verbose=False, nucName=\"\"):\n        \"\"\"Compare the cross sections between two XSCollections objects.\"\"\"\n        nuclideIDMsg = f\"Nuclide {nucName} \" if nucName else \"\"\n        equal = True\n        for xsName in ALL_COLLECTION_DATA:\n            myXsData = self.__dict__[xsName]\n            theirXsData = other.__dict__[xsName]\n\n            if xsName == HIGHORDER_SCATTER:\n                for actualList, expectedList in zip(myXsData, theirXsData):\n                    if actualList != expectedList:\n                        equal = False\n                        runLog.important(\n                            \"  {}{} {:<30} cross section is different.\".format(\n                                nuclideIDMsg,\n                                self.source,\n                                xsName,\n                            )\n                        )\n\n            elif sparse.issparse(myXsData) and sparse.issparse(theirXsData):\n                if not np.allclose(\n                    
myXsData.todense(),\n                    theirXsData.todense(),\n                    rtol=relativeTolerance,\n                    atol=0.0,\n                ):\n                    verboseData = \"\" if not verbose else \"\\n{},\\n\\n{}\".format(myXsData, theirXsData)\n                    runLog.important(\n                        \"  {}{} {:<30} cross section is different.{}\".format(\n                            nuclideIDMsg, self.source, xsName, verboseData\n                        )\n                    )\n                    equal = False\n            elif isinstance(myXsData, dict) and myXsData != theirXsData:\n                # there are no dicts currently so code is untested\n                raise NotImplementedError(\"there are no dicts\")\n            elif not properties.areEqual(myXsData, theirXsData, relativeTolerance):\n                verboseData = \"\" if not verbose else \"\\n{},\\n\\n{}\".format(myXsData, theirXsData)\n                runLog.important(\n                    \"  {}{} {:<30} cross section is different.{}\".format(nuclideIDMsg, self.source, xsName, verboseData)\n                )\n                equal = False\n        return equal\n\n    def merge(self, other):\n        \"\"\"\n        Merge the cross sections of two collections.\n\n        Notes\n        -----\n        1. This can only merge if one hasn't been assigned at all, because it doesn't try to figure out how to\n           account for overlapping cross sections.\n        2. Update the current library (self) with values from the other library if all attributes in the library except\n           ones in `attributesToIgnore` are None.\n        3. 
Libraries are already merged if all attributes in the other library are None (This is nothing to merge!).\n        \"\"\"\n        attributesToIgnore = [\"source\", HIGHORDER_SCATTER]\n        if all(v is None for k, v in self.__dict__.items() if k not in attributesToIgnore):\n            self.__dict__.update(other.__dict__)  # See note 2\n        elif all(v is None for k, v in other.__dict__.items() if k not in attributesToIgnore):\n            pass  # See note 3\n        else:\n            overlappingAttrs = set(k for k, v in self.__dict__.items() if v is not None and k != \"source\")\n            overlappingAttrs &= set(k for k, v in other.__dict__.items() if v is not None and k != \"source\")\n            raise AttributeError(\n                \"Cannot merge {} and {}.\\n Cross sections overlap in attributes: {}.\".format(\n                    self.source, other.source, \", \".join(overlappingAttrs)\n                )\n            )\n\n\nclass MacroscopicCrossSectionCreator:\n    \"\"\"\n    Create macroscopic cross sections from micros and number density.\n\n    Object encapsulating all high-level methods related to the creation of macroscopic cross\n    sections.\n    \"\"\"\n\n    def __init__(self, buildScatterMatrix=True, minimumNuclideDensity=0.0):\n        self.densities = None\n        self.macros = None\n        self.micros = None\n        self.minimumNuclideDensity = minimumNuclideDensity\n        self.buildScatterMatrix = buildScatterMatrix\n        self.block = None\n\n    def createMacrosOnBlocklist(self, microLibrary, blockList, nucNames=None, libType=\"micros\"):\n        \"\"\"Create macroscopic cross sections for a list of blocks.\"\"\"\n        for block in blockList:\n            block.macros = self.createMacrosFromMicros(microLibrary, block, nucNames, libType=libType)\n\n        return blockList\n\n    def createMacrosFromMicros(self, microLibrary, block, nucNames=None, libType=\"micros\"):\n        \"\"\"\n        Creates a macroscopic 
cross section set based on a microscopic XS library using a block object.\n\n        Micro libraries have lots of nuclides, but macros only have 1.\n\n        Parameters\n        ----------\n        microLibrary : xsCollection.XSCollection\n            Input micros\n\n        block : Block\n            Object whose number densities should be used to generate macros\n\n        nucNames : list, optional\n            List of nuclides to include in the macros. Defaults to all in block.\n\n        libType : str, optional\n            The block attribute containing the desired microscopic XS for this block:\n            either \"micros\" for neutron XS or \"gammaXS\" for gamma XS.\n\n        Returns\n        -------\n        macros : xsCollection.XSCollection\n            A new XSCollection full of macroscopic cross sections\n        \"\"\"\n        runLog.debug(\"Building macroscopic cross sections for {0}\".format(block))\n        if nucNames is None:\n            nucNames = block.getNuclides()\n\n        self.microLibrary = microLibrary\n        self.block = block\n        self.xsSuffix = block.getMicroSuffix()\n        self.macros = XSCollection(parent=block)\n        self.densities = dict(\n            filter(\n                lambda x: x[1] > self.minimumNuclideDensity,\n                zip(nucNames, block.getNuclideNumberDensities(nucNames)),\n            )\n        )\n        self.ng = getattr(self.microLibrary, \"numGroups\" + _getLibTypeSuffix(libType))\n\n        self._initializeMacros()\n        self._convertBasicXS(libType=libType)\n        self._computeAbsorptionXS()\n        self._convertScatterMatrices(libType=libType)\n        self._computeDiffusionConstants()\n        self._buildTotalScatterMatrix()\n        self._computeRemovalXS()\n        self.macros.chi = computeBlockAverageChi(b=self.block, isotxsLib=self.microLibrary)\n\n        return self.macros\n\n    def _initializeMacros(self):\n        m = self.macros\n        for xsName in BASIC_XS + 
DERIVED_XS:\n            setattr(m, xsName, np.zeros(self.ng))\n\n        for matrixName in BASIC_SCAT_MATRIX:\n            # lil_matrices are good for indexing but bad for certain math operations.\n            # use csr for faster math\n            setattr(m, matrixName, sparse.csr_matrix((self.ng, self.ng)))\n\n    def _convertBasicXS(self, libType=\"micros\"):\n        \"\"\"\n        Converts basic XS such as fission, nGamma, etc.\n\n        Parameters\n        ----------\n        libType : str, optional\n            The block attribute containing the desired microscopic XS for this block:\n            either \"micros\" for neutron XS or \"gammaXS\" for gamma XS.\n        \"\"\"\n        reactions = BASIC_XS + TOTAL_XS\n        if NUSIGF in reactions:\n            reactions.remove(NUSIGF)\n            self.macros[NUSIGF] = computeMacroscopicGroupConstants(\n                FISSION_XS,\n                self.densities,\n                self.microLibrary,\n                self.xsSuffix,\n                libType=libType,\n                multConstant=NU,\n            )\n\n        for reaction in reactions:\n            self.macros[reaction] = computeMacroscopicGroupConstants(\n                reaction,\n                self.densities,\n                self.microLibrary,\n                self.xsSuffix,\n                libType=libType,\n            )\n\n    def _convertScatterMatrices(self, libType=\"micros\"):\n        \"\"\"\n        Build macroscopic scatter matrices.\n\n        Parameters\n        ----------\n        libType : str, optional\n            The block attribute containing the desired microscopic XS for this block:\n            either \"micros\" for neutron XS or \"gammaXS\" for gamma XS.\n        \"\"\"\n        if not self.buildScatterMatrix:\n            return\n\n        for nuclide in self.microLibrary.getNuclides(self.xsSuffix):\n            microCollection = getattr(nuclide, libType)\n            nDens = self.densities.get(nuclide.name, 0.0)\n  
          if microCollection.elasticScatter is not None:\n                self.macros.elasticScatter += microCollection.elasticScatter * nDens\n            if microCollection.inelasticScatter is not None:\n                self.macros.inelasticScatter += microCollection.inelasticScatter * nDens\n            if microCollection.n2nScatter is not None:\n                self.macros.n2nScatter += microCollection.n2nScatter * nDens\n\n    def _computeAbsorptionXS(self):\n        \"\"\"\n        Absorption = sum of all absorption reactions.\n\n        Must be called after :py:meth:`_convertBasicXS`.\n        \"\"\"\n        for absXS in self.macros.getAbsorptionXS():\n            self.macros.absorption += absXS\n\n    def _computeDiffusionConstants(self):\n        self.macros.diffusionConstants = 1.0 / (3.0 * self.macros.transport)\n\n    def _buildTotalScatterMatrix(self):\n        self.macros.totalScatter = self.macros.getTotalScatterMatrix()\n\n    def _computeRemovalXS(self):\n        \"\"\"\n        Compute removal cross section (things that remove a neutron from this phase space).\n\n        This includes all absorptions and outscattering.\n        Outscattering is represented by columns of the total scatter matrix.\n        Self-scattering (e.g. when g' == g) is not be included. 
This can be\n        handled by summing the columns and then subtracting the diagonal.\n\n        within-group n2n is accounted for by simply not including n2n in the removal xs.\n        \"\"\"\n        self.macros.removal = self.macros.absorption - self.macros.n2n\n        columnSum = self.macros.totalScatter.sum(axis=0).getA1()  # convert to ndarray\n        diags = self.macros.totalScatter.diagonal()\n        self.macros.removal += columnSum - diags\n\n\n# ruff: noqa: E501\ndef computeBlockAverageChi(b, isotxsLib):\n    r\"\"\"\n    Return the block average total chi vector based on isotope chi vectors.\n\n    This is defined by eq 3.4b in DIF3D manual [DIF3D]_, which corresponds to 1 in A.HMG4C card.\n\n    .. math::\n\n\n        \\chi_g = \\frac{\\sum_{n} \\chi_{g,n} N_n V \\sum_{g'}(\\nu_{g'}*\\sigma_{f,g'})}{\\sum_n N_n V \\sum_{g'}(\\nu_{g'}*\\sigma_{f,g'} )}\n\n\n    To evaluate efficiently, assume that if :math:`\\chi_{g,n}=0`, there will be no contributions\n\n    Volume is not used b/c it is already homogenized in the block.\n\n    Parameters\n    ----------\n    b : object\n        Block object\n\n    isotxsLib : object\n        ISOTXS library object\n\n    Notes\n    -----\n    This methodology is based on option 1 in the HMG4C utility (named total\n    fission source weighting).\n    \"\"\"\n    numGroups = isotxsLib.numGroups\n    numerator = np.zeros(numGroups)\n    denominator = 0.0\n    numberDensities = b.getNumberDensities()\n    for nucObj in isotxsLib.getNuclides(b.getMicroSuffix()):\n        nucMicroXS = nucObj.micros\n        nucNDens = numberDensities.get(nucObj.name, 0.0)\n        nuFissionTotal = sum(nucMicroXS.neutronsPerFission * nucMicroXS.fission)\n        numerator += nucMicroXS.chi * nucNDens * nuFissionTotal\n        denominator += nucNDens * nuFissionTotal\n    if denominator != 0.0:\n        return numerator / denominator\n    else:\n        return np.zeros(numGroups)\n\n\ndef _getLibTypeSuffix(libType):\n    if libType == 
\"micros\":\n        libTypeSuffix = \"\"\n    elif libType == \"gammaXS\":\n        libTypeSuffix = \"Gamma\"\n    else:\n        libTypeSuffix = None\n        runLog.warning(\n            'ARMI currently supports only micro XS libraries of types \"micros\" (neutron) and \"gammaXS\" (gamma).'\n        )\n\n    return libTypeSuffix\n\n\ndef computeNeutronEnergyDepositionConstants(numberDensities, lib, microSuffix):\n    \"\"\"\n    Compute the macroscopic neutron energy deposition group constants.\n\n    These group constants can be multiplied by the flux to obtain energy deposition rates.\n\n    Parameters\n    ----------\n    numberDensities : dict\n        nucName keys, number density values (atoms/bn-cm) of all nuclides in the composite for which\n        the macroscopic group constants are computed. See composite `getNuclideNumberDensities` method.\n\n    lib : library object\n        Microscopic cross section library.\n\n    microSuffix : str\n        Microscopic library suffix (e.g. 'AB') for this composite.\n        See composite `getMicroSuffix` method.\n\n    Returns\n    -------\n    energyDepositionConsts : np.ndarray\n        Neutron energy deposition group constants. 
(J/cm)\n\n    Notes\n    -----\n    PMATRX documentation says units will be eV/s when multiplied by flux but it's eV/s/cm^3.\n    (eV/s/cm^3 = eV-bn * 1/cm^2/s * 1/bn-cm.)\n\n    Converted here to obtain J/cm (eV-bn * 1/bn-cm * J / eV)\n    \"\"\"\n    return computeMacroscopicGroupConstants(\"neutronHeating\", numberDensities, lib, microSuffix) * units.JOULES_PER_eV\n\n\ndef computeGammaEnergyDepositionConstants(numberDensities, lib, microSuffix):\n    \"\"\"\n    Compute the macroscopic gamma energy deposition group constants.\n\n    These group constants can be multiplied by the flux to obtain energy deposition rates.\n\n    Parameters\n    ----------\n    numberDensities : dict\n        nucName keys, number density values (atoms/bn-cm) of all nuclides in the composite for which\n        the macroscopic group constants are computed. See composite `getNuclideNumberDensities` method.\n\n    lib : library object\n        Microscopic cross section library.\n\n    microSuffix : str\n        Microscopic library suffix (e.g. 'AB') for this composite.\n        See composite `getMicroSuffix` method.\n\n    Returns\n    -------\n    energyDepositionConsts : np.ndarray\n        gamma energy deposition group constants. (J/cm)\n\n    Notes\n    -----\n    PMATRX documentation says units will be eV/s when multiplied by flux but it's eV/s/cm^3.\n    (eV/s/cm^3 = eV-bn * 1/cm^2/s * 1/bn-cm.)\n\n    Convert here to obtain J/cm (eV-bn * 1/bn-cm * J / eV)\n    \"\"\"\n    return computeMacroscopicGroupConstants(\"gammaHeating\", numberDensities, lib, microSuffix) * units.JOULES_PER_eV\n\n\ndef computeFissionEnergyGenerationConstants(numberDensities, lib, microSuffix):\n    r\"\"\"\n    Get the fission energy generation group constant of a block.\n\n    .. 
math::\n\n        E_{generation_fission} = \\kappa_f \\Sigma_f\n\n    Power comes from fission and capture reactions.\n\n    Parameters\n    ----------\n    numberDensities : dict\n        nucName keys, number density values (atoms/bn-cm) of all nuclides in the composite for which\n        the macroscopic group constants are computed. See composite `getNuclideNumberDensities` method.\n\n    lib : library object\n        Microscopic cross section library.\n\n    microSuffix : str\n        Microscopic library suffix (e.g. 'AB') for this composite.\n        See composite `getMicroSuffix` method.\n\n    Returns\n    -------\n    fissionEnergyFactor: np.ndarray\n        Fission energy generation group constants (in Joules/cm)\n    \"\"\"\n    fissionEnergyFactor = computeMacroscopicGroupConstants(\n        FISSION_XS,\n        numberDensities,\n        lib,\n        microSuffix,\n        libType=\"micros\",\n        multConstant=E_FISSION,\n    )\n\n    return fissionEnergyFactor\n\n\ndef computeCaptureEnergyGenerationConstants(numberDensities, lib, microSuffix):\n    r\"\"\"\n    Get the energy generation group constant of a block.\n\n    .. math::\n\n        E_{generation capture} = \\kappa_c \\Sigma_c\n\n\n    Typically, one only cares about the flux* this XS (to find total power),\n    but the XS itself is required in some sensitivity studies.\n\n    Power comes from fission and capture reactions.\n\n    Parameters\n    ----------\n    numberDensities : dict\n        nucName keys, number density values (atoms/bn-cm) of all nuclides in the composite for which\n        the macroscopic group constants are computed. See composite `getNumberDensities` method.\n\n    lib : library object\n        Microscopic cross section library.\n\n    microSuffix : str\n        Microscopic library suffix (e.g. 
'AB') for this composite.\n        See composite `getMicroSuffix` method.\n\n    Returns\n    -------\n    captureEnergyFactor: np.ndarray\n        Capture energy generation group constants (in Joules/cm)\n    \"\"\"\n    captureEnergyFactor = None\n    for xs in CAPTURE_XS:\n        if captureEnergyFactor is None:\n            captureEnergyFactor = np.zeros(\n                np.shape(computeMacroscopicGroupConstants(xs, numberDensities, lib, microSuffix, libType=\"micros\"))\n            )\n\n        captureEnergyFactor += computeMacroscopicGroupConstants(\n            xs,\n            numberDensities,\n            lib,\n            microSuffix,\n            libType=\"micros\",\n            multConstant=E_CAPTURE,\n        )\n\n    return captureEnergyFactor\n\n\ndef computeMacroscopicGroupConstants(\n    constantName,\n    numberDensities,\n    lib,\n    microSuffix,\n    libType=None,\n    multConstant=None,\n    multLib=None,\n):\n    r\"\"\"\n    Compute any macroscopic group constants given number densities and a microscopic library.\n\n    .. impl:: Compute macroscopic cross sections from microscopic cross sections and number densities.\n        :id: I_ARMI_NUCDATA_MACRO\n        :implements: R_ARMI_NUCDATA_MACRO\n\n        This function computes the macroscopic cross sections of a specified\n        reaction type from inputted microscopic cross sections and number\n        densities. The ``constantName`` parameter specifies what type of\n        reaction is requested. The ``numberDensities`` parameter is a dictionary\n        mapping the nuclide to its number density. The ``lib`` parameter is a library\n        object like :py:class:`~armi.nuclearDataIO.xsLibraries.IsotxsLibrary` or\n        :py:class:`~armi.nuclearDataIO.xsLibraries.CompxsLibrary` that holds the\n        microscopic cross-section data. 
The ``microSuffix`` parameter specifies\n        from which part of the library the microscopic cross sections are\n        gathered; this is typically gathered from a components\n        ``getMicroSuffix`` method like :py:meth:`Block.getMicroSuffix\n        <armi.reactor.blocks.Block.getMicroSuffix>`. ``libType`` is an optional\n        parameter specifying whether the reaction is for neutrons or gammas.\n        This function also has the optional parameters ``multConstant`` and\n        ``multLib``, which allows another constant from the library, such as\n        neutrons per fission (nu) or energy per fission (kappa), to be\n        multiplied to the primary one. The macroscopic cross sections are then\n        computed as:\n\n        .. math::\n\n            \\Sigma_{g} = \\sum_{n} N_n \\sigma_{n,g}\\nu_n \\quad g=1,...,G\n\n        where :math:`n` is the isotope index, :math:`g` is the energy group\n        index, :math:`\\sigma` is the microscopic cross section, and :math:`\\nu`\n        is the scalar multiplier. If the library (``lib``) with suffix\n        ``microSuffix`` is missing a cross section for the ``constantName``\n        reaction for one or more of the nuclides in ``numberDensities`` an error\n        is raised; but if ``multConstant`` is missing that cross section, then\n        those nuclides are printed as a warning.\n\n    Parameters\n    ----------\n    constantName : str\n        Name of the reaction for which to obtain the group constants. This name should match a\n        cross section name or an attribute in the collection.\n    numberDensities : dict\n        nucName keys, number density values (atoms/bn-cm) of all nuclides in the composite for which\n        the macroscopic group constants are computed. See composite `getNuclideNumberDensities` method.\n    lib : library object\n        Microscopic cross section library.\n    microSuffix : str\n        Microscopic library suffix (e.g. 
'AB') for this composite.\n        See composite `getMicroSuffix` method.\n    libType : str, optional\n        The block attribute containing the desired microscopic XS for this block:\n        either \"micros\" for neutron XS or \"gammaXS\" for gamma XS.\n    multConstant : str, optional\n        Name of constant by which the group constants will be multiplied. This name should match a\n        cross section name or an attribute in the collection.\n    multLib : library object, optional\n        Microscopic cross section nuclide library to obtain the multiplier from.\n        If None, same library as base cross section is used.\n\n    Returns\n    -------\n    macroGroupConstant : np.ndarray\n        Macroscopic group constants for the requested reaction.\n    \"\"\"\n    skippedNuclides = []\n    skippedMultNuclides = []\n    macroGroupConstants = None\n\n    # sort the numberDensities because a summation is being performed that may result in slight\n    # differences based on the order.\n    for nuclideName, numberDensity in sorted(numberDensities.items()):\n        if not numberDensity:\n            continue\n        try:\n            libNuclide = lib.getNuclide(nuclideName, microSuffix)\n            multLibNuclide = libNuclide\n        except KeyError:\n            skippedNuclides.append(nuclideName)  # Nuclide does not exist in the library\n            continue\n\n        if multLib:\n            try:\n                multLibNuclide = multLib.getNuclide(nuclideName, microSuffix)\n            except KeyError:\n                skippedMultNuclides.append(nuclideName)  # Nuclide does not exist in the library\n                continue\n\n        microGroupConstants = _getMicroGroupConstants(libNuclide, constantName, nuclideName, libType)\n\n        multiplierVal = _getXsMultiplier(multLibNuclide, multConstant, libType)\n\n        if macroGroupConstants is None:\n            macroGroupConstants = np.zeros(microGroupConstants.shape)\n\n        if 
microGroupConstants.shape != macroGroupConstants.shape and not microGroupConstants.any():\n            microGroupConstants = np.zeros(macroGroupConstants.shape)\n\n        macroGroupConstants += np.asarray(numberDensity) * microGroupConstants * multiplierVal\n\n    if skippedNuclides:\n        msg = \"The following nuclides are not in microscopic library {}: {}\".format(lib, skippedNuclides)\n        runLog.error(msg, single=True)\n        raise ValueError(msg)\n\n    if skippedMultNuclides:\n        runLog.debug(\n            \"The following nuclides are not in multiplier library {}: {}\".format(multLib, skippedMultNuclides),\n            single=True,\n        )\n\n    return macroGroupConstants\n\n\ndef _getXsMultiplier(libNuclide, multiplier, libType):\n    if multiplier:\n        try:\n            microCollection = getattr(libNuclide, libType)\n            multiplierVal = getattr(microCollection, multiplier)\n        except Exception:\n            multiplierVal = libNuclide.isotxsMetadata[multiplier]\n    else:\n        multiplierVal = 1.0\n\n    return np.asarray(multiplierVal)\n\n\ndef _getMicroGroupConstants(libNuclide, constantName, nuclideName, libType):\n    if libType:\n        microCollection = getattr(libNuclide, libType)\n    else:\n        microCollection = libNuclide\n\n    microGroupConstants = np.asarray(getattr(microCollection, constantName))\n\n    if not microGroupConstants.any():\n        runLog.debug(\n            \"Nuclide {} does not have {} microscopic group constants.\".format(nuclideName, constantName),\n            single=True,\n        )\n\n    return microGroupConstants\n"
  },
  {
    "path": "armi/nuclearDataIO/xsLibraries.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nCross section library objects.\n\nCross section libraries, currently, contain neutron and/or gamma\ncross sections, but are not necessarily intended to be only neutron and gamma data.\n\"\"\"\n\nimport glob\nimport os\nimport re\n\nfrom armi import runLog\nfrom armi.nucDirectory import nuclideBases\nfrom armi.nuclearDataIO.nuclearFileMetadata import NuclideXSMetadata, RegionXSMetadata\nfrom armi.utils import properties\n\n_ISOTXS_EXT = \"ISO\"\n\n\ndef compare(lib1, lib2):\n    \"\"\"Compare two XSLibraries, and return True if equal, or False if not.\"\"\"\n    from armi.nuclearDataIO.cccc import gamiso, isotxs, pmatrx\n\n    equal = True\n    # check the nuclides\n    equal &= _checkLabels(lib1, lib2)\n    equal &= _checkLabels(lib2, lib1)\n    equal &= isotxs.compare(lib1, lib2)\n    equal &= gamiso.compare(lib1, lib2)\n    equal &= pmatrx.compare(lib1, lib2)\n\n    return equal\n\n\ndef _checkLabels(llib1, llib2):\n    mismatch = set(llib1.nuclideLabels) - set(llib2.nuclideLabels)\n    if any(mismatch):\n        runLog.important(\"{} has nuclides that are not in {}: {}\".format(llib1, llib2, mismatch))\n        return False\n    return True\n\n\ndef compareXSLibraryAttribute(lib1, lib2, attributeName, tolerance=0.0):\n    \"\"\"Compare the values of an attribute in two libraries.\"\"\"\n    val1 = getattr(lib1, \"_\" + attributeName, None)\n    val2 = 
getattr(lib2, \"_\" + attributeName, None)\n    if not properties.areEqual(val1, val2, tolerance):\n        runLog.important(\n            \"{} and {} have different `{}` attributes:\\n{}\\n{}\".format(lib1, lib2, attributeName, val1, val2)\n        )\n        return False\n    return True\n\n\ndef compareLibraryNeutronEnergies(lib1, lib2, tolerance=0.0):\n    \"\"\"Compare the neutron velocities and energy upper bounds for two libraries.\"\"\"\n    equals = True\n    equals &= compareXSLibraryAttribute(lib1, lib2, \"neutronEnergyUpperBounds\", tolerance)\n    equals &= compareXSLibraryAttribute(lib1, lib2, \"neutronVelocities\", tolerance)\n    return equals\n\n\ndef getSuffixFromNuclideLabel(nucLabel):\n    \"\"\"\n    Return the xs suffix for the nuclide label.\n\n    Parameters\n    ----------\n    nucLabel: str\n        A string representing the nuclide and xs suffix, eg, \"U235AA\"\n\n    Returns\n    -------\n    suffix: str\n        The suffix of this string\n    \"\"\"\n    return nucLabel[-2:]\n\n\ndef getISOTXSLibrariesToMerge(xsLibrarySuffix, xsLibFileNames):\n    \"\"\"\n    Find ISOTXS libraries out of a list that should be merged based on the provided ``xsLibrarySuffix``.\n\n    Parameters\n    ----------\n    xsLibrarySuffix : str\n        XS library suffix is used to determine which ISOTXS files should be merged together. This can be an\n        empty string or be something like `-doppler`.\n\n    xsLibFileNames : list\n        A list of library file paths like ISOAA, ISOBA, ISOCA, etc. 
Can be a standalone file name or a full path.\n\n    Notes\n    -----\n    Files that exist: ISOAA-n1, ISOAB-n1, ISOAA-n2, ISOAB-n2, ISOAA, ISOAB, ISODA, ISOBA.\n    xsLibrarySuffix: 'n2'\n    Results: ISOAA-n2, ISOAB-n2\n    \"\"\"\n    isosToMerge = [\n        iso\n        for iso in xsLibFileNames\n        if \"ISOTXS\" not in iso  # Skip merged ISOTXS file\n        and \".ascii\" not in iso  # Skip BCD/ascii files\n        and \"BCD\" not in iso\n    ]  # Skip BCD/ascii files\n    if xsLibrarySuffix != \"\":\n        isosWithSuffix = [iso for iso in isosToMerge if re.match(f\".*ISO[A-Za-z]{{2}}F?{xsLibrarySuffix}$\", iso)]\n        isosToMerge = [\n            iso\n            for iso in isosToMerge\n            if \"-\" not in os.path.basename(iso)\n            and not any(os.path.basename(iso) == os.path.basename(iws).split(\"-\")[0] for iws in isosWithSuffix)\n        ]\n        isosToMerge += isosWithSuffix\n    else:\n        isosToMerge = [iso for iso in isosToMerge if \"-\" not in os.path.basename(iso)]\n    return isosToMerge\n\n\ndef mergeXSLibrariesInWorkingDirectory(\n    lib,\n    xsLibrarySuffix=\"\",\n    mergeGammaLibs=False,\n    alternateDirectory=None,\n):\n    \"\"\"\n    Merge neutron (ISOTXS) and gamma (GAMISO/PMATRX) library data into the provided library.\n\n    Notes\n    -----\n    Convention is for fuel XS id to come first alphabetically (A, B, C, etc.) and then be\n    followed by non-fuel. This should allow `referenceDummyNuclide` to be defined before\n    it is needed by a non-fuel cross section, but if the convention is not followed then\n    this could cause an issue.\n\n    Parameters\n    ----------\n    lib : obj\n        ISOTXS library object\n\n    xsLibrarySuffix : str, optional\n        XS library suffix used to determine which ISOTXS files are merged together,\n        typically something like `-doppler`. 
If empty string, will merge everything\n        without suffix (indicated by a `-`).\n\n    mergeGammaLibs : bool, optional\n        If True, the GAMISO and PMATRX files that correspond to the ISOTXS library will be merged. Note: if these\n        files do not exist this will fail.\n\n    alternateDirectory : str, optional\n        An alternate directory in which to search for files other than the working directory. The main purpose\n        of this is for testing, but it could also be useful to users.\n    \"\"\"\n    from armi import nuclearDataIO\n    from armi.nuclearDataIO.cccc import gamiso, isotxs, pmatrx\n\n    baseDir = alternateDirectory or os.getcwd()\n    globPath = os.path.join(baseDir, _ISOTXS_EXT + \"*\")\n    xsLibFiles = getISOTXSLibrariesToMerge(xsLibrarySuffix, [iso for iso in glob.glob(globPath)])\n    librariesToMerge = []\n    neutronVelocities = {}  # Dictionary of neutron velocities from each ISOTXS file\n    referenceDummyNuclides = None\n    for xsLibFilePath in sorted(xsLibFiles):\n        try:\n            # get XS ID from the cross section library name\n            xsID = re.search(\"ISO([A-Z0-9a-z]{2})\", xsLibFilePath).group(1)\n        except AttributeError:\n            # if glob has matched something that is not actually an ISOXX file,\n            # the .group() call will fail\n            runLog.debug(f\"Ignoring file {xsLibFilePath} in the merging of ISOXX files\")\n            continue\n\n        xsFileTypes = \"ISOTXS\" if not mergeGammaLibs else \"ISOTXS, GAMISO, and PMATRX\"\n        runLog.info(\"Retrieving {} data for XS ID {}{}\".format(xsFileTypes, xsID, xsLibrarySuffix))\n        if xsLibFilePath in lib.isotxsMetadata.fileNames:\n            runLog.extra(\"Skipping merge of {} because data already exists in the library\".format(xsLibFilePath))\n            continue\n\n        neutronLibrary = isotxs.readBinary(xsLibFilePath)\n        neutronVelocities[xsID] = neutronLibrary.neutronVelocity\n\n        
dummyNuclidesInNeutron = [\n            nuc for nuc in neutronLibrary.nuclides if isinstance(nuc._base, nuclideBases.DummyNuclideBase)\n        ]\n        if not dummyNuclidesInNeutron:\n            runLog.info(f\"Adding dummy nuclides to library {xsID}\")\n            addedDummyData = isotxs.addDummyNuclidesToLibrary(\n                neutronLibrary, referenceDummyNuclides\n            )  # Add DUMMY nuclide data not produced by MC2-3\n            isotxsLibraryPath = os.path.join(\n                baseDir,\n                nuclearDataIO.getExpectedISOTXSFileName(suffix=xsLibrarySuffix, xsID=xsID),\n            )\n            isotxsDummyPath = isotxsLibraryPath\n            isotxs.writeBinary(neutronLibrary, isotxsDummyPath)\n            neutronLibraryDummyData = isotxs.readBinary(isotxsDummyPath)\n            librariesToMerge.append(neutronLibraryDummyData)\n            dummyNuclidesInNeutron = referenceDummyNuclides\n        else:\n            librariesToMerge.append(neutronLibrary)\n            if not referenceDummyNuclides:\n                referenceDummyNuclides = dummyNuclidesInNeutron\n\n        if mergeGammaLibs:\n            gamisoLibraryPath = os.path.join(\n                baseDir,\n                nuclearDataIO.getExpectedGAMISOFileName(suffix=xsLibrarySuffix, xsID=xsID),\n            )\n            pmatrxLibraryPath = os.path.join(\n                baseDir,\n                nuclearDataIO.getExpectedPMATRXFileName(suffix=xsLibrarySuffix, xsID=xsID),\n            )\n\n            # Check if the gamiso and pmatrx data paths exist with the xs library suffix so that\n            # these are merged in. 
If they don't both exist then that is OK and we can just\n            # revert back to expecting the files just based on the XS ID.\n            if not (os.path.exists(gamisoLibraryPath) and os.path.exists(pmatrxLibraryPath)):\n                runLog.warning(\n                    \"One of GAMISO or PMATRX data does not exist for \"\n                    f\"XS ID {xsID} with suffix {xsLibrarySuffix}. \"\n                    \"Attempting to find GAMISO/PMATRX data with \"\n                    f\"only XS ID {xsID} instead.\"\n                )\n                gamisoLibraryPath = os.path.join(baseDir, nuclearDataIO.getExpectedGAMISOFileName(xsID=xsID))\n                pmatrxLibraryPath = os.path.join(baseDir, nuclearDataIO.getExpectedPMATRXFileName(xsID=xsID))\n\n            # GAMISO data\n            gammaLibrary = gamiso.readBinary(gamisoLibraryPath)\n            addedDummyData = gamiso.addDummyNuclidesToLibrary(\n                gammaLibrary, dummyNuclidesInNeutron\n            )  # Add DUMMY nuclide data not produced by MC2-3\n            if addedDummyData:\n                gamisoDummyPath = gamisoLibraryPath\n                gamiso.writeBinary(gammaLibrary, gamisoDummyPath)\n                gammaLibraryDummyData = gamiso.readBinary(gamisoDummyPath)\n                librariesToMerge.append(gammaLibraryDummyData)\n            else:\n                librariesToMerge.append(gammaLibrary)\n\n            # PMATRX data\n            pmatrxLibrary = pmatrx.readBinary(pmatrxLibraryPath)\n            addedDummyData = pmatrx.addDummyNuclidesToLibrary(\n                pmatrxLibrary, dummyNuclidesInNeutron\n            )  # Add DUMMY nuclide data not produced by MC2-3\n            if addedDummyData:\n                pmatrxDummyPath = pmatrxLibraryPath\n                pmatrx.writeBinary(pmatrxLibrary, pmatrxDummyPath)\n                pmatrxLibraryDummyData = pmatrx.readBinary(pmatrxDummyPath)\n                librariesToMerge.append(pmatrxLibraryDummyData)\n            else:\n        
        librariesToMerge.append(pmatrxLibrary)\n    for library in librariesToMerge:\n        lib.merge(library)\n\n    return neutronVelocities\n\n\nclass _XSLibrary:\n    \"\"\"Parent class for Isotxs and Compxs library objects.\"\"\"\n\n    neutronEnergyUpperBounds = properties.createImmutableProperty(\n        \"neutronEnergyUpperBounds\", \"an ISOTXS\", \"Get or set the neutron energy groups.\"\n    )\n\n    neutronVelocity = properties.createImmutableProperty(\n        \"neutronVelocity\", \"an ISOTXS\", \"Get or set the mean neutron velocity in cm/s.\"\n    )\n\n    def __init__(self):\n        # each element is a string such as U235AA\n        self._orderedNuclideLabels = []\n\n    def __contains__(self, key):\n        return key in self._orderedNuclideLabels\n\n    def __setitem__(self, key, value):\n        if key in self._orderedNuclideLabels:\n            raise AttributeError(\"{} already contains {}\".format(self, key))\n        value.container = self\n        self._orderedNuclideLabels.append(key)\n\n    def __getitem__(self, key):\n        raise NotImplementedError\n\n    def __delitem__(self, key):\n        self._orderedNuclideLabels.remove(key)\n\n    def merge(self, other):\n        raise NotImplementedError\n\n    def __len__(self):\n        return len(self._orderedNuclideLabels)\n\n    def _mergeNeutronEnergies(self, other):\n        self.neutronEnergyUpperBounds = other.neutronEnergyUpperBounds\n        # neutron velocity changes, but just use the first one.\n        if not hasattr(self, \"_neutronVelocity\"):\n            self.neutronVelocity = other.neutronVelocity\n\n    def items(self):\n        for key in self._orderedNuclideLabels:\n            yield (key, self[key])\n\n\nclass IsotxsLibrary(_XSLibrary):\n    \"\"\"\n    IsotxsLibrary objects are a collection of cross sections (XS) for both neutron and gamma reactions.\n\n    IsotxsLibrary objects must be initialized with data through one of the read methods within this package\n\n    See 
Also\n    --------\n    :py:func:`armi.nuclearDataIO.cccc.isotxs.readBinary`\n    :py:func:`armi.nuclearDataIO.cccc.gamiso.readBinary`\n    :py:func:`armi.nuclearDataIO.cccc.pmatrx.readBinary`\n    :py:class:`CompxsLibrary`\n\n    Examples\n    --------\n    >>> lib = xsLibraries.IsotxsLibrary()\n    >>> # this doesn't have any information yet, we can read ISOTXS information\n    >>> libIsotxs = isotxs.readBinary(\"ISOAA\")\n    >>> # any number of XSLibraries can be merged\n    >>> lib.merge(libIsotxs)  # now the `lib` contains the ISOAA information.\n    \"\"\"\n\n    def __init__(self):\n        _XSLibrary.__init__(self)\n        self.pmatrxMetadata = NuclideXSMetadata()\n        self.isotxsMetadata = NuclideXSMetadata()\n        self.gamisoMetadata = NuclideXSMetadata()\n\n        # keys are nuclide labels such as U235AA\n        # vals are XSNuclide objects\n        self._nuclides = {}\n        self._scatterWeights = {}\n\n    gammaEnergyUpperBounds = properties.createImmutableProperty(\n        \"gammaEnergyUpperBounds\",\n        \"a PMATRX or GAMISO\",\n        \"Get or set the gamma energy groups.\",\n    )\n\n    neutronDoseConversionFactors = properties.createImmutableProperty(\n        \"neutronDoseConversionFactors\",\n        \"a PMATRX\",\n        \"Get or set the neutron dose conversion factors.\",\n    )\n\n    gammaDoseConversionFactors = properties.createImmutableProperty(\n        \"gammaDoseConversionFactors\",\n        \"a PMATRX\",\n        \"Get or set the gamma does conversion factors.\",\n    )\n\n    @property\n    def numGroups(self):\n        \"\"\"Get the number of neutron energy groups.\"\"\"\n        # This unlocks the immutable property so that it can be\n        # read prior to not being set to check the number of groups\n        # that are defined. 
If the property is not unlocked before\n        # accessing when it has not yet been defined then an exception\n        # is thrown.\n        properties.unlockImmutableProperties(self)\n        if self.neutronEnergyUpperBounds is not None:\n            energyBounds = self.neutronEnergyUpperBounds\n        else:\n            energyBounds = []\n\n        # Make sure to re-lock the properties after we are done.\n        properties.lockImmutableProperties(self)\n        return len(energyBounds)\n\n    @property\n    def numGroupsGamma(self):\n        \"\"\"Get the number of gamma energy groups.\"\"\"\n        # This unlocks the immutable property so that it can be\n        # read prior to not being set to check the number of groups\n        # that are defined. If the property is not unlocked before\n        # accessing when it has not yet been defined then an exception\n        # is thrown.\n        properties.unlockImmutableProperties(self)\n        if self.gammaEnergyUpperBounds is not None:\n            energyBounds = self.gammaEnergyUpperBounds\n        else:\n            energyBounds = []\n\n        # Make sure to re-lock the properties after we are done.\n        properties.lockImmutableProperties(self)\n        return len(energyBounds)\n\n    @property\n    def xsIDs(self):\n        \"\"\"\n        Get the XS ID's present in this library.\n\n        Assumes the suffixes are the last 2 letters in the nucNames\n        \"\"\"\n        return list(set(getSuffixFromNuclideLabel(name) for name in self.nuclideLabels))\n\n    def __repr__(self):\n        isotxs = bool(self.isotxsMetadata.keys())\n        pmatrx = bool(self.pmatrxMetadata.keys())\n        gamiso = bool(self.gamisoMetadata.keys())\n        groups = \"\"\n        if self.numGroups:\n            groups += f\"Neutron groups: {self.numGroups}, \"\n        if self.numGroupsGamma:\n            groups += f\"Gamma groups: {self.numGroupsGamma},\"\n\n        return (\n            f\"<IsotxsLibrary 
(id:{id(self)}), \"\n            f\"ISOTXS: {isotxs}, PMATRX: {pmatrx}, GAMISO: {gamiso}, \"\n            f\"{groups} containing {len(self)} nuclides with \"\n            f\"XS IDs: {sorted(self.xsIDs)}>\"\n        )\n\n    def __setitem__(self, key, value):\n        _XSLibrary.__setitem__(self, key, value)\n        self._nuclides[key] = value\n\n    def __getitem__(self, key):\n        return self._nuclides[key]\n\n    def get(self, nuclideLabel, default):\n        return self._nuclides.get(nuclideLabel, default)\n\n    def getNuclide(self, nucName, suffix):\n        \"\"\"\n        Get a nuclide object from the XS library.\n\n        Parameters\n        ----------\n        nucName : str\n            ARMI nuclide name, e.g. 'U235', 'PU239'\n        suffix : str\n            Restrict to a specific nuclide lib suffix e.g. 'AA'\n\n        Returns\n        -------\n        nuclide : Nuclide object\n            A nuclide from the library or None\n        \"\"\"\n        libLabel = nuclideBases.byName[nucName].label + suffix\n\n        try:\n            return self[libLabel]\n        except KeyError:\n            runLog.error(\"Error in {}.\\nSee stderr.\".format(self))\n            raise\n\n    def __delitem__(self, key):\n        _XSLibrary.__delitem__(self, key)\n        del self._nuclides[key]\n\n    @property\n    def nuclideLabels(self):\n        \"\"\"Get the nuclide Names.\"\"\"\n        # need to create a new list so the _orderedNuclideLabels does not get modified.\n        return list(self._orderedNuclideLabels)\n\n    @property\n    def nuclides(self):\n        return [self[name] for name in self._orderedNuclideLabels]\n\n    def getNuclides(self, suffix):\n        \"\"\"Returns a list of the nuclide objects in the library.\"\"\"\n        nucs = []\n        # nucName is U235IA, etc.. 
nuc.name is U235, etc\n        for nucLabel, nuc in self.items():\n            # `in` used below for support of >26 xs groups\n            if not suffix or suffix in getSuffixFromNuclideLabel(nucLabel):\n                # accept things with the suffix if one is given\n                if nuc not in nucs:\n                    nucs.append(nuc)\n        return nucs\n\n    def merge(self, other):\n        \"\"\"Merge two XSLibraries.\"\"\"\n        runLog.debug(\"Merging XS library {} into XS library {}\".format(other, self))\n        self._mergeProperties(other)\n        # merging meta data may raise an exception before knowing anything about the contained nuclides\n        # if it raises an exception, nothing has been modified in two objects\n        isotxsMeta, pmatrxMeta, gamisoMeta = self._mergeMetadata(other)\n        self._mergeNuclides(other)\n        # only vampire the __dict__ if successful\n        other.__dict__ = {}\n        # only reassign metadata if successful\n        self.isotxsMetadata = isotxsMeta\n        self.pmatrxMetadata = pmatrxMeta\n        self.gamisoMetadata = gamisoMeta\n\n    def _mergeProperties(self, other):\n        properties.unlockImmutableProperties(other)\n        try:\n            self.neutronDoseConversionFactors = other.neutronDoseConversionFactors\n            self._mergeNeutronEnergies(other)\n            self.gammaEnergyUpperBounds = other.gammaEnergyUpperBounds\n            self.gammaDoseConversionFactors = other.gammaDoseConversionFactors\n        finally:\n            properties.lockImmutableProperties(other)\n\n    def _mergeMetadata(self, other):\n        isotxsMeta = self.isotxsMetadata.merge(other.isotxsMetadata, self, other, \"ISOTXS\", OSError)\n        pmatrxMeta = self.pmatrxMetadata.merge(other.pmatrxMetadata, self, other, \"PMATRX\", OSError)\n        gamisoMeta = self.gamisoMetadata.merge(other.gamisoMetadata, self, other, \"GAMISO\", OSError)\n        return isotxsMeta, pmatrxMeta, gamisoMeta\n\n    def 
_mergeNuclides(self, other):\n        # these must be different\n        for nuclideKey, nuclide in other.items():\n            if nuclideKey in self:\n                self[nuclideKey].merge(nuclide)\n            else:\n                self[nuclideKey] = nuclide\n\n    def resetScatterWeights(self):\n        self._scatterWeights = {}\n\n    def getScatterWeights(self, scatterMatrixKey=\"elasticScatter\"):\n        \"\"\"\n        Build or retrieve pre-built scatter weight data.\n\n        This acts like a cache for _buildScatterWeights\n\n        See Also\n        --------\n        _buildScatterWeights\n        \"\"\"\n        if not self._scatterWeights.get(scatterMatrixKey):\n            self._scatterWeights[scatterMatrixKey] = self._buildScatterWeights(scatterMatrixKey)\n\n        return self._scatterWeights[scatterMatrixKey]\n\n    def _buildScatterWeights(self, scatterMatrixKey):\n        r\"\"\"\n        Build a scatter-weight lookup table for the scatter matrix.\n\n        Scatter \"weights\" are needed for sensitivity studies when derivatives wrt the\n        scatter XS are required. They are defined like:\n\n        .. 
math::\n            w_{g^{\\prime} \\leftarrow g} = \\frac{\\sigma_{s,g^{\\prime} \\leftarrow g}}\n            {\\sum_{g^{\\prime\\prime}=1}^G \\sigma_{s, g^{\\prime\\prime} \\leftarrow g}}\n\n        Returns\n        -------\n        scatterWeights : dict\n            (xsID, fromGroup) : weight column (sparse Gx1)\n        \"\"\"\n        runLog.info(\"Building {0} weights on cross section library\".format(scatterMatrixKey))\n        scatterWeights = {}\n        for nucName, nuc in self.items():\n            nucScatterWeights = nuc.buildNormalizedScatterColumns(scatterMatrixKey)\n            for fromG, scatterColumn in nucScatterWeights.items():\n                scatterWeights[nucName, fromG] = scatterColumn\n        return scatterWeights\n\n    def purgeFissionProducts(self, r):\n        \"\"\"\n        Purge the fission products based on the active nuclides within the reactor.\n\n        Parameters\n        ----------\n        r : py:class:`armi.reactors.reactor.Reactor`\n            a reactor, or None\n\n        .. 
warning:: Sometimes worker nodes do not have a reactor, fission products will not be purged.\n        \"\"\"\n        runLog.info(\"Purging detailed fission products from {}\".format(self))\n        modeledNucs = r.blueprints.allNuclidesInProblem\n        for key, nuc in list(self.items()):\n            if nuc.name not in modeledNucs:\n                del self[key]\n\n\nclass CompxsLibrary(_XSLibrary):\n    \"\"\"\n    Library object used in reading/writing COMPXS files.\n\n    Contains macroscopic cross sections for homogenized regions.\n\n    See Also\n    --------\n    :py:class:`IsotxsLibrary`\n    :py:func:`armi.nuclearDataIO.cccc.compxs.readBinary`\n\n    Examples\n    --------\n    >>> lib = compxs.readBinary(\"COMPXS\")\n    >>> lib.regions\n    \"\"\"\n\n    def __init__(self):\n        _XSLibrary.__init__(self)\n        self._regions = {}\n        self.compxsMetadata = RegionXSMetadata()\n\n    def __setitem__(self, key, value):\n        _XSLibrary.__setitem__(self, key, value)\n        self._regions[key] = value\n\n    def __getitem__(self, key):\n        return self._regions[key]\n\n    def __delitem__(self, key):\n        _XSLibrary.__delitem__(self, key)\n        del self._regions[key]\n\n    @property\n    def regions(self):\n        return [self[name] for name in self._orderedNuclideLabels]\n\n    @property\n    def regionLabels(self):\n        return list(self._orderedNuclideLabels)\n\n    def merge(self, other):\n        \"\"\"Merge two ``COMPXS`` libraries.\"\"\"\n        self._mergeProperties(other)\n        self.compxsMetadata = self.compxsMetadata.merge(other.compxsMetadata, self, other, \"COMPXS\", OSError)\n        self._appendRegions(other)\n\n    def _mergeProperties(self, other):\n        properties.unlockImmutableProperties(other)\n        try:\n            self._mergeNeutronEnergies(other)\n        finally:\n            properties.lockImmutableProperties(other)\n\n    def _appendRegions(self, other):\n        offset = 
len(self.regions)\n        for region in other.regions:\n            newNumber = region.regionNumber + offset\n            self[newNumber] = region\n        self.compxsMetadata[\"numComps\"] = len(self.regions)\n"
  },
  {
    "path": "armi/nuclearDataIO/xsNuclides.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"\nThis module contains cross section nuclides, which are a wrapper around the\n:py:class:`~armi.nucDirectory.nuclideBases.INuclide` objects. The cross section nuclide objects contain cross section\ninformation from a specific calculation (e.g. neutron, or gamma cross sections).\n\n:py:class:`XSNuclide` objects also contain meta data from the original file, so that another file can be reconstructed.\n\nWarning\n-------\n:py:class:`XSNuclide` objects should only be created by reading data into\n:py:class:`~armi.nuclearDataIO.xsLibrary.XSLibrary` objects, and then retrieving them through their label index (i.e.\n\"PU39AA\").\n\"\"\"\n\nfrom armi.nucDirectory import nuclideBases\nfrom armi.nuclearDataIO import nuclearFileMetadata, xsCollections, xsLibraries\nfrom armi.utils.customExceptions import warn_when_root\nfrom armi.utils.plotting import plotScatterMatrix  # noqa: F401\n\n\n@warn_when_root\ndef NuclideLabelDoesNotMatchNuclideLabel(nuclide, label, xsID):\n    return \"The label {} (xsID:{}) for nuclide {}, does not match the nucDirectory label.\".format(label, xsID, nuclide)\n\n\nclass XSNuclide(nuclideBases.NuclideWrapper):\n    \"\"\"\n    A nuclide object for a specific library.\n\n    XSNuclide objects can contain GAMISO, ISOTXS, and PMATRX data all on a single instance.\n    \"\"\"\n\n    def __init__(self, xsCollection, xsCollectionKey):\n        
nuclideBases.NuclideWrapper.__init__(self, xsCollection, xsCollectionKey)\n        self.xsId = xsLibraries.getSuffixFromNuclideLabel(xsCollectionKey)\n        self.source = 0.0\n        # 2D record... nucNames\n        # 4D record\n        self.isotxsMetadata = nuclearFileMetadata.NuclideMetadata()\n        self.gamisoMetadata = nuclearFileMetadata.NuclideMetadata()\n        self.pmatrxMetadata = nuclearFileMetadata.NuclideMetadata()\n        # 5D and 7D records\n        self.micros = xsCollections.XSCollection(parent=self)\n        self.gammaXS = xsCollections.XSCollection(parent=self)\n        self.neutronHeating = None\n        self.neutronDamage = None\n        self.gammaHeating = None\n        self.isotropicProduction = None\n        self.linearAnisotropicProduction = None\n        self.nOrderProductionMatrix = {}\n\n    def updateBaseNuclide(self):\n        \"\"\"\n        Update which nuclide base this :py:class:`XSNuclide` points to.\n\n        Notes\n        -----\n        During instantiation, not everything is available, only the user-supplied nuclide label,\n        i.e. 
:py:class:`~armi.nucDirectory.nuclideBases.NuclideWrapper.containerKey`.\n        During the read operation,\n        \"\"\"\n        if self._base is not None:\n            return\n        # most nuclides have the correct NuclideBase ID\n        nuclideId = self.isotxsMetadata[\"nuclideId\"]\n        nuclideBase = nuclideBases.byName.get(nuclideId, None)\n        if nuclideBase is None or isinstance(nuclideBase, nuclideBases.DummyNuclideBase):\n            # FP, DUMMY, DUMP\n            nuclideBase = nuclideBases.byLabel.get(self.nucLabel, None)\n            if nuclideBase is None:\n                raise OSError(\"Could not determine NuclideBase for label {}\".format(self.nucLabel))\n        if self.nucLabel != nuclideBase.label:\n            NuclideLabelDoesNotMatchNuclideLabel(nuclideBase, self.nucLabel, self.xsId)\n            nuclideBases.changeLabel(nuclideBase, self.nucLabel)\n        self._base = nuclideBase\n\n    def getMicroXS(self, interaction, group):\n        \"\"\"Returns the microscopic xs as the ISOTXS value if it exists or a 0 since it doesn't.\"\"\"\n        if interaction in self.micros.__dict__:\n            try:\n                return self.micros[interaction][group]\n            except IndexError:\n                raise IndexError(\n                    \"Group {0} not found in interaction {1} of nuclide {2}\".format(group, interaction, self.name)\n                )\n        else:\n            return 0\n\n    def getXS(self, interaction):\n        \"\"\"Get the cross section of a particular interaction.\n\n        See Also\n        --------\n        armi.nucDirectory.homogRegion.getXS\n        \"\"\"\n        return self.micros[interaction]\n\n    def buildNormalizedScatterColumns(self, scatterMatrixKey):\n        \"\"\"\n        Build normalized columns of a scatter matrix.\n\n        the vectors represent all scattering out of each group.\n        The rows of the scatter matrix represent in-scatter and the columns\n        represent 
out-scatter. So this sums up the columns.\n\n        Returns\n        -------\n        scatterWeights : dict\n            keys are fromG indices, values are sparse matrix columns (size: Gx1)\n            containing normalized columns of the scatter matrix.\n        \"\"\"\n        scatter = self.micros[scatterMatrixKey]\n        scatterWeights = {}\n        if scatter is None:\n            return scatterWeights\n        for fromG in range(self.container.numGroups):\n            outScatter = scatter[:, fromG]  # fromG column of scatter matrix.\n            total = outScatter.sum()\n            if total != 0.0:\n                normalizedOutScatter = outScatter / total\n            else:\n                normalizedOutScatter = outScatter\n            scatterWeights[fromG] = normalizedOutScatter\n\n        return scatterWeights\n\n    @property\n    def trans(self):\n        \"\"\"Get the transmutations for this nuclide.\n\n        Notes\n        -----\n        This is a property wrapper around the base nuclide's :code:`trans` attribute\n        \"\"\"\n        return self._base.trans\n\n    @property\n    def decays(self):\n        \"\"\"Get the decays for this nuclide.\n\n        Notes\n        -----\n        This is a property wrapper around the base nuclide's :code:`decays` attribute\n        \"\"\"\n        return self._base.decays\n\n    def merge(self, other):\n        \"\"\"\n        Merge the attributes of two XSNuclides.\n\n        Parameters\n        ----------\n        other : armi.nuclearDataIO.xsNuclides.XSNuclide\n            The other nuclide to merge information.\n\n        Notes\n        -----\n        The merge is really more like \"cannibalize\" in that the object performing the merge takes on the attributes of\n        the :code:`other`. It isn't necessary to create new objects for the newly merged attributes, because the 99%\n        usage is only used during runtime, where the second XSNuclide, and it's container (e.g. 
ISOTXS, GAMISO, etc.)\n        are discarded after the merge.\n        \"\"\"\n        self.isotxsMetadata = self.isotxsMetadata.merge(other.isotxsMetadata, self, other, \"ISOTXS\", AttributeError)\n        self.gamisoMetadata = self.gamisoMetadata.merge(other.gamisoMetadata, self, other, \"GAMISO\", AttributeError)\n        self.pmatrxMetadata = self.pmatrxMetadata.merge(other.pmatrxMetadata, self, other, \"PMATRX\", AttributeError)\n        self.micros.merge(other.micros)\n        self.gammaXS.merge(other.gammaXS)\n        self.neutronHeating = _mergeAttributes(self, other, \"neutronHeating\")\n        self.neutronDamage = _mergeAttributes(self, other, \"neutronDamage\")\n        self.gammaHeating = _mergeAttributes(self, other, \"gammaHeating\")\n        self.isotropicProduction = _mergeAttributes(self, other, \"isotropicProduction\")\n        self.linearAnisotropicProduction = _mergeAttributes(self, other, \"linearAnisotropicProduction\")\n        # this is lazy, but should work, because the n-order wouldn't be set without the others being set first.\n        self.nOrderProductionMatrix = self.nOrderProductionMatrix or other.nOrderProductionMatrix\n\n\ndef _mergeAttributes(this, other, attrName):\n    \"\"\"Function for merging XSNuclide attributes.\n\n    Notes\n    -----\n    This function checks to see that the attribute has only been assigned for a single instance, and then uses\n    the one that has been assigned.\n\n    Returns\n    -------\n    The proper value for the attribute.\n    \"\"\"\n    attr1 = getattr(this, attrName)\n    attr2 = getattr(other, attrName)\n    if attr1 is not None and attr2 is not None:\n        raise AttributeError(\n            \"Cannot merge {} and {}, the attribute `{}` has been assigned on both instances.\".format(\n                this, other, attrName\n            )\n        )\n    return attr1 if attr1 is not None else attr2\n"
  },
  {
    "path": "armi/operators/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nOperators build and hold the ARMI reactor model and perform operations on it.\n\nDifferent operators may perform different calculation loops upon the reactor model.\nOperators can be thought of as schedulers for the interactions between the various\nARMI physics packages and the reactor object(s).\n\nOperators are generally created by a :py:mod:`armi.cases` object and are chosen by\nthe ``runType`` setting. Custom operators may be introduced via the :py:mod:`armi.plugins` system.\n\nThe ARMI framework comes with two general-purpose Operators, which can be used for\nvery real analysis given a proper set of plugins. The :py:class:`~armi.operators.operator.Operator`\nis the Standard operator, which loops over cycles and timenodes. The\n:py:class:`~armi.operators.snapshots.OperatorSnapshots`\nis the Snapshots operator, which loops over specific point in time from a previous Standard run\nand performs additional analysis (e.g. 
for detailed follow-on analysis/transients).\n\nSee Also\n--------\narmi.cases : Builds operators\n\narmi.reactor : The reactor model that the operator operates upon\n\narmi.interfaces : Code that operators schedule to perform the real analysis or\n    math on the reactor model\n\"\"\"\n\n# ruff: noqa: I001\nfrom armi import context, getPluginManagerOrFail\nfrom armi.operators.runTypes import RunTypes\nfrom armi.operators.operator import Operator\nfrom armi.operators.operatorMPI import OperatorMPI\nfrom armi.operators.snapshots import OperatorSnapshots\n\n\ndef factory(cs):\n    \"\"\"Choose an operator subclass and instantiate it object based on settings.\"\"\"\n    return getOperatorClassFromSettings(cs)(cs)\n\n\ndef getOperatorClassFromSettings(cs):\n    \"\"\"Choose a operator class based on user settings (possibly from plugin).\n\n    Parameters\n    ----------\n    cs : Settings\n\n    Returns\n    -------\n    Operator : Operator\n\n    Raises\n    ------\n    ValueError\n        If the Operator class cannot be determined from the settings.\n    \"\"\"\n    runType = cs[\"runType\"]\n\n    if runType == RunTypes.STANDARD:\n        if context.MPI_SIZE == 1:\n            return Operator\n        else:\n            return OperatorMPI\n\n    elif runType == RunTypes.SNAPSHOTS:\n        return OperatorSnapshots\n\n    plugInOperator = None\n    for potentialOperator in getPluginManagerOrFail().hook.getOperatorClassFromRunType(runType=runType):\n        if plugInOperator:\n            raise ValueError(\n                \"More than one Operator class was \"\n                f\"recognized for runType `{runType}`: \"\n                f\"{plugInOperator} and {potentialOperator}. \"\n                \"This is not allowed. Please adjust plugin config.\"\n            )\n        plugInOperator = potentialOperator\n    if plugInOperator:\n        return plugInOperator\n\n    raise ValueError(\n        f\"No valid operator was found for runType: `{runType}`. 
Please adjust settings or plugin configuration.\"\n    )\n"
  },
  {
    "path": "armi/operators/operator.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThe standard ARMI operator.\n\nThis builds and maintains the interface stack and loops through it for a certain number of cycles with a certain number\nof timenodes per cycle.\n\nThis is analogous to a real reactor operating over some period of time, often from initial startup, through the various\ncycles, and out to the end of plant life.\n\"\"\"\n\nimport collections\nimport os\nimport re\nimport time\nfrom typing import Optional, Tuple\n\nfrom armi import context, interfaces, runLog\nfrom armi.bookkeeping import db, memoryProfiler\nfrom armi.bookkeeping.report import reportingUtils\nfrom armi.operators.runTypes import RunTypes\nfrom armi.physics.fuelCycle.settings import CONF_SHUFFLE_LOGIC\nfrom armi.physics.neutronics.globalFlux.globalFluxInterface import (\n    GlobalFluxInterfaceUsingExecuters,\n)\nfrom armi.settings import settingsValidation\nfrom armi.settings.fwSettings.globalSettings import (\n    CONF_CYCLES_SKIP_TIGHT_COUPLING_INTERACTION,\n    CONF_DEFERRED_INTERFACE_NAMES,\n    CONF_DEFERRED_INTERFACES_CYCLE,\n    CONF_TIGHT_COUPLING,\n    CONF_TIGHT_COUPLING_MAX_ITERS,\n)\nfrom armi.utils import (\n    codeTiming,\n    getAvailabilityFactors,\n    getBurnSteps,\n    getCycleLengths,\n    getCycleNames,\n    getMaxBurnSteps,\n    getPowerFractions,\n    getPreviousTimeNode,\n    getStepLengths,\n    pathTools,\n    units,\n)\n\n\nclass 
Operator:\n    \"\"\"\n    Orchestrate an ARMI run, building all the pieces, looping through the interfaces, and manipulating the reactor.\n\n    This Operator loops over a user-input number of cycles, each with a user-input number of subcycles (called time\n    nodes). It calls a series of interaction hooks on each of the :py:class:`~armi.interfaces.Interface` in the\n    Interface Stack.\n\n    .. figure:: /.static/armi_general_flowchart.png\n        :align: center\n\n    **Figure 1.** The computational flow of the interface hooks in a Standard Operator\n\n    .. note:: The :doc:`/developer/guide` has some additional narrative on this topic.\n\n    .. impl:: An operator will have a reactor object to communicate between plugins.\n        :id: I_ARMI_OPERATOR_COMM\n        :implements: R_ARMI_OPERATOR_COMM\n\n        A major design feature of ARMI is that the Operator orchestrates the simulation, and as part of that, the\n        Operator has access to the Reactor data model. In code, this just means the reactor object is a mandatory\n        attribute of an instance of the Operator. But conceptually, this means that while the Operator drives the\n        simulation of the reactor, all code has access to the same copy of the reactor data model. This is a crucial\n        idea that allows disparate external nuclear models to interact; they interact with the ARMI reactor data model.\n\n    .. impl:: An operator is built from user settings.\n        :id: I_ARMI_OPERATOR_SETTINGS\n        :implements: R_ARMI_OPERATOR_SETTINGS\n\n        A major design feature of ARMI is that a run is built from user settings. In code, this means that a case\n        ``Settings`` object is passed into this class to initialize an Operator. Conceptually, this means that the\n        Operator that controls a reactor simulation is defined by user settings. 
Because developers can create their own\n        settings, the user can control an ARMI simulation with arbitrary granularity in this way. In practice, settings\n        common control things like: how many cycles a reactor is being modeled for, how many timesteps are to be modeled\n        per time node, the verbosity of the logging of the run, and which modeling steps will be run.\n\n\n    .. impl:: The operator shall advance the reactor through time.\n        :id: I_ARMI_DB_TIME2\n        :implements: R_ARMI_DB_TIME\n\n        A major design feature of any scientific model is time evolution of the physical system. The operator is in\n        charge of driving the reactor through time. It sets various parameters that define the temporal position of the\n        reactor: cycle, node, timeNode, and time. This information is then stored in the output database.\n\n\n    Attributes\n    ----------\n    cs : Settings\n            Global settings that define the run.\n\n    cycleNames : list of str\n        The name of each cycle. Cycles without a name are `None`.\n\n    stepLengths : list of list of float\n        A two-tiered list, where primary indices correspond to cycle and\n        secondary indices correspond to the length of each intra-cycle step (in days).\n\n    cycleLengths : list of float\n        The duration of each individual cycle in a run (in days). This is the entire cycle, from startup to startup and\n        includes outage time.\n\n    burnSteps : list of int\n        The number of sub-cycles in each cycle.\n\n    availabilityFactors : list of float\n        The fraction of time in a cycle that the plant is producing power. Note that capacity factor is always less than\n        or equal to this, depending on the power fraction achieved during each cycle. 
Note that this is not a two-tiered\n        list like stepLengths or powerFractions, because each cycle can have only one availabilityFactor.\n\n    powerFractions : list of list of float\n        A two-tiered list, where primary indices correspond to cycles and secondary indices correspond to the fraction\n        of full rated capacity that the plant achieves during that step of the cycle. Zero power fraction can indicate\n        decay-only cycles.\n\n    interfaces : list\n        The Interface objects that will operate upon the reactor\n    \"\"\"\n\n    inspector = settingsValidation.Inspector\n\n    def __init__(self, cs):\n        \"\"\"\n        Constructor for operator.\n\n        Parameters\n        ----------\n        cs : Settings\n            Global settings that define the run.\n\n        Raises\n        ------\n        OSError\n            If unable to create the FAST_PATH directory.\n        \"\"\"\n        self.r = None\n        self.cs = cs\n        runLog.LOG.startLog(self.cs.caseTitle)\n        self.timer = codeTiming.MasterTimer.getMasterTimer()\n        self.interfaces = []\n        self.restartData = []\n        self.loadedRestartData = []\n        self._cycleNames = None\n        self._stepLengths = None\n        self._cycleLengths = None\n        self._burnSteps = None\n        self._maxBurnSteps = None\n        self._powerFractions = None\n        self._availabilityFactors = None\n        self._convergenceSummary = None\n\n        # Create the welcome headers for the case (case, input, machine, and some basic reactor information)\n        reportingUtils.writeWelcomeHeaders(self, cs)\n\n        self._initFastPath()\n\n    @property\n    def burnSteps(self):\n        if not self._burnSteps:\n            self._burnSteps = getBurnSteps(self.cs)\n            if self._burnSteps == [] and self.cs[\"nCycles\"] == 1:\n                # it is possible for there to be one cycle with zero burn up, in which case burnSteps is an empty list\n            
    pass\n            else:\n                self._checkReactorCycleAttrs({\"burnSteps\": self._burnSteps})\n        return self._burnSteps\n\n    @property\n    def maxBurnSteps(self):\n        if not self._maxBurnSteps:\n            self._maxBurnSteps = getMaxBurnSteps(self.cs)\n        return self._maxBurnSteps\n\n    @property\n    def stepLengths(self):\n        \"\"\"\n        Calculate step lengths.\n\n        .. impl:: Calculate step lengths from cycles and burn steps.\n            :id: I_ARMI_FW_HISTORY\n            :implements: R_ARMI_FW_HISTORY\n\n            In all computational modeling of physical systems, it is necessary to break time into discrete chunks. In\n            reactor modeling, it is common to first break the time a reactor is simulated for into the practical cycles\n            the reactor runs. And then those cycles are broken down into smaller chunks called burn steps. The final\n            step lengths this method returns is a two-tiered list, where primary indices correspond to the cycle and\n            secondary indices correspond to the length of each intra-cycle step (in days).\n        \"\"\"\n        if not self._stepLengths:\n            self._stepLengths = getStepLengths(self.cs)\n            if self._stepLengths == [] and self.cs[\"nCycles\"] == 1:\n                # it is possible for there to be one cycle with zero burn up, in which case stepLengths is an empty list\n                pass\n            else:\n                self._checkReactorCycleAttrs({\"Step lengths\": self._stepLengths})\n            self._consistentPowerFractionsAndStepLengths()\n        return self._stepLengths\n\n    @property\n    def cycleLengths(self):\n        if not self._cycleLengths:\n            self._cycleLengths = getCycleLengths(self.cs)\n            self._checkReactorCycleAttrs({\"cycleLengths\": self._cycleLengths})\n        return self._cycleLengths\n\n    @property\n    def powerFractions(self):\n        if not self._powerFractions:\n  
          self._powerFractions = getPowerFractions(self.cs)\n            self._checkReactorCycleAttrs({\"powerFractions\": self._powerFractions})\n            self._consistentPowerFractionsAndStepLengths()\n        return self._powerFractions\n\n    @property\n    def availabilityFactors(self):\n        if not self._availabilityFactors:\n            self._availabilityFactors = getAvailabilityFactors(self.cs)\n            self._checkReactorCycleAttrs({\"availabilityFactors\": self._availabilityFactors})\n        return self._availabilityFactors\n\n    @property\n    def cycleNames(self):\n        if not self._cycleNames:\n            self._cycleNames = getCycleNames(self.cs)\n            self._checkReactorCycleAttrs({\"Cycle names\": self._cycleNames})\n        return self._cycleNames\n\n    @staticmethod\n    def _initFastPath():\n        \"\"\"\n        Create the FAST_PATH directory for fast local operations.\n\n        Notes\n        -----\n        The FAST_PATH was once created at import-time in order to support modules that use FAST_PATH without operators\n        (e.g. Database). However, we decided to leave FAST_PATH as the CWD in INTERACTIVE mode, so this should not be a\n        problem anymore, and we can safely move FAST_PATH creation back into the Operator.\n\n        If the operator is being used interactively (e.g. at a prompt) we will still use a temporary local fast path (in\n        case the user is working on a slow network path).\n        \"\"\"\n        context.activateLocalFastPath()\n        try:\n            os.makedirs(context.getFastPath())\n        except OSError:\n            # If FAST_PATH exists already that generally should be an error because different processes will be stepping\n            # on each other. The exception to this rule is in cases that instantiate multiple operators in one process\n            # (e.g. unit tests that loadTestReactor). 
Since the FAST_PATH is set at import, these will use the same path\n            # multiple times. We pass here for that reason.\n            if not os.path.exists(context.getFastPath()):\n                # if it actually doesn't exist, that's an actual error. Raise\n                raise\n\n    def _checkReactorCycleAttrs(self, attrsDict):\n        \"\"\"Check that the list has nCycles number of elements.\"\"\"\n        for name, param in attrsDict.items():\n            if len(param) != self.cs[\"nCycles\"]:\n                raise ValueError(\n                    \"The `{}` setting did not have a length consistent with the number of cycles.\\n\"\n                    \"Expected {} value(s), but only had {} defined.\\n\"\n                    \"Current input: {}\".format(name, self.cs[\"nCycles\"], len(param), param)\n                )\n\n    def _consistentPowerFractionsAndStepLengths(self):\n        \"\"\"Check that the internally-resolved _powerFractions and _stepLengths have consistent shapes, if they exist.\"\"\"\n        if self._powerFractions and self._stepLengths:\n            for cycleIdx in range(len(self._powerFractions)):\n                if len(self._powerFractions[cycleIdx]) != len(self._stepLengths[cycleIdx]):\n                    raise ValueError(\n                        \"The number of entries in lists for subcycle power fractions and sub-steps are inconsistent in \"\n                        f\"cycle {cycleIdx}\"\n                    )\n\n    @property\n    def atEOL(self):\n        \"\"\"\n        Return whether we are approaching EOL.\n\n        For the standard operator, this will return true when the current cycle is the last cycle\n        (cs[\"nCycles\"] - 1). 
Other operators may need to impose different logic.\n        \"\"\"\n        return self.r.p.cycle == self.cs[\"nCycles\"] - 1\n\n    def initializeInterfaces(self, r):\n        \"\"\"\n        Attach the reactor to the operator and initialize all interfaces.\n\n        This does not occur in `__init__` so that the ARMI operator can be initialized before a reactor is created,\n        which is useful for summarizing the case information quickly.\n\n        Parameters\n        ----------\n        r : Reactor\n            The Reactor object to attach to this Operator.\n        \"\"\"\n        self.r = r\n        r.o = self\n        with self.timer.getTimer(\"Interface Creation\"):\n            self.createInterfaces()\n            self._processInterfaceDependencies()\n            if context.MPI_RANK == 0:\n                runLog.header(\"=========== Interface Stack Summary  ===========\")\n                runLog.info(reportingUtils.getInterfaceStackSummary(self))\n                self.interactAllInit()\n            else:\n                self._attachInterfaces()\n\n        self._loadRestartData()\n\n    def __repr__(self):\n        return \"<{} {} {}>\".format(self.__class__.__name__, self.cs[\"runType\"], self.cs)\n\n    def __enter__(self):\n        \"\"\"Context manager to enable interface-level error handling hooks.\"\"\"\n        return self\n\n    def __exit__(self, exception_type, exception_value, stacktrace):\n        if any([exception_type, exception_value, stacktrace]):\n            runLog.error(r\"{}\\n{}\\{}\".format(exception_type, exception_value, stacktrace))\n            self.interactAllError()\n\n    def operate(self):\n        \"\"\"\n        Run the operation loop.\n\n        See Also\n        --------\n        mainOperator : run the operator loop on the primary MPI node (for parallel runs)\n        workerOperate : run the operator loop for the worker MPI nodes\n        \"\"\"\n        self._mainOperate()\n\n    def _mainOperate(self):\n        
\"\"\"Main loop for a standard ARMI run. Steps through time interacting with the interfaces.\"\"\"\n        dbi = self.getInterface(\"database\")\n        if dbi is not None and dbi.enabled():\n            dbi.initDB()\n        if self.cs[\"loadStyle\"] != \"fromInput\" and self.cs[\"runType\"] != RunTypes.SNAPSHOTS:\n            self.interactAllRestart(dbi)\n        self.interactAllBOL()\n        startingCycle = self.r.p.cycle  # may be starting at t != 0 in restarts\n        for cycle in range(startingCycle, self.cs[\"nCycles\"]):\n            keepGoing = self._cycleLoop(cycle, startingCycle)\n            if not keepGoing:\n                break\n        self.interactAllEOL()\n\n    def interactAllRestart(self, dbi: Optional[db.DatabaseInterface]):\n        \"\"\"Prepare for a restart simulation.\n\n        Some steps are necessary to be taken after interfaces are constructed but before we\n        start the real simulation. Crucially, we need to load the previous time point from the\n        database. The previous time node is chosen because that is the last point where we are\n        certain we have valid data and can safely recover.\n\n        If restarting at BOC, trigger the EOC actions from the previous cycle. 
This is necessary to\n        perform any fuel management operations that would have happened at the end of the previous cycle.\n        \"\"\"\n        startCycle = self.cs[\"startCycle\"]\n        startNode = self.cs[\"startNode\"]\n        prevTimeNode = getPreviousTimeNode(startCycle, startNode, self.cs)\n\n        if dbi is not None:\n            dbi.prepRestartRun()\n        else:\n            raise ValueError(\"No database interface means nothing is responsible for restarting from DB\")\n\n        activeInterfaces = self.getActiveInterfaces(\"Restart\", excludedInterfaceNames=(\"database\",))\n        self._interactAll(\"Restart\", activeInterfaces, (startCycle, startNode), prevTimeNode)\n\n        if startNode == 0:\n            runLog.important(\"Calling `o.interactAllEOC` due to loading the last time node of the previous cycle.\")\n            self.interactAllEOC(prevTimeNode[0])\n\n        # advance time since we loaded the previous time step\n        self.r.p.cycle = startCycle\n        self.r.p.timeNode = startNode\n\n    def _cycleLoop(self, cycle, startingCycle):\n        \"\"\"Run the portion of the main loop that happens each cycle.\"\"\"\n        self.r.p.cycleLength = self.cycleLengths[cycle]\n        self.r.p.availabilityFactor = self.availabilityFactors[cycle]\n        self.r.p.cycle = cycle\n        self.r.core.p.coupledIteration = 0\n\n        if cycle == startingCycle:\n            startingNode = self.r.p.timeNode\n        else:\n            startingNode = 0\n            self.r.p.timeNode = startingNode\n\n        halt = self.interactAllBOC(self.r.p.cycle)\n        if halt:\n            return False\n\n        # read total core power from settings (power or powerDensity)\n        basicPower = self.cs[\"power\"] or (self.cs[\"powerDensity\"] * self.r.core.getHMMass())\n\n        for timeNode in range(startingNode, int(self.burnSteps[cycle])):\n            self.r.core.p.power = self.powerFractions[cycle][timeNode] * basicPower\n           
 self.r.p.capacityFactor = self.r.p.availabilityFactor * self.powerFractions[cycle][timeNode]\n            self.r.p.stepLength = self.stepLengths[cycle][timeNode]\n\n            self._timeNodeLoop(cycle, timeNode)\n        else:  # do one last node at the end using the same power as the previous node\n            timeNode = self.burnSteps[cycle]\n            if self.burnSteps[cycle] == 0:\n                # this is a zero-burnup case\n                powFrac = 1\n            else:\n                powFrac = self.powerFractions[cycle][timeNode - 1]\n\n            self.r.core.p.power = powFrac * basicPower\n            self._timeNodeLoop(cycle, timeNode)\n\n        self.interactAllEOC(self.r.p.cycle)\n\n        return True\n\n    def _timeNodeLoop(self, cycle, timeNode):\n        \"\"\"Run the portion of the main loop that happens each subcycle.\"\"\"\n        self.r.p.timeNode = timeNode\n        if timeNode == 0:\n            dt = 0\n        else:\n            dt = self.r.o.stepLengths[cycle][timeNode - 1] / units.DAYS_PER_YEAR\n        self.r.p.time = self.r.p.time + dt\n\n        self.interactAllEveryNode(cycle, timeNode)\n        self._performTightCoupling(cycle, timeNode)\n\n    def _performTightCoupling(self, cycle: int, timeNode: int, writeDB: bool = True):\n        \"\"\"If requested, perform tight coupling and write out database.\n\n        Notes\n        -----\n        writeDB is False for OperatorSnapshots as the DB gets written at EOL.\n        \"\"\"\n        if not self.couplingIsActive():\n            # no coupling was requested\n            return\n        skipCycles = tuple(int(val) for val in self.cs[CONF_CYCLES_SKIP_TIGHT_COUPLING_INTERACTION])\n        if cycle in skipCycles:\n            runLog.warning(\n                f\"interactAllCoupled disabled this cycle ({self.r.p.cycle}) due to \"\n                \"`cyclesSkipTightCouplingInteraction` setting.\"\n            )\n        else:\n            self._convergenceSummary = 
collections.defaultdict(list)\n            for coupledIteration in range(self.cs[CONF_TIGHT_COUPLING_MAX_ITERS]):\n                self.r.core.p.coupledIteration = coupledIteration + 1\n                converged = self.interactAllCoupled(coupledIteration)\n                if converged:\n                    runLog.important(f\"Tight coupling iterations for c{cycle:02d}n{timeNode:02d} have converged!\")\n                    break\n            if not converged:\n                runLog.warning(\n                    f\"Tight coupling iterations for c{cycle:02d}n{timeNode:02d} have not converged!\"\n                    f\" The maximum number of iterations, {self.cs[CONF_TIGHT_COUPLING_MAX_ITERS]}, was reached.\"\n                )\n        if writeDB:\n            # database has not yet been written, so we need to write it.\n            dbi = self.getInterface(\"database\")\n            dbi.writeDBEveryNode()\n\n    def _interactAll(self, interactionName, activeInterfaces, *args):\n        \"\"\"\n        Loop over the supplied activeInterfaces and perform the supplied interaction on each.\n\n        Notes\n        -----\n        This is the base method for the other ``interactAll`` methods.\n        \"\"\"\n        interactMethodName = \"interact{}\".format(interactionName)\n\n        printMemUsage = self.cs[\"verbosity\"] == \"debug\" and self.cs[\"debugMem\"]\n\n        halt = False\n\n        cycleNodeTag = self._expandCycleAndTimeNodeArgs(interactionName)\n        runLog.header(\"===========  Triggering {} Event ===========\".format(interactionName + cycleNodeTag))\n\n        for statePointIndex, interface in enumerate(activeInterfaces, start=1):\n            self.printInterfaceSummary(interface, interactionName, statePointIndex)\n\n            # maybe make this a context manager\n            if printMemUsage:\n                memBefore = memoryProfiler.PrintSystemMemoryUsageAction()\n                memBefore.broadcast()\n                memBefore.invoke(self, 
self.r, self.cs)\n\n            interactionMessage = f\"{interface.name}.{interactionName}\"\n            with self.timer.getTimer(interactionMessage):\n                interactMethod = getattr(interface, interactMethodName)\n                halt = halt or interactMethod(*args)\n\n            if printMemUsage:\n                memAfter = memoryProfiler.PrintSystemMemoryUsageAction()\n                memAfter.broadcast()\n                memAfter.invoke(self, self.r, self.cs)\n                memAfter -= memBefore\n                memAfter.printUsage(\"after {:25s} {:15s} interaction\".format(interface.name, interactionName))\n\n            # Allow inherited classes to clean up things after an interaction\n            self._finalizeInteract()\n\n        runLog.header(\"===========  Completed {} Event ===========\\n\".format(interactionName + cycleNodeTag))\n\n        return halt\n\n    def _finalizeInteract(self):\n        \"\"\"Member called after each interface has completed its interaction.\n\n        Useful for cleaning up data.\n        \"\"\"\n        pass\n\n    def printInterfaceSummary(self, interface, interactionName, statePointIndex):\n        \"\"\"\n        Log which interaction point is about to be executed.\n\n        This looks better as multiple lines but it's a lot easier to grep as one line. 
We leverage newlines instead of\n        long banners to save disk space.\n        \"\"\"\n        nodeInfo = self._expandCycleAndTimeNodeArgs(interactionName)\n        line = \"=========== {:02d} - {:30s} {:15s} ===========\".format(\n            statePointIndex, interface.name, interactionName + nodeInfo\n        )\n        runLog.header(line)\n\n    def _expandCycleAndTimeNodeArgs(self, interactionName):\n        \"\"\"Return text annotating information for current run event.\n\n        Notes\n        -----\n        - Init, BOL, EOL: empty\n        - Everynode: cycle, time node\n        - BOC, EOC: cycle number\n        - Coupled: cycle, time node, iteration number\n        \"\"\"\n        if interactionName == \"Coupled\":\n            cycleNodeInfo = (\n                f\" - timestep: cycle {self.r.p.cycle}, node {self.r.p.timeNode}, \"\n                f\"year {'{0:.2f}'.format(self.r.p.time)} - iteration \"\n                f\"{self.r.core.p.coupledIteration}\"\n            )\n        elif interactionName in (\"BOC\", \"EOC\"):\n            cycleNodeInfo = f\" - timestep: cycle {self.r.p.cycle}\"\n            # - timestep: cycle 2\n        elif interactionName in (\"Init\", \"BOL\", \"EOL\"):\n            cycleNodeInfo = \"\"\n        else:\n            cycleNodeInfo = (\n                f\" - timestep: cycle {self.r.p.cycle}, node {self.r.p.timeNode}, year {'{0:.2f}'.format(self.r.p.time)}\"\n            )\n\n        return cycleNodeInfo\n\n    def interactAllInit(self):\n        \"\"\"Call interactInit on all interfaces in the stack after they are initialized.\"\"\"\n        self._interactAll(\"Init\", self.getInterfaces())\n\n    def interactAllBOL(self, excludedInterfaceNames=()):\n        \"\"\"\n        Call interactBOL for all interfaces in the interface stack at beginning-of-life.\n\n        All enabled or bolForce interfaces will be called excluding interfaces with excludedInterfaceNames.\n        \"\"\"\n        activeInterfaces = 
self.getActiveInterfaces(\"BOL\", excludedInterfaceNames)\n        self._interactAll(\"BOL\", activeInterfaces)\n\n    def interactAllBOC(self, cycle):\n        \"\"\"Interact at beginning of cycle of all enabled interfaces.\"\"\"\n        activeInterfaces = self.getActiveInterfaces(\"BOC\", cycle=cycle)\n        return self._interactAll(\"BOC\", activeInterfaces, cycle)\n\n    def interactAllEveryNode(self, cycle, tn, excludedInterfaceNames=()):\n        \"\"\"\n        Call the interactEveryNode hook for all enabled interfaces.\n\n        All enabled interfaces will be called excluding interfaces with excludedInterfaceNames.\n\n        Parameters\n        ----------\n        cycle : int\n            The cycle that is currently being run. Starts at 0\n        tn : int\n            The time node that is currently being run (0 for BOC, etc.)\n        excludedInterfaceNames : list, optional\n            Names of interface names that will not be interacted with.\n        \"\"\"\n        activeInterfaces = self.getActiveInterfaces(\"EveryNode\", excludedInterfaceNames)\n        self._interactAll(\"EveryNode\", activeInterfaces, cycle, tn)\n\n    def interactAllEOC(self, cycle, excludedInterfaceNames=()):\n        \"\"\"Interact end of cycle for all enabled interfaces.\"\"\"\n        self.r.p.time += self.r.p.cycleLength * (1 - self.r.p.availabilityFactor) / units.DAYS_PER_YEAR\n\n        activeInterfaces = self.getActiveInterfaces(\"EOC\", excludedInterfaceNames)\n        self._interactAll(\"EOC\", activeInterfaces, cycle)\n\n    def interactAllEOL(self, excludedInterfaceNames=()):\n        \"\"\"\n        Run interactEOL for all enabled interfaces.\n\n        Notes\n        -----\n        If the interfaces are flagged to be reversed at EOL, they are separated from the main stack and appended at the\n        end in reverse order. 
This allows, for example, an interface that must run first to also run last.\n        \"\"\"\n        activeInterfaces = self.getActiveInterfaces(\"EOL\", excludedInterfaceNames)\n        self._interactAll(\"EOL\", activeInterfaces)\n\n    def interactAllCoupled(self, coupledIteration):\n        \"\"\"\n        Run all interfaces that are involved in tight physics coupling.\n\n        .. impl:: Physics coupling is driven from Operator.\n            :id: I_ARMI_OPERATOR_PHYSICS1\n            :implements: R_ARMI_OPERATOR_PHYSICS\n\n            This method runs all the interfaces that are defined as part of the tight physics coupling of the reactor.\n            Then it returns if the coupling has converged or not.\n\n            Tight coupling implies the operator has split iterations between two or more physics solvers at the same\n            solution point in simulated time. For example, a flux solution might be computed, then a temperature\n            solution, and then another flux solution based on updated temperatures (which updates densities, dimensions,\n            and Doppler).\n\n            This is distinct from loose coupling, which simply uses the temperature values from the previous timestep in\n            the current flux solution. 
It's also distinct from full coupling where all fields are solved simultaneously.\n            ARMI supports tight and loose coupling.\n        \"\"\"\n        activeInterfaces = self.getActiveInterfaces(\"Coupled\")\n        # Store the previous iteration values before calling interactAllCoupled for each interface.\n        for interface in activeInterfaces:\n            if interface.coupler is not None:\n                interface.coupler.storePreviousIterationValue(interface.getTightCouplingValue())\n        self._interactAll(\"Coupled\", activeInterfaces, coupledIteration)\n\n        return self._checkTightCouplingConvergence(activeInterfaces)\n\n    def _checkTightCouplingConvergence(self, activeInterfaces: list):\n        \"\"\"Check if interfaces are converged.\n\n        Parameters\n        ----------\n        activeInterfaces : list\n            the list of active interfaces on the operator\n\n        Notes\n        -----\n        This is split off from self.interactAllCoupled to accommodate testing.\n        \"\"\"\n        # Summarize the coupled results and the convergence status.\n        converged = []\n        for interface in activeInterfaces:\n            coupler = interface.coupler\n            if coupler is not None:\n                key = f\"{interface.name}: {coupler.parameter}\"\n                converged.append(coupler.isConverged(interface.getTightCouplingValue()))\n                self._convergenceSummary[key].append(coupler.eps)\n\n        reportingUtils.writeTightCouplingConvergenceSummary(self._convergenceSummary)\n        return all(converged)\n\n    def interactAllError(self):\n        \"\"\"Interact when an error is raised by any other interface. 
Provides a wrap-up option on the way to a crash.\"\"\"\n        for i in self.interfaces:\n            runLog.extra(\"Error-interacting with {0}\".format(i.name))\n            i.interactError()\n\n    def createInterfaces(self):\n        \"\"\"\n        Dynamically discover all available interfaces and call their factories, potentially adding them to the stack.\n\n        An operator contains an ordered list of interfaces. These communicate between the core ARMI structure and\n        auxiliary computational modules and/or external codes. At specified interaction points in a run, the list of\n        interfaces is executed.\n\n        Each interface optionally defines interaction \"hooks\" for each of the interaction points. The normal interaction\n        points are BOL, BOC, every node, EOC, and EOL. If an interface defines an interactBOL method, that will run at\n        BOL, and so on.\n\n        The majority of ARMI capabilities lie within interfaces, and this architecture provides much of the flexibility\n        of ARMI.\n\n        See Also\n        --------\n        addInterface : Adds a particular interface to the interface stack.\n        armi.interfaces.STACK_ORDER : A system to determine the required order of interfaces.\n        armi.interfaces.getActiveInterfaceInfo : Collects the interface classes from relevant packages.\n        \"\"\"\n        runLog.header(\"=========== Creating Interfaces ===========\")\n        interfaceList = interfaces.getActiveInterfaceInfo(self.cs)\n\n        for klass, kwargs in interfaceList:\n            self.addInterface(klass(self.r, self.cs), **kwargs)\n\n    def addInterface(\n        self,\n        interface,\n        index=None,\n        reverseAtEOL=False,\n        enabled=True,\n        bolForce=False,\n    ):\n        \"\"\"\n        Attach an interface to this operator.\n\n        Notes\n        -----\n        Order matters.\n\n        Parameters\n        ----------\n        interface : Interface\n            
the interface to add\n        index : int, optional. Will insert the interface at this index rather than appending it to the end of the list\n        reverseAtEOL : bool, optional.\n            The interactEOL hooks will run in reverse order if True. All interfaces with this flag will be run as a\n            group after all other interfaces. This allows something to run first at BOL and last at EOL, etc.\n        enabled : bool, optional\n            If enabled, will run at all hooks. If not, won't run any (with possible exception at BOL, see bolForce).\n            Whenever possible, Interfaces that are needed during runtime for some peripheral operation but not during\n            the main loop should be instantiated by the part of the code that actually needs the interface.\n        bolForce: bool, optional\n            If true, will run at BOL hook even if disabled. This is often a sign that the interface in question should\n            be ephemerally instantiated on demand rather than added to the interface stack at all.\n\n        Raises\n        ------\n        RuntimeError\n            If an interface of the same name or purpose is already attached to the Operator.\n        \"\"\"\n        if self.getInterface(interface.name):\n            raise RuntimeError(f\"An interface with name {interface.name} is already attached.\")\n\n        iFunc = self.getInterface(purpose=interface.purpose)\n\n        if iFunc:\n            if issubclass(type(iFunc), type(interface)):\n                runLog.info(\n                    \"Ignoring Interface {newFunc} because existing interface {old} already  more specific\".format(\n                        newFunc=interface, old=iFunc\n                    )\n                )\n                return\n            elif issubclass(type(interface), type(iFunc)):\n                self.removeInterface(iFunc)\n                runLog.info(\n                    \"Will Insert Interface {newFunc} because it is a subclass of {old} interface 
and \"\n                    \" more derived\".format(newFunc=interface, old=iFunc)\n                )\n            else:\n                raise RuntimeError(\n                    \"Cannot add {0}; the {1} already is designated \"\n                    \"as the {2} interface. Multiple interfaces of the same \"\n                    \"purpose is not supported.\".format(interface, iFunc, interface.purpose)\n                )\n\n        runLog.debug(\"Adding {0}\".format(interface))\n        if index is None:\n            self.interfaces.append(interface)\n        else:\n            self.interfaces.insert(index, interface)\n        if reverseAtEOL:\n            interface.reverseAtEOL = True\n\n        if not enabled:\n            interface.enabled(False)\n\n        interface.bolForce(bolForce)\n        interface.attachReactor(self, self.r)\n\n    def _processInterfaceDependencies(self):\n        \"\"\"\n        Check all interfaces' dependencies and adds missing ones.\n\n        Notes\n        -----\n        Order does not matter here because the interfaces added here are disabled and playing supporting role so it is\n        not intended to run on the interface stack. 
They will be called by other interfaces.\n\n        As mentioned in :py:meth:`addInterface`, it may be better to just instantiate utility code when its needed\n        rather than rely on this system.\n        \"\"\"\n        # Make multiple passes in case there's one added that depends on another.\n        for _dependencyPass in range(5):\n            numInterfaces = len(self.interfaces)\n            # manipulation friendly, so it's ok to add additional things to the stack\n            for i in self.getInterfaces():\n                for dependency in i.getDependencies(self.cs):\n                    name = dependency.name\n                    purpose = dependency.purpose\n                    klass = dependency\n\n                    if not self.getInterface(name, purpose=purpose):\n                        runLog.extra(\n                            \"Attaching {} interface (disabled, BOL forced) due to dependency in {}\".format(\n                                klass.name, i.name\n                            )\n                        )\n                        self.addInterface(klass(r=self.r, cs=self.cs), enabled=False, bolForce=True)\n            if len(self.interfaces) == numInterfaces:\n                break\n        else:\n            raise RuntimeError(\"Interface dependency resolution did not converge.\")\n\n    def removeAllInterfaces(self):\n        \"\"\"Removes all of the interfaces.\"\"\"\n        for interface in self.interfaces:\n            interface.detachReactor()\n        self.interfaces = []\n\n    def removeInterface(self, interface=None, interfaceName=None):\n        \"\"\"\n        Remove a single interface from the interface stack.\n\n        Parameters\n        ----------\n        interface : Interface, optional\n            An actual interface object to remove.\n        interfaceName : str, optional\n            The name of the interface to remove.\n\n        Returns\n        -------\n        success : boolean\n            True if the 
interface was removed\n            False if it was not (because it wasn't there to be removed)\n        \"\"\"\n        if interfaceName:\n            interface = self.getInterface(interfaceName)\n\n        if interface and interface in self.interfaces:\n            self.interfaces.remove(interface)\n            interface.detachReactor()\n            return True\n        else:\n            runLog.warning(\"Cannot remove interface {0} because it is not in the interface stack.\".format(interface))\n            return False\n\n    def getInterface(self, name=None, purpose=None):\n        \"\"\"\n        Returns a specific interface from the stack by its name or more generic purpose.\n\n        Parameters\n        ----------\n        name : str, optional\n            Interface name\n        purpose : str\n            Interface purpose (general, like 'globalFlux','th',etc.). This is useful when you need the ___ solver (e.g.\n            globalFlux) but don't care which particular one is active (e.g. SERPENT vs. DIF3D)\n\n        Raises\n        ------\n        RuntimeError\n            If there are more than one interfaces of the given name or purpose.\n        \"\"\"\n        candidateI = None\n        for i in self.interfaces:\n            if (name and i.name == name) or (purpose and i.purpose == purpose):\n                if candidateI is None:\n                    candidateI = i\n                else:\n                    raise RuntimeError(\n                        \"Cannot retrieve a single interface as there are multiple \"\n                        \"interfaces with name {} or purpose {} attached. \".format(name, purpose)\n                    )\n\n        return candidateI\n\n    def interfaceIsActive(self, name):\n        \"\"\"True if named interface exists and is enabled.\n\n        Notes\n        -----\n        This logic is significantly simpler that getActiveInterfaces. 
This logic only touches the enabled() flag, but\n        doesn't take into account the case settings.\n        \"\"\"\n        i = self.getInterface(name)\n        return i and i.enabled()\n\n    def getInterfaces(self):\n        \"\"\"\n        Get list of interfaces in interface stack.\n\n        .. impl:: An operator will expose an ordered list of interfaces.\n            :id: I_ARMI_OPERATOR_INTERFACES\n            :implements: R_ARMI_OPERATOR_INTERFACES\n\n            This method returns an ordered list of instances of the Interface class. This list is useful because at any\n            time node in the reactor simulation, these interfaces will be called in sequence to perform various types of\n            calculations. It is important to note that this Operator instance has a list of Plugins, and each of those\n            Plugins potentially defines multiple Interfaces. And these Interfaces define their own order, separate from\n            the ordering of the Plugins.\n\n        Notes\n        -----\n        Returns a copy so you can manipulate the list in an interface, like dependencies.\n        \"\"\"\n        return self.interfaces[:]\n\n    def getActiveInterfaces(\n        self,\n        interactState: str,\n        excludedInterfaceNames: Tuple[str] = (),\n        cycle: int = 0,\n    ):\n        \"\"\"Retrieve the interfaces which are active for a given interaction state.\n\n        Parameters\n        ----------\n        interactState: str\n            A string dictating which interaction state the interfaces should be pulled for.\n        excludedInterfaceNames: Tuple[str]\n            A tuple of strings dictating which interfaces should be manually skipped.\n        cycle: int\n            The given cycle. 
0 by default.\n\n        Returns\n        -------\n        activeInterfaces: List[Interfaces]\n            The interfaces deemed active for the given interactState.\n        \"\"\"\n        # Validate the inputs\n        if excludedInterfaceNames is None:\n            excludedInterfaceNames = ()\n\n        if interactState not in (\"BOL\", \"BOC\", \"EveryNode\", \"EOC\", \"EOL\", \"Coupled\", \"Restart\"):\n            raise ValueError(f\"{interactState} is an unknown interaction state!\")\n\n        # Ensure the interface is enabled.\n        enabled = lambda i: i.enabled()\n        if interactState == \"BOL\":\n            enabled = lambda i: i.enabled() or i.bolForce()\n\n        # Ensure the name of the interface isn't in some exclusion list.\n        nameCheck = lambda i: True\n        if interactState in (\"EveryNode\", \"EOC\", \"EOL\"):\n            nameCheck = lambda i: i.name not in excludedInterfaceNames\n        elif interactState == \"BOC\" and cycle < self.cs[CONF_DEFERRED_INTERFACES_CYCLE]:\n            nameCheck = lambda i: i.name not in self.cs[CONF_DEFERRED_INTERFACE_NAMES]\n        elif interactState == \"BOL\":\n            nameCheck = (\n                lambda i: i.name not in self.cs[CONF_DEFERRED_INTERFACE_NAMES] and i.name not in excludedInterfaceNames\n            )\n\n        # Finally, find the active interfaces.\n        activeInterfaces = [i for i in self.interfaces if enabled(i) and nameCheck(i)]\n\n        # Special Case: At EOL we reverse the order of some interfaces.\n        if interactState == \"EOL\":\n            actInts = [ii for ii in activeInterfaces if not ii.reverseAtEOL]\n            actInts.extend(reversed([ii for ii in activeInterfaces if ii.reverseAtEOL]))\n            activeInterfaces = actInts\n\n        return activeInterfaces\n\n    def reattach(self, r, cs=None):\n        \"\"\"Add links to globally-shared objects to this operator and all interfaces.\n\n        Notes\n        -----\n        Could be a good 
opportunity for weakrefs.\n        \"\"\"\n        self.r = r\n        self.r.o = self\n        if cs is not None:\n            self.cs = cs\n        for i in self.interfaces:\n            i.r = r\n            i.o = self\n            if cs is not None:\n                i.cs = cs\n\n    def detach(self):\n        \"\"\"\n        Break links to globally-shared objects to this operator and all interfaces.\n\n        May be required prior to copying these objects over the network.\n\n        Notes\n        -----\n        Could be a good opportunity for weakrefs.\n        \"\"\"\n        if self.r:\n            self.r.o = None\n            for comp in self.r:\n                comp.parent = None\n        self.r = None\n        for i in self.interfaces:\n            i.o = None\n            i.r = None\n            i.cs = None\n\n    def _attachInterfaces(self):\n        \"\"\"\n        Links all the interfaces in the interface stack to the operator, reactor, and cs.\n\n        See Also\n        --------\n        createInterfaces : creates all interfaces\n        addInterface : adds a single interface to the stack\n        \"\"\"\n        for i in self.interfaces:\n            i.attachReactor(self, self.r)\n\n    def _loadRestartData(self):\n        \"\"\"\n        Read a restart.dat file which contains all the fuel management factorLists and cycle lengths.\n\n        Notes\n        -----\n        This allows the ARMI to do the same shuffles that it did last time, assuming fuel management logic has not\n        changed. 
Note, it would be better if the moves were just read from a table in the database.\n        \"\"\"\n        restartName = self.cs.caseTitle + \".restart.dat\"\n        if not os.path.exists(restartName):\n            return\n        else:\n            runLog.info(f\"Loading restart data from {restartName}\")\n\n        with open(restartName, \"r\") as restart:\n            for line in restart:\n                match = re.search(\n                    r\"cycle=(\\d+)\\s+time=(\\d+\\.\\d+[Ee+-]+\\d+)\\s+factorList=[\\[\\{](.+?)[\\]\\}]\",\n                    line,\n                )\n                if match:\n                    newStyle = re.findall(r\"'(\\w+)':\\s*(\\d*\\.?\\d*)\", line)\n                    if newStyle:\n                        # key-based factorList. load a dictionary.\n                        factorList = {}\n                        for key, val in newStyle:\n                            factorList[key] = float(val)\n                    else:\n                        # list based factorList. Load a list. (old style, backward compat)\n                        try:\n                            factorList = [float(item) for item in match.group(3).split(\",\")]\n                        except ValueError:\n                            factorList = match.group(3).split(\",\")\n                    runLog.debug(\"loaded restart data for cycle %d\" % float(match.group(1)))\n\n                    self.restartData.append((float(match.group(1)), float(match.group(2)), factorList))\n        runLog.info(\"loaded restart data for {0} cycles\".format(len(self.restartData)))\n\n    def loadState(self, cycle, timeNode, timeStepName=\"\", fileName=None, updateMassFractions=None):\n        \"\"\"\n        Convenience method reroute to the database interface state reload method.\n\n        See Also\n        --------\n        armi.bookkeeping.db.loadOperator:\n            A method for loading an operator given a database. 
loadOperator does not require an operator prior to\n            loading the state of the reactor. loadState does, and therefore armi.init must be called which requires\n            access to the blueprints, settings, and geometry files. These files are stored implicitly on the database,\n            so loadOperator creates the reactor first, and then attaches it to the operator. loadState should be used if\n            you are in the middle of an ARMI calculation and need load a different time step. If you are loading from a\n            fresh ARMI session, either method is sufficient if you have access to all the input files.\n        \"\"\"\n        dbi = self.getInterface(\"database\")\n        if not dbi:\n            raise RuntimeError(\"Cannot load from snapshot without a database interface\")\n\n        if updateMassFractions is not None:\n            runLog.warning(\"deprecated: updateMassFractions is no longer a valid option for loadState\")\n\n        dbi.loadState(cycle, timeNode, timeStepName, fileName)\n\n    def snapshotRequest(self, cycle, node, iteration=None):\n        \"\"\"\n        Process a snapshot request at this time.\n\n        This copies various physics input and output files to a special folder that follow-on analysis be executed upon\n        later.\n\n        Notes\n        -----\n        This was originally used to produce MC2/DIF3D inputs for external parties (who didn't have ARMI) to review.\n        Since then, the concept of snapshots has evolved with respect to the\n        :py:class:`~armi.operators.snapshots.OperatorSnapshots`.\n        \"\"\"\n        from armi.physics.neutronics.settings import CONF_LOADING_FILE\n\n        runLog.info(f\"Producing snapshot for cycle {cycle} node {node}\")\n        self.r.core.zones.summary()\n\n        newFolder = f\"snapShot{cycle}_{node}\"\n        if os.path.exists(newFolder):\n            runLog.important(f\"Deleting existing snapshot data in {newFolder}\")\n            
pathTools.cleanPath(newFolder, forceClean=True)  # careful with cleanPath!\n            # give it a minute.\n            time.sleep(1)\n\n        if os.path.exists(newFolder):\n            runLog.warning(f\"Deleting existing snapshot data in {newFolder} failed\")\n        else:\n            os.mkdir(newFolder)\n\n        # Moving the cross section files is to a snapshot directory is a reasonable requirement, but these hard-coded\n        # names are not desirable. This is legacy and should be updated to be more robust for users.\n        for fileName in os.listdir(\".\"):\n            if \"mcc\" in fileName and re.search(r\"[A-Z]AF?\\d?.inp\", fileName):\n                base, ext = os.path.splitext(fileName)\n                if iteration is not None:\n                    newFile = \"{0}_{1:03d}_{2:d}_{4}{3}\".format(base, cycle, node, ext, iteration)\n                else:\n                    newFile = \"{0}_{1:03d}_{2:d}{3}\".format(base, cycle, node, ext)\n                # add the cycle and timenode to the XS input file names so that a rx-coeff case that\n                # runs in here won't overwrite them.\n                pathTools.copyOrWarn(fileName, fileName, os.path.join(newFolder, newFile))\n            if \"rzmflx\" in fileName:\n                pathTools.copyOrWarn(\"rzmflx for snapshot\", fileName, newFolder)\n\n        fileNamePossibilities = [f\"ISOTXS-c{cycle}n{node}\", f\"ISOTXS-c{cycle}\"]\n        if iteration is not None:\n            fileNamePossibilities = [f\"ISOTXS-c{cycle}n{node}i{iteration}\"] + fileNamePossibilities\n\n        for isoFName in fileNamePossibilities:\n            if os.path.exists(isoFName):\n                break\n        pathTools.copyOrWarn(\"ISOTXS for snapshot\", isoFName, pathTools.armiAbsPath(newFolder, \"ISOTXS\"))\n        globalFluxLabel = GlobalFluxInterfaceUsingExecuters.getLabel(self.cs.caseTitle, cycle, node, iteration)\n        globalFluxInput = globalFluxLabel + \".inp\"\n        globalFluxOutput = 
globalFluxLabel + \".out\"\n        pathTools.copyOrWarn(\"DIF3D input for snapshot\", globalFluxInput, newFolder)\n        pathTools.copyOrWarn(\"DIF3D output for snapshot\", globalFluxOutput, newFolder)\n        pathTools.copyOrWarn(\"Shuffle logic for snapshot\", self.cs[CONF_SHUFFLE_LOGIC], newFolder)\n        pathTools.copyOrWarn(\"Loading definition for snapshot\", self.cs[CONF_LOADING_FILE], newFolder)\n\n    @staticmethod\n    def setStateToDefault(cs):\n        \"\"\"Update the state of ARMI to fit the kind of run this operator manages.\"\"\"\n        return cs.modified(newSettings={\"runType\": RunTypes.STANDARD})\n\n    def couplingIsActive(self):\n        \"\"\"True if any kind of physics coupling is active.\"\"\"\n        return self.cs[CONF_TIGHT_COUPLING]\n"
  },
  {
    "path": "armi/operators/operatorMPI.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThe MPI-aware variant of the standard ARMI operator.\n\n.. impl:: There is an MPI-aware variant of the ARMI Operator.\n    :id: I_ARMI_OPERATOR_MPI\n    :implements: R_ARMI_OPERATOR_MPI\n\n    This sets up the main Operator on the primary MPI node and initializes\n    worker processes on all other MPI nodes. At certain points in the run,\n    particular interfaces might call into action all the workers. For\n    example, a depletion or subchannel T/H module may ask the MPI pool to\n    perform a few hundred independent physics calculations in parallel. In\n    many cases, this can speed up the overall execution of an analysis,\n    if a big enough computer or computing cluster is available.\n\n    See :py:class:`~armi.operators.operator.Operator` for the parent class.\n\nNotes\n-----\nThis is not *yet* smart enough to use shared memory when the MPI tasks are on the same machine. Everything goes through\nMPI. 
This can be optimized as needed.\n\"\"\"\n\nimport gc\nimport os\nimport re\nimport time\nimport traceback\n\nfrom armi import context, getPluginManager, mpiActions, runLog\nfrom armi.operators.operator import Operator\nfrom armi.reactor import reactors\n\n\nclass OperatorMPI(Operator):\n    \"\"\"MPI-aware Operator.\"\"\"\n\n    def __init__(self, cs):\n        try:\n            Operator.__init__(self, cs)\n        except:\n            # kill the workers too so everything dies.\n            runLog.important(\"Primary node failed on init. Quitting.\")\n            if context.MPI_COMM:  # else it's a single cpu case.\n                context.MPI_COMM.bcast(\"quit\", root=0)\n            raise\n\n    def operate(self):\n        \"\"\"\n        Operate method for all nodes.\n\n        Calls _mainOperate or workerOperate depending on which MPI rank we are, and\n        handles errors.\n        \"\"\"\n        runLog.debug(\"OperatorMPI.operate\")\n        if context.MPI_RANK == 0:\n            # this is the primary\n            try:\n                # run the regular old operate method\n                Operator.operate(self)\n                runLog.important(time.ctime())\n            except Exception as ee:\n                runLog.error(\"Error in Primary Node. 
Check STDERR for a traceback.\\n{}\".format(ee))\n                raise\n            finally:\n                # If there are other processes, tell them to stop\n                if context.MPI_SIZE > 1:\n                    runLog.important(\"Stopping all MPI worker nodes and cleaning temps.\")\n                    # send the quit command to the workers.\n                    context.MPI_COMM.bcast(\"quit\", root=0)\n                    runLog.debug(\"Waiting for all nodes to close down\")\n                    # wait until they're done cleaning up.\n                    context.MPI_COMM.bcast(\"finished\", root=0)\n                    runLog.important(\"All worker nodes stopped.\")\n                # even though we waited, still need more time to close stdout.\n                time.sleep(1)\n                runLog.debug(\"Main operate finished\")\n                runLog.close()  # concatenate all logs.\n        else:\n            try:\n                self.workerOperate()\n            except:\n                # grab the final command\n                runLog.warning(\"An error has occurred in one of the worker nodes. See STDERR for traceback.\")\n                # bcasting quit won't work if the main is sitting around waiting for a different bcast or gather.\n                traceback.print_exc()\n                runLog.debug(\"Worker failed\")\n                runLog.close()\n                raise\n\n    def workerOperate(self):\n        \"\"\"\n        The main loop on any worker MPI nodes.\n\n        Notes\n        -----\n        This method is what worker nodes are in while they wait for instructions from\n        the primary node in a parallel run. The nodes will sit, waiting for a \"worker\n        command\". When this comes (from a bcast from the primary), a set of if statements\n        are evaluated, with specific behaviors defined for each command. 
If the operator\n        doesn't understand the command, it loops through the interface stack to see if\n        any of the interfaces understand it.\n\n        Originally, \"magic strings\" were broadcast, which were handled either here or in\n        one of the interfaces' ``workerOperate`` methods. Since then, the\n        :py:mod:`~armi.mpiActions` system has been devised which just broadcasts\n        ``MpiAction`` objects. Both methods are still supported.\n\n        See Also\n        --------\n        armi.mpiActions : MpiAction information\n        armi.interfaces.workerOperate : interface-level handling of worker commands.\n\n        \"\"\"\n        while True:\n            # sit around waiting for a command from the primary\n            runLog.extra(\"Node {0} ready and waiting\".format(context.MPI_RANK))\n            cmd = context.MPI_COMM.bcast(None, root=0)\n            runLog.extra(\"worker received command {0}\".format(cmd))\n            # got a command. go use it.\n            if isinstance(cmd, mpiActions.MpiAction):\n                cmd.invoke(self, self.r, self.cs)\n            elif cmd == \"quit\":\n                self.workerQuit()\n                break  # If this break is removed, the program will remain in the while loop forever.\n            elif cmd == \"finished\":\n                runLog.warning(\n                    \"Received unexpected FINISHED command. Usually a QUIT command precedes this. \"\n                    \"Skipping cleanup of temporary files.\"\n                )\n                break\n            elif cmd == \"sync\":\n                # wait around for a sync\n                runLog.debug(\"Worker syncing\")\n                note = context.MPI_COMM.bcast(\"wait\", root=0)\n                if note != \"wait\":\n                    raise RuntimeError(f'did not get \"wait\". 
Got {note}')\n            elif cmd == \"reset\":\n                runLog.extra(\"Workers are being reset.\")\n            else:\n                # We don't understand the command on our own. Check the interfaces this allows all interfaces to have\n                # their own custom operation code.\n                handled = False\n                for i in self.interfaces:\n                    handled = i.workerOperate(cmd)\n                    if handled:\n                        break\n                if not handled:\n                    if context.MPI_RANK == 0:\n                        print(\"Interfaces\" + str(self.interfaces))\n                    runLog.error(\n                        \"No interface understood worker command {0}\\n check stdout for err\\n\"\n                        \"available interfaces:\\n  {1}\".format(\n                            cmd,\n                            \"\\n  \".join(f\"name:{i.name} typeName:{i.purpose} {i}\" for i in self.interfaces),\n                        )\n                    )\n                    raise RuntimeError(f\"Failed to delegate worker command {cmd} to an interface.\")\n\n            pm = getPluginManager()\n            resetFlags = pm.hook.mpiActionRequiresReset(cmd=cmd)\n            # only reset if all the plugins agree to reset\n            if all(resetFlags) or cmd == \"reset\":\n                self._resetWorker()\n\n            # might be an mpi action which has a reactor and everything, preventing garbage collection\n            del cmd\n            gc.collect()\n\n    def _finalizeInteract(self):\n        \"\"\"Inherited member called after each interface has completed its interact.\n\n        This will force all the workers to clear their reactor data so that it\n        isn't carried around to the next interact.\n\n        Notes\n        -----\n        This is only called on the root processor. 
Worker processors will know\n        what to do with the \"reset\" broadcast.\n        \"\"\"\n        if context.MPI_SIZE > 1:\n            context.MPI_COMM.bcast(\"reset\", root=0)\n            runLog.extra(\"Workers have been reset.\")\n\n    def _resetWorker(self):\n        \"\"\"\n        Clear out the reactor on the workers to start anew.\n\n        Notes\n        -----\n        This was made to help minimize the amount of RAM that is used during some\n        gigantic long-running cases. Resetting after building copies of reactors\n        or transforming their geometry is one approach. We hope to implement\n        more efficient solutions in the future.\n\n        Warning\n        -------\n        This should build empty non-core systems too.\n        \"\"\"\n        # Nothing to do if we never had anything\n        if self.r is None:\n            return\n\n        cs = self.cs\n        bp = self.r.blueprints\n        spatialGrid = self.r.core.spatialGrid\n        spatialGrid.armiObject = None\n        xsGroups = self.getInterface(\"xsGroups\")\n        if xsGroups:\n            xsGroups.clearRepresentativeBlocks()\n        self.detach()\n        self.r = reactors.Reactor(cs.caseTitle, bp)\n        core = reactors.Core(\"Core\")\n        self.r.add(core)\n        core.spatialGrid = spatialGrid\n        core.spatialGrid.armiObject = core\n        self.reattach(self.r, cs)\n\n    @staticmethod\n    def workerQuit():\n        runLog.debug(\"Worker ending\")\n        runLog.close()  # no more messages.\n        # wait until all workers are closed so we can delete them.\n        context.MPI_COMM.bcast(\"finished\", root=0)\n\n    def collapseAllStderrs(self):\n        \"\"\"Takes all the individual stderr files from each processor and arranges them nicely into one file.\"\"\"\n        stderrFiles = []\n        for fName in os.listdir(\".\"):\n            match = re.search(r\"_(\\d\\d\\d\\d)\\.stderr\", fName)\n            if match:\n                
stderrFiles.append((match.group(1), fName))\n        stderrFiles.sort()\n\n        stderr = open(\"{0}w.stderr\".format(self.cs.caseTitle), \"w\")\n        for cpu, fName in stderrFiles:\n            f = open(fName)\n            stderr.write(\"Processor {0}\\n\".format(cpu))\n            stderr.write(f.read())\n            stderr.write(\"\\n\")\n            f.close()\n        stderr.close()\n"
  },
  {
    "path": "armi/operators/runTypes.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nConstants defining the different supported run types.\n\nThese were moved here to better structure the dependencies within this\npackage. Dependencies should be organized in a tree-like structure, with\n``__init__.py`` living at the top. These will likely need to be extended by plugins in\nthe near future.\n\"\"\"\n\n\nclass RunTypes:\n    \"\"\"All available values of the ``runType`` setting that determine which Operator to use.\"\"\"\n\n    STANDARD = \"Standard\"\n    SNAPSHOTS = \"Snapshots\"\n    EQUILIBRIUM = \"Equilibrium\"\n"
  },
  {
    "path": "armi/operators/snapshots.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Snapshot Operator.\"\"\"\n\nfrom armi import runLog\nfrom armi.operators import operatorMPI\n\n\nclass OperatorSnapshots(operatorMPI.OperatorMPI):\n    \"\"\"\n    This operator just loops over the requested snapshots and computes at them.\n\n    These may add CR worth curves, rx coefficients, transient runs etc at these snapshots.\n    This operator can be run as a restart, adding new physics to a previous run.\n    \"\"\"\n\n    def __init__(self, cs):\n        super().__init__(cs)\n\n        # disable fuel management and optimization\n        # disable depletion because we don't want to change number densities for tn's >0 (or any)\n        self.disabledInterfaces = [\"depletion\", \"fuelHandler\", \"optimize\"]\n\n    def createInterfaces(self):\n        operatorMPI.OperatorMPI.createInterfaces(self)\n\n        for toDisable in self.disabledInterfaces:\n            i = self.getInterface(name=toDisable, purpose=toDisable)\n            if i:\n                i.enabled(False)\n\n    def _mainOperate(self):\n        \"\"\"\n        General main loop for ARMI snapshot case.\n\n        Instead of going through all cycles, this goes through just the snapshots.\n\n        See Also\n        --------\n        Operator._mainOperate : The primary ARMI loop for non-restart cases.\n        \"\"\"\n        runLog.important(\"---- Beginning Snapshot (restart) ARMI 
Operator Loop ------\")\n\n        # run things that happen before a calculation.\n        # setups, etc.\n        self.interactAllBOL()\n\n        # figure out which snapshots to run in. Parse the CCCNNN settings\n        snapshots = [(int(i[:3]), int(i[3:])) for i in self.cs[\"dumpSnapshot\"]]\n\n        # update the snapshot requests if the user chose to load from a specific cycle/node\n        dbi = self.getInterface(\"database\")\n        # database is excluded since SS writes by itself\n        excludeDB = (\"database\",)\n        for ssCycle, ssNode in snapshots:\n            runLog.important(\"Beginning snapshot ({0:02d}, {1:02d})\".format(ssCycle, ssNode))\n            dbi.loadState(ssCycle, ssNode)\n\n            # need to update reactor power after the database load\n            # this is normally handled in operator._cycleLoop\n            self.r.core.p.power = self.cs[\"power\"]\n            self.r.core.p.powerDensity = self.cs[\"powerDensity\"]\n\n            halt = self.interactAllBOC(self.r.p.cycle)\n            if halt:\n                break\n\n            # database is excluded since it writes after coupled\n            self.interactAllEveryNode(ssCycle, ssNode, excludedInterfaceNames=excludeDB)\n            self._performTightCoupling(ssCycle, ssNode, writeDB=False)\n            # tight coupling is done, now write to DB\n            dbi.writeDBEveryNode()\n\n            self.interactAllEOC(self.r.p.cycle)\n\n        # run things that happen at EOL, like reports, plotters, etc.\n        self.interactAllEOL(excludedInterfaceNames=excludeDB)\n        dbi.closeDB()  # dump the database to file\n        runLog.important(\"Done with ARMI snapshots case.\")\n\n    @staticmethod\n    def setStateToDefault(cs):\n        \"\"\"Update the state of ARMI to fit the kind of run this operator manages.\"\"\"\n        from armi.operators.runTypes import RunTypes\n\n        return cs.modified(newSettings={\"runType\": RunTypes.STANDARD})\n\n    @property\n    def 
atEOL(self):\n        \"\"\"\n        Notes\n        -----\n        This operator's atEOL method behaves very differently than other operators.\n        The idea is that snapshots don't really have an EOL since they are independent of\n        chronological order and may or may not contain the last time node from the load database.\n        \"\"\"\n        return False\n"
  },
  {
    "path": "armi/operators/tests/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for built-in operators.\"\"\"\n"
  },
  {
    "path": "armi/operators/tests/test_operatorSnapshots.py",
    "content": "# Copyright 2022 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for operator snapshots.\"\"\"\n\nimport unittest\nfrom pathlib import Path\nfrom unittest.mock import Mock\n\nfrom armi import settings\nfrom armi.bookkeeping.db.databaseInterface import DatabaseInterface\nfrom armi.operators import getOperatorClassFromSettings\nfrom armi.operators.runTypes import RunTypes\nfrom armi.operators.snapshots import OperatorSnapshots\nfrom armi.settings.fwSettings.globalSettings import CONF_GROW_TO_FULL_CORE_AFTER_LOAD\nfrom armi.testing import TESTING_ROOT, loadTestReactor\nfrom armi.utils.directoryChangers import TemporaryDirectoryChanger\n\n\nclass TestOperatorSnapshots(unittest.TestCase):\n    @classmethod\n    def setUpClass(cls):\n        newSettings = {}\n        newSettings[\"axialExpansion\"] = False\n        newSettings[\"db\"] = True\n        newSettings[\"genReports\"] = False\n        newSettings[\"summarizeAssemDesign\"] = False\n        newSettings[\"runType\"] = \"Standard\"\n        newSettings[\"verbosity\"] = \"error\"\n        newSettings[\"branchVerbosity\"] = \"error\"\n        newSettings[\"nCycles\"] = 1\n        newSettings[\"dumpSnapshot\"] = [\"000000\", \"008000\", \"016005\"]\n        o1, cls.r = loadTestReactor(\n            customSettings=newSettings,\n            inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\",\n        )\n        cls.o = OperatorSnapshots(o1.cs)\n        cls.o.r = 
cls.r\n\n        # let's disable all the interfaces, to save time\n        allInterfaces = [\n            \"database\",\n            \"fissionProducts\",\n            \"fuelHandler\",\n            \"history\",\n            \"main\",\n            \"memoryProfiler\",\n            \"snapshot\",\n            \"xsGroups\",\n        ]\n        for i in allInterfaces:\n            cls.o.disabledInterfaces.append(i)\n\n        # mock a Database Interface\n        cls.dbi = DatabaseInterface(cls.r, o1.cs)\n        cls.dbi.loadState = lambda c, n: None\n        cls.dbi.writeDBEveryNode = lambda: None\n        cls.dbi.closeDB = lambda: None\n\n        cls.o.createInterfaces()\n\n    def test_atEOL(self):\n        self.assertFalse(self.o.atEOL)\n\n    def test_setStateToDefault(self):\n        cs0 = self.o.cs.modified(newSettings={\"runType\": RunTypes.SNAPSHOTS})\n        self.assertEqual(cs0[\"runType\"], RunTypes.SNAPSHOTS)\n        cs = self.o.setStateToDefault(cs0)\n        self.assertEqual(cs[\"runType\"], RunTypes.STANDARD)\n\n    def test_mainOperate(self):\n        # Mock some tooling that we aren't testing\n        self.o.interactBOL = lambda: None\n        self.o.getInterface = lambda s: (self.dbi if s == \"database\" else super().getInterface(s))\n\n        self.assertEqual(self.r.core.p.power, 0.0)\n        self.o._mainOperate()\n        self.assertEqual(self.r.core.p.power, 1000000.0)\n\n    def test_createInterfacesDisabled(self):\n        # If someone adds an interface, we don't want this test to break, so let's do >6\n        self.assertGreater(len(self.o.interfaces), 6)\n        for i in self.o.interfaces:\n            self.assertFalse(i.enabled())\n\n\nclass TestOperatorSnapshotsSettings(unittest.TestCase):\n    def test_getOperatorClassFromSettings(self):\n        cs = settings.Settings()\n        cs = cs.modified(newSettings={\"runType\": RunTypes.SNAPSHOTS})\n        o = getOperatorClassFromSettings(cs)\n        self.assertEqual(o, 
OperatorSnapshots)\n\n\nclass TestSnapshotFullCoreExpan(unittest.TestCase):\n    \"\"\"Test that a snapshot operator can do full core analysis with a 1/3 core DB.\"\"\"\n\n    DB_PATH = Path(\"test_operator_snapshot_full_core_expansion.h5\")\n\n    @classmethod\n    def setUpClass(cls):\n        cls.td = TemporaryDirectoryChanger()\n        cls.td.__enter__()\n        o, cls.symmetricReactor = loadTestReactor(\n            inputFilePath=TESTING_ROOT, inputFileName=\"reactors/thirdSmallHexReactor/thirdSmallHexReactor.yaml\"\n        )\n        dbi: DatabaseInterface = next(filter(lambda i: isinstance(i, DatabaseInterface), o.interfaces))\n        dbi.initDB(cls.DB_PATH)\n        dbi.writeDBEveryNode()\n        dbi.closeDB()\n\n        cls.snapshotSettings: settings.Settings = o.cs.modified(\n            newSettings={\"runType\": RunTypes.SNAPSHOTS, \"reloadDBName\": str(cls.DB_PATH)}\n        )\n\n    @classmethod\n    def tearDownClass(cls):\n        cls.DB_PATH.unlink()\n        cls.td.__exit__(None, None, None)\n\n    def test_fullCoreFromThirdCore(self):\n        self.assertFalse(self.symmetricReactor.core.isFullCore)\n        cs = self.snapshotSettings.modified(\n            newSettings={CONF_GROW_TO_FULL_CORE_AFTER_LOAD: True, \"dumpSnapshot\": [\"0000\"]}\n        )\n        o = getOperatorClassFromSettings(cs)(cs)\n        self.assertIsInstance(o, OperatorSnapshots)\n        o.r = self.symmetricReactor\n        # Just want Database interface not history tracker not reporting not etc.\n        o.addInterface(DatabaseInterface(o.r, o.cs))\n        # Mock interactAllBOC so we don't do iteract every nodes\n        # We just want to trigger the re-attachment of the loaded reactor\n        o.interactAllBOC = Mock(return_value=True)\n        o.operate()\n        self.assertTrue(o.r.core.isFullCore)\n"
  },
  {
    "path": "armi/operators/tests/test_operators.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for operators.\"\"\"\n\nimport collections\nimport io\nimport os\nimport sys\nimport unittest\nfrom contextlib import contextmanager\nfrom unittest.mock import patch\n\nfrom armi import settings\nfrom armi.bookkeeping.db.databaseInterface import DatabaseInterface\nfrom armi.interfaces import Interface, TightCoupler\nfrom armi.operators.operator import Operator\nfrom armi.physics.neutronics.globalFlux.globalFluxInterface import (\n    GlobalFluxInterfaceUsingExecuters,\n)\nfrom armi.reactor.reactors import Core, Reactor\nfrom armi.reactor.tests import test_reactors\nfrom armi.settings.caseSettings import Settings\nfrom armi.settings.fwSettings.globalSettings import (\n    CONF_CYCLES_SKIP_TIGHT_COUPLING_INTERACTION,\n    CONF_DEFERRED_INTERFACE_NAMES,\n    CONF_DEFERRED_INTERFACES_CYCLE,\n    CONF_RUN_TYPE,\n    CONF_TIGHT_COUPLING,\n    CONF_TIGHT_COUPLING_SETTINGS,\n)\nfrom armi.tests import mockRunLogs\nfrom armi.utils import directoryChangers\nfrom armi.utils.directoryChangers import TemporaryDirectoryChanger\n\n\nclass InterfaceA(Interface):\n    purpose = \"A\"\n    name = \"First\"\n\n\nclass InterfaceB(InterfaceA):\n    \"\"\"Dummy Interface that extends A.\"\"\"\n\n    purpose = \"A\"\n    name = \"Second\"\n\n\nclass InterfaceC(Interface):\n    purpose = \"A\"\n    name = \"Third\"\n\n\nclass OperatorTests(unittest.TestCase):\n    def 
setUp(self):\n        self.o, self.r = test_reactors.loadTestReactor(inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\")\n        self.activeInterfaces = [ii for ii in self.o.interfaces if ii.enabled()]\n\n    def test_operatorData(self):\n        \"\"\"Test that the operator has input data, a reactor model.\n\n        .. test:: The Operator includes input data and the reactor data model.\n            :id: T_ARMI_OPERATOR_COMM\n            :tests: R_ARMI_OPERATOR_COMM\n        \"\"\"\n        self.assertEqual(self.o.r, self.r)\n        self.assertEqual(type(self.o.cs), settings.Settings)\n\n    @patch(\"armi.operators.Operator._interactAll\")\n    def test_orderedInterfaces(self, interactAll):\n        \"\"\"Test the default interfaces are in an ordered list, looped over at each time step.\n\n        .. test:: An ordered list of interfaces are run at each time step.\n            :id: T_ARMI_OPERATOR_INTERFACES\n            :tests: R_ARMI_OPERATOR_INTERFACES\n\n        .. test:: Interfaces are run at BOC, EOC, and at time points between.\n            :id: T_ARMI_INTERFACE\n            :tests: R_ARMI_INTERFACE\n\n        .. 
test:: When users set the time discretization, it is enforced.\n            :id: T_ARMI_FW_HISTORY2\n            :tests: R_ARMI_FW_HISTORY\n        \"\"\"\n        # an ordered list of interfaces\n        self.assertGreater(len(self.o.interfaces), 0)\n        for i in self.o.interfaces:\n            self.assertTrue(isinstance(i, Interface))\n\n        # make sure we only iterate one time step\n        self.o.cs = self.o.cs.modified(newSettings={\"nCycles\": 2})\n        self.r.p.cycle = 1\n\n        # mock some stdout logging of what's happening when\n        def sideEffect(node, activeInts, *args, **kwargs):\n            print(node)\n            print(activeInts)\n\n        interactAll.side_effect = sideEffect\n\n        # run the operator through one cycle\n        origout = sys.stdout\n        try:\n            out = io.StringIO()\n            sys.stdout = out\n            self.o.operate()\n        finally:\n            sys.stdout = origout\n\n        # grab the log data\n        log = out.getvalue()\n\n        # verify we have some common interfaces listed\n        self.assertIn(\"main\", log)\n        self.assertIn(\"fuelHandler\", log)\n        self.assertIn(\"fissionProducts\", log)\n        self.assertIn(\"history\", log)\n        self.assertIn(\"snapshot\", log)\n\n        # At the first time step, we get one ordered list of interfaces\n        interfaces = log.split(\"BOL\")[1].split(\"EOL\")[0].split(\",\")\n        self.assertGreater(len(interfaces), 0)\n        for i in interfaces:\n            self.assertIn(\"Interface\", i)\n\n        # verify the various time nodes are hit in order\n        timeNodes = [\"BOL\", \"BOC\"] + [\"EveryNode\"] * 3 + [\"EOC\", \"EOL\"]\n        for node in timeNodes:\n            self.assertIn(node, log)\n            log = node.join(log.split(node)[1:])\n\n    def test_addInterfaceSubclassCollision(self):\n        cs = settings.Settings()\n\n        interfaceA = InterfaceA(self.r, cs)\n\n        interfaceB = 
InterfaceB(self.r, cs)\n        self.o.addInterface(interfaceA)\n\n        # 1) Adds B and gets rid of A\n        self.o.addInterface(interfaceB)\n        self.assertEqual(self.o.getInterface(\"Second\"), interfaceB)\n        self.assertEqual(self.o.getInterface(\"First\"), None)\n\n        # 2) Now we have B which is a subclass of A,\n        #    we want to not add A (but also not have an error)\n        self.o.addInterface(interfaceA)\n        self.assertEqual(self.o.getInterface(\"Second\"), interfaceB)\n        self.assertEqual(self.o.getInterface(\"First\"), None)\n\n        # 3) Also if another class not a subclass has the same purpose,\n        #    raise an error\n        interfaceC = InterfaceC(self.r, cs)\n        self.assertRaises(RuntimeError, self.o.addInterface, interfaceC)\n\n        # 4) Check adding a different purpose Interface\n        interfaceC.purpose = \"C\"\n        self.o.addInterface(interfaceC)\n        self.assertEqual(self.o.getInterface(\"Second\"), interfaceB)\n        self.assertEqual(self.o.getInterface(\"Third\"), interfaceC)\n\n    def test_interfaceIsActive(self):\n        self.o, _r = test_reactors.loadTestReactor(inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\")\n        self.assertTrue(self.o.interfaceIsActive(\"main\"))\n        self.assertFalse(self.o.interfaceIsActive(\"Fake-o\"))\n\n    def test_getActiveInterfaces(self):\n        \"\"\"Ensure that the right interfaces are returned for a given interaction state.\"\"\"\n        self.o.cs[CONF_DEFERRED_INTERFACES_CYCLE] = 1\n        self.o.cs[CONF_DEFERRED_INTERFACE_NAMES] = [\"history\"]\n\n        # Test invalid inputs.\n        with self.assertRaises(ValueError):\n            self.o.getActiveInterfaces(\"notAnInterface\")\n\n        # Test BOL\n        interfaces = self.o.getActiveInterfaces(\"BOL\", excludedInterfaceNames=(\"xsGroups\"))\n        interfaceNames = [interface.name for interface in interfaces]\n        self.assertNotIn(\"xsGroups\", 
interfaceNames)\n        self.assertNotIn(\"history\", interfaceNames)\n\n        # Test BOC\n        interfaces = self.o.getActiveInterfaces(\"BOC\", cycle=0)\n        interfaceNames = [interface.name for interface in interfaces]\n        self.assertNotIn(\"history\", interfaceNames)\n\n        # Test EveryNode and EOC\n        interfaces = self.o.getActiveInterfaces(\"EveryNode\", excludedInterfaceNames=(\"xsGroups\"))\n        interfaceNames = [interface.name for interface in interfaces]\n        self.assertIn(\"history\", interfaceNames)\n        self.assertNotIn(\"xsGroups\", interfaceNames)\n\n        # Test Coupled\n        interfaces = self.o.getActiveInterfaces(\"Coupled\")\n        for test, ref in zip(interfaces, self.activeInterfaces):\n            self.assertEqual(test.name, ref.name)\n\n        # Test EOL\n        interfaces = self.o.getActiveInterfaces(\"EOL\")\n        self.assertEqual(interfaces[-1].name, \"main\")\n\n        # Test excludedInterfaceNames\n        excludedInterfaceNames = [\"fissionProducts\", \"fuelHandler\", \"xsGroups\"]\n        interfaces = self.o.getActiveInterfaces(\"EOL\", excludedInterfaceNames=excludedInterfaceNames)\n        interfaceNames = [ii.name for ii in interfaces]\n        self.assertIn(\"history\", interfaceNames)\n        self.assertIn(\"main\", interfaceNames)\n        self.assertIn(\"snapshot\", interfaceNames)\n        self.assertNotIn(\"fissionProducts\", interfaceNames)\n        self.assertNotIn(\"fuelHandler\", interfaceNames)\n        self.assertNotIn(\"xsGroups\", interfaceNames)\n\n    def test_loadStateError(self):\n        \"\"\"The ``loadTestReactor()`` test tool does not have any history in the DB to load from.\"\"\"\n        # a first, simple test that this method fails correctly\n        with self.assertRaises(RuntimeError):\n            self.o.loadState(0, 1)\n\n    def test_setStateToDefault(self):\n        # reset the runType for testing\n        self.assertEqual(self.o.cs[CONF_RUN_TYPE], 
\"Standard\")\n        self.o.cs = self.o.cs.modified(newSettings={\"runType\": \"fake\"})\n        self.assertEqual(self.o.cs[CONF_RUN_TYPE], \"fake\")\n\n        # validate the method works\n        cs = self.o.setStateToDefault(self.o.cs)\n        self.assertEqual(cs[CONF_RUN_TYPE], \"Standard\")\n\n    @patch(\"shutil.copy\")\n    @patch(\"os.listdir\")\n    def test_snapshotRequest(self, fakeDirList, fakeCopy):\n        fakeDirList.return_value = [\"mccAA.inp\"]\n        with TemporaryDirectoryChanger():\n            with mockRunLogs.BufferLog() as mock:\n                self.o.snapshotRequest(0, 1)\n                self.assertIn(\"ISOTXS-c0\", mock.getStdout())\n                self.assertIn(\"DIF3D input for snapshot\", mock.getStdout())\n                self.assertIn(\"Shuffle logic for snapshot\", mock.getStdout())\n                self.assertIn(\"Loading definition for snapshot\", mock.getStdout())\n            self.assertTrue(os.path.exists(\"snapShot0_1\"))\n\n        with TemporaryDirectoryChanger():\n            with mockRunLogs.BufferLog() as mock:\n                self.o.snapshotRequest(0, 2, iteration=1)\n                self.assertIn(\"ISOTXS-c0\", mock.getStdout())\n                self.assertIn(\"DIF3D input for snapshot\", mock.getStdout())\n                self.assertIn(\"Shuffle logic for snapshot\", mock.getStdout())\n                self.assertIn(\"Loading definition for snapshot\", mock.getStdout())\n            self.assertTrue(os.path.exists(\"snapShot0_2\"))\n\n\nclass TestCreateOperator(unittest.TestCase):\n    def test_createOperator(self):\n        \"\"\"Test that an operator can be created from settings.\n\n        .. 
test:: Create an operator from settings.\n            :id: T_ARMI_OPERATOR_SETTINGS\n            :tests: R_ARMI_OPERATOR_SETTINGS\n        \"\"\"\n        cs = settings.Settings()\n        o = Operator(cs)\n        # high-level items\n        self.assertTrue(isinstance(o, Operator))\n        self.assertTrue(isinstance(o.cs, settings.Settings))\n\n        # validate some more nitty-gritty operator details come from settings\n        burnStepsSetting = cs[\"burnSteps\"]\n        if type(burnStepsSetting) is not list:\n            burnStepsSetting = [burnStepsSetting]\n        self.assertEqual(o.burnSteps, burnStepsSetting)\n        self.assertEqual(o.maxBurnSteps, max(burnStepsSetting))\n\n        powerFracsSetting = cs[\"powerFractions\"]\n        if powerFracsSetting:\n            self.assertEqual(o.powerFractions, powerFracsSetting)\n        else:\n            self.assertEqual(o.powerFractions, [[1] * cs[\"burnSteps\"]])\n\n\nclass TestTightCoupling(unittest.TestCase):\n    def setUp(self):\n        self.cs = settings.Settings()\n        self.cs[CONF_TIGHT_COUPLING] = True\n        self.o = Operator(self.cs)\n        self.o.r = Reactor(\"empty\", None)\n        self.o.r.core = Core(\"empty\")\n\n    def test_getStepLengths(self):\n        \"\"\"Test the step lengths are correctly calculated, based on settings.\n\n        .. 
test:: Users can control time discretization of the simulation through settings.\n            :id: T_ARMI_FW_HISTORY0\n            :tests: R_ARMI_FW_HISTORY\n        \"\"\"\n        self.assertEqual(self.cs[\"nCycles\"], 1)\n        self.assertAlmostEqual(self.cs[\"cycleLength\"], 365.242199)\n        self.assertEqual(self.cs[\"burnSteps\"], 4)\n\n        self.assertEqual(len(self.o.stepLengths), 1)\n        self.assertEqual(len(self.o.stepLengths[0]), 4)\n\n    def test_couplingIsActive(self):\n        \"\"\"Ensure that ``cs[CONF_TIGHT_COUPLING]`` controls ``couplingIsActive``.\"\"\"\n        self.assertTrue(self.o.couplingIsActive())\n        self.o.cs[CONF_TIGHT_COUPLING] = False\n        self.assertFalse(self.o.couplingIsActive())\n\n    def test_performTightCoupling_Inactive(self):\n        \"\"\"Ensures no action by ``_performTightCoupling`` if ``cs[CONF_TIGHT_COUPLING] = false``.\"\"\"\n        self.o.cs[CONF_TIGHT_COUPLING] = False\n        self.o._performTightCoupling(0, 0, writeDB=False)\n        self.assertEqual(self.o.r.core.p.coupledIteration, 0)\n\n    def test_performTightCoupling_skip(self):\n        \"\"\"Ensure that cycles within ``cs[CONF_CYCLES_SKIP_TIGHT_COUPLING_INTERACTION]`` are skipped.\"\"\"\n        self.o.cs[CONF_CYCLES_SKIP_TIGHT_COUPLING_INTERACTION] = [1]\n        with mockRunLogs.BufferLog() as mock:\n            self.o._performTightCoupling(1, 0, writeDB=False)\n            self.assertIn(\"interactAllCoupled disabled this cycle\", mock.getStdout())\n            self.assertEqual(self.o.r.core.p.coupledIteration, 0)\n\n    def test_performTightCoupling_notConverged(self):\n        \"\"\"Ensure that the appropriate ``runLog.warning`` is addressed in tight coupling reaches max num of iters.\n\n        .. 
test:: The tight coupling logic can fail if there is no convergence.\n            :id: T_ARMI_OPERATOR_PHYSICS0\n            :tests: R_ARMI_OPERATOR_PHYSICS\n        \"\"\"\n\n        class NoConverge(TightCoupler):\n            def isConverged(self, _val: TightCoupler._SUPPORTED_TYPES) -> bool:\n                return False\n\n        class InterfaceNoConverge(Interface):\n            name = \"NoConverge\"\n\n            def __init__(self, r, cs):\n                super().__init__(r, cs)\n                self.coupler = NoConverge(param=\"dummy\", tolerance=None, maxIters=1)\n\n            def getTightCouplingValue(self):\n                return 0.0\n\n        self.o.addInterface(InterfaceNoConverge(None, self.o.cs))\n        with mockRunLogs.BufferLog() as mock:\n            self.o._performTightCoupling(0, 0, writeDB=False)\n            self.assertIn(\"have not converged! The maximum number of iterations\", mock.getStdout())\n\n    def test_performTightCoupling_WriteDB(self):\n        \"\"\"Ensure a tight coupling iteration occurs and that a DB WILL be written if requested.\"\"\"\n        hasCouplingInteraction = 1\n        with directoryChangers.TemporaryDirectoryChanger():\n            with mockRunLogs.BufferLog() as mock:\n                self.dbWriteForCoupling(writeDB=True)\n                self.assertIn(\"Writing to database for statepoint:\", mock.getStdout())\n                self.assertEqual(self.o.r.core.p.coupledIteration, hasCouplingInteraction)\n\n    def test_performTightCoupling_NoWriteDB(self):\n        \"\"\"Ensure a tight coupling iteration occurs and that a DB WILL NOT be written if requested.\"\"\"\n        hasCouplingInteraction = 1\n        with directoryChangers.TemporaryDirectoryChanger():\n            with mockRunLogs.BufferLog() as mock:\n                self.dbWriteForCoupling(writeDB=False)\n                self.assertNotIn(\"Writing to database for statepoint:\", mock.getStdout())\n                
self.assertEqual(self.o.r.core.p.coupledIteration, hasCouplingInteraction)\n\n    def dbWriteForCoupling(self, writeDB: bool):\n        self.o.removeAllInterfaces()\n        dbi = DatabaseInterface(self.o.r, self.o.cs)\n        dbi.initDB(fName=self._testMethodName + \".h5\")\n        self.o.addInterface(dbi)\n        self.o._performTightCoupling(0, 0, writeDB=writeDB)\n        h5Contents = list(dbi.database.getH5Group(dbi.r).items())\n        if writeDB:\n            self.assertTrue(h5Contents)\n        else:\n            self.assertFalse(h5Contents)\n        dbi.database.close()\n\n    def test_computeTightCouplingConvergence(self):\n        \"\"\"Ensure that tight coupling convergence can be computed and checked.\n\n        Notes\n        -----\n        - Assertion #1: ensure that the convergence of Keff, eps, is greater than 1e-5 (the\n          prescribed convergence criteria)\n        - Assertion #2: ensure that eps is (prevIterKeff - currIterKeff)\n        \"\"\"\n        prevIterKeff = 0.9\n        currIterKeff = 1.0\n        self.o.cs[CONF_TIGHT_COUPLING_SETTINGS] = {\"globalFlux\": {\"parameter\": \"keff\", \"convergence\": 1e-05}}\n        globalFlux = GlobalFluxInterfaceUsingExecuters(self.o.r, self.o.cs)\n        globalFlux.coupler.storePreviousIterationValue(prevIterKeff)\n        self.o.addInterface(globalFlux)\n        # set keff to some new value and compute tight coupling convergence\n        self.o.r.core.p.keff = currIterKeff\n        self.o._convergenceSummary = collections.defaultdict(list)\n        self.assertFalse(self.o._checkTightCouplingConvergence([globalFlux]))\n        self.assertAlmostEqual(\n            globalFlux.coupler.eps,\n            currIterKeff - prevIterKeff,\n        )\n\n\nclass CyclesSettingsTests(unittest.TestCase):\n    \"\"\"Check that we can correctly access the various cycle settings from the operator.\"\"\"\n\n    detailedCyclesSettings = \"\"\"\nmetadata:\n  version: uncontrolled\nsettings:\n  power: 1000000000.0\n 
 nCycles: 3\n  cycles:\n    - name: startup sequence\n      cumulative days: [1, 2, 3]\n      power fractions: [0.1, 0.2, 0.3]\n      availability factor: 0.1\n    - cycle length: 10\n      burn steps: 5\n      power fractions: [0.2, 0.2, 0.2, 0.2, 0]\n      availability factor: 0.5\n    - name: prepare for shutdown\n      step days: [3, R4]\n      power fractions: [0.3, R4]\n  runType: Standard\n\"\"\"\n\n    def setUp(self):\n        self.standaloneDetailedCS = Settings()\n        self.standaloneDetailedCS.loadFromString(self.detailedCyclesSettings)\n        self.detailedOperator = Operator(self.standaloneDetailedCS)\n\n    def test_getPowerFractions(self):\n        \"\"\"Test that the power fractions are calculated correctly.\n\n        .. test:: Test the powerFractions are retrieved correctly for multiple cycles.\n            :id: T_ARMI_SETTINGS_POWER1\n            :tests: R_ARMI_SETTINGS_POWER\n        \"\"\"\n        powerFractionsSolution = [\n            [0.1, 0.2, 0.3],\n            [0.2, 0.2, 0.2, 0.2, 0],\n            [0.3, 0.3, 0.3, 0.3, 0.3],\n        ]\n\n        self.assertEqual(self.detailedOperator.powerFractions, powerFractionsSolution)\n        self.detailedOperator._powerFractions = None\n        self.assertEqual(self.detailedOperator.powerFractions, powerFractionsSolution)\n\n    def test_getCycleNames(self):\n        cycleNamesSolution = [\"startup sequence\", None, \"prepare for shutdown\"]\n        self.assertEqual(self.detailedOperator.cycleNames, cycleNamesSolution)\n\n        self.detailedOperator._cycleNames = None\n        self.assertEqual(self.detailedOperator.cycleNames, cycleNamesSolution)\n\n    def test_getAvailabilityFactors(self):\n        \"\"\"Check that the \"availability factor\" is correctly set from the \"cycles\" setting.\n\n        .. 
test:: Users can manually control time discretization of the simulation.\n            :id: R_ARMI_FW_HISTORY3\n            :tests: R_ARMI_FW_HISTORY\n        \"\"\"\n        availabilityFactorsSolution = [0.1, 0.5, 1]\n        self.assertEqual(self.detailedOperator.availabilityFactors, availabilityFactorsSolution)\n\n        self.detailedOperator._availabilityFactors = None\n        self.assertEqual(self.detailedOperator.availabilityFactors, availabilityFactorsSolution)\n\n    def test_getStepLengths(self):\n        \"\"\"Test that the manually-set, detailed time steps are retrievable.\n\n        .. test:: Users can manually control time discretization of the simulation.\n            :id: T_ARMI_FW_HISTORY1\n            :tests: R_ARMI_FW_HISTORY\n        \"\"\"\n        stepLengthsSolution = [\n            [1, 1, 1],\n            [10 / 5 * 0.5, 10 / 5 * 0.5, 10 / 5 * 0.5, 10 / 5 * 0.5, 10 / 5 * 0.5],\n            [3, 3, 3, 3, 3],\n        ]\n\n        # detailed step lengths can be set manually\n        self.assertEqual(self.detailedOperator.stepLengths, stepLengthsSolution)\n        self.detailedOperator._stepLength = None\n        self.assertEqual(self.detailedOperator.stepLengths, stepLengthsSolution)\n\n        # when doing detailed step information, we don't get step information from settings\n        cs = self.detailedOperator.cs\n        self.assertEqual(cs[\"nCycles\"], 3)\n        with self.assertRaises(ValueError):\n            cs[\"cycleLength\"]\n        with self.assertRaises(ValueError):\n            cs[\"burnSteps\"]\n\n    def test_getCycleLengths(self):\n        \"\"\"Check that the \"cycle length\" is correctly set from the \"cycles\" setting.\n\n        .. 
test:: Users can manually control time discretization of the simulation.\n            :id: R_ARMI_FW_HISTORY4\n            :tests: R_ARMI_FW_HISTORY\n        \"\"\"\n        cycleLengthsSolution = [30, 10, 15]\n        self.assertEqual(self.detailedOperator.cycleLengths, cycleLengthsSolution)\n\n        self.detailedOperator._cycleLengths = None\n        self.assertEqual(self.detailedOperator.cycleLengths, cycleLengthsSolution)\n\n    def test_getBurnSteps(self):\n        \"\"\"Check that the \"burn steps\" is correctly set from the \"cycles\" setting.\n\n        .. test:: Users can manually control time discretization of the simulation.\n            :id: R_ARMI_FW_HISTORY5\n            :tests: R_ARMI_FW_HISTORY\n        \"\"\"\n        burnStepsSolution = [3, 5, 5]\n        self.assertEqual(self.detailedOperator.burnSteps, burnStepsSolution)\n\n        self.detailedOperator._burnSteps = None\n        self.assertEqual(self.detailedOperator.burnSteps, burnStepsSolution)\n\n    def test_getMaxBurnSteps(self):\n        \"\"\"Check that the max of the \"burn steps\" is correctly set from the \"cycles\" setting.\n\n        .. 
test:: Users can manually control time discretization of the simulation.\n            :id: R_ARMI_FW_HISTORY6\n            :tests: R_ARMI_FW_HISTORY\n        \"\"\"\n        maxBurnStepsSolution = 5\n        self.assertEqual(self.detailedOperator.maxBurnSteps, maxBurnStepsSolution)\n\n        self.detailedOperator._maxBurnSteps = None\n        self.assertEqual(self.detailedOperator.maxBurnSteps, maxBurnStepsSolution)\n\n\nclass TestInterfaceAndEventHeaders(unittest.TestCase):\n    @classmethod\n    def setUpClass(cls):\n        cls.o, cls.r = test_reactors.loadTestReactor(\n            inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\",\n            customSettings={CONF_TIGHT_COUPLING: True},\n        )\n        cls.r.p.cycle = 0\n        cls.r.p.timeNode = 1\n        cls.r.p.time = 11.01\n        cls.r.core.p.coupledIteration = 7\n\n    def test_expandCycleAndTimeNodeArgs_Empty(self):\n        \"\"\"When cycleNodeInfo should be an empty string.\"\"\"\n        for task in [\"Init\", \"BOL\", \"EOL\"]:\n            self.assertEqual(self.o._expandCycleAndTimeNodeArgs(interactionName=task), \"\")\n\n    def test_expandCycleAndTimeNodeArgs_Cycle(self):\n        \"\"\"When cycleNodeInfo should return only the cycle.\"\"\"\n        for task in [\"BOC\", \"EOC\"]:\n            self.assertEqual(\n                self.o._expandCycleAndTimeNodeArgs(interactionName=task),\n                f\" - timestep: cycle {self.r.p.cycle}\",\n            )\n\n    def test_expandCycleAndTimeNodeArgs_EveryNode(self):\n        \"\"\"When cycleNodeInfo should return the cycle and node.\"\"\"\n        self.assertEqual(\n            self.o._expandCycleAndTimeNodeArgs(interactionName=\"EveryNode\"),\n            f\" - timestep: cycle {self.r.p.cycle}, node {self.r.p.timeNode}, year {'{0:.2f}'.format(self.r.p.time)}\",\n        )\n\n    def test_expandCycleAndTimeNodeArgs_Coupled(self):\n        \"\"\"When cycleNodeInfo should return the cycle, node, and iteration number.\"\"\"\n        
self.assertEqual(\n            self.o._expandCycleAndTimeNodeArgs(interactionName=\"Coupled\"),\n            (\n                f\" - timestep: cycle {self.r.p.cycle}, node {self.r.p.timeNode}, year \"\n                f\"{'{0:.2f}'.format(self.r.p.time)} - iteration {self.r.core.p.coupledIteration}\"\n            ),\n        )\n\n\nclass OperatorRestartTests(unittest.TestCase):\n    \"\"\"Tests on the behavior of the interactAllRestart hook.\"\"\"\n\n    @classmethod\n    def setUpClass(cls):\n        cls.START_CYCLE = 4\n        cls.START_NODE = 2\n        cls.o, cls.r = test_reactors.loadTestReactor(\n            inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\",\n            customSettings={\n                \"loadStyle\": \"fromDB\",\n                \"startCycle\": cls.START_CYCLE,\n                \"startNode\": cls.START_NODE,\n                # Need more cycles than we're restarting\n                \"nCycles\": cls.START_CYCLE + 3,\n            },\n        )\n\n    def setUp(self):\n        self.dbi: DatabaseInterface = self.o.getInterface(\"database\")\n        self.assertIsNotNone(self.dbi, msg=\"Database interface required for test.\")\n\n    def test_nominalRestart(self):\n        \"\"\"Make sure the database interface is uniquely called and the interactRestart is not called for DB.\n\n        We need to make sure the database interface loads the reactor before every other interface goes first.\n        But then, when all the interfaces get their chance to ``interactRestart``, the database interface\n        does not. 
Since it did its work already.\n        \"\"\"\n        mainInterface: Interface = self.o.getInterface(name=\"main\")\n        self.assertIsNotNone(mainInterface)\n        with (\n            patch.object(self.dbi, \"interactRestart\") as dbInteractRestart,\n            patch.object(self.dbi, \"prepRestartRun\") as dbPrepRestart,\n            patch.object(mainInterface, \"interactRestart\") as mainIfcRestart,\n        ):\n            self.o.interactAllRestart(self.dbi)\n        dbPrepRestart.assert_called_once()\n        # Skip DatabaseInterface.interactRestart since we jumped ahead and \"restarted\" with prepRestartRun\n        dbInteractRestart.assert_not_called()\n\n        # Ensure we called other interfaces' restarts at the previous node\n        mainIfcRestart.assert_called_once_with(\n            (self.START_CYCLE, self.START_NODE), (self.START_CYCLE, self.START_NODE - 1)\n        )\n        self.assertEqual(self.o.r.p.cycle, self.START_CYCLE)\n        self.assertEqual(self.o.r.p.timeNode, self.START_NODE)\n\n    @contextmanager\n    def patchCS(self, **kwargs):\n        \"\"\"Patch the case settings, restoring at the end of the context block.\n\n        Kwargs are key: value pairs for settings to be modified.\n\n        Can't use ``patch.dict`` because case settings don't have at least a ``.copy``\n        method that ``patch.dict`` expects.\n        \"\"\"\n        cs = self.o.cs\n        old = {k: cs[k] for k in kwargs}\n        for k, v in kwargs.items():\n            cs[k] = v\n        yield\n        for k, v in old.items():\n            cs[k] = v\n\n    def test_callPreviousEOC(self):\n        \"\"\"When restarting at the start of the cycle, make sure we call the previous interactEOC for all interfaces.\"\"\"\n        with (\n            self.patchCS(startNode=0),\n            patch.object(self.o, \"interactAllEOC\") as patchEOC,\n            # Don't want to attempt to load a fictitious DB\n            patch.object(self.dbi, \"prepRestartRun\"),\n        ):\n            self.o.interactAllRestart(self.dbi)\n        patchEOC.assert_called_once_with(self.START_CYCLE - 1)\n\n    def test_noDatabaseNoRestart(self):\n        \"\"\"Ensure there must be a database interface responsible for loading from database.\"\"\"\n        with self.assertRaisesRegex(ValueError, \"No database interface\"):\n            self.o.interactAllRestart(None)\n"
  },
  {
    "path": "armi/physics/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThe Physics Packages are where the magic of physical simulation happens in an ARMI run.\n\n.. tip:: The vast majority of physics capabilities are provided by :py:mod:`Plugins <armi.plugins>`.\n    Thus, this package contains some fairly generic physics-related code that belongs in a reactor\n    analysis framework.\n\nBesides providing some generic physics-related capabilities, this package also provides a recommended\n*physics namespace* for all ARMI plugins to follow. The physics namespaces we've come up with is\nas follows:\n\nfuelCycle\n    Fuel management, fabrication, reprocessing, mass flow, etc.\n\nneutronics\n    Radiation transport, nuclear depletion, nuclear cross sections, reactivity coefficients,\n    kinetics, etc.\n\nsafety\n    Systems analysis in accident scenarios, source term, dose conversion, etc.\n\nfuelPerformance\n    Changes in fuel systems vs. burnup and time, including thermophysical modeling of\n    fuel, cladding, fuel salt, etc.\n\nthermalHydraulics\n    Heat transfer, fluid flow, pressure drop, power cycles, you name it.\n\neconomics\n    Economic modeling and cost estimation.\n\n    .. 
important:: Yeah, we know that it is kind of a stretch to call economics a kind of physics.\n\nWe have found it very useful to use `Python namespace packages <https://packaging.python.org/guides/packaging-namespace-packages/>`_\nto mirror this exact namespace in physics plugins that are outside of the ARMI framework. Thus, there can\nbe two totally separate plugins::\n\n    IAEA/\n        physics/\n            neutronics/\n                superSourceTerm/\n                    __init__.py\n                    plugin.py\n\nand::\n\n    IAEA/\n        physics/\n            economics/\n                magwoodsbrain/\n                    __init__.py\n                    plugin.py\n\n\nAnd then the associated ARMI-based app could import both ``IAEA.physics.neutronics.superSourceTerm`` and\n``IAEA.physics.economics.magwoodsbrain``. Having a consistency in namespace along these lines is\nquite nice.\n\"\"\"\n"
  },
  {
    "path": "armi/physics/constants.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Some constants.\"\"\"\n\nDPA_CROSS_SECTIONS = {}\n\"\"\"Multigroup dpa cross sections.\n\nDisplacements per atom are correlated to material damage.\n\nNotes\n-----\nThis data structure can be updated by plugins with design-specific dpa data.\n\n:meta hide-value:\n\"\"\"\n\n# The following are multigroup DPA XS for EBR II. They were generated using an ultra hard MCC spectrum\n# that calculated buckling and had an initial keff of 2. Even so, Inc600/625/X750 33 group dpa XS values are less than\n# 5% for all but 5 energy groups. 
The maximum deviation is 18% in INC625 between .192 and .331 MeV.\n\nDPA_CROSS_SECTIONS[\"dpa_EBRII_HT9\"] = [\n    2.34569e03,\n    1.92004e03,\n    1.58640e03,\n    1.25670e03,\n    8.24006e02,\n    5.20750e02,\n    3.96146e02,\n    3.28749e02,\n    2.06149e02,\n    1.42452e02,\n    1.15189e02,\n    6.60183e01,\n    8.23281e01,\n    1.31771e01,\n    1.94552e01,\n    3.33861e01,\n    1.27099e01,\n    6.20510e00,\n    3.58651e00,\n    3.74080e00,\n    4.52607e-01,\n    1.62650e-01,\n    1.24318e-01,\n    1.56210e-01,\n    1.89583e-01,\n    2.36694e-01,\n    2.97445e-01,\n    3.92136e-01,\n    5.07320e-01,\n    6.81782e-01,\n    1.07978e00,\n    2.43258e00,\n    4.35563e00,\n]\n\nDPA_CROSS_SECTIONS[\"dpa_EBRII_INC600\"] = [\n    2.57204e03,\n    2.11682e03,\n    1.64031e03,\n    1.21591e03,\n    8.69816e02,\n    6.47128e02,\n    4.25248e02,\n    3.59778e02,\n    2.89208e02,\n    1.89443e02,\n    1.55667e02,\n    1.22460e02,\n    8.25721e01,\n    1.15026e02,\n    9.90510e01,\n    2.42252e01,\n    1.73504e01,\n    9.34915e00,\n    5.67409e00,\n    3.13557e00,\n    5.95081e-01,\n    1.95832e-01,\n    1.93791e-01,\n    2.52465e-01,\n    3.11159e-01,\n    3.71897e-01,\n    4.95951e-01,\n    6.50177e-01,\n    8.39344e-01,\n    1.12626e00,\n    1.78500e00,\n    4.02021e00,\n    7.19616e00,\n]\n\nDPA_CROSS_SECTIONS[\"dpa_EBRII_INC625\"] = [\n    2.49791e03,\n    2.05899e03,\n    1.60441e03,\n    1.20292e03,\n    8.68237e02,\n    6.39219e02,\n    4.16975e02,\n    3.50177e02,\n    2.74491e02,\n    1.89846e02,\n    1.53178e02,\n    1.16379e02,\n    7.35708e01,\n    1.05281e02,\n    8.96142e01,\n    2.58537e01,\n    1.91218e01,\n    8.44318e00,\n    5.16493e00,\n    2.67000e00,\n    5.66731e-01,\n    2.20242e-01,\n    1.92435e-01,\n    3.31226e-01,\n    3.69475e-01,\n    5.24326e-01,\n    4.78120e-01,\n    6.22211e-01,\n    8.15999e-01,\n    1.07725e00,\n    1.70732e00,\n    3.84540e00,\n    6.88285e00,\n]\n\nDPA_CROSS_SECTIONS[\"dpa_EBRII_INCX750\"] = [\n    2.59270e03,\n    
2.13361e03,\n    1.65837e03,\n    1.23739e03,\n    8.86458e02,\n    6.51012e02,\n    4.27294e02,\n    3.58449e02,\n    2.88178e02,\n    1.88428e02,\n    1.56886e02,\n    1.27132e02,\n    8.89576e01,\n    1.31703e02,\n    1.04350e02,\n    2.55248e01,\n    1.77532e01,\n    9.43101e00,\n    5.60558e00,\n    3.06838e00,\n    5.85632e-01,\n    1.90347e-01,\n    1.89737e-01,\n    2.50070e-01,\n    3.08765e-01,\n    3.69079e-01,\n    4.92257e-01,\n    6.45369e-01,\n    8.33181e-01,\n    1.11802e00,\n    1.77196e00,\n    3.98945e00,\n    7.13947e00,\n]\n\nDPA_CROSS_SECTIONS[\"dpa_EBRII_PE16\"] = [\n    2.47895e03,\n    2.03583e03,\n    1.61943e03,\n    1.23864e03,\n    8.58439e02,\n    5.95879e02,\n    4.10632e02,\n    3.42948e02,\n    2.49940e02,\n    1.69919e02,\n    1.39511e02,\n    1.00171e02,\n    8.21254e01,\n    7.94117e01,\n    6.73353e01,\n    2.84413e01,\n    1.61127e01,\n    7.13145e00,\n    4.59314e00,\n    3.12973e00,\n    5.17916e-01,\n    1.51560e-01,\n    1.56357e-01,\n    2.37675e-01,\n    2.81173e-01,\n    3.65433e-01,\n    4.12907e-01,\n    5.40601e-01,\n    7.03084e-01,\n    9.37963e-01,\n    1.48726e00,\n    3.34954e00,\n    5.99536e00,\n]\n"
  },
  {
    "path": "armi/physics/executers.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nExecutors are useful for having a standard way to run physics calculations.\n\nThey may involve external codes (with inputs/execution/output) or in-memory\ndata pathways.\n\"\"\"\n\nimport hashlib\nimport os\n\nfrom armi import runLog\nfrom armi.context import MPI_RANK, getFastPath\nfrom armi.utils import directoryChangers, pathTools\n\n\nclass ExecutionOptions:\n    \"\"\"\n    A data structure representing all options needed for a physics kernel.\n\n    .. impl:: Options for executing external calculations.\n        :id: I_ARMI_EX0\n        :implements: R_ARMI_EX\n\n        Implements a basic container to hold and report options to be used in\n        the execution of an external code (see :need:`I_ARMI_EX1`).\n        Options are stored as instance attributes and can be dumped as a string\n        using :py:meth:`~armi.physics.executers.ExecutionOptions.describe`, which\n        will include the name and value of all public attributes of the instance.\n\n        Also facilitates the ability to execute parallel instances of a code by\n        providing the ability to resolve a ``runDir`` that is aware of the\n        executing MPI rank. 
This is done via :py:meth:`~armi.physics.executers.ExecutionOptions.setRunDirFromCaseTitle`,\n        where the user passes in a ``caseTitle`` string, which is hashed and combined\n        with the MPI rank to provide a unique directory name to be used by each parallel\n        instance.\n\n    Attributes\n    ----------\n    inputFile : str\n        Name of main input file. Often passed to stdin of external code.\n    outputFile : str\n        Name of main output file. Often the stdout of external code.\n    extraInputFiles : list of tuples\n        (sourceName, destName) pairs of file names that will be brought from the\n        working dir into the runDir. Allows renames while in transit.\n    extraOutputFiles : list of tuples\n        (sourceName, destName) pairs of file names that will be extracted from the\n        runDir to the working dir\n    executablePath : str\n        Path to external executable to run (if external code is used)\n    runDir : str\n        Path on running system where the run will take place. This is often used\n        to ensure external codes that use hard-drive disk space run on a local disk\n        rather than a shared network drive\n    workingDir : str\n        Path on system where results will be placed after the run. This is often\n        a shared network location. 
Auto-applied during execution by default.\n    label : str\n        A name for the run that may be used as a prefix for input/output files generated.\n    interface : str\n        A name for the interface calling the Executer that may be used to organize the\n        input/output files generated within sub-folders under the working directory.\n    savePhysicsFiles : bool\n        Dump the physics kernel I/O files from the execution to a dedicated directory that\n        will not be overwritten so they will be available after the run.\n    copyOutput : bool\n        Copy the output from running the executable back to the working directory.\n    applyResultsToReactor : bool\n        Update the in-memory reactor model with results upon completion. Set to False\n        when information from a run is needed for auxiliary purposes rather than progressing\n        the reactor model.\n    \"\"\"\n\n    def __init__(self, label=None):\n        self.inputFile = None\n        self.outputFile = None\n        self.extraInputFiles = []\n        self.extraOutputFiles = []\n        self.executablePath = None\n        self.runDir = None\n        self.workingDir = None\n        self.label = label\n        self.interfaceName = None\n        self.applyResultsToReactor = True\n        self.paramsToScaleSubset = None\n        self.savePhysicsFiles = False\n        self.copyOutput = True\n\n    def __repr__(self):\n        return f\"<{self.__class__.__name__}: {self.label}>\"\n\n    def fromUserSettings(self, cs):\n        \"\"\"Set options from a particular Settings object.\"\"\"\n        raise NotImplementedError()\n\n    def fromReactor(self, reactor):\n        \"\"\"Set options from a particular reactor object.\"\"\"\n        raise NotImplementedError()\n\n    def resolveDerivedOptions(self):\n        \"\"\"Called by executers right before executing.\"\"\"\n\n    def setRunDirFromCaseTitle(self, caseTitle: str) -> None:\n        \"\"\"\n        Set run directory derived from case 
title and label.\n\n        This is optional (you can set runDir to whatever you want). If you\n        use this, you will get a relatively consistent naming convention\n        for your fast-path folders.\n        \"\"\"\n        # This creates a hash of the case title plus the label\n        # to shorten the running directory and to avoid path length\n        # limitations on the OS.\n        caseString = f\"{caseTitle}-{str(self.label)}\".encode(\"utf-8\")\n        caseTitleHash = str(hashlib.sha1(caseString).hexdigest())[:8]\n        self.runDir = os.path.join(getFastPath(), f\"{caseTitleHash}-{MPI_RANK}\")\n\n    def describe(self) -> str:\n        \"\"\"Make a string summary of all options.\"\"\"\n        lines = [\"Options summary:\", \"----------------\"]\n        for key, val in sorted(self.__dict__.items()):\n            if not key.startswith(\"_\"):\n                lines.append(f\"  {key:40s}{str(val)[:80]:80s}\")\n        return \"\\n\".join(lines)\n\n\nclass Executer:\n    \"\"\"\n    Short-lived object that coordinates a calculation step and updates a reactor.\n\n    Notes\n    -----\n    This is deliberately **not** a :py:class:`~mpiActions.MpiAction`. Thus, Executers can run as\n    potentially multiple steps in a parent (parallelizable ) MpiAction or in other flexible\n    ways. 
This is intended to maximize reusability.\n    \"\"\"\n\n    def __init__(self, options, reactor):\n        self.options = options\n        self.r = reactor\n        self.dcType = directoryChangers.TemporaryDirectoryChanger\n\n    def run(self):\n        \"\"\"\n        Run the executer steps.\n\n        This should use the current state of the reactor as input,\n        perform some kind of calculation, and update the reactor\n        with the output.\n        \"\"\"\n        raise NotImplementedError()\n\n\nclass DefaultExecuter(Executer):\n    \"\"\"\n    An Executer that uses a common run sequence.\n\n    This sequence has been found to be relatively common in many\n    externally-executed physics codes. It is here for convenience\n    but is not required. The sequence look like:\n\n    * Choose modeling options (either from the global run settings input or dictated programmatically)\n    * Apply geometry transformations to the ARMI Reactor as needed\n    * Build run-specific working directory\n    * Write input file(s)\n    * Put specific input files and libs in run directory\n    * Run the analysis (external execution, or not)\n    * Process output while still in run directory\n    * Check error conditions\n    * Move desired output files back to main working directory\n    * Clean up run directory\n    * Un-apply geometry transformations as needed\n    * Update ARMI data model as desired\n\n    .. 
impl:: Default tool for executing external calculations.\n        :id: I_ARMI_EX1\n        :implements: R_ARMI_EX\n\n        Facilitates the execution of external calculations by accepting ``options`` (an\n        :py:class:`~armi.physics.executers.ExecutionOptions` object) and providing\n        methods that build run directories and execute a code based on the values in\n        ``options``.\n\n        The :py:meth:`~armi.physics.executers.DefaultExecuter.run` method will first\n        resolve any derived options in the ``options`` object and check if the specified\n        ``executablePath`` option is valid, raising an error if not. If it is,\n        preparation work for executing the code is performed, such as performing any geometry\n        transformations specified in subclasses or building the directories needed\n        to save input and output files. Once the temporary working directory is created,\n        the executer moves into it and runs the external code, applying any results\n        from the run as specified in subclasses.\n\n        Finally, any geometry perturbations that were performed are undone.\n    \"\"\"\n\n    def run(self):\n        \"\"\"\n        Run the executer steps.\n\n        .. warning::\n                If a calculation requires anything different from what this method does,\n                do not update this method with new complexity! Instead, simply make your own\n                run sequence and/or class. 
This pattern is useful only in that it is fairly simple.\n                By all means, do use ``DirectoryChanger`` and ``ExecuterOptions``\n                and other utilities.\n        \"\"\"\n        self.options.resolveDerivedOptions()\n        runLog.debug(self.options.describe())\n        if self.options.executablePath and not os.path.exists(self.options.executablePath):\n            raise IOError(f\"Required executable `{self.options.executablePath}` not found for {self}\")\n        self._performGeometryTransformations()\n        inputs, outputs = self._collectInputsAndOutputs()\n        state = f\"c{self.r.p.cycle}n{self.r.p.timeNode}\"\n        dirName = self.options.interfaceName or self.options.label\n        if self.options.savePhysicsFiles:\n            outputDir = os.path.join(pathTools.armiAbsPath(os.getcwd()), state, dirName)\n        else:\n            outputDir = pathTools.armiAbsPath(os.getcwd())\n        # must either write input to CWD for analysis and then copy to runDir\n        # or not list it in inputs (for optimization)\n        self.writeInput()\n        with self.dcType(\n            self.options.runDir,\n            filesToMove=inputs,\n            filesToRetrieve=outputs,\n            outputPath=outputDir,\n        ) as dc:\n            self.options.workingDir = dc.initial\n            self._updateRunDir(dc.destination)\n            self._execute()\n            output = self._readOutput()\n            if self.options.applyResultsToReactor:\n                output.apply(self.r)\n        self._undoGeometryTransformations()\n        self._updateAdditionalParameters()\n        return output\n\n    def _updateRunDir(self, directory):\n        \"\"\"\n        If a ``TemporaryDirectoryChanger`` is used, the ``runDir`` needs to be updated.\n\n        If a ForcedCreationDirectoryChanger is used instead, nothing needs to be done.\n\n        Parameters\n        ----------\n        directory : str\n            New path for runDir\n        
\"\"\"\n        if self.dcType == directoryChangers.TemporaryDirectoryChanger:\n            self.options.runDir = directory\n\n    def _collectInputsAndOutputs(self):\n        \"\"\"\n        Get total lists of input and output files.\n\n        If self.options.copyOutput is false, don't copy the main `outputFile` back from\n        the working directory.\n\n        In some ARMI runs, the executer can be run hundreds or thousands of times and\n        generate many output files that aren't strictly necessary to keep around. One\n        can save space by choosing not to copy the outputs back in these special cases.\n        ``extraOutputFiles`` are typically controlled by the subclass, so the copyOutput\n        option only affects the main ``outputFile``.\n\n        \"\"\"\n        inputs = [self.options.inputFile] if self.options.inputFile else []\n        inputs.extend(self.options.extraInputFiles)\n        if self.options.outputFile and self.options.copyOutput:\n            outputs = [self.options.outputFile]\n        else:\n            outputs = []\n        outputs.extend(self.options.extraOutputFiles)\n        return inputs, outputs\n\n    def _execute(self) -> bool:\n        runLog.extra(\n            f\"Executing {self.options.executablePath}\\n\"\n            f\"\\tInput: {self.options.inputFile}\\n\"\n            f\"\\tOutput: {self.options.outputFile}\\n\"\n            f\"\\tWorking dir: {self.options.runDir}\"\n        )\n        return True\n\n    def writeInput(self):\n        pass\n\n    def _readOutput(self):\n        raise NotImplementedError()\n\n    def _applyOutputToDataModel(self, output):\n        pass\n\n    def _performGeometryTransformations(self):\n        pass\n\n    def _undoGeometryTransformations(self):\n        pass\n\n    def _updateAdditionalParameters(self):\n        pass\n"
  },
  {
    "path": "armi/physics/fuelCycle/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThe fuel cycle package analyzes the various elements of nuclear fuel cycles from mining to disposal.\n\nFuel cycle code can include things like:\n\n* In- and ex-core fuel management\n* Fuel chemistry\n* Fuel processing\n* Fuel fabrication\n* Fuel mass flow scenarios\n* And so on\n\nThere is one included fuel cycle plugin: The Fuel Handler.\n\nThe fuel handler plugin moves fuel around in a reactor.\n\"\"\"\n\nfrom armi import interfaces, operators, plugins\nfrom armi.operators import RunTypes\nfrom armi.physics.fuelCycle import fuelHandlers, settings\n\nORDER = interfaces.STACK_ORDER.FUEL_MANAGEMENT\n\n\nclass FuelHandlerPlugin(plugins.ArmiPlugin):\n    \"\"\"The built-in ARMI fuel management plugin.\"\"\"\n\n    @staticmethod\n    @plugins.HOOKIMPL\n    def exposeInterfaces(cs):\n        \"\"\"\n        Implementation of the exposeInterfaces plugin hookspec.\n\n        Notes\n        -----\n        The interface may import user input modules to customize the actual\n        fuel management.\n        \"\"\"\n        from armi.physics.neutronics.settings import CONF_NEUTRONICS_KERNEL\n\n        fuelHandlerNeedsToBeActive = (\n            cs[settings.CONF_FUEL_HANDLER_NAME]\n            or cs[settings.CONF_SHUFFLE_SEQUENCE_FILE]\n            or (cs[\"eqDirect\"] and cs[\"runType\"].lower() == RunTypes.STANDARD.lower())\n        )\n        if not 
fuelHandlerNeedsToBeActive or \"MCNP\" in cs[CONF_NEUTRONICS_KERNEL]:\n            return []\n        else:\n            enabled = cs[\"runType\"] != operators.RunTypes.SNAPSHOTS\n            return [interfaces.InterfaceInfo(ORDER, fuelHandlers.FuelHandlerInterface, {\"enabled\": enabled})]\n\n    @staticmethod\n    @plugins.HOOKIMPL\n    def defineSettings():\n        \"\"\"Define settings for the plugin.\"\"\"\n        return settings.getFuelCycleSettings()\n\n    @staticmethod\n    @plugins.HOOKIMPL\n    def defineSettingsValidators(inspector):\n        \"\"\"Implementation of settings inspections for fuel cycle settings.\"\"\"\n        return settings.getFuelCycleSettingValidators(inspector)\n"
  },
  {
    "path": "armi/physics/fuelCycle/assemblyRotationAlgorithms.py",
    "content": "# Copyright 2022 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nAlgorithms used to rotate hex assemblies in a reactor core.\n\nNotes\n-----\nThese algorithms are defined in assemblyRotationAlgorithms.py, but they are used in:\n``FuelHandler.outage()``.\n\n.. warning:: Nothing should go in this file, but rotation algorithms.\n\"\"\"\n\nimport math\nfrom collections import defaultdict\n\nfrom armi import runLog\nfrom armi.physics.fuelCycle.hexAssemblyFuelMgmtUtils import (\n    getOptimalAssemblyOrientation,\n)\nfrom armi.physics.fuelCycle.settings import CONF_ASSEM_ROTATION_STATIONARY\nfrom armi.physics.fuelCycle.utils import (\n    assemblyHasFuelPinBurnup,\n    assemblyHasFuelPinPowers,\n)\nfrom armi.reactor.assemblies import Assembly\n\n\ndef _rotationNumberToRadians(rot: int) -> float:\n    \"\"\"Convert a rotation number to radians, assuming a HexAssembly.\"\"\"\n    return rot * math.pi / 3\n\n\ndef buReducingAssemblyRotation(fh):\n    \"\"\"\n    Rotates all detail assemblies to put the highest bu pin in the lowest power orientation.\n\n    Parameters\n    ----------\n    fh : FuelHandler object\n        A fully initialized FuelHandler object.\n\n    See Also\n    --------\n    simpleAssemblyRotation : an alternative rotation algorithm\n    \"\"\"\n    runLog.info(\"Algorithmically rotating assemblies to minimize burnup\")\n    # Store how we should rotate each assembly but don't perform the rotation just yet\n    
# Consider assembly A is shuffled to a new location and rotated.\n    # Now, assembly B is shuffled to where assembly A used to be. We need to consider the\n    # power profile of A prior to it's rotation to understand the power profile B may see.\n    rotations: dict[int, list[Assembly]] = defaultdict(list)\n    for aPrev in fh.moved:\n        # If the assembly was out of the core, it will not have pin powers.\n        # No rotation information to be gained.\n        if aPrev.lastLocationLabel in Assembly.NOT_IN_CORE:\n            continue\n        aNow = fh.r.core.getAssemblyWithStringLocation(aPrev.lastLocationLabel)\n        # An assembly in the SFP could have burnup but if it's coming from the load\n        # queue it's totally fresh. Skip a check over all pins in the model\n        if aNow.lastLocationLabel == Assembly.LOAD_QUEUE:\n            continue\n        # no point in rotation if there's no pin detail\n        if assemblyHasFuelPinPowers(aPrev) and assemblyHasFuelPinBurnup(aNow):\n            rot = getOptimalAssemblyOrientation(aNow, aPrev)\n            rotations[rot].append(aNow)\n\n    if fh.cs[CONF_ASSEM_ROTATION_STATIONARY]:\n        for a in filter(\n            lambda asm: asm not in fh.moved and assemblyHasFuelPinPowers(asm) and assemblyHasFuelPinBurnup(asm),\n            fh.r.core,\n        ):\n            rot = getOptimalAssemblyOrientation(a, a)\n            rotations[rot].append(a)\n\n    nRotations = 0\n    for rot, assems in filter(lambda item: item[0], rotations.items()):\n        # Radians used for the actual rotation. 
But a neater degrees print out is nice for logs\n        radians = _rotationNumberToRadians(rot)\n        degrees = round(math.degrees(radians), 3)\n        for a in assems:\n            runLog.important(f\"Rotating assembly {a} {degrees} CCW.\")\n            a.rotate(radians)\n            nRotations += 1\n\n    runLog.info(f\"Rotated {nRotations} assemblies.\")\n\n\ndef simpleAssemblyRotation(fh):\n    \"\"\"\n    Rotate all pin-detail assemblies that were just shuffled by 60 degrees.\n\n    Parameters\n    ----------\n    fh : FuelHandler object\n        A fully initialized FuelHandler object.\n\n    Notes\n    -----\n    Also, optionally rotate stationary (non-shuffled) assemblies if the setting is set.\n    Obviously, only pin-detail assemblies can be rotated, because homogenized assemblies are isotropic.\n\n    Examples\n    --------\n    >>> simpleAssemblyRotation(fh)\n\n    See Also\n    --------\n    FuelHandler.outage : calls this method based on a user setting\n    \"\"\"\n    runLog.info(\"Rotating assemblies by 60 degrees\")\n    numRotated = 0\n    hist = fh.o.getInterface(\"history\")\n    rot = math.radians(60)\n    for a in hist.getDetailAssemblies():\n        if a in fh.moved or fh.cs[CONF_ASSEM_ROTATION_STATIONARY]:\n            a.rotate(rot)\n            numRotated += 1\n            ring, pos = a.spatialLocator.getRingPos()\n            runLog.extra(\"Rotating Assembly ({0},{1}) to Orientation {2}\".format(ring, pos, 1))\n\n    runLog.extra(\"Rotated {0} assemblies\".format(numRotated))\n"
  },
  {
    "path": "armi/physics/fuelCycle/fuelHandlerFactory.py",
    "content": "# Copyright 2022 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"factory for the FuelHandler.\"\"\"\n\nimport importlib\nfrom pathlib import Path\n\nfrom armi.physics.fuelCycle import fuelHandlers\nfrom armi.physics.fuelCycle.settings import CONF_FUEL_HANDLER_NAME, CONF_SHUFFLE_LOGIC\nfrom armi.utils import directoryChangers, pathTools\n\n\ndef fuelHandlerFactory(operator):\n    \"\"\"\n    Return an instantiated FuelHandler object based on user settings.\n\n    The FuelHandler is expected to be a short-lived object that only lives for\n    the cycle upon which it acts. At the next cycle, this factory will be\n    called again to instantiate a new FuelHandler.\n    \"\"\"\n    cs = operator.cs\n    fuelHandlerClassName = cs[CONF_FUEL_HANDLER_NAME]\n    fuelHandlerModulePath = cs[CONF_SHUFFLE_LOGIC]\n\n    if not fuelHandlerClassName:\n        # give the default FuelHandler. This does not have an implemented outage, but\n        # still offers moving capabilities. Useful when you just need to make explicit\n        # moves but do not have a fully-defined fuel management input.\n        return fuelHandlers.FuelHandler(operator)\n\n    # User did request a custom fuel handler. 
We must go find and import it\n    # from the input directory.\n    with directoryChangers.DirectoryChanger(cs.inputDirectory, dumpOnException=False):\n        try:\n            modulePath = Path(fuelHandlerModulePath)\n            if modulePath.exists() and modulePath.suffix == \".py\":\n                module = pathTools.importCustomPyModule(modulePath)\n            else:\n                module = importlib.import_module(fuelHandlerModulePath)\n\n            if not hasattr(module, fuelHandlerClassName):\n                raise KeyError(\n                    \"The requested fuel handler object {0} is not \"\n                    \"found in the fuel management input file {1} from CWD {2}. \"\n                    \"Check input\"\n                    \"\".format(fuelHandlerClassName, fuelHandlerModulePath, cs.inputDirectory)\n                )\n            # instantiate the custom object\n            fuelHandlerCls = getattr(module, fuelHandlerClassName)\n            fuelHandler = fuelHandlerCls(operator)\n\n            # also get getFactorList function from module level if it's there.\n            # This is a legacy input option, getFactorList should now generally\n            # be an method of the FuelHandler object\n            if hasattr(module, \"getFactorList\"):\n                # staticmethod binds the provided getFactorList function to the\n                # fuelHandler object without passing the implicit self argument.\n                # The __get__ pulls the actual function out from the descriptor.\n                fuelHandler.getFactorList = staticmethod(module.getFactorList).__get__(fuelHandlerCls)\n\n        except (IOError, ImportError):\n            raise ValueError(\n                \"Either the file specified in the `shuffleLogic` setting ({}) or the \"\n                \"fuel handler class name specified in the `fuelHandlerName` setting ({}) \"\n                \"cannot be found. CWD is: {}. 
Update input.\".format(\n                    fuelHandlerModulePath, fuelHandlerClassName, cs.inputDirectory\n                )\n            )\n\n    return fuelHandler\n"
  },
  {
    "path": "armi/physics/fuelCycle/fuelHandlerInterface.py",
    "content": "# Copyright 2022 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A place for the FuelHandler's Interface.\"\"\"\n\nfrom armi import interfaces, runLog\nfrom armi.physics.fuelCycle import fuelHandlerFactory, fuelHandlers\nfrom armi.physics.fuelCycle.settings import (\n    CONF_PLOT_SHUFFLE_ARROWS,\n    CONF_RUN_LATTICE_BEFORE_SHUFFLING,\n    CONF_SHUFFLE_LOGIC,\n    CONF_SHUFFLE_SEQUENCE_FILE,\n)\nfrom armi.utils import plotting\n\n\nclass FuelHandlerInterface(interfaces.Interface):\n    \"\"\"\n    Moves and/or processes fuel in a Standard Operator.\n\n    Fuel management traditionally runs at the beginning of a cycle, before\n    power or temperatures have been updated. This allows pre-run fuel management\n    steps for highly customized fuel loadings. In typical runs, no fuel management\n    occurs at the beginning of the first cycle and the as-input state is left as is.\n\n    .. impl:: ARMI provides a shuffle logic interface.\n        :id: I_ARMI_SHUFFLE\n        :implements: R_ARMI_SHUFFLE\n\n        This interface allows for a user to define custom shuffle logic that\n        modifies to the core model. 
Being based on the :py:class:`~armi.interfaces.Interface`\n        class, it has direct access to the current core model.\n\n        User logic is able to be executed from within the\n        :py:meth:`~armi.physics.fuelCycle.fuelHandlerInterface.FuelHandlerInterface.manageFuel` method,\n        which will use the :py:meth:`~armi.physics.fuelCycle.fuelHandlerFactory.fuelHandlerFactory`\n        to search for a Python file or importable module specified by the case setting ``shuffleLogic``.\n        If it exists, the fuel handler with name specified by the user via the ``fuelHandlerName``\n        case setting will be imported, and any actions in its ``outage`` method\n        will be executed at the :py:meth:`~armi.physics.fuelCycle.fuelHandlerInterface.FuelHandlerInterface.interactBOC`\n        hook.\n\n        If no class with the name specified by the ``fuelHandlerName`` setting is found\n        in the module or file specified by ``shuffleLogic``, an error is returned.\n\n        See the user manual for how the custom shuffle logic module or file should be constructed.\n    \"\"\"\n\n    name = \"fuelHandler\"\n\n    def __init__(self, r, cs):\n        interfaces.Interface.__init__(self, r, cs)\n        # assembly name key, (x, y) values. 
used for making shuffle arrows.\n        self.oldLocations = {}\n        # need order due to nature of moves but with fast membership tests\n        self.moved = []\n        self.cycle = 0\n\n    @staticmethod\n    def specifyInputs(cs):\n        files = {\n            cs.getSetting(settingName): [\n                cs[settingName],\n            ]\n            for settingName in [CONF_SHUFFLE_LOGIC, \"explicitRepeatShuffles\", CONF_SHUFFLE_SEQUENCE_FILE]\n            if cs[settingName]\n        }\n        return files\n\n    def interactBOC(self, cycle=None):\n        \"\"\"\n        Move and/or process fuel.\n\n        Also, if requested, first have the lattice physics system update XS.\n        \"\"\"\n        # if lattice physics is requested, compute it here instead of after fuel management.\n        # This enables XS to exist for branch searching, etc.\n        mc2 = self.o.getInterface(purpose=\"latticePhysics\")\n        xsgm = self.o.getInterface(\"xsGroups\")\n        if mc2 and self.cs[CONF_RUN_LATTICE_BEFORE_SHUFFLING]:\n            runLog.extra(\n                f'Running {mc2} lattice physics before fuel management due to the \"{CONF_RUN_LATTICE_BEFORE_SHUFFLING}\"'\n                \" setting being activated.\"\n            )\n            xsgm.interactBOC(cycle=cycle)\n            mc2.interactBOC(cycle=cycle)\n\n        if self.enabled() and (\n            self.cs[\"loadStyle\"] != \"fromDB\" or self.cs[\"startNode\"] == 0 or (self.cs[\"startCycle\"] != cycle)\n        ):\n            # in restart cases, only do this if restarting at BOC to avoid duplicating shuffles\n            # the logic to accomplish this is a bit long because we don't pass the\n            # timeNode into interactBOC hooks. 
Otherwise it would be much easier\n            # to determine when to call this or not\n            self.manageFuel(cycle)\n\n    def interactEOC(self, cycle=None):\n        if self.r.excore.get(\"sfp\") is not None:\n            runLog.extra(f\"There are {len(self.r.excore['sfp'])} assemblies in the Spent Fuel Pool\")\n\n    def interactEOL(self):\n        \"\"\"Make reports at EOL.\"\"\"\n        self.makeShuffleReport()\n\n    def manageFuel(self, cycle):\n        \"\"\"Perform the fuel management for this cycle.\"\"\"\n        fh = fuelHandlerFactory.fuelHandlerFactory(self.o)\n        fh.prepCore()\n        fh.prepShuffleMap()\n        # take note of where each assembly is located before the outage\n        # for mapping after the outage\n        self.r.core.locateAllAssemblies()\n        shuffleFactors, _ = fh.getFactorList(cycle)\n        fh.outage(shuffleFactors)  # move the assemblies around\n\n        if self.cs[CONF_PLOT_SHUFFLE_ARROWS]:\n            arrows = fh.makeShuffleArrows()\n            plotting.plotFaceMap(\n                self.r.core,\n                \"percentBu\",\n                labelFmt=None,\n                fName=\"{}.shuffles_{}.png\".format(self.cs.caseTitle, self.r.p.cycle),\n                shuffleArrows=arrows,\n            )\n\n    def makeShuffleReport(self):\n        \"\"\"\n        Create a data file listing all the shuffles that occurred in a case.\n\n        This can be used to export shuffling to an external code or to\n        perform explicit repeat shuffling in a restart.\n        It creates a ``*SHUFFLES.txt`` file based on the Reactor.moves structure\n\n        See Also\n        --------\n        readMoves : reads this file and parses it.\n        \"\"\"\n        fname = self.cs.caseTitle + \"-SHUFFLES.txt\"\n        out = open(fname, \"w\")\n        for cycle in range(self.cs[\"nCycles\"]):\n            # do cycle+1 because cycle 0 at t=0 isn't usually interesting\n            # remember, we put cycle 0 in so we could 
do BOL branch searches.\n            # This also syncs cycles up with external physics kernel cycles.\n            out.write(\"Before cycle {0}:\\n\".format(cycle))\n            movesThisCycle = self.r.core.moves.get(cycle)\n            if movesThisCycle is not None:\n                for move in movesThisCycle:\n                    enrichLine = \" \".join([\"{0:.8f}\".format(enrich) for enrich in move.enrichList])\n                    if move.fromLoc in fuelHandlers.FuelHandler.DISCHARGE_LOCS:\n                        # this is a re-entering assembly. Give extra info so repeat shuffles can handle it\n                        out.write(\n                            \"{0} moved to {1} with assembly type {2} ringPosCycle={4} with enrich list: {3}\\n\".format(\n                                move.fromLoc,\n                                move.toLoc,\n                                move.assemType,\n                                enrichLine,\n                                move.ringPosCycle,\n                            )\n                        )\n                    else:\n                        # skip extra info. regular expression in readMoves will handle it just fine.\n                        out.write(\n                            \"{0} moved to {1} with assembly type {2} with enrich list: {3}\\n\".format(\n                                move.fromLoc, move.toLoc, move.assemType, enrichLine\n                            )\n                        )\n            out.write(\"\\n\")\n        out.close()\n\n    def workerOperate(self, cmd):\n        \"\"\"Delegate mpi command to the fuel handler object.\"\"\"\n        fh = fuelHandlerFactory.fuelHandlerFactory(self.o)\n        return fh.workerOperate(cmd)\n"
  },
  {
    "path": "armi/physics/fuelCycle/fuelHandlers.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis module handles fuel management operations such as shuffling, rotation, and\nfuel processing (in fluid systems).\n\nThe :py:class:`FuelHandlerInterface` instantiates a ``FuelHandler``, which is typically a user-defined\nsubclass the :py:class:`FuelHandler` object in custom shuffle-logic input files.\nUsers point to the code modules with their custom fuel handlers using the\n``shuffleLogic`` and ``fuelHandlerName`` settings, as described in :ref:`fuel-management-input`.\nThese subclasses override ``chooseSwaps`` that determine\nthe particular shuffling of a case.\n\nThis module also handles repeat shuffles when doing a restart.\n\"\"\"\n\n# ruff: noqa: F401\nimport inspect\nimport math\nimport os\nimport re\nfrom collections import defaultdict\nfrom dataclasses import dataclass, field\nfrom typing import List, Optional, Tuple\n\nimport numpy as np\nfrom ruamel.yaml import YAML\nfrom ruamel.yaml.constructor import DuplicateKeyError\n\nfrom armi import runLog\nfrom armi.physics.fuelCycle import assemblyRotationAlgorithms as rotAlgos\nfrom armi.physics.fuelCycle.fuelHandlerFactory import fuelHandlerFactory\nfrom armi.physics.fuelCycle.fuelHandlerInterface import FuelHandlerInterface\nfrom armi.physics.fuelCycle.settings import (\n    CONF_ASSEMBLY_ROTATION_ALG,\n    CONF_SHUFFLE_SEQUENCE_FILE,\n)\nfrom armi.reactor import grids\nfrom armi.reactor.flags 
import Flags\nfrom armi.reactor.parameters import ParamLocation\nfrom armi.utils.customExceptions import InputError\n\n\n@dataclass(eq=True)\nclass AssemblyMove:\n    \"\"\"Description of an individual shuffle move.\n\n    Parameters\n    ----------\n    fromLoc : str\n        Original location label.\n    toLoc : str\n        Destination location label.\n    enrichList : list[float]\n        Axial U235 weight percent enrichment values for each block.\n    assemType : str, optional\n        Type of assembly that is moving.\n    ringPosCycle : list[int], optional\n        List of ints of length 3. For assembly retrieval from SFP.\n        (ring, position, cycle) specifies the desired assembly resided at\n        (ring, position) during specified cycle number.\n    rotation : float, optional\n        Degrees of manual rotation to apply after shuffling.\n    \"\"\"\n\n    fromLoc: str\n    toLoc: str\n    enrichList: List[float] = field(default_factory=list)\n    assemType: Optional[str] = None\n    ringPosCycle: Optional[list[int]] = None\n    rotation: Optional[float] = None\n\n    def __post_init__(self):\n        \"\"\"Perform some data checks.\"\"\"\n        errorMsg = (\n            \"invalid (ring, position, cycle) specified for assembly retrieval from SFP\\n\"\n            f\"expected: list of ints, len=3\\nreceived: {self.ringPosCycle}\"\n        )\n        if self.ringPosCycle is not None:\n            if not isinstance(self.ringPosCycle, list):\n                raise TypeError(errorMsg)\n            if len(self.ringPosCycle) != 3:\n                raise ValueError(errorMsg)\n            for val in self.ringPosCycle:\n                if not isinstance(val, int):\n                    raise TypeError(errorMsg)\n\n\n@dataclass\nclass ProcessMoveListResult:\n    \"\"\"Container for the results of :meth:`FuelHandler.processMoveList`.\"\"\"\n\n    loadChains: List[List[str]]\n    loopChains: List[List[str]]\n    enriches: List[List[float]]\n    loadChargeTypes: 
List[Optional[str]]\n    ringPosCycles: List[Optional[list[int]]]\n    dischargeDests: List[str]\n    rotations: List[Tuple[str, float]]\n    alreadyDone: List[str]\n\n\nclass FuelHandler:\n    \"\"\"\n    A fuel handling machine can move fuel around the core and reactor.\n\n    It makes decisions on how to shuffle fuel based on user specifications.\n    It provides some supervisory data tracking, such as having the ability\n    to print out information about all moves that happened in a cycle (without\n    the user needing to explicitly track this information).\n\n    To use this, simply create an input Python file and point to it by path\n    with the ``fuelHandler`` setting. In that file, subclass this object.\n    \"\"\"\n\n    DISCHARGE_LOCS = frozenset({\"SFP\", \"Delete\"})\n    \"\"\"Special strings to indicate an assembly is no longer in the core.\"\"\"\n\n    def __init__(self, operator):\n        # we need access to the operator to find the core, get settings, grab other interfaces, etc.\n        self.o = operator\n        self.moved = []\n        self.pendingRotations = []\n\n    @property\n    def cycle(self):\n        \"\"\"\n        Link to the current cycle number.\n\n        Notes\n        -----\n        This retains backwards compatibility with previous fuel handler inputs.\n        \"\"\"\n        return self.o.r.p.cycle\n\n    @property\n    def cs(self):\n        \"\"\"Link to the Case Settings object.\"\"\"\n        return self.o.cs\n\n    @property\n    def r(self):\n        \"\"\"Link to the Reactor object.\"\"\"\n        return self.o.r\n\n    def outage(self, factor=1.0):\n        \"\"\"\n        Simulates a reactor reload outage. 
Moves and tracks fuel.\n\n        This sets the moveList structure.\n        \"\"\"\n        if self.moved:\n            raise ValueError(\"Cannot perform two outages with same FuelHandler instance.\")\n\n        # determine if a repeat shuffle is occurring or a new shuffle pattern\n        if self.cs[CONF_SHUFFLE_SEQUENCE_FILE]:\n            if not os.path.exists(self.cs[CONF_SHUFFLE_SEQUENCE_FILE]):\n                raise FileNotFoundError(\n                    \"Requested shuffle sequence file {0} does not exist. Cannot perform shuffling. \".format(\n                        self.cs[CONF_SHUFFLE_SEQUENCE_FILE]\n                    )\n                )\n            runLog.important(\"Applying shuffle sequence from {}\".format(self.cs[CONF_SHUFFLE_SEQUENCE_FILE]))\n            # location hist params updated within performShuffle\n            self.performShuffle(self.cs[CONF_SHUFFLE_SEQUENCE_FILE], yaml=True)\n        elif self.cs[\"explicitRepeatShuffles\"]:\n            # repeated shuffle\n            if not os.path.exists(self.cs[\"explicitRepeatShuffles\"]):\n                raise RuntimeError(\n                    \"Requested repeat shuffle file {0} does not exist. Cannot perform shuffling. 
\".format(\n                        self.cs[\"explicitRepeatShuffles\"]\n                    )\n                )\n            runLog.important(\"Repeating a shuffling pattern from {}\".format(self.cs[\"explicitRepeatShuffles\"]))\n            # location hist params updated within performShuffle\n            self.performShuffle(self.cs[\"explicitRepeatShuffles\"])\n        else:\n            # Normal shuffle from user-provided shuffle logic input\n            self.chooseSwaps(factor)\n        self.updateAllLocationHistParams(self.cycle)\n\n        # do rotations if pin-level details are available (requires fluxRecon plugin)\n        if self.cs[\"fluxRecon\"] and self.cs[CONF_ASSEMBLY_ROTATION_ALG]:\n            # Rotate assemblies ONLY IF at least some assemblies have pin detail\n            # The user can choose the algorithm method name directly in the settings\n            if hasattr(rotAlgos, self.cs[CONF_ASSEMBLY_ROTATION_ALG]):\n                rotationMethod = getattr(rotAlgos, self.cs[CONF_ASSEMBLY_ROTATION_ALG])\n                rotationMethod(self)\n            else:\n                raise RuntimeError(\n                    \"FuelHandler {0} does not have a rotation algorithm called {1}.\\nChange your {2} setting\".format(\n                        rotAlgos,\n                        self.cs[CONF_ASSEMBLY_ROTATION_ALG],\n                        CONF_ASSEMBLY_ROTATION_ALG,\n                    )\n                )\n\n        for loc, deg in self.pendingRotations:\n            assem = self.r.core.getAssemblyWithStringLocation(loc)\n            if assem is None:\n                runLog.warning(f\"No assembly found at {loc} for manual rotation\")\n                continue\n            runLog.important(f\"Rotating assembly {assem} in {loc} by {deg} degrees CCW from shuffle file\")\n            assem.rotate(math.radians(deg))\n        self.pendingRotations = []\n\n        # inform the reactor of how many moves occurred so it can put the number in the database.\n  
      if self.moved:\n            numMoved = len(self.moved) * self.r.core.powerMultiplier\n\n            # tell the reactor which assemblies moved where\n            # also tell enrichments of each block in case there's some autoboosting going on.\n            # This is also essential for repeating shuffles in later restart runs.\n            for a in self.moved:\n                try:\n                    ringPosCycle = None\n                    # grab first (ring, pos) at cycle info which can be used to identify this assembly if it goes to SFP\n                    if a.p.ringPosHist:\n                        for cycleNum, rp in enumerate(a.p.ringPosHist):\n                            if isinstance(rp, tuple) and rp[0] not in a.NOT_IN_CORE:\n                                ringPosCycle = [int(rp[0]), int(rp[1]), cycleNum]\n                                break\n                        else:\n                            ringPosCycle = None\n\n                    self.r.core.setMoveList(\n                        self.cycle,\n                        a.lastLocationLabel,\n                        a.getLocation(),\n                        [b.getUraniumMassEnrich() for b in a],\n                        a.getType(),\n                        ringPosCycle,\n                    )\n                except:\n                    runLog.important(\"A fuel management error has occurred. 
\")\n                    runLog.important(\"Trying operation on assembly {}\".format(a))\n                    runLog.important(\"The moved list is {}\".format(self.moved))\n                    raise\n        else:\n            numMoved = 0\n\n        self.o.r.core.p.numMoves = numMoved\n        self.o.r.core.setBlockMassParams()\n\n        runLog.important(\"Fuel handler performed {0} assembly shuffles.\".format(numMoved))\n\n        # now wipe out the self.moved version so it doesn't transmit the assemblies during distributeState\n        moved = self.moved[:]\n        self.moved = []\n        return moved\n\n    def _preconditionLocationHistParam(self, a, cycle):\n        \"\"\"\n        Trim assembly location history param to be consistent with the specified\n        cycle or the reactor cycle parameter in preparation for the current ring and\n        position to be added.\n        list index corresponds to the cycle number n, which will be appended after the\n        parameter is preconditioned to length n (max index is n-1)\n        e.g. i=0 is the initial position, i=1 is the position at BOC1, etc.\n\n        Parameters\n        ----------\n        a : armi.reactor.assembly.Assembly\n        cycle : int\n            cycle number at BOC to update assembly location history\n        \"\"\"\n        # Param length is shorter than expected (data from previous cycles is missing or shuffling was not performed\n        # on a previous cycle)\n        if len(a.p.ringPosHist) < cycle:\n            a.p.ringPosHist += [(a.NOT_CREATED_YET, a.NOT_CREATED_YET)] * (cycle - len(a.p.ringPosHist))\n        # Param length is longer than expected. perhaps a restart analysis of some sort. 
trim trailing data\n        if len(a.p.ringPosHist) > cycle:\n            a.p.ringPosHist = a.p.ringPosHist[:cycle]\n        return a\n\n    def _updateAssemLocationHistParam(self, a, cycle):\n        \"\"\"\n        Update assembly location history parameter with current assembly location for\n        specified cycle number.\n        Index of a.p.ringPosHist corresponds to the cycle number BOC assembly location\n        e.g. i=0 is the initial position, i=1 is the position at BOC1, etc.\n        \"\"\"\n        a = self._preconditionLocationHistParam(a, cycle)\n        # assem param should now be the correct len. append data at correct index.\n        if a.getLocation() in a.NOT_IN_CORE:\n            a.p.ringPosHist.append((a.getLocation(), a.getLocation()))\n        else:\n            ring, pos, _ = grids.locatorLabelToIndices(a.getLocation())\n            a.p.ringPosHist.append((ring, pos))\n\n    def updateAllLocationHistParams(self, cycle):\n        \"\"\"\n        Update location history param for all assemblies with current assembly locations\n        for specified cycle number\n        Index of a.p.ringPosHist corresponds to the cycle number BOC assembly location\n        e.g. i=0 is the initial position, i=1 is the position at BOC1, etc.\n        \"\"\"\n        for a in self.r.core:\n            self._updateAssemLocationHistParam(a, cycle)\n        for a in list(self.r.excore[\"sfp\"]):\n            self._updateAssemLocationHistParam(a, cycle)\n\n    def chooseSwaps(self, shuffleFactors=None):\n        \"\"\"Moves the fuel around or otherwise processes it between cycles.\"\"\"\n        raise NotImplementedError\n\n    @staticmethod\n    def getFactorList(cycle, cs=None, fallBack=False):\n        \"\"\"\n        Return factors between 0 and 1 that control fuel management.\n\n        This is the default shuffle control function. Usually you would override this\n        with your own in a custom shuffleLogic.py file. 
For more details about how this\n        works, refer to :ref:`fuel-management-input`.\n\n        This will get bound to the default FuelHandler as a static method below. This is\n        done to allow a user to mix and match FuelHandler class implementations and\n        getFactorList implementations at run time.\n\n        Notes\n        -----\n        Ultimately, this approach will likely get replaced using the plugin framework, but\n        we aren't there yet.\n        \"\"\"\n        # prefer to keep these 0 through 1 since this is what the branch search can do.\n        defaultFactorList = {\"eqShuffles\": 1}\n        factorSearchFlags = []\n        return defaultFactorList, factorSearchFlags\n\n    def prepCore(self):\n        \"\"\"Aux function to run before XS generation (do moderation, etc).\"\"\"\n        pass\n\n    @staticmethod\n    def _compareAssem(candidate, current):\n        \"\"\"Check whether the candidate assembly should replace the current ideal assembly.\n\n        Given a candidate tuple (diff1, a1) and current tuple (diff2, a2), decide whether the\n        candidate is better than the current ideal. This first compares the diff1 and diff2 values.\n        If diff1 is sufficiently less than diff2, a1 wins, returning True. Otherwise, False. If\n        diff1 and diff2 are sufficiently close, the assembly with the lesser assemNum wins. 
This\n        should result in a more stable comparison than on floating-point comparisons alone.\n        \"\"\"\n        if np.isclose(candidate[0], current[0], rtol=1e-8, atol=1e-8):\n            return candidate[1].p.assemNum < current[1].p.assemNum\n        else:\n            return candidate[0] < current[0]\n\n    @staticmethod\n    def _getParamMax(a, paramName, blockLevelMax=True):\n        \"\"\"Get assembly/block-level maximum parameter value in assembly.\"\"\"\n        multiplier = a.getSymmetryFactor()\n        if multiplier != 1:\n            # handle special case: volume-integrated parameters where symmetry factor is not 1\n            if blockLevelMax:\n                paramCollection = a[0].p\n            else:\n                paramCollection = a.p\n            isVolumeIntegrated = paramCollection.paramDefs[paramName].location == ParamLocation.VOLUME_INTEGRATED\n            multiplier = a.getSymmetryFactor() if isVolumeIntegrated else 1.0\n\n        if blockLevelMax:\n            return a.getChildParamValues(paramName).max() * multiplier\n        else:\n            return a.p[paramName] * multiplier\n\n    def findAssembly(\n        self,\n        targetRing=None,\n        width=(0, 0),\n        param=None,\n        compareTo=None,\n        forceSide=None,\n        exclusions=None,\n        typeSpec=None,\n        mandatoryLocations=None,\n        zoneList=None,\n        excludedLocations=None,\n        minParam=None,\n        minVal=None,\n        maxParam=None,\n        maxVal=None,\n        findMany=False,\n        coords=None,\n        exactType=False,\n        acceptFirstCandidateRing=False,\n        blockLevelMax=False,\n        findFromSfp=False,\n        maxNumAssems=None,\n        circularRingFlag=False,\n    ):\n        r\"\"\"\n        Search reactor for assemblies with various criterion. 
Primarily for shuffling.\n\n        Parameters\n        ----------\n        targetRing : int, optional\n            The ring in which to search\n\n        width : tuple of integers\n            A (size, side) tuple where size is the number of rings on either side to also check.\n            side=1: only look in higher, -1: only look lower, 0: both sides\n\n        param : string, optional\n            A block (if blockLevelMax) or assem level param name such as 'power' or 'percentBu'\n            (requires compareTo).\n\n        compareTo : float or Assembly instance\n            an assembly to be compared to. Alternatively, a floating point number to compare to.\n            Even more alternatively,  an (assembly,mult) or (float,mult) tuple where mult is a\n            multiplier. For example, if you wanted an assembly that had a bu close to half of\n            assembly bob, you'd give param='percentBu', compareTo=(bob,0.5) If you want one with a\n            bu close to 0.3, you'd do param='percentBu',compareTo=0.3. Yes, if you give a (float,\n            multiplier) tuple the code will still work as expected.\n\n        forceSide : bool, optional\n            requires the found assembly to have either 1: higher, -1: lower, None: any param than\n            compareTo\n\n        exclusions : list, optional\n            List of assemblies that will be excluded from the search\n\n        minParam : float or list, optional\n            a parameter to compare to minVal for setting lower bounds. 
If list, must correspond to\n            parameters in minVal in order.\n\n        maxParam : float or list, optional\n            a parameter to compare to maxVal for setting upper bounds of acceptable assemblies.\n            If list, must correspond to parameters in maxVal in order.\n\n        minVal : float or list, optional\n            a value or a (parameter, multiplier) tuple for setting lower bounds\n\n            For instance, if minParam='timeToLimit' and minVal=10, only assemblies with timeToLimit\n            higher than 10 will be returned. (Of course, there is also maxParam and maxVal)\n\n        maxVal : float or list, optional\n            a value or a (parameter, multiplier) tuple for setting upper bounds\n\n        mandatoryLocations : list, optional\n            A list of string-representations of locations in the core for limiting the search to\n            several places. Any locations also included in `excludedLocations` will be excluded.\n\n        excludedLocations : list, optional\n            a list of string-representations of locations in the core that will be excluded from\n            the search\n\n        zoneList : list, optional\n            name of a zone defined in settings.py that will be picked from. Under development\n\n        findMany : bool, optional\n            If True, will return a list of assembies that match. Don't give a param.\n\n        typeSpec : Flags or list of Flags, optional\n            only assemblies with this type list will be returned. If none, only fuel will be found.\n\n        coords : tuple, optional\n            x,y tuple in cm. the fuel handler will try to find an assembly with a center closest to\n            that point\n\n        exactType : bool, optional\n            require type to be exactly equal to what's in the type list. 
So\n            Flags.IGNITER | Flags.FUEL is not Flags.INNER | Flags.IGNITER | Flags.FUEL\n\n        acceptFirstCandidateRing : bool, optional\n            takes the first assembly found in the earliest ring (without searching all rings for a\n            maxBu, for example) So if the candidate rings are 1-10 and we're looking for igniter\n            fuel with a maxBurnup, we don't get the max burnup in all those rings, but rather the\n            igniter with the max burnup in the ring closest to 1. If there are no igniters until\n            ring 4, you will get an igniter in ring 4.\n\n        blockLevelMax : bool, optional\n            If true, the param to search for will be built as the maximum block-level param of this\n            name instead of the assembly param. This avoids the need to assign assembly level params\n            sometimes.\n            default: false.\n\n        findFromSfp : bool, optional\n            If true, will look in the spent-fuel pool instead of in the core.\n\n        maxNumAssems : int, optional\n            The maximum number of assemblies to return. Only relevant if findMany==True\n\n        circularRingFlag : bool, optional\n            Toggle using rings that are based on distance from the center of the reactor\n\n        Notes\n        -----\n        The call signature on this method may have gotten slightly out of hand as valuable\n        capabilities were added in fuel management studies. 
For additional expansion, it may be\n        worth reconsidering the design of these query operations.\n\n        Returns\n        -------\n        Assembly instance or assemList of assembly instances that match criteria, or None if none\n        match\n\n        Examples\n        --------\n        This returns the feed fuel assembly in ring 4 that has a burnup closest to 100%\n        (the highest burnup assembly)::\n\n            feed = self.findAssembly(\n                targetRing=4, width=(0, 0), param=\"maxPercentBu\", compareTo=100, typeSpec=Flags.FEED | Flags.FUEL\n            )\n\n        \"\"\"\n        # list for storing multiple results if findMany is true.\n        assemList = []\n\n        # process input arguments\n        if targetRing is None:\n            # look through the full core\n            targetRing = 0\n            width = (100, 0)\n\n        if exclusions is None:\n            exclusions = []\n\n        if isinstance(minVal, list):\n            # list given with multiple mins\n            minVals = minVal\n            minParams = minParam\n        else:\n            minVals = [minVal]\n            minParams = [minParam]\n\n        if isinstance(maxVal, list):\n            maxVals = maxVal\n            maxParams = maxParam\n        else:\n            # just one given. put it in a list so the below machinery can handle it.\n            maxVals = [maxVal]\n            maxParams = [maxParam]\n\n        if typeSpec is None:\n            # restrict motions to fuel only\n            # not really necessary. 
take this default out if you want to move control rods, etc.\n            typeSpec = Flags.FUEL\n\n        minDiff = (1e60, None)\n\n        # compareTo can either be a tuple, a value, or an assembly\n        # if it's a tuple, it can either be an int/float and a multiplier, or an assembly and a multiplier\n        # if it's not a tuple, the multiplier will be assumed to be 1.0\n\n        mult = 1.0  # if no mult brought in, just assume 1.0\n        if isinstance(compareTo, tuple):\n            # tuple (assem or int/float, multiplier) brought in.\n            # separate it\n            compareTo, mult = compareTo\n\n        if isinstance(compareTo, (float, int)):\n            # floating point or int.\n            compVal = compareTo * mult\n        elif param:\n            # assume compareTo is an assembly\n            compVal = FuelHandler._getParamMax(compareTo, param, blockLevelMax) * mult\n\n        if coords:\n            # find the assembly closest to xt,yt if coords are given without considering params.\n            aTarg = None\n            minD = 1e10\n            xt, yt = coords  # assume (x,y) tuple\n            for a in self.r.core:\n                x, y, _ = a.spatialLocator.getLocalCoordinates()\n                d = (y - yt) ** 2 + (x - xt) ** 2\n                if d < minD:\n                    minD = d\n                    aTarg = a\n\n            return aTarg\n\n        if findFromSfp:\n            # hack to enable SFP searching.\n            candidateRings = [\"SFP\"]\n        else:\n            # set up candidateRings based on targetRing and width. 
The target rings comes first b/c it is preferred.\n            candidateRings = [targetRing]\n            if width[1] <= 0:\n                # 0 or -1 implies that the inner rings can be added.\n                for inner in range(width[0]):\n                    candidateRings.append(targetRing - inner - 1)  # +1 to get 1,2,3 instead of 0,1,2\n            if width[1] >= 0:\n                # if 1, add in the outer rings\n                for outer in range(width[0]):\n                    candidateRings.append(targetRing + outer + 1)\n\n        # get lists of assemblies in each candidate ring. Do it in this order in case we prefer ones in the first.\n        # scan through all assemblies and find the one (or more) that best fits the criteria\n        for ringI, assemsInRings in enumerate(\n            self._getAssembliesInRings(candidateRings, typeSpec, exactType, exclusions, circularRingFlag)\n        ):\n            for a in assemsInRings:\n                innocent = True\n                # Check that this assembly's minParam is > the minimum for each minParam\n                for minIndex, minVal in enumerate(minVals):\n                    minParam = minParams[minIndex]\n                    if minParam:\n                        # a minimum was specified. Check to see if we're ok\n                        if isinstance(minVal, tuple):\n                            # tuple turned in. it's a multiplier and a param\n                            realMinVal = FuelHandler._getParamMax(a, minVal[0], blockLevelMax) * minVal[1]\n                        else:\n                            realMinVal = minVal\n\n                        if FuelHandler._getParamMax(a, minParam, blockLevelMax) < realMinVal:\n                            # this assembly does not meet the minVal specifications. 
Skip it.\n                            innocent = False\n                            break  # for speed (not a big deal here)\n\n                if not innocent:\n                    continue\n\n                # Check upper bounds, to make sure this assembly doesn't have maxParams>maxVals\n                for maxIndex, maxVal in enumerate(maxVals):\n                    maxParam = maxParams[maxIndex]\n                    if maxParam:\n                        if isinstance(maxVal, tuple):\n                            # tuple turned in. it's a multiplier and a param\n                            realMaxVal = FuelHandler._getParamMax(a, maxVal[0], blockLevelMax) * maxVal[1]\n                        else:\n                            realMaxVal = maxVal\n\n                        if FuelHandler._getParamMax(a, maxParam, blockLevelMax) > realMaxVal:\n                            # this assembly has a maxParam that's higher than maxVal and therefore\n                            # doesn't qualify. skip it.\n                            innocent = False\n                            break\n\n                if not innocent:\n                    continue\n\n                # Check to see if this assembly is in the list of candidate locations. if not, skip it.\n                if mandatoryLocations:\n                    if a.getLocation() not in mandatoryLocations:\n                        continue\n\n                if excludedLocations:\n                    if a.getLocation() in excludedLocations:\n                        # this assembly is in the excluded location list. 
skip it.\n                        continue\n\n                # only process of the Assembly is in a Zone\n                if not self.isAssemblyInAZone(zoneList, a):\n                    continue\n\n                # Now find the assembly with the param closest to the target val.\n                if param:\n                    diff = abs(FuelHandler._getParamMax(a, param, blockLevelMax) - compVal)\n\n                    if (\n                        forceSide == 1\n                        and FuelHandler._getParamMax(a, param, blockLevelMax) > compVal\n                        and FuelHandler._compareAssem((diff, a), minDiff)\n                    ):\n                        # forceSide=1, so that means look in rings further out\n                        minDiff = (diff, a)\n                    elif (\n                        forceSide == -1\n                        and FuelHandler._getParamMax(a, param, blockLevelMax) < compVal\n                        and FuelHandler._compareAssem((diff, a), minDiff)\n                    ):\n                        # forceSide=-1, so that means look in rings closer in from the targetRing\n                        minDiff = (diff, a)\n                    elif FuelHandler._compareAssem((diff, a), minDiff):\n                        # no preference of which side, just take the one with the closest param.\n                        minDiff = (diff, a)\n                else:\n                    # no param specified. 
Just return one closest to the target ring\n                    diff = None\n                    if a.spatialLocator.getRingPos()[0] == targetRing:\n                        # short circuit the search\n                        if findMany:\n                            assemList.append((diff, a))\n                            continue\n                        else:\n                            return a\n                    elif abs(a.spatialLocator.getRingPos()[0] - targetRing) < minDiff[0]:\n                        minDiff = (\n                            abs(a.spatialLocator.getRingPos()[0] - targetRing),\n                            a,\n                        )\n\n                if findMany:\n                    # returning many assemblies. If there's a param, we'd like it to be honored by\n                    # ordering this list from smallest diff to largest diff.\n                    assemList.append((diff, a))\n\n            if ringI == 0 and acceptFirstCandidateRing and minDiff[1]:\n                # an acceptable assembly was found in the targetRing (ringI==0)\n                # and the user requested this to be returned. 
Therefore, return it without\n                # scanning through the additional rings.\n                return minDiff[1]\n\n        if not minDiff[1]:\n            # can't find assembly in targetRing with close param to compareTo\n            pass\n\n        if findMany:\n            assemList.sort()  # prefer items that have params that are the closest to the value.\n            # extract the assemblies.\n            assemsInRings = [a for diff, a in assemList]\n            if maxNumAssems:\n                return assemsInRings[:maxNumAssems]\n            else:\n                return assemsInRings\n        else:\n            return minDiff[1]\n\n    @staticmethod\n    def isAssemblyInAZone(zoneList, a):\n        \"\"\"Does the given assembly in one of these zones.\"\"\"\n        if zoneList:\n            # ruff: noqa: SIM110\n            for zone in zoneList:\n                if a.getLocation() in zone:\n                    # Success!\n                    return True\n\n            return False\n        else:\n            # A little counter-intuitively, if there are no zones, we return True.\n            return True\n\n    def _getAssembliesInRings(\n        self,\n        ringList,\n        typeSpec=Flags.FUEL,\n        exactType=False,\n        exclusions=None,\n        circularRingFlag=False,\n    ):\n        \"\"\"\n        Find assemblies in particular rings.\n\n        Parameters\n        ----------\n        ringList : list\n            List of integer ring numbers to find assemblies in. 
Optionally, a string specifying a\n            special location like the SFP (spent fuel pool)\n\n        typeSpec : Flags or iterable of Flags, optional\n            Flag types to restrict assemblies to\n\n        exactType : bool, optional\n            Match the type in typelist exactly\n\n        exclusions : list of Assemblies, optional\n            exclude these assemblies from the results\n\n        circularRingFlag : bool\n            A flag to toggle on using rings that are based on distance from the center of the reactor\n\n        Returns\n        -------\n        assemblyList : list\n            List of assemblies in each ring of the ringList. [[a1,a2,a3],[a4,a5,a6,a7],...]\n        \"\"\"\n        if \"SFP\" in ringList and self.r.excore.get(\"sfp\") is None:\n            sfpAssems = []\n            runLog.warning(\n                f\"{self} can't pull from SFP; no SFP is attached to the reactor {self.r}.\"\n                \"To get assemblies from an SFP, you must add an SFP system to the blueprints\"\n                f\"or otherwise instantiate a SpentFuelPool object as r.excore['sfp']\"\n            )\n        else:\n            sfpAssems = list(self.r.excore[\"sfp\"])\n\n        assemblyList = [[] for _i in range(len(ringList))]  # empty lists for each ring\n        if exclusions is None:\n            exclusions = []\n        exclusions = set(exclusions)\n\n        if circularRingFlag:\n            assemListTmp = []\n            assemListTmp2 = []\n            if ringList[0] == \"SFP\":\n                # kind of a hack for now. 
Need the capability.\n                assemblyList = sfpAssems\n            else:\n                for i, ringNumber in enumerate(ringList):\n                    assemListTmp = self.r.core.getAssembliesInCircularRing(ringNumber, typeSpec, exactType, exclusions)\n                    for a in assemListTmp:\n                        if a in exclusions:\n                            continue\n                        if not a.hasFlags(typeSpec, exact=exactType):\n                            continue\n                        # save only the assemblies not in the exclusions and with the proper type\n                        assemListTmp2.append(a)\n                    # make the list of lists of assemblies\n                    assemblyList[i] = assemListTmp2\n        else:\n            if ringList[0] == \"SFP\":\n                # kind of a hack for now. Need the capability.\n                assemList = sfpAssems\n            else:\n                assemList = self.r.core.getAssemblies()\n\n            for a in assemList:\n                if a in exclusions:\n                    continue\n                if not a.hasFlags(typeSpec, exact=exactType):\n                    continue\n\n                if a.getLocation() == \"SFP\":\n                    ring = \"SFP\"\n                else:\n                    ring = a.spatialLocator.getRingPos()[0]\n                if ring in ringList:\n                    # keep it in the right order\n                    assemblyList[ringList.index(ring)].append(a)\n\n        return assemblyList\n\n    def swapAssemblies(self, a1, a2):\n        \"\"\"Moves a whole assembly from one place to another.\n\n        .. 
impl:: User-specified blocks can be left in place during within-core swaps.\n            :id: I_ARMI_SHUFFLE_STATIONARY0\n            :implements: R_ARMI_SHUFFLE_STATIONARY\n\n            Before assemblies are moved, the ``_transferStationaryBlocks`` class method is called to\n            check if there are any block types specified by the user as stationary via the\n            ``stationaryBlockFlags`` case setting. Using these flags, blocks are gathered from each\n            assembly which should remain stationary and checked to make sure that both assemblies\n            have the same number and same height of stationary blocks. If not, return an error.\n\n            If all checks pass, the :py:meth:`~armi.reactor.assemblies.Assembly.remove` and\n            :py:meth:`~armi.reactor.assemblies.Assembly.insert` methods are used to swap the\n            stationary blocks between the two assemblies.\n\n            Once this process is complete, the actual assembly movement can take place. Through this\n            process, the stationary blocks remain in the same core location.\n\n        Parameters\n        ----------\n        a1 : :py:class:`Assembly <armi.reactor.assemblies.Assembly>`\n            The first assembly\n        a2 : :py:class:`Assembly <armi.reactor.assemblies.Assembly>`\n            The second assembly\n\n        See Also\n        --------\n        dischargeSwap : swap assemblies where one is outside the core and the other is inside\n        \"\"\"\n        if a1 is None or a2 is None:\n            runLog.warning(\"Cannot swap None assemblies. Check your findAssembly results. 
Skipping swap\")\n            return\n\n        runLog.extra(\"Swapping {} with {}.\".format(a1, a2))\n        # add assemblies into the moved location\n        for a in [a1, a2]:\n            if a not in self.moved:\n                self.moved.append(a)\n        oldA1Location = a1.spatialLocator\n        self._transferStationaryBlocks(a1, a2)\n        a1.moveTo(a2.spatialLocator)\n        a2.moveTo(oldA1Location)\n\n    def _transferStationaryBlocks(self, assembly1, assembly2):\n        \"\"\"\n        Exchange the stationary blocks (e.g. grid plate) between the moving assemblies.\n\n        These blocks in effect are not moved at all.\n        \"\"\"\n        # grab stationary block flags\n        sBFList = self.r.core.stationaryBlockFlagsList\n\n        # identify stationary blocks for assembly 1\n        a1StationaryBlocks = [\n            [block, block.spatialLocator.k] for block in assembly1 if any(block.hasFlags(sbf) for sbf in sBFList)\n        ]\n        # identify stationary blocks for assembly 2\n        a2StationaryBlocks = [\n            [block, block.spatialLocator.k] for block in assembly2 if any(block.hasFlags(sbf) for sbf in sBFList)\n        ]\n\n        # check for any inconsistencies in stationary blocks and ensure alignment\n        if [block[1] for block in a1StationaryBlocks] != [block[1] for block in a2StationaryBlocks]:\n            raise ValueError(\n                \"\"\"Different number and/or locations of stationary blocks \n                 between {} (Stationary Blocks: {}) and {} (Stationary Blocks: {}).\"\"\".format(\n                    assembly1, a1StationaryBlocks, assembly2, a2StationaryBlocks\n                )\n            )\n        if a1StationaryBlocks and a2StationaryBlocks:\n            if a1StationaryBlocks[-1][0].p.ztop != a2StationaryBlocks[-1][0].p.ztop:\n                runLog.warning(\n                    \"\"\"Difference in top elevation of stationary blocks \n                     between {} (Stationary Blocks: {}, 
Elevation at top of stationary blocks {}) \n                     and {} (Stationary Blocks: {}, Elevation at top of stationary blocks {}))\"\"\".format(\n                        assembly1,\n                        a1StationaryBlocks,\n                        a1StationaryBlocks[-1][0].p.ztop,\n                        assembly2,\n                        a2StationaryBlocks,\n                        a2StationaryBlocks[-1][0].p.ztop,\n                    )\n                )\n\n        # swap stationary blocks\n        for (assem1Block, assem1BlockIndex), (assem2Block, assem2BlockIndex) in zip(\n            a1StationaryBlocks, a2StationaryBlocks\n        ):\n            # remove stationary blocks\n            assembly1.remove(assem1Block)\n            assembly2.remove(assem2Block)\n            # insert stationary blocks\n            assembly1.insert(assem1BlockIndex, assem2Block)\n            assembly2.insert(assem2BlockIndex, assem1Block)\n\n    @staticmethod\n    def validateLoc(loc, cycle):\n        \"\"\"Validate a location label from a shuffle YAML file.\n\n        Parameters\n        ----------\n        loc : str\n            Location label to validate.\n        cycle : int\n            Cycle currently being processed, used for context in error messages.\n        \"\"\"\n        if loc in FuelHandler.DISCHARGE_LOCS:\n            return\n\n        try:\n            grids.locatorLabelToIndices(loc)\n        except Exception:\n            raise InputError(\n                f\"Invalid location label {loc!r} in cycle {cycle} in shuffle YAML. \"\n                \"Location labels must be non-empty and contain integers.\"\n            )\n\n    def dischargeSwap(self, incoming, outgoing, toSfp=False):\n        \"\"\"Removes one assembly from the core and replace it with another assembly.\n\n        .. 
impl:: User-specified blocks can be left in place for the discharge swap.\n            :id: I_ARMI_SHUFFLE_STATIONARY1\n            :implements: R_ARMI_SHUFFLE_STATIONARY\n\n            Before assemblies are moved, the ``_transferStationaryBlocks`` class method is called to\n            check if there are any block types specified by the user as stationary via the\n            ``stationaryBlockFlags`` case setting. Using these flags, blocks are gathered from each\n            assembly which should remain stationary and checked to make sure that both assemblies\n            have the same number and same height of stationary blocks. If not, return an error.\n\n            If all checks pass, the :py:meth:`~armi.reactor.assemblies.Assembly.remove` and\n            :py:meth:`~armi.reactor.assemblies.Assembly.insert` methods are used to swap the\n            stationary blocks between the two assemblies.\n\n            Once this process is complete, the actual assembly movement can take place. 
Through this\n            process, the stationary blocks from the outgoing assembly remain in the original core\n            position, while the stationary blocks from the incoming assembly are discharged with the\n            outgoing assembly.\n\n        Parameters\n        ----------\n        incoming : :py:class:`Assembly <armi.reactor.assemblies.Assembly>`\n            The assembly getting swapped into the core.\n        outgoing : :py:class:`Assembly <armi.reactor.assemblies.Assembly>`\n            The assembly getting discharged out the core.\n        toSfp : bool, optional\n            If True, store the discharged assembly in the SFP regardless of the\n            ``trackAssems`` setting.\n\n        See Also\n        --------\n        swapAssemblies : swaps assemblies that are already in the core\n        \"\"\"\n        runLog.debug(\"Discharge swapping {} for {}.\".format(incoming, outgoing))\n        if incoming is None or outgoing is None:\n            runLog.warning(\"Cannot discharge swap None assemblies. Check your findAssembly calls. Skipping\")\n            return\n\n        # add assemblies into the moved location\n        # keep it unique so we don't get artificially inflated numMoves\n        for a in [incoming, outgoing]:\n            if a not in self.moved:\n                self.moved.append(a)\n\n        self._transferStationaryBlocks(incoming, outgoing)\n\n        # replace the goingOut guy.\n        loc = outgoing.spatialLocator\n        # say it happened at the end of the previous cycle by sending cycle-1\n        # to removeAssembly, which will look up EOC of last cycle,\n        # which, coincidentally is the same time we're at right now at BOC.\n        self.r.core.removeAssembly(outgoing, addToSFP=toSfp)\n\n        # adjust the assembly multiplicity so that it does not forget how many it really\n        # represents. 
This allows us to discharge an assembly from any location in\n        # fractional-core models where the central location may only be one assembly,\n        # whereas other locations are more, and keep proper track of things. In the\n        # future, this mechanism may be used to handle symmetry in general.\n        outgoing.p.multiplicity = len(loc.getSymmetricEquivalents()) + 1\n\n        if self.r.excore.get(\"sfp\") is not None:\n            if incoming in self.r.excore[\"sfp\"].getChildren():\n                # pull it out of the sfp if it's in there.\n                runLog.extra(\"removing {0} from the sfp\".format(incoming))\n                self.r.excore[\"sfp\"].remove(incoming)\n\n        incoming.p.multiplicity = 1\n        self.r.core.add(incoming, loc)\n\n    def swapCascade(self, assemList):\n        \"\"\"\n        Perform swaps on a list of assemblies.\n\n        Parameters\n        ----------\n        assemList: list\n            A list of assemblies to be shuffled.\n\n        Notes\n        -----\n        [goingOut,inter1,inter2,goingIn]  will go to\n        [inter1, inter2, goingIn, goingOut] in terms of positions\n        or, in ASCII art::\n\n             >---------------v\n             |               |\n            [A  <- B <- C <- D]\n\n        \"\"\"\n        # first check for duplicates\n        for assem in assemList:\n            if assemList.count(assem) != 1:\n                runLog.warning(f\"{assem} is in the cascade more than once.\")\n\n        # now swap\n        levels = len(assemList)\n        for level in range(levels - 1):\n            if not assemList[level + 1]:\n                runLog.info(\n                    f\"Skipping level {level + 1} in the cascade because it is None. 
Be careful, \"\n                    \"this might cause an unexpected shuffling order.\"\n                )\n                continue\n            self.swapAssemblies(assemList[0], assemList[level + 1])\n\n    def performShuffle(self, shuffleFile, yaml=False):\n        \"\"\"\n        Execute shuffling instructions from a previous run or YAML file.\n\n        Parameters\n        ----------\n        shuffleFile : str\n            Path to the shuffle sequence file.\n        yaml : bool, optional\n            If True, interpret ``shuffleFile`` as a YAML shuffle sequence.\n\n        Returns\n        -------\n        moved : list\n            List of assemblies that moved this cycle.\n\n        Notes\n        -----\n        Typically the shuffle file from a previous run will be ``caseTitle``-\"SHUFFLES.txt\".\n\n        See Also\n        --------\n        doRepeatShuffle : Performs moves as processed by this method\n        processMoveList : Converts a stored list of moves into a functional list of assemblies to swap\n        makeShuffleReport : Creates the file that is processed here\n        \"\"\"\n        # read moves file\n        cycle = self.r.p.cycle\n        if cycle == 0:\n            # if cycle is 0, we are at the beginning of the first cycle\n            # this is a special case where we don't have any moves\n            # so we return an empty list\n            return []\n\n        if yaml:\n            moves, swaps = self.readMovesYaml(shuffleFile)\n        else:\n            moves = self.readMoves(shuffleFile)\n            swaps = {}\n\n        # setup the load and loop chains to be run per cycle\n        moveList = moves[cycle]\n        swapList = swaps.get(cycle, [])\n        moveData = self.processMoveList(moveList)\n\n        # Now have the move locations\n        moved = self.doRepeatShuffle(\n            moveData.loadChains,\n            moveData.loopChains,\n            moveData.enriches,\n            moveData.loadChargeTypes,\n            
moveData.ringPosCycles,\n            moveData.dischargeDests,\n        )\n\n        # Apply any swaps after performing cascades\n        for loc1, loc2 in swapList:\n            a1 = self.r.core.getAssemblyWithStringLocation(loc1)\n            a2 = self.r.core.getAssemblyWithStringLocation(loc2)\n            if a1 is None or a2 is None:\n                runLog.warning(f\"Could not perform swap between {loc1} and {loc2}\")\n                continue\n            self.swapAssemblies(a1, a2)\n            moved.extend([a1, a2])\n        self.pendingRotations = moveData.rotations\n\n        return moved\n\n    @staticmethod\n    def readMoves(fname):\n        r\"\"\"\n        Reads a shuffle output file and sets up the moves dictionary.\n\n        Parameters\n        ----------\n        fname : str\n            The shuffles file to read\n\n        Returns\n        -------\n        moves : dict\n            A dictionary of all the moves. Keys are the cycle number. Values are a list\n            of :class:`~armi.physics.fuelCycle.fuelHandlers.AssemblyMove` objects, one for each individual\n            move that happened in the cycle. ``oldLoc`` and ``newLoc`` are string\n            representations of the locations and ``enrichList`` is a list of mass\n            enrichments from bottom to top.\n\n        See Also\n        --------\n        performShuffle : reads this file and executes the shuffling\n        outage : creates the moveList in the first place.\n        makeShuffleReport : writes the file that is read here.\n        \"\"\"\n        try:\n            f = open(fname)\n        except OSError:\n            raise RuntimeError(\n                \"Could not find/open repeat shuffle file {} in working directory {}\".format(fname, os.getcwd())\n            )\n\n        moves = {}\n        numMoves = 0\n        for line in f:\n            if \"ycle \" in line:\n                # Used to say \"Cycle 1 at 0.0 years\". 
Now says: \"Before cycle 1 at 0.0 years\" to be more specific.\n                # This RE allows backwards compatibility.\n                # Later, we removed the at x years\n                m = re.search(r\"ycle (\\d+)\", line)\n                cycle = int(m.group(1))\n                moves[cycle] = []\n            elif \"assembly\" in line:\n                # this is the new load style where an actual assembly type is written to the shuffle logic\n                # due to legacy reasons, the assembly type will be put into group 4\n                pat = (\n                    r\"([A-Za-z0-9!\\-]+) moved to ([A-Za-z0-9!\\-]+) with assembly type \"\n                    + r\"([A-Za-z0-9!\\s]+)\\s*(ringPosCycle=\\[.*\\])?\\s*with enrich list: (.+)\"\n                )\n                m = re.search(pat, line)\n                if not m:\n                    raise InputError('Failed to parse line \"{0}\" in shuffle file'.format(line))\n                oldLoc = m.group(1)\n                newLoc = m.group(2)\n                assemType = m.group(3).strip()  # take off any possible trailing whitespace\n                ringPosCycle = m.group(4)  # will be None for legacy shuffleLogic files. 
(pre 2013-08)\n                if ringPosCycle:\n                    ringPosCycle = eval(ringPosCycle.split(\"=\")[1])  # extract the assembly ring, position and cycle.\n                enrichList = [float(i) for i in m.group(5).split()]\n                moves[cycle].append(AssemblyMove(oldLoc, newLoc, enrichList, assemType, ringPosCycle))\n                numMoves += 1\n            elif \"moved\" in line:\n                # very old shuffleLogic file.\n                runLog.warning(\n                    \"Using old *.SHUFFLES.txt loading file\",\n                    single=True,\n                    label=\"Using old shuffles file\",\n                )\n                m = re.search(\n                    \"([A-Za-z0-9!]+) moved to ([A-Za-z0-9!]+) with enrich list: (.+)\",\n                    line,\n                )\n                if not m:\n                    raise InputError('Failed to parse line \"{0}\" in shuffle file'.format(line))\n                oldLoc = m.group(1)\n                newLoc = m.group(2)\n                enrichList = [float(i) for i in m.group(3).split()]\n                # old loading style, just assume that there is a booster as our surrogate\n                moves[cycle].append(AssemblyMove(oldLoc, newLoc, enrichList))\n                numMoves += 1\n\n        f.close()\n\n        runLog.info(\"Read {0} moves over {1} cycles\".format(numMoves, len(moves.keys())))\n        return moves\n\n    @staticmethod\n    def readMovesYaml(fname):\n        r\"\"\"\n        Read a shuffle file in YAML format.\n\n        A cascade with no explicit final location deletes the assembly\n        by default.\n\n        Parameters\n        ----------\n        fname : str\n            Path to the YAML-formatted shuffle file.\n\n        Returns\n        -------\n        moves : dict\n            Mapping of cycle numbers to lists of\n            :class:`~armi.physics.fuelCycle.fuelHandlers.AssemblyMove` objects that\n            describe the shuffle 
sequence.\n        swaps : dict\n            Mapping of cycle numbers to lists of location-pair tuples describing\n            assemblies to be swapped.\n        \"\"\"\n        # 1. load YAML file\n        try:\n            with open(fname, \"r\") as stream:\n                yaml = YAML(typ=\"safe\")\n                data = yaml.load(stream)\n        except DuplicateKeyError as e:\n            raise InputError(str(e)) from e\n        except OSError as ee:\n            raise RuntimeError(\n                f\"Could not find/open repeat shuffle file {fname!r} in working directory {os.getcwd()}: {ee}\"\n            ) from ee\n\n        # 2. perform various validation tests on the YAML data\n        if \"sequence\" not in data:\n            raise InputError(\"Shuffle YAML missing required 'sequence' mapping\")\n\n        moves = {}\n        swaps = defaultdict(list)\n        # cycles may be provided in any order; verify only that there are no gaps\n        cycleNums = {int(c) for c in data[\"sequence\"].keys()}\n        if cycleNums:\n            expected = set(range(min(cycleNums), max(cycleNums) + 1))\n            missing = sorted(expected - cycleNums)\n            if missing:\n                if len(missing) == 1:\n                    raise InputError(f\"Missing cycle {missing[0]} in shuffle sequence\")\n                raise InputError(f\"Missing cycles {missing} in shuffle sequence\")\n\n        # 3. parse YAML file into shuffle data\n        for cycleKey, actions in data[\"sequence\"].items():\n            cycle = int(cycleKey)\n            moves[cycle] = []\n            seenLocs = set()\n\n            if actions is None and cycle != 0:\n                runLog.warning(f\"Cycle {cycleKey} has no shuffle actions defined, skipping.\")\n                continue\n\n            elif cycle == 0:\n                raise InputError(\n                    \"Cycle 0 is not allowed in shuffle YAML. 
\"\n                    \"This cycle is reserved for the initial core loading.\"\n                    \"Shuffling is available at the beginning of cycle 1\"\n                )\n\n            for action in actions:\n                allowed = {\"cascade\", \"fuelEnrichment\", \"extraRotations\", \"swap\", \"ringPosCycle\"}\n                unknown = set(action) - allowed\n                if unknown:\n                    raise InputError(f\"Unknown action keys {unknown} in shuffle YAML\")\n\n                if \"cascade\" in action:\n                    chain = list(action[\"cascade\"])\n                    if len(chain) < 2:\n                        raise InputError(\"cascade must contain at least two entries\")\n                    if any(not isinstance(item, str) for item in chain):\n                        raise InputError(\"cascade entries must be strings\")\n\n                    if chain[0] == \"SFP\":\n                        # move an assembly from the SFP into the Core\n                        assemType = None\n                        locs = chain\n                        if len(locs) < 2:\n                            raise InputError(\"cascade starting with SFP must include a destination location\")\n                    else:\n                        # move an assembly around the Core\n                        assemType = chain[0]\n                        locs = chain[1:]\n                        if not locs:\n                            raise InputError(\"cascade must contain at least one location after the assembly type\")\n\n                    for loc in locs:\n                        FuelHandler.validateLoc(loc, cycle)\n                        if loc not in FuelHandler.DISCHARGE_LOCS and loc in seenLocs:\n                            raise InputError(f\"Location {loc} appears in multiple cascades in cycle {cycle}\")\n                        seenLocs.add(loc)\n\n                    enrich = []\n                    enrichList = 
action.get(\"fuelEnrichment\", [])\n                    try:\n                        enrich = [float(e) for e in enrichList]\n                    except (TypeError, ValueError):\n                        raise InputError(\"fuelEnrichment values must be numeric. Got {enrichList}\")\n                    if any(e < 0 or e > 1 for e in enrich):\n                        raise InputError(\"fuelEnrichment values must be between 0 and 1. Got {enrich}\")\n\n                    ringPosCycle = action.get(\"ringPosCycle\")\n                    if locs[0] == \"SFP\":\n                        if ringPosCycle is None:\n                            raise InputError(\"ringPosCycle required when loading from SFP\")\n                        moves[cycle].append(AssemblyMove(\"SFP\", locs[1], [], None, ringPosCycle))\n                        startIdx = 1\n                    else:\n                        if ringPosCycle is not None:\n                            raise InputError(\"ringPosCycle is only valid when loading from SFP\")\n                        moves[cycle].append(AssemblyMove(\"LoadQueue\", locs[0], enrich, assemType))\n                        startIdx = 0\n\n                    for i in range(startIdx, len(locs) - 1):\n                        moves[cycle].append(AssemblyMove(locs[i], locs[i + 1]))\n                    if locs[-1] not in FuelHandler.DISCHARGE_LOCS:\n                        moves[cycle].append(AssemblyMove(locs[-1], \"Delete\"))\n\n                elif \"swap\" in action:\n                    swap = action[\"swap\"]\n                    if not isinstance(swap, list) or len(swap) != 2:\n                        raise InputError(\"swap must be a list of two location labels, got {swap}\")\n                    if any(not isinstance(item, str) for item in swap):\n                        raise InputError(\"swap entries must be strings, got {swap}\")\n                    for loc in swap:\n                        FuelHandler.validateLoc(loc, cycle)\n                  
  loc1, loc2 = swap\n                    swaps[cycle].append((loc1, loc2))\n\n                elif \"extraRotations\" in action:\n                    for loc, angle in action.get(\"extraRotations\", {}).items():\n                        FuelHandler.validateLoc(loc, cycle)\n                        moves[cycle].append(AssemblyMove(loc, loc, rotation=float(angle)))\n\n                else:\n                    raise InputError(f\"Unable to process {action} in {cycle}\")\n\n        return moves, dict(swaps)\n\n    @staticmethod\n    def trackChain(moveList, startingAt, alreadyDone=None):\n        r\"\"\"\n        Builds a chain of locations based on starting location.\n\n        Notes\n        -----\n        Takes a moveList and extracts chains. Remembers all it touches.\n        If A moved to B, C moved to D, and B moved to C, this returns\n        A, B, C ,D.\n\n        Used in some monte carlo physics writers and in performShuffle\n\n        Parameters\n        ----------\n        moveList : list\n            a list of :class:`~armi.physics.fuelCycle.fuelHandlers.AssemblyMove`\n            objects that occurred at a single outage.\n\n        startingAt : str\n            A location label where the chain would start. This is important because the discharge\n            moves are built when the SFP is found in a move. 
This method must find all\n            assemblies in the chain leading up to this particular discharge.\n\n        alreadyDone : list\n            A list of locations that have already been tracked.\n\n        Returns\n        -------\n        chain : list\n            The chain as a location list in order\n        enrich : list\n            The axial enrichment distribution of the load assembly.\n        assemType : str\n            The type of the assembly\n        loadName or ringPosCycle : [str, tuple[int, int, int]]\n            The assembly name of the load assembly, or the ringPosHist identifier\n        destination : str\n            Location where the first assembly in the chain is discharged\n\n        See Also\n        --------\n        performShuffle\n        processMoveList\n        \"\"\"\n        if alreadyDone is None:\n            alreadyDone = []\n\n        enrich = None  # in case this is a load chain, prep for getting enrich.\n        loadName = None\n        assemType = None  # in case this is a load chain, prep for getting an assembly type\n        destination = None\n\n        for move in moveList:\n            fromLoc = move.fromLoc\n            toLoc = move.toLoc\n            if toLoc in FuelHandler.DISCHARGE_LOCS and \"LoadQueue\" in fromLoc:\n                # skip dummy moves\n                continue\n            elif (fromLoc, toLoc) in alreadyDone:\n                # skip this pair\n                continue\n\n            elif startingAt in fromLoc:\n                # looking for chain involving toLoc\n                # back-track the chain of moves\n                chain = [fromLoc]\n                destination = toLoc\n                safeCount = 0  # to break out of crazy loops.\n                ringPosCycle = None\n                complete = False\n                while (\n                    chain[-1] not in ({\"LoadQueue\"} | FuelHandler.DISCHARGE_LOCS) and not complete and safeCount < 100\n                ):\n                   
 # look for something going to where the previous one is from\n                    lookingFor = chain[-1]\n                    for innerMove in moveList:\n                        cFromLoc = innerMove.fromLoc\n                        cToLoc = innerMove.toLoc\n                        cEnrichList = innerMove.enrichList\n                        cAssemblyType = innerMove.assemType\n                        cRingPosCycle = innerMove.ringPosCycle\n                        if cToLoc == lookingFor:\n                            chain.append(cFromLoc)\n                            if cFromLoc in ({\"LoadQueue\"} | FuelHandler.DISCHARGE_LOCS):\n                                # charge-discharge loop complete.\n                                enrich = cEnrichList\n                                ringPosCycle = cRingPosCycle\n                                assemType = cAssemblyType\n                            # break after finding the first predecessor to avoid duplicates\n                            break\n\n                    if chain[-1] == startingAt:\n                        # non-charging loop complete\n                        complete = True\n\n                    safeCount += 1\n\n                if not safeCount < 100:\n                    raise RuntimeError(\"Chain tracking got too long. Check moves.\\n{0}\".format(chain))\n\n                # delete the last item, it's loadqueue location or the startingFrom\n                # location.\n                chain.pop()\n\n                # chain tracked. 
Can jump out of loop early.\n                return chain, enrich, assemType, ringPosCycle, destination\n\n        # if we get here, the startingAt location was not found.\n        runLog.warning(\"No chain found starting at {0}\".format(startingAt))\n        return [], enrich, assemType, loadName, destination\n\n    def processMoveList(self, moveList) -> ProcessMoveListResult:\n        \"\"\"\n        Processes a move list and extracts fuel management loops and charges.\n\n        Parameters\n        ----------\n        moveList : list\n            A list of :class:`~armi.physics.fuelCycle.fuelHandlers.AssemblyMove` objects describing each\n            move.\n\n        Returns\n        -------\n        ProcessMoveListResult\n            Structured information describing the move chains, enrichment\n            distributions, and other shuffle data. Attributes include:\n\n            loadChains : list[list[str]]\n                Moves that include discharges.\n            loopChains : list[list[str]]\n                Moves without discharges.\n            enriches : list[list[float]]\n                Axial enrichment distribution for each load assembly.\n            loadChargeTypes : list[Optional[str]]\n                Assembly types for each load chain.\n            loadNames : list[Optional[str]]\n                Assembly names of loads (e.g., from SFP).\n            dischargeDests : list[str]\n                Final destinations for discharged assemblies (e.g., ``SFP`` or ``Delete``).\n            rotations : list[tuple[str, float]]\n                Manual rotations to apply (location, degrees).\n            alreadyDone : list[str]\n                Locations already processed while tracking chains.\n\n        Notes\n        -----\n        Used in some Monte Carlo interfaces to convert ARMI moves to their format moves. 
Also used in\n        repeat shuffling.\n\n        See Also\n        --------\n        makeShuffleReport : writes the file that is being processed\n        performShuffle : uses this to repeat shuffles\n        \"\"\"\n        alreadyDone = []\n        loadChains = []  # moves that have discharges\n        loadChargeTypes = []  # the assembly types (str) to be used in a load chain.\n        loopChains = []  # moves that don't have discharges\n        enriches = []  # enrichments of each loadChain\n        ringPosCycles = []  # assembly ring, position, at cycle (to read from SFP)\n        dischargeDests = []  # final destinations for discharged assemblies\n        rotations = []\n\n        # first handle all charge/discharge chains by looking for things going to SFP/Delete\n        for move in moveList:\n            fromLoc = move.fromLoc\n            toLoc = move.toLoc\n            rot = move.rotation\n            if fromLoc == toLoc:\n                if rot is not None:\n                    rotations.append((fromLoc, rot))\n                continue\n            if toLoc in self.DISCHARGE_LOCS and \"LoadQueue\" in fromLoc:\n                # skip dummy moves\n                continue\n\n            elif toLoc in self.DISCHARGE_LOCS:\n                # discharge. 
Track chain.\n                chain, enrichList, assemType, ringPosCycle, dest = FuelHandler.trackChain(moveList, startingAt=fromLoc)\n                runLog.extra(\"Load Chain with load assem {0}: {1}\".format(assemType, chain))\n                loadChains.append(chain)\n                enriches.append(enrichList)\n                loadChargeTypes.append(assemType)\n                ringPosCycles.append(ringPosCycle)\n                dischargeDests.append(dest)\n                # track all the locations we saw already so we\n                # don't use them in the loop moves.\n                alreadyDone.extend(chain)\n\n        # go through again, looking for stuff that isn't in chains.\n        # put them in loop type 3 moves (arbitrary order)\n        for move in moveList:\n            fromLoc = move.fromLoc\n            toLoc = move.toLoc\n            if fromLoc == toLoc:\n                # rotation or no-op\n                continue\n            if toLoc in self.DISCHARGE_LOCS or fromLoc in ({\"LoadQueue\"} | self.DISCHARGE_LOCS):\n                # skip loads/discharges; they're already done.\n                continue\n            elif fromLoc in alreadyDone:\n                # skip repeats\n                continue\n            else:\n                # normal move\n                chain, _enrichList, _assemType, _loadAssemName, _dest = FuelHandler.trackChain(\n                    moveList, startingAt=fromLoc\n                )\n                loopChains.append(chain)\n                alreadyDone.extend(chain)\n\n                runLog.extra(\"Loop Chain: {0}\".format(chain))\n\n        return ProcessMoveListResult(\n            loadChains=loadChains,\n            loopChains=loopChains,\n            enriches=enriches,\n            loadChargeTypes=loadChargeTypes,\n            ringPosCycles=ringPosCycles,\n            dischargeDests=dischargeDests,\n            rotations=rotations,\n            alreadyDone=alreadyDone,\n        )\n\n    def 
doRepeatShuffle(self, loadChains, loopChains, enriches, loadChargeTypes, ringPosCycles, dischargeDests):\n        r\"\"\"\n        Actually does the fuel movements required to repeat a shuffle order.\n\n        Parameters\n        ----------\n        loadChains : list\n            list of lists of location labels for each load chain (with charge/discharge)\n        loopChains : list\n            list of lists of location labels for each loop chain (no charge/discharge)\n        enriches : list\n            The block enrichment distribution of each load assembly\n        loadChargeTypes :list\n            The types of assemblies that get charged.\n        ringPosCycles : list\n            The ring, pos, and cycle of assemblies that get brought into the core (useful for pulling out\n            of SFP for round 2, etc.)\n        dischargeDests : list\n            Final destination for each load chain (e.g., ``SFP`` or ``Delete``)\n\n        See Also\n        --------\n        performShuffle  : coordinates the moves for this cycle\n        processMoveList : builds the input lists\n\n        Notes\n        -----\n        This is a helper function for performShuffle\n        \"\"\"\n        moved = []\n\n        # shuffle all of the load chain assemblies (These include discharges to SFP\n        # and loads from Loadqueue)\n\n        # build a lookup table of locations throughout the current core and cache it.\n        locContents = self.r.core.makeLocationLookup(assemblyLevel=True)\n\n        # perform load swaps (with charge/discharge)\n        for assemblyChain, enrichList, assemblyType, ringPosCycle, dest in zip(\n            loadChains, enriches, loadChargeTypes, ringPosCycles, dischargeDests\n        ):\n            # convert the labels into actual assemblies to be swapped\n            assemblyList = self.r.core.getLocationContents(assemblyChain, assemblyLevel=True, locContents=locContents)\n\n            moved.extend(assemblyList)\n\n            # go through and 
swap the assemblies knowing that there is a discharge (first one)\n            # and a new assembly brought it (last one)\n            for i in range(0, -(len(assemblyList) - 1), -1):\n                self.swapAssemblies(assemblyList[i], assemblyList[i - 1])\n\n            # Now, everything has been set except the first assembly in the list, which must now be\n            # replaced with a fresh assembly... but which one? The assemblyType string\n            # tells us.\n            # Sometimes enrichment is set on-the-fly by branch searches, so we must\n            # not only use the proper assembly type but also adjust the enrichment.\n            if ringPosCycle:\n                ring, pos, cycle = ringPosCycle\n                loadAssembly = self.r.core.getAssemblyWithRingPosHist(ring, pos, cycle)\n                if not loadAssembly:\n                    msg = f\"The required assembly located at ring {ring} pos {pos} at cycle {cycle} is not found\"\n                    runLog.error(msg)\n                    raise RuntimeError(msg)\n            else:\n                # create a new assembly from the BOL assem templates and adjust the enrichment\n                loadAssembly = self.r.core.createAssemblyOfType(enrichList=enrichList, assemType=assemblyType)\n\n            # replace the goingOut guy (for continual feed cases)\n            runLog.debug(\"Calling discharge swap with {} and {}\".format(loadAssembly, assemblyList[0]))\n            self.dischargeSwap(loadAssembly, assemblyList[0], toSfp=(dest == \"SFP\"))\n            moved.append(loadAssembly)\n\n        # shuffle all of the loop chain assemblies (no charge/discharge)\n\n        for assemblyChain in loopChains:\n            # convert the labels into actual assemblies to be swapped\n            assemblyList = self.r.core.getLocationContents(assemblyChain, assemblyLevel=True, locContents=locContents)\n\n            for a in assemblyList:\n                moved.append(a)\n\n            # go through and 
swap the assemblies knowing that there is a discharge (first one)\n            # and a new assembly brought it (last one)\n            # for i in range(0,-(len(assemblyList)-1),-1):\n            for i in range(0, -(len(assemblyList) - 1), -1):\n                self.swapAssemblies(assemblyList[i], assemblyList[i + 1])\n\n        return moved\n\n    def workerOperate(self, cmd):\n        \"\"\"Handle a mpi command on the worker nodes.\"\"\"\n        pass\n\n    def prepShuffleMap(self):\n        \"\"\"Prepare a table of current locations for plotting shuffle maneuvers.\"\"\"\n        self.oldLocations = {}\n        for a in self.r.core:\n            self.oldLocations[a.getName()] = a.spatialLocator.getGlobalCoordinates()\n\n    def makeShuffleArrows(self):\n        \"\"\"\n        Build data for plotting all the previous shuffles as arrows.\n\n        Returns\n        -------\n        arrows : list\n            Values are (currentCoords, oldCoords) tuples\n        \"\"\"\n        arrows = []\n        runLog.extra(\"Building list of shuffle arrows.\")\n        for a in self.r.core:\n            currentCoords = a.spatialLocator.getGlobalCoordinates()\n            oldCoords = self.oldLocations.get(a.getName(), None)\n            if oldCoords is None:\n                oldCoords = np.array((-50, -50, 0))\n            elif any(currentCoords != oldCoords):\n                arrows.append((oldCoords, currentCoords))\n\n        return arrows\n"
  },
  {
    "path": "armi/physics/fuelCycle/hexAssemblyFuelMgmtUtils.py",
    "content": "# Copyright 2022 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis is a selection of fuel management utilities that seem generally useful enough to keep in ARMI, but they still only\napply to hex assembly reactors.\n\nNotes\n-----\nWe are keeping these in ARMI even if they appear unused internally.\n\"\"\"\n\nimport math\nimport typing\n\nimport numpy as np\n\nfrom armi import runLog\nfrom armi.physics.fuelCycle.utils import maxBurnupBlock, maxBurnupLocator\nfrom armi.utils.mathematics import findClosest\n\nif typing.TYPE_CHECKING:\n    from armi.reactor.assemblies import HexAssembly\n\n\ndef getOptimalAssemblyOrientation(a: \"HexAssembly\", aPrev: \"HexAssembly\") -> int:\n    \"\"\"\n    Get optimal hex assembly orientation/rotation to minimize peak burnup.\n\n    Works by placing the highest-burnup pin in the location (of 6 possible locations) with lowest expected pin power. We\n    evaluated \"expected pin power\" based on the power distribution in ``aPrev``, the previous assembly located where\n    ``a`` is going. The algorithm goes as follows.\n\n    1. Get all the pin powers and ``IndexLocation`` s from the block at the previous location/timenode.\n    2. Obtain the ``IndexLocation`` of the pin with the highest burnup in the current assembly.\n    3. 
For each possible rotation,\n\n        - Find the new location with ``HexGrid.rotateIndex``\n        - Find the index where that location occurs in previous locations\n        - Find the previous power at that location\n\n    4. Return the rotation with the lowest previous power\n\n    This algorithm assumes a few things.\n\n    1. ``len(HexBlock.getPinCoordinates()) == len(HexBlock.p.linPowByPin)`` and, by extension, ``linPowByPin[i]`` is\n       found at ``getPinCoordinates()[i]``.\n    2. Your assembly has at least 60 degree symmetry of fuel pins and powers. This means if we find a fuel pin and\n       rotate it 60 degrees, there should be another fuel pin at that lattice site. This is mostly a safe assumption\n       since many hexagonal reactors have at least 60 degree symmetry of fuel pin layout. This assumption holds if you\n       have a full hexagonal lattice of fuel pins as well.\n    3. Fuel pins in ``a`` have similar locations in ``aPrev``. This is a safe assumption in that most fuel assemblies\n       have similar layouts so it's plausible that if ``a`` has a fuel pin at ``(1, 0, 0)`` so does ``aPrev``.\n\n    .. impl:: Provide an algorithm for rotating hexagonal assemblies to equalize burnup\n        :id: I_ARMI_ROTATE_HEX_BURNUP\n        :implements: R_ARMI_ROTATE_HEX_BURNUP\n\n        This method will return a rotation such that the highest-burnup pin moves to the hex location with the lowest\n        expect pin number. 
This rotation will be optimal in the sense that it will minimize peak burnup.\n\n    Parameters\n    ----------\n    a : Assembly object\n        The assembly that is being rotated.\n    aPrev : Assembly object\n        The assembly that previously occupied this location (before the last shuffle).\n        If the assembly \"a\" was not shuffled, it's sufficient to pass ``a``.\n\n    Returns\n    -------\n    int\n        An integer from 0 to 5 representing the number of pi/3 (60 degree) counterclockwise rotations from where ``a``\n        is currently oriented to the \"optimal\" orientation\n\n    Raises\n    ------\n    ValueError\n        If there is insufficient information to determine the rotation of ``a``. This could be due to a lack of fuel\n        blocks or parameters like ``linPowByPin``.\n    \"\"\"\n    maxBuBlock = maxBurnupBlock(a)\n    if maxBuBlock.spatialGrid is None:\n        msg = f\"Block {maxBuBlock} in {a} does not have a spatial grid. Cannot rotate.\"\n        runLog.error(msg)\n        raise ValueError(msg)\n    maxBuPinLocation = maxBurnupLocator(maxBuBlock)\n    # No need to rotate if max burnup pin is the center\n    if maxBuPinLocation.i == 0 and maxBuPinLocation.j == 0:\n        return 0\n\n    if aPrev is not a:\n        blockAtPreviousLocation = aPrev[a.index(maxBuBlock)]\n    else:\n        blockAtPreviousLocation = maxBuBlock\n\n    previousLocations = blockAtPreviousLocation.getPinLocations()\n    previousPowers = blockAtPreviousLocation.p.linPowByPin\n    if len(previousLocations) != len(previousPowers):\n        msg = (\n            f\"Inconsistent pin powers and number of pins in {blockAtPreviousLocation}. 
\"\n            f\"Found {len(previousLocations)} locations but {len(previousPowers)} powers.\"\n        )\n        runLog.error(msg)\n        raise ValueError(msg)\n\n    ringPowers = {(loc.i, loc.j): p for loc, p in zip(previousLocations, previousPowers)}\n\n    targetGrid = blockAtPreviousLocation.spatialGrid\n    candidateRotation = 0\n    candidatePower = ringPowers.get((maxBuPinLocation.i, maxBuPinLocation.j), math.inf)\n    for rot in range(1, 6):\n        candidateLocation = targetGrid.rotateIndex(maxBuPinLocation, rot)\n        newPower = ringPowers.get((candidateLocation.i, candidateLocation.j), math.inf)\n        if newPower < candidatePower:\n            candidateRotation = rot\n            candidatePower = newPower\n\n    return candidateRotation\n\n\ndef buildRingSchedule(\n    maxRingInCore,\n    chargeRing=None,\n    dischargeRing=None,\n    jumpRingFrom=None,\n    jumpRingTo=None,\n    coarseFactor=0.0,\n):\n    r\"\"\"\n    Build a ring schedule for shuffling.\n\n    Notes\n    -----\n    General enough to do convergent, divergent, or any combo, plus jumprings.\n\n    The center of the core is ring 1, based on the DIF3D numbering scheme.\n\n    Jump ring behavior can be generalized by first building a base ring list\n    where assemblies get charged to H and discharge from A::\n\n        [A, B, C, D, E, F, G, H]\n\n    If a jump should be placed where it jumps from ring G to C, reversed back to F, and then discharges from A,\n    we simply reverse the sublist [C,D,E,F], leaving us with::\n\n        [A, B, F, E, D, C, G, H]\n\n    A less-complex, more standard convergent-divergent scheme is a subcase of this, where the\n    sublist [A,B,C,D,E] or so is reversed, leaving::\n\n        [E, D, C, B, A, F, G, H]\n\n    So the task of this function is simply to determine what subsection, if any, to reverse of\n    the baselist.\n\n    Parameters\n    ----------\n    maxRingInCore : int\n        The number of rings in the hex assembly reactor.\n\n    
chargeRing : int, optional\n        The peripheral ring into which an assembly enters the core. Default is outermost ring.\n\n    dischargeRing : int, optional\n        The last ring an assembly sits in before discharging. Default is jumpRing-1\n\n    jumpRingFrom : int\n        The last ring an assembly sits in before jumping to the center\n\n    jumpRingTo : int, optional\n        The inner ring into which a jumping assembly jumps. Default is 1.\n\n    coarseFactor : float, optional\n        A number between 0 and 1 where 0 hits all rings and 1 only hits the outer, rJ, center, and rD rings.\n        This allows coarse shuffling, with large jumps. Default: 0\n\n    Returns\n    -------\n    ringSchedule : list\n        A list of rings in order from discharge to charge.\n\n    ringWidths : list\n        A list of integers corresponding to the ringSchedule determining the widths of each ring area\n\n    Examples\n    --------\n    >>> f.buildRingSchedule(17, 1, jumpRingFrom=14)\n    ([13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 14, 15, 16, 17],\n    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])\n    \"\"\"\n    if dischargeRing > maxRingInCore:\n        runLog.warning(\n            f\"Discharge ring {dischargeRing} is outside the core (max {maxRingInCore}). Changing it to be the max ring\"\n        )\n        dischargeRing = maxRingInCore\n    if chargeRing > maxRingInCore:\n        runLog.warning(\n            f\"Charge ring {chargeRing} is outside the core (max {maxRingInCore}). Changing it to be the max ring.\"\n        )\n        chargeRing = maxRingInCore\n\n    # process arguments\n    if dischargeRing is None:\n        # No discharge ring given, so we default to converging from outside to inside\n        # and therefore discharging from the center\n        dischargeRing = 1\n    if chargeRing is None:\n        # Charge ring not specified. 
Since we default to convergent shuffling, we\n        # must insert the fuel at the periphery.\n        chargeRing = maxRingInCore\n    if jumpRingFrom is not None and not (1 < jumpRingFrom < maxRingInCore):\n        raise ValueError(f\"JumpRingFrom {jumpRingFrom} is not in the core.\")\n    if jumpRingTo is not None and not (1 <= jumpRingTo < maxRingInCore):\n        raise ValueError(f\"JumpRingTo {jumpRingTo} is not in the core.\")\n\n    if chargeRing > dischargeRing and jumpRingTo is None:\n        # a convergent shuffle with no jumping. By setting\n        # jumpRingTo to be 1, no jumping will be activated\n        # in the later logic.\n        jumpRingTo = 1\n    elif jumpRingTo is None:\n        # divergent case. Disable jumpring by putting jumpring at periphery.\n        jumpRingTo = maxRingInCore\n\n    if chargeRing > dischargeRing and jumpRingFrom is not None and jumpRingFrom < jumpRingTo:\n        raise RuntimeError(\"Cannot have outward jumps in convergent cases.\")\n    if chargeRing < dischargeRing and jumpRingFrom is not None and jumpRingFrom > jumpRingTo:\n        raise RuntimeError(\"Cannot have inward jumps in divergent cases.\")\n\n    # step 1: build the base rings\n    numSteps = int((abs(dischargeRing - chargeRing) + 1) * (1.0 - coarseFactor))\n    # don't let it be smaller than 2 because linspace(1,5,1)= [1], linspace(1,5,2)= [1,5]\n    numSteps = max(numSteps, 2)\n\n    baseRings = [int(ring) for ring in np.linspace(dischargeRing, chargeRing, numSteps)]\n    # eliminate duplicates.\n    newBaseRings = []\n    for br in baseRings:\n        if br not in newBaseRings:\n            newBaseRings.append(br)\n\n    baseRings = newBaseRings\n\n    # build widths\n    widths = []\n    for i, ring in enumerate(baseRings[:-1]):\n        # 0 is the most restrictive, meaning don't even look in other rings.\n        widths.append(abs(baseRings[i + 1] - ring) - 1)\n    widths.append(0)  # add the last ring with width 0.\n\n    # step 2: locate which 
rings should be reversed to give the jump-ring effect.\n    if jumpRingFrom is not None:\n        _closestRingFrom, jumpRingFromIndex = findClosest(baseRings, jumpRingFrom, indx=True)\n        _closestRingTo, jumpRingToIndex = findClosest(baseRings, jumpRingTo, indx=True)\n    else:\n        jumpRingToIndex = 0\n\n    # step 3: build the final ring list, potentially with a reversed section\n    newBaseRings = []\n    newWidths = []\n    # add in the non-reversed section before the reversed section\n\n    if jumpRingFrom is not None:\n        newBaseRings.extend(baseRings[:jumpRingToIndex])\n        newWidths.extend(widths[:jumpRingToIndex])\n        # add in reversed section that is jumped\n        newBaseRings.extend(reversed(baseRings[jumpRingToIndex:jumpRingFromIndex]))\n        newWidths.extend(reversed(widths[jumpRingToIndex:jumpRingFromIndex]))\n        # add the rest.\n        newBaseRings.extend(baseRings[jumpRingFromIndex:])\n        newWidths.extend(widths[jumpRingFromIndex:])\n    else:\n        # no jump section. Just fill in the rest.\n        newBaseRings.extend(baseRings[jumpRingToIndex:])\n        newWidths.extend(widths[jumpRingToIndex:])\n\n    return newBaseRings, newWidths\n\n\ndef buildConvergentRingSchedule(chargeRing, dischargeRing=1, coarseFactor=0.0):\n    r\"\"\"\n    Builds a ring schedule for convergent shuffling from ``chargeRing`` to ``dischargeRing``.\n\n    Parameters\n    ----------\n    chargeRing : int\n        The peripheral ring into which an assembly enters the core. A good default is\n        outermost ring: ``r.core.getNumRings()``.\n\n    dischargeRing : int, optional\n        The last ring an assembly sits in before discharging. If no discharge, this is the one that\n        gets placed where the charge happens. 
Default: Innermost ring\n\n    coarseFactor : float, optional\n        A number between 0 and 1 where 0 hits all rings and 1 only hits the outer, rJ, center, and rD rings.\n        This allows coarse shuffling, with large jumps. Default: 0\n\n    Returns\n    -------\n    convergent : list\n        A list of rings in order from discharge to charge.\n\n    conWidths : list\n        A list of integers corresponding to the ringSchedule determining the widths of each ring area\n    \"\"\"\n    # step 1: build the convergent rings\n    numSteps = int((chargeRing - dischargeRing + 1) * (1.0 - coarseFactor))\n    # don't let it be smaller than 2 because linspace(1,5,1)= [1], linspace(1,5,2)= [1,5]\n    numSteps = max(numSteps, 2)\n    convergent = [int(ring) for ring in np.linspace(dischargeRing, chargeRing, numSteps)]\n\n    # step 2. eliminate duplicates\n    convergent = sorted(list(set(convergent)))\n\n    # step 3. compute widths\n    conWidths = []\n    for i, ring in enumerate(convergent[:-1]):\n        conWidths.append(convergent[i + 1] - ring)\n    conWidths.append(1)\n\n    # step 4. assemble and return\n    return convergent, conWidths\n"
  },
  {
    "path": "armi/physics/fuelCycle/settings.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Settings for generic fuel cycle code.\"\"\"\n\nimport importlib.util\n\nfrom armi.settings import setting, settingsValidation\n\nCONF_ASSEM_ROTATION_STATIONARY = \"assemblyRotationStationary\"\nCONF_ASSEMBLY_ROTATION_ALG = \"assemblyRotationAlgorithm\"\nCONF_CIRCULAR_RING_MODE = \"circularRingMode\"\nCONF_FUEL_HANDLER_NAME = \"fuelHandlerName\"\nCONF_SHUFFLE_SEQUENCE_FILE = \"shuffleSequenceFile\"\nCONF_JUMP_RING_NUM = \"jumpRingNum\"\nCONF_LEVELS_PER_CASCADE = \"levelsPerCascade\"\nCONF_PLOT_SHUFFLE_ARROWS = \"plotShuffleArrows\"\nCONF_RUN_LATTICE_BEFORE_SHUFFLING = \"runLatticePhysicsBeforeShuffling\"\nCONF_SHUFFLE_LOGIC = \"shuffleLogic\"\n\n\ndef getFuelCycleSettings():\n    \"\"\"Define settings for fuel cycle.\"\"\"\n    settings = [\n        setting.Setting(\n            CONF_ASSEMBLY_ROTATION_ALG,\n            default=\"\",\n            label=\"Assembly Rotation Algorithm\",\n            description=\"The algorithm to use to rotate the detail assemblies while shuffling\",\n            options=[\"\", \"buReducingAssemblyRotation\", \"simpleAssemblyRotation\"],\n            enforcedOptions=True,\n        ),\n        setting.Setting(\n            CONF_ASSEM_ROTATION_STATIONARY,\n            default=False,\n            label=\"Rotate stationary assems\",\n            description=(\n                \"Whether or not to rotate assemblies that are not 
shuffled.This can only be True if 'rotation' is true.\"\n            ),\n        ),\n        setting.Setting(\n            CONF_CIRCULAR_RING_MODE,\n            default=False,\n            description=\"Toggle between circular ring definitions to hexagonal ring definitions\",\n            label=\"Use Circular Rings\",\n        ),\n        setting.Setting(\n            CONF_RUN_LATTICE_BEFORE_SHUFFLING,\n            default=False,\n            description=(\n                \"Forces the Generation of Cross Sections Prior to Shuffling the Fuel Assemblies. \"\n                \"Note: This is recommended when performing equilibrium shuffling branching searches.\"\n            ),\n            label=\"Generate XS Prior to Fuel Shuffling\",\n        ),\n        setting.Setting(\n            CONF_SHUFFLE_LOGIC,\n            default=\"\",\n            label=\"Shuffle Logic\",\n            description=(\n                \"Path to a Python script or dotted module path that handles the fuel shuffling \"\n                \"for this case. 
This is user-defined per run as a dynamic input.\"\n            ),\n        ),\n        setting.Setting(\n            CONF_SHUFFLE_SEQUENCE_FILE,\n            default=\"\",\n            label=\"Shuffle Sequence File\",\n            description=\"Path to a YAML file defining a custom shuffle sequence\",\n        ),\n        setting.Setting(\n            CONF_FUEL_HANDLER_NAME,\n            default=\"\",\n            label=\"Fuel Handler Name\",\n            description=\"The name of the FuelHandler class in the shuffle logic module to activate\",\n        ),\n        setting.Setting(\n            CONF_PLOT_SHUFFLE_ARROWS,\n            default=False,\n            description=\"Make plots with arrows showing each move.\",\n            label=\"Plot shuffle arrows\",\n        ),\n        setting.Setting(\n            CONF_JUMP_RING_NUM,\n            default=8,\n            label=\"Jump Ring Number\",\n            description=\"The number of hex rings jumped when distributing the feed assemblies in \"\n            \"the alternating concentric rings or checkerboard shuffle patterns (convergent / \"\n            \"divergent shuffling).\",\n        ),\n        setting.Setting(\n            CONF_LEVELS_PER_CASCADE,\n            default=14,\n            label=\"Move per cascade\",\n            description=\"The number of moves made per cascade when performing convergent or \"\n            \"divergent shuffle patterns.\",\n        ),\n    ]\n    return settings\n\n\ndef getFuelCycleSettingValidators(inspector):\n    queries = []\n\n    queries.append(\n        settingsValidation.Query(\n            lambda: bool(inspector.cs[CONF_SHUFFLE_LOGIC]) ^ bool(inspector.cs[CONF_FUEL_HANDLER_NAME]),\n            \"A value was provided for `fuelHandlerName` or `shuffleLogic`, but not \"\n            \"the other. 
Either both `fuelHandlerName` and `shuffleLogic` should be \"\n            \"defined, or neither of them.\",\n            \"\",\n            inspector.NO_ACTION,\n        )\n    )\n\n    queries.append(\n        settingsValidation.Query(\n            lambda: \" \" in inspector.cs[CONF_SHUFFLE_LOGIC],\n            \"Spaces are not allowed in shuffleLogic file location. You have specified {0}. \"\n            \"Shuffling will not occur.\".format(inspector.cs[CONF_SHUFFLE_LOGIC]),\n            \"\",\n            inspector.NO_ACTION,\n        )\n    )\n\n    queries.append(\n        settingsValidation.Query(\n            lambda: inspector.cs[CONF_SHUFFLE_SEQUENCE_FILE]\n            and not inspector._csRelativePathExists(inspector.cs[CONF_SHUFFLE_SEQUENCE_FILE]),\n            \"The specified shuffle sequence file '{0}' cannot be found.\".format(\n                inspector.cs[CONF_SHUFFLE_SEQUENCE_FILE]\n            ),\n            \"\",\n            inspector.NO_ACTION,\n        )\n    )\n\n    def _clearShufflingInput():\n        inspector._assignCS(CONF_SHUFFLE_LOGIC, \"\")\n        inspector._assignCS(CONF_FUEL_HANDLER_NAME, \"\")\n\n    queries.append(\n        settingsValidation.Query(\n            lambda: inspector.cs[CONF_SHUFFLE_LOGIC]\n            and not inspector._csRelativePathExists(inspector.cs[CONF_SHUFFLE_LOGIC])\n            and importlib.util.find_spec(inspector.cs[CONF_SHUFFLE_LOGIC]) is None,\n            \"The specified shuffle logic module or file '{0}' cannot be found. Shuffling will not occur.\".format(\n                inspector.cs[CONF_SHUFFLE_LOGIC]\n            ),\n            \"Clear specified file value?\",\n            _clearShufflingInput,\n        )\n    )\n\n    return queries\n"
  },
  {
    "path": "armi/physics/fuelCycle/tests/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"
  },
  {
    "path": "armi/physics/fuelCycle/tests/_customFuelHandlerModule.py",
    "content": "# Copyright 2025 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Test utilities for :mod:`armi.physics.fuelCycle.fuelHandlerFactory`.\"\"\"\n\n\nclass MockFileFuelHandler:\n    \"\"\"Fuel handler used when importing from a file path.\"\"\"\n\n    def __init__(self, operator):\n        self.operator = operator\n\n\nclass MockModuleFuelHandler:\n    \"\"\"Fuel handler used when importing from a module path.\"\"\"\n\n    def __init__(self, operator):\n        self.operator = operator\n"
  },
  {
    "path": "armi/physics/fuelCycle/tests/test_assemblyRotationAlgorithms.py",
    "content": "# Copyright 2022 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nTests for tools used to rotate hex assemblies.\n\nNotes\n-----\nThese algorithms are defined in assemblyRotationAlgorithms.py, but they are used in:\n``FuelHandler.outage()``.\n\"\"\"\n\nimport copy\nimport enum\nimport math\nimport typing\nfrom unittest import TestCase, mock\n\nimport numpy as np\n\nfrom armi.physics.fuelCycle import assemblyRotationAlgorithms as rotAlgos\nfrom armi.physics.fuelCycle import fuelHandlers\nfrom armi.physics.fuelCycle.hexAssemblyFuelMgmtUtils import (\n    getOptimalAssemblyOrientation,\n)\nfrom armi.physics.fuelCycle.settings import CONF_ASSEM_ROTATION_STATIONARY\nfrom armi.reactor.assemblies import HexAssembly\nfrom armi.reactor.blocks import HexBlock\nfrom armi.reactor.flags import Flags\nfrom armi.reactor.tests import test_reactors\n\n\nclass MockFuelHandler(fuelHandlers.FuelHandler):\n    \"\"\"Implements the entire interface but with empty methods.\"\"\"\n\n    def chooseSwaps(self, *args, **kwargs):\n        pass\n\n\nclass _PinLocations(enum.IntEnum):\n    \"\"\"Zero-indexed locations for specific points of interest.\n\n    If a data vector has an entry to all ``self.N_PINS=169`` pins in the test model,\n    then ``data[PIN_LOCATIONS.UPPER_RIGHT_VERTEX]`` will access the data for the pin\n    along the upper right 60 symmetry line. 
Since we're dealing with rotations here, it\n    does not need to literally be the pin at the vertex. Just along the symmetry line\n    to help explain tests.\n\n    The use case here is setting the pin or burnup array to be a constant value, but\n    using a single max or minimum value to determine rotation.\n    \"\"\"\n\n    CENTER = 0\n    UPPER_RIGHT_VERTEX = 1\n    UPPER_LEFT_VERTEX = 2\n    DUE_LEFT_VERTEX = 3\n    LOWER_LEFT_VERTEX = 4\n    LOWER_RIGHT_VERTEX = 5\n    DUE_RIGHT_VERTEX = 6\n\n\nclass ShuffleAndRotateTestHelper(TestCase):\n    \"\"\"Fixture class to assist in testing rotation of assemblies via the fuel handler.\"\"\"\n\n    N_PINS = 169\n\n    def setUp(self):\n        self.o, self.r = test_reactors.loadTestReactor()\n        self.r.core.locateAllAssemblies()\n\n    @staticmethod\n    def ensureBlockHasSpatialGrid(b: HexBlock):\n        \"\"\"If ``b`` does not have a spatial grid, auto create one.\"\"\"\n        if b.spatialGrid is None:\n            b.getPinPitch = mock.Mock(return_value=1.1)\n            b.autoCreateSpatialGrids()\n\n    def setAssemblyPinBurnups(self, a: HexAssembly, burnups: np.ndarray):\n        \"\"\"Prepare the assembly that will be shuffled and rotated.\"\"\"\n        peakBu = burnups.max()\n        for b in a.getChildrenWithFlags(Flags.FUEL):\n            self.ensureBlockHasSpatialGrid(b)\n            b.p.percentBuPeak = peakBu\n            for c in b.getChildrenWithFlags(Flags.FUEL):\n                c.p.pinPercentBu = burnups\n\n    def setAssemblyPinPowers(self, a: HexAssembly, pinPowers: np.ndarray):\n        \"\"\"Prep the assembly that existed at the site a shuffled assembly will occupy.\"\"\"\n        for b in a.getChildrenWithFlags(Flags.FUEL):\n            self.ensureBlockHasSpatialGrid(b)\n            b.p.linPowByPin = pinPowers\n\n    def powerWithMinValue(self, minIndex: int) -> np.ndarray:\n        \"\"\"Create a vector of pin powers with a minimum value at a given index.\"\"\"\n        data = 
np.ones(self.N_PINS)\n        data[minIndex] = 0\n        return data\n\n    def burnupWithMaxValue(self, maxIndex: int) -> np.ndarray:\n        \"\"\"Create a vector of pin burnups with a maximum value at a given index.\"\"\"\n        data = np.zeros(self.N_PINS)\n        data[maxIndex] = 50\n        return data\n\n    def compareMockedToExpectedRotation(self, nRotations: int, mRotate: mock.Mock, msg: typing.Optional[str] = None):\n        \"\"\"Helper function to check the mocked rotate and compare against expected rotation.\"\"\"\n        expectedRadians = nRotations * math.pi / 3\n        (actualRadians,) = mRotate.call_args.args\n        self.assertAlmostEqual(actualRadians, expectedRadians, msg=msg)\n\n\nclass TestOptimalAssemblyRotation(ShuffleAndRotateTestHelper):\n    \"\"\"Test the burnup dependent assembly rotation methods.\"\"\"\n\n    def setUp(self):\n        super().setUp()\n        self.assembly: HexAssembly = self.r.core.getFirstAssembly(Flags.FUEL)\n\n    def test_flatPowerNoRotation(self):\n        \"\"\"If all pin powers are identical, no rotation is suggested.\"\"\"\n        burnups = self.burnupWithMaxValue(_PinLocations.UPPER_LEFT_VERTEX)\n        powers = np.ones_like(burnups)\n        self.setAssemblyPinBurnups(self.assembly, burnups)\n        self.setAssemblyPinPowers(self.assembly, powers)\n        rot = getOptimalAssemblyOrientation(self.assembly, self.assembly)\n        self.assertEqual(rot, 0)\n\n    def test_maxBurnupAtCenterNoRotation(self):\n        \"\"\"If max burnup pin is at the center, no rotation is suggested.\"\"\"\n        burnups = self.burnupWithMaxValue(_PinLocations.CENTER)\n        powers = np.zeros_like(burnups)\n        self.setAssemblyPinBurnups(self.assembly, burnups)\n        self.setAssemblyPinPowers(self.assembly, powers)\n        rot = getOptimalAssemblyOrientation(self.assembly, self.assembly)\n        self.assertEqual(rot, 0)\n\n    def test_oppositeRotation(self):\n        \"\"\"Test a 180 degree rotation is 
suggested when the max burnup pin is opposite the lowest power pin.\n\n        Use the second ring of the hexagon because it's easier to write out pin locations\n        and check work.\n\n        .. test:: Test the burnup equalizing rotation algorithm.\n            :id: T_ARMI_ROTATE_HEX_BURNUP\n            :tests: R_ARMI_ROTATE_HEX_BURNUP\n            :acceptance_criteria: After rotating a hexagonal assembly, confirm the pin with the highest burnup is\n                in the same sector as pin with the lowest power in the high burnup pin's ring.\n\n        Notes\n        -----\n        Use zero-indexed pin location not pin ID to assign burnups and powers. Since\n        we have a single component, ``Block.p.linPowByPin[i] <-> Component.p.pinPercentBu[i]``\n        \"\"\"\n        shuffledAssembly = self.assembly\n        previousAssembly = copy.deepcopy(shuffledAssembly)\n        pairs = (\n            (_PinLocations.DUE_RIGHT_VERTEX, _PinLocations.DUE_LEFT_VERTEX),\n            (_PinLocations.UPPER_LEFT_VERTEX, _PinLocations.LOWER_RIGHT_VERTEX),\n            (_PinLocations.UPPER_RIGHT_VERTEX, _PinLocations.LOWER_LEFT_VERTEX),\n            (_PinLocations.DUE_LEFT_VERTEX, _PinLocations.DUE_RIGHT_VERTEX),\n            (_PinLocations.LOWER_RIGHT_VERTEX, _PinLocations.UPPER_LEFT_VERTEX),\n            (_PinLocations.LOWER_LEFT_VERTEX, _PinLocations.UPPER_RIGHT_VERTEX),\n        )\n        for startPin, oppositePin in pairs:\n            powers = self.powerWithMinValue(oppositePin)\n            burnups = self.burnupWithMaxValue(startPin)\n            self.setAssemblyPinBurnups(shuffledAssembly, burnups)\n            self.setAssemblyPinPowers(previousAssembly, powers)\n            rot = getOptimalAssemblyOrientation(shuffledAssembly, previousAssembly)\n            # 180 degrees is three 60 degree rotations\n            self.assertEqual(rot, 3, msg=f\"{startPin=} :: {oppositePin=}\")\n\n    def test_noBlocksWithBurnup(self):\n        \"\"\"Require at least one block to 
have burnup.\"\"\"\n        with self.assertRaisesRegex(ValueError, \"Error finding max burnup\"):\n            getOptimalAssemblyOrientation(self.assembly, self.assembly)\n\n    def test_mismatchPinPowersAndLocations(self):\n        \"\"\"Require pin powers and locations to be have the same length.\"\"\"\n        powers = np.arange(self.N_PINS + 1)\n        burnups = np.arange(self.N_PINS)\n        self.setAssemblyPinBurnups(self.assembly, burnups)\n        self.setAssemblyPinPowers(self.assembly, powers)\n        with self.assertRaisesRegex(ValueError, \"Inconsistent pin powers and number of pins\"):\n            getOptimalAssemblyOrientation(self.assembly, self.assembly)\n\n\nclass TestFuelHandlerMgmtTools(ShuffleAndRotateTestHelper):\n    def test_buRotationWithFreshFeed(self):\n        \"\"\"Test that rotation works if a new assembly is swapped with fresh fuel.\n\n        Fresh feed assemblies will not exist in the reactor, and various checks that\n        try to the \"previous\" assembly's location can fail.\n        \"\"\"\n        newSettings = {\n            \"fluxRecon\": True,\n            \"assemblyRotationAlgorithm\": \"buReducingAssemblyRotation\",\n        }\n        self.o.cs = self.o.cs.modified(newSettings=newSettings)\n\n        fresh = self.r.core.createFreshFeed(self.o.cs)\n        self.assertEqual(fresh.lastLocationLabel, HexAssembly.LOAD_QUEUE)\n        fh = MockFuelHandler(self.o)\n        fh.chooseSwaps = mock.Mock(side_effect=lambda _: fh.moved.append(fresh))\n\n        with mock.patch(\n            \"armi.physics.fuelCycle.assemblyRotationAlgorithms.getOptimalAssemblyOrientation\",\n        ) as p:\n            fh.outage()\n        # The only moved assembly was most recently outside the core so we have no need to rotate\n        # Make sure our fake chooseSwaps added the fresh assembly to the moved assemblies\n        fh.chooseSwaps.assert_called_once()\n        p.assert_not_called()\n\n    def 
test_buRotationWithStationaryRotation(self):\n        \"\"\"Test that the burnup equalizing rotation algorithm works on non-shuffled assemblies.\"\"\"\n        newSettings = {\n            CONF_ASSEM_ROTATION_STATIONARY: True,\n            \"fluxRecon\": True,\n            \"assemblyRotationAlgorithm\": \"buReducingAssemblyRotation\",\n        }\n        self.o.cs = self.o.cs.modified(newSettings=newSettings)\n\n        # Grab two assemblies that were not moved. One of which will have the detailed information\n        # needed for rotation\n        detailedAssem, coarseAssem = self.o.r.core.getChildrenWithFlags(Flags.FUEL)[:2]\n        self.setAssemblyPinBurnups(detailedAssem, burnups=np.arange(self.N_PINS))\n        self.setAssemblyPinPowers(detailedAssem, pinPowers=np.arange(self.N_PINS))\n        detailedAssem.rotate = mock.Mock()\n        coarseAssem.rotate = mock.Mock()\n\n        fh = MockFuelHandler(self.o)\n\n        with mock.patch(\n            \"armi.physics.fuelCycle.assemblyRotationAlgorithms.getOptimalAssemblyOrientation\",\n            return_value=5,\n        ) as p:\n            fh.outage()\n        p.assert_called_once_with(detailedAssem, detailedAssem)\n        # Assembly with detailed pin powers and pin burnups will be rotated\n        detailedAssem.rotate.assert_called_once()\n        self.compareMockedToExpectedRotation(5, detailedAssem.rotate)\n        # Assembly without pin level data will not be rotated\n        coarseAssem.rotate.assert_not_called()\n\n    def test_rotateInShuffleQueue(self):\n        \"\"\"Test for expected behavior when multiple assemblies are shuffled and rotated in one outage.\n\n        Examine the behavior of three assemblies: ``first -> second -> third``\n\n        1. ``first`` is moved to the location of ``second`` and rotated by comparing\n           ``first`` burnup against ``second`` pin powers.\n        2. 
``second`` is moved to the location of ``third`` and rotated by comparing\n           ``second`` burnup against ``third`` pin powers.\n\n        where:\n\n        * ``first`` burnup is maximized in the upper left direction.\n        * ``second`` pin power is minimized along the lower left direction.\n        * ``second`` burnup is maximized in the upper right direction.\n        * ``third`` pin power is minimized in the direct right direction.\n\n        We should expect:\n\n        1. ``first`` is rotated from upper left to lower left => two 60 degree CCW rotations.\n        2. ``second`` is rotated from upper right to direct right => five 60 degree CCW rotations.\n        \"\"\"\n        newSettings = {\n            CONF_ASSEM_ROTATION_STATIONARY: False,\n            \"fluxRecon\": True,\n            \"assemblyRotationAlgorithm\": \"buReducingAssemblyRotation\",\n        }\n        self.o.cs = self.o.cs.modified(newSettings=newSettings)\n\n        first, second, third = self.r.core.getChildrenWithFlags(Flags.FUEL)[:3]\n\n        firstBurnups = self.burnupWithMaxValue(_PinLocations.UPPER_LEFT_VERTEX)\n        self.setAssemblyPinBurnups(first, firstBurnups)\n\n        secondPowers = self.powerWithMinValue(_PinLocations.LOWER_LEFT_VERTEX)\n        self.setAssemblyPinPowers(second, pinPowers=secondPowers)\n\n        secondBurnups = self.burnupWithMaxValue(_PinLocations.UPPER_RIGHT_VERTEX)\n        self.setAssemblyPinBurnups(second, burnups=secondBurnups)\n\n        thirdPowers = self.powerWithMinValue(_PinLocations.DUE_RIGHT_VERTEX)\n        self.setAssemblyPinPowers(third, thirdPowers)\n\n        # Set the shuffling sequence\n        # first -> second\n        # second -> third\n        second.lastLocationLabel = first.getLocation()\n        third.lastLocationLabel = second.getLocation()\n\n        first.rotate = mock.Mock()\n        second.rotate = mock.Mock()\n        third.rotate = mock.Mock()\n\n        fh = MockFuelHandler(self.o)\n        fh.chooseSwaps = 
mock.Mock(side_effect=lambda _: fh.moved.extend([second, third]))\n        fh.outage()\n\n        first.rotate.assert_called_once()\n        self.compareMockedToExpectedRotation(2, first.rotate, \"First\")\n        second.rotate.assert_called_once()\n        self.compareMockedToExpectedRotation(5, second.rotate, \"Second\")\n        third.rotate.assert_not_called()\n\n\nclass SimpleRotationTests(ShuffleAndRotateTestHelper):\n    \"\"\"Test the simple rotation where assemblies are rotated a fixed amount.\"\"\"\n\n    def test_simpleAssemblyRotation(self):\n        \"\"\"Test rotating assemblies 120 degrees with two rotation events.\"\"\"\n        fh = fuelHandlers.FuelHandler(self.o)\n        newSettings = {CONF_ASSEM_ROTATION_STATIONARY: True}\n        self.o.cs = self.o.cs.modified(newSettings=newSettings)\n        hist = self.o.getInterface(\"history\")\n        assems = hist.o.r.core.getAssemblies(Flags.FUEL)[:5]\n\n        # add some detailed assemblies\n        for a in assems:\n            hist.detailAssemblyNames.append(a.getName())\n\n        b = self.o.r.core.getFirstBlock(Flags.FUEL)\n        rotNum = b.getRotationNum()\n        rotAlgos.simpleAssemblyRotation(fh)\n        rotAlgos.simpleAssemblyRotation(fh)\n        self.assertEqual(b.getRotationNum(), rotNum + 2)\n"
  },
  {
    "path": "armi/physics/fuelCycle/tests/test_fuelHandlerFactory.py",
    "content": "# Copyright 2025 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for :mod:`armi.physics.fuelCycle.fuelHandlerFactory`.\"\"\"\n\nimport unittest\nfrom pathlib import Path\n\nfrom armi.physics.fuelCycle import fuelHandlerFactory\nfrom armi.physics.fuelCycle.settings import CONF_FUEL_HANDLER_NAME, CONF_SHUFFLE_LOGIC\nfrom armi.physics.fuelCycle.tests import _customFuelHandlerModule\n\n\nclass _DummySettings(dict):\n    \"\"\"Minimal stand-in for :class:`armi.settings.Settings`.\"\"\"\n\n\nclass _DummyOperator:\n    \"\"\"Operator stub that only exposes the settings object.\"\"\"\n\n    def __init__(self, settings):\n        self.cs = settings\n\n\nclass FuelHandlerFactoryTests(unittest.TestCase):\n    \"\"\"Exercise the custom module import logic.\"\"\"\n\n    def setUp(self):\n        self.inputDirectory = Path(__file__).resolve().parents[3]\n        self.settings = _DummySettings()\n        self.settings.inputDirectory = str(self.inputDirectory)\n        self.operator = _DummyOperator(self.settings)\n\n    def test_filePath(self):\n        \"\"\"Custom handlers can still be loaded from explicit file paths.\"\"\"\n        modulePath = Path(__file__).resolve().with_name(\"_customFuelHandlerModule.py\")\n        self.settings.update(\n            {\n                CONF_FUEL_HANDLER_NAME: \"MockFileFuelHandler\",\n                CONF_SHUFFLE_LOGIC: str(modulePath),\n            }\n        )\n\n        handler = 
fuelHandlerFactory.fuelHandlerFactory(self.operator)\n\n        self.assertEqual(handler.__class__.__name__, \"MockFileFuelHandler\")\n\n    def test_modulePath(self):\n        \"\"\"Module-style paths are imported using :mod:`importlib`.\"\"\"\n        moduleName = \"armi.physics.fuelCycle.tests._customFuelHandlerModule\"\n        self.settings.update(\n            {\n                CONF_FUEL_HANDLER_NAME: \"MockModuleFuelHandler\",\n                CONF_SHUFFLE_LOGIC: moduleName,\n            }\n        )\n\n        handler = fuelHandlerFactory.fuelHandlerFactory(self.operator)\n\n        self.assertIsInstance(handler, _customFuelHandlerModule.MockModuleFuelHandler)\n"
  },
  {
    "path": "armi/physics/fuelCycle/tests/test_fuelHandlers.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nTests some capabilities of the fuel handling machine.\n\nThis test is high enough level that it requires input files to be present. The ones to use\nare called armiRun.yaml which is located in armi.tests\n\"\"\"\n\nimport collections\nimport copy\nimport os\nimport tempfile\nimport unittest\nfrom unittest.mock import PropertyMock, patch\n\nimport numpy as np\n\nfrom armi.physics.fuelCycle import fuelHandlers, settings\nfrom armi.physics.fuelCycle.fuelHandlers import AssemblyMove\nfrom armi.physics.fuelCycle.settings import (\n    CONF_ASSEM_ROTATION_STATIONARY,\n    CONF_ASSEMBLY_ROTATION_ALG,\n    CONF_PLOT_SHUFFLE_ARROWS,\n    CONF_RUN_LATTICE_BEFORE_SHUFFLING,\n    CONF_SHUFFLE_SEQUENCE_FILE,\n)\nfrom armi.physics.neutronics.crossSectionGroupManager import CrossSectionGroupManager\nfrom armi.physics.neutronics.latticePhysics.latticePhysicsInterface import (\n    LatticePhysicsInterface,\n)\nfrom armi.reactor import assemblies, blocks, components, grids\nfrom armi.reactor.flags import Flags\nfrom armi.reactor.parameters import ParamLocation\nfrom armi.reactor.tests import test_reactors\nfrom armi.reactor.zones import Zone\nfrom armi.settings import caseSettings\nfrom armi.settings.fwSettings.globalSettings import CONF_TRACK_ASSEMS\nfrom armi.testing import TESTING_ROOT\nfrom armi.tests import TEST_ROOT, ArmiTestHelper, mockRunLogs\nfrom armi.utils import 
directoryChangers\nfrom armi.utils.customExceptions import InputError\n\n\nclass TestReadMovesYamlErrors(unittest.TestCase):\n    \"\"\"Ensure malformed YAML inputs raise informative ``InputError``.\"\"\"\n\n    def _run(self, text):\n        with tempfile.NamedTemporaryFile(\"w\", suffix=\".yaml\", delete=False) as tf:\n            tf.write(text)\n            fname = tf.name\n        try:\n            fuelHandlers.FuelHandler.readMovesYaml(fname)\n        finally:\n            os.remove(fname)\n\n    def test_missingSequence(self):\n        yaml_text = \"foo: []\\n\"\n        with self.assertRaisesRegex(InputError, \"sequence\"):\n            self._run(yaml_text)\n\n    def test_duplicateCycle(self):\n        yaml_text = \"sequence:\\n  1: []\\n  1: []\\n\"\n        with self.assertRaisesRegex(InputError, r\"(?i)\\bduplicate key\\b\"):\n            self._run(yaml_text)\n\n    def test_unknownActionKey(self):\n        yaml_text = \"sequence:\\n  1:\\n    - badAction: []\\n\"\n        with self.assertRaisesRegex(InputError, \"Unknown action\"):\n            self._run(yaml_text)\n\n    def test_badCascade(self):\n        cases = [\n            (\"sequence:\\n  1:\\n    - cascade: ['only']\\n\", \"cascade\"),\n            (\"sequence:\\n  1:\\n    - cascade: ['outer fuel', 1]\\n\", \"cascade\"),\n        ]\n        for yaml_text, msg in cases:\n            with self.subTest(yaml_text=yaml_text):\n                with self.assertRaisesRegex(InputError, msg):\n                    self._run(yaml_text)\n\n    def test_badSwap(self):\n        yaml_text = \"sequence:\\n  1:\\n    - swap: ['009-045']\\n\"\n        with self.assertRaisesRegex(InputError, \"swap\"):\n            self._run(yaml_text)\n\n    def test_badFuelEnrichment(self):\n        cases = [\n            (\n                \"\"\"sequence:\\n  1:\\n    - cascade: ['outer fuel', '009-045']\\n      fuelEnrichment: ['a']\\n\"\"\",\n                \"fuelEnrichment\",\n            ),\n            (\n                
\"\"\"sequence:\\n  1:\\n    - cascade: ['outer fuel', '009-045']\\n      fuelEnrichment: [-1]\\n\"\"\",\n                \"fuelEnrichment\",\n            ),\n            (\n                \"\"\"sequence:\\n  1:\\n    - cascade: ['outer fuel', '009-045']\\n      fuelEnrichment: [101]\\n\"\"\",\n                \"fuelEnrichment\",\n            ),\n        ]\n        for yaml_text, msg in cases:\n            with self.subTest(yaml_text=yaml_text):\n                with self.assertRaisesRegex(InputError, msg):\n                    self._run(yaml_text)\n\n    def test_rotationInvalidLocation(self):\n        yaml_text = \"sequence:\\n  1:\\n    - extraRotations: {'badLoc': 30}\\n\"\n        with self.assertRaisesRegex(InputError, \"Invalid location\"):\n            self._run(yaml_text)\n\n    def test_duplicateCascadeLocation(self):\n        yaml_text = (\n            \"sequence:\\n  1:\\n    - cascade: ['outer', '009-045', '008-001']\\n\"\n            \"    - cascade: ['outer', '009-045', '007-002']\\n\"\n        )\n        with self.assertRaisesRegex(InputError, \"009-045\"):\n            self._run(yaml_text)\n\n    def test_invalidCascadeLocation(self):\n        yaml_text = \"sequence:\\n  1:\\n    - cascade: ['outer', 'badLoc']\\n\"\n        with self.assertRaisesRegex(InputError, \"Invalid location\"):\n            self._run(yaml_text)\n\n    def test_missingCycle(self):\n        yaml_text = \"sequence:\\n  1: []\\n  3: []\\n\"\n        with self.assertRaisesRegex(InputError, \"Missing cycle 2\"):\n            self._run(yaml_text)\n\n\nclass TestReadMovesYamlFeatures(unittest.TestCase):\n    \"\"\"Miscellaneous behavior of :meth:`FuelHandler.readMovesYaml`.\"\"\"\n\n    def _read(self, text):\n        with tempfile.NamedTemporaryFile(\"w\", suffix=\".yaml\", delete=False) as tf:\n            tf.write(text)\n            fname = tf.name\n        try:\n            moves, _ = fuelHandlers.FuelHandler.readMovesYaml(fname)\n            return moves\n        finally:\n   
         os.remove(fname)\n\n    def test_cyclesOutOfOrder(self):\n        yaml_text = \"sequence:\\n  1: []\\n  2: []\\n  4: []\\n  3: []\\n\"\n        moves = self._read(yaml_text)\n        self.assertEqual(list(moves), [1, 2, 4, 3])\n\n\nclass FuelHandlerTestHelper(ArmiTestHelper):\n    @classmethod\n    def setUpClass(cls):\n        # prepare the input files. This is important so the unit tests run from wherever\n        # they need to run from.\n        cls.directoryChanger = directoryChangers.DirectoryChanger(TEST_ROOT, dumpOnException=False)\n        cls.directoryChanger.open()\n\n    @classmethod\n    def tearDownClass(cls):\n        cls.directoryChanger.close()\n\n    def setUp(self):\n        \"\"\"\n        Build a dummy reactor without using input files.\n\n        There are some igniters and feeds but none of these have any number densities.\n        \"\"\"\n        self.o, self.r = test_reactors.loadTestReactor(\n            self.directoryChanger.destination,\n            customSettings={\"nCycles\": 4, \"trackAssems\": True},\n        )\n\n        allBlocks = self.r.core.getBlocks()\n        fakeBu = 30.0 / len(allBlocks)\n        for bi, b in enumerate(allBlocks):\n            b.p.flux = 5e10\n            if b.isFuel():\n                b.p.percentBu = fakeBu * bi\n        self.nfeed = len(self.r.core.getAssemblies(Flags.FEED))\n        self.nigniter = len(self.r.core.getAssemblies(Flags.IGNITER))\n        self.nSfp = len(self.r.excore[\"sfp\"])\n\n        # generate a reactor with assemblies\n        # generate components with materials\n        nPins = 271\n\n        fuelDims = {\"Tinput\": 273.0, \"Thot\": 273.0, \"od\": 1.0, \"id\": 0.0, \"mult\": nPins}\n        fuel = components.Circle(\"fuel\", \"UZr\", **fuelDims)\n\n        cladDims = {\"Tinput\": 273.0, \"Thot\": 273.0, \"od\": 1.1, \"id\": 1.0, \"mult\": nPins}\n        clad = components.Circle(\"clad\", \"HT9\", **cladDims)\n\n        interDims = {\n            \"Tinput\": 273.0,\n       
     \"Thot\": 273.0,\n            \"op\": 16.8,\n            \"ip\": 16.0,\n            \"mult\": 1.0,\n        }\n        interSodium = components.Hexagon(\"interCoolant\", \"Sodium\", **interDims)\n\n        # generate a block\n        self.block = blocks.HexBlock(\"TestHexBlock\")\n        self.block.setType(\"fuel\")\n        self.block.setHeight(10.0)\n        self.block.add(fuel)\n        self.block.add(clad)\n        self.block.add(interSodium)\n\n        # generate an assembly\n        self.assembly = assemblies.HexAssembly(\"TestAssemblyType\")\n        self.assembly.spatialGrid = grids.AxialGrid.fromNCells(1)\n        for _ in range(1):\n            self.assembly.add(copy.deepcopy(self.block))\n\n        # copy the assembly to make a list of assemblies and have a reference assembly\n        self.aList = []\n        for _ in range(6):\n            self.aList.append(copy.deepcopy(self.assembly))\n\n        self.refAssembly = copy.deepcopy(self.assembly)\n        self.directoryChanger.open()\n        self.r.core.locateAllAssemblies()\n\n    def tearDown(self):\n        # clean up the test\n        self.block = None\n        self.assembly = None\n        self.aList = None\n        self.refAssembly = None\n        self.r = None\n        self.o = None\n\n        self.directoryChanger.close()\n\n\nclass MockLatticePhysicsInterface(LatticePhysicsInterface):\n    \"\"\"A mock lattice physics interface that does nothing for interactBOC.\"\"\"\n\n    name = \"MockLatticePhysicsInterface\"\n\n    def _getExecutablePath(self):\n        return \"/mock/\"\n\n    def interactBOC(self, cycle=None):\n        pass\n\n\nclass MockXSGM(CrossSectionGroupManager):\n    \"\"\"A mock cross section group manager that does nothing for interactBOC.\"\"\"\n\n    def interactBOC(self, cycle=None):\n        pass\n\n\nclass TestFuelHandler(FuelHandlerTestHelper):\n    @patch(\"armi.reactor.assemblies.Assembly.getSymmetryFactor\")\n    def test_getParamMax(self, mockGetSymmetry):\n      
  a = self.assembly\n        mockGetSymmetry.return_value = 1\n        expectedValue = 0.5\n        a.p[\"kInf\"] = expectedValue\n        for b in a:\n            b.p[\"kInf\"] = expectedValue\n\n        with patch(\n            \"armi.reactor.parameters.parameterDefinitions.Parameter.location\", new_callable=PropertyMock\n        ) as mock_assemblyParameterLocation:\n            mock_assemblyParameterLocation.return_value = ParamLocation.VOLUME_INTEGRATED\n            # symmetry factor == 1\n            res = fuelHandlers.FuelHandler._getParamMax(a, \"kInf\", True)\n            self.assertEqual(res, expectedValue)\n\n            res = fuelHandlers.FuelHandler._getParamMax(a, \"kInf\", False)\n            self.assertEqual(res, expectedValue)\n\n            # symmetry factor == 3\n            mockGetSymmetry.return_value = 3\n            res = fuelHandlers.FuelHandler._getParamMax(a, \"kInf\", True)\n            self.assertAlmostEqual(res, expectedValue * 3)\n\n            res = fuelHandlers.FuelHandler._getParamMax(a, \"kInf\", False)\n            self.assertAlmostEqual(res, expectedValue * 3)\n\n            # not volume integrated and symmetry factor == 3\n            mock_assemblyParameterLocation.return_value = ParamLocation.AVERAGE\n            res = fuelHandlers.FuelHandler._getParamMax(a, \"kInf\", True)\n            self.assertEqual(res, expectedValue)\n\n            res = fuelHandlers.FuelHandler._getParamMax(a, \"kInf\", False)\n            self.assertEqual(res, expectedValue)\n\n    def test_interactBOC(self):\n        # set up mock interface\n        self.o.addInterface(MockLatticePhysicsInterface(self.r, self.o.cs))\n        self.o.removeInterface(interfaceName=\"xsGroups\")\n        self.o.addInterface(MockXSGM(self.r, self.o.cs))\n        # adjust case settings\n        self.o.cs[CONF_RUN_LATTICE_BEFORE_SHUFFLING] = True\n        # run fhi.interactBOC\n        fhi = self.o.getInterface(\"fuelHandler\")\n        with mockRunLogs.BufferLog() as mock:\n 
           fhi.interactBOC()\n            self.assertIn(\"lattice physics before fuel management due to the\", mock._outputStream)\n\n    def test_findHighBu(self):\n        loc = self.r.core.spatialGrid.getLocatorFromRingAndPos(5, 4)\n        a = self.r.core.childrenByLocator[loc]\n        # set burnup way over 1.0, which is otherwise the highest bu in the core\n        a[0].p.percentBu = 50\n\n        fh = fuelHandlers.FuelHandler(self.o)\n        a1 = fh.findAssembly(param=\"percentBu\", compareTo=100, blockLevelMax=True, typeSpec=None)\n        self.assertIs(a, a1)\n\n    @patch(\"armi.physics.fuelCycle.fuelHandlers.FuelHandler.chooseSwaps\")\n    def test_outage(self, mockChooseSwaps):\n        # mock up a fuel handler\n        fh = fuelHandlers.FuelHandler(self.o)\n        mockChooseSwaps.return_value = list(self.r.core.getAssemblies())\n\n        # edge case: cannot perform two outages on the same FuelHandler\n        fh.moved = [self.r.core.getFirstAssembly()]\n        with self.assertRaises(ValueError):\n            fh.outage(factor=1.0)\n\n        # edge case: fail if the shuffle file is missing\n        fh.moved = []\n        self.o.cs = self.o.cs.modified(newSettings={\"explicitRepeatShuffles\": \"fakePath\"})\n        with self.assertRaises(RuntimeError):\n            fh.outage(factor=1.0)\n\n        # a successful run\n        fh.moved = []\n        self.o.cs = self.o.cs.modified(\n            newSettings={\n                \"explicitRepeatShuffles\": \"\",\n                \"fluxRecon\": True,\n                CONF_ASSEMBLY_ROTATION_ALG: \"simpleAssemblyRotation\",\n            }\n        )\n        fh.outage(factor=1.0)\n        self.assertEqual(len(fh.moved), 0)\n\n    def test_outageEdgeCase(self):\n        \"\"\"Check that an error is raised if the list of moved assemblies is invalid.\"\"\"\n\n        class MockFH(fuelHandlers.FuelHandler):\n            def chooseSwaps(self, factor=1.0):\n                self.moved = [None]\n\n        # mock up a 
fuel handler\n        fh = MockFH(self.o)\n\n        # test edge case\n        with self.assertRaises(AttributeError):\n            fh.outage(factor=1.0)\n\n    def test_isAssemblyInAZone(self):\n        # build a fuel handler\n        fh = fuelHandlers.FuelHandler(self.o)\n\n        # test the default value if there are no zones\n        a = self.r.core.getFirstAssembly()\n        self.assertTrue(fh.isAssemblyInAZone(None, a))\n\n        # If our assembly isn't in one of the supplied zones\n        z = Zone(\"test_isAssemblyInAZone\")\n        self.assertFalse(fh.isAssemblyInAZone([z], a))\n\n        # If our assembly IS in one of the supplied zones\n        z.addLoc(a.getLocation())\n        self.assertTrue(fh.isAssemblyInAZone([z], a))\n\n    def test_width(self):\n        \"\"\"Tests the width capability of findAssembly.\"\"\"\n        fh = fuelHandlers.FuelHandler(self.o)\n        assemsByRing = collections.defaultdict(list)\n        for a in self.r.core:\n            assemsByRing[a.spatialLocator.getRingPos()[0]].append(a)\n\n        # instantiate reactor power. more power in more outer rings\n        for ring, power in zip(range(1, 8), range(10, 80, 10)):\n            aList = assemsByRing[ring]\n            for a in aList:\n                sf = a.getSymmetryFactor()  # center assembly is only 1/3rd in the core\n                for b in a:\n                    b.p.power = power / sf\n\n        paramName = \"power\"\n        # 1 ring outer and inner from ring 3\n        a = fh.findAssembly(\n            targetRing=3,\n            width=(1, 0),\n            param=paramName,\n            blockLevelMax=True,\n            compareTo=100,\n        )\n        ring = a.spatialLocator.getRingPos()[0]\n        self.assertEqual(\n            ring,\n            4,\n            \"The highest power ring returned is {0}. 
It should be {1}\".format(ring, 4),\n        )\n        a = fh.findAssembly(targetRing=3, width=(1, 0), param=paramName, blockLevelMax=True, compareTo=0)\n        ring = a.spatialLocator.getRingPos()[0]\n        self.assertEqual(\n            ring,\n            2,\n            \"The lowest power ring returned is {0}. It should be {1}\".format(ring, 2),\n        )\n\n        # 2 rings outer from ring 3\n        a = fh.findAssembly(\n            targetRing=3,\n            width=(2, 1),\n            param=paramName,\n            blockLevelMax=True,\n            compareTo=100,\n        )\n        ring = a.spatialLocator.getRingPos()[0]\n        self.assertEqual(\n            ring,\n            5,\n            \"The highest power ring returned is {0}. It should be {1}\".format(ring, 5),\n        )\n        a = fh.findAssembly(targetRing=3, width=(2, 1), param=paramName, blockLevelMax=True, compareTo=0)\n        ring = a.spatialLocator.getRingPos()[0]\n        self.assertEqual(\n            ring,\n            3,\n            \"The lowest power ring returned is {0}. It should be {1}\".format(ring, 3),\n        )\n\n        # 2 rings inner from ring 3\n        a = fh.findAssembly(\n            targetRing=3,\n            width=(2, -1),\n            param=paramName,\n            blockLevelMax=True,\n            compareTo=100,\n        )\n        ring = a.spatialLocator.getRingPos()[0]\n        self.assertEqual(\n            ring,\n            3,\n            \"The highest power ring returned is {0}. It should be {1}\".format(ring, 3),\n        )\n        a = fh.findAssembly(\n            targetRing=3,\n            width=(2, -1),\n            param=paramName,\n            blockLevelMax=True,\n            compareTo=0,\n        )\n        ring = a.spatialLocator.getRingPos()[0]\n        self.assertEqual(\n            ring,\n            1,\n            \"The lowest power ring returned is {0}. 
It should be {1}\".format(ring, 1),\n        )\n\n    def test_findMany(self):\n        \"\"\"Tests the ``findMany`` and type aspects of the fuel handler.\"\"\"\n        fh = fuelHandlers.FuelHandler(self.o)\n\n        igniters = fh.findAssembly(typeSpec=Flags.IGNITER | Flags.FUEL, findMany=True)\n        feeds = fh.findAssembly(typeSpec=Flags.FEED | Flags.FUEL, findMany=True)\n        fewFeeds = fh.findAssembly(typeSpec=Flags.FEED | Flags.FUEL, findMany=True, maxNumAssems=4)\n\n        self.assertEqual(\n            len(igniters),\n            self.nigniter,\n            \"Found {0} igniters. Should have found {1}\".format(len(igniters), self.nigniter),\n        )\n        self.assertEqual(\n            len(feeds),\n            self.nfeed,\n            \"Found {0} feeds. Should have found {1}\".format(len(igniters), self.nfeed),\n        )\n        self.assertEqual(\n            len(fewFeeds),\n            4,\n            \"Reduced findMany returned {0} assemblies instead of {1}\".format(len(fewFeeds), 4),\n        )\n\n    def test_findInSFP(self):\n        \"\"\"Tests ability to pull from the spent fuel pool.\"\"\"\n        fh = fuelHandlers.FuelHandler(self.o)\n        spent = fh.findAssembly(\n            findMany=True,\n            findFromSfp=True,\n            param=\"percentBu\",\n            compareTo=100,\n            blockLevelMax=True,\n        )\n        self.assertEqual(\n            len(spent),\n            self.nSfp,\n            \"Found {0} assems in SFP. Should have found {1}\".format(len(spent), self.nSfp),\n        )\n        burnups = [a.getMaxParam(\"percentBu\") for a in spent]\n        bu = spent[0].getMaxParam(\"percentBu\")\n        self.assertEqual(\n            bu,\n            max(burnups),\n            \"First assembly does not have the highest burnup ({0}). 
It has ({1})\".format(max(burnups), bu),\n        )\n\n    def test_findByCoords(self):\n        fh = fuelHandlers.FuelHandler(self.o)\n        assem = fh.findAssembly(coords=(0, 0))\n        self.o.r.core.sortAssemsByRing()\n        self.assertIs(assem, self.o.r.core[0])\n\n    def test_findWithMinMax(self):\n        \"\"\"Test the complex min/max comparators.\"\"\"\n        fh = fuelHandlers.FuelHandler(self.o)\n        assem = fh.findAssembly(\n            param=\"percentBu\",\n            compareTo=100,\n            blockLevelMax=True,\n            minParam=\"percentBu\",\n            minVal=(\"percentBu\", 0.1),\n            maxParam=\"percentBu\",\n            maxVal=20.0,\n        )\n        # the burnup should be the maximum bu within\n        # up to a burnup of 20%, which by the simple\n        # dummy data layout should be the 2/3rd block in the blocklist\n        lastB = None\n        for b in self.r.core.iterBlocks(Flags.FUEL):\n            if b.p.percentBu > 20:\n                break\n            lastB = b\n        expected = lastB.parent\n        self.assertIs(assem, expected)\n\n        # test the impossible: an block with burnup less than 110% of its own burnup\n        assem = fh.findAssembly(\n            param=\"percentBu\",\n            compareTo=100,\n            blockLevelMax=True,\n            minParam=\"percentBu\",\n            minVal=(\"percentBu\", 1.1),\n        )\n        self.assertIsNone(assem)\n\n    def runShuffling(self, fh):\n        \"\"\"Shuffle fuel and write out a SHUFFLES.txt file.\"\"\"\n        fh.attachReactor(self.o, self.r)\n\n        # so we don't overwrite the version-controlled armiRun-SHUFFLES.txt\n        self.o.cs.caseTitle = \"armiRun2\"\n        fh.interactBOL()\n\n        # expected assembly position history based on shuffling specification of this test.\n        # do not blindly rebase these reference values. 
test failures using this dict\n        # imply that the assembly shuffling definition has changed.\n        expPosHist = {}\n        # cycle 1 shuffle, (2, 1) moved to SFP\n        expPosHist[\"A0005\"] = [(2, 1), (\"SFP\", \"SFP\"), (\"SFP\", \"SFP\"), (\"SFP\", \"SFP\")]\n        # cycle 1 shuffle, (3, 3) moved to (2, 1) in cascade\n        # cycle 3 shuffle, (2, 1) moved to (5, 4)\n        expPosHist[\"A0018\"] = [(3, 3), (2, 1), (2, 1), (5, 4)]\n        # cycle 1 shuffle, (4, 2) moved to (3, 3) in cascade\n        expPosHist[\"A0019\"] = [(4, 2), (3, 3), (3, 3), (3, 3)]\n        # cycle 1 shuffle, (5, 1) moved to (4, 2) in cascade\n        expPosHist[\"A0020\"] = [(5, 1), (4, 2), (4, 2), (4, 2)]\n        # cycle 1 shuffle, (6, 7) moved to (5, 1) in cascade\n        expPosHist[\"A0044\"] = [(6, 7), (5, 1), (5, 1), (5, 1)]\n        # cycle 1 shuffle, fresh to (6, 7)\n        # cycle 3 shuffle, (6, 7) moved to (5, 2) in cascade\n        expPosHist[\"A0077\"] = [(\"NotCreatedYet\", \"NotCreatedYet\"), (6, 7), (6, 7), (5, 2)]\n        # cycle 2 shuffle, (2, 2) moved to (5, 3)\n        expPosHist[\"A0009\"] = [(2, 2), (2, 2), (5, 3), (5, 3)]\n        # cycle 2 shuffle, (3, 2) moved to (2, 2) in cascade\n        expPosHist[\"A0014\"] = [(3, 2), (3, 2), (2, 2), (2, 2)]\n        # cycle 2 shuffle, (4, 1) moved to (3, 2) in cascade\n        expPosHist[\"A0015\"] = [(4, 1), (4, 1), (3, 2), (3, 2)]\n        # cycle 2 shuffle, (5, 4) moved to (4, 1) in cascade\n        expPosHist[\"A0034\"] = [(5, 4), (5, 4), (4, 1), (4, 1)]\n        # cycle 2 shuffle, (6, 4) moved to (5, 4) in cascade then discharged to SFP\n        expPosHist[\"A0040\"] = [(6, 4), (6, 4), (5, 4), (\"SFP\", \"SFP\")]\n        # cycle 2 shuffle, fresh to (6, 4)\n        expPosHist[\"A0078\"] = [(\"NotCreatedYet\", \"NotCreatedYet\"), (\"NotCreatedYet\", \"NotCreatedYet\"), (6, 4), (6, 4)]\n        # cycle 1 shuffle, (5, 3) moved to SFP\n        expPosHist[\"A0029\"] = [(5, 3), (5, 3), (\"SFP\", \"SFP\"), 
(\"SFP\", \"SFP\")]\n        # cycle 3 shuffle, (3, 1) moved to (2, 1) in cascade\n        expPosHist[\"A0010\"] = [(3, 1), (3, 1), (3, 1), (2, 1)]\n        # cycle 3 shuffle, (4, 3) moved to (3, 1) in cascade\n        expPosHist[\"A0024\"] = [(4, 3), (4, 3), (4, 3), (3, 1)]\n        # cycle 3 shuffle, (5, 2) moved to (4, 3) in cascade\n        expPosHist[\"A0025\"] = [(5, 2), (5, 2), (5, 2), (4, 3)]\n        # cycle 3 shuffle, fresh to (6, 7)\n        expPosHist[\"A0079\"] = [\n            (\"NotCreatedYet\", \"NotCreatedYet\"),\n            (\"NotCreatedYet\", \"NotCreatedYet\"),\n            (\"NotCreatedYet\", \"NotCreatedYet\"),\n            (6, 7),\n        ]\n\n        for cycle in range(4):\n            self.r.p.cycle = cycle\n            fh.cycle = cycle\n            fh.manageFuel(cycle)\n            for a in self.r.excore[\"sfp\"]:\n                self.assertEqual(a.getLocation(), \"SFP\")\n            for b in self.r.core.iterBlocks(Flags.FUEL):\n                self.assertGreater(b.p.kgHM, 0.0, \"b.p.kgHM not populated!\")\n                self.assertGreater(b.p.kgFis, 0.0, \"b.p.kgFis not populated!\")\n\n        # check assemblies in core\n        for a in self.r.core:\n            self._checkAssemblyPositionHistory(a, expPosHist)\n        # check assemblies in SFP\n        for a in list(self.r.excore[\"sfp\"]):\n            self._checkAssemblyPositionHistory(a, expPosHist)\n\n        # check getter methods based on assembly location history\n        for aName, posList in expPosHist.items():\n            for i, rp in enumerate(posList):\n                if rp[0] is not None and rp[0] not in assemblies.Assembly.NOT_IN_CORE:\n                    r, p = rp\n                    self.assertEqual(self.r.core.getAssemblyWithRingPosHist(r, p, i).getName(), aName)\n\n        fh.interactEOL()\n\n    def _checkAssemblyPositionHistory(self, a, answerKey):\n        if a.getName() not in answerKey:  # check that location history is the same position\n            
self.assertEqual(len(set(a.p.ringPosHist)), 1)\n        else:\n            self.assertListEqual(a.p.ringPosHist, answerKey[a.getName()])\n\n    def test_repeatShuffles(self):\n        \"\"\"Loads the ARMI test reactor with a custom shuffle logic file and shuffles assemblies twice.\n\n        .. test:: Execute user-defined shuffle operations based on a reactor model.\n            :id: T_ARMI_SHUFFLE\n            :tests: R_ARMI_SHUFFLE\n\n        Notes\n        -----\n        The custom shuffle logic is executed by\n        :py:meth:`armi.physics.fuelCycle.fuelHandlerInterface.FuelHandlerInterface.manageFuel` in\n        :py:meth:`armi.physics.fuelCycle.tests.test_fuelHandlers.TestFuelHandler.runShuffling`. There are two primary\n        assertions: spent fuel pool assemblies are in the correct location and the assemblies were shuffled into their\n        correct locations. This process is repeated twice to ensure repeatability.\n        \"\"\"\n        # check labels before shuffling:\n        for a in self.r.excore[\"sfp\"]:\n            self.assertEqual(a.getLocation(), \"SFP\")\n\n        # do some shuffles\n        fh = self.o.getInterface(\"fuelHandler\")\n        self.runShuffling(fh)  # changes caseTitle\n\n        # Make sure the generated shuffles file matches the tracked one.  This will need to be updated if/when more\n        # assemblies are added to the test reactor but must be done carefully. 
Do not blindly rebaseline this file.\n        self.compareFilesLineByLine(\n            os.path.join(TESTING_ROOT, \"resources\", \"armiRun-SHUFFLES.txt\"), \"armiRun2-SHUFFLES.txt\"\n        )\n\n        # store locations of each assembly\n        firstPassResults = {}\n        for a in self.r.core:\n            firstPassResults[a.getLocation()] = a.getName()\n            self.assertNotIn(a.getLocation(), a.NOT_IN_CORE)\n\n        # reset core to BOL state\n        # reset assembly counter to get the same assem nums.\n        self.setUp()\n\n        newSettings = {CONF_PLOT_SHUFFLE_ARROWS: True}\n        # now repeat shuffles\n        newSettings[\"explicitRepeatShuffles\"] = os.path.join(TESTING_ROOT, \"resources\", \"armiRun-SHUFFLES.txt\")\n        self.o.cs = self.o.cs.modified(newSettings=newSettings)\n\n        fh = self.o.getInterface(\"fuelHandler\")\n\n        self.runShuffling(fh)\n\n        # make sure the shuffle was repeated perfectly\n        for a in self.r.core:\n            self.assertEqual(a.getName(), firstPassResults[a.getLocation()])\n\n        for a in self.r.excore[\"sfp\"]:\n            self.assertEqual(a.getLocation(), \"SFP\")\n\n        # Do some cleanup, since the fuelHandler Interface has code that gets around the TempDirectoryChanger\n        os.remove(\"armiRun2-SHUFFLES.txt\")\n        os.remove(\"armiRun2.shuffles_0.png\")\n        os.remove(\"armiRun2.shuffles_1.png\")\n        os.remove(\"armiRun2.shuffles_2.png\")\n        os.remove(\"armiRun2.shuffles_3.png\")\n\n    def test_readMoves(self):\n        \"\"\"\n        Depends on the ``shuffleLogic`` created by ``repeatShuffles``.\n\n        See Also\n        --------\n        runShuffling : creates the shuffling file to be read in.\n        \"\"\"\n        numblocks = len(self.r.core.getFirstAssembly())\n        fh = fuelHandlers.FuelHandler(self.o)\n        moves = fh.readMoves(os.path.join(TESTING_ROOT, \"resources\", \"armiRun-SHUFFLES.txt\"))\n        
self.assertEqual(len(moves), 4)\n        firstMove = moves[1][0]\n        self.assertEqual(firstMove.fromLoc, \"002-001\")\n        self.assertEqual(firstMove.toLoc, \"SFP\")\n        self.assertEqual(len(firstMove.enrichList), numblocks)\n        self.assertEqual(firstMove.assemType, \"igniter fuel\")\n        self.assertIsNone(firstMove.ringPosCycle)\n\n        # check the move to the SFP\n        sfpMove = moves[2][-1]\n        self.assertEqual(sfpMove.fromLoc, \"005-003\")\n        self.assertEqual(sfpMove.toLoc, \"SFP\")\n        self.assertIsNone(sfpMove.ringPosCycle)\n\n        # make sure we fail hard if the file doesn't exist\n        with self.assertRaises(RuntimeError):\n            fh.readMoves(\"totall_fictional_file.txt\")\n\n    def test_readMovesYaml(self):\n        fh = fuelHandlers.FuelHandler(self.o)\n        moves, swaps = fh.readMovesYaml(os.path.join(TESTING_ROOT, \"resources\", \"armiRun-SHUFFLES.yaml\"))\n        self.maxDiff = None\n        expected = {\n            1: [\n                AssemblyMove(\"LoadQueue\", \"009-045\", [0.0, 0.12, 0.14, 0.15, 0.0], \"igniter fuel\"),\n                AssemblyMove(\"009-045\", \"008-004\"),\n                AssemblyMove(\"008-004\", \"007-001\"),\n                AssemblyMove(\"007-001\", \"006-005\"),\n                AssemblyMove(\"006-005\", \"Delete\"),\n                AssemblyMove(\"009-045\", \"009-045\", rotation=60.0),\n                AssemblyMove(\"LoadQueue\", \"004-004\", [0.0, 0.12, 0.14, 0.15, 0.0], \"middle fuel\"),\n                AssemblyMove(\"004-004\", \"005-005\"),\n                AssemblyMove(\"005-005\", \"006-006\"),\n                AssemblyMove(\"006-006\", \"Delete\"),\n            ],\n            2: [\n                AssemblyMove(\"LoadQueue\", \"009-045\", [0.0, 0.12, 0.14, 0.15, 0.0], \"igniter fuel\"),\n                AssemblyMove(\"009-045\", \"008-004\"),\n                AssemblyMove(\"008-004\", \"007-001\"),\n                AssemblyMove(\"007-001\", 
\"006-005\"),\n                AssemblyMove(\"006-005\", \"Delete\"),\n                AssemblyMove(\"LoadQueue\", \"004-004\", [0.0, 0.12, 0.14, 0.15, 0.0], \"middle fuel\"),\n                AssemblyMove(\"004-004\", \"005-005\"),\n                AssemblyMove(\"005-005\", \"006-006\"),\n                AssemblyMove(\"006-006\", \"Delete\"),\n                AssemblyMove(\"009-045\", \"009-045\", rotation=60.0),\n                AssemblyMove(\"SFP\", \"005-003\", ringPosCycle=[6, 5, 0]),\n                AssemblyMove(\"005-003\", \"SFP\"),\n            ],\n            3: [\n                AssemblyMove(\"LoadQueue\", \"009-045\", [0.0, 0.12, 0.14, 0.15, 0.0], \"igniter fuel\"),\n                AssemblyMove(\"009-045\", \"008-004\"),\n                AssemblyMove(\"008-004\", \"007-001\"),\n                AssemblyMove(\"007-001\", \"006-005\"),\n                AssemblyMove(\"006-005\", \"Delete\"),\n                AssemblyMove(\"SFP\", \"002-002\", ringPosCycle=[5, 3, 1]),\n                AssemblyMove(\"002-002\", \"SFP\"),\n            ],\n        }\n        self.assertEqual(moves, expected)\n        self.assertEqual(swaps, {3: [(\"009-045\", \"008-004\"), (\"007-001\", \"006-005\")]})\n\n    def test_performShuffleYamlIntegration(self):\n        fh = fuelHandlers.FuelHandler(self.o)\n        yaml_text = \"\"\"\n        sequence:\n            1:\n                - swap: [\"009-045\", \"008-004\"]\n                - cascade: [\"igniter fuel\", \"009-045\", \"008-004\", \"007-001\", \"006-005\"]\n                  fuelEnrichment: [0, 0.12, 0.14, 0.15, 0]\n                - extraRotations: {\"009-045\": 60}\n        \"\"\"\n        with tempfile.NamedTemporaryFile(\"w\", suffix=\".yaml\", delete=False) as tf:\n            tf.write(yaml_text)\n            fname = tf.name\n        try:\n            locs = [\"009-045\", \"008-004\", \"007-001\", \"006-005\"]\n            before = {loc: self.r.core.getAssemblyWithStringLocation(loc).getName() for loc in locs}\n     
       self.r.p.cycle = 1\n            self.o.cs = self.o.cs.modified(newSettings={CONF_SHUFFLE_SEQUENCE_FILE: fname, CONF_TRACK_ASSEMS: False})\n            self.r.core._trackAssems = False\n            fh.outage()\n\n            fresh = self.r.core.getAssemblyWithStringLocation(\"008-004\")\n            self.assertEqual(fresh.getType(), \"igniter fuel\")\n            self.assertNotIn(fresh.getName(), before.values())\n\n            rotated = self.r.core.getAssemblyWithStringLocation(\"009-045\")\n            self.assertEqual(rotated.getName(), before[\"009-045\"])\n            self.assertAlmostEqual(rotated.p.orientation[2], 60.0)\n\n            self.assertEqual(\n                self.r.core.getAssemblyWithStringLocation(\"007-001\").getName(),\n                before[\"008-004\"],\n            )\n            self.assertEqual(\n                self.r.core.getAssemblyWithStringLocation(\"006-005\").getName(),\n                before[\"007-001\"],\n            )\n            self.assertIsNone(self.r.excore[\"sfp\"].getAssembly(before[\"006-005\"]))\n        finally:\n            os.remove(fname)\n\n    def test_yamlSfpOverridesTrackAssems(self):\n        fh = fuelHandlers.FuelHandler(self.o)\n        yaml_text = \"\"\"\n        sequence:\n            1:\n                - cascade: [\"igniter fuel\", \"009-045\", \"SFP\"]\n                  fuelEnrichment: [0, 0.12, 0.14, 0.15, 0]\n        \"\"\"\n        with tempfile.NamedTemporaryFile(\"w\", suffix=\".yaml\", delete=False) as tf:\n            tf.write(yaml_text)\n            fname = tf.name\n        try:\n            before = self.r.core.getAssemblyWithStringLocation(\"009-045\").getName()\n            self.r.p.cycle = 1\n            self.o.cs = self.o.cs.modified(newSettings={CONF_SHUFFLE_SEQUENCE_FILE: fname, CONF_TRACK_ASSEMS: False})\n            self.r.core._trackAssems = False\n            fh.outage()\n\n            self.assertFalse(self.r.core._trackAssems)\n            
self.assertIsNotNone(self.r.excore[\"sfp\"].getAssembly(before))\n        finally:\n            os.remove(fname)\n\n    def test_readMovesYaml_loadFromSfp(self):\n        assem = self.r.excore[\"sfp\"].getChildren()[0]\n        # fake the assembly location history\n        assem.p.ringPosHist = [(2, 3), (4, 5), (5, 7)]\n        yaml_text = \"\"\"\n        sequence:\n            1:\n                - cascade: [\"SFP\", \"005-003\", \"SFP\"]\n                  ringPosCycle: [5, 7, 2]\n        \"\"\"\n        with directoryChangers.TemporaryDirectoryChanger():\n            fname = \"moves.yaml\"\n            with open(fname, \"w\", encoding=\"utf-8\") as stream:\n                stream.write(yaml_text)\n            moves, _ = fuelHandlers.FuelHandler.readMovesYaml(fname)\n            expected = {\n                1: [\n                    AssemblyMove(\"SFP\", \"005-003\", [], None, [5, 7, 2]),\n                    AssemblyMove(\"005-003\", \"SFP\"),\n                ]\n            }\n            self.assertEqual(moves, expected)\n\n    def test_performShuffleYaml_loadFromSfp(self):\n        fh = fuelHandlers.FuelHandler(self.o)\n        sfpAssem = self.r.excore[\"sfp\"].getChildren()[0]\n        # fake the assembly location history\n        ringPosHistInts = [(2, 3), (4, 5), (5, 7)]\n        sfpAssem.p.ringPosHist = [(str(x).encode(), str(y).encode()) for x, y in ringPosHistInts]\n        yaml_text = \"\"\"\n        sequence:\n            1:\n                - cascade: [\"SFP\", \"009-045\", \"SFP\"]\n                  ringPosCycle: [5, 7, 2]\n        \"\"\"\n        with directoryChangers.TemporaryDirectoryChanger():\n            fname = \"moves.yaml\"\n            with open(fname, \"w\", encoding=\"utf-8\") as stream:\n                stream.write(yaml_text)\n            before = self.r.core.getAssemblyWithStringLocation(\"009-045\").getName()\n            self.r.p.cycle = 1\n            self.o.cs = self.o.cs.modified(newSettings={CONF_SHUFFLE_SEQUENCE_FILE: 
fname})\n            fh.outage()\n            assem = self.r.core.getAssemblyWithStringLocation(\"009-045\")\n            self.assertEqual(assem.getName(), sfpAssem.getName())\n            cycle0Loc = (\"2\".encode(), \"3\".encode())\n            self.assertEqual(assem.p.ringPosHist[0], cycle0Loc)\n            self.assertEqual(assem.p.ringPosHist[1], (9, 45))\n            self.assertEqual(len(assem.p.ringPosHist), 2)  # truncated by logic in fuelHandlers\n            newSfpAssem = self.r.excore[\"sfp\"].getAssembly(before)\n            self.assertIsNotNone(newSfpAssem)\n            self.assertEqual(newSfpAssem.p.ringPosHist[0], (9, 45))\n\n    def test_performShuffleYaml_loadFromSfp2(self):\n        fh = fuelHandlers.FuelHandler(self.o)\n        fname = os.path.join(TESTING_ROOT, \"resources\", \"armiRun-SHUFFLES.yaml\")\n        self.o.cs = self.o.cs.modified(newSettings={CONF_SHUFFLE_SEQUENCE_FILE: fname})\n        # fake the assembly location history\n        with directoryChangers.TemporaryDirectoryChanger():\n            # _moves, _ = fh.readMovesYaml()\n\n            before1 = self.r.core.getAssemblyWithStringLocation(\"005-003\")\n            before2 = self.r.core.getAssemblyWithStringLocation(\"006-005\")\n            for cycle in range(4):\n                self.r.p.cycle = cycle\n                fh.outage()\n\n            # check that the following ringPosHist exist in the SFP\n            inSfp = [\n                [6, 6, 0],\n                [6, 6, 1],\n                [6, 5, 1],\n                [6, 5, 2],\n                [2, 2, 2],\n            ]\n            for a in self.r.excore[\"sfp\"].getChildren():\n                print(a, a.p.ringPosHist)\n            for ring, pos, cycle in inSfp:\n                found = False\n                for a in self.r.excore[\"sfp\"].getChildren():\n                    if a.p.ringPosHist[cycle] == (ring, pos):\n                        found = True\n                        break\n                
self.assertTrue(found, f\"ringPosHist == ({ring}, {pos}, {cycle}) not found in SFP!\")\n\n            # check that SFP is in the ringPosHist of (2, 2) and (5, 3)\n            # check that the assembly that ended up in 002-002 is the same that started in 005-003\n            # check that the assembly that ended up in 005-003 is the same that started in 006-005\n            for loc, refA in [\n                (\"002-002\", before1),\n                (\"005-003\", before2),\n            ]:\n                a = self.r.core.getAssemblyWithStringLocation(loc)\n                self.assertIn((\"SFP\", \"SFP\"), a.p.ringPosHist)\n                self.assertEqual(\n                    refA.getName(), a.getName(), \"Expected {a} to be the same assembly as {refA} based on shuffling!\"\n                )\n\n    def test_processMoveList(self):\n        fh = fuelHandlers.FuelHandler(self.o)\n        moves = fh.readMoves(os.path.join(TESTING_ROOT, \"resources\", \"armiRun-SHUFFLES.txt\"))\n        result = fh.processMoveList(moves[2])\n        self.assertIn(None, result.ringPosCycles)\n        self.assertTrue(all(\"SFP\" not in chain for chain in result.loadChains))\n        self.assertTrue(all(\"LoadQueue\" not in chain for chain in result.loadChains))\n        self.assertFalse(result.loopChains)\n        self.assertFalse(result.rotations)\n\n    def test_processMoveList_yaml(self):\n        fh = fuelHandlers.FuelHandler(self.o)\n        moves, _ = fh.readMovesYaml(os.path.join(TESTING_ROOT, \"resources\", \"armiRun-SHUFFLES.yaml\"))\n        result = fh.processMoveList(moves[1])\n        self.assertEqual(len(result.loadChains), 2)\n        self.assertTrue(any(result.enriches))\n        self.assertTrue(result.rotations)\n\n    def test_getFactorList(self):\n        fh = fuelHandlers.FuelHandler(self.o)\n        factors, _ = fh.getFactorList(0)\n        self.assertIn(\"eqShuffles\", factors)\n\n    def test_linPowByPin(self):\n        _fh = fuelHandlers.FuelHandler(self.o)\n       
 _hist = self.o.getInterface(\"history\")\n        newSettings = {CONF_ASSEM_ROTATION_STATIONARY: True}\n        self.o.cs = self.o.cs.modified(newSettings=newSettings)\n        assem = self.o.r.core.getFirstAssembly(Flags.FUEL)\n        b = next(assem.iterBlocks(Flags.FUEL))\n\n        b.p.linPowByPin = [1, 2, 3]\n        self.assertEqual(type(b.p.linPowByPin), np.ndarray)\n\n        b.p.linPowByPin = np.array([1, 2, 3])\n        self.assertEqual(type(b.p.linPowByPin), np.ndarray)\n\n    def test_linPowByPinNeutron(self):\n        _fh = fuelHandlers.FuelHandler(self.o)\n        _hist = self.o.getInterface(\"history\")\n        newSettings = {CONF_ASSEM_ROTATION_STATIONARY: True}\n        self.o.cs = self.o.cs.modified(newSettings=newSettings)\n        assem = self.o.r.core.getFirstAssembly(Flags.FUEL)\n        b = next(assem.iterBlocks(Flags.FUEL))\n\n        b.p.linPowByPinNeutron = [1, 2, 3]\n        self.assertEqual(type(b.p.linPowByPinNeutron), np.ndarray)\n\n        b.p.linPowByPinNeutron = np.array([1, 2, 3])\n        self.assertEqual(type(b.p.linPowByPinNeutron), np.ndarray)\n\n    def test_linPowByPinGamma(self):\n        _fh = fuelHandlers.FuelHandler(self.o)\n        _hist = self.o.getInterface(\"history\")\n        newSettings = {CONF_ASSEM_ROTATION_STATIONARY: True}\n        self.o.cs = self.o.cs.modified(newSettings=newSettings)\n        assem = self.o.r.core.getFirstAssembly(Flags.FUEL)\n        b = next(assem.iterBlocks(Flags.FUEL))\n\n        b.p.linPowByPinGamma = [1, 2, 3]\n        self.assertEqual(type(b.p.linPowByPinGamma), np.ndarray)\n\n        b.p.linPowByPinGamma = np.array([1, 2, 3])\n        self.assertEqual(type(b.p.linPowByPinGamma), np.ndarray)\n\n    def test_transferStationaryBlocks(self):\n        \"\"\"Test the _transferStationaryBlocks method.\n\n        .. 
test:: User-specified blocks can remain in place during shuffling\n            :id: T_ARMI_SHUFFLE_STATIONARY0\n            :tests: R_ARMI_SHUFFLE_STATIONARY\n        \"\"\"\n        # grab stationary block flags\n        sBFList = self.r.core.stationaryBlockFlagsList\n\n        # grab the assemblies\n        assems = self.r.core.getAssemblies(Flags.FUEL)\n\n        # grab two arbitrary assemblies\n        a1 = assems[1]\n        a2 = assems[2]\n\n        # grab the stationary blocks pre swap\n        a1PreSwapStationaryBlocks = [\n            [block.getName(), block.spatialLocator.k] for block in a1 if any(block.hasFlags(sbf) for sbf in sBFList)\n        ]\n\n        a2PreSwapStationaryBlocks = [\n            [block.getName(), block.spatialLocator.k] for block in a2 if any(block.hasFlags(sbf) for sbf in sBFList)\n        ]\n\n        # swap the stationary blocks\n        fh = fuelHandlers.FuelHandler(self.o)\n        fh._transferStationaryBlocks(a1, a2)\n\n        # grab the stationary blocks post swap\n        a1PostSwapStationaryBlocks = [\n            [block.getName(), block.spatialLocator.k] for block in a1 if any(block.hasFlags(sbf) for sbf in sBFList)\n        ]\n\n        a2PostSwapStationaryBlocks = [\n            [block.getName(), block.spatialLocator.k] for block in a2 if any(block.hasFlags(sbf) for sbf in sBFList)\n        ]\n\n        # validate the stationary blocks have swapped locations and are aligned\n        self.assertEqual(a1PostSwapStationaryBlocks, a2PreSwapStationaryBlocks)\n        self.assertEqual(a2PostSwapStationaryBlocks, a1PreSwapStationaryBlocks)\n\n    def test_transStatBlocksBadNumbers(self):\n        \"\"\"\n        Test the _transferStationaryBlocks method for the case where the input assemblies have different numbers of\n        stationary blocks.\n        \"\"\"\n        # grab stationary block flags\n        sBFList = self.r.core.stationaryBlockFlagsList\n\n        # grab the assemblies\n        assems = 
self.r.core.getAssemblies(Flags.FUEL)\n\n        # grab two arbitrary assemblies\n        a1 = assems[1]\n        a2 = assems[2]\n\n        # change a block in assembly 1 to be flagged as a stationary block\n        for block in a1:\n            if not any(block.hasFlags(sbf) for sbf in sBFList):\n                a1[block.spatialLocator.k].setType(a1[block.spatialLocator.k].p.type, sBFList[0])\n                self.assertTrue(any(block.hasFlags(sbf) for sbf in sBFList))\n                break\n\n        # try to swap stationary blocks between assembly 1 and 2\n        fh = fuelHandlers.FuelHandler(self.o)\n        with self.assertRaises(ValueError):\n            fh._transferStationaryBlocks(a1, a2)\n\n    def test_transStatBlockUnaligned(self):\n        \"\"\"\n        Test the _transferStationaryBlocks method for the case where the input assemblies have unaligned locations of\n        stationary blocks.\n        \"\"\"\n        # grab stationary block flags\n        sBFList = self.r.core.stationaryBlockFlagsList\n\n        # grab the assemblies\n        assems = self.r.core.getAssemblies(Flags.FUEL)\n\n        # grab two arbitrary assemblies\n        a1 = assems[1]\n        a2 = assems[2]\n\n        # move location of a stationary flag in assembly 1\n        for block in a1:\n            if any(block.hasFlags(sbf) for sbf in sBFList):\n                # change flag of first identified stationary block to fuel\n                a1[block.spatialLocator.k].setType(a1[block.spatialLocator.k].p.type, Flags.FUEL)\n                self.assertTrue(a1[block.spatialLocator.k].hasFlags(Flags.FUEL))\n                # change next or previous block flag to stationary flag\n                try:\n                    a1[block.spatialLocator.k + 1].setType(a1[block.spatialLocator.k + 1].p.type, sBFList[0])\n                    self.assertTrue(any(a1[block.spatialLocator.k + 1].hasFlags(sbf) for sbf in sBFList))\n                except Exception:\n                    
a1[block.spatialLocator.k - 1].setType(a1[block.spatialLocator.k - 1].p.type, sBFList[0])\n                    self.assertTrue(any(a1[block.spatialLocator.k - 1].hasFlags(sbf) for sbf in sBFList))\n                break\n\n        # try to swap stationary blocks between assembly 1 and 2\n        fh = fuelHandlers.FuelHandler(self.o)\n        with self.assertRaises(ValueError):\n            fh._transferStationaryBlocks(a1, a2)\n\n    def test_transStatBlockBadHeights(self):\n        \"\"\"\n        Test the _transferStationaryBlocks method for the case where the total height of the stationary blocks is\n        unequal between input assemblies.\n        \"\"\"\n        # grab stationary block flags\n        sBFList = self.r.core.stationaryBlockFlagsList\n\n        # grab the assemblies\n        assems = self.r.core.getAssemblies(Flags.FUEL)\n\n        # grab two arbitrary assemblies\n        a1 = assems[1]\n        a2 = assems[2]\n\n        # change height of a stationary block in assembly 1\n        for block in a1:\n            if any(block.hasFlags(sbf) for sbf in sBFList):\n                # change height of first identified stationary block\n                nomHeight = block.getHeight()\n                a1[block.spatialLocator.k].setHeight(nomHeight - 1e-5)\n\n        # try to swap stationary blocks between assembly 1 and 2\n        fh = fuelHandlers.FuelHandler(self.o)\n        with mockRunLogs.BufferLog() as mock:\n            fh._transferStationaryBlocks(a1, a2)\n            self.assertIn(\"top elevation of stationary\", mock.getStdout())\n\n    def test_dischargeSwap(self):\n        \"\"\"Remove an assembly from the core and replace it with one from the SFP.\n\n        .. 
test:: User-specified blocks can remain in place during shuffling\n            :id: T_ARMI_SHUFFLE_STATIONARY1\n            :tests: R_ARMI_SHUFFLE_STATIONARY\n        \"\"\"\n        # grab stationary block flags\n        sBFList = self.r.core.stationaryBlockFlagsList\n\n        # grab an arbitrary fuel assembly from the core and from the SFP\n        a1 = self.r.core.getFirstAssembly(Flags.FUEL)\n        a2 = self.r.excore[\"sfp\"].getChildrenWithFlags(Flags.FUEL)[0]\n\n        # grab the stationary blocks pre swap\n        a1PreSwapStationaryBlocks = [\n            [block.getName(), block.spatialLocator.k] for block in a1 if any(block.hasFlags(sbf) for sbf in sBFList)\n        ]\n\n        a2PreSwapStationaryBlocks = [\n            [block.getName(), block.spatialLocator.k] for block in a2 if any(block.hasFlags(sbf) for sbf in sBFList)\n        ]\n\n        # test discharging assembly 1 and replacing with assembly 2\n        fh = fuelHandlers.FuelHandler(self.o)\n        fh.dischargeSwap(a2, a1)\n        self.assertTrue(a1.getLocation() in a1.NOT_IN_CORE)\n        self.assertTrue(a2.getLocation() not in a2.NOT_IN_CORE)\n\n        # grab the stationary blocks post swap\n        a1PostSwapStationaryBlocks = [\n            [block.getName(), block.spatialLocator.k] for block in a1 if any(block.hasFlags(sbf) for sbf in sBFList)\n        ]\n\n        a2PostSwapStationaryBlocks = [\n            [block.getName(), block.spatialLocator.k] for block in a2 if any(block.hasFlags(sbf) for sbf in sBFList)\n        ]\n\n        # validate the stationary blocks have swapped locations correctly and are aligned\n        self.assertEqual(a1PostSwapStationaryBlocks, a2PreSwapStationaryBlocks)\n        self.assertEqual(a2PostSwapStationaryBlocks, a1PreSwapStationaryBlocks)\n\n    def test_dischargeSwapStationaryBlocks(self):\n        \"\"\"\n        Test the _transferStationaryBlocks method for the case where the input assemblies have\n        different numbers as well as unaligned 
locations of stationary blocks.\n        \"\"\"\n        # grab stationary block flags\n        sBFList = self.r.core.stationaryBlockFlagsList\n\n        # grab an arbitrary fuel assembly from the core and from the SFP\n        a1 = self.r.core.getFirstAssembly(Flags.FUEL)\n        a2 = self.r.excore[\"sfp\"].getChildren(Flags.FUEL)[0]\n\n        # change a block in assembly 1 to be flagged as a stationary block\n        for block in a1:\n            if not any(block.hasFlags(sbf) for sbf in sBFList):\n                a1[block.spatialLocator.k].setType(a1[block.spatialLocator.k].p.type, sBFList[0])\n                self.assertTrue(any(block.hasFlags(sbf) for sbf in sBFList))\n                break\n\n        # try to discharge assembly 1 and replace with assembly 2\n        fh = fuelHandlers.FuelHandler(self.o)\n        with self.assertRaises(ValueError):\n            fh.dischargeSwap(a2, a1)\n\n        # re-initialize assemblies\n        self.setUp()\n        a1 = self.r.core.getFirstAssembly(Flags.FUEL)\n        a2 = self.r.excore[\"sfp\"].getChildren(Flags.FUEL)[0]\n\n        # move location of a stationary flag in assembly 1\n        for block in a1:\n            if any(block.hasFlags(sbf) for sbf in sBFList):\n                # change flag of first identified stationary block to fuel\n                a1[block.spatialLocator.k].setType(a1[block.spatialLocator.k].p.type, Flags.FUEL)\n                self.assertTrue(a1[block.spatialLocator.k].hasFlags(Flags.FUEL))\n                # change next or previous block flag to stationary flag\n                try:\n                    a1[block.spatialLocator.k + 1].setType(a1[block.spatialLocator.k + 1].p.type, sBFList[0])\n                    self.assertTrue(any(a1[block.spatialLocator.k + 1].hasFlags(sbf) for sbf in sBFList))\n                except Exception:\n                    a1[block.spatialLocator.k - 1].setType(a1[block.spatialLocator.k - 1].p.type, sBFList[0])\n                    
self.assertTrue(any(a1[block.spatialLocator.k - 1].hasFlags(sbf) for sbf in sBFList))\n                break\n\n        # try to discharge assembly 1 and replace with assembly 2\n        with self.assertRaises(ValueError):\n            fh.dischargeSwap(a2, a1)\n\n    def test_getAssembliesInRings(self):\n        fh = fuelHandlers.FuelHandler(self.o)\n        aList0 = fh._getAssembliesInRings([0], Flags.FUEL, False, None, False)\n        self.assertEqual(len(aList0), 1)\n\n        aList1 = fh._getAssembliesInRings([0, 1, 2], Flags.FUEL, False, None, False)\n        self.assertEqual(len(aList1), 3)\n\n        aList2 = fh._getAssembliesInRings([0, 1, 2], Flags.FUEL, True, None, False)\n        self.assertEqual(len(aList2), 3)\n\n        aList3 = fh._getAssembliesInRings([0, 1, 2, \"SFP\"], Flags.FUEL, True, None, False)\n        self.assertEqual(len(aList3), 4)\n\n        aList4 = fh._getAssembliesInRings([0, 1, 2], Flags.FUEL, False, None, True)\n        self.assertEqual(len(aList4), 3)\n\n\nclass TestFuelPlugin(unittest.TestCase):\n    \"\"\"Tests that make sure the plugin is being discovered well.\"\"\"\n\n    def test_settingsAreDiscovered(self):\n        cs = caseSettings.Settings()\n        nm = settings.CONF_JUMP_RING_NUM\n        self.assertEqual(cs[nm], 8)\n"
  },
  {
    "path": "armi/physics/fuelCycle/tests/test_hexAssemblyFuelMgmtUtils.py",
    "content": "# Copyright 2022 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests some fuel handling tools, specific to hex-assembly reactors.\"\"\"\n\nfrom armi.physics.fuelCycle import hexAssemblyFuelMgmtUtils as hexUtils\nfrom armi.tests import ArmiTestHelper\nfrom armi.utils import directoryChangers\n\n\nclass TestHexAssemMgmtTools(ArmiTestHelper):\n    def setUp(self):\n        self.td = directoryChangers.TemporaryDirectoryChanger()\n        self.td.__enter__()\n\n    def tearDown(self):\n        self.td.__exit__(None, None, None)\n\n    def test_buildConvergentRingSchedule(self):\n        schedule, widths = hexUtils.buildConvergentRingSchedule(1, 17, 0)\n        self.assertEqual(schedule, [1, 17])\n        self.assertEqual(widths, [16, 1])\n\n        schedule, widths = hexUtils.buildConvergentRingSchedule(3, 17, 1)\n        self.assertEqual(schedule, [3, 17])\n        self.assertEqual(widths, [14, 1])\n\n        schedule, widths = hexUtils.buildConvergentRingSchedule(12, 16, 0.5)\n        self.assertEqual(schedule, [12, 16])\n        self.assertEqual(widths, [4, 1])\n\n    def test_buildRingSchedule(self):\n        # simple divergent\n        schedule, widths = hexUtils.buildRingSchedule(9, 1, 9)\n        self.assertEqual(schedule, [9, 8, 7, 6, 5, 4, 3, 2, 1])\n        zeroWidths = [0, 0, 0, 0, 0, 0, 0, 0, 0]\n        self.assertEqual(widths, zeroWidths)\n\n        # simple with no jumps\n        schedule, widths = 
hexUtils.buildRingSchedule(9, 9, 1, jumpRingTo=1)\n        self.assertEqual(schedule, [1, 2, 3, 4, 5, 6, 7, 8, 9])\n        self.assertEqual(widths, zeroWidths)\n\n        # simple with 1 jump\n        schedule, widths = hexUtils.buildRingSchedule(9, 9, 1, jumpRingFrom=6)\n        self.assertEqual(schedule, [5, 4, 3, 2, 1, 6, 7, 8, 9])\n        self.assertEqual(widths, zeroWidths)\n\n        # 1 jump plus auto-correction to core size\n        schedule, widths = hexUtils.buildRingSchedule(9, 1, 17, jumpRingFrom=5)\n        self.assertEqual(schedule, [6, 7, 8, 9, 5, 4, 3, 2, 1])\n        self.assertEqual(widths, zeroWidths)\n\n        # crash on invalid jumpring\n        with self.assertRaises(ValueError):\n            schedule, widths = hexUtils.buildRingSchedule(9, 1, 17, jumpRingFrom=0)\n\n        # test 4: Mid way jumping\n        schedule, widths = hexUtils.buildRingSchedule(9, 1, 9, jumpRingTo=6, jumpRingFrom=3)\n        self.assertEqual(schedule, [9, 8, 7, 4, 5, 6, 3, 2, 1])\n        self.assertEqual(widths, zeroWidths)\n"
  },
  {
    "path": "armi/physics/fuelCycle/tests/test_utils.py",
    "content": "# Copyright 2024 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport copy\nfrom unittest import TestCase\n\nimport numpy as np\n\nfrom armi.physics.fuelCycle import utils\nfrom armi.reactor.blocks import Block\nfrom armi.reactor.components import Circle\nfrom armi.reactor.flags import Flags\nfrom armi.reactor.grids import IndexLocation, MultiIndexLocation\n\n\nclass FuelCycleUtilsTests(TestCase):\n    \"\"\"Tests for geometry indifferent fuel cycle routines.\"\"\"\n\n    N_PINS = 169\n\n    def setUp(self):\n        self.block = Block(\"test block\")\n        self.fuel = Circle(\n            \"test pin\",\n            material=\"UO2\",\n            Tinput=20,\n            Thot=20,\n            mult=self.N_PINS,\n            id=0.0,\n            od=1.0,\n        )\n\n        clad = Circle(\n            \"clad\",\n            material=\"HT9\",\n            Tinput=20,\n            Thot=300,\n            id=1.0,\n            od=1.1,\n        )\n        self.block.add(self.fuel)\n        self.block.add(clad)\n        # Force no fuel flags\n        self.fuel.p.flags = Flags.PIN\n\n    def test_maxBurnupLocationFromComponents(self):\n        \"\"\"Test that the ``Component.p.pinPercentBu`` parameter can reveal max burnup location.\"\"\"\n        self.fuel.spatialLocator = MultiIndexLocation(None)\n        locations = []\n        for i in range(self.N_PINS):\n            loc = IndexLocation(i, 0, 0, None)\n            
self.fuel.spatialLocator.append(loc)\n            locations.append(loc)\n        self.fuel.p.pinPercentBu = np.ones(self.N_PINS, dtype=float)\n\n        # Pick an arbitrary index for the pin with the most burnup\n        maxBuIndex = self.N_PINS // 3\n        self.fuel.p.pinPercentBu[maxBuIndex] *= 2\n        expectedLoc = locations[maxBuIndex]\n        actual = utils.maxBurnupLocator(self.block)\n        self.assertEqual(actual, expectedLoc)\n\n    def test_singleLocatorWithBurnup(self):\n        \"\"\"Test that a single component with burnup can be used to find the highest burnup.\"\"\"\n        freeComp = Circle(\"free fuel\", material=\"UO2\", Tinput=200, Thot=200, id=0, od=1, mult=1)\n        freeComp.spatialLocator = IndexLocation(2, 4, 0, None)\n        freeComp.p.pinPercentBu = [\n            0.01,\n        ]\n        loc = utils.maxBurnupLocator([freeComp])\n        self.assertIs(loc, freeComp.spatialLocator)\n\n    def test_maxBurnupLocatorWithNoBurnup(self):\n        \"\"\"Ensure we catch an error if no burnup is found across components.\"\"\"\n        with self.assertRaisesRegex(ValueError, \"No burnups found\"):\n            utils.maxBurnupLocator([])\n\n    def test_maxBurnupLocatorMismatchedData(self):\n        \"\"\"Ensure pin burnup and locations must agree.\"\"\"\n        freeComp = Circle(\"free fuel\", material=\"UO2\", Tinput=200, Thot=200, id=0, od=1, mult=1)\n        freeComp.spatialLocator = IndexLocation(2, 4, 0, None)\n        freeComp.p.pinPercentBu = [\n            0.01,\n            0.02,\n        ]\n        with self.assertRaisesRegex(ValueError, \"Pin burnup.*pin locations.*differ\"):\n            utils.maxBurnupLocator([freeComp])\n\n    def test_assemblyHasPinPower(self):\n        \"\"\"Test the ability to check if an assembly has fuel pin powers.\"\"\"\n        fakeAssem = [self.block]\n        # No fuel blocks, no pin power on blocks => no pin powers\n        self.assertFalse(utils.assemblyHasFuelPinBurnup(fakeAssem))\n\n        # 
Yes fuel blocks, no pin power on blocks => no pin powers\n        self.block.p.flags |= Flags.FUEL\n        self.assertFalse(utils.assemblyHasFuelPinPowers(fakeAssem))\n\n        # Yes fuel blocks, yes pin power on blocks => yes pin powers\n        self.block.p.linPowByPin = np.arange(self.N_PINS, dtype=float)\n        self.assertTrue(utils.assemblyHasFuelPinPowers(fakeAssem))\n\n        # Yes fuel blocks, yes pin power assigned but all zeros => no pin powers\n        self.block.p.linPowByPin = np.zeros(self.N_PINS, dtype=float)\n        self.assertFalse(utils.assemblyHasFuelPinPowers(fakeAssem))\n\n    def test_assemblyHasPinBurnups(self):\n        \"\"\"Test the ability to check if an assembly has fuel pin burnup.\"\"\"\n        fakeAssem = [self.block]\n        # No fuel components => no assembly burnups\n        self.assertFalse(self.block.getChildrenWithFlags(Flags.FUEL))\n        self.assertFalse(utils.assemblyHasFuelPinBurnup(fakeAssem))\n        # No fuel with burnup => no assembly burnups\n        self.block.p.flags |= Flags.FUEL\n        self.fuel.p.flags |= Flags.FUEL\n        self.assertFalse(utils.assemblyHasFuelPinBurnup(fakeAssem))\n        # Fuel pin has burnup => yes assembly burnup\n        self.fuel.p.pinPercentBu = np.arange(self.N_PINS, dtype=float)\n        self.assertTrue(utils.assemblyHasFuelPinBurnup(fakeAssem))\n        # Fuel pin has empty burnup => no assembly burnup\n        self.fuel.p.pinPercentBu = np.zeros(self.N_PINS)\n        self.assertFalse(utils.assemblyHasFuelPinBurnup(fakeAssem))\n        # Yes burnup but no fuel flags => no assembly burnup\n        self.fuel.p.flags ^= Flags.FUEL\n        self.assertFalse(self.fuel.hasFlags(Flags.FUEL))\n        self.fuel.p.pinPercentBu = np.arange(self.N_PINS, dtype=float)\n        self.assertFalse(utils.assemblyHasFuelPinBurnup(fakeAssem))\n\n    def test_maxBurnupBlock(self):\n        \"\"\"Test the ability to find maximum burnup block in an assembly.\"\"\"\n        reflector = 
Block(\"reflector\")\n        assem = [reflector, self.block]\n        self.block.p.percentBuPeak = 40\n        expected = utils.maxBurnupBlock(assem)\n        self.assertIs(expected, self.block)\n\n        # add a new block with more burnup higher up the stack\n        hotter = copy.deepcopy(self.block)\n        hotter.p.percentBuPeak *= 2\n        expected = utils.maxBurnupBlock([reflector, self.block, hotter, self.block, reflector])\n        self.assertIs(expected, hotter)\n\n    def test_maxBurnupBlockNoBlocks(self):\n        \"\"\"Ensure a more helpful error is provided for empty sequence.\"\"\"\n        with self.assertRaisesRegex(ValueError, \"Error finding max burnup\"):\n            utils.maxBurnupBlock([])\n\n    def test_maxBurnupBlockNoBurnup(self):\n        \"\"\"Ensure that we will not return a block with zero burnup.\"\"\"\n        self.block.p.percentBuPeak = 0.0\n        with self.assertRaisesRegex(ValueError, \"Error finding max burnup\"):\n            utils.maxBurnupBlock([self.block])\n"
  },
  {
    "path": "armi/physics/fuelCycle/utils.py",
    "content": "# Copyright 2024 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Geometric agnostic routines that are useful for fuel cycle analysis on pin-type reactors.\"\"\"\n\nimport operator\nimport typing\n\nimport numpy as np\n\nfrom armi import runLog\nfrom armi.reactor.flags import Flags\nfrom armi.reactor.grids import IndexLocation, MultiIndexLocation\n\nif typing.TYPE_CHECKING:\n    from armi.reactor.blocks import Block\n    from armi.reactor.components import Component\n\n\ndef assemblyHasFuelPinPowers(a: typing.Iterable[\"Block\"]) -> bool:\n    \"\"\"Determine if an assembly has pin powers.\n\n    These are necessary for determining rotation and may or may\n    not be present on all assemblies.\n\n    Parameters\n    ----------\n    a : Assembly\n        Assembly in question\n\n    Returns\n    -------\n    bool\n        If at least one fuel block in the assembly has pin powers.\n    \"\"\"\n    # Avoid using Assembly.getChildrenWithFlags(Flags.FUEL)\n    # because that creates an entire list where we may just need the first\n    # fuel block\n    fuelBlocks = filter(lambda b: b.hasFlags(Flags.FUEL), a)\n    return any(b.hasFlags(Flags.FUEL) and np.any(b.p.linPowByPin) for b in fuelBlocks)\n\n\ndef assemblyHasFuelPinBurnup(a: typing.Iterable[\"Block\"]) -> bool:\n    \"\"\"Determine if an assembly has pin burnups.\n\n    These are necessary for determining rotation and may or may not\n    be present on all assemblies.\n\n    
Parameters\n    ----------\n    a : Assembly\n        Assembly in question\n\n    Returns\n    -------\n    bool\n        If a block with pin burnup was found.\n\n    Notes\n    -----\n    Checks if any ``Component.p.pinPercentBu`` is set and contains non-zero data\n    on a fuel component in the block.\n    \"\"\"\n    # Avoid using Assembly.getChildrenWithFlags(Flags.FUEL)\n    # because that creates an entire list where we may just need the first\n    # fuel block. Same for avoiding Block.getChildrenWithFlags.\n    hasFuelFlags = lambda o: o.hasFlags(Flags.FUEL)\n    for b in filter(hasFuelFlags, a):\n        for c in filter(hasFuelFlags, b):\n            if np.any(c.p.pinPercentBu):\n                return True\n    return False\n\n\ndef maxBurnupLocator(\n    children: typing.Iterable[\"Component\"],\n) -> IndexLocation:\n    \"\"\"Find the location of the pin with highest burnup by looking at components.\n\n    Parameters\n    ----------\n    children : iterable[Component]\n        Iterator over children with a spatial locator and ``pinPercentBu`` parameter\n\n    Returns\n    -------\n    IndexLocation\n        Location of the pin with the highest burnup.\n\n    Raises\n    ------\n    ValueError\n        If no children have burnup, or the burnup and locators differ.\n    \"\"\"\n    maxBu = 0\n    maxLocation = None\n    withBurnupAndLocs = filter(\n        lambda c: c.spatialLocator is not None and c.p.pinPercentBu is not None,\n        children,\n    )\n    for child in withBurnupAndLocs:\n        pinBu = child.p.pinPercentBu\n        if isinstance(child.spatialLocator, MultiIndexLocation):\n            locations = child.spatialLocator\n        else:\n            locations = [child.spatialLocator]\n        if len(locations) != pinBu.size:\n            raise ValueError(\n                f\"Pin burnup (n={pinBu.size}) and pin locations (n={len(locations)}) \"\n                f\"on {child} differ: {locations=} :: {pinBu=}\"\n            )\n        myMaxIX = 
pinBu.argmax()\n        myMaxBu = pinBu[myMaxIX]\n        if myMaxBu > maxBu:\n            maxBu = myMaxBu\n            maxLocation = locations[myMaxIX]\n    if maxLocation is not None:\n        return maxLocation\n    raise ValueError(\"No burnups found!\")\n\n\ndef maxBurnupBlock(a: typing.Iterable[\"Block\"]) -> \"Block\":\n    \"\"\"Find the block that contains the pin with the highest burnup.\"\"\"\n    buGetter = operator.attrgetter(\"p.percentBuPeak\")\n    # Discard any blocks with zero burnup\n    blocksWithBurnup = filter(buGetter, a)\n    try:\n        return max(blocksWithBurnup, key=buGetter)\n    except Exception as ee:\n        msg = f\"Error finding max burnup block from {a}\"\n        runLog.error(msg)\n        raise ValueError(msg) from ee\n"
  },
  {
    "path": "armi/physics/fuelPerformance/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nGeneric fuel performance plugin package.\n\nFuel performance deals with addressing fuel system limits and predicting behaviors that are coupled\nto other physics within the reactor. Often fuel performance models address chemical, thermal and\nmechanical behaviors of the fuel system.\n\nThe following general phenomena fall into the fuel performance category of physics for solid fuel\n(e.g., SFR, LWR, TRISO):\n\n* chemical degradation on the inside of fuel cladding such as fuel-clad chemical interaction (FCCI)\n* corrosion or erosion processes on the outside of the fuel cladding\n* the fuel-clad mechanical interaction (FCMI) resulting in cladding stress and strain\n* pressurization of the fuel pin due to released fission gases\n* high temperatures of the fuel which affect material properties and feedback during accident\n  scenarios\n\nFuel performance is typically coupled with thermal analysis because the thermal conditions of the\nfuel affects the performance and properties of the fuel change with temperature and burnup.\n\nIn many cases, fuel performance is coupled with neutronic analysis as well, because the fission\ngases are strong neutron absorbers. In some reactors, significant composition changes during\nirradiation can influence neutronics as well (e.g. 
sodium thermal bond being squeezed out of pins).\nFinally, fuel temperatures impact the Doppler reactivity coefficient.\n\"\"\"\n\nfrom armi.physics.fuelPerformance.plugin import FuelPerformancePlugin  # noqa: F401\n"
  },
  {
    "path": "armi/physics/fuelPerformance/executers.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nBaseline fuel performance related executers and options.\n\nThese can be subclassed in fuel performance plugins to perform\nfuel performance physics calculations.\n\nFuel performance is described in the\n:py:mod:`Fuel Performance subpackage <armi.physics.fuelPerformance>`\n\"\"\"\n\nfrom armi.physics import executers\nfrom armi.physics.fuelPerformance.settings import (\n    CONF_AXIAL_EXPANSION,\n    CONF_BOND_REMOVAL,\n    CONF_CLADDING_STRAIN,\n    CONF_CLADDING_WASTAGE,\n    CONF_FGR_REMOVAL,\n    CONF_FGYF,\n    CONF_FUEL_PERFORMANCE_ENGINE,\n)\n\n\nclass FuelPerformanceOptions(executers.ExecutionOptions):\n    \"\"\"Options relevant to all fuel performance engines.\"\"\"\n\n    def __init__(self, label=None):\n        executers.ExecutionOptions.__init__(self, label)\n        self.fuelPerformanceEngine = None\n        self.axialExpansion = None\n        self.bondRemoval = None\n        self.fissionGasRemoval = None\n        self.claddingWastage = None\n        self.claddingStrain = None\n\n    def fromUserSettings(self, cs):\n        \"\"\"Copy relevant settings values from cs into this object.\"\"\"\n        self.fuelPerformanceEngine = cs[CONF_FUEL_PERFORMANCE_ENGINE]\n        self.axialExpansion = cs[CONF_AXIAL_EXPANSION]\n        self.bondRemoval = cs[CONF_BOND_REMOVAL]\n        self.fissionGasRemoval = cs[CONF_FGR_REMOVAL]\n        
self.claddingWastage = cs[CONF_CLADDING_WASTAGE]\n        self.claddingStrain = cs[CONF_CLADDING_STRAIN]\n        self.fissionGasYieldFraction = cs[CONF_FGYF]\n\n    def fromReactor(self, reactor):\n        \"\"\"Load options from reactor.\"\"\"\n\n\nclass FuelPerformanceExecuter(executers.DefaultExecuter):\n    \"\"\"\n    Prep, execute, and process a fuel performance solve.\n\n    This uses the ``DefaultExecuter`` with the hope that most\n    subclasses can use that run loop. As more fuel performance plugins are\n    built we can reconsider this hierarchy.\n    \"\"\"\n\n    def __init__(self, options, reactor):\n        executers.DefaultExecuter.__init__(self, options, reactor)\n"
  },
  {
    "path": "armi/physics/fuelPerformance/parameters.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Parameter definitions for fuel performance plugins.\"\"\"\n\nfrom armi.reactor import parameters\nfrom armi.reactor.blocks import Block\nfrom armi.reactor.parameters import ParamLocation\nfrom armi.utils import units\n\n\ndef getFuelPerformanceParameterDefinitions():\n    \"\"\"Return ParameterDefinitionCollections for each appropriate ArmiObject.\"\"\"\n    return {Block: _getFuelPerformanceBlockParams()}\n\n\ndef _getFuelPerformanceBlockParams():\n    pDefs = parameters.ParameterDefinitionCollection()\n    with pDefs.createBuilder(default=0.0, location=ParamLocation.AVERAGE) as pb:\n        pb.defParam(\n            \"fuelCladLocked\",\n            units=units.UNITLESS,\n            default=False,\n            description=\"Boolean to indicate if the fuel is locked with the clad.\"\n            \" This is used to determine the expansion constraints for the fuel during\"\n            \" thermal and/or burn-up expansion of the fuel and cladding materials.\",\n        )\n\n        def gasReleaseFraction(self, value):\n            if value < 0.0 or value > 1.0:\n                raise ValueError(f\"Cannot set a gas release fraction of {value} outside of the bounds of [0.0, 1.0]\")\n            self._p_gasReleaseFraction = value\n\n        pb.defParam(\n            \"gasReleaseFraction\",\n            setter=gasReleaseFraction,\n            
units=units.UNITLESS,\n            description=\"Fraction of generated fission gas that no longer exists in the block.\",\n            categories=[\"eq cumulative shift\"],\n        )\n\n        def bondRemoved(self, value):\n            if value < 0.0 or value > 1.0:\n                raise ValueError(f\"Cannot set a bond removed of {value} outside of the bounds of [0.0, 1.0]\")\n            self._p_bondRemoved = value\n\n        pb.defParam(\n            \"bondRemoved\",\n            setter=bondRemoved,\n            units=units.UNITLESS,\n            description=\"Fraction of thermal bond between fuel and clad that has been pushed out.\",\n            categories=[\"eq cumulative shift\"],\n        )\n\n        pb.defParam(\n            \"cladWastage\",\n            units=units.MICRONS,\n            description=\"Total cladding wastage from inner and outer surfaces.\",\n            location=ParamLocation.AVERAGE,\n            categories=[\"eq cumulative shift\"],\n        )\n\n        pb.defParam(\n            \"totalCladStrain\",\n            units=units.PERCENT,\n            description=\"Total diametral clad strain.\",\n            categories=[\"eq cumulative shift\"],\n        )\n\n        pb.defParam(\n            \"axialGrowthPct\",\n            units=units.PERCENT,\n            description=\"Axial growth percentage\",\n            categories=[\"eq cumulative shift\"],\n        )\n\n        pb.defParam(\n            \"fpPeakFuelTemp\",\n            units=units.DEGC,\n            description=\"Fuel performance calculated peak fuel temperature.\",\n            location=ParamLocation.AVERAGE,\n        )\n\n        pb.defParam(\n            \"fpAveFuelTemp\",\n            units=units.DEGC,\n            description=\"Fuel performance calculated average fuel temperature.\",\n            location=ParamLocation.AVERAGE,\n        )\n\n        pb.defParam(\n            \"gasPorosity\",\n            units=units.UNITLESS,\n            description=\"Fraction of fuel 
volume that is occupied by gas pores\",\n            default=0.0,\n            categories=[\"eq cumulative shift\"],\n        )\n\n        pb.defParam(\n            \"liquidPorosity\",\n            units=units.UNITLESS,\n            description=\"Fraction of fuel volume that is occupied by liquid filled pores\",\n            default=0.0,\n        )\n\n    return pDefs\n"
  },
  {
    "path": "armi/physics/fuelPerformance/plugin.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Generic Fuel Performance Plugin.\"\"\"\n\nfrom armi import interfaces, plugins\nfrom armi.physics.fuelPerformance import settings\n\nORDER = interfaces.STACK_ORDER.CROSS_SECTIONS\n\n\nclass FuelPerformancePlugin(plugins.ArmiPlugin):\n    \"\"\"Plugin for fuel performance.\"\"\"\n\n    @staticmethod\n    @plugins.HOOKIMPL\n    def exposeInterfaces(cs):\n        \"\"\"Expose the fuel performance interfaces.\"\"\"\n        return []\n\n    @staticmethod\n    @plugins.HOOKIMPL\n    def defineSettings():\n        \"\"\"Define settings for fuel performance.\"\"\"\n        return settings.defineSettings()\n\n    @staticmethod\n    @plugins.HOOKIMPL\n    def defineSettingsValidators(inspector):\n        \"\"\"Define settings inspections for fuel performance.\"\"\"\n        return settings.defineValidators(inspector)\n\n    @staticmethod\n    @plugins.HOOKIMPL\n    def defineParameters():\n        \"\"\"Define parameters for the plugin.\"\"\"\n        from armi.physics.fuelPerformance import parameters\n\n        return parameters.getFuelPerformanceParameterDefinitions()\n"
  },
  {
    "path": "armi/physics/fuelPerformance/settings.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Settings related to fuel performance.\"\"\"\n\nfrom armi.settings import setting\nfrom armi.settings.settingsValidation import Query\n\nCONF_AXIAL_EXPANSION = \"axialExpansion\"\nCONF_BOND_REMOVAL = \"bondRemoval\"\nCONF_CLADDING_STRAIN = \"claddingStrain\"\nCONF_CLADDING_WASTAGE = \"claddingWastage\"\nCONF_FGR_REMOVAL = \"fgRemoval\"\nCONF_FGYF = \"fissionGasYieldFraction\"\nCONF_FUEL_PERFORMANCE_ENGINE = \"fuelPerformanceEngine\"\n\n\ndef defineSettings():\n    \"\"\"Define generic fuel performance settings.\"\"\"\n    settings = [\n        setting.Setting(\n            CONF_FUEL_PERFORMANCE_ENGINE,\n            default=\"\",\n            label=\"Fuel Performance Engine\",\n            description=(\n                \"Fuel performance engine that determines fission gas removal, bond removal,\"\n                \" axial growth, wastage, and cladding strain.\"\n            ),\n            options=[\"\"],\n        ),\n        setting.Setting(\n            CONF_FGYF,\n            default=0.25,\n            label=\"Fission Gas Yield Fraction\",\n            description=(\n                \"The fraction of gaseous atoms produced per fission event, assuming a fission product yield of 2.0\"\n            ),\n        ),\n        setting.Setting(\n            CONF_AXIAL_EXPANSION,\n            default=False,\n            label=\"Fuel Axial Expansion\",\n            
description=\"Perform axial fuel expansion. This will adjust fuel block lengths.\",\n        ),\n        setting.Setting(\n            CONF_BOND_REMOVAL,\n            default=False,\n            label=\"Thermal Bond Removal\",\n            description=\"Toggles fuel performance bond removal. This will remove thermal bond from the fuel.\",\n        ),\n        setting.Setting(\n            CONF_FGR_REMOVAL,\n            default=False,\n            label=\"Fission Gas Removal\",\n            description=\"Toggles fuel performance fission gas removal.  This will remove fission gas from the fuel.\",\n        ),\n        setting.Setting(\n            CONF_CLADDING_WASTAGE,\n            default=False,\n            label=\"Cladding Wastage\",\n            description=\"Evaluate cladding wastage. \",\n        ),\n        setting.Setting(\n            CONF_CLADDING_STRAIN,\n            default=False,\n            label=\"Cladding Strain\",\n            description=\"Evaluate cladding strain. \",\n        ),\n    ]\n    return settings\n\n\ndef defineValidators(inspector):\n    return [\n        Query(\n            lambda: (\n                inspector.cs[CONF_AXIAL_EXPANSION]\n                or inspector.cs[CONF_BOND_REMOVAL]\n                or inspector.cs[CONF_FGR_REMOVAL]\n                or inspector.cs[CONF_CLADDING_WASTAGE]\n                or inspector.cs[CONF_CLADDING_STRAIN]\n            )\n            and inspector.cs[CONF_FUEL_PERFORMANCE_ENGINE] == \"\",\n            \"A fuel performance behavior has been selected but no fuel performance engine is selected.\",\n            \"\",\n            inspector.NO_ACTION,\n        ),\n    ]\n"
  },
  {
    "path": "armi/physics/fuelPerformance/tests/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"
  },
  {
    "path": "armi/physics/fuelPerformance/tests/test_executers.py",
    "content": "# Copyright 2021 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for generic fuel performance executers.\"\"\"\n\nimport unittest\n\nfrom armi.physics.fuelPerformance.executers import (\n    CONF_BOND_REMOVAL,\n    FuelPerformanceOptions,\n)\nfrom armi.settings.caseSettings import Settings\n\n\nclass TestFuelPerformanceOptions(unittest.TestCase):\n    def test_fuelPerformanceOptions(self):\n        fpo = FuelPerformanceOptions(\"test_fuelPerformanceOptions\")\n        self.assertEqual(fpo.label, \"test_fuelPerformanceOptions\")\n\n        cs = Settings()\n        fpo.fromUserSettings(cs)\n        self.assertEqual(fpo.bondRemoval, cs[CONF_BOND_REMOVAL])\n"
  },
  {
    "path": "armi/physics/fuelPerformance/tests/test_fuelPerformancePlugin.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for generic fuel performance plugin.\"\"\"\n\nfrom armi.physics.fuelPerformance.plugin import FuelPerformancePlugin\nfrom armi.tests.test_plugins import TestPlugin\n\n\nclass TestFuelPerformancePlugin(TestPlugin):\n    plugin = FuelPerformancePlugin\n"
  },
  {
    "path": "armi/physics/fuelPerformance/tests/test_fuelPerformanceSymmetry.py",
    "content": "# Copyright 2025 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nAudit symmetry-aware parameters in fuel performance.\n\nSee Also\n--------\n    armi.testing.symmetryTesting\n\"\"\"\n\nfrom armi.physics.fuelPerformance.parameters import getFuelPerformanceParameterDefinitions\nfrom armi.reactor.blocks import Block\nfrom armi.testing import symmetryTesting\n\n\nclass TestFPParamSymmetry(symmetryTesting.BasicArmiSymmetryTestHelper):\n    def setUp(self):\n        pluginParameters = getFuelPerformanceParameterDefinitions()\n        self.blockParamsToTest = pluginParameters[Block]\n        self.parameterOverrides = {\n            \"gasReleaseFraction\": 0.5,\n            \"bondRemoved\": 0.5,\n        }\n        super().setUp()\n"
  },
  {
    "path": "armi/physics/fuelPerformance/tests/test_fuelPerformanceUtils.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for fuel performance utilities.\"\"\"\n\nimport unittest\n\nfrom armi.physics.fuelPerformance import utils\nfrom armi.reactor.flags import Flags\nfrom armi.reactor.tests import test_blocks\n\n\nclass TestFuelPerformanceUtils(unittest.TestCase):\n    def test_applyFuelDisplacement(self):\n        displacement = 0.01\n        block = test_blocks.loadTestBlock()\n        fuel = block.getComponent(Flags.FUEL)\n        originalHotODInCm = fuel.getDimension(\"od\")\n        utils.applyFuelDisplacement(block, displacement)\n        finalHotODInCm = fuel.getDimension(\"od\")\n\n        self.assertAlmostEqual(finalHotODInCm, originalHotODInCm + 2 * displacement)\n\n    def test_gasConductivityCorrection_morph0(self):\n        temp = 500  # C\n        porosity = 0.4\n\n        # No correction\n        chi = utils.gasConductivityCorrection(temp, porosity, 0)\n\n        ref = 1.0\n        self.assertAlmostEqual(chi, ref, 5)\n\n    def test_gasConductivityCorrection_morph1(self):\n        temp = 500  # C\n        porosity = 0.4\n\n        # Spherical Porosity, Bauer equation\n        chi = utils.gasConductivityCorrection(temp, porosity, 1)\n\n        ref = (1.0 - porosity) ** (1.5 * 1.00)\n        self.assertAlmostEqual(chi, ref, 5)\n\n    def test_gasConductivityCorrection_morph2(self):\n        temp = 500  # C\n        porosity = 0.4\n\n        # Irregular 
Porosity, Bauer equation\n        chi = utils.gasConductivityCorrection(temp, porosity, 2)\n\n        ref = (1.0 - porosity) ** (1.5 * 1.72)\n        self.assertAlmostEqual(chi, ref, 5)\n\n    def test_gasConductivityCorrection_morph3(self):\n        temp = 500  # C\n        porosity = 0.4\n\n        # Mixed Morphology, low temp\n        chi = utils.gasConductivityCorrection(temp, porosity, 3)\n        ref = (1.0 - porosity) ** (1.5 * 1.72)\n        self.assertAlmostEqual(chi, ref, 5)\n\n        # Mixed Morphology, high temp\n        temp = 700\n        chi = utils.gasConductivityCorrection(temp, porosity, 3)\n        ref = (1.0 - porosity) ** (1.5 * 1.00)\n        self.assertAlmostEqual(chi, ref, 5)\n\n    def test_gasConductivityCorrection_morph4(self):\n        temp = 500  # C\n        porosity = 0.4\n\n        # maxwell-eucken\n        chi = utils.gasConductivityCorrection(temp, porosity, 4)\n\n        ref = (1.0 - porosity) / (1.0 + 1.5 * porosity)\n        self.assertAlmostEqual(chi, ref, 5)\n"
  },
  {
    "path": "armi/physics/fuelPerformance/utils.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Fuel performance utilities.\"\"\"\n\nfrom armi.reactor.flags import Flags\n\n\ndef applyFuelDisplacement(block, displacementInCm):\n    r\"\"\"\n    Expands the fuel radius in a pin by a number of cm.\n\n    Assumes there's thermal bond in it to displace.\n    This adjusts the dimension of the fuel while conserving its mass.\n\n    The bond mass is not conserved; it is assumed to be pushed up into the plenum\n    but the modeling of this is not done yet by this method.\n\n    .. warning:: A 0.5% buffer is included to avoid overlaps. This should be analyzed\n        in detail as a methodology before using in any particular analysis.\n\n    .. math::\n\n        n V = n\\prime V\\prime\n        n\\prime = \\frac{V}{V\\prime} n\n\n    \"\"\"\n    clad = block.getComponent(Flags.CLAD)\n    fuel = block.getComponent(Flags.FUEL)\n    originalHotODInCm = fuel.getDimension(\"od\")\n    cladID = clad.getDimension(\"id\")\n    # do not swell past cladding ID! 
(actually leave 0.5% buffer for thermal expansion)\n    newHotODInCm = min(cladID * 0.995, originalHotODInCm + displacementInCm * 2)\n    fuel.setDimension(\"od\", newHotODInCm, retainLink=True, cold=False)\n    # reduce number density of fuel to conserve number of atoms (and mass)\n    fuel.changeNDensByFactor(originalHotODInCm**2 / newHotODInCm**2)\n\n\ndef gasConductivityCorrection(tempInC: float, porosity: float, morphology: int = 2):\n    \"\"\"\n    Calculate the correction to conductivity for a porous, gas-filled solid.\n\n    Parameters\n    ----------\n    tempInC\n        temperature in celsius\n    porosity\n        fraction of open/total volume\n    morphology, optional\n        correlation to use regarding pore morphology (default 2 is irregular\n        porosity for conservatism)\n\n    Returns\n    -------\n    chi : float\n        correction to conductivity due to porosity (should be multiplied)\n\n    Notes\n    -----\n    Morphology is treated different by different models:\n\n    0, no porosity correction\n    1, bauer equation, spherical porosity\n    2, bauer equation, irregular porosity\n    3, bauer equation, mixed morphology, above 660, spherical. Below 660, irregular\n    4, maxwell-eucken equation, beta=1.5\n\n    Source1 : In-Pile Measurement of the Thermal Conductivity of Irradiated Metallic Fuel, T.H. Bauer J.W. Holland.\n              Nuclear Technology, Vol. 110, 1995. Pages 407-421\n    Source2 : The Porosity Dependence of the Thermal Conductivity for Nuclear Fuels, G. Ondracek B. Schulz.\n              Journal of Nuclear Materials, Vol. 46, 1973. 
Pages 253-258\n    \"\"\"\n    if morphology == 0:\n        chi = 1.0\n    elif morphology == 1:\n        epsilon = 1.0\n        chi = (1.0 - porosity) ** ((3.0 / 2.0) * epsilon)\n    elif morphology == 2:\n        epsilon = 1.72\n        chi = (1.0 - porosity) ** ((3.0 / 2.0) * epsilon)\n    elif morphology == 3:\n        epsilon = 1.0\n        if tempInC < 660:\n            epsilon = 1.72\n        else:\n            epsilon = 1.00\n        chi = (1.0 - porosity) ** ((3.0 / 2.0) * epsilon)\n    elif morphology == 4:\n        chi = (1.0 - porosity) / (1.0 + 1.5 * porosity)\n\n    return chi\n"
  },
  {
    "path": "armi/physics/neutronics/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThe neutronics physics package in the ARMI framework.\n\nNeutronics encompasses the modeling of nuclear chain reactions and their associated transmutation\nand decay.\n\"\"\"\n\n# ruff: noqa: F401\nfrom enum import IntEnum\n\nfrom armi.physics.neutronics.const import (\n    ALL,\n    FLUXFILES,\n    GAMMA,\n    INPUTOUTPUT,\n    NEUTRON,\n    NEUTRONGAMMA,\n    RESTARTFILES,\n)\nfrom armi.physics.neutronics.plugin import NeutronicsPlugin\n\n# ARC and CCCC cross section file format names\nCOMPXS = \"COMPXS\"\nPMATRX = \"PMATRX\"\nGAMISO = \"GAMISO\"\nPMATRX_EXT = \"pmatrx\"\nGAMISO_EXT = \"gamiso\"\nISOTXS = \"ISOTXS\"\nDIF3D = \"DIF3D\"\n\n# Constants for neutronics calculation types\nADJOINT_CALC = \"adjoint\"\nREAL_CALC = \"real\"\nADJREAL_CALC = \"both\"\n\n# Constants for boundary conditions\n\n# All external boundary conditions are set to zero outward current\nINFINITE = \"Infinite\"\n\n# \"Planar\" external boundaries conditions are set to zero outward current\nREFLECTIVE = \"Reflective\"\n\n# Generalized boundary conditions D * PHI PRIME + A * PHI = 0 where A is user-specified constant,\n# D is the diffusion coefficient, PHI PRIME and PHI are the outward current and flux at the\n# external boundaries.\nGENERAL_BC = \"Generalized\"\n\n# The following boundary conditions are three approximations of the vacuum boundary condition\n# in diffusion 
theory.\n#    'Extrapolated': sets A to 0.4692 (in generalized BC) to have the flux vanishing at\n#                    0.7104*transport mean free path through linear extrapolation. Derived for plane\n#                    geometries - should be valid for complex geometries unless radius of curvature is\n#                    comparable to the mean free path.\n#    'ZeroSurfaceFlux': flux vanishes at the external boundary.\n#    'ZeroInwardCurrent': set A to 0.5 (in generalized BC) to have Jminus = 0 at the external boundaries.\nEXTRAPOLATED = \"Extrapolated\"\nZEROFLUX = \"ZeroSurfaceFlux\"\nZERO_INWARD_CURRENT = \"ZeroInwardCurrent\"\n\n\n# Common settings checks\ndef gammaTransportIsRequested(cs):\n    \"\"\"\n    Check if gamma transport was requested by the user.\n\n    Arguments\n    ---------\n    cs : ARMI settings object\n        Object containing the default and user-specified ARMI settings controlling the simulation\n\n    Returns\n    -------\n    flag : bool\n        Returns true if gamma transport is requested.\n    \"\"\"\n    from armi.physics.neutronics.settings import CONF_GLOBAL_FLUX_ACTIVE\n\n    return GAMMA in cs[CONF_GLOBAL_FLUX_ACTIVE]\n\n\ndef gammaXsAreRequested(cs):\n    \"\"\"\n    Check if gamma cross-sections generation was requested by the user.\n\n    Arguments\n    ---------\n    cs : ARMI settings object\n        Object containing the default and user-specified ARMI settings controlling the simulation.\n\n    Returns\n    -------\n    flag : bool\n        Returns true if gamma cross section generation is requested.\n    \"\"\"\n    from armi.physics.neutronics.settings import CONF_GEN_XS\n\n    return GAMMA in cs[CONF_GEN_XS]\n\n\ndef adjointCalculationRequested(cs):\n    \"\"\"Return true if an adjoint calculation is requested based on the ``CONF_NEUTRONICS_TYPE`` setting.\"\"\"\n    from armi.physics.neutronics.settings import CONF_NEUTRONICS_TYPE\n\n    return cs[CONF_NEUTRONICS_TYPE] in [ADJOINT_CALC, ADJREAL_CALC]\n\n\ndef 
realCalculationRequested(cs):\n    \"\"\"Return true if a real calculation is requested based on the ``CONF_NEUTRONICS_TYPE`` type setting.\"\"\"\n    from armi.physics.neutronics.settings import CONF_NEUTRONICS_TYPE\n\n    return cs[CONF_NEUTRONICS_TYPE] in [\"real\", \"both\"]\n\n\nclass LatticePhysicsFrequency(IntEnum):\n    \"\"\"\n    Enumeration for lattice physics update frequency options.\n\n    NEVER = never automatically trigger lattice physics (a custom script could still trigger it)\n    BOL = Beginning-of-life (c0n0)\n    BOC = Beginning-of-cycle (c*n0)\n    everyNode = Every interaction node (c*n*)\n    firstCoupledIteration = every node + the first coupled iteration at each node\n    all = every node + every coupled iteration\n\n    Notes\n    -----\n    firstCoupledIteration only updates the cross sections during the first coupled iteration, but\n    not on any subsequent iterations. This may be an appropriate approximation in some cases to save\n    compute time, but each individual user should give careful consideration to whether this is the\n    behavior they want for a particular application. The main purpose of this setting is to capture\n    a large change in temperature distribution when running a snapshot at a different power/flow\n    condition than the original state being loaded from the database.\n    \"\"\"\n\n    never = 0\n    BOL = 1\n    BOC = 2\n    everyNode = 3\n    firstCoupledIteration = 4\n    all = 5\n"
  },
  {
    "path": "armi/physics/neutronics/const.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nConstants and Enums.\n\nIn an independent file to minimize circular imports.\n\"\"\"\n\nCONF_CROSS_SECTION = \"crossSectionControl\"\n#\n# FAST_FLUX_THRESHOLD_EV is the energy threshold above which neutrons are considered \"fast\" [eV]\n#\nFAST_FLUX_THRESHOLD_EV = 100000.0  # eV\n\n# CROSS SECTION LIBRARY GENERATION CONSTANTS\nMAXIMUM_XS_LIBRARY_ENERGY = 1.4190675e7  # eV\nULTRA_FINE_GROUP_LETHARGY_WIDTH = 1.0 / 120.0\n\n# LOWEST_ENERGY_EV cannot be zero due to integrating lethargy, and lethargy is undefined at 0.0\n# The lowest lower boundary of many group structures such as any WIMS, SCALE or CASMO\n# is 1e-5 eV, therefore it is chosen here. This number must be lower than all of the\n# defined group structures. The chosen 1e-5 eV is rather arbitrary but expected to be low\n# enough to support other group structures. 
For fast reactors, there will be\n# no sensitivity at all to this value since there is no flux in this region.\nLOWEST_ENERGY_EV = 1.0e-5\n\n\n# Highest energy will typically depend on what physics code is being run, but this is\n# a decent round number to use.\nHIGH_ENERGY_EV = 1.5e07\n\n# Particle types constants\nGAMMA = \"Gamma\"\nNEUTRON = \"Neutron\"\nNEUTRONGAMMA = \"Neutron and Gamma\"\n\n# Constants for neutronics setting controlling saving of files after neutronics calculation\n# See setting 'neutronicsOutputsToSave'\nALL = \"All\"\nRESTARTFILES = \"Restart files\"\nINPUTOUTPUT = \"Input/Output\"\nFLUXFILES = \"Flux files\"\n"
  },
  {
    "path": "armi/physics/neutronics/crossSectionGroupManager.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nCross section group manager handles burnup-dependent properties of microscopic cross sections.\n\nBlocks are specified to be in a certain *cross section type* and *burnup group*. Together,\nthese form the *cross section group*. By advancing blocks by their burnup into\ndifferent groups, we capture some of the physical effects related to depletion.\n\nXS types are typically single capital letters like A\nBU groups are also capital letters.\nA XS group of AB is in XS type ``A`` and burnup group ``B``.\n\nThis module groups the blocks according to their XS groups and can determine\nwhich block is to be deemed **representative** of an entire set of blocks in a particular xs group.\nThen the representative block is sent to a lattice physics kernel for actual physics\ncalculations.\n\nGenerally, the cross section manager is a attribute of the lattice physics code interface\n\nExamples\n--------\n    csm = CrossSectionGroupManager()\n    csm._setBuGroupBounds(cs['buGroups'])\n    csm._setTempGroupBounds(cs['tempGroups']) # or empty list\n    csm._addXsGroupsFromBlocks(blockList)\n    csm.createRepresentativeBlocks()\n    representativeBlockList = csm.representativeBlocks.values()\n    blockThatRepresentsBA = csm.representativeBlocks['BA']\n\nThe class diagram is provided in `xsgm-class-diagram`_\n\n.. _xsgm-class-diagram:\n\n.. 
pyreverse:: armi.physics.neutronics.crossSectionGroupManager\n    :align: center\n    :alt: XSGM class diagram\n    :width: 90%\n\n    Class inheritance diagram for :py:mod:`crossSectionGroupManager`.\n\"\"\"\n\nimport collections\nimport copy\nimport os\nimport string\nimport sys\n\nimport numpy as np\n\nfrom armi import context, interfaces, runLog\nfrom armi.physics.neutronics import LatticePhysicsFrequency\nfrom armi.physics.neutronics.const import CONF_CROSS_SECTION\nfrom armi.reactor import flags\nfrom armi.reactor.components import basicShapes\nfrom armi.reactor.flags import Flags\nfrom armi.utils import safeCopy\nfrom armi.utils.units import C_TO_K, TRACE_NUMBER_DENSITY\n\nORDER = interfaces.STACK_ORDER.BEFORE + interfaces.STACK_ORDER.CROSS_SECTIONS\n\n\ndef describeInterfaces(cs):\n    \"\"\"Function for exposing interface(s) to other code.\"\"\"\n    from armi.physics.neutronics.settings import CONF_NEUTRONICS_KERNEL\n\n    if \"MCNP\" not in cs[CONF_NEUTRONICS_KERNEL]:  # MCNP does not use CSGM\n        return (CrossSectionGroupManager, {})\n\n    return None\n\n\n_ALLOWABLE_XS_TYPE_LIST = list(string.ascii_uppercase + string.ascii_lowercase)\n\n\ndef getXSTypeNumberFromLabel(xsTypeLabel: str) -> int:\n    \"\"\"\n    Convert a XSID label (e.g. 'AA') to an integer.\n\n    Useful for visualizing XS type in XTVIEW.\n\n    2-digit labels are supported when there is only one burnup group.\n    \"\"\"\n    return int(\"\".join([\"{:02d}\".format(ord(si)) for si in xsTypeLabel]))\n\n\ndef getXSTypeLabelFromNumber(xsTypeNumber: int) -> str:\n    \"\"\"\n    Convert a XSID label (e.g. 65) to an XS label (e.g. 'A').\n\n    Useful for visualizing XS type in XTVIEW.\n\n    2-digit labels are supported when there is only one burnup group.\n    \"\"\"\n    try:\n        if xsTypeNumber > ord(\"Z\"):\n            # two digit. 
Parse\n            return chr(int(str(xsTypeNumber)[:2])) + chr(int(str(xsTypeNumber)[2:]))\n        elif xsTypeNumber < ord(\"A\"):\n            raise ValueError(\n                f\"Cannot convert invalid xsTypeNumber `{xsTypeNumber}` to char. \"\n                \"The number must be >= 65 (corresponding to 'A').\"\n            )\n        else:\n            return chr(xsTypeNumber)\n    except ValueError:\n        runLog.error(\"Error converting {} to label.\".format(xsTypeNumber))\n        raise\n\n\ndef _checkConsistentNuclides(thisComp, repComp):\n    \"\"\"\n    Check that thisComp has the same set of nuclides as the analogous component in the\n    representative block.\n\n    This check is somewhat permissive in that it allows for the two components to differ\n    in nuclides where one of them is at a zero number density.\n\n    Warning\n    -------\n    This only checks ``consistentNucs`` for ones that are important in SFRs.\n    \"\"\"\n    consistentNucs = {\"PU239\", \"U238\", \"U235\", \"U234\", \"FE56\", \"NA23\", \"O16\"}\n    # ignore anything with zero number density\n    theseNucs = set(nuc for nuc, ndens in thisComp.getNumberDensities().items() if ndens > 0.0)\n    thoseNucs = set(nuc for nuc, ndens in repComp.getNumberDensities().items() if ndens > 0.0)\n\n    # in the nuclide list of the component, but at a number density of 0.0\n    # treat this more permissively -- i.e., it could be considered as either having or not having it\n    theseNucsAtZero = set(nuc for nuc, ndens in thisComp.getNumberDensities().items() if ndens == 0.0)\n    thoseNucsAtZero = set(nuc for nuc, ndens in repComp.getNumberDensities().items() if ndens == 0.0)\n\n    # check for any differences between which `consistentNucs` the components have\n    diffNucsNonZero = theseNucs.symmetric_difference(thoseNucs).intersection(consistentNucs)\n    diffNucsAtZero = theseNucsAtZero.symmetric_difference(thoseNucsAtZero).intersection(consistentNucs)\n    diffNucs = diffNucsNonZero - 
diffNucsAtZero\n    if diffNucs:\n        raise ValueError(\n            f\"Component {thisComp} in block {repComp} and component {thisComp} in block {thisComp.parent} are in the \"\n            f\"same location, but nuclides differ by {diffNucs}. \\n{theseNucs} \\n{thoseNucs}\"\n        )\n\n\nclass BlockCollection(list):\n    \"\"\"\n    Controls which blocks are representative of a particular cross section type/BU group.\n\n    This is a list with special methods.\n    \"\"\"\n\n    def __init__(self, allNuclidesInProblem, validBlockTypes=None, averageByComponent=False):\n        list.__init__(self)\n        self.allNuclidesInProblem = allNuclidesInProblem\n        self.weightingParam = None\n        self.averageByComponent = averageByComponent\n\n        # allowed to be independent of fuel component temperatures b/c Doppler\n        self.avgNucTemperatures = {}\n        self._validRepresentativeBlockTypes = None\n        if validBlockTypes:\n            self._validRepresentativeBlockTypes = []\n            for t in validBlockTypes:\n                self._validRepresentativeBlockTypes.append(Flags.fromString(t))\n\n    def __repr__(self):\n        return \"<{} with {} blocks>\".format(self.__class__.__name__, len(self))\n\n    def _getNewBlock(self):\n        \"\"\"\n        Create a new block instance.\n\n        Notes\n        -----\n        Should only be used by average because of name (which may not matter)\n        \"\"\"\n        newBlock = copy.deepcopy(self.getCandidateBlocks()[0])\n        newBlock.name = \"AVG_\" + newBlock.getMicroSuffix()\n        return newBlock\n\n    def createRepresentativeBlock(self):\n        \"\"\"Generate a block that best represents all blocks in group.\"\"\"\n        self._checkValidWeightingFactors()\n        representativeBlock = self._makeRepresentativeBlock()\n        return representativeBlock\n\n    def _makeRepresentativeBlock(self):\n        raise NotImplementedError\n\n    def _checkValidWeightingFactors(self):\n  
      \"\"\"\n        Verify the validity of the weighting parameter.\n\n        .. warning:: Don't mix unweighted blocks (flux=0) w/ weighted ones\n        \"\"\"\n        if self.weightingParam is None:\n            weights = [0.0] * len(self.getCandidateBlocks())\n        else:\n            weights = [block.p[self.weightingParam] for block in self.getCandidateBlocks()]\n        anyNonZeros = any(weights)\n        if anyNonZeros and not all(weights):\n            # we have at least one non-zero entry and at least one zero. This is bad.\n            # find the non-zero ones for debugging\n            zeros = [block for block in self if not block.p[self.weightingParam]]\n            runLog.error(\"Blocks with zero `{0}` include: {1}\".format(self.weightingParam, zeros))\n            raise ValueError(\n                \"{0} has a mixture of zero and non-zero weighting factors (`{1}`)\\nSee stdout for details\".format(\n                    self, self.weightingParam\n                )\n            )\n\n    def calcAvgNuclideTemperatures(self):\n        r\"\"\"\n        Calculate the average nuclide temperatures in this collection based on the blocks in the collection.\n\n        If a nuclide is in multiple components, that's taken into consideration.\n\n        .. 
math::\n             T = \\frac{\\sum{n_i v_i T_i}}{\\sum{n_i v_i}}\n\n        where :math:`n_i` is a number density, :math:`v_i` is a volume, and :math:`T_i` is a temperature\n        \"\"\"\n        self.avgNucTemperatures = {}\n        nvt, nv = self._getNucTempHelper()\n        for i, nuclide in enumerate(self.allNuclidesInProblem):\n            nvtCurrent = nvt[i]\n            nvCurrent = nv[i]\n            avgTemp = 0.0 if nvCurrent == 0.0 else nvtCurrent / nvCurrent\n            self.avgNucTemperatures[nuclide] = avgTemp\n\n    def _getNucTempHelper(self):\n        \"\"\"\n        Get temperature averaging numerator and denominator for block collection.\n\n        This is abstract; you must override it.\n        \"\"\"\n        raise NotImplementedError\n\n    def getWeight(self, block):\n        \"\"\"Get value of weighting function for this block.\"\"\"\n        vol = block.getVolume() or 1.0\n        if not self.weightingParam:\n            weight = 1.0\n        else:\n            # don't return 0\n            weight = block.p[self.weightingParam] or 1.0\n\n        return weight * vol\n\n    def getCandidateBlocks(self):\n        \"\"\"\n        Get blocks in this collection that are the valid representative type.\n\n        Often, peripheral non-fissile blocks (reflectors, control, shields) need cross sections but\n        cannot produce them alone. You can approximate their cross sections by placing them in certain cross\n        section groups. However, we do not want these blocks to be included in the spectrum\n        calculations that produce cross sections. Therefore the subset of valid representative\n        blocks are used to compute compositions, temperatures, etc.\n\n        .. 
tip:: The proper way to treat non-fuel blocks is to apply a leakage spectrum from fuel onto them.\n        \"\"\"\n        return [b for b in self if b.hasFlags(self._validRepresentativeBlockTypes)]\n\n    def _calcWeightedBurnup(self):\n        \"\"\"\n        For a blockCollection that represents fuel, calculate the weighted average burnup.\n\n        Notes\n        -----\n        - Only used for logging purposes\n        - Burnup needs to be weighted by heavy metal mass instead of volume\n        \"\"\"\n        weightedBurnup = 0.0\n        totalWeight = 0.0\n        for b in self:\n            # self.getWeight(b) incorporates the volume as does mass, so divide by volume not to double-count\n            weighting = b.p.massHmBOL * self.getWeight(b) / b.getVolume()\n            totalWeight += weighting\n            weightedBurnup += weighting * b.p.percentBu\n        return 0.0 if totalWeight == 0.0 else weightedBurnup / totalWeight\n\n\nclass MedianBlockCollection(BlockCollection):\n    \"\"\"Returns the median burnup block. 
This is a simple and often accurate approximation.\"\"\"\n\n    def _makeRepresentativeBlock(self):\n        \"\"\"Get the median burnup block.\"\"\"\n        medianBlock = self._getMedianBlock()\n        # copy so we can adjust LFPs w/o changing the global ones\n        newBlock = copy.deepcopy(medianBlock)\n        lfpCollection = medianBlock.getLumpedFissionProductCollection()\n        if lfpCollection:\n            lfpCollection = lfpCollection.duplicate()\n            lfpCollection.setGasRemovedFrac(newBlock.p.gasReleaseFraction)\n            newBlock.setLumpedFissionProducts(lfpCollection)\n        else:\n            runLog.warning(\"Representative block {0} has no LFPs\".format(medianBlock))\n        self.calcAvgNuclideTemperatures()\n        return newBlock\n\n    def _getNucTempHelper(self):\n        \"\"\"\n        Return the Median block nuclide temperature terms.\n\n        In this case, there's only one block to average, so return its averaging terms.\n\n        See Also\n        --------\n        calcAvgNuclideTemperatures\n        \"\"\"\n        medianBlock = self._getMedianBlock()\n        return getBlockNuclideTemperatureAvgTerms(medianBlock, self.allNuclidesInProblem)\n\n    def _getMedianBlock(self):\n        \"\"\"\n        Return the median burnup Block.\n\n        Build list of items for each block when sorted gives desired order\n\n        Last item in each tuple is always the block itself (for easy retrieval).\n\n        For instance, if you want the median burnup, this list would contain\n        tuples of (burnup, blockName, block). Blockname is included so\n        the order is consistent between runs when burnups are equal (e.g. 
0).\n        \"\"\"\n        info = []\n        for b in self.getCandidateBlocks():\n            info.append((b.p.percentBu * self.getWeight(b), b.getName(), b))\n        info.sort()\n        medianBlockData = info[len(info) // 2]\n        return medianBlockData[-1]\n\n\nclass AverageBlockCollection(BlockCollection):\n    \"\"\"\n    Block collection that builds a new block based on others in collection.\n\n    Averages number densities, fission product yields, and fission gas\n    removal fractions.\n\n    .. impl:: Create representative blocks using volume-weighted averaging.\n        :id: I_ARMI_XSGM_CREATE_REPR_BLOCKS0\n        :implements: R_ARMI_XSGM_CREATE_REPR_BLOCKS\n\n        This class constructs new blocks from an existing block list based on a volume-weighted\n        average. Inheriting functionality from the abstract\n        :py:class:`Reactor <armi.physics.neutronics.crossSectionGroupManager.BlockCollection>`\n        object, this class will construct representative blocks using averaged parameters of all\n        blocks in the given collection. Number density averages can be computed at a component level\n        or at a block level by default. 
Average nuclide temperatures and burnup are also included\n        when constructing a representative block.\n    \"\"\"\n\n    def _makeRepresentativeBlock(self):\n        \"\"\"Generate a block that best represents all blocks in group.\"\"\"\n        newBlock = self._getNewBlock()\n        lfpCollection = self._getLFP()\n        newBlock.setLumpedFissionProducts(lfpCollection)\n        # check if components are similar\n        if self._performAverageByComponent():\n            # set number densities and temperatures on a component basis\n            for compIndex, c in enumerate(sorted(newBlock.getComponents())):\n                c.setNumberDensities(self._getAverageComponentNumberDensities(compIndex))\n                c.temperatureInC = self._getAverageComponentTemperature(compIndex)\n        else:\n            newBlock.setNumberDensities(self._getAverageNumberDensities())\n\n        newBlock.p.percentBu = self._calcWeightedBurnup()\n        newBlock.clearCache()\n        self.calcAvgNuclideTemperatures()\n        return newBlock\n\n    def _getAverageNumberDensities(self):\n        \"\"\"\n        Get weighted average number densities of the collection.\n\n        Returns\n        -------\n        numberDensities : dict\n            nucName, ndens data (atoms/bn-cm)\n        \"\"\"\n        nuclides = self.allNuclidesInProblem\n        blocks = self.getCandidateBlocks()\n        weights = np.array([self.getWeight(b) for b in blocks])\n        weights /= weights.sum()  # normalize by total weight\n        ndens = weights.dot([b.getNuclideNumberDensities(nuclides) for b in blocks])\n        return dict(zip(nuclides, ndens))\n\n    def _getLFP(self):\n        \"\"\"Find lumped fission product collection.\"\"\"\n        b = self.getCandidateBlocks()[0]\n        return b.getLumpedFissionProductCollection()\n\n    def _getNucTempHelper(self):\n        \"\"\"All candidate blocks are used in the average.\"\"\"\n        nvt = np.zeros(len(self.allNuclidesInProblem))\n  
      nv = np.zeros(len(self.allNuclidesInProblem))\n        for block in self.getCandidateBlocks():\n            wt = self.getWeight(block)\n            nvtBlock, nvBlock = getBlockNuclideTemperatureAvgTerms(block, self.allNuclidesInProblem)\n            nvt += nvtBlock * wt\n            nv += nvBlock * wt\n\n        return nvt, nv\n\n    def _getAverageComponentNumberDensities(self, compIndex):\n        \"\"\"\n        Get weighted average number densities of a component in the collection.\n\n        Returns\n        -------\n        numberDensities : dict\n            nucName, ndens data (atoms/bn-cm)\n        \"\"\"\n        blocks = self.getCandidateBlocks()\n        weights = np.array([self.getWeight(b) for b in blocks])\n        weights /= weights.sum()  # normalize by total weight\n        components = [sorted(b.getComponents())[compIndex] for b in blocks]\n        nuclides = self._getAllNucs(components)\n        ndens = weights.dot([c.getNuclideNumberDensities(nuclides) for c in components])\n        return dict(zip(nuclides, ndens))\n\n    def _getAverageComponentTemperature(self, compIndex):\n        \"\"\"\n        Get weighted average component temperature for the collection.\n\n        Notes\n        -----\n        Weighting is both by the block weight within the collection and the relative mass of the\n        Component. The block weight is already scaled by the block volume, so we need to pull that\n        out of the block weighting because it would effectively be double-counted in the component\n        mass. 
b.getHeight() is proportional to block volume, so it is used here as a computationally\n        cheaper proxy for scaling by block volume.\n\n        Returns\n        -------\n        numberDensities : dict\n            nucName, ndens data (atoms/bn-cm)\n        \"\"\"\n        blocks = self.getCandidateBlocks()\n        weights = np.array([self.getWeight(b) / b.getHeight() for b in blocks])\n        weights /= weights.sum()  # normalize by total weight\n        components = [sorted(b.getComponents())[compIndex] for b in blocks]\n        weightedAvgComponentMass = sum(w * c.getMass() for w, c in zip(weights, components))\n        if weightedAvgComponentMass == 0.0:\n            # if there is no component mass (e.g., gap), do a regular average\n            return np.mean(np.array([c.temperatureInC for c in components]))\n        else:\n            return (\n                weights.dot(np.array([c.temperatureInC * c.getMass() for c in components])) / weightedAvgComponentMass\n            )\n\n    def _performAverageByComponent(self):\n        \"\"\"\n        Check if block collection averaging can/should be performed by component.\n\n        If the components of blocks in the collection are similar and the user has requested\n        Component-level averaging, return True. Otherwise, return False.\n        \"\"\"\n        if not self.averageByComponent:\n            return False\n        else:\n            return self._checkBlockSimilarity()\n\n    def _checkBlockSimilarity(self):\n        \"\"\"\n        Check if blocks in the collection have similar components.\n\n        If the components of blocks in the collection are similar and the user has requested\n        Component-level averaging, return True. 
Otherwise, return False.\n        \"\"\"\n        cFlags = dict()\n        for b in self.getCandidateBlocks():\n            cFlags[b] = [c.p.flags for c in sorted(b.getComponents())]\n        refB = b\n        refFlags = cFlags[refB]\n        for b, compFlags in cFlags.items():\n            for c, refC in zip(compFlags, refFlags):\n                if c != refC:\n                    runLog.warning(\n                        \"Non-matching block in AverageBlockCollection!\\n\"\n                        f\"{refC} component flags in {refB} does not match {c} in {b}.\\n\"\n                        f\"Number densities will be smeared in representative block.\"\n                    )\n                    return False\n        else:\n            return True\n\n    @staticmethod\n    def _getAllNucs(components):\n        \"\"\"Iterate through components and get all unique nuclides.\"\"\"\n        nucs = set()\n        for c in components:\n            nucs = nucs.union(c.getNuclides())\n        return sorted(list(nucs))\n\n\ndef getBlockNuclideTemperature(block, nuclide):\n    \"\"\"Return the average temperature for 1 nuclide.\"\"\"\n    tempIntegratedVolume, volume = getBlockNuclideTemperatureAvgTerms(block, [nuclide])\n    return tempIntegratedVolume / volume if volume > 0 else 0.0\n\n\ndef getBlockNuclideTemperatureAvgTerms(block, allNucNames):\n    \"\"\"\n    Compute terms (numerator, denominator) of average for this block.\n\n    This volume-weights the densities by component volume fraction.\n\n    It's important to count zero-density nuclides (i.e. 
ones like AM242 that are expected to build\n    up) as trace values at the proper component temperatures.\n    \"\"\"\n\n    def getNumberDensitiesWithTrace(component, allNucNames):\n        \"\"\"Needed to make sure temperature of 0-density nuclides in fuel get fuel temperature.\"\"\"\n        if component.p.nuclides is None:\n            return [0.0 for _nuc in allNucNames]\n\n        allByteNucs = [nucName.encode() for nucName in allNucNames]\n        ndens = []\n        nucCopy = np.array(component.p.nuclides)\n        nDensCopy = np.array(component.p.numberDensities)\n        reverseIndex = {nuc: i for i, nuc in enumerate(nucCopy)}\n        for nuc in allByteNucs:\n            i = reverseIndex.get(nuc, -1)\n            if i >= 0:\n                ndens.append(max(nDensCopy[i], TRACE_NUMBER_DENSITY))\n            else:\n                ndens.append(0.0)\n        return ndens\n\n    vol = block.getVolume()\n    components, volFracs = zip(*block.getVolumeFractions())\n    # D = CxN matrix of number densities\n    ndens = np.array([getNumberDensitiesWithTrace(c, allNucNames) for c in components])\n\n    # C-length temperature array\n    temperatures = np.array([c.temperatureInC for c in components])\n\n    # multiply each component's values by volume frac, now NxC\n    nvBlock = ndens.T * np.array(volFracs) * vol\n\n    nvt = sum((nvBlock * temperatures).T)  # N-length array summing over components.\n    nv = sum(nvBlock.T)  # N-length array\n    return nvt, nv\n\n\nclass CylindricalComponentsAverageBlockCollection(AverageBlockCollection):\n    \"\"\"\n    Creates a representative block for the purpose of cross section generation with a one-\n    dimensional cylindrical model.\n\n    .. 
impl:: Create representative blocks using custom cylindrical averaging.\n        :id: I_ARMI_XSGM_CREATE_REPR_BLOCKS1\n        :implements: R_ARMI_XSGM_CREATE_REPR_BLOCKS\n\n        This class constructs representative blocks based on a volume-weighted average using\n        cylindrical blocks from an existing block list. Inheriting functionality from the abstract\n        :py:class:`Reactor <armi.physics.neutronics.crossSectionGroupManager.BlockCollection>`\n        object, this class will construct representative blocks using averaged parameters of all\n        blocks in the given collection. Number density averages are computed at a component level.\n        Nuclide temperatures from a median block-average temperature are used and the average burnup\n        is evaluated across all blocks in the block list.\n\n    Notes\n    -----\n    When generating the representative block within this collection, the geometry is checked against\n    all other blocks to ensure that the number of components are consistent. 
This implementation is\n    intended to be opinionated, so if a user attempts to put blocks that have geometric differences\n    then this will fail.\n\n    This selects a representative block based on the collection of candidates based on the median\n    Block-average temperatures as an assumption.\n    \"\"\"\n\n    def _getNewBlock(self):\n        newBlock = copy.deepcopy(self._selectCandidateBlock())\n        newBlock.name = \"1D_CYL_AVG_\" + newBlock.getMicroSuffix()\n        return newBlock\n\n    def _selectCandidateBlock(self):\n        \"\"\"Selects the candidate block with the median block-average temperature.\"\"\"\n        info = []\n        for b in self.getCandidateBlocks():\n            info.append((b.getAverageTempInC(), b.getName(), b))\n        info.sort()\n        medianBlockData = info[len(info) // 2]\n        return medianBlockData[-1]\n\n    def _makeRepresentativeBlock(self):\n        \"\"\"Build a representative fuel block based on component number densities.\"\"\"\n        repBlock = self._getNewBlock()\n        bWeights = [self.getWeight(b) for b in self.getCandidateBlocks()]\n        repBlock.p.percentBu = self._calcWeightedBurnup()\n        componentsInOrder = self._orderComponentsInGroup(repBlock)\n\n        for i, (c, allSimilarComponents) in enumerate(zip(sorted(repBlock), componentsInOrder)):\n            allNucsNames, densities = self._getAverageComponentNucs(allSimilarComponents, bWeights)\n            for nuc, aDensity in zip(allNucsNames, densities):\n                c.setNumberDensity(nuc, aDensity)\n            c.temperatureInC = self._getAverageComponentTemperature(i)\n        repBlock.clearCache()\n        self.calcAvgNuclideTemperatures()\n        return repBlock\n\n    @staticmethod\n    def _checkComponentConsistency(b, repBlock):\n        \"\"\"\n        Verify that all components being homogenized have same multiplicity and nuclides.\n\n        Raises\n        ------\n        ValueError\n            When the components 
in a candidate block do not align with the components in the\n            representative Block. This check includes component area, component multiplicity, and\n            nuclide composition.\n        \"\"\"\n        if len(b) != len(repBlock):\n            raise ValueError(f\"Blocks {b} and {repBlock} have differing number of components and cannot be homogenized\")\n\n        # NOTE: We are using Fe-56 as a proxy for structure and Na-23 as proxy for coolant, which\n        # is undesirably SFR-centric. This should be generalized in the future, if possible.\n        for c, repC in zip(sorted(b), sorted(repBlock)):\n            _checkConsistentNuclides(c, repC)\n            if c.p.mult != repC.p.mult:\n                raise ValueError(\n                    f\"Component {repC} in block {repBlock} and component {c} in block {b} must have the same \"\n                    f\"multiplicity, but they have {repC.p.mult} and {c.p.mult}, respectively.\"\n                )\n\n    def _getAverageComponentNucs(self, components, bWeights):\n        \"\"\"Compute average nuclide densities by block weights and component area fractions.\"\"\"\n        allNucNames = self._getAllNucs(components)\n        densities = np.zeros(len(allNucNames))\n        totalWeight = 0.0\n        for c, bWeight in zip(components, bWeights):\n            weight = bWeight * c.getArea()\n            totalWeight += weight\n            densities += weight * np.array(c.getNuclideNumberDensities(allNucNames))\n\n        if totalWeight > 0.0:\n            weightedDensities = densities / totalWeight\n        else:\n            weightedDensities = np.zeros_like(densities)\n\n        return allNucNames, weightedDensities\n\n    def _orderComponentsInGroup(self, repBlock):\n        \"\"\"Order the components based on dimension and material type within the representative\n        Block.\n        \"\"\"\n        for b in self.getCandidateBlocks():\n            self._checkComponentConsistency(b, repBlock)\n        
componentLists = [list(sorted(b)) for b in self.getCandidateBlocks()]\n        return [list(comps) for comps in zip(*componentLists)]\n\n    def _getNucTempHelper(self):\n        \"\"\"All candidate blocks are used in the average.\"\"\"\n        nvt = np.zeros(len(self.allNuclidesInProblem))\n        nv = np.zeros(len(self.allNuclidesInProblem))\n        for block in self.getCandidateBlocks():\n            wt = self.getWeight(block)\n            nvtBlock, nvBlock = getBlockNuclideTemperatureAvgTerms(block, self.allNuclidesInProblem)\n            nvt += nvtBlock * wt\n            nv += nvBlock * wt\n        return nvt, nv\n\n\nclass CylindricalComponentsDuctHetAverageBlockCollection(CylindricalComponentsAverageBlockCollection):\n    \"\"\"\n    Creates a representative block for the purpose of cross section generation with a one-\n    dimensional cylindrical model where all material inside the duct is homogenized.\n\n    .. impl:: Create partially heterogeneous representative blocks.\n        :id: I_ARMI_XSGM_CREATE_REPR_BLOCKS2\n        :implements: R_ARMI_XSGM_CREATE_REPR_BLOCKS\n\n        This class constructs representative blocks based on a volume-weighted average using\n        cylindrical blocks from an existing block list. Inheriting functionality from the abstract\n        :py:class:`Reactor <armi.physics.neutronics.crossSectionGroupManager.BlockCollection>`\n        object, this class will construct representative blocks using averaged parameters of all\n        blocks in the given collection. Number density averages are computed at a component level.\n        Nuclide temperatures from a median block-average temperature are used and the average burnup\n        is evaluated across all blocks in the block list.\n\n        The average nuclide temperatures are calculated only for the homogenized region inside of\n        the duct. 
For the non-homogenized regions, the MC2 writer uses the component temperatures.\n\n    Notes\n    -----\n    The representative block for this collection is the same as the parent. The only difference between\n    the two collection types is that this collection calculates average nuclide temperatures based only\n    on the components that are inside of the duct.\n    \"\"\"\n\n    def _getNewBlock(self):\n        newBlock = copy.deepcopy(self._selectCandidateBlock())\n        newBlock.name = \"1D_CYL_DUCT_HET_AVG_\" + newBlock.getMicroSuffix()\n        return newBlock\n\n    def _makeRepresentativeBlock(self):\n        \"\"\"Build a representative fuel block based on component number densities.\"\"\"\n        self.calcAvgNuclideTemperatures()\n        return CylindricalComponentsAverageBlockCollection._makeRepresentativeBlock(self)\n\n    def _getNucTempHelper(self):\n        \"\"\"All candidate blocks are used in the average.\"\"\"\n        from armi.reactor.converters.blockConverters import stripComponents\n\n        nvt = np.zeros(len(self.allNuclidesInProblem))\n        nv = np.zeros(len(self.allNuclidesInProblem))\n        for block in self.getCandidateBlocks():\n            wt = self.getWeight(block)\n            # remove the duct and intercoolant from the block before\n            # calculating average nuclide temps\n            newBlock, _mixtureFlags = stripComponents(block, Flags.DUCT)\n            nvtBlock, nvBlock = getBlockNuclideTemperatureAvgTerms(newBlock, self.allNuclidesInProblem)\n            nvt += nvtBlock * wt\n            nv += nvBlock * wt\n        return nvt, nv\n\n\nclass SlabComponentsAverageBlockCollection(BlockCollection):\n    \"\"\"\n    Creates a representative 1D slab block.\n\n    Notes\n    -----\n    - Ignores lumped fission products since there is no foreseeable need for burn calculations in 1D\n      slab geometry since it is used for low power neutronic validation.\n    - Checks for consistent component dimensions for all 
blocks in a group and then creates a new\n      Block.\n    - Iterates through components of all blocks and calculates component average number densities.\n      This calculation takes the first component of each block, averages the number densities, and\n      applies this to the number density to the representative block.\n    \"\"\"\n\n    def _getNewBlock(self):\n        newBlock = copy.deepcopy(self.getCandidateBlocks()[0])\n        newBlock.name = \"1D_SLAB_AVG_\" + newBlock.getMicroSuffix()\n        return newBlock\n\n    def _makeRepresentativeBlock(self):\n        \"\"\"Build a representative fuel block based on component number densities.\"\"\"\n        repBlock = self._getNewBlock()\n        bWeights = [self.getWeight(b) for b in self.getCandidateBlocks()]\n        repBlock.p.percentBu = self._calcWeightedBurnup()\n        componentsInOrder = self._orderComponentsInGroup(repBlock)\n\n        for c, allSimilarComponents in zip(repBlock, componentsInOrder):\n            allNucsNames, densities = self._getAverageComponentNucs(allSimilarComponents, bWeights)\n            for nuc, aDensity in zip(allNucsNames, densities):\n                c.setNumberDensity(nuc, aDensity)\n        newBlock = self._removeLatticeComponents(repBlock)\n        return newBlock\n\n    def _getNucTempHelper(self):\n        raise NotImplementedError\n\n    @staticmethod\n    def _getAllNucs(components):\n        \"\"\"Iterate through components and get all unique nuclides.\"\"\"\n        nucs = set()\n        for c in components:\n            nucs = nucs.union(c.getNuclides())\n        return sorted(list(nucs))\n\n    @staticmethod\n    def _checkComponentConsistency(b, repBlock, components=None):\n        \"\"\"\n        Verify that all components being homogenized are rectangular and have consistent dimensions.\n\n        Raises\n        ------\n        ValueError\n            When the components in a candidate block do not align with the components in the\n            
representative block. This check includes component area, component multiplicity, and\n            nuclide composition.\n\n        TypeError\n            When the shape of the component is not a rectangle.\n        \"\"\"\n        comps = b if components is None else components\n\n        for c, repC in zip(comps, repBlock):\n            if not isinstance(c, basicShapes.Rectangle):\n                raise TypeError(\n                    \"The shape of component {} in block {} is invalid and must be a rectangle.\".format(c, b)\n                )\n            compString = \"Component {} in block {} and component {} in block {}\".format(repC, repBlock, c, b)\n            if c.getArea() != repC.getArea():\n                raise ValueError(\n                    \"{} are in the same location, but have differing thicknesses. Check that the \"\n                    \"thicknesses are defined correctly. Note: This could also be due to \"\n                    \"thermal expansion\".format(compString)\n                )\n\n            _checkConsistentNuclides(c, repC)\n            if c.p.mult != repC.p.mult:\n                raise ValueError(\"{} must have the same multiplicity to homogenize\".format(compString))\n\n    @staticmethod\n    def _reverseComponentOrder(block):\n        \"\"\"Move the lattice component to the end of the components list.\"\"\"\n        latticeComponents = [c for c in block if c.isLatticeComponent()]\n        components = [c for c in reversed(block) if not c.isLatticeComponent()]\n        if len(latticeComponents) > 1:\n            raise ValueError(\n                \"Block {} contains multiple `lattice` components: {}. 
Remove the additional \"\n                \"lattice components in the reactor blueprints.\".format(block, latticeComponents)\n            )\n        components.append(latticeComponents[0])\n        return components\n\n    @staticmethod\n    def _removeLatticeComponents(repBlock):\n        \"\"\"\n        Remove the lattice component from the representative block.\n\n        Notes\n        -----\n        - This component does not serve any purpose for XS generation as it contains void material\n          with zero area.\n        - Removing this component does not modify the blocks within the reactor.\n        \"\"\"\n        for c in repBlock.iterComponents():\n            if c.isLatticeComponent():\n                repBlock.remove(c)\n        return repBlock\n\n    def _getAverageComponentNucs(self, components, bWeights):\n        \"\"\"Compute average nuclide densities by block weights and component area fractions.\"\"\"\n        allNucNames = self._getAllNucs(components)\n        densities = np.zeros(len(allNucNames))\n        totalWeight = 0.0\n        for c, bWeight in zip(components, bWeights):\n            weight = bWeight * c.getArea()\n            totalWeight += weight\n            densities += weight * np.array(c.getNuclideNumberDensities(allNucNames))\n        if totalWeight > 0.0:\n            weightedDensities = densities / totalWeight\n        else:\n            weightedDensities = np.zeros_like(densities)\n        return allNucNames, weightedDensities\n\n    def _orderComponentsInGroup(self, repBlock):\n        \"\"\"Order the components based on dimension and material type within the representative block.\"\"\"\n        orderedComponents = [[] for _ in repBlock]\n        for b in self.getCandidateBlocks():\n            if len(b) != len(repBlock):\n                raise ValueError(\n                    \"Blocks {} and {} have differing number of components and cannot be homogenized\".format(b, repBlock)\n                )\n            try:\n          
      self._checkComponentConsistency(b, repBlock)\n                componentsToAdd = [c for c in b]\n            except ValueError:\n                runLog.extra(\n                    \"Checking if components in block {} are in the reverse order of the components in the \"\n                    \"representative block {}\".format(b, repBlock)\n                )\n                reversedComponentOrder = self._reverseComponentOrder(b)\n                self._checkComponentConsistency(b, repBlock, components=reversedComponentOrder)\n                componentsToAdd = [c for c in reversedComponentOrder]\n            for i, c in enumerate(componentsToAdd):\n                orderedComponents[i].append(c)  # group similar components\n        return orderedComponents\n\n\nclass FluxWeightedAverageBlockCollection(AverageBlockCollection):\n    \"\"\"Flux-weighted AverageBlockCollection.\"\"\"\n\n    def __init__(self, *args, **kwargs):\n        AverageBlockCollection.__init__(self, *args, **kwargs)\n        self.weightingParam = \"flux\"\n\n\nclass CrossSectionGroupManager(interfaces.Interface):\n    \"\"\"\n    Looks at the reactor and updates burnup group information based on current burnup.\n\n    Contains a :py:class:`BlockCollection` for each cross section group.\n\n    Notes\n    -----\n    The representative blocks created in the CrossSectionGroupManager are ordered\n    alphabetically by key.\n    \"\"\"\n\n    name = \"xsGroups\"\n\n    _REPR_GROUP = \"represented\"\n    _NON_REPR_GROUP = \"non-represented\"\n    _PREGEN_GROUP = \"pre-generated\"\n\n    def __init__(self, r, cs):\n        interfaces.Interface.__init__(self, r, cs)\n        self._buGroupBounds = []\n        self._tempGroupBounds = []\n        self.representativeBlocks = collections.OrderedDict()\n        self.avgNucTemperatures = {}\n\n        # this turns off updates for when core changes are made, but dont want to re-evaluate XS\n        # for example if lattice physics was only once per cycle we 
might not want to re-evaluate groups\n        self._envGroupUpdatesEnabled = True\n        self._setBuGroupBounds(self.cs[\"buGroups\"])\n        self._setTempGroupBounds(self.cs[\"tempGroups\"])\n        self._unrepresentedXSIDs = []\n\n    def interactBOL(self):\n        \"\"\"Called at the Beginning-of-Life of a run, before any cycles start.\n\n        .. impl:: The lattice physics interface and cross-section group manager are connected at\n            BOL.\n            :id: I_ARMI_XSGM_FREQ0\n            :implements: R_ARMI_XSGM_FREQ\n\n            This method sets the cross-section block averaging method and logic for whether all\n            blocks in a cross section group should be used when generating a representative block.\n            Furthermore, if the control logic for lattice physics frequency updates is set at\n            beginning-of-life (`BOL`) through the :py:class:`LatticePhysicsInterface\n            <armi.physics.neutronics.latticePhysics>`, the cross-section group manager will\n            construct representative blocks for each cross-section ID at the beginning of the\n            reactor state.\n        \"\"\"\n        # now that all cs settings are loaded, apply defaults to compound XS settings\n        from armi.physics.neutronics.settings import (\n            CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION,\n            CONF_LATTICE_PHYSICS_FREQUENCY,\n            CONF_XS_BLOCK_REPRESENTATION,\n        )\n\n        self.cs[CONF_CROSS_SECTION].setDefaults(\n            self.cs[CONF_XS_BLOCK_REPRESENTATION],\n            self.cs[CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION],\n        )\n        self._latticePhysicsFrequency = LatticePhysicsFrequency[self.cs[CONF_LATTICE_PHYSICS_FREQUENCY]]\n        if self._latticePhysicsFrequency == LatticePhysicsFrequency.BOL:\n            self.createRepresentativeBlocks()\n\n    def interactBOC(self, cycle=None):\n        \"\"\"\n        Update representative blocks and block burnup 
groups.\n\n        .. impl:: The lattice physics interface and cross-section group manager are connected at\n            BOC.\n            :id: I_ARMI_XSGM_FREQ1\n            :implements: R_ARMI_XSGM_FREQ\n\n            This method updates representative blocks and block burnups at the beginning-of-cycle\n            for each cross-section ID if the control logic for lattice physics frequency updates is\n            set at beginning-of-cycle (`BOC`) through the :py:class:`LatticePhysicsInterface\n            <armi.physics.neutronics.latticePhysics>`. At the beginning-of-cycle, the cross-section\n            group manager will construct representative blocks for each cross-section ID for the\n            current reactor state.\n\n        Notes\n        -----\n        The block list for each block collection cannot be emptied since it is used to derive nuclide temperatures.\n        \"\"\"\n        if self._latticePhysicsFrequency == LatticePhysicsFrequency.BOC:\n            self.createRepresentativeBlocks()\n\n    def interactEOC(self, cycle=None):\n        \"\"\"EOC interaction.\n\n        Clear out big dictionary of all blocks to avoid memory issues and out-of-date representers.\n        \"\"\"\n        self.clearRepresentativeBlocks()\n\n    def interactEveryNode(self, cycle=None, tn=None):\n        \"\"\"Interaction at every time node.\n\n        .. impl:: The lattice physics interface and cross-section group manager are connected at\n            every time node.\n            :id: I_ARMI_XSGM_FREQ2\n            :implements: R_ARMI_XSGM_FREQ\n\n            This method updates representative blocks and block burnups at every node for each\n            cross-section ID if the control logic for lattice physics frequency updates is set for\n            every node (`everyNode`) through the :py:class:`LatticePhysicsInterface\n            <armi.physics.neutronics.latticePhysics>`. 
At every node, the cross-section group\n            manager will construct representative blocks for each cross-section ID in the current\n            reactor state.\n        \"\"\"\n        if self._latticePhysicsFrequency >= LatticePhysicsFrequency.everyNode:\n            self.createRepresentativeBlocks()\n\n    def interactCoupled(self, iteration):\n        \"\"\"Update cross-section groups on each physics coupling iteration to get latest\n        temperatures.\n\n        .. impl:: The lattice physics interface and cross-section group manager are connected\n            during coupling.\n            :id: I_ARMI_XSGM_FREQ3\n            :implements: R_ARMI_XSGM_FREQ\n\n            This method updates representative blocks and block burnups at every node and the first\n            coupled iteration for each cross-section ID if the control logic for lattice physics\n            frequency updates is set for the first coupled iteration (``firstCoupledIteration``)\n            through the\n            :py:class:`LatticePhysicsInterface <armi.physics.neutronics.latticePhysics>`.\n            The cross-section group manager will construct representative blocks for each\n            cross-section ID at the first iteration of every time node.\n\n        Notes\n        -----\n        Updating the cross-section on only the first (i.e., iteration == 0) timenode can be a\n        reasonable approximation to get new cross sections with some temperature updates but not\n        have to run lattice physics on each coupled iteration. 
If the user desires to have the\n        cross sections updated with every coupling iteration, use the ``latticePhysicsFrequency: all``\n        option.\n\n        See Also\n        --------\n        :py:meth:`~armi.physics.neutronics.latticePhysics.latticePhysics.LatticePhysicsInterface.interactCoupled`\n        \"\"\"\n        if (\n            iteration == 0 and self._latticePhysicsFrequency == LatticePhysicsFrequency.firstCoupledIteration\n        ) or self._latticePhysicsFrequency == LatticePhysicsFrequency.all:\n            self.createRepresentativeBlocks()\n\n    def clearRepresentativeBlocks(self):\n        \"\"\"Clear the representative blocks.\"\"\"\n        runLog.extra(\"Clearing representative blocks\")\n        self.representativeBlocks = collections.OrderedDict()\n        self.avgNucTemperatures = {}\n\n    def _setBuGroupBounds(self, buGroupBounds):\n        \"\"\"\n        Set the burnup group structure.\n\n        Parameters\n        ----------\n        buGroupBounds : list\n            List of upper burnup values in percent.\n\n        Raises\n        ------\n        ValueError\n            If the provided burnup groups are invalid\n        \"\"\"\n        lastBu = 0.0\n        # validate structure\n        for upperBu in buGroupBounds:\n            if upperBu <= 0 or upperBu > 100:\n                raise ValueError(\"Burnup group upper bound {0} is invalid\".format(upperBu))\n            if upperBu < lastBu:\n                raise ValueError(\"Burnup groups must be ascending\")\n            lastBu = upperBu\n\n        self._buGroupBounds = buGroupBounds + [float(\"inf\")]\n\n    def _setTempGroupBounds(self, tempGroupBounds):\n        \"\"\"Set the temperature group structure.\"\"\"\n        lastTemp = -C_TO_K\n        # validate structure\n        for upperTemp in tempGroupBounds:\n            if upperTemp < -C_TO_K:\n                raise ValueError(\"Temperature boundary is below absolute zero {0}\".format(upperTemp))\n            if upperTemp < 
lastTemp:\n                raise ValueError(\"Temp groups must be ascending\")\n            lastTemp = upperTemp\n        self._tempGroupBounds = tempGroupBounds + [float(\"inf\")]\n\n    def _updateEnvironmentGroups(self, blockList):\n        \"\"\"\n        Update the burnup group of each block based on its burnup and temperature .\n\n        If only one burnup group exists, then this is skipped so as to accommodate the possibility\n        of 2-character xsGroup values (useful for detailed V&V models w/o depletion).\n\n        See Also\n        --------\n        armi.reactor.blocks.Block.getMicroSuffix\n        \"\"\"\n        if not self._envGroupUpdatesEnabled:\n            runLog.debug(\"Skipping burnup group update of {0} blocks because it is disabled\".format(len(blockList)))\n            return\n\n        numBuGroups = len(self._buGroupBounds)\n        if numBuGroups == 1 and len(self._tempGroupBounds) == 1:\n            # dont set block.p.envGroupNum since all 1 group and we want to support 2 char xsGroup\n            return\n        runLog.debug(\"Updating env groups of {0} blocks\".format(len(blockList)))\n        for block in blockList:\n            bu = block.p.percentBu\n            for buIndex, upperBu in enumerate(self._buGroupBounds):\n                if bu <= upperBu:\n                    buGroupVal = buIndex\n                    tempGroupVal = 0\n                    isotope = self._initializeXsID(block.getMicroSuffix()).xsTempIsotope\n                    if isotope and len(self._tempGroupBounds) > 1:\n                        # if statement saves this somewhat expensive calc if we are not doing temp groups\n                        tempC = getBlockNuclideTemperature(block, isotope)\n                        for tempIndex, upperTemp in enumerate(self._tempGroupBounds):\n                            if tempC <= upperTemp:\n                                tempGroupVal = tempIndex\n                                break\n                    # this 
ordering groups like-temperatures together in group number\n                    block.p.envGroupNum = tempGroupVal * numBuGroups + buGroupVal\n                    break\n\n    def _addXsGroupsFromBlocks(self, blockCollectionsByXsGroup, blockList):\n        \"\"\"\n        Build all the cross section groups based on their XS type and Env group.\n\n        Also ensures that their Env group is up to date with their environment.\n        \"\"\"\n        self._updateEnvironmentGroups(blockList)\n        for b in blockList:\n            xsID = b.getMicroSuffix()\n            xsSettings = self._initializeXsID(xsID)\n            if self.cs[\"tempGroups\"] and xsSettings.blockRepresentation == MEDIAN_BLOCK_COLLECTION:\n                runLog.warning(\n                    \"Median block currently only consider median burnup block, and \"\n                    \"not median temperature block in group\"\n                )\n            blockCollectionType = blockCollectionFactory(xsSettings, self.r.blueprints.allNuclidesInProblem)\n\n            group = blockCollectionsByXsGroup.get(xsID, blockCollectionType)\n            group.append(b)\n            blockCollectionsByXsGroup[xsID] = group\n        return blockCollectionsByXsGroup\n\n    def _initializeXsID(self, xsID):\n        \"\"\"Initialize a new xs id.\"\"\"\n        if xsID not in self.cs[CONF_CROSS_SECTION]:\n            runLog.debug(\"Initializing XS ID {}\".format(xsID), single=True)\n        return self.cs[CONF_CROSS_SECTION][xsID]\n\n    def xsTypeIsPregenerated(self, xsID):\n        \"\"\"Return True if the cross sections for the given ``xsID`` is pre-generated.\"\"\"\n        return self.cs[CONF_CROSS_SECTION][xsID].xsIsPregenerated\n\n    def fluxSolutionIsPregenerated(self, xsID):\n        \"\"\"Return True if an external flux solution file for the given ``xsID`` is pre-generated.\"\"\"\n        return self.cs[CONF_CROSS_SECTION][xsID].fluxIsPregenerated\n\n    def _copyPregeneratedXSFile(self, xsID):\n        # 
stop a race condition to copy files between all processors\n        if context.MPI_RANK != 0:\n            return\n\n        for xsFileLocation, xsFileName in self._getPregeneratedXsFileLocationData(xsID):\n            dest = os.path.join(os.getcwd(), xsFileName)\n            runLog.extra(\n                \"Copying pre-generated XS file {} from {} for XS ID {}\".format(\n                    xsFileName, os.path.dirname(xsFileLocation), xsID\n                )\n            )\n            # Prevent copy error if the path and destination are the same.\n            if xsFileLocation != dest:\n                safeCopy(xsFileLocation, dest)\n\n    def _copyPregeneratedFluxSolutionFile(self, xsID):\n        # stop a race condition to copy files between all processors\n        if context.MPI_RANK != 0:\n            return\n\n        fluxFileLocation, fluxFileName = self._getPregeneratedFluxFileLocationData(xsID)\n        dest = os.path.join(os.getcwd(), fluxFileName)\n        runLog.extra(\n            \"Copying pre-generated flux solution file {} from {} for XS ID {}\".format(\n                fluxFileName, os.path.dirname(fluxFileLocation), xsID\n            )\n        )\n        # Prevent copy error if the path and destination are the same.\n        if fluxFileLocation != dest:\n            safeCopy(fluxFileLocation, dest)\n\n    def _getPregeneratedXsFileLocationData(self, xsID):\n        \"\"\"\n        Gather the pre-generated cross section file data and check that the files exist.\n\n        Notes\n        -----\n        Multiple files can exist on the `file location` setting for a single XS ID. 
This checks that all files exist\n        and returns a list of tuples (file path, fileName).\n        \"\"\"\n        fileData = []\n        filePaths = self.cs[CONF_CROSS_SECTION][xsID].xsFileLocation\n        for filePath in filePaths:\n            filePath = os.path.abspath(filePath)\n            if not os.path.exists(filePath) or os.path.isdir(filePath):\n                raise ValueError(\n                    \"External cross section path for XS ID {} is not a valid file location {}\".format(xsID, filePath)\n                )\n            fileName = os.path.basename(filePath)\n            fileData.append((filePath, fileName))\n        return fileData\n\n    def _getPregeneratedFluxFileLocationData(self, xsID):\n        \"\"\"Gather the pre-generated flux solution file data and check that the files exist.\"\"\"\n        filePath = self.cs[CONF_CROSS_SECTION][xsID].fluxFileLocation\n        filePath = os.path.abspath(filePath)\n        if not os.path.exists(filePath) or os.path.isdir(filePath):\n            raise ValueError(\n                \"External cross section path for XS ID {} is not a valid file location {}\".format(xsID, filePath)\n            )\n        fileName = os.path.basename(filePath)\n        return (filePath, fileName)\n\n    def createRepresentativeBlocks(self):\n        \"\"\"Get a representative block from each cross-section ID managed here.\n\n        .. impl:: Create collections of blocks based on cross-section type and burn-up group.\n            :id: I_ARMI_XSGM_CREATE_XS_GROUPS\n            :implements: R_ARMI_XSGM_CREATE_XS_GROUPS\n\n            This method constructs the representative blocks and block burnups\n            for each cross-section ID in the reactor model. 
Starting with the making of cross-section groups, it will\n            find candidate blocks and create representative blocks from that selection.\n\n        \"\"\"\n        representativeBlocks = {}\n        self.avgNucTemperatures = {}\n        runLog.extra(\"Generating representative blocks for XS\")\n        blockCollectionsByXsGroup = self.makeCrossSectionGroups()\n        for xsID, collection in blockCollectionsByXsGroup.items():\n            numCandidateBlocks = len(collection.getCandidateBlocks())\n            if self.xsTypeIsPregenerated(xsID):\n                self._copyPregeneratedXSFile(xsID)\n                continue\n            if numCandidateBlocks > 0:\n                runLog.debug(\"Creating representative block for {}\".format(xsID))\n                if self.fluxSolutionIsPregenerated(xsID):\n                    self._copyPregeneratedFluxSolutionFile(xsID)\n                reprBlock = collection.createRepresentativeBlock()\n                representativeBlocks[xsID] = reprBlock\n                self.avgNucTemperatures[xsID] = collection.avgNucTemperatures\n\n        self.representativeBlocks = collections.OrderedDict(sorted(representativeBlocks.items()))\n        self._checkForUnrepresentedXSIDs(blockCollectionsByXsGroup)\n        self._modifyUnrepresentedXSIDs(blockCollectionsByXsGroup)\n        self._summarizeGroups(blockCollectionsByXsGroup)\n\n    def createRepresentativeBlocksUsingExistingBlocks(self, blockList, originalRepresentativeBlocks):\n        \"\"\"\n        Create a new set of representative blocks using provided blocks.\n\n        This uses an input list of blocks and creates new representative blocks for these blocks based on the\n        compositions and temperatures of their original representative blocks.\n\n        Notes\n        -----\n        This is required for computing Doppler, Voided-Doppler, Temperature, and Voided-Temperature reactivity\n        coefficients, where the composition of the representative block must 
remain the same, but only the\n        temperatures within the representative blocks are to be modified.\n\n        Parameters\n        ----------\n        blockList : list\n            A list of blocks defined within the core\n        originalRepresentativeBlocks : dict\n            A dict of unperturbed representative blocks that the new representative blocks are formed from\n            keys: XS group ID (e.g., \"AA\")\n            values: representative block for the XS group\n\n        Returns\n        -------\n        blockCollectionByXsGroup : dict\n            Mapping between XS IDs and the new block collections\n        modifiedReprBlocks : dict\n            Mapping between XS IDs and the new representative blocks\n        origXSIDsFromNew : dict\n            Mapping of original XS IDs to new XS IDs. New XS IDs are created to\n            represent a modified state (e.g., a Doppler temperature perturbation).\n\n        Raises\n        ------\n        ValueError\n            If passed list arguments are empty\n        \"\"\"\n        if not blockList:\n            raise ValueError(\"A block list was not supplied to create new representative blocks\")\n        if not originalRepresentativeBlocks:\n            raise ValueError(\n                \"New representative blocks cannot be created because a list of unperturbed \"\n                \"representative blocks was not provided\"\n            )\n        newBlockCollectionsByXsGroup = collections.OrderedDict()\n        blockCollectionByXsGroup = self.makeCrossSectionGroups()\n        modifiedReprBlocks, origXSIDsFromNew = self._getModifiedReprBlocks(blockList, originalRepresentativeBlocks)\n        if not modifiedReprBlocks:\n            return None\n        for newXSID in modifiedReprBlocks:\n            oldXSID = origXSIDsFromNew[newXSID]\n            oldBlockCollection = blockCollectionByXsGroup[oldXSID]\n\n            # create a new block collection that inherits all of the properties\n            # and 
settings from oldBlockCollection.\n            validBlockTypes = oldBlockCollection._validRepresentativeBlockTypes\n            if validBlockTypes is not None and len(validBlockTypes) > 0:\n                validBlockTypes = [\n                    flags._toString(Flags, flag) for flag in oldBlockCollection._validRepresentativeBlockTypes\n                ]\n            newBlockCollection = oldBlockCollection.__class__(\n                oldBlockCollection.allNuclidesInProblem,\n                validBlockTypes=validBlockTypes,\n                averageByComponent=oldBlockCollection.averageByComponent,\n            )\n            newBlockCollectionsByXsGroup[newXSID] = newBlockCollection\n\n        # clean up any unrepresented XS IDs\n        self._checkForUnrepresentedXSIDs(blockCollectionByXsGroup)\n        self._modifyUnrepresentedXSIDs(blockCollectionByXsGroup)\n        return newBlockCollectionsByXsGroup, modifiedReprBlocks, origXSIDsFromNew\n\n    def _getModifiedReprBlocks(self, blockList, originalRepresentativeBlocks):\n        \"\"\"\n        Create a new representative block for each unique XS ID on blocks to be modified.\n\n        Returns\n        -------\n        modifiedReprBlocks : dict\n            Mapping between the new XS IDs and the new representative blocks\n        origXSIDsFromNew : dict\n            Mapping between the new representative block XS IDs and the original representative block XS IDs\n        \"\"\"\n        modifiedBlockXSTypes = collections.OrderedDict()\n        modifiedReprBlocks = collections.OrderedDict()\n        origXSIDsFromNew = collections.OrderedDict()\n        for b in blockList:\n            origXSID = b.getMicroSuffix()\n            # Filter out the pre-generated XS IDs\n            if origXSID not in originalRepresentativeBlocks:\n                if self.xsTypeIsPregenerated(origXSID):\n                    runLog.warning(\n                        \"A modified representative block for XS ID `{}` cannot be created because 
it is \"\n                        \"mapped to a pre-generated cross section set. Please ensure that this \"\n                        \"approximation is valid for the analysis.\".format(origXSID),\n                        single=True,\n                    )\n            else:\n                origXSType = origXSID[0]\n                if origXSType not in modifiedBlockXSTypes.keys():\n                    nextXSType = self.getNextAvailableXsTypes(excludedXSTypes=modifiedBlockXSTypes.values())[0]\n                    modifiedBlockXSTypes[origXSType] = nextXSType\n                newXSID = modifiedBlockXSTypes[origXSType] + origXSID[1]  # New XS Type + Old Burnup Group\n                origXSIDsFromNew[newXSID] = origXSID\n\n        # Create new representative blocks based on the original XS IDs\n        for newXSID, origXSID in origXSIDsFromNew.items():\n            runLog.extra(\n                \"Creating representative block `{}` with composition from representative block `{}`\".format(\n                    newXSID, origXSID\n                )\n            )\n            newXSType = newXSID[0]\n            newReprBlock = copy.deepcopy(originalRepresentativeBlocks[origXSID])\n            newReprBlock.p.xsType = newXSType\n            newReprBlock.name = \"AVG_{}\".format(newXSID)\n            modifiedReprBlocks[newXSID] = newReprBlock\n            # Update the XS types of the blocks that will be modified\n            for b in blockList:\n                if b.getMicroSuffix() == origXSID:\n                    b.p.xsType = newXSType\n\n            # copy XS settings to new XS ID\n            self.cs[CONF_CROSS_SECTION][newXSID] = copy.deepcopy(self.cs[CONF_CROSS_SECTION][origXSID])\n            self.cs[CONF_CROSS_SECTION][newXSID].xsID = newXSID\n\n        return modifiedReprBlocks, origXSIDsFromNew\n\n    def _checkForUnrepresentedXSIDs(self, blockCollectionsByXsGroup):\n        \"\"\"\n        Check for unrepresented XS IDs after self._updateEnvironmentGroups() has 
been called.\n\n        Parameters\n        ----------\n        blockCollectionsByXsGroup: dict[str, BlockCollection]\n            Dict of BlockCollection keyed by the XS group they belong to.\n\n        Notes\n        -----\n        This should be run after :meth:`CrossSectionGroupManager._updateEnvironmentGroups`, which resets\n        ``b.p.envGroup`` and can result in unrepresented cross section IDs. This is usually invoked\n        as a result of a call to :meth:`CrossSectionGroupManager.makeCrossSectionGroups`\n        \"\"\"\n        self._unrepresentedXSIDs = []\n        for xsID, collection in blockCollectionsByXsGroup.items():\n            if self.xsTypeIsPregenerated(xsID) or len(collection.getCandidateBlocks()) > 0:\n                continue\n            else:\n                runLog.debug(\n                    \"No candidate blocks in group for {} (with a valid representative block flag). \"\n                    \"Will apply different environment group\".format(xsID)\n                )\n                self._unrepresentedXSIDs.append(xsID)\n\n    def getNextAvailableXsTypes(self, howMany=1, excludedXSTypes=None):\n        \"\"\"Return the next however many available xs types.\n\n        Parameters\n        ----------\n        howMany : int, optional\n            The number of requested xs types\n        excludedXSTypes : list, optional\n            A list of cross section types to exclude from using\n\n        Raises\n        ------\n        ValueError\n            If there are no available XS types to be allocated\n        \"\"\"\n        allocatedXSTypes = set()\n        for b in self.r.core.getBlocks(includeAll=True):\n            allocatedXSTypes.add(b.p.xsType)\n        if excludedXSTypes is not None:\n            for xsType in excludedXSTypes:\n                allocatedXSTypes.add(xsType)\n        availableXsTypes = sorted(list(set(_ALLOWABLE_XS_TYPE_LIST).difference(allocatedXSTypes)))\n        if len(availableXsTypes) < howMany:\n            
raise ValueError(\n                \"There are not enough available xs types. {} have been allocated, {} are available, and \"\n                \"{} have been requested.\".format(len(allocatedXSTypes), len(availableXsTypes), howMany)\n            )\n\n        # check for lower-case on case-insensitive file system\n        if sys.platform.startswith(\"win\"):\n            allXSTypes = allocatedXSTypes.union(set(availableXsTypes[:howMany]))\n            allCaps = {c.capitalize() for c in allXSTypes}\n            if len(allCaps) != len(allXSTypes):\n                runLog.warning(\n                    \"Mixing upper and lower-case XS group types on a Windows system, which is not \"\n                    \"case-sensitive. There is a chance that ARMI could overwrite previously \"\n                    \"generated XS files, which could cause mysterious and/or unpredictable errors.\"\n                )\n        return availableXsTypes[:howMany]\n\n    def _getMissingBlueprintBlocks(self, blockCollectionsByXsGroup):\n        \"\"\"\n        Gets all blocks with suffixes not yet represented.\n        (for blocks in assemblies in the blueprints but not in the core).\n\n        Notes\n        -----\n        Certain cases (ZPPR validation cases) need to run cross sections for assemblies not in\n        the core to get by region cross sections and flux factors.\n        \"\"\"\n        missingBlueprintBlocks = []\n        blockList = []\n        for a in self.r.blueprints.assemblies.values():\n            blockList.extend(b for b in a)\n\n        self._updateEnvironmentGroups(blockList)\n        for b in blockList:\n            if b.getMicroSuffix() not in blockCollectionsByXsGroup:\n                b2 = copy.deepcopy(b)\n                missingBlueprintBlocks.append(b2)\n        return missingBlueprintBlocks\n\n    def makeCrossSectionGroups(self):\n        \"\"\"Make cross section groups for all blocks in reactor and unrepresented blocks from blueprints.\"\"\"\n        
bCollectXSGroup = {}  # clear old groups (in case some are no longer existent)\n        bCollectXSGroup = self._addXsGroupsFromBlocks(bCollectXSGroup, self.r.core.getBlocks())\n\n        # add blocks that are defined in blueprints, but not in core\n        bCollectXSGroup = self._addXsGroupsFromBlocks(bCollectXSGroup, self._getMissingBlueprintBlocks(bCollectXSGroup))\n        blockCollectionsByXsGroup = collections.OrderedDict(sorted(bCollectXSGroup.items()))\n        return blockCollectionsByXsGroup\n\n    def _getAlternateEnvGroup(self, missingXsType):\n        \"\"\"Get a substitute block to use since there are no blocks with flags for xs gen.\"\"\"\n        for otherXsID in self.representativeBlocks:\n            repType, repEnvGroup = otherXsID\n            if repType == missingXsType:\n                return repEnvGroup\n\n    def _modifyUnrepresentedXSIDs(self, blockCollectionsByXsGroup):\n        \"\"\"\n        Adjust the xsID of blocks in the groups that are not represented.\n\n        Try to just adjust the burnup group up to something that is represented\n        (can happen to structure in AA when only AB, AC, AD still remain,\n        but if some fresh AA happened to be added it might be needed).\n        \"\"\"\n        # No blocks in in this ID had a valid representative block flag (such as `fuel` for default),\n        # so nothing valid to run lattice physics on...\n        for xsID in self._unrepresentedXSIDs:\n            missingXsType, _missingEnvGroup = xsID\n            nonRepBlocks = blockCollectionsByXsGroup.get(xsID)\n            if nonRepBlocks:\n                newEnvGroup = self._getAlternateEnvGroup(missingXsType)\n                if newEnvGroup:\n                    # there were no blocks flagged to xs gen even though there were some not suitable for\n                    # generation in the group so can't make XS and use different.\n                    runLog.warning(\n                        \"Changing XSID of {0} blocks from {1} to 
{2}\".format(\n                            len(nonRepBlocks), xsID, missingXsType[0] + newEnvGroup\n                        )\n                    )\n                    for b in nonRepBlocks:\n                        b.p.envGroup = newEnvGroup\n                else:\n                    runLog.warning(\n                        \"No representative blocks with XS type {0} exist in the core. \"\n                        \"There were also no similar blocks to use. \"\n                        \"These XS cannot be generated and must exist in the working \"\n                        \"directory or the run will fail.\".format(xsID)\n                    )\n\n    def _summarizeGroups(self, blockCollectionsByXsGroup):\n        \"\"\"Summarize current contents of the XS groups.\"\"\"\n        from armi.physics.neutronics.settings import CONF_XS_BLOCK_REPRESENTATION\n\n        runLog.extra(\"Cross section group manager summary\")\n        runLog.extra(\"Averaging performed by `{0}`\".format(self.cs[CONF_XS_BLOCK_REPRESENTATION]))\n        for xsID, blocks in blockCollectionsByXsGroup.items():\n            if blocks:\n                xsIDGroup = self._getXsIDGroup(xsID)\n                if xsIDGroup == self._REPR_GROUP:\n                    reprBlock = self.representativeBlocks.get(xsID)\n                    xsSettings = self._initializeXsID(reprBlock.getMicroSuffix())\n                    temp = self.avgNucTemperatures[xsID].get(xsSettings.xsTempIsotope, \"N/A\")\n                    runLog.extra(\n                        (\n                            \"XS ID {} contains {:4d} blocks, with avg burnup {} \"\n                            \"and avg fuel temp {}, represented by: {:65s}\"\n                        ).format(\n                            xsID,\n                            len(blocks),\n                            reprBlock.p.percentBu,\n                            temp,\n                            reprBlock,\n                        )\n                    )\n           
     elif xsIDGroup == self._NON_REPR_GROUP:\n                    runLog.extra(\n                        \"XS ID {} contains {:4d} blocks, but no representative block.\".format(xsID, len(blocks))\n                    )\n                elif xsIDGroup == self._PREGEN_GROUP:\n                    xsFileNames = [y for _x, y in self._getPregeneratedXsFileLocationData(xsID)]\n                    runLog.extra(\n                        \"XS ID {} contains {:4d} blocks, represented by: {}\".format(xsID, len(blocks), xsFileNames)\n                    )\n                else:\n                    raise ValueError(\"No valid group for XS ID {}\".format(xsID))\n\n    def _getXsIDGroup(self, xsID):\n        if self.xsTypeIsPregenerated(xsID):\n            return self._PREGEN_GROUP\n        elif xsID in self.representativeBlocks.keys():\n            return self._REPR_GROUP\n        elif xsID in self._unrepresentedXSIDs:\n            return self._NON_REPR_GROUP\n        return None\n\n    def disableEnvGroupUpdates(self):\n        \"\"\"\n        Turn off updating Env groups based on environment.\n\n        Useful during reactivity coefficient calculations to be consistent with ref. 
run.\n\n        See Also\n        --------\n        enableEnvGroupUpdates\n        \"\"\"\n        runLog.extra(\"Environment xs group updating disabled\")\n        wasEnabled = self._envGroupUpdatesEnabled\n        self._envGroupUpdatesEnabled = False\n        return wasEnabled\n\n    def enableEnvGroupUpdates(self):\n        \"\"\"\n        Turn on updating Env groups based on environment.\n\n        See Also\n        --------\n        disableEnvGroupUpdates\n        \"\"\"\n        runLog.extra(\"Environment xs group updating enabled\")\n        self._envGroupUpdatesEnabled = True\n\n    def getNucTemperature(self, xsID, nucName):\n        \"\"\"\n        Return the temperature (in C) of the nuclide in the group with specified xsID.\n\n        Notes\n        -----\n        Returns None if the xsID or nucName are not in the average nuclide temperature dictionary\n        `self.avgNucTemperatures`\n        \"\"\"\n        if xsID not in self.avgNucTemperatures:\n            return None\n        return self.avgNucTemperatures[xsID].get(nucName, None)\n\n    def updateNuclideTemperatures(self, blockCollectionByXsGroup=None):\n        \"\"\"\n        Recompute nuclide temperatures for the block collections within the core.\n\n        Parameters\n        ----------\n        blockCollectionByXsGroup : dict, optional\n            Mapping between the XS IDs in the core and the block collections. 
Note that providing this as\n            an argument will only update the average temperatures of these XS IDs/block collections and will\n            result in other XS ID average temperatures not included to be discarded.\n\n        Notes\n        -----\n        This method does not update any properties of the representative blocks.\n        Temperatures are obtained from the BlockCollection class rather than the representative block.\n        \"\"\"\n        self.avgNucTemperatures = {}\n        blockCollectionsByXsGroup = blockCollectionByXsGroup or self.makeCrossSectionGroups()\n        runLog.info(\n            \"Updating representative block average nuclide temperatures for the following XS IDs: {}\".format(\n                blockCollectionsByXsGroup.keys()\n            )\n        )\n        for xsID, collection in blockCollectionsByXsGroup.items():\n            collection.calcAvgNuclideTemperatures()\n            self.avgNucTemperatures[xsID] = collection.avgNucTemperatures\n            runLog.extra(\"XS ID: {}, Collection: {}\".format(xsID, collection))\n\n\n# String constants\nMEDIAN_BLOCK_COLLECTION = \"Median\"\nAVERAGE_BLOCK_COLLECTION = \"Average\"\nFLUX_WEIGHTED_AVERAGE_BLOCK_COLLECTION = \"FluxWeightedAverage\"\nSLAB_COMPONENTS_BLOCK_COLLECTION = \"ComponentAverage1DSlab\"\nCYLINDRICAL_COMPONENTS_BLOCK_COLLECTION = \"ComponentAverage1DCylinder\"\nCYLINDRICAL_COMPONENTS_DUCT_HET_BLOCK_COLLECTION = \"ComponentAverage1DCylinderDuctHeterogeneous\"\n\n# Mapping between block collection string constants and their\n# respective block collection classes.\nBLOCK_COLLECTIONS = {\n    MEDIAN_BLOCK_COLLECTION: MedianBlockCollection,\n    AVERAGE_BLOCK_COLLECTION: AverageBlockCollection,\n    FLUX_WEIGHTED_AVERAGE_BLOCK_COLLECTION: FluxWeightedAverageBlockCollection,\n    SLAB_COMPONENTS_BLOCK_COLLECTION: SlabComponentsAverageBlockCollection,\n    CYLINDRICAL_COMPONENTS_BLOCK_COLLECTION: CylindricalComponentsAverageBlockCollection,\n    
CYLINDRICAL_COMPONENTS_DUCT_HET_BLOCK_COLLECTION: CylindricalComponentsDuctHetAverageBlockCollection,\n}\n\n\ndef blockCollectionFactory(xsSettings, allNuclidesInProblem):\n    \"\"\"Build a block collection based on user settings and input.\"\"\"\n    blockRepresentation = xsSettings.blockRepresentation\n    if (blockRepresentation == CYLINDRICAL_COMPONENTS_BLOCK_COLLECTION) and xsSettings.ductHeterogeneous:\n        blockRepresentation = CYLINDRICAL_COMPONENTS_DUCT_HET_BLOCK_COLLECTION\n    validBlockTypes = xsSettings.validBlockTypes\n    averageByComponent = xsSettings.averageByComponent\n    return BLOCK_COLLECTIONS[blockRepresentation](\n        allNuclidesInProblem,\n        validBlockTypes=validBlockTypes,\n        averageByComponent=averageByComponent,\n    )\n"
  },
  {
    "path": "armi/physics/neutronics/crossSectionSettings.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThe data structures and schema of the cross section modeling options.\n\nThese are advanced/compound settings that are carried along in the normal cs\nobject but aren't simple key/value pairs.\n\nThe cs object could either hold the base data (dicts) and create instances\nof these data structure objects as needed, or the settings system could actually\nhold instances of these data structures. 
It is most convenient to let the cs\nobject hold actual instances of these data.\n\nSee detailed docs in `:doc: Lattice Physics <reference/physics/neutronics/latticePhysics/latticePhysics>`.\n\"\"\"\n\nfrom enum import Enum\nfrom typing import Dict, Union\n\nimport voluptuous as vol\n\nfrom armi import context, runLog\nfrom armi.physics.neutronics import crossSectionGroupManager\nfrom armi.physics.neutronics.crossSectionGroupManager import BLOCK_COLLECTIONS\nfrom armi.settings import Setting\n\nCONF_BLOCK_REPRESENTATION = \"blockRepresentation\"\nCONF_MEMORY_REQUIREMENT = \"requiredRAM\"\nCONF_BLOCKTYPES = \"validBlockTypes\"\nCONF_BUCKLING = \"criticalBuckling\"\nCONF_DRIVER = \"driverID\"\nCONF_EXTERNAL_DRIVER = \"externalDriver\"\nCONF_EXTERNAL_RINGS = \"numExternalRings\"\nCONF_XS_FILE_LOCATION = \"xsFileLocation\"\nCONF_EXTERNAL_FLUX_FILE_LOCATION = \"fluxFileLocation\"\nCONF_GEOM = \"geometry\"\nCONF_HOMOGBLOCK = \"useHomogenizedBlockComposition\"\nCONF_INTERNAL_RINGS = \"numInternalRings\"\nCONF_MERGE_INTO_CLAD = \"mergeIntoClad\"\nCONF_MERGE_INTO_FUEL = \"mergeIntoFuel\"\nCONF_MESH_PER_CM = \"meshSubdivisionsPerCm\"\nCONF_REACTION_DRIVER = \"nuclideReactionDriver\"\nCONF_XSID = \"xsID\"\nCONF_XS_EXECUTE_EXCLUSIVE = \"xsExecuteExclusive\"\nCONF_XS_PRIORITY = \"xsPriority\"\nCONF_COMPONENT_AVERAGING = \"averageByComponent\"\nCONF_XS_MAX_ATOM_NUMBER = \"xsMaxAtomNumber\"\nCONF_MIN_DRIVER_DENSITY = \"minDriverDensity\"\nCONF_DUCT_HETEROGENEOUS = \"ductHeterogeneous\"\nCONF_TRACE_ISOTOPE_THRESHOLD = \"traceIsotopeThreshold\"\nCONF_XS_TEMP_ISOTOPE = \"xsTempIsotope\"\n\n\nclass XSGeometryTypes(Enum):\n    \"\"\"\n    Data structure for storing the available geometry options\n    within the framework.\n    \"\"\"\n\n    ZERO_DIMENSIONAL = 1\n    ONE_DIMENSIONAL_SLAB = 2\n    ONE_DIMENSIONAL_CYLINDER = 4\n    TWO_DIMENSIONAL_HEX = 8\n\n    @classmethod\n    def _mapping(cls):\n        mapping = {\n            cls.ZERO_DIMENSIONAL: \"0D\",\n            
cls.ONE_DIMENSIONAL_SLAB: \"1D slab\",\n            cls.ONE_DIMENSIONAL_CYLINDER: \"1D cylinder\",\n            cls.TWO_DIMENSIONAL_HEX: \"2D hex\",\n        }\n        return mapping\n\n    @classmethod\n    def getStr(cls, typeSpec: Enum):\n        \"\"\"\n        Return a string representation of the given ``typeSpec``.\n\n        Examples\n        --------\n            XSGeometryTypes.getStr(XSGeometryTypes.ZERO_DIMENSIONAL) == \"0D\"\n            XSGeometryTypes.getStr(XSGeometryTypes.TWO_DIMENSIONAL_HEX) == \"2D hex\"\n        \"\"\"\n        geometryTypes = list(cls)\n        if typeSpec not in geometryTypes:\n            raise TypeError(f\"{typeSpec} not in {geometryTypes}\")\n        return cls._mapping()[cls[typeSpec.name]]\n\n\nXS_GEOM_TYPES = {\n    XSGeometryTypes.getStr(XSGeometryTypes.ZERO_DIMENSIONAL),\n    XSGeometryTypes.getStr(XSGeometryTypes.ONE_DIMENSIONAL_SLAB),\n    XSGeometryTypes.getStr(XSGeometryTypes.ONE_DIMENSIONAL_CYLINDER),\n    XSGeometryTypes.getStr(XSGeometryTypes.TWO_DIMENSIONAL_HEX),\n}\n\n# This dictionary defines the valid set of inputs based on\n# the geometry type within the ``XSModelingOptions``\n_VALID_INPUTS_BY_GEOMETRY_TYPE = {\n    XSGeometryTypes.getStr(XSGeometryTypes.ZERO_DIMENSIONAL): {\n        CONF_XSID,\n        CONF_GEOM,\n        CONF_BUCKLING,\n        CONF_DRIVER,\n        CONF_BLOCKTYPES,\n        CONF_BLOCK_REPRESENTATION,\n        CONF_EXTERNAL_FLUX_FILE_LOCATION,\n        CONF_COMPONENT_AVERAGING,\n        CONF_XS_EXECUTE_EXCLUSIVE,\n        CONF_XS_PRIORITY,\n        CONF_XS_MAX_ATOM_NUMBER,\n        CONF_XS_TEMP_ISOTOPE,\n    },\n    XSGeometryTypes.getStr(XSGeometryTypes.ONE_DIMENSIONAL_SLAB): {\n        CONF_XSID,\n        CONF_GEOM,\n        CONF_MESH_PER_CM,\n        CONF_BLOCKTYPES,\n        CONF_BLOCK_REPRESENTATION,\n        CONF_EXTERNAL_FLUX_FILE_LOCATION,\n        CONF_COMPONENT_AVERAGING,\n        CONF_XS_EXECUTE_EXCLUSIVE,\n        CONF_XS_PRIORITY,\n        CONF_XS_MAX_ATOM_NUMBER,\n        
CONF_MIN_DRIVER_DENSITY,\n        CONF_XS_TEMP_ISOTOPE,\n    },\n    XSGeometryTypes.getStr(XSGeometryTypes.ONE_DIMENSIONAL_CYLINDER): {\n        CONF_XSID,\n        CONF_GEOM,\n        CONF_MERGE_INTO_CLAD,\n        CONF_MERGE_INTO_FUEL,\n        CONF_DRIVER,\n        CONF_HOMOGBLOCK,\n        CONF_INTERNAL_RINGS,\n        CONF_EXTERNAL_RINGS,\n        CONF_MESH_PER_CM,\n        CONF_BLOCKTYPES,\n        CONF_BLOCK_REPRESENTATION,\n        CONF_EXTERNAL_FLUX_FILE_LOCATION,\n        CONF_COMPONENT_AVERAGING,\n        CONF_XS_EXECUTE_EXCLUSIVE,\n        CONF_XS_PRIORITY,\n        CONF_XS_MAX_ATOM_NUMBER,\n        CONF_MIN_DRIVER_DENSITY,\n        CONF_DUCT_HETEROGENEOUS,\n        CONF_TRACE_ISOTOPE_THRESHOLD,\n        CONF_XS_TEMP_ISOTOPE,\n    },\n    XSGeometryTypes.getStr(XSGeometryTypes.TWO_DIMENSIONAL_HEX): {\n        CONF_XSID,\n        CONF_GEOM,\n        CONF_BUCKLING,\n        CONF_EXTERNAL_DRIVER,\n        CONF_DRIVER,\n        CONF_REACTION_DRIVER,\n        CONF_EXTERNAL_RINGS,\n        CONF_BLOCK_REPRESENTATION,\n        CONF_EXTERNAL_FLUX_FILE_LOCATION,\n        CONF_COMPONENT_AVERAGING,\n        CONF_XS_EXECUTE_EXCLUSIVE,\n        CONF_XS_PRIORITY,\n        CONF_XS_MAX_ATOM_NUMBER,\n        CONF_MIN_DRIVER_DENSITY,\n        CONF_XS_TEMP_ISOTOPE,\n    },\n}\n\n_SINGLE_XS_SCHEMA = vol.Schema(\n    {\n        vol.Optional(CONF_GEOM): vol.All(str, vol.In(XS_GEOM_TYPES)),\n        vol.Optional(CONF_BLOCK_REPRESENTATION): vol.All(\n            str,\n            vol.In(\n                set(BLOCK_COLLECTIONS.keys()),\n            ),\n        ),\n        vol.Optional(CONF_DRIVER): str,\n        vol.Optional(CONF_BUCKLING): bool,\n        vol.Optional(CONF_REACTION_DRIVER): str,\n        vol.Optional(CONF_BLOCKTYPES): [str],\n        vol.Optional(CONF_HOMOGBLOCK): bool,\n        vol.Optional(CONF_EXTERNAL_DRIVER): bool,\n        vol.Optional(CONF_INTERNAL_RINGS): vol.Coerce(int),\n        vol.Optional(CONF_EXTERNAL_RINGS): vol.Coerce(int),\n        
vol.Optional(CONF_MERGE_INTO_CLAD): [str],\n        vol.Optional(CONF_MERGE_INTO_FUEL): [str],\n        vol.Optional(CONF_XS_FILE_LOCATION): [str],\n        vol.Optional(CONF_EXTERNAL_FLUX_FILE_LOCATION): str,\n        vol.Optional(CONF_MESH_PER_CM): vol.Coerce(float),\n        vol.Optional(CONF_XS_EXECUTE_EXCLUSIVE): bool,\n        vol.Optional(CONF_XS_PRIORITY): vol.Coerce(float),\n        vol.Optional(CONF_XS_MAX_ATOM_NUMBER): vol.Coerce(int),\n        vol.Optional(CONF_MIN_DRIVER_DENSITY): vol.Coerce(float),\n        vol.Optional(CONF_COMPONENT_AVERAGING): bool,\n        vol.Optional(CONF_DUCT_HETEROGENEOUS): bool,\n        vol.Optional(CONF_TRACE_ISOTOPE_THRESHOLD): vol.Coerce(float),\n        vol.Optional(CONF_XS_TEMP_ISOTOPE): str,\n        vol.Optional(CONF_MEMORY_REQUIREMENT): vol.Coerce(float),\n    }\n)\n\n_XS_SCHEMA = vol.Schema({vol.All(str, vol.Length(min=1, max=2)): _SINGLE_XS_SCHEMA})\n\n\nclass XSSettings(dict):\n    \"\"\"\n    Container for holding multiple cross section settings based on their XSID.\n\n    This is intended to be stored as part of a case settings and to be\n    used for cross section modeling within a run.\n\n    Notes\n    -----\n    This is a specialized dictionary that functions in a similar manner as a\n    defaultdict where if a key (i.e., XSID) is missing then a default will\n    be set. 
If a missing key is being added before the ``setDefaults`` method\n    is called then this will produce an error.\n\n    This cannot just be a defaultdict because the creation of new cross\n    section settings are dependent on user settings.\n    \"\"\"\n\n    def __init__(self, *args, **kwargs):\n        dict.__init__(self, *args, **kwargs)\n        self._blockRepresentation = None\n        self._validBlockTypes = None\n\n    def __repr__(self):\n        return f\"<{self.__class__.__name__} with XS IDs {self.keys()}>\"\n\n    def __getitem__(self, xsID):\n        \"\"\"\n        Return the stored settings of the same xs type and the lowest burnup group if they exist.\n\n        Notes\n        -----\n        1. If ``AA`` and ``AB`` exist, but ``AC`` is created, then the intended behavior\n           is that ``AC`` settings will be set to the settings in ``AA``.\n\n        2. If only ``YZ`` exists and ``YA`` is created, then the intended behavior is that\n           ``YA`` settings will NOT be set to the settings in ``YZ``\n\n        3. Requirements for using the existing cross section settings:\n\n           a.  The existing XS ID must match the current XS ID.\n           b.  The current xs burnup group must be larger than the lowest burnup group for the\n               existing XS ID\n           c.  If 3a. and 3b. 
are not met, then the default cross section settings will be\n               set for the current XS ID\n\n        \"\"\"\n        if xsID in self:\n            return dict.__getitem__(self, xsID)\n\n        # exact key not present so give lowest env group key, eg AA or BA as the source for\n        # settings since users do not typically provide all combinations of second chars explicitly\n        xsType = xsID[0]\n        envGroup = xsID[1]\n        existingXsOpts = [xsOpt for xsOpt in self.values() if xsOpt.xsType == xsType and xsOpt.envGroup < envGroup]\n\n        if not any(existingXsOpts):\n            return self._getDefault(xsID)\n\n        else:\n            return sorted(existingXsOpts, key=lambda xsOpt: xsOpt.envGroup)[0]\n\n    def setDefaults(self, blockRepresentation, validBlockTypes):\n        \"\"\"\n        Set defaults for current and future xsIDs based user settings.\n\n        This must be delayed after read-time since the settings affecting this may not be loaded yet and could still be\n        at their own defaults when this input is being processed. Thus, defaults are set at a later time.\n\n        Parameters\n        ----------\n        blockRepresentation : str\n            Valid options are provided in ``CrossSectionGroupManager.BLOCK_COLLECTIONS``\n        validBlockTypes : list of str or bool\n           This configures which blocks (by their type) the cross section group manager will merge together to create a\n           representative block. If set to ``None`` or ``True`` then all block types in the XS ID will be considered. If\n           set to ``False`` then a default of [\"fuel\"] will be used. If set to a list of strings then the specific list\n           will be used. 
A typical input may be [\"fuel\"] to just consider the fuel blocks.\n\n        See Also\n        --------\n        armi.physics.neutronics.crossSectionGroupManager.CrossSectionGroupManager.interactBOL : calls this\n        \"\"\"\n        self._blockRepresentation = blockRepresentation\n        self._validBlockTypes = validBlockTypes\n        for _xsId, xsOpt in self.items():\n            xsOpt.setDefaults(blockRepresentation, validBlockTypes)\n            xsOpt.validate()\n\n    def _getDefault(self, xsID):\n        \"\"\"\n        Process the optional ``crossSectionControl`` setting.\n\n        This input allows users to override global defaults for specific cross section IDs (xsID).\n\n        To simplify downstream handling of the various XS controls, we build a full data structure here\n        that should fully define the settings for each individual cross section ID.\n        \"\"\"\n        # Only check since the state of the underlying cross section dictionary does not\n        # get broadcasted to worker nodes. This check is only relevant for the first time\n        # this is called and when called by the head node.\n        if context.MPI_RANK == 0:\n            if self._blockRepresentation is None:\n                raise ValueError(\n                    f\"The defaults of {self} have not been set. 
Call ``setDefaults`` first \"\n                    \"before attempting to add a new XS ID.\"\n                )\n\n        xsOpt = XSModelingOptions(xsID, geometry=XSGeometryTypes.getStr(XSGeometryTypes.ZERO_DIMENSIONAL))\n        xsOpt.setDefaults(self._blockRepresentation, self._validBlockTypes)\n        xsOpt.validate()\n        return xsOpt\n\n\nclass XSModelingOptions:\n    \"\"\"\n    Cross section modeling options for a particular XS ID.\n\n    Attributes\n    ----------\n    xsID : str\n        Cross section ID that is two characters maximum (i.e., AA).\n\n    geometry: str\n        The geometry modeling approximation for regions of the core with\n        this assigned xsID. This is required if the ``xsFileLocation``\n        attribute is not provided. This cannot be set if the ``xsFileLocation``\n        is provided.\n\n    xsFileLocation: list of str or None\n        This should be a list of paths where the cross sections for this\n        xsID can be copied from. This is required if the ``geometry``\n        attribute is not provided. This cannot be set if the ``geometry``\n        is provided.\n\n    fluxFileLocation: str or None\n        This should be a path where a pre-calculated flux solution\n        for this xsID can be copied from. The ``geometry`` attribute\n        must be provided with this input.\n\n    validBlockTypes: str or None\n        This is a configuration option for how the cross section group manager\n        determines which blocks/regions to manage as part of the same collection\n        for the current xsID. If this is set to ``None`` then all blocks/regions\n        with the current xsID will be considered.\n\n    blockRepresentation : str\n        This is a configuration option for how the cross section group manager\n        will select how to create a representative block based on the collection\n        within the same xsID. 
See: ``crossSectionGroupManager.BLOCK_COLLECTIONS``.\n\n    driverID : str\n        This is a lattice physics configuration option used to determine which\n        representative block can be used as a \"fixed source\" driver for another\n        composition. This is particularly useful for non-fuel or highly subcritical\n        regions.\n\n    criticalBuckling : bool\n        This is a lattice physics configuration option used to enable or disable\n        the critical buckling search option.\n\n    nuclideReactionDriver : str\n        This is a lattice physics configuration option that is similar to the\n        ``driverID``, but rather than applying the source from a specific\n        representative block, the neutron source is taken from a single\n        nuclides fission spectrum (i.e., U235). This is particularly useful\n        for configuring SERPENT 2 lattice physics calculations.\n\n    externalDriver : bool\n        This is a lattice physics configuration option that can be used\n        to determine if the fixed source problem is internally driven\n        or externally driven by the ``driverID`` region. Externally\n        driven means that the region will be placed on the outside of the\n        current xsID block/region. If this is False then the driver\n        region will be \"inside\" (i.e., an inner ring in a cylindrical\n        model).\n\n    useHomogenizedBlockComposition : bool\n        This is a lattice physics configuration option that is useful for\n        modeling spatially dependent problems (i.e., 1D/2D). If this is\n        True then the representative block for the current xsID will be\n        be a homogenized region. If this is False then the block will be\n        represented in the geometry type selected. 
This is mainly used for\n        1D cylindrical problems.\n\n    numInternalRings : int\n        This is a lattice physics configuration option that is used to\n        specify the number of grid-based rings for the representative block.\n\n    numExternalRings : int\n        This is a lattice physics configuration option that is used to\n        specify the number of grid-based rings for the driver block.\n\n    mergeIntoClad : list of str\n        This is a lattice physics configuration option that is a list of component\n        names to merge into a \"clad\" component. This is highly-design specific\n        and is sometimes used to merge a \"gap\" or low-density region into\n        a \"clad\" region to avoid numerical issues.\n\n    mergeIntoFuel : list of str\n        This is a lattice physics configuration option that is a list of component\n        names to merge into a \"fuel\" component. This is highly-design specific\n        and is sometimes used to merge a \"gap\" or low-density region into\n        a \"fuel\" region to avoid numerical issues.\n\n    meshSubdivisionsPerCm : float\n        This is a lattice physics configuration option that can be used to control\n        subregion meshing of the representative block in 1D problems.\n\n    xsExecuteExclusive : bool\n        The mpi task that results from this xsID will reserve a full processor and\n        no others will allocate to it. This is useful for time balancing when you\n        have one task that takes much longer than the others.\n\n    xsPriority: int\n        The priority of the mpi tasks that results from this xsID. Lower priority\n        will execute first. 
starting longer jobs first is generally more efficient.\n\n    xsMaxAtomNumber : int\n        The maximum atom number to model for infinite dilute isotopes in lattice physics.\n        This is used to avoid modeling isotopes with a large atomic number\n        (e.g., fission products) as a depletion product of an isotope with a much\n        smaller atomic number.\n\n    averageByComponent: bool\n        Controls whether the representative block averaging is performed on a\n        component-by-component basis or on the block as a whole. If True, the\n        resulting representative block will have component compositions that\n        largely reflect those of the underlying blocks in the collection. If\n        False, the number densities of some nuclides in the individual\n        components may not be reflective of those of the underlying components\n        due to the block number density \"dehomogenization\".\n\n    minDriverDensity: float\n        The minimum number density for nuclides included in driver material for a 1D\n        lattice physics model.\n\n    ductHeterogeneous : bool\n        This is a lattice physics configuration option used to enable a partially\n        heterogeneous approximation for a 1D cylindrical model. Everything inside of the\n        duct will be treated as homogeneous.\n\n    traceIsotopeThreshold : float\n        This is a lattice physics configuration option used to enable a separate 0D fuel\n        cross section calculation for trace fission products when using a 1D cross section\n        model. This can significantly reduce the memory and run time required for the 1D\n        model. The setting takes a float value that represents the number density cutoff\n        for isotopes to be considered \"trace\". If no value is provided, the default is 0.0.\n\n    xsTempIsotope: str\n        The isotope whose temperature is interrogated when placing a block in a temperature cross section group.\n        See `tempGroups`. 
\"U238\" is default since it tends to be dominant doppler isotope in most reactors.\n\n    requiredRAM: float\n        The amount of available memory needed to run this cross section model.\n\n    Notes\n    -----\n    Not all default attributes may be useful for your specific application and you may\n    require other types of configuration options. These are provided as examples since\n    the base ``latticePhysicsInterface`` does not implement models that use these. For\n    additional options, consider subclassing the base ``Setting`` object and using this\n    model as a template.\n    \"\"\"\n\n    def __init__(\n        self,\n        xsID,\n        geometry=None,\n        xsFileLocation=None,\n        fluxFileLocation=None,\n        validBlockTypes=None,\n        blockRepresentation=None,\n        driverID=None,\n        criticalBuckling=None,\n        nuclideReactionDriver=None,\n        externalDriver=None,\n        useHomogenizedBlockComposition=None,\n        numInternalRings=None,\n        numExternalRings=None,\n        mergeIntoClad=None,\n        mergeIntoFuel=None,\n        meshSubdivisionsPerCm=None,\n        xsExecuteExclusive=None,\n        xsPriority=None,\n        xsMaxAtomNumber=None,\n        averageByComponent=False,\n        minDriverDensity=0.0,\n        ductHeterogeneous=False,\n        traceIsotopeThreshold=0.0,\n        xsTempIsotope=\"U238\",\n        requiredRAM=0.0,\n    ):\n        self.xsID = xsID\n        self.geometry = geometry\n        self.xsFileLocation = xsFileLocation\n        self.validBlockTypes = validBlockTypes\n        self.blockRepresentation = blockRepresentation\n\n        # These are application specific, feel free use them\n        # in your own lattice physics plugin(s).\n        self.fluxFileLocation = fluxFileLocation\n        self.driverID = driverID\n        self.criticalBuckling = criticalBuckling\n        self.nuclideReactionDriver = nuclideReactionDriver\n        self.externalDriver = externalDriver\n     
   self.useHomogenizedBlockComposition = useHomogenizedBlockComposition\n        self.numInternalRings = numInternalRings\n        self.numExternalRings = numExternalRings\n        self.mergeIntoClad = mergeIntoClad\n        self.mergeIntoFuel = mergeIntoFuel\n        self.meshSubdivisionsPerCm = meshSubdivisionsPerCm\n        self.xsMaxAtomNumber = xsMaxAtomNumber\n        self.minDriverDensity = minDriverDensity\n        self.averageByComponent = averageByComponent\n        self.ductHeterogeneous = ductHeterogeneous\n        self.traceIsotopeThreshold = traceIsotopeThreshold\n        # these are related to execution\n        self.xsExecuteExclusive = xsExecuteExclusive\n        self.xsPriority = xsPriority\n        self.xsTempIsotope = xsTempIsotope\n        self.requiredRAM = requiredRAM\n\n    def __repr__(self):\n        if self.xsIsPregenerated:\n            suffix = f\"Pregenerated: {self.xsIsPregenerated}\"\n        else:\n            suffix = f\"Geometry Model: {self.geometry}\"\n            if self.fluxIsPregenerated:\n                suffix = f\"{suffix}, External Flux Solution: {self.fluxFileLocation}\"\n\n        return f\"<{self.__class__.__name__}, XSID: {self.xsID}, {suffix}>\"\n\n    def __iter__(self):\n        return iter(self.__dict__.items())\n\n    @property\n    def xsType(self):\n        \"\"\"Return the single-char cross section type indicator.\"\"\"\n        return self.xsID[0]\n\n    @property\n    def envGroup(self):\n        \"\"\"Return the single-char burnup group indicator.\"\"\"\n        return self.xsID[1]\n\n    @property\n    def xsIsPregenerated(self):\n        \"\"\"True if this points to a pre-generated XS file.\"\"\"\n        return self.xsFileLocation is not None\n\n    @property\n    def fluxIsPregenerated(self):\n        \"\"\"True if this points to a pre-generated flux solution file.\"\"\"\n        return self.fluxFileLocation is not None\n\n    def serialize(self):\n        \"\"\"Return as a dictionary without 
``CONF_XSID`` and with ``None`` values excluded.\"\"\"\n        doNotSerialize = [CONF_XSID]\n        return {key: val for key, val in self if key not in doNotSerialize and val is not None}\n\n    def validate(self):\n        \"\"\"\n        Performs validation checks on the inputs and provides warnings for option inconsistencies.\n\n        Raises\n        ------\n        ValueError\n            When the mutually exclusive ``xsFileLocation`` and ``geometry`` attributes\n            are provided or when neither are provided.\n        \"\"\"\n        # Check for valid inputs when the file location is supplied.\n        if self.xsFileLocation:\n            if self.geometry is not None:\n                runLog.warning(\n                    f\"Either file location or geometry inputs in {self} should be given, but not both. \"\n                    \"The file location setting will take precedence over the geometry inputs. \"\n                    \"Remove one or the other in the `crossSectionSettings` input to fix this warning.\"\n                )\n\n        if self.xsFileLocation is None or self.fluxFileLocation is not None:\n            if self.geometry is None:\n                raise ValueError(f\"{self} is missing a geometry input or a file location.\")\n\n        invalids = []\n        if self.xsFileLocation is not None:\n            for var, val in self:\n                # Skip these attributes since they are valid options\n                # when the ``xsFileLocation`` attribute`` is set.\n                if var in [CONF_XSID, CONF_XS_FILE_LOCATION, CONF_BLOCK_REPRESENTATION]:\n                    continue\n                if val is not None:\n                    invalids.append((var, val))\n\n        if invalids:\n            runLog.debug(f\"The following inputs in {self} are not valid when the file location is set:\")\n            for var, val in invalids:\n                runLog.debug(f\"\\tAttribute: {var}, Value: {val}\")\n\n        # Check for valid inputs 
when the geometry is supplied.\n        invalids = []\n        if self.geometry is not None:\n            validOptions = _VALID_INPUTS_BY_GEOMETRY_TYPE[self.geometry]\n            for var, val in self:\n                if var not in validOptions and val is not None:\n                    invalids.append((var, val))\n\n        if invalids:\n            runLog.debug(f\"The following inputs in {self} are not valid when `{self.geometry}` geometry type is set:\")\n            for var, val in invalids:\n                runLog.debug(f\"\\tAttribute: {var}, Value: {val}\")\n            runLog.debug(f\"The valid options for the `{self.geometry}` geometry are: {validOptions}\")\n\n    def setDefaults(self, blockRepresentation, validBlockTypes):\n        \"\"\"\n        This sets the defaults based on some recommended values based on the geometry type.\n\n        Parameters\n        ----------\n        blockRepresentation : str\n            Valid options are provided in ``CrossSectionGroupManager.BLOCK_COLLECTIONS``\n        validBlockTypes : list of str or bool\n           This configures which blocks (by their type) the cross section group manager will merge together to create a\n           representative block. If set to ``None`` or ``True`` then all block types in the XS ID will be considered. If\n           set to ``False`` then a default of [\"fuel\"] will be used. If set to a list of strings then the specific list\n           will be used. A typical input may be [\"fuel\"] to just consider the fuel blocks.\n\n        Notes\n        -----\n        These defaults are application-specific and design specific. They are included to provide an example and are\n        tuned to fit the internal needs of TerraPower. 
Consider a separate implementation/subclass if you would like\n        different behavior.\n        \"\"\"\n        if type(validBlockTypes) is bool:\n            validBlockTypes = None if validBlockTypes else [\"fuel\"]\n        else:\n            validBlockTypes = validBlockTypes\n\n        defaults = {}\n        if self.xsIsPregenerated:\n            allowableBlockCollections = [\n                crossSectionGroupManager.MEDIAN_BLOCK_COLLECTION,\n                crossSectionGroupManager.AVERAGE_BLOCK_COLLECTION,\n                crossSectionGroupManager.FLUX_WEIGHTED_AVERAGE_BLOCK_COLLECTION,\n            ]\n            defaults = {\n                CONF_XS_FILE_LOCATION: self.xsFileLocation,\n                CONF_BLOCK_REPRESENTATION: blockRepresentation,\n            }\n\n        elif self.geometry == XSGeometryTypes.getStr(XSGeometryTypes.ZERO_DIMENSIONAL):\n            allowableBlockCollections = [\n                crossSectionGroupManager.MEDIAN_BLOCK_COLLECTION,\n                crossSectionGroupManager.AVERAGE_BLOCK_COLLECTION,\n                crossSectionGroupManager.FLUX_WEIGHTED_AVERAGE_BLOCK_COLLECTION,\n            ]\n            bucklingSearch = not self.fluxIsPregenerated\n            defaults = {\n                CONF_GEOM: self.geometry,\n                CONF_BUCKLING: bucklingSearch,\n                CONF_DRIVER: \"\",\n                CONF_BLOCK_REPRESENTATION: blockRepresentation,\n                CONF_BLOCKTYPES: validBlockTypes,\n                CONF_EXTERNAL_FLUX_FILE_LOCATION: self.fluxFileLocation,\n            }\n        elif self.geometry == XSGeometryTypes.getStr(XSGeometryTypes.ONE_DIMENSIONAL_SLAB):\n            allowableBlockCollections = [\n                crossSectionGroupManager.SLAB_COMPONENTS_BLOCK_COLLECTION,\n            ]\n            defaults = {\n                CONF_GEOM: self.geometry,\n                CONF_MESH_PER_CM: 1.0,\n                CONF_BLOCK_REPRESENTATION: 
crossSectionGroupManager.SLAB_COMPONENTS_BLOCK_COLLECTION,\n                CONF_BLOCKTYPES: validBlockTypes,\n            }\n        elif self.geometry == XSGeometryTypes.getStr(XSGeometryTypes.ONE_DIMENSIONAL_CYLINDER):\n            allowableBlockCollections = [crossSectionGroupManager.CYLINDRICAL_COMPONENTS_BLOCK_COLLECTION]\n            defaults = {\n                CONF_GEOM: self.geometry,\n                CONF_DRIVER: \"\",\n                CONF_MERGE_INTO_CLAD: [\"gap\"],\n                CONF_MERGE_INTO_FUEL: [],\n                CONF_MESH_PER_CM: 1.0,\n                CONF_INTERNAL_RINGS: 0,\n                CONF_EXTERNAL_RINGS: 1,\n                CONF_HOMOGBLOCK: False,\n                CONF_BLOCK_REPRESENTATION: crossSectionGroupManager.CYLINDRICAL_COMPONENTS_BLOCK_COLLECTION,\n                CONF_BLOCKTYPES: validBlockTypes,\n                CONF_DUCT_HETEROGENEOUS: False,\n                CONF_TRACE_ISOTOPE_THRESHOLD: 0.0,\n            }\n        elif self.geometry == XSGeometryTypes.getStr(XSGeometryTypes.TWO_DIMENSIONAL_HEX):\n            allowableBlockCollections = [\n                crossSectionGroupManager.MEDIAN_BLOCK_COLLECTION,\n                crossSectionGroupManager.AVERAGE_BLOCK_COLLECTION,\n                crossSectionGroupManager.FLUX_WEIGHTED_AVERAGE_BLOCK_COLLECTION,\n            ]\n            defaults = {\n                CONF_GEOM: self.geometry,\n                CONF_BUCKLING: False,\n                CONF_EXTERNAL_DRIVER: True,\n                CONF_DRIVER: \"\",\n                CONF_REACTION_DRIVER: None,\n                CONF_EXTERNAL_RINGS: 1,\n                CONF_BLOCK_REPRESENTATION: blockRepresentation,\n            }\n\n        defaults[CONF_XS_EXECUTE_EXCLUSIVE] = False\n        defaults[CONF_XS_PRIORITY] = 5\n        defaults[CONF_COMPONENT_AVERAGING] = False\n        defaults[CONF_MEMORY_REQUIREMENT] = 0.0\n\n        for attrName, defaultValue in defaults.items():\n            currentValue = getattr(self, attrName)\n  
          if currentValue is None:\n                setattr(self, attrName, defaultValue)\n            else:\n                if attrName == CONF_BLOCK_REPRESENTATION:\n                    if currentValue not in allowableBlockCollections:\n                        raise ValueError(\n                            f\"Invalid block collection type `{currentValue}` assigned \"\n                            f\"for {self.xsID}. Expected one of the \"\n                            f\"following: {allowableBlockCollections}\"\n                        )\n\n        self.validate()\n\n\ndef serializeXSSettings(xsSettingsDict: Union[XSSettings, Dict]) -> Dict[str, Dict]:\n    \"\"\"\n    Return a serialized form of the ``XSSettings`` as a dictionary.\n\n    Notes\n    -----\n    Attributes that are not set (i.e., set to None) will be skipped.\n    \"\"\"\n    if not isinstance(xsSettingsDict, dict):\n        raise TypeError(f\"Expected a dictionary for {xsSettingsDict}\")\n\n    output = {}\n    for xsID, xsOpts in xsSettingsDict.items():\n        # Setting the value to an empty dictionary\n        # if it is set to a None or an empty\n        # dictionary.\n        if not xsOpts:\n            continue\n\n        if isinstance(xsOpts, XSModelingOptions):\n            xsIDVals = xsOpts.serialize()\n\n        elif isinstance(xsOpts, dict):\n            xsIDVals = {\n                config: confVal for config, confVal in xsOpts.items() if config != CONF_XSID and confVal is not None\n            }\n        else:\n            raise TypeError(\n                f\"{xsOpts} was expected to be a ``dict`` or \"\n                f\"``XSModelingOptions`` options type but is type {type(xsOpts)}\"\n            )\n\n        output[str(xsID)] = xsIDVals\n    return output\n\n\nclass XSSettingDef(Setting):\n    \"\"\"\n    Custom setting object to manage the cross section dictionary-like inputs.\n\n    Notes\n    -----\n    This uses the ``xsSettingsValidator`` schema to validate the inputs\n    and 
will automatically coerce the value into a ``XSSettings`` dictionary.\n    \"\"\"\n\n    def __init__(self, name):\n        description = \"Data structure defining how cross sections are created\"\n        label = \"Cross section control\"\n        default = XSSettings()\n        options = None\n        schema = xsSettingsValidator\n        enforcedOptions = False\n        subLabels = None\n        isEnvironment = False\n        oldNames = None\n        Setting.__init__(\n            self,\n            name,\n            default,\n            description,\n            label,\n            options,\n            schema,\n            enforcedOptions,\n            subLabels,\n            isEnvironment,\n            oldNames,\n        )\n\n    def dump(self):\n        \"\"\"Return a serialized version of the ``XSSetting`` object.\"\"\"\n        return serializeXSSettings(self._value)\n\n\ndef xsSettingsValidator(xsSettingsDict: Dict[str, Dict]) -> XSSettings:\n    \"\"\"\n    Returns a ``XSSettings`` object if validation is successful.\n\n    Notes\n    -----\n    This provides two levels of checks. The first check is that the attributes\n    provided as user input contains the correct key/values and the values are\n    of the correct type. The second check uses the ``XSModelingOptions.validate``\n    method to check for input inconsistencies and provides warnings if there\n    are any issues.\n    \"\"\"\n    xsSettingsDict = serializeXSSettings(xsSettingsDict)\n    xsSettingsDict = _XS_SCHEMA(xsSettingsDict)\n    vals = XSSettings()\n    for xsID, inputParams in xsSettingsDict.items():\n        if not inputParams:\n            continue\n        xsOpt = XSModelingOptions(xsID, **inputParams)\n        xsOpt.validate()\n        vals[xsID] = xsOpt\n    return vals\n"
  },
  {
    "path": "armi/physics/neutronics/diffIsotxs.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This script is used to compare ISOTXS files.\"\"\"\n\nfrom armi import runLog\nfrom armi.cli.entryPoint import EntryPoint\n\n\nclass CompareIsotxsLibraries(EntryPoint):\n    \"\"\"Compare two ISOTXS files.\"\"\"\n\n    name = \"diff-isotxs\"\n\n    def addOptions(self):\n        self.parser.add_argument(\n            \"reference\",\n            help=\"Reference ISOTXS for comparison. 
Percent differences are given in relation to this file.\",\n        )\n        self.parser.add_argument(\n            \"comparisonFiles\",\n            nargs=\"+\",\n            help=\"ISOTXS files to compare to the reference\",\n        )\n        self.parser.add_argument(\n            \"--nuclidesNames\",\n            \"-n\",\n            nargs=\"+\",\n            help=\"For the interaction types identified only compare these nuclides.\",\n        )\n        self.parser.add_argument(\n            \"--interactions\",\n            \"-i\",\n            nargs=\"+\",\n            help=\"Compare the cross sections for these interactins and specified nuclides.\",\n        )\n        self.parser.add_argument(\n            \"--fluxFile\",\n            \"-f\",\n            help=\"Mcc3 file containing flux_bg (broad group flux) for single-group comparison.\",\n        )\n\n    def invoke(self):\n        from armi.nuclearDataIO import isotxs, xsLibraries\n\n        runLog.setVerbosity(0)\n        refIsotxs = isotxs.readBinary(self.args.reference)\n\n        for fname in self.args.comparisonFiles:\n            cmpIsotxs = isotxs.readBinary(fname)\n            xsLibraries.compare(refIsotxs, cmpIsotxs)\n"
  },
  {
    "path": "armi/physics/neutronics/energyGroups.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Energy group structures for multigroup neutronics calculations.\"\"\"\n\nimport copy\nimport itertools\nimport math\n\nimport numpy as np\n\nfrom armi import runLog\nfrom armi.physics.neutronics.const import (\n    FAST_FLUX_THRESHOLD_EV,\n    HIGH_ENERGY_EV,\n    MAXIMUM_XS_LIBRARY_ENERGY,\n    ULTRA_FINE_GROUP_LETHARGY_WIDTH,\n)\nfrom armi.utils.mathematics import findNearestValue\n\n\ndef getFastFluxGroupCutoff(eGrpStruc):\n    \"\"\"\n    Given a constant \"fast\" energy threshold, return which ARMI energy group\n    index contains this threshold.\n\n    .. impl:: Return the energy group index which contains a given energy threshold.\n        :id: I_ARMI_EG_FE\n        :implements: R_ARMI_EG_FE\n\n        This function returns the energy group within a given group structure\n        that contains the fast flux threshold energy. The threshold energy is\n        imported from the :py:mod:`constants <armi.physics.neutronics.const>` in\n        the neutronics module, where it is defined as 100 keV. This is a\n        standard definition for fast flux. 
This function also calculates and\n        returns the fraction of the threshold energy group that is above the 100\n        keV threshold.\n    \"\"\"\n    gThres = -1\n    for g, eV in enumerate(eGrpStruc):\n        if eV < FAST_FLUX_THRESHOLD_EV:\n            gThres = g\n            break\n\n    dE = eGrpStruc[gThres - 1] - eGrpStruc[gThres]  # eV\n    fastFluxFracInG = (eGrpStruc[gThres - 1] - FAST_FLUX_THRESHOLD_EV) / dE\n\n    return gThres - 1, fastFluxFracInG\n\n\ndef _flatten(*numbers):\n    result = []\n    for item in numbers:\n        if isinstance(item, int):\n            result.append(item)\n        else:\n            result.extend(item)\n    return result\n\n\ndef _create_anl_energies_with_group_lethargies(*group_lethargies):\n    anl_energy_max = MAXIMUM_XS_LIBRARY_ENERGY\n    en = anl_energy_max\n    energies = []\n    for ee in _flatten(*group_lethargies):\n        energies.append(en)\n        en *= math.e ** (-ee * ULTRA_FINE_GROUP_LETHARGY_WIDTH)\n    return energies\n\n\ndef getGroupStructure(name):\n    \"\"\"\n    Return descending neutron energy group upper bounds in eV for a given\n    structure name.\n\n    .. impl:: Provide the neutron energy group bounds for a given group structure.\n        :id: I_ARMI_EG_NE\n        :implements: R_ARMI_EG_NE\n\n        There are several built-in group structures that are defined in this\n        module, which are stored in a dictionary. This function takes a group\n        structure name as an input parameter, which it uses as a key for the\n        group structure dictionary. If the group structure name is valid, it\n        returns a copy of the energy group structure resulting from the\n        dictionary lookup. 
Otherwise, it throws an error.\n\n    Notes\n    -----\n    Copy of the group structure is return so that modifications of the energy\n    bounds does not propagate back to the `GROUP_STRUCTURE` dictionary.\n    \"\"\"\n    try:\n        return copy.copy(GROUP_STRUCTURE[name])\n    except KeyError as ke:\n        runLog.error(\n            'Could not find groupStructure with the name \"{}\".\\nChoose one of: {}'.format(\n                name, \", \".join(GROUP_STRUCTURE.keys())\n            )\n        )\n        raise ke\n\n\ndef getGroupStructureType(neutronEnergyBoundsInEv):\n    \"\"\"Return neutron energy group structure name for a given set of neutron energy group bounds in eV.\"\"\"\n    neutronEnergyBoundsInEv = np.array(neutronEnergyBoundsInEv)\n    for groupStructureType in GROUP_STRUCTURE:\n        refNeutronEnergyBoundsInEv = np.array(getGroupStructure(groupStructureType))\n        if len(refNeutronEnergyBoundsInEv) != len(neutronEnergyBoundsInEv):\n            continue\n        if np.allclose(refNeutronEnergyBoundsInEv, neutronEnergyBoundsInEv, 1e-5):\n            return groupStructureType\n    raise ValueError(\n        \"Neutron energy group structure type does not exist for the given neutron energy bounds: {}\".format(\n            neutronEnergyBoundsInEv\n        )\n    )\n\n\nGROUP_STRUCTURE = {}\n\"\"\"\nEnergy groups for use in multigroup neutronics.\n\nValues are the upper bound of each energy in eV from highest energy to lowest\n(because neutrons typically downscatter...)\n\n:meta hide-value:\n\"\"\"\n\nGROUP_STRUCTURE[\"2\"] = [HIGH_ENERGY_EV, 6.25e-01]\n\n# for calculating fast flux\nGROUP_STRUCTURE[\"FastFlux\"] = [HIGH_ENERGY_EV, FAST_FLUX_THRESHOLD_EV]\n\n# Nuclear Reactor Engineering: Reactor Systems Engineering, Vol. 
1\nGROUP_STRUCTURE[\"4gGlasstoneSesonske\"] = [HIGH_ENERGY_EV, 5.00e04, 5.00e02, 6.25e-01]\n\n# http://serpent.vtt.fi/mediawiki/index.php/CASMO_4-group_structure\nGROUP_STRUCTURE[\"CASMO4\"] = [HIGH_ENERGY_EV, 8.21e05, 5.53e03, 6.25e-01]\n\n\nGROUP_STRUCTURE[\"CASMO12\"] = [\n    HIGH_ENERGY_EV,\n    2.23e06,\n    8.21e05,\n    5.53e03,\n    4.81e01,\n    4.00e00,\n    6.25e-01,\n    3.50e-01,\n    2.80e-01,\n    1.40e-01,\n    5.80e-02,\n    3.00e-02,\n]\n\n\n# For typically for use with MCNP will need conversion to MeV, and ordering from low to high.\n# reference: https://www.sciencedirect.com/science/article/pii/S0149197022003778\n# reference: https://mcnp.lanl.gov/pdf_files/TechReport_2017_LANL_LA-UR-17-29981_WernerArmstrongEtAl.pdf\nGROUP_STRUCTURE[\"CINDER63\"] = [\n    2.5000e7,\n    2.0000e7,\n    1.6905e7,\n    1.4918e7,\n    1.0000e7,\n    6.0650e6,\n    4.9658e6,\n    3.6788e6,\n    2.8651e6,\n    2.2313e6,\n    1.7377e6,\n    1.3534e6,\n    1.1080e6,\n    8.2085e5,\n    6.3928e5,\n    4.9790e5,\n    3.8870e5,\n    3.0200e5,\n    1.8320e5,\n    1.1110e5,\n    6.7380e4,\n    4.0870e4,\n    2.5540e4,\n    1.9890e4,\n    1.5030e4,\n    9.1190e3,\n    5.5310e3,\n    3.3550e3,\n    2.8400e3,\n    2.4040e3,\n    2.0350e3,\n    1.2340e3,\n    7.4850e2,\n    4.5400e2,\n    2.7540e2,\n    1.6700e2,\n    1.0130e2,\n    6.1440e1,\n    3.7270e1,\n    2.2600e1,\n    1.3710e1,\n    8.3150,\n    5.0430,\n    3.0590,\n    1.8550,\n    1.1250,\n    6.8300e-1,\n    4.1400e-1,\n    2.5100e-1,\n    1.5200e-1,\n    1.0000e-1,\n    8.0000e-2,\n    6.7000e-2,\n    5.8000e-2,\n    5.0000e-2,\n    4.2000e-2,\n    3.5000e-2,\n    3.0000e-2,\n    2.5000e-2,\n    2.0000e-2,\n    1.5000e-2,\n    1.0000e-2,\n    5.0000e-3,\n]\n\n# Group structures below here are derived from Appendix E in\n# https://www.osti.gov/biblio/1483949-mc2-multigroup-cross-section-generation-code-fast-reactor-analysis-nuclear\nGROUP_STRUCTURE[\"ANL9\"] = _create_anl_energies_with_group_lethargies(222, 120, 
itertools.repeat(180, 5), 540, 300)\n\nGROUP_STRUCTURE[\"ANL33\"] = _create_anl_energies_with_group_lethargies(42, itertools.repeat(60, 28), 90, 240, 29, 1)\n\nGROUP_STRUCTURE[\"ANL70\"] = _create_anl_energies_with_group_lethargies(42, itertools.repeat(30, 67), 29, 1)\n\n# fmt: off\nGROUP_STRUCTURE[\"ANL116\"] = _create_anl_energies_with_group_lethargies(\n    15*[6] + [3] + 2*[6] + [3] + [12] + 3*[6] + 3*[12] + 2*[6] + 2*[12] + [4] + [6] + [2] +\n    [12] + 2*[6] + [12] + 2*[6] +2*[12] + [6] + [12] + 2*[6] + 6*[12] + [6] + 4*[12] + 4*[6] +\n    5*[12] + [6] + 3*[12] + [6] + 2*[30] + 2*[15] + [30] + 4*[15] + [18] + [12] + 5*[30] +\n    [24] + [12] + [24] + [19] + [11] + [18] + [24] + 3*[18] + 2*[12] + 14*[60] + 2*[30] + [29] + [1]\n)\n\nGROUP_STRUCTURE[\"ANL230\"] = _create_anl_energies_with_group_lethargies(\n    [\n         3,  3,  3,  3,  3,  3,  3,  3,  3,  3,  3,  3,  3,  3,  3,  3,  3,  3,  3,\n         3,  3,  3,  3,  3,  3,  3,  3,  3,  3,  3,  1,  1,  1,  3,  3,  3,  3,  3,\n         6,  6,  6,  3,  3,  3,  3,  6,  6,  6,  6,  6,  6,  3,  3,  3,  3,  6,  6,\n         6,  6,  2,  2,  1,  1,  2,  2,  2,  6,  6,  3,  3,  3,  3,  6,  6,  3,  3,\n         3,  3,  6,  6,  6,  6,  3,  3,  6,  6,  6,  3,  2,  1,  6,  6,  6,  6,  6,\n         6,  6,  6,  6,  6,  6,  3,  3,  3,  3,  6,  6,  6,  6,  6,  6,  6,  6,  6,\n         3,  3,  3,  3,  3,  3,  6,  6,  6,  6,  6,  6,  6,  6,  6,  3,  3,  3,  3,\n         6,  6,  6,  6,  6,  6,  6, 15, 15, 15, 15,  9,  6,  6,  9, 15, 15, 15,  3,\n         3,  9, 15,  9,  6,  3,  3,  9,  3, 12, 15, 15, 15, 15, 15, 15, 15, 15, 15,\n        15, 12, 12,  6,  6, 12, 12, 12,  7,  5,  6,  6, 12, 12, 12, 12,  6,  6, 12,\n        12,  6,  6,  6,  6,  6, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30,\n        30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30,  6, 24, 10, 20,\n        29,  1,\n    ]\n)\n\n# Reactor agnostic. 
Similar to ANL1041 but with 6 UFGs grouped together.\n# More likely to not error out on memory than 703\nGROUP_STRUCTURE[\"348\"] = _create_anl_energies_with_group_lethargies(itertools.repeat(6, 346), 5, 1)\n\n# Note that at one point the MC2 manual was inconsistent with the code itself\nGROUP_STRUCTURE[\"ANL703\"] = _create_anl_energies_with_group_lethargies(\n    [\n        3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,\n        3, 3, 3, 3, 1, 1, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,\n        3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 1, 1, 2, 2,\n        2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,\n        3, 3, 3, 3, 3, 3, 3, 3, 2, 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,\n        3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,\n        3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,\n        3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,\n        3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,\n        3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,\n        3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,\n        3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,\n        3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,\n        3, 3, 3, 3, 3, 3, 3, 3, 1, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,\n        3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,\n        3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,\n        3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,\n        3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,\n        3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 
3, 3, 3, 3, 3, 3, 3, 3,\n        3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,\n        3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,\n        3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,\n        3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,\n        3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,\n        3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,\n        3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,\n        3, 3, 3, 3, 3, 3, 3, 3, 1, 3, 3, 3, 3, 3, 3, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2,\n        1,\n    ]\n)\n# fmt: on\n\nGROUP_STRUCTURE[\"ANL1041\"] = _create_anl_energies_with_group_lethargies(itertools.repeat(2, 1041))\n\nGROUP_STRUCTURE[\"ANL2082\"] = _create_anl_energies_with_group_lethargies(itertools.repeat(1, 2082))\n\n\ndef _create_multigroup_structures_on_finegroup_energies(multigroup_energy_bounds, finegroup_energy_bounds):\n    \"\"\"Set energy group bounds to the nearest ultra-fine group boundaries.\"\"\"\n    modifiedEnergyBounds = set()\n    modifiedEnergyBounds.add(max(finegroup_energy_bounds))\n    for energyBound in multigroup_energy_bounds[1:]:\n        modifiedEnergyBounds.add(findNearestValue(finegroup_energy_bounds, energyBound))\n\n    return sorted(modifiedEnergyBounds, reverse=True)\n\n\ndef _create_anl_energies_with_group_energies(group_energy_bounds):\n    \"\"\"Set energy group bounds to the nearest ultra-fine group boundaries.\"\"\"\n    ufgEnergies = _create_anl_energies_with_group_lethargies(itertools.repeat(1, 2082))\n    return _create_multigroup_structures_on_finegroup_energies(group_energy_bounds, ufgEnergies)\n\n\n\"\"\"\nTaken from Section A3.1 SHEM-361 in\nNgeleka, Tholakele Prisca. 
\"Examination and improvement of the SHEM energy\ngroup structure for HTR and deep burn HTR design and analysis.\" (2012).\n\"\"\"\nGROUP_STRUCTURE[\"SHEM361\"] = [\n    19640300,\n    14918200,\n    13840300,\n    11618300,\n    9999990,\n    9048360,\n    8187300,\n    7408170,\n    6703190,\n    6065300,\n    4965850,\n    4065690,\n    3328710,\n    2725310,\n    2231300,\n    1901390,\n    1636540,\n    1405770,\n    1336940,\n    1286960,\n    1162050,\n    1051150,\n    951119,\n    860006,\n    706511,\n    578443,\n    494002,\n    456021,\n    412501,\n    383884,\n    320646,\n    267826,\n    230014,\n    195008,\n    164999,\n    140000,\n    122773,\n    115624,\n    94664.5,\n    82297.4,\n    67379.4,\n    55165.6,\n    49915.9,\n    40867.7,\n    36978.6,\n    33459.6,\n    29281,\n    27394.4,\n    26100.1,\n    24999.1,\n    22699.4,\n    18584.7,\n    16200.5,\n    14899.7,\n    13603.7,\n    11137.7,\n    9118.81,\n    7465.85,\n    6112.52,\n    5004.51,\n    4097.35,\n    3481.07,\n    2996.18,\n    2700.24,\n    2397.29,\n    2084.1,\n    1811.83,\n    1586.2,\n    1343.58,\n    1134.67,\n    1064.32,\n    982.494,\n    909.681,\n    832.218,\n    748.517,\n    677.287,\n    646.837,\n    612.834,\n    600.099,\n    592.941,\n    577.146,\n    539.204,\n    501.746,\n    453.999,\n    419.094,\n    390.76,\n    371.703,\n    353.575,\n    335.323,\n    319.928,\n    295.922,\n    288.327,\n    284.888,\n    276.468,\n    268.297,\n    256.748,\n    241.796,\n    235.59,\n    224.325,\n    212.108,\n    200.958,\n    195.996,\n    193.078,\n    190.204,\n    188.877,\n    187.559,\n    186.251,\n    184.952,\n    183.295,\n    175.229,\n    167.519,\n    163.056,\n    154.176,\n    146.657,\n    139.504,\n    132.701,\n    126.229,\n    120.554,\n    117.577,\n    116.524,\n    115.48,\n    112.854,\n    110.288,\n    105.646,\n    103.038,\n    102.115,\n    101.605,\n    101.098,\n    100.594,\n    97.3287,\n    93.3256,\n    88.7741,\n    
83.9393,\n    79.3679,\n    76.3322,\n    73.5595,\n    71.8869,\n    69.0682,\n    66.8261,\n    66.4929,\n    66.1612,\n    65.8312,\n    65.5029,\n    65.046,\n    64.5923,\n    63.6306,\n    62.3083,\n    59.925,\n    57.0595,\n    54.06,\n    52.9895,\n    51.7847,\n    49.2591,\n    47.5173,\n    46.2053,\n    45.2904,\n    44.1721,\n    43.1246,\n    42.1441,\n    41.227,\n    39.7295,\n    38.7874,\n    37.7919,\n    37.3038,\n    36.8588,\n    36.4191,\n    36.0568,\n    35.698,\n    34.5392,\n    33.0855,\n    31.693,\n    27.8852,\n    24.6578,\n    22.5356,\n    22.3788,\n    22.1557,\n    22.0011,\n    21.7018,\n    21.4859,\n    21.336,\n    21.2296,\n    21.1448,\n    21.0604,\n    20.9763,\n    20.7676,\n    20.6847,\n    20.6021,\n    20.5199,\n    20.4175,\n    20.2751,\n    20.0734,\n    19.5974,\n    19.3927,\n    19.1997,\n    19.0848,\n    17.9591,\n    17.759,\n    17.5648,\n    17.4457,\n    16.8305,\n    16.5501,\n    16.0498,\n    15.7792,\n    14.8662,\n    14.7301,\n    14.5952,\n    14.4702,\n    14.2505,\n    14.0496,\n    13.546,\n    13.3297,\n    12.6,\n    12.4721,\n    12.3086,\n    12.1302,\n    11.9795,\n    11.8153,\n    11.7094,\n    11.5894,\n    11.2694,\n    11.0529,\n    10.8038,\n    10.5793,\n    9.50002,\n    9.14031,\n    8.97995,\n    8.80038,\n    8.67369,\n    8.52407,\n    8.30032,\n    8.13027,\n    7.97008,\n    7.83965,\n    7.73994,\n    7.60035,\n    7.38015,\n    7.13987,\n    6.99429,\n    6.91778,\n    6.87021,\n    6.83526,\n    6.8107,\n    6.79165,\n    6.77605,\n    6.75981,\n    6.74225,\n    6.71668,\n    6.63126,\n    6.60611,\n    6.58829,\n    6.57184,\n    6.55609,\n    6.53907,\n    6.51492,\n    6.48178,\n    6.43206,\n    6.35978,\n    6.28016,\n    6.16011,\n    6.05991,\n    5.96014,\n    5.80021,\n    5.72015,\n    5.61979,\n    5.53004,\n    5.48817,\n    5.41025,\n    5.38003,\n    5.32011,\n    5.21008,\n    5.10997,\n    4.93323,\n    4.76785,\n    4.4198,\n    4.30981,\n    4.21983,\n   
 4,\n    3.88217,\n    3.71209,\n    3.54307,\n    3.14211,\n    2.88405,\n    2.77512,\n    2.74092,\n    2.7199,\n    2.70012,\n    2.64004,\n    2.62005,\n    2.59009,\n    2.55,\n    2.46994,\n    2.33006,\n    2.27299,\n    2.21709,\n    2.15695,\n    2.0701,\n    1.98992,\n    1.90008,\n    1.77997,\n    1.66895,\n    1.58803,\n    1.51998,\n    1.44397,\n    1.41001,\n    1.38098,\n    1.33095,\n    1.29304,\n    1.25094,\n    1.21397,\n    1.16999,\n    1.14797,\n    1.12997,\n    1.11605,\n    1.10395,\n    1.09198,\n    1.07799,\n    1.03499,\n    1.02101,\n    1.00904,\n    0.996501,\n    0.981959,\n    0.96396,\n    0.944022,\n    0.919978,\n    0.880024,\n    0.800371,\n    0.719999,\n    0.624999,\n    0.594993,\n    0.55499,\n    0.520011,\n    0.475017,\n    0.431579,\n    0.390001,\n    0.352994,\n    0.325008,\n    0.305012,\n    0.279989,\n    0.254997,\n    0.231192,\n    0.20961,\n    0.190005,\n    0.161895,\n    0.137999,\n    0.119995,\n    0.104298,\n    0.0897968,\n    0.0764969,\n    0.0651999,\n    0.0554982,\n    0.0473019,\n    0.0402999,\n    0.0343998,\n    0.0292989,\n    0.0249394,\n    0.0200104,\n    0.01483,\n    0.0104505,\n    0.00714526,\n    0.00455602,\n    0.0024999,\n]\n\n# Energy bounds of ARMI33 and ARMI45 are modified to the nearest ultra-fine group boundaries\nGROUP_STRUCTURE[\"ARMI33\"] = _create_anl_energies_with_group_energies(\n    [\n        1.4190e07,\n        1.0000e07,\n        6.0650e06,\n        3.6780e06,\n        2.2313e06,\n        1.3530e06,\n        8.2080e05,\n        4.9787e05,\n        3.0190e05,\n        1.8310e05,\n        1.1109e05,\n        6.7370e04,\n        4.0860e04,\n        2.4788e04,\n        1.5030e04,\n        9.1180e03,\n        5.5308e03,\n        3.3540e03,\n        2.0340e03,\n        1.2341e03,\n        7.4850e02,\n        4.5390e02,\n        3.0432e02,\n        1.4860e02,\n        9.1660e01,\n        6.7904e01,\n        4.0160e01,\n        2.2600e01,\n        1.3709e01,\n        
8.3150e00,\n        4.0000e00,\n        5.4000e-01,\n        4.1400e-01,\n    ]\n)\n\n# Energy bounds of SHEM33_361 is ANL33 modified to the nearest SHEM361 fine group boundaries\nGROUP_STRUCTURE[\"SHEM33_361\"] = _create_multigroup_structures_on_finegroup_energies(\n    GROUP_STRUCTURE[\"ANL33\"], GROUP_STRUCTURE[\"SHEM361\"]\n)\n\nGROUP_STRUCTURE[\"ARMI45\"] = _create_anl_energies_with_group_energies(\n    [\n        1.419e07,\n        1.000e07,\n        6.065e06,\n        4.966e06,\n        3.679e06,\n        2.865e06,\n        2.231e06,\n        1.738e06,\n        1.353e06,\n        1.108e06,\n        8.209e05,\n        6.393e05,\n        4.979e05,\n        3.887e05,\n        3.020e05,\n        1.832e05,\n        1.111e05,\n        6.738e04,\n        4.087e04,\n        2.554e04,\n        1.989e04,\n        1.503e04,\n        9.119e03,\n        5.531e03,\n        3.355e03,\n        2.840e03,\n        2.404e03,\n        2.035e03,\n        1.234e03,\n        7.485e02,\n        4.540e02,\n        2.754e02,\n        1.670e02,\n        1.013e02,\n        6.144e01,\n        3.727e01,\n        2.260e01,\n        1.371e01,\n        8.315e00,\n        5.043e00,\n        3.059e00,\n        1.855e00,\n        1.125e00,\n        6.830e-01,\n        4.140e-01,\n    ]\n)\n\n\"\"\"\nTaken from Table 5.1 of \"GAMSOR: Gamma Source Preparation and DIF3D Flux Solution\",\nANL/NE-16/50 Rev 2.0, M.A. Smith, C.H. Lee, R.N. Hill, Aug 30 2022.\n\"\"\"\nGROUP_STRUCTURE[\"ANL21G\"] = [\n    2.0e7,\n    1.0e7,\n    8.0e6,\n    7.0e6,\n    6.0e6,\n    5.0e6,\n    4.0e6,\n    3.0e6,\n    2.5e6,\n    2.0e6,\n    1.5e6,\n    1.0e6,\n    7.0e5,\n    4.5e5,\n    3.0e5,\n    1.5e5,\n    1.0e5,\n    7.5e4,\n    4.5e4,\n    3.0e4,\n    2.0e4,\n]\n\n\"\"\"\nTaken from Table 5.2 of \"GAMSOR: Gamma Source Preparation and DIF3D Flux Solution\",\nANL/NE-16/50 Rev 2.0, M.A. Smith, C.H. Lee, R.N. 
Hill, Aug 30 2022.\n\"\"\"\nGROUP_STRUCTURE[\"ANL94G\"] = [\n    2.000e07,\n    1.400e07,\n    1.200e07,\n    1.100e07,\n    1.060e07,\n    1.000e07,\n    9.500e06,\n    9.000e06,\n    8.500e06,\n    8.000e06,\n    7.750e06,\n    7.500e06,\n    7.250e06,\n    7.000e06,\n    6.750e06,\n    6.500e06,\n    6.250e06,\n    6.000e06,\n    5.750e06,\n    5.500e06,\n    5.400e06,\n    5.200e06,\n    5.000e06,\n    4.700e06,\n    4.500e06,\n    4.400e06,\n    4.200e06,\n    4.000e06,\n    3.900e06,\n    3.800e06,\n    3.650e06,\n    3.500e06,\n    3.333e06,\n    3.166e06,\n    3.000e06,\n    2.833e06,\n    2.666e06,\n    2.500e06,\n    2.333e06,\n    2.166e06,\n    2.000e06,\n    1.875e06,\n    1.750e06,\n    1.660e06,\n    1.600e06,\n    1.500e06,\n    1.420e06,\n    1.330e06,\n    1.250e06,\n    1.200e06,\n    1.125e06,\n    1.000e06,\n    9.000e05,\n    8.650e05,\n    8.250e05,\n    8.000e05,\n    7.500e05,\n    7.000e05,\n    6.750e05,\n    6.500e05,\n    6.250e05,\n    6.000e05,\n    5.750e05,\n    5.500e05,\n    5.250e05,\n    5.000e05,\n    4.500e05,\n    4.250e05,\n    4.000e05,\n    3.750e05,\n    3.500e05,\n    3.250e05,\n    3.000e05,\n    2.600e05,\n    2.200e05,\n    1.900e05,\n    1.600e05,\n    1.500e05,\n    1.400e05,\n    1.200e05,\n    1.000e05,\n    9.000e04,\n    8.000e04,\n    7.500e04,\n    6.500e04,\n    6.000e04,\n    5.500e04,\n    4.500e04,\n    4.000e04,\n    3.500e04,\n    3.000e04,\n    2.000e04,\n    1.500e04,\n    1.000e04,\n]\n"
  },
  {
    "path": "armi/physics/neutronics/fissionProductModel/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"The Fission product model subpackage.\"\"\"\n\nimport os\n\nfrom armi.context import RES\n\nREFERENCE_LUMPED_FISSION_PRODUCT_FILE = os.path.join(RES, \"referenceFissionProducts.dat\")\n"
  },
  {
    "path": "armi/physics/neutronics/fissionProductModel/fissionProductModel.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis module contains the implementation of the ``FissionProductModel`` interface.\n\n\nThis ``FissionProductModel`` class implements the management of fission products within\nthe reactor core and can be extended to support more general applications. Currently, the\nfission product model supports explicit modeling of fission products in each of the\nblocks/components, independent management of lumped fission products for each\nblocks/components within the core, or global management of lumped fission products\nwhere the fission products between all blocks/components are shared and are modified\ntogether.\n\nWithin the framework, there is a coupling between the management of the fission products\nthrough this model to neutronics evaluations of flux and depletion calculations.\n\nWhen using a Monte Carlo solver, such as MCNP (i.e., there is an interface that is attached\nto the operator that has a name of \"mcnp\"), the fission products will always be treated\nindependently and fission products (either explicit or lumped) will be added to all\nblocks/components in the core. 
The reason for this is that Monte Carlo solvers, like MCNP,\nmay implement their own coupling between flux and depletion evaluations and having the\ninitialization of these fission products in each block/component independently will\nallow that solver to manage the inventory over time.\n\nWhen determining which fission product model to use (either explicit or lumped) it is\nimportant to consider which cross section data is available to the flux and/or depletion\nsolvers, and what level of fidelity is required for the analysis. This is where decisions\nas a developer/user need to be made, and the implementation of this specific model may\nnot be, in general, accurate for any reactor system. It is dependent on which plugins\nare implemented and the requirements of the individual flux/depletion solver.\n\nLumped fission products are generally useful for fast reactor applications, especially\nin fuel cycle calculations or scoping evaluations where the tracking of the detailed\nnuclide inventory would not have substantial impacts on core reactivity predictions.\nThis is typically done by collapsing all fission products into lumped nuclides, like\n``LFP35``, ``LFP38``, ``LFP39``, ``LFP40``, and ``LFP41``. This is the implementation\nin the framework, which is discussed a bit more in the ``fpModel`` setting. These\nlumped fission products are separated into different bins that represent the fission\nproduct yields from U-235, U-238, Pu-239, Pu-240, and Pu-241/Am-241, respectively. The\nexact binning of which fission events from which target nuclides is specified by the\n``burn-chain.yaml`` file, which can be modified by a user/developer. When selecting this\nmodeling option, the blocks/components will have these ``LFP`` nuclides in the number\ndensity dictionaries. The key thing here is that these lumped nuclides do not exist\nin nature and therefore do not have nuclear data directly available in cross section\nevaluations, like ENDF/B. 
If the user wishes to consider these nuclides in the flux/depletion\nevaluations, then cross sections for these ``LFP`` nuclides will need to be prepared. Generally\nspeaking, the the ``crossSectionGroupManager`` and the  ``latticePhysicsInterface`` could be\nused to implement this for cross section generation codes, like NJOY, CASMO, MC2-3, Serpent,\netc.\n\n.. warning::\n\n    The lumped fission product model and the ``burn-chain.yaml`` data may not be directly\n    applicable to light water reactor systems, especially if there are strong reactivity\n    impacts with fission products like ``Xe`` and ``Sm`` that need to be tracked independently.\n    A user/developer may update the ``referenceFissionProducts.dat`` data file to exclude\n    these important nuclides from the lumped fission product models if need be, but this\n    would also require updating the ``burn-chain.yaml`` file as well as updating the\n    ``nuclideFlags`` specification within the reactor blueprints input.\n\nA further simplified option for lumped fission product treatment that is available is to\ntreat all fission products explicitly as ``Mo-99``. This is not guaranteed to be an accurate\ntreatment of the fission products from a reactivity/depletion perspective, but it is\navailable for quick scoping evaluations and model building.\n\nFinally, the explicit fission product modeling aims to include as many nuclides on the\nblocks/components as the user wishes to consider, but the nuclides that are modeled\nmust be compatible with the plugins that are implemented for the application. 
When using this\noption, the user should look to set the ``fpModelLibrary`` setting.\n\n    - If this setting is not set, then it is expected that the user will need to manually add\n      all nuclides to the ``nuclideFlags`` section of the reactor core blueprints.\n\n    - If the ``fpModelLibrary`` is selected then this will automatically add to the\n      ``nuclideFlags`` input using :py:func:`isotopicOptions.autoUpdateNuclideFlags`\n      and this class will initialize all added nuclides to have zero number densities.\n\n.. warning::\n\n    The explicit fission product model is being implemented with the vision of using\n    generating multi-group cross sections for nuclides that are added with the\n    ``fpModelLibrary`` setting with follow-on depletion calculations that will be managed by\n    a detailed depletion solver, like ORIGEN. There are many caveats to how this model\n    is initialized and may not be an out-of-the-box general solution.\n\"\"\"\n\nfrom armi import interfaces, runLog\nfrom armi.physics.neutronics.fissionProductModel import lumpedFissionProduct\nfrom armi.physics.neutronics.fissionProductModel.fissionProductModelSettings import (\n    CONF_FP_MODEL,\n    CONF_MAKE_ALL_BLOCK_LFPS_INDEPENDENT,\n)\nfrom armi.reactor.flags import Flags\n\nNUM_FISSION_PRODUCTS_PER_LFP = 2.0\n\nORDER = interfaces.STACK_ORDER.AFTER + interfaces.STACK_ORDER.PREPROCESSING\n\n\ndef describeInterfaces(_cs):\n    \"\"\"Function for exposing interface(s) to other code.\"\"\"\n    return (FissionProductModel, {})\n\n\nclass FissionProductModel(interfaces.Interface):\n    \"\"\"Coordinates the fission product model on the reactor.\"\"\"\n\n    name = \"fissionProducts\"\n\n    def __init__(self, r, cs):\n        interfaces.Interface.__init__(self, r, cs)\n        self._globalLFPs = lumpedFissionProduct.lumpedFissionProductFactory(self.cs)\n\n    @property\n    def _explicitFissionProducts(self):\n        return self.cs[CONF_FP_MODEL] == \"explicitFissionProducts\"\n\n  
  @property\n    def _useGlobalLFPs(self):\n        return not (self.cs[CONF_MAKE_ALL_BLOCK_LFPS_INDEPENDENT] or self._explicitFissionProducts)\n\n    @property\n    def _fissionProductBlockType(self):\n        \"\"\"\n        Set the block type that the fission products will be applied to.\n\n        Notes\n        -----\n        Some Monte Carlo codes require all nuclides to be consistent in all\n        materials when assemblies are shuffled.  This requires that fission\n        products be consistent across all blocks, even if fission products are\n        not generated when the block is depleted.\n        \"\"\"\n        return None if self.getInterface(\"mcnp\") is not None else Flags.FUEL\n\n    def interactBOL(self):\n        interfaces.Interface.interactBOL(self)\n        if self._explicitFissionProducts:\n            self.setAllComponentFissionProducts()\n        else:\n            self.setAllBlockLFPs()\n\n    def setAllComponentFissionProducts(self):\n        \"\"\"\n        Initialize all nuclides for each ``DEPLETABLE`` component in the core.\n\n        Notes\n        -----\n        This should be called when explicit fission product modeling is enabled to\n        ensure that all isotopes are initialized on the depletable components within\n        the reactor data model so that there is some density as a starting point.\n\n        When explicit fission products are enabled and the user has not already included\n        all fission products in the blueprints (in ``nuclideFlags``), the ``fpModelLibrary`` setting is used\n        to autofill all the nuclides in a given library into the ``blueprints.allNuclidesInProblem``\n        list. 
All nuclides that were not manually initialized by the user are added to\n        the ``DEPLETABLE`` components throughout every block in the core.\n\n        The ``DEPLETABLE`` flag is based on the user adding this explicitly in the blueprints,\n        or is based on the user setting a nuclide to ``burn: true`` in the blueprint ``nuclideFlags``.\n\n        See Also\n        --------\n        armi.reactor.blueprints.isotopicOptions.autoUpdateNuclideFlags\n        armi.reactor.blueprints.isotopicOptions.getAllNuclideBasesByLibrary\n        \"\"\"\n        for b in self.r.core.getBlocks(includeAll=True):\n            b.setLumpedFissionProducts(None)\n            for c in b.getComponents(Flags.DEPLETABLE):\n                # Add all isotopes in problem at 0.0 density\n                updatedNDens = c.getNumberDensities()\n                # self.r.blueprints.allNuclidesInProblem contains ~everything in ENDF if _explicitFissionProducts\n                for nuc in self.r.blueprints.allNuclidesInProblem:\n                    if nuc in updatedNDens:\n                        continue\n                    updatedNDens[nuc] = 0.0\n                c.updateNumberDensities(updatedNDens)\n\n    def setAllBlockLFPs(self):\n        \"\"\"\n        Sets all the block lumped fission products attributes.\n\n        See Also\n        --------\n        armi.reactor.components.Component.setLumpedFissionProducts\n        \"\"\"\n        for b in self.r.core.getBlocks(self._fissionProductBlockType, includeAll=True):\n            if self._useGlobalLFPs:\n                b.setLumpedFissionProducts(self.getGlobalLumpedFissionProducts())\n            else:\n                independentLFPs = self.getGlobalLumpedFissionProducts().duplicate()\n                b.setLumpedFissionProducts(independentLFPs)\n\n    def getGlobalLumpedFissionProducts(self):\n        r\"\"\"\n        Lookup the detailed fission product object associated with a xsType and burnup group.\n\n        See Also\n        
--------\n        armi.physics.neutronics.isotopicDepletion.depletion.DepletionInterface.buildFissionProducts\n        armi.reactor.blocks.Block.getLumpedFissionProductCollection : same thing, but block-level compatible. Use this\n        \"\"\"\n        return self._globalLFPs\n\n    def setGlobalLumpedFissionProducts(self, lfps):\n        r\"\"\"\n        Lookup the detailed fission product object associated with a xsType and burnup group.\n\n        See Also\n        --------\n        armi.reactor.blocks.Block.getLumpedFissionProductCollection : same thing, but block-level compatible. Use this\n        \"\"\"\n        self._globalLFPs = lfps\n\n    def interactBOC(self, cycle=None):\n        if self._explicitFissionProducts:\n            self.setAllComponentFissionProducts()\n        else:\n            self.setAllBlockLFPs()\n\n    def interactDistributeState(self):\n        if self._explicitFissionProducts:\n            self.setAllComponentFissionProducts()\n        else:\n            self.setAllBlockLFPs()\n\n    def getAllFissionProductNames(self):\n        \"\"\"\n        Find all fission product names from the lumped fission product collection.\n\n        Notes\n        -----\n        This considers all LFP collections, whether they are global, block-level,\n        or a mix of these.\n        \"\"\"\n        runLog.debug(\"Gathering all possible fission products that are modeled.\")\n        fissionProductNames = []\n        lfpCollections = []\n        # get all possible lfp collections (global + block-level)\n        for b in self.r.core.getBlocks(Flags.FUEL, includeAll=True):\n            lfpCollection = b.getLumpedFissionProductCollection()\n            if lfpCollection and lfpCollection not in lfpCollections:\n                lfpCollections.append(lfpCollection)\n\n        # get all possible FP names in each LFP collection\n        for lfpCollection in lfpCollections:\n            for fpName in lfpCollection.getAllFissionProductNames():\n              
  if fpName not in fissionProductNames:\n                    fissionProductNames.append(fpName)\n\n        return fissionProductNames\n\n    def removeFissionGasesFromBlocks(self):\n        \"\"\"\n        Return False to indicate that no fission products are being removed.\n\n        Notes\n        -----\n        This should be implemented on an application-specific model.\n        \"\"\"\n        runLog.warning(f\"Fission gas removal is not implemented in {self}\")\n        return False\n"
  },
  {
    "path": "armi/physics/neutronics/fissionProductModel/fissionProductModelSettings.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Settings related to the fission product model.\"\"\"\n\nfrom armi.physics.neutronics import fissionProductModel\nfrom armi.settings import setting\n\nCONF_FP_MODEL = \"fpModel\"\nCONF_MAKE_ALL_BLOCK_LFPS_INDEPENDENT = \"makeAllBlockLFPsIndependent\"\nCONF_LFP_COMPOSITION_FILE_PATH = \"lfpCompositionFilePath\"\nCONF_FISSION_PRODUCT_LIBRARY_NAME = \"fpModelLibrary\"\n\n\ndef defineSettings():\n    \"\"\"Define settings for the plugin.\"\"\"\n    settings = [\n        setting.Setting(\n            CONF_FP_MODEL,\n            default=\"infinitelyDilute\",\n            label=\"Fission Product Model\",\n            description=(\n                \"This setting is used to determine how fission products are treated in an \"\n                \"analysis. By choosing `noFissionProducts`, no fission products will be added. By \"\n                \"selecting, `infinitelyDilute`, lumped fission products will be initialized to a \"\n                \"very small number on the blocks/components that require them. By choosing `MO99`, \"\n                \"the fission products will be represented only by Mo-99. This is a simplistic \"\n                \"assumption that is commonly used by fast reactor analyses in scoping calculations \"\n                \"and is not necessarily a great assumption for depletion evaluations. 
Finally, by \"\n                \"choosing `explicitFissionProducts` the fission products will be added explicitly \"\n                \"to the blocks/components that are depletable. This is useful for detailed tracking \"\n                \"of fission products.\"\n            ),\n            options=[\n                \"noFissionProducts\",\n                \"infinitelyDilute\",\n                \"MO99\",\n                \"explicitFissionProducts\",\n            ],\n        ),\n        setting.Setting(\n            CONF_FISSION_PRODUCT_LIBRARY_NAME,\n            default=\"\",\n            label=\"Fission Product Library\",\n            description=(\n                f\"This setting should be used when `{CONF_FP_MODEL}` is set to \"\n                \"`explicitFissionProducts`. It is used in conjunction with any nuclideFlags \"\n                \"defined in the blueprints to configure all the nuclides that are modeled within \"\n                \"the core. Selecting any library option will add all nuclides from the selected \"\n                \"library to the model so that analysts do not need to change their inputs when \"\n                \"modifying the fission product treatment for calculations.\"\n            ),\n            options=[\n                \"\",\n                \"MC2-3\",\n            ],\n        ),\n        setting.Setting(\n            CONF_MAKE_ALL_BLOCK_LFPS_INDEPENDENT,\n            default=False,\n            label=\"Use Independent LFPs\",\n            description=(\n                \"Flag to make all blocks have independent lumped fission products. 
Note that this \"\n                \"is forced to be True when the `explicitFissionProducts` modeling option is \"\n                \"selected or an interface named `mcnp` is on registered on the operator stack.\"\n            ),\n        ),\n        setting.Setting(\n            CONF_LFP_COMPOSITION_FILE_PATH,\n            default=fissionProductModel.REFERENCE_LUMPED_FISSION_PRODUCT_FILE,\n            label=\"LFP Definition File\",\n            description=(\n                \"Path to the file that contains lumped fission product composition definitions \"\n                \"(e.g. equilibrium yields). This is unused when the `explicitFissionProducts` or \"\n                \"`MO99` modeling options are selected.\"\n            ),\n        ),\n    ]\n    return settings\n\n\ndef getFissionProductModelSettingValidators(inspector):\n    \"\"\"The standard helper method, to provide validators to the fission product model.\"\"\"\n    # Import the Query class here to avoid circular imports.\n    from armi.settings.settingsValidation import Query\n\n    queries = []\n\n    queries.append(\n        Query(\n            lambda: inspector.cs[CONF_FP_MODEL] != \"explicitFissionProducts\"\n            and not bool(inspector.cs[\"initializeBurnChain\"]),\n            (\n                \"The burn chain is not being initialized and the fission product model is not set \"\n                \"to `explicitFissionProducts`. 
This will likely fail.\"\n            ),\n            f\"Would you like to set the `{CONF_FP_MODEL}` to `explicitFissionProducts`?\",\n            lambda: inspector._assignCS(CONF_FP_MODEL, \"explicitFissionProducts\"),\n        )\n    )\n\n    queries.append(\n        Query(\n            lambda: inspector.cs[CONF_FP_MODEL] != \"explicitFissionProducts\"\n            and inspector.cs[CONF_FISSION_PRODUCT_LIBRARY_NAME] != \"\",\n            (\n                \"The explicit fission product model is disabled and the fission product model \"\n                \"library is set. This will have no impact on the results, but it is best to \"\n                f\"disable the `{CONF_FISSION_PRODUCT_LIBRARY_NAME}` option.\"\n            ),\n            \"Would you like to do this?\",\n            lambda: inspector._assignCS(CONF_FISSION_PRODUCT_LIBRARY_NAME, \"\"),\n        )\n    )\n\n    queries.append(\n        Query(\n            lambda: inspector.cs[CONF_FP_MODEL] == \"explicitFissionProducts\"\n            and bool(inspector.cs[\"initializeBurnChain\"]),\n            (\n                \"The explicit fission product model is enabled, but initializing the burn chain is \"\n                \"also enabled. This will likely fail.\"\n            ),\n            \"Would you like to disable the burn chain initialization?\",\n            lambda: inspector._assignCS(\"initializeBurnChain\", False),\n        )\n    )\n\n    queries.append(\n        Query(\n            lambda: inspector.cs[CONF_FP_MODEL] == \"explicitFissionProducts\"\n            and inspector.cs[CONF_FISSION_PRODUCT_LIBRARY_NAME] == \"\",\n            (\n                \"The explicit fission product model is enabled and the fission product model \"\n                \"library is disabled. 
This may result in no fission product nuclides being added to the \"\n                \"case, unless these have been manually added in `nuclideFlags`.\"\n            ),\n            (\n                f\"Would you like to set the `{CONF_FISSION_PRODUCT_LIBRARY_NAME}` option to be \"\n                \"equal to the default implementation of MC2-3?\"\n            ),\n            lambda: inspector._assignCS(CONF_FISSION_PRODUCT_LIBRARY_NAME, \"MC2-3\"),\n        )\n    )\n\n    return queries\n"
  },
  {
    "path": "armi/physics/neutronics/fissionProductModel/lumpedFissionProduct.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThe lumped fission product (LFP)  module deals with representing LFPs and loading\nthem from files.\n\nThese are generally managed by the\n:py:mod:`~armi.physics.neutronics.fissionProductModel.fissionProductModel.FissionProductModel`\n\"\"\"\n\nimport os\n\nfrom armi import runLog\nfrom armi.nucDirectory import elements, nuclideBases\nfrom armi.physics.neutronics.fissionProductModel.fissionProductModelSettings import (\n    CONF_FP_MODEL,\n    CONF_LFP_COMPOSITION_FILE_PATH,\n)\n\n\nclass LumpedFissionProduct:\n    r\"\"\"\n    Lumped fission product.\n\n    The yields are in number fraction and they sum to 2.0 in general so a\n    fission of an actinide results in one LFP, which represents 2 real FPs.\n\n    This object is a data structure and works a lot like a dictionary in terms\n    of accessing and modifying the data.\n\n    The yields are indexed by nuclideBase -- in self.yld the yield fraction is\n    indexed by nuclideBases of the individual fission product isotopes\n\n    Examples\n    --------\n    >>> fpd = FissionProductDefinitionFile(stream)\n    >>> lfp = fpd.createSingleLFPFromFile(\"LFP39\")\n    >>> lfp[<nuclidebase for EU151>]\n    2.9773e-05\n\n    See Also\n    --------\n    armi.reactor.blocks.Block.getLumpedFissionProductCollection : how you should access these.\n    \"\"\"\n\n    def __init__(self, name=None):\n        \"\"\"\n       
 Make an LFP.\n\n        Parameters\n        ----------\n        name : str, optional\n            A name for the LFP. Will be overwritten if you load from file. Provide only\n            if you are spinning your own custom LFPs.\n        \"\"\"\n        self.name = name\n        self.yld = {}\n\n    def duplicate(self):\n        \"\"\"Make a copy of this w/o using deepcopy.\"\"\"\n        new = self.__class__(self.name)\n        for key, val in self.yld.items():\n            new.yld[key] = val\n        return new\n\n    def __getitem__(self, fissionProduct):\n        \"\"\"\n        Return the yield of a particular fission product.\n\n        This allows the LFP to be accessed via indexing, like this: ``lfp[fp]``\n\n        Returns\n        -------\n        yld : yield of the fission product.\n        \"\"\"\n        return self.yld.get(fissionProduct, 0.0)\n\n    def __setitem__(self, key, val):\n        from armi.physics.neutronics.fissionProductModel.fissionProductModel import (\n            NUM_FISSION_PRODUCTS_PER_LFP,\n        )\n\n        if val < 0.0:\n            raise ValueError(f\"Cannot set the yield of {key} in {self} to be less than zero as this is non-physical.\")\n        if val > NUM_FISSION_PRODUCTS_PER_LFP:\n            raise ValueError(\n                f\"Cannot set the yield of {key} in {self} to be greater than {NUM_FISSION_PRODUCTS_PER_LFP}.\"\n            )\n\n        self.yld[key] = val\n\n    def __contains__(self, item):\n        return item in self.yld\n\n    def __repr__(self):\n        return f\"<Lumped Fission Product {self.name}>\"\n\n    def keys(self):\n        return self.yld.keys()\n\n    def values(self):\n        return self.yld.values()\n\n    def items(self):\n        for nuc in self.keys():\n            yield nuc, self[nuc]\n\n    def getGaseousYieldFraction(self):\n        \"\"\"Return the yield fraction of the gaseous nuclides.\"\"\"\n        yld = 0.0\n        for nuc in self.keys():\n            if not isGas(nuc):\n    
            continue\n            yld += self[nuc]\n        return yld\n\n    def getTotalYield(self):\n        \"\"\"\n        Get the fractional yield of all nuclides in this lumped fission product.\n\n        Accounts for any fission gas that may be removed.\n\n        Returns\n        -------\n        total yield of all fps\n        \"\"\"\n        return sum([self[nuc] for nuc in self.yld])\n\n    def getMassFracs(self):\n        \"\"\"\n        Return a dictionary of mass fractions indexed by nuclide.\n\n        Returns\n        -------\n        massFracs : dict\n            mass fractions (floats) of LFP masses\n        \"\"\"\n        massFracs = {}\n        for nuc in self.keys():\n            massFracs[nuc] = self.getMassFrac(nuclideBase=nuc)\n        return massFracs\n\n    def getMassFrac(self, nucName=None, nuclideBase=None):\n        \"\"\"\n        Return the mass fraction of the given nuclide.\n\n        Returns\n        -------\n        nuclide mass fraction (float)\n        \"\"\"\n        massFracDenom = self.getMassFracDenom()\n        if not nuclideBase:\n            nuclideBase = nuclideBases.byName[nucName]\n        return self.__getitem__(nuclideBase) * (nuclideBase.weight / massFracDenom)\n\n    def getMassFracDenom(self):\n        \"\"\"\n        See Also\n        --------\n        armi.physics.neutronics.fissionProductModel.lumpedFissionProduct.LumpedFissionProduct.getMassFrac\n        \"\"\"\n        massFracDenom = 0.0\n        for nuc in self.keys():\n            massFracDenom += self[nuc] * nuc.weight\n        return massFracDenom\n\n\nclass LumpedFissionProductCollection(dict):\n    \"\"\"\n    A set of lumped fission products.\n\n    Typically there would be one of these on a block or on a global level.\n    \"\"\"\n\n    def __init__(self):\n        self.collapsible = False\n\n    def duplicate(self):\n        new = self.__class__()\n        for lfpName, lfp in self.items():\n            new[lfpName] = lfp.duplicate()\n        
return new\n\n    def getLumpedFissionProductNames(self):\n        return self.keys()\n\n    def getAllFissionProductNames(self):\n        \"\"\"Gets names of all fission products in this collection.\"\"\"\n        fpNames = set()\n        for lfp in self.values():\n            for fp in lfp.keys():\n                fpNames.add(fp.name)\n        return sorted(fpNames)\n\n    def getAllFissionProductNuclideBases(self):\n        \"\"\"Gets names of all fission products in this collection.\"\"\"\n        nucs = set()\n        for _lfpName, lfp in self.items():\n            for fp in lfp.keys():\n                nucs.add(fp)\n        return sorted(nucs)\n\n    def getNumberDensities(self, objectWithParentDensities=None, densFunc=None):\n        \"\"\"\n        Gets all FP number densities in collection.\n\n        Parameters\n        ----------\n        objectWithParentDensities : ArmiObject\n            object (probably block) that can be called with getNumberDensity('LFP35'), etc. to get densities of LFPs.\n        densFunc : function, optional\n            Optional method to extract LFP densities\n\n        Returns\n        -------\n        fpDensities : dict\n            keys are fp names, vals are fission product number density in atoms/bn-cm.\n        \"\"\"\n        if not densFunc:\n            densFunc = lambda lfpName: objectWithParentDensities.getNumberDensity(lfpName)\n        fpDensities = {}\n        for lfpName, lfp in self.items():\n            lfpDens = densFunc(lfpName)\n            for fp, fpFrac in lfp.items():\n                fpDensities[fp.name] = fpDensities.get(fp.name, 0.0) + fpFrac * lfpDens\n        return fpDensities\n\n    def getMassFrac(self, oldMassFrac=None):\n        \"\"\"Returns the mass fraction vector of the collection of lumped fission products.\"\"\"\n        if not oldMassFrac:\n            raise ValueError(\"You must define a massFrac vector\")\n\n        massFrac = {}\n\n        for lfpName, lfp in self.items():\n            
lfpMFrac = oldMassFrac[lfpName]\n            for nuc, mFrac in lfp.getMassFracs().items():\n                try:\n                    massFrac[nuc] += lfpMFrac * mFrac\n                except KeyError:\n                    massFrac[nuc] = lfpMFrac * mFrac\n\n        return massFrac\n\n\nclass FissionProductDefinitionFile:\n    \"\"\"\n    Reads a file that has definitions of one or more LFPs in it to produce LFPs.\n\n    The format for this file is as follows::\n\n        LFP35 GE73  5.9000E-06\n        LFP35 GE74  1.4000E-05\n        LFP35 GE76  1.6000E-04\n        LFP35 AS75  8.9000E-05\n\n    and so on\n\n    Examples\n    --------\n    >>> fpd = FissionProductDefinitionFile(stream)\n    >>> lfps = fpd.createLFPsFromFile()\n\n    The path to this file is specified by the `lfpCompositionFilePath` user setting.\n    \"\"\"\n\n    def __init__(self, stream):\n        self.stream = stream\n\n    def createLFPsFromFile(self):\n        \"\"\"\n        Read the file and create LFPs from the contents.\n\n        Returns\n        -------\n        lfps : list\n            List of LumpedFissionProducts contained in the file\n        \"\"\"\n        lfps = LumpedFissionProductCollection()\n        for lfpLines in self._splitIntoIndividualLFPLines():\n            lfp = self._readOneLFP(lfpLines)\n            lfps[lfp.name] = lfp\n        return lfps\n\n    def createSingleLFPFromFile(self, name):\n        \"\"\"Read one LFP from the file.\"\"\"\n        lfpLines = self._splitIntoIndividualLFPLines(name)\n        lfp = self._readOneLFP(lfpLines[0])  # only one LFP expected. Use it.\n        return lfp\n\n    def _splitIntoIndividualLFPLines(self, lfpName=None):\n        \"\"\"\n        The lfp file can contain one or more LFPs. 
This splits them.\n\n        Ignores DUMPs.\n\n        Parameters\n        ----------\n        lfpName : str, optional\n            Restrict to just these names if desired.\n\n        Returns\n        -------\n        allLFPLines : list of list\n            each entry is a list of lines that define one LFP\n        \"\"\"\n        lines = self.stream.readlines()\n\n        allLFPLines = []\n        thisLFPLines = []\n        lastName = None\n        for line in lines:\n            name = line.split()[0]\n            if \"DUMP\" in name or (lfpName and lfpName not in name):\n                continue\n            if lastName and name != lastName:\n                allLFPLines.append(thisLFPLines)\n                thisLFPLines = []\n            thisLFPLines.append(line)\n            lastName = name\n\n        if thisLFPLines:\n            allLFPLines.append(thisLFPLines)\n\n        return allLFPLines\n\n    def _readOneLFP(self, linesOfOneLFP):\n        lfp = LumpedFissionProduct()\n        totalYield = 0.0\n        for line in linesOfOneLFP:\n            data = line.split()\n            parent = data[0]\n            nucLibId = data[1]\n            nuc = nuclideBases.byName[nucLibId]\n            yld = float(data[2])\n            lfp.yld[nuc] = yld\n            totalYield += yld\n\n        lfp.name = parent  # e.g. LFP38\n        runLog.debug(\"Loaded {0} {1} nuclides for a total yield of {2}\".format(len(lfp.yld), lfp.name, totalYield))\n        return lfp\n\n\ndef lumpedFissionProductFactory(cs):\n    \"\"\"Build lumped fission products.\"\"\"\n    if cs[CONF_FP_MODEL] == \"explicitFissionProducts\":\n        return None\n\n    if cs[CONF_FP_MODEL] == \"MO99\":\n        return _buildMo99LumpedFissionProduct()\n\n    lfpPath = cs[CONF_LFP_COMPOSITION_FILE_PATH]\n    if not lfpPath or not os.path.exists(lfpPath):\n        raise ValueError(\n            f\"The fission product reference file does not exist or is not a valid path. 
Path provided: {lfpPath}\"\n        )\n\n    runLog.extra(f\"Loading global lumped fission products (LFPs) from {lfpPath}\")\n    with open(lfpPath) as lfpStream:\n        lfpFile = FissionProductDefinitionFile(lfpStream)\n        lfps = lfpFile.createLFPsFromFile()\n    return lfps\n\n\ndef _buildMo99LumpedFissionProduct():\n    \"\"\"\n    Build a dummy MO-99 LFP collection.\n\n    This is a very bad FP approximation from a physics standpoint but can be very useful\n    for rapid-running test cases.\n    \"\"\"\n    mo99 = nuclideBases.byName[\"MO99\"]\n    mo99LFPs = LumpedFissionProductCollection()\n    for lfp in nuclideBases.where(lambda nb: isinstance(nb, nuclideBases.LumpNuclideBase)):\n        # Not all lump nuclides bases defined are fission products, so ensure that only fission\n        # products are considered.\n        if not (\"FP\" in lfp.name or \"REGN\" in lfp.name):\n            continue\n        mo99FP = LumpedFissionProduct(lfp.name)\n        mo99FP[mo99] = 2.0\n        mo99LFPs[lfp.name] = mo99FP\n    return mo99LFPs\n\n\ndef isGas(nuc):\n    \"\"\"True if nuclide is considered a gas.\"\"\"\n    # ruff: noqa: SIM110\n    for element in elements.getElementsByChemicalPhase(elements.ChemicalPhase.GAS):\n        if element == nuc.element:\n            return True\n    return False\n"
  },
  {
    "path": "armi/physics/neutronics/fissionProductModel/tests/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"
  },
  {
    "path": "armi/physics/neutronics/fissionProductModel/tests/test_fissionProductModel.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Test the fission product module to ensure all FP are available.\"\"\"\n\nimport unittest\n\nfrom armi.physics.neutronics.fissionProductModel import fissionProductModel\nfrom armi.physics.neutronics.fissionProductModel.fissionProductModelSettings import (\n    CONF_FISSION_PRODUCT_LIBRARY_NAME,\n    CONF_FP_MODEL,\n)\nfrom armi.physics.neutronics.fissionProductModel.tests import test_lumpedFissionProduct\nfrom armi.physics.neutronics.isotopicDepletion.isotopicDepletionInterface import (\n    isDepletable,\n)\nfrom armi.reactor.flags import Flags\nfrom armi.reactor.tests.test_reactors import (\n    buildOperatorOfEmptyHexBlocks,\n    loadTestReactor,\n)\n\n\nclass TestFPMLumpedFP(unittest.TestCase):\n    \"\"\"\n    Tests the fission product model interface behavior when lumped fission products are enabled.\n\n    Notes\n    -----\n    This loads the global fission products from a file stream.\n    \"\"\"\n\n    def setUp(self):\n        o = buildOperatorOfEmptyHexBlocks()\n        o.removeAllInterfaces()\n        self.fpModel = fissionProductModel.FissionProductModel(o.r, o.cs)\n        o.addInterface(self.fpModel)\n\n        # Load the fission products from a file stream.\n        dummyLFPs = test_lumpedFissionProduct.getDummyLFPFile()\n        self.fpModel.setGlobalLumpedFissionProducts(dummyLFPs.createLFPsFromFile())\n\n        # Set up the global LFPs and 
check that they are setup.\n        self.fpModel.interactBOL()\n        self.assertTrue(self.fpModel._useGlobalLFPs)\n\n    def test_loadGlobalLFPsFromFile(self):\n        \"\"\"Tests that loading lumped fission products from a file.\"\"\"\n        self.assertEqual(len(self.fpModel._globalLFPs), 3)\n        lfps = self.fpModel.getGlobalLumpedFissionProducts()\n        self.assertIn(\"LFP39\", lfps)\n\n    def test_getAllFissionProductNames(self):\n        \"\"\"Tests retrieval of the fission product names within all the lumped fission products of the core.\"\"\"\n        fissionProductNames = self.fpModel.getAllFissionProductNames()\n        self.assertGreater(len(fissionProductNames), 5)\n        self.assertIn(\"XE135\", fissionProductNames)\n\n    def test_fpApplication(self):\n        o, r = loadTestReactor(inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\")\n        fpModel = fissionProductModel.FissionProductModel(o.r, o.cs)\n        # Set up the global LFPs and check that they are setup.\n        self.assertTrue(fpModel._useGlobalLFPs)\n        fpModel.interactBOL()\n        for b in r.core.iterBlocks():\n            if b.isFuel():\n                self.assertTrue(b._lumpedFissionProducts is not None)\n            else:\n                self.assertTrue(b._lumpedFissionProducts is None)\n\n        # now check if all depletable blocks do not have all nuclides if not detailedAxialExpansion\n        fpModel.allBlocksNeedAllNucs = False\n        fpModel.interactBOL()\n        allNucsInProblem = set(r.blueprints.allNuclidesInProblem)\n        for b in r.core.iterBlocks():\n            if isDepletable(b):\n                if len(allNucsInProblem - set(b.getNuclides())) > 0:\n                    break\n        else:\n            self.assertTrue(False, \"All blocks have all nuclides!\")\n\n\nclass TestFPMExplicitMC2Lib(unittest.TestCase):\n    \"\"\"\n    Tests the fission product model interface behavior when explicit fission products are enabled.\n\n    
These tests can use a smaller test reactor, and so will be faster.\n    \"\"\"\n\n    def setUp(self):\n        o, r = loadTestReactor(\n            customSettings={\n                CONF_FP_MODEL: \"explicitFissionProducts\",\n                CONF_FISSION_PRODUCT_LIBRARY_NAME: \"MC2-3\",\n            },\n            inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\",\n        )\n        self.r = r\n        self.nuclideBases = self.r.nuclideBases\n        self.fpModel = fissionProductModel.FissionProductModel(o.r, o.cs)\n        # Set up the global LFPs and check that they are setup.\n        self.assertFalse(self.fpModel._useGlobalLFPs)\n\n    def test_nuclideFlags(self):\n        \"\"\"Test that the nuclide flags contain the set of MC2-3 modeled nuclides.\"\"\"\n        # Run the ``interactBOL`` here to trigger setting up the fission\n        # products in the reactor data model.\n        self.fpModel.interactBOL()\n\n        for nb in self.nuclideBases.byMcc3Id.values():\n            self.assertIn(nb.name, self.r.blueprints.nuclideFlags.keys())\n\n    def test_nuclidesInModelFuel(self):\n        \"\"\"Test that the fuel blocks contain all the MC2-3 modeled nuclides.\"\"\"\n        # Run the ``interactBOL`` here to trigger setting up the fission\n        # products in the reactor data model.\n        self.fpModel.interactBOL()\n\n        b = self.r.core.getFirstBlock(Flags.FUEL)\n        nuclideList = b.getNuclides()\n        for nb in self.nuclideBases.byMcc3Id.values():\n            self.assertIn(nb.name, nuclideList)\n\n\nclass TestFPMExplicitMC2LibSlow(unittest.TestCase):\n    \"\"\"\n    Tests the fission product model interface behavior when explicit fission products are enabled.\n\n    These tests require a large test reactor, and will lead to slower tests.\n    \"\"\"\n\n    def setUp(self):\n        o, r = loadTestReactor(\n            customSettings={\n                CONF_FP_MODEL: \"explicitFissionProducts\",\n                
CONF_FISSION_PRODUCT_LIBRARY_NAME: \"MC2-3\",\n            }\n        )\n        self.r = r\n        self.nuclideBases = self.r.nuclideBases\n        self.fpModel = fissionProductModel.FissionProductModel(o.r, o.cs)\n        # Set up the global LFPs and check that they are setup.\n        self.assertFalse(self.fpModel._useGlobalLFPs)\n\n    def test_nuclidesInModelAllDepletableBlocks(self):\n        \"\"\"Test that the depletable blocks contain all the MC2-3 modeled nuclides.\"\"\"\n        # Check that there are some fuel and control blocks in the core model.\n        fuelBlocks = self.r.core.getBlocks(Flags.FUEL)\n        controlBlocks = self.r.core.getBlocks(Flags.CONTROL)\n        self.assertGreater(len(fuelBlocks), 0)\n        self.assertGreater(len(controlBlocks), 0)\n\n        # prove that the control blocks are not depletable\n        for b in controlBlocks:\n            self.assertFalse(isDepletable(b))\n\n        # as a corollary of the above, prove that no components in the control blocks are depletable\n        for b in controlBlocks:\n            for c in b.getComponents():\n                self.assertFalse(isDepletable(c))\n\n        # Force the first component in the control blocks\n        # to be labeled as depletable for checking that explicit\n        # fission products can be assigned.\n        for b in controlBlocks:\n            c = b.getComponents()[0]\n            c.p.flags |= Flags.DEPLETABLE\n\n        # now each control block should be depletable\n        for b in controlBlocks:\n            self.assertTrue(isDepletable(b))\n\n        # as a corollary of the above, prove that only the first component in each control block is depletable\n        for b in controlBlocks:\n            comps = list(b.getComponents())\n            for i, c in enumerate(comps):\n                if i == 0:\n                    self.assertTrue(isDepletable(c))\n                else:\n                    self.assertFalse(isDepletable(c))\n\n        # Run the 
``interactBOL`` here to trigger setting up the fission\n        # products in the reactor data model.\n        self.fpModel.interactBOL()\n\n        # Check that the depletable blocks have all explicit\n        # fission products in them.\n        for b in self.r.core.iterBlocks():\n            nuclideList = b.getNuclides()\n            if isDepletable(b):\n                for nb in self.nuclideBases.byMcc3Id.values():\n                    self.assertIn(nb.name, nuclideList)\n            else:\n                self.assertLess(len(b.getNuclides()), len(self.nuclideBases.byMcc3Id))\n"
  },
  {
    "path": "armi/physics/neutronics/fissionProductModel/tests/test_lumpedFissionProduct.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for lumpedFissionProduce module.\"\"\"\n\nimport io\nimport math\nimport os\nimport unittest\n\nfrom armi.context import RES\nfrom armi.nucDirectory.nuclideBases import NuclideBases\nfrom armi.physics.neutronics.fissionProductModel import (\n    REFERENCE_LUMPED_FISSION_PRODUCT_FILE,\n    lumpedFissionProduct,\n)\nfrom armi.physics.neutronics.fissionProductModel.fissionProductModelSettings import (\n    CONF_FP_MODEL,\n    CONF_LFP_COMPOSITION_FILE_PATH,\n)\nfrom armi.reactor.flags import Flags\nfrom armi.reactor.tests.test_reactors import buildOperatorOfEmptyHexBlocks\nfrom armi.settings import Settings\n\nLFP_TEXT = \"\"\"LFP35 GE73   5.9000E-06\nLFP35 GE74    1.4000E-05\nLFP35 GE76    1.6000E-04\nLFP35 AS75    8.9000E-05\nLFP35 KR85    8.9000E-05\nLFP35 MO99    8.9000E-05\nLFP35 SM150   8.9000E-05\nLFP35 XE135   8.9000E-05\nLFP39 XE135   8.9000E-05\nLFP38 XE135   8.9000E-05\n\"\"\"\n\n\ndef getDummyLFPFile():\n    return lumpedFissionProduct.FissionProductDefinitionFile(io.StringIO(LFP_TEXT))\n\n\nclass TestFissionProductDefinitionFile(unittest.TestCase):\n    \"\"\"Test of the fission product model.\"\"\"\n\n    def setUp(self):\n        self.fpd = getDummyLFPFile()\n        self.nuclideBases = NuclideBases()\n\n    def test_createLFPs(self):\n        \"\"\"Test of the fission product model creation.\"\"\"\n        lfps = 
self.fpd.createLFPsFromFile()\n        xe135 = self.nuclideBases.fromName(\"XE135\")\n        self.assertEqual(len(lfps), 3)\n        self.assertIn(\"LFP35\", lfps)\n        for lfp in lfps.values():\n            self.assertIn(xe135, lfp)\n\n    def test_createReferenceLFPs(self):\n        \"\"\"Test of the reference fission product model creation.\"\"\"\n        with open(REFERENCE_LUMPED_FISSION_PRODUCT_FILE, \"r\") as LFP_FILE:\n            LFP_TEXT = LFP_FILE.read()\n        fpd = lumpedFissionProduct.FissionProductDefinitionFile(io.StringIO(LFP_TEXT))\n        fpd.fName = REFERENCE_LUMPED_FISSION_PRODUCT_FILE\n        lfps = fpd.createLFPsFromFile()\n        self.assertEqual(len(lfps), 5)\n\n        LFP_IDS = [\n            \"LFP35\",\n            \"LFP38\",\n            \"LFP39\",\n            \"LFP40\",\n            \"LFP41\",\n        ]\n\n        for lfp_id in LFP_IDS:\n            self.assertIn(lfp_id, lfps)\n\n        mo99 = self.nuclideBases.fromName(\"MO99\")\n        ref_mo99_yields = [0.00091, 0.00112, 0.00099, 0.00108, 0.00101]\n\n        for ref_fp_yield, lfp_id in zip(ref_mo99_yields, LFP_IDS):\n            lfp = lfps[lfp_id]\n            self.assertIn(mo99, lfp)\n\n            error = math.fabs(ref_fp_yield - lfp[mo99]) / ref_fp_yield\n            self.assertLess(error, 1e-6)\n\n\nclass TestLFP(unittest.TestCase):\n    \"\"\"Test of the lumped fission product yields.\"\"\"\n\n    def setUp(self):\n        self.fpd = lumpedFissionProduct.FissionProductDefinitionFile(io.StringIO(LFP_TEXT))\n        self.nuclideBases = NuclideBases()\n\n    def test_getYield(self):\n        \"\"\"Test of the yield of a fission product.\"\"\"\n        xe135 = self.nuclideBases.fromName(\"XE135\")\n        lfp = self.fpd.createSingleLFPFromFile(\"LFP39\")\n        lfp[xe135] = 1.2\n        val3 = lfp[xe135]\n        self.assertEqual(val3, 1.2)\n        self.assertEqual(lfp[5], 0.0)\n\n    def test_gaseousYieldFraction(self):\n        lfp = 
self.fpd.createSingleLFPFromFile(\"LFP39\")\n        # This is equal to the Xe yield set in the dummy ``LFP_TEXT``\n        # data for these tests.\n        self.assertEqual(lfp.getGaseousYieldFraction(), 8.9000e-05)\n\n    def test_isGas(self):\n        \"\"\"Tests that a nuclide is a gas or not at STP based on its chemical phase.\"\"\"\n        nb = self.nuclideBases.byName[\"H1\"]\n        self.assertTrue(lumpedFissionProduct.isGas(nb))\n        nb = self.nuclideBases.byName[\"H2\"]\n        self.assertTrue(lumpedFissionProduct.isGas(nb))\n        nb = self.nuclideBases.byName[\"H3\"]\n        self.assertTrue(lumpedFissionProduct.isGas(nb))\n\n        nb = self.nuclideBases.byName[\"U235\"]\n        self.assertFalse(lumpedFissionProduct.isGas(nb))\n\n        nb = self.nuclideBases.byName[\"O16\"]\n        self.assertTrue(lumpedFissionProduct.isGas(nb))\n\n        nb = self.nuclideBases.byName[\"XE135\"]\n        self.assertTrue(lumpedFissionProduct.isGas(nb))\n\n\nclass TestLFPCollection(unittest.TestCase):\n    \"\"\"Test of the fission product collection.\"\"\"\n\n    def setUp(self):\n        fpd = lumpedFissionProduct.FissionProductDefinitionFile(io.StringIO(LFP_TEXT))\n        self.lfps = fpd.createLFPsFromFile()\n        self.nuclideBases = NuclideBases()\n\n    def test_getAllFissionProductNames(self):\n        \"\"\"Test to ensure the fission product names are present.\"\"\"\n        names = self.lfps.getAllFissionProductNames()\n        self.assertIn(\"XE135\", names)\n        self.assertIn(\"KR85\", names)\n\n    def test_getAllFissionProductNuclideBases(self):\n        \"\"\"Test to ensure the fission product nuclide bases are present.\"\"\"\n        clideBases = self.lfps.getAllFissionProductNuclideBases()\n        xe135 = self.nuclideBases.fromName(\"XE135\")\n        kr85 = self.nuclideBases.fromName(\"KR85\")\n        self.assertIn(xe135, clideBases)\n        self.assertIn(kr85, clideBases)\n\n    def test_duplicate(self):\n        \"\"\"Test to 
ensure that when we duplicate, we don't adjust the original file.\"\"\"\n        newLfps = self.lfps.duplicate()\n        ba = self.nuclideBases.fromName(\"XE135\")\n        lfp1 = self.lfps[\"LFP39\"]\n        lfp2 = newLfps[\"LFP39\"]\n        v1 = lfp1[ba]\n        lfp1[ba] += 1.3  # make sure copy doesn't change w/ first.\n        v2 = lfp2[ba]\n        self.assertEqual(v1, v2)\n\n    def test_getNumberDensities(self):\n        o = buildOperatorOfEmptyHexBlocks()\n        b = next(o.r.core.iterBlocks(Flags.FUEL))\n        fpDensities = self.lfps.getNumberDensities(objectWithParentDensities=b)\n        for fp in [\"GE73\", \"GE74\", \"GE76\", \"AS75\", \"KR85\", \"MO99\", \"SM150\", \"XE135\"]:\n            self.assertEqual(fpDensities[fp], 0.0)\n            # basic test reactor has no fission products in it\n\n    def test_getMassFrac(self):\n        with self.assertRaises(ValueError):\n            self.lfps.getMassFrac(oldMassFrac=None)\n        oldMassFrac = {\n            \"LFP35\": 0.5,\n            \"LFP38\": 0.2,\n            \"LFP39\": 0.3,\n        }\n        newMassFracs = self.lfps.getMassFrac(oldMassFrac)\n        refMassFrac = {\n            \"GE73\": 0.0034703064077030933,\n            \"GE74\": 0.00834728937688672,\n            \"GE76\": 0.09797894499881823,\n            \"AS75\": 0.053783069618403435,\n            \"KR85\": 0.0609551394006646,\n            \"MO99\": 0.07100169460812283,\n            \"SM150\": 0.1076193196365748,\n            \"XE135\": 0.5968442359528263,\n        }\n        for fp, newMassFrac in newMassFracs.items():\n            self.assertAlmostEqual(newMassFrac, refMassFrac[fp.name])\n\n\nclass TestLFPFromRefFile(unittest.TestCase):\n    \"\"\"Tests loading from the `referenceFissionProducts.dat` file.\"\"\"\n\n    def test_fissionProductYields(self):\n        \"\"\"Test that the fission product yields for the lumped fission products sums to 2.0.\"\"\"\n        cs = Settings()\n        cs[CONF_FP_MODEL] = 
\"infinitelyDilute\"\n        cs[CONF_LFP_COMPOSITION_FILE_PATH] = os.path.join(RES, \"referenceFissionProducts.dat\")\n        self.lfps = lumpedFissionProduct.lumpedFissionProductFactory(cs)\n        for lfp in self.lfps.values():\n            self.assertAlmostEqual(lfp.getTotalYield(), 2.0, places=3)\n\n\nclass TestLFPExplicit(unittest.TestCase):\n    \"\"\"Tests loading fission products with explicit modeling.\"\"\"\n\n    def test_explicitFissionProducts(self):\n        \"\"\"Tests that there are no lumped fission products added when the `explicitFissionProducts` model is enabled.\"\"\"\n        cs = Settings()\n        cs[CONF_FP_MODEL] = \"explicitFissionProducts\"\n        self.lfps = lumpedFissionProduct.lumpedFissionProductFactory(cs)\n        self.assertIsNone(self.lfps)\n\n\nclass TestMo99LFP(unittest.TestCase):\n    \"\"\"Test of the fission product model from Mo99.\"\"\"\n\n    def setUp(self):\n        self.lfps = lumpedFissionProduct._buildMo99LumpedFissionProduct()\n\n    def test_getAllFissionProductNames(self):\n        \"\"\"Test to ensure that Mo99 is present, but other FP are not.\"\"\"\n        names = self.lfps.getAllFissionProductNames()\n        self.assertIn(\"MO99\", names)\n        self.assertNotIn(\"KR85\", names)\n        self.assertAlmostEqual(self.lfps[\"LFP35\"].getTotalYield(), 2.0)\n"
  },
  {
    "path": "armi/physics/neutronics/globalFlux/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Global flux solvers.\"\"\"\n\nRX_ABS_MICRO_LABELS = [\"nGamma\", \"fission\", \"nalph\", \"np\", \"nd\", \"nt\"]\nRX_PARAM_NAMES = [\"rateCap\", \"rateFis\", \"rateProdN2n\", \"rateProdFis\", \"rateAbs\"]\n"
  },
  {
    "path": "armi/physics/neutronics/globalFlux/globalFluxInterface.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"The Global flux interface provide a base class for all neutronics tools that compute the neutron\nand/or photon flux.\n\"\"\"\n\nimport math\nfrom typing import Dict, Optional\n\nimport numpy as np\n\nfrom armi import interfaces, runLog\nfrom armi.physics import constants, executers, neutronics\nfrom armi.physics.neutronics.globalFlux import RX_ABS_MICRO_LABELS, RX_PARAM_NAMES\nfrom armi.reactor import geometry, reactors\nfrom armi.reactor.blocks import Block\nfrom armi.reactor.converters import geometryConverters, uniformMesh\nfrom armi.reactor.flags import Flags\nfrom armi.settings.caseSettings import Settings\nfrom armi.utils import getBurnSteps, getMaxBurnSteps, units\n\nORDER = interfaces.STACK_ORDER.FLUX\n\n\nclass GlobalFluxInterface(interfaces.Interface):\n    \"\"\"\n    A general abstract interface for global flux-calculating modules.\n\n    Should be subclassed by more specific implementations.\n    \"\"\"\n\n    name = \"GlobalFlux\"  # make sure to set this in subclasses\n    purpose = \"globalFlux\"\n    _ENERGY_BALANCE_REL_TOL = 1e-5\n\n    def __init__(self, r, cs):\n        interfaces.Interface.__init__(self, r, cs)\n        if self.cs[\"nCycles\"] > 1000:\n            self.cycleFmt = \"04d\"  # produce ig0001.inp\n        else:\n            self.cycleFmt = \"03d\"  # produce ig001.inp\n\n        if getMaxBurnSteps(self.cs) > 10:\n          
  self.nodeFmt = \"03d\"  # produce ig001_001.inp\n        else:\n            self.nodeFmt = \"1d\"  # produce ig001_1.inp.\n        self._bocKeff = None  # for tracking rxSwing\n        self._setTightCouplingDefaults()\n\n    def _setTightCouplingDefaults(self):\n        \"\"\"Enable tight coupling defaults for the interface.\n\n        - allows users to set tightCoupling: true in settings without\n          having to specify the specific tightCouplingSettings for this interface.\n        - this is splt off from self.__init__ for testing\n        \"\"\"\n        if self.coupler is None and self.cs[\"tightCoupling\"]:\n            self.coupler = interfaces.TightCoupler(\"keff\", 1.0e-4, self.cs[\"tightCouplingMaxNumIters\"])\n\n    @staticmethod\n    def getHistoryParams():\n        \"\"\"Return parameters that will be added to assembly versus time history printouts.\"\"\"\n        return [\"detailedDpa\", \"detailedDpaPeak\", \"detailedDpaPeakRate\"]\n\n    def interactBOC(self, cycle=None):\n        interfaces.Interface.interactBOC(self, cycle)\n        self.r.core.p.rxSwing = 0.0  # zero out rxSwing until last time node.\n        self.r.core.p.maxDetailedDpaThisCycle = 0.0  # zero out cumulative params\n        self.r.core.p.dpaFullWidthHalfMax = 0.0\n        self.r.core.p.elevationOfACLP3Cycles = 0.0\n        self.r.core.p.elevationOfACLP7Cycles = 0.0\n        for b in self.r.core.iterBlocks():\n            b.p.detailedDpaThisCycle = 0.0\n            b.p.newDPA = 0.0\n\n    def interactEveryNode(self, cycle, node):\n        \"\"\"\n        Calculate flux, power, and keff for this cycle and node.\n\n        Flux, power, and keff are generally calculated at every timestep to ensure flux\n        is up to date with the reactor state.\n        \"\"\"\n        interfaces.Interface.interactEveryNode(self, cycle, node)\n        self._setRxSwingRelatedParams()\n\n    def interactCoupled(self, iteration):\n        \"\"\"Runs during a tightly-coupled physics iteration to 
updated the flux and power.\"\"\"\n        interfaces.Interface.interactCoupled(self, iteration)\n        self._setRxSwingRelatedParams()\n\n    def _setRxSwingRelatedParams(self):\n        \"\"\"Set Params Related to Rx Swing.\"\"\"\n        if self.r.p.timeNode == 0:\n            # track boc uncontrolled keff for rxSwing param.\n            self._bocKeff = self.r.core.p.keffUnc or self.r.core.p.keff\n\n        # A 1 burnstep cycle would have 2 nodes, and the last node would be node index 1 (first is zero)\n        lastNodeInCycle = getBurnSteps(self.cs)[self.r.p.cycle]\n        if self.r.p.timeNode == lastNodeInCycle and self._bocKeff is not None:\n            eocKeff = self.r.core.p.keffUnc or self.r.core.p.keff\n            swing = (eocKeff - self._bocKeff) / (eocKeff * self._bocKeff)\n            self.r.core.p.rxSwing = swing * units.ABS_REACTIVITY_TO_PCM\n            runLog.info(\n                f\"BOC Uncontrolled keff: {self._bocKeff},  \"\n                f\"EOC Uncontrolled keff: {self.r.core.p.keffUnc}, \"\n                f\"Cycle Reactivity Swing: {self.r.core.p.rxSwing} pcm\"\n            )\n\n    def checkEnergyBalance(self):\n        \"\"\"Check that there is energy balance between the power generated and the specified power.\n\n        .. impl:: Validate the energy generation matches user specifications.\n            :id: I_ARMI_FLUX_CHECK_POWER\n            :implements: R_ARMI_FLUX_CHECK_POWER\n\n            This method checks that the global power computed from flux\n            evaluation matches the global power specified from the user within a\n            tolerance; if it does not, a ``ValueError`` is raised. The\n            global power from the flux solve is computed by summing the\n            block-wise power in the core. 
This value is then compared to the\n            user-specified power and raises an error if relative difference is\n            above :math:`10^{-5}`.\n        \"\"\"\n        powerGenerated = (\n            self.r.core.calcTotalParam(\"power\", calcBasedOnFullObj=False, generationNum=2) / units.WATTS_PER_MW\n        )\n        self.r.core.setPowerIfNecessary()\n        specifiedPower = self.r.core.p.power / units.WATTS_PER_MW / self.r.core.powerMultiplier\n\n        if not math.isclose(powerGenerated, specifiedPower, rel_tol=self._ENERGY_BALANCE_REL_TOL):\n            raise ValueError(\n                \"The power generated in {} is {} MW, but the user specified power is {} MW.\\n\"\n                \"This indicates a software bug. Please report to the developers.\".format(\n                    self.r.core, powerGenerated, specifiedPower\n                )\n            )\n\n    def getIOFileNames(self, cycle, node, coupledIter=None, additionalLabel=\"\"):\n        \"\"\"\n        Return the input and output file names for this run.\n\n        Parameters\n        ----------\n        cycle : int\n            The cycle number\n        node : int\n            The burn node number (e.g. 
0 for BOC, 1 for MOC, etc.)\n        coupledIter : int, optional\n            Coupled iteration number (for tightly-coupled cases)\n        additionalLabel : str, optional\n            An optional tag to the file names to differentiate them\n            from another case.\n\n        Returns\n        -------\n        inName : str\n            Input file name\n        outName : str\n            Output file name\n        stdName : str\n            Standard output file name\n        \"\"\"\n        timeId = \"{0:\" + self.cycleFmt + \"}_{1:\" + self.nodeFmt + \"}\"  # build names with proper number of zeros\n        if coupledIter is not None:\n            timeId += \"_{0:03d}\".format(coupledIter)\n\n        inName = self.cs.caseTitle + timeId.format(cycle, node) + \"{}.{}.inp\".format(additionalLabel, self.name)\n        outName = self.cs.caseTitle + timeId.format(cycle, node) + \"{}.{}.out\".format(additionalLabel, self.name)\n        stdName = outName.strip(\".out\") + \".stdout\"\n\n        return inName, outName, stdName\n\n    def calculateKeff(self, label=\"keff\"):\n        \"\"\"\n        Runs neutronics tool and returns keff without applying it to the reactor.\n\n        Used for things like direct-eigenvalue reactivity coefficients and CR worth iterations.\n        For anything more complicated than getting keff, clients should\n        call ``getExecuter`` to build their case.\n        \"\"\"\n        raise NotImplementedError()\n\n\nclass GlobalFluxInterfaceUsingExecuters(GlobalFluxInterface):\n    \"\"\"\n    A global flux interface that makes use of the ARMI Executer system to run.\n\n    Using Executers is optional but seems to allow easy interoperability between\n    the myriad global flux solvers in the world.\n\n    If a new global flux solver does not fit easily into the Executer pattern, then\n    it will be best to just start from the base GlobalFluxInterface rather than\n    trying to adjust the Executer pattern to fit.\n\n    Notes\n    -----\n 
   This points library users to the Executer object, which is intended to\n    provide commonly-used structure useful for many global flux plugins.\n    \"\"\"\n\n    def interactEveryNode(self, cycle, node):\n        \"\"\"\n        Calculate flux, power, and keff for this cycle and node.\n\n        Flux, power, and keff are generally calculated at every timestep to ensure flux\n        is up to date with the reactor state.\n        \"\"\"\n        executer = self.getExecuter(label=self.getLabel(self.cs.caseTitle, cycle, node))\n        executer.run()\n        GlobalFluxInterface.interactEveryNode(self, cycle, node)\n\n    def interactCoupled(self, iteration):\n        \"\"\"Runs during a tightly-coupled physics iteration to updated the flux and power.\"\"\"\n        executer = self.getExecuter(\n            label=self.getLabel(self.cs.caseTitle, self.r.p.cycle, self.r.p.timeNode, iteration)\n        )\n        executer.run()\n        GlobalFluxInterface.interactCoupled(self, iteration)\n\n    def getTightCouplingValue(self):\n        \"\"\"Return the parameter value.\"\"\"\n        if self.coupler.parameter == \"keff\":\n            return self.r.core.p.keff\n        if self.coupler.parameter == \"power\":\n            scaledCorePowerDistribution = []\n            for a in self.r.core:\n                scaledPower = []\n                assemPower = sum(b.p.power for b in a)\n                for b in a:\n                    scaledPower.append(b.p.power / assemPower)\n\n                scaledCorePowerDistribution.append(scaledPower)\n\n            return scaledCorePowerDistribution\n\n        return None\n\n    @staticmethod\n    def getOptionsCls():\n        \"\"\"\n        Get a blank options object.\n\n        Subclass this to allow generic updating of options.\n        \"\"\"\n        return GlobalFluxOptions\n\n    @staticmethod\n    def getExecuterCls():\n        return GlobalFluxExecuter\n\n    def getExecuterOptions(self, label=None):\n        \"\"\"\n      
  Get an executer options object populated from current user settings and reactor.\n\n        If you want to set settings more deliberately (e.g. to specify a cross section\n        library rather than use an auto-derived name), use ``getOptionsCls`` and build\n        your own.\n        \"\"\"\n        opts = self.getOptionsCls()(label)\n        opts.fromUserSettings(self.cs)\n        opts.fromReactor(self.r)\n        return opts\n\n    def getExecuter(self, options=None, label=None):\n        \"\"\"\n        Get executer object for performing custom client calcs.\n\n        This allows plugins to update options in a somewhat generic\n        way. For example, reactivity coefficients plugin may want to\n        request adjoint flux.\n        \"\"\"\n        if options and label:\n            raise ValueError(\n                f\"Cannot supply a label (`{label}`) and options at the same time. Apply label to options object first.\"\n            )\n        opts = options or self.getExecuterOptions(label)\n        executer = self.getExecuterCls()(options=opts, reactor=self.r)\n        return executer\n\n    def calculateKeff(self, label=\"keff\"):\n        \"\"\"\n        Run global flux with current user options and just return keff without applying it.\n\n        Used for things like direct-eigenvalue reactivity coefficients and CR worth iterations.\n        \"\"\"\n        executer = self.getExecuter(label=label)\n        executer.options.applyResultsToReactor = False\n        executer.options.calcReactionRatesOnMeshConversion = False\n        output = executer.run()\n        return output.getKeff()\n\n    @staticmethod\n    def getLabel(caseTitle, cycle, node, iteration=None):\n        \"\"\"\n        Make a label (input/output file name) for the executer based on cycle, node, iteration.\n\n        Parameters\n        ----------\n        caseTitle : str, required\n            The caseTitle for the ARMI run\n        cycle : int, required\n            The cycle 
number\n        node : int, required\n            The time node index\n        iteration : int, optional\n            The coupled iteration index\n        \"\"\"\n        if iteration is not None:\n            return f\"{caseTitle}-flux-c{cycle}n{node}i{iteration}\"\n        else:\n            return f\"{caseTitle}-flux-c{cycle}n{node}\"\n\n\nclass GlobalFluxOptions(executers.ExecutionOptions):\n    \"\"\"Data structure representing common options in Global Flux Solvers.\n\n    .. impl:: Options for neutronics solvers.\n        :id: I_ARMI_FLUX_OPTIONS\n        :implements: R_ARMI_FLUX_OPTIONS\n\n        This class functions as a data structure for setting and retrieving\n        execution options for performing flux evaluations, these options\n        involve:\n\n        * What sort of problem is to be solved, i.e. real/adjoint,\n          eigenvalue/fixed-source, neutron/gamma, boundary conditions\n        * Convergence criteria for iterative algorithms\n        * Geometry type and mesh conversion details\n        * Specific parameters to be calculated after flux has been evaluated\n\n        These options can be retrieved by directly accessing class members. The\n        options are set by specifying a :py:class:`Settings\n        <armi.settings.caseSettings.Settings>` object and optionally specifying\n        a :py:class:`Reactor <armi.reactor.reactors.Reactor>` object.\n\n    Attributes\n    ----------\n    adjoint : bool\n        True if the ``CONF_NEUTRONICS_TYPE`` setting is set to ``adjoint`` or ``real``.\n    calcReactionRatesOnMeshConversion : bool\n        This option is used to recalculate reaction rates after a mesh\n        conversion and remapping of neutron flux. 
This can be disabled\n        in certain global flux implementations if reaction rates are not\n        required, but by default it is enabled.\n    eigenvalueProblem : bool\n        Whether this is a eigenvalue problem or a fixed source problem\n    includeFixedSource : bool\n        This can happen in eig if Fredholm Alternative satisfied.\n    photons : bool\n        Run the photon/gamma uniform mesh converter?\n    real : bool\n        True if  ``CONF_NEUTRONICS_TYPE`` setting is set to ``real``.\n    aclpDoseLimit : float\n        Dose limit in dpa used to position the above-core load pad (if one exists)\n    boundaries : str\n        External Neutronic Boundary Conditions. Reflective does not include axial.\n    cs : Settings\n        Settings for this run\n    detailedAxialExpansion : bool\n        Turn on detailed axial expansion? from settings\n    dpaPerFluence : float\n        A quick and dirty conversion that is used to get dpaPeak\n    energyDepoCalcMethodStep : str\n        For gamma transport/normalization\n    epsEigenvalue : float\n        Convergence criteria for calculating the eigenvalue in the global flux solver\n    epsFissionSourceAvg : float\n        Convergence criteria for average fission source, from settings\n    epsFissionSourcePoint : float\n        Convergence criteria for point fission source, from settings\n    geomType : geometry.GeomType\n        Reactor Core geometry type (HEX, RZ, RZT, etc)\n    hasNonUniformAssems: bool\n        Has any non-uniform assembly flags, from settings\n    isRestart : bool\n        Restart global flux case using outputs from last time as a guess\n    kernelName : str\n        The neutronics / depletion solver for global flux solve.\n    loadPadElevation : float\n        The elevation of the bottom of the above-core load pad (ACLP) from\n        the bottom of the upper grid plate (in cm).\n    loadPadLength : float\n        The length of the load pad. 
Used to compute average and peak dose.\n    maxOuters : int\n        XY and Axial partial current sweep max outer iterations.\n    savePhysicsFilesList : bool\n        Is this timestamp in the list of savePhysicsFiles in the settings?\n    symmetry : str\n        Reactor symmetry: full core, third-core, etc\n    xsKernel : str\n        Lattice Physics Kernel, from settings\n    \"\"\"\n\n    def __init__(self, label: Optional[str] = None):\n        executers.ExecutionOptions.__init__(self, label)\n        # have defaults\n        self.adjoint: bool = False\n        self.calcReactionRatesOnMeshConversion: bool = True\n        self.eigenvalueProblem: bool = True\n        self.includeFixedSource: bool = False\n        self.photons: bool = False\n        self.real: bool = True\n\n        # no defaults\n        self.aclpDoseLimit: Optional[float] = None\n        self.boundaries: Optional[str] = None\n        self.cs: Optional[Settings] = None\n        self.detailedAxialExpansion: Optional[bool] = None\n        self.dpaPerFluence: Optional[float] = None\n        self.energyDepoCalcMethodStep: Optional[str] = None\n        self.epsEigenvalue: Optional[float] = None\n        self.epsFissionSourceAvg: Optional[float] = None\n        self.epsFissionSourcePoint: Optional[float] = None\n        self.geomType: Optional[geometry.GeomType] = None\n        self.hasNonUniformAssems: Optional[bool] = None\n        self.isRestart: Optional[bool] = None\n        self.kernelName: Optional[str] = None\n        self.loadPadElevation: Optional[float] = None\n        self.loadPadLength: Optional[float] = None\n        self.maxOuters: Optional[int] = None\n        self.savePhysicsFilesList: Optional[bool] = None\n        self.symmetry: Optional[str] = None\n        self.xsKernel: Optional[str] = None\n\n    def fromUserSettings(self, cs: Settings):\n        \"\"\"\n        Map user input settings from cs to a set of specific global flux options.\n\n        This is not required; these 
options can alternatively be set programmatically.\n        \"\"\"\n        from armi.physics.neutronics.settings import (\n            CONF_ACLP_DOSE_LIMIT,\n            CONF_BOUNDARIES,\n            CONF_DPA_PER_FLUENCE,\n            CONF_EIGEN_PROB,\n            CONF_LOAD_PAD_ELEVATION,\n            CONF_LOAD_PAD_LENGTH,\n            CONF_NEUTRONICS_KERNEL,\n            CONF_RESTART_NEUTRONICS,\n            CONF_XS_KERNEL,\n        )\n        from armi.settings.fwSettings.globalSettings import (\n            CONF_DETAILED_AXIAL_EXPANSION,\n            CONF_NON_UNIFORM_ASSEM_FLAGS,\n            CONF_PHYSICS_FILES,\n        )\n\n        self.kernelName = cs[CONF_NEUTRONICS_KERNEL]\n        self.setRunDirFromCaseTitle(cs.caseTitle)\n        self.isRestart = cs[CONF_RESTART_NEUTRONICS]\n        self.adjoint = neutronics.adjointCalculationRequested(cs)\n        self.real = neutronics.realCalculationRequested(cs)\n        self.detailedAxialExpansion = cs[CONF_DETAILED_AXIAL_EXPANSION]\n        self.hasNonUniformAssems = any([Flags.fromStringIgnoreErrors(f) for f in cs[CONF_NON_UNIFORM_ASSEM_FLAGS]])\n        self.eigenvalueProblem = cs[CONF_EIGEN_PROB]\n\n        # dose/dpa specific (should be separate subclass?)\n        self.dpaPerFluence = cs[CONF_DPA_PER_FLUENCE]\n        self.aclpDoseLimit = cs[CONF_ACLP_DOSE_LIMIT]\n        self.loadPadElevation = cs[CONF_LOAD_PAD_ELEVATION]\n        self.loadPadLength = cs[CONF_LOAD_PAD_LENGTH]\n        self.boundaries = cs[CONF_BOUNDARIES]\n        self.xsKernel = cs[CONF_XS_KERNEL]\n        self.cs = cs\n        self.savePhysicsFilesList = cs[CONF_PHYSICS_FILES]\n\n    def fromReactor(self, reactor: reactors.Reactor):\n        self.geomType = reactor.core.geomType\n        self.symmetry = reactor.core.symmetry\n\n        cycleNodeStamp = f\"{reactor.p.cycle:03d}{reactor.p.timeNode:03d}\"\n        if self.savePhysicsFilesList:\n            self.savePhysicsFiles = cycleNodeStamp in self.savePhysicsFilesList\n        else:\n     
       self.savePhysicsFiles = False\n\n\nclass GlobalFluxExecuter(executers.DefaultExecuter):\n    \"\"\"\n    A short-lived object that coordinates the prep, execution, and processing of a flux solve.\n\n    There are many forms of global flux solves:\n\n    * Eigenvalue/Fixed source\n    * Adjoint/real\n    * Diffusion/PN/SN/MC\n    * Finite difference/nodal\n\n    There are also many reasons someone might need a flux solve:\n\n    * Update multigroup flux and power on reactor and compute keff\n    * Just compute keff in a temporary perturbed state\n    * Just compute flux and adjoint flux on a state to\n\n    There may also be some required transformations when a flux solve is done:\n\n    * Add/remove edge assemblies\n    * Apply a uniform axial mesh\n\n    There are also I/O performance complexities, including running on fast local paths\n    and copying certain user-defined files back to the working directory on error\n    or completion. Given all these options and possible needs for information from\n    global flux, this class provides a unified interface to everything.\n\n    .. impl:: Ensure the mesh in the reactor model is appropriate for neutronics solver execution.\n        :id: I_ARMI_FLUX_GEOM_TRANSFORM\n        :implements: R_ARMI_FLUX_GEOM_TRANSFORM\n\n        The primary purpose of this class is perform geometric and mesh\n        transformations on the reactor model to ensure a flux evaluation can\n        properly perform. 
This includes:\n\n        * Applying a uniform axial mesh for the 3D flux solve\n        * Expanding symmetrical geometries to full-core if necessary\n        * Adding/removing edge assemblies if necessary\n        * Undoing any transformations that might affect downstream calculations\n    \"\"\"\n\n    def __init__(self, options: GlobalFluxOptions, reactor):\n        executers.DefaultExecuter.__init__(self, options, reactor)\n        self.options: GlobalFluxOptions\n        self.geomConverters: Dict[str, geometryConverters.GeometryConverter] = {}\n\n    def _performGeometryTransformations(self, makePlots=False):\n        \"\"\"\n        Apply geometry conversions to make reactor work in neutronics.\n\n        There are two conditions where things must happen:\n\n        1. If you are doing finite-difference, you need to add the edge assemblies (fast).\n           For this, we just modify the reactor in place\n\n        2. If you are doing detailed axial expansion, you need to average out the axial mesh (slow!)\n           For this we need to create a whole copy of the reactor and use that.\n\n        In both cases, we need to undo the modifications between reading the output\n        and applying the result to the data model.\n\n        See Also\n        --------\n        _undoGeometryTransformations\n        \"\"\"\n        if any(self.geomConverters):\n            raise RuntimeError(\n                \"The reactor has been transformed, but not restored to the original.\\n\"\n                + \"Geometry converter is set to {} \\n.\".format(self.geomConverters)\n                + \"This is a programming error and requires further investigation.\"\n            )\n        neutronicsReactor = self.r\n        converter = self.geomConverters.get(\"axial\")\n        if not converter:\n            if self.options.detailedAxialExpansion or self.options.hasNonUniformAssems:\n                converter = uniformMesh.converterFactory(self.options)\n                
converter.convert(self.r)\n                neutronicsReactor = converter.convReactor\n\n                if makePlots:\n                    converter.plotConvertedReactor()\n\n                self.geomConverters[\"axial\"] = converter\n\n        if self.edgeAssembliesAreNeeded():\n            converter = self.geomConverters.get(\"edgeAssems\", geometryConverters.EdgeAssemblyChanger())\n            converter.addEdgeAssemblies(neutronicsReactor.core)\n            self.geomConverters[\"edgeAssems\"] = converter\n\n        self.r = neutronicsReactor\n\n    def _undoGeometryTransformations(self):\n        \"\"\"\n        Restore original data model state and/or apply results to it.\n\n        Notes\n        -----\n        These transformations occur in the opposite order than that which they were applied in.\n        Otherwise, the uniform mesh guy would try to add info to assem's on the source reactor\n        that don't exist.\n\n        See Also\n        --------\n        _performGeometryTransformations\n        \"\"\"\n        geomConverter = self.geomConverters.get(\"edgeAssems\")\n        if geomConverter:\n            geomConverter.scaleParamsRelatedToSymmetry(\n                self.r.core, paramsToScaleSubset=self.options.paramsToScaleSubset\n            )\n\n            # Resets the reactor core model to the correct symmetry and removes\n            # stored attributes on the converter to ensure that there is\n            # state data that is long-lived on the object in case the garbage\n            # collector does not remove it. 
Additionally, this will reset the\n            # global assembly counter.\n            geomConverter.removeEdgeAssemblies(self.r.core)\n\n        meshConverter = self.geomConverters.get(\"axial\")\n\n        if meshConverter:\n            if self.options.applyResultsToReactor or self.options.hasNonUniformAssems:\n                meshConverter.applyStateToOriginal()\n            self.r = meshConverter._sourceReactor\n\n            # Resets the stored attributes on the converter to\n            # ensure that there is state data that is long-lived on the\n            # object in case the garbage collector does not remove it.\n            # Additionally, this will reset the global assembly counter.\n            meshConverter.reset()\n\n        # clear the converters in case this function gets called twice\n        self.geomConverters = {}\n\n    def edgeAssembliesAreNeeded(self) -> bool:\n        \"\"\"\n        True if edge assemblies are needed in this calculation.\n\n        We only need them in finite difference cases that are not full core.\n        \"\"\"\n        return (\n            \"FD\" in self.options.kernelName\n            and self.options.symmetry.domain == geometry.DomainType.THIRD_CORE\n            and self.options.symmetry.boundary == geometry.BoundaryType.PERIODIC\n            and self.options.geomType == geometry.GeomType.HEX\n        )\n\n\nclass GlobalFluxResultMapper(interfaces.OutputReader):\n    \"\"\"\n    A short-lived class that maps neutronics output data to a reactor mode.\n\n    Neutronics results can come from a file or a pipe or in memory.\n    This is always subclassed for specific neutronics runs but contains\n    some generic methods that are universally useful for\n    any global flux calculation. 
These are mostly along the lines of\n    information that can be derived from other information, like\n    dpa rate coming from dpa deltas and cycle length.\n    \"\"\"\n\n    def getKeff(self):\n        raise NotImplementedError()\n\n    def clearFlux(self):\n        \"\"\"Delete flux on all blocks. Needed to prevent stale flux when partially reloading.\"\"\"\n        for b in self.r.core.iterBlocks():\n            b.p.mgFlux = []\n            b.p.adjMgFlux = []\n            b.p.mgFluxGamma = []\n            b.p.extSrc = []\n\n    def _renormalizeNeutronFluxByBlock(self, renormalizationCorePower):\n        \"\"\"\n        Normalize the neutron flux within each block to meet the renormalization power.\n\n        Parameters\n        ----------\n        renormalizationCorePower: float\n            Specified power to renormalize the neutron flux for using the isotopic energy\n            generation rates on the cross section libraries (in Watts)\n\n        See Also\n        --------\n        getTotalEnergyGenerationConstants\n        \"\"\"\n        # update the block power param here as well so\n        # the ratio/multiplications below are consistent\n        currentCorePower = 0.0\n        for b in self.r.core.iterBlocks():\n            # The multi-group flux is volume integrated, so J/cm * n-cm/s gives units of Watts\n            b.p.power = np.dot(b.getTotalEnergyGenerationConstants(), b.getIntegratedMgFlux())\n            b.p.flux = sum(b.getMgFlux())\n            currentCorePower += b.p.power\n\n        powerRatio = renormalizationCorePower / currentCorePower\n        runLog.info(\n            \"Renormalizing the neutron flux in {:<s} by a factor of {:<8.5e}, \"\n            \"which is derived from the current core power of {:<8.5e} W and \"\n            \"desired power of {:<8.5e} W\".format(self.r.core, powerRatio, currentCorePower, renormalizationCorePower)\n        )\n        for b in self.r.core.iterBlocks():\n            b.p.mgFlux *= powerRatio\n         
   b.p.flux *= powerRatio\n            b.p.fluxPeak *= powerRatio\n            b.p.power *= powerRatio\n            b.p.pdens = b.p.power / b.getVolume()\n\n    def _updateDerivedParams(self):\n        \"\"\"Computes some params that are derived directly from flux and power parameters.\"\"\"\n        for maxParamKey in [\"percentBu\", \"pdens\"]:\n            maxVal = self.r.core.getMaxBlockParam(maxParamKey, Flags.FUEL)\n            if maxVal != 0.0:\n                self.r.core.p[\"max\" + maxParamKey] = maxVal\n\n        maxFlux = self.r.core.getMaxBlockParam(\"flux\")\n        self.r.core.p.maxFlux = maxFlux\n\n        conversion = units.CM2_PER_M2 / units.WATTS_PER_MW\n        for a in self.r.core:\n            area = a.getArea()\n            for b in a:\n                b.p.arealPd = b.p.power / area * conversion\n            a.p.arealPd = a.calcTotalParam(\"arealPd\")\n        self.r.core.p.maxPD = self.r.core.getMaxParam(\"arealPd\")\n        self._updateAssemblyLevelParams()\n\n    def getDpaXs(self, b: Block):\n        \"\"\"Determine which cross sections should be used to compute dpa for a block.\n\n        Parameters\n        ----------\n        b: Block\n            The block we want the cross sections for\n\n        Returns\n        -------\n            list : cross section values\n        \"\"\"\n        from armi.physics.neutronics.settings import (\n            CONF_DPA_XS_SET,\n            CONF_GRID_PLATE_DPA_XS_SET,\n        )\n\n        if self.cs[CONF_GRID_PLATE_DPA_XS_SET] and b.hasFlags(Flags.GRID_PLATE):\n            dpaXsSetName = self.cs[CONF_GRID_PLATE_DPA_XS_SET]\n        else:\n            dpaXsSetName = self.cs[CONF_DPA_XS_SET]\n\n        try:\n            return constants.DPA_CROSS_SECTIONS[dpaXsSetName]\n        except KeyError:\n            raise KeyError(\"DPA cross section set {} does not exist\".format(dpaXsSetName))\n\n    def getBurnupPeakingFactor(self, b: Block):\n        \"\"\"\n        Get the radial peaking factor to be 
applied to burnup and DPA for a Block.\n\n        This may be informed by previous runs which used\n        detailed pin reconstruction and rotation. In that case,\n        it should be set on the cs setting ``burnupPeakingFactor``.\n\n        Otherwise, it just takes the current flux peaking, which\n        is typically conservatively high.\n\n        Parameters\n        ----------\n        b: Block\n            The block we want the peaking factor for\n\n        Returns\n        -------\n        burnupPeakingFactor : float\n            The peak/avg factor for burnup and DPA.\n        \"\"\"\n        burnupPeakingFactor = self.cs[\"burnupPeakingFactor\"]\n        if not burnupPeakingFactor and b.p.fluxPeak:\n            burnupPeakingFactor = b.p.fluxPeak / b.p.flux\n        elif not burnupPeakingFactor:\n            # no peak available. Finite difference model?\n            # Use 0.0 for peaking so that there isn't misuse of peaking values that don't actually have peaking applied.\n            # Uet self.cs[\"burnupPeakingFactor\"] or b.p.fluxPeak for different behavior\n            burnupPeakingFactor = 0.0\n\n        return burnupPeakingFactor\n\n    def updateDpaRate(self, blockList=None):\n        \"\"\"\n        Update state parameters that can be known right after the flux is computed.\n\n        See Also\n        --------\n        updateFluenceAndDpa : uses values computed here to update cumulative dpa\n        \"\"\"\n        if blockList is None:\n            blockList = self.r.core.iterBlocks()\n\n        hasDPA = False\n        for b in blockList:\n            xs = self.getDpaXs(b)\n            hasDPA = True\n            flux = b.getMgFlux()  # n/cm^2/s\n            dpaPerSecond = computeDpaRate(flux, xs)\n            b.p.detailedDpaPeakRate = dpaPerSecond * self.getBurnupPeakingFactor(b)\n            b.p.detailedDpaRate = dpaPerSecond\n\n        if not hasDPA:\n            return\n\n        peakRate = 
self.r.core.getMaxBlockParam(\"detailedDpaPeakRate\", typeSpec=Flags.GRID_PLATE, absolute=False)\n        self.r.core.p.peakGridDpaAt60Years = peakRate * 60.0 * units.SECONDS_PER_YEAR\n\n        # also update maxes at this point (since this runs at every timenode, not just those w/ depletion steps)\n        self.updateMaxDpaParams()\n\n    def updateMaxDpaParams(self):\n        \"\"\"\n        Update params that track the peak dpa.\n\n        Only consider fuel because CRs, etc. aren't always reset.\n        \"\"\"\n        maxDpa = self.r.core.getMaxBlockParam(\"detailedDpaPeak\", Flags.FUEL)\n        self.r.core.p.maxdetailedDpaPeak = maxDpa\n        self.r.core.p.maxDPA = maxDpa\n\n        # add grid plate max\n        maxGridDose = self.r.core.getMaxBlockParam(\"detailedDpaPeak\", Flags.GRID_PLATE)\n        self.r.core.p.maxGridDpa = maxGridDose\n\n    def _updateAssemblyLevelParams(self):\n        for a in self.r.core:\n            totalAbs = 0.0  # for calculating assembly average k-inf\n            totalSrc = 0.0\n            for b in a:\n                totalAbs += b.p.rateAbs\n                totalSrc += b.p.rateProdNet\n\n            a.p.maxPercentBu = a.getMaxParam(\"percentBu\")\n            a.p.maxDpaPeak = a.getMaxParam(\"detailedDpaPeak\")\n            a.p.timeToLimit = a.getMinParam(\"timeToLimit\", Flags.FUEL)\n            a.p.buLimit = a.getMaxParam(\"buLimit\")\n\n            if totalAbs > 0:\n                a.p.kInf = totalSrc / totalAbs  # assembly average k-inf.\n\n\ndef computeDpaRate(mgFlux, dpaXs):\n    r\"\"\"\n    Compute the DPA rate incurred by exposure of a certain flux spectrum.\n\n    .. impl:: Compute DPA rates.\n        :id: I_ARMI_FLUX_DPA\n        :implements: R_ARMI_FLUX_DPA\n\n        This method calculates DPA rates using the inputted multigroup flux and DPA cross sections.\n        Displacements calculated by displacement cross-section:\n\n        .. 
math::\n            :nowrap:\n\n            \\begin{aligned}\n            \\text{Displacement rate} &= \\phi N_{\\text{HT9}} \\sigma  \\\\\n            &= (\\#/\\text{cm}^2/s) \\cdot (1/cm^3) \\cdot (\\text{barn})\\\\\n            &= (\\#/\\text{cm}^5/s) \\cdot  \\text{(barn)} * 10^{-24} \\text{cm}^2/\\text{barn} \\\\\n            &= \\#/\\text{cm}^3/s\n            \\end{aligned}\n\n\n        ::\n\n            DPA rate = displacement density rate / (number of atoms/cc)\n                    = dr [#/cm^3/s] / (nHT9)  [1/cm^3]\n                    = flux * barn * 1e-24\n\n\n        .. math::\n\n            \\frac{\\text{dpa}}{s}  = \\frac{\\phi N \\sigma}{N} = \\phi * \\sigma\n\n        the number density of the structural material cancels out. It's in the macroscopic\n        cross-section and in the original number of atoms.\n\n    Parameters\n    ----------\n    mgFlux : list\n        multigroup neutron flux in #/cm^2/s\n\n    dpaXs : list\n        DPA cross section in barns to convolute with flux to determine DPA rate\n\n    Returns\n    -------\n    dpaPerSecond : float\n        The dpa/s in this material due to this flux\n\n    Raises\n    ------\n    RuntimeError\n       Negative dpa rate.\n    \"\"\"\n    displacements = 0.0\n    if len(mgFlux) != len(dpaXs):\n        runLog.warning(\n            \"Multigroup flux of length {} is incompatible with dpa cross section of length {};\"\n            \"dpa rate will be set do 0.0\".format(len(mgFlux), len(dpaXs)),\n            single=True,\n        )\n        return displacements\n    for flux, barns in zip(mgFlux, dpaXs):\n        displacements += flux * barns\n    dpaPerSecond = displacements * units.CM2_PER_BARN\n\n    if dpaPerSecond < 0:\n        runLog.warning(\n            \"Negative DPA rate calculated at {}\".format(dpaPerSecond),\n            single=True,\n            label=\"negativeDpaPerSecond\",\n        )\n        # ensure physical meaning of dpaPerSecond, it is likely just slightly negative\n        
if dpaPerSecond < -1.0e-10:\n            raise RuntimeError(\"Calculated DPA rate is substantially negative at {}\".format(dpaPerSecond))\n        dpaPerSecond = 0.0\n\n    return dpaPerSecond\n\n\ndef calcReactionRates(obj, keff, lib):\n    r\"\"\"\n    Compute 1-group reaction rates for this object (usually a block).\n\n    .. impl:: Return the reaction rates for a given ArmiObject\n        :id: I_ARMI_FLUX_RX_RATES\n        :implements: R_ARMI_FLUX_RX_RATES\n\n        This method computes 1-group reaction rates for the inputted\n        :py:class:`ArmiObject <armi.reactor.composites.ArmiObject>` These\n        reaction rates include:\n\n        * fission\n        * nufission\n        * n2n\n        * absorption\n\n        Scatter could be added as well. This function is quite slow so it is\n        skipped for now as it is uncommonly needed.\n\n        Reaction rates are:\n\n        .. math::\n\n            \\Sigma \\phi = \\sum_{\\text{nuclides}} \\sum_{\\text{energy}} \\Sigma\n            \\phi\n\n        The units of :math:`N \\sigma \\phi` are::\n\n            [#/bn-cm] * [bn] * [#/cm^2/s] = [#/cm^3/s]\n\n        The group-averaged microscopic cross section is:\n\n        .. math::\n\n            \\sigma_g = \\frac{\\int_{E g}^{E_{g+1}} \\phi(E)  \\sigma(E)\n            dE}{\\int_{E_g}^{E_{g+1}} \\phi(E) dE}\n\n    Parameters\n    ----------\n    obj : Block\n        The object to compute reaction rates on. Notionally this could be upgraded to be\n        any kind of ArmiObject but with params defined as they are it currently is only\n        implemented for a block.\n\n    keff : float\n        The keff of the core. 
This is required to get the neutron production rate correct\n        via the neutron balance statement (since nuSigF has a 1/keff term).\n\n    lib : XSLibrary\n        Microscopic cross sections to use in computing the reaction rates.\n    \"\"\"\n    rate = {}\n    for simple in RX_PARAM_NAMES:\n        rate[simple] = 0.0\n\n    numberDensities = obj.getNumberDensities()\n\n    for nucName, numberDensity in numberDensities.items():\n        if numberDensity == 0.0:\n            continue\n        nucrate = {}\n        for simple in RX_PARAM_NAMES:\n            nucrate[simple] = 0.0\n\n        nucMc = lib.getNuclide(nucName, obj.getMicroSuffix())\n        micros = nucMc.micros\n\n        # absorption is fission + capture (no n2n here)\n        mgFlux = obj.getMgFlux()\n        for name in RX_ABS_MICRO_LABELS:\n            for g, (groupFlux, xs) in enumerate(zip(mgFlux, micros[name])):\n                dphi = numberDensity * groupFlux\n                nucrate[\"rateAbs\"] += dphi * xs\n\n                if name != \"fission\":\n                    nucrate[\"rateCap\"] += dphi * xs\n                else:\n                    nucrate[\"rateFis\"] += dphi * xs\n                    # scale nu by keff.\n                    nucrate[\"rateProdFis\"] += dphi * xs * micros.neutronsPerFission[g] / keff\n\n        for groupFlux, n2nXs in zip(mgFlux, micros.n2n):\n            # this n2n xs is reaction based. Multiply by 2.\n            dphi = numberDensity * groupFlux\n            nucrate[\"rateProdN2n\"] += 2.0 * dphi * n2nXs\n\n        for simple in RX_PARAM_NAMES:\n            if nucrate[simple]:\n                rate[simple] += nucrate[simple]\n\n    for paramName, val in rate.items():\n        obj.p[paramName] = val  # put in #/cm^3/s\n\n    vFuel = obj.getComponentAreaFrac(Flags.FUEL) if rate[\"rateFis\"] > 0.0 else 1.0\n    obj.p.fisDens = rate[\"rateFis\"] / vFuel\n    obj.p.fisDensHom = rate[\"rateFis\"]\n"
  },
  {
    "path": "armi/physics/neutronics/globalFlux/tests/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"
  },
  {
    "path": "armi/physics/neutronics/globalFlux/tests/test_globalFluxInterface.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for generic global flux interface.\"\"\"\n\nimport unittest\nfrom unittest.mock import patch\n\nimport numpy as np\n\nfrom armi import settings\nfrom armi.nuclearDataIO.cccc import isotxs\nfrom armi.physics.neutronics.globalFlux import globalFluxInterface\nfrom armi.physics.neutronics.settings import (\n    CONF_GRID_PLATE_DPA_XS_SET,\n    CONF_XS_KERNEL,\n)\nfrom armi.reactor import geometry\nfrom armi.reactor.blocks import HexBlock\nfrom armi.reactor.flags import Flags\nfrom armi.reactor.tests import test_blocks, test_reactors\nfrom armi.tests import ISOAA_PATH\n\n\nclass MockReactorParams:\n    def __init__(self):\n        self.cycle = 1\n        self.timeNode = 2\n\n\nclass MockCoreParams:\n    pass\n\n\nclass MockCore:\n    def __init__(self):\n        # just pick a random geomType\n        self.geomType = geometry.GeomType.CARTESIAN\n        self.symmetry = \"full\"\n        self.p = MockCoreParams()\n\n\nclass MockReactor:\n    def __init__(self):\n        self.core = MockCore()\n        self.o = None\n        self.p = MockReactorParams()\n\n\nclass MockGlobalFluxInterface(globalFluxInterface.GlobalFluxInterface):\n    \"\"\"\n    Add fake keff calc to a the general gf interface.\n\n    This simulates a 1000 pcm keff increase over 1 step.\n    \"\"\"\n\n    def interactBOC(self, cycle=None):\n        
globalFluxInterface.GlobalFluxInterface.interactBOC(self, cycle=cycle)\n        self.r.core.p.keff = 1.00\n\n    def interactEveryNode(self, cycle, node):\n        globalFluxInterface.GlobalFluxInterface.interactEveryNode(self, cycle, node)\n        self.r.core.p.keff = 1.01\n\n\nclass MockGlobalFluxWithExecuters(globalFluxInterface.GlobalFluxInterfaceUsingExecuters):\n    def getExecuterCls(self):\n        return MockGlobalFluxExecuter\n\n\nclass MockGlobalFluxWithExecutersNonUniform(MockGlobalFluxWithExecuters):\n    def getExecuterOptions(self, label=None):\n        \"\"\"Return modified executerOptions.\"\"\"\n        opts = globalFluxInterface.GlobalFluxInterfaceUsingExecuters.getExecuterOptions(self, label=label)\n        opts.hasNonUniformAssems = True  # to increase test coverage\n        return opts\n\n\nclass MockGlobalFluxExecuter(globalFluxInterface.GlobalFluxExecuter):\n    \"\"\"Tests for code that uses Executers, which rely on OutputReaders to update state.\"\"\"\n\n    def _readOutput(self):\n        class MockOutputReader:\n            def apply(self, r):\n                r.core.p.keff += 0.01\n\n            def getKeff(self):\n                return 1.05\n\n        return MockOutputReader()\n\n\nclass TestGlobalFluxOptions(unittest.TestCase):\n    \"\"\"Tests for GlobalFluxOptions.\"\"\"\n\n    def test_readFromSettings(self):\n        \"\"\"Test reading global flux options from case settings.\n\n        .. test:: Tests GlobalFluxOptions.\n            :id: T_ARMI_FLUX_OPTIONS_CS\n            :tests: R_ARMI_FLUX_OPTIONS\n        \"\"\"\n        cs = settings.Settings()\n        opts = globalFluxInterface.GlobalFluxOptions(\"neutronics-run\")\n        opts.fromUserSettings(cs)\n        self.assertFalse(opts.adjoint)\n\n    def test_readFromReactors(self):\n        \"\"\"Test reading global flux options from reactor objects.\n\n        .. 
test:: Tests GlobalFluxOptions.\n            :id: T_ARMI_FLUX_OPTIONS_R\n            :tests: R_ARMI_FLUX_OPTIONS\n        \"\"\"\n        reactor = MockReactor()\n        opts = globalFluxInterface.GlobalFluxOptions(\"neutronics-run\")\n        opts.fromReactor(reactor)\n        self.assertEqual(opts.geomType, geometry.GeomType.CARTESIAN)\n        self.assertFalse(opts.savePhysicsFiles)\n\n    def test_savePhysicsFiles(self):\n        reactor = MockReactor()\n        opts = globalFluxInterface.GlobalFluxOptions(\"neutronics-run\")\n\n        # savePhysicsFilesList matches MockReactor parameters\n        opts.savePhysicsFilesList = [\"001002\"]\n        opts.fromReactor(reactor)\n        self.assertTrue(opts.savePhysicsFiles)\n\n        # savePhysicsFilesList does not match MockReactor parameters\n        opts.savePhysicsFilesList = [\"001000\"]\n        opts.fromReactor(reactor)\n        self.assertFalse(opts.savePhysicsFiles)\n\n\nclass TestGFI(unittest.TestCase):\n    def test_computeDpaRate(self):\n        \"\"\"\n        Compute DPA and DPA rates from multi-group neutron flux and cross sections.\n\n        .. 
test:: Compute DPA rates.\n            :id: T_ARMI_FLUX_DPA\n            :tests: R_ARMI_FLUX_DPA\n        \"\"\"\n        xs = [1, 2, 3]\n        flx = [0.5, 0.75, 2]\n        res = globalFluxInterface.computeDpaRate(flx, xs)\n        self.assertEqual(res, 10**-24 * (0.5 + 1.5 + 6))\n\n    def test_interaction(self):\n        \"\"\"\n        Ensure the basic interaction hooks work.\n\n        Check that a 1000 pcm rx swing is observed due to the mock.\n        \"\"\"\n        cs = settings.Settings()\n        cs[\"burnSteps\"] = 2\n        _o, r = test_reactors.loadTestReactor(inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\")\n        gfi = MockGlobalFluxInterface(r, cs)\n        bocKeff = 1.1\n        r.core.p.keffUnc = 1.1\n        gfi.interactBOC()\n\n        r.p.cycle, r.p.timeNode = 0, 0\n        gfi.interactEveryNode(0, 0)\n        self.assertAlmostEqual(gfi._bocKeff, r.core.p.keffUnc)\n        r.core.p.keffUnc = 1.05\n        r.p.cycle, r.p.timeNode = 0, 1\n        gfi.interactEveryNode(0, 1)\n        # doesn't change since its not the first node\n        self.assertAlmostEqual(gfi._bocKeff, bocKeff)\n        r.core.p.keffUnc = 1.01\n        r.p.cycle, r.p.timeNode = 0, 2\n        gfi.interactEveryNode(0, 2)\n        self.assertAlmostEqual(gfi._bocKeff, bocKeff)\n        self.assertAlmostEqual(r.core.p.rxSwing, -1e5 * (1.1 - 1.01) / (1.1 * 1.01))\n        gfi.interactBOC(0)\n        # now its zeroed at BOC\n        self.assertAlmostEqual(r.core.p.rxSwing, 0)\n\n    def test_getIOFileNames(self):\n        cs = settings.Settings()\n        gfi = MockGlobalFluxInterface(MockReactor(), cs)\n        inf, _outf, _stdname = gfi.getIOFileNames(1, 2, 1)\n        self.assertEqual(inf, \"armi001_2_001.GlobalFlux.inp\")\n\n    def test_getHistoryParams(self):\n        params = globalFluxInterface.GlobalFluxInterface.getHistoryParams()\n        self.assertEqual(len(params), 3)\n        self.assertIn(\"detailedDpa\", params)\n\n    def 
test_checkEnergyBalance(self):\n        \"\"\"Test energy balance check.\n\n        .. test:: Block-wise power is consistent with reactor data model power.\n            :id: T_ARMI_FLUX_CHECK_POWER\n            :tests: R_ARMI_FLUX_CHECK_POWER\n        \"\"\"\n        cs = settings.Settings()\n        _o, r = test_reactors.loadTestReactor(inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\")\n        gfi = MockGlobalFluxInterface(r, cs)\n        self.assertEqual(gfi.checkEnergyBalance(), None)\n\n        # Test when nameplate power doesn't equal sum of block power\n        r.core.p.power = 1e-10\n        with self.assertRaises(ValueError):\n            gfi.checkEnergyBalance()\n\n\nclass TestGFIWithExecuters(unittest.TestCase):\n    \"\"\"Tests for the default global flux execution.\"\"\"\n\n    @classmethod\n    def setUpClass(cls):\n        cls.cs = settings.Settings()\n        cls.r = test_reactors.loadTestReactor(inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\")[1]\n\n    def setUp(self):\n        self.r.core.p.keff = 1.0\n        self.gfi = MockGlobalFluxWithExecuters(self.r, self.cs)\n\n    @patch(\"armi.physics.neutronics.globalFlux.globalFluxInterface.GlobalFluxExecuter._execute\")\n    @patch(\"armi.physics.neutronics.globalFlux.globalFluxInterface.GlobalFluxExecuter._performGeometryTransformations\")\n    def test_executerInteraction(self, mockGeometryTransform, mockExecute):\n        \"\"\"Run the global flux interface and executer though one time now.\n\n        .. 
test:: Run the global flux interface to check that the mesh converter is called before the neutronics solver.\n            :id: T_ARMI_FLUX_GEOM_TRANSFORM_ORDER\n            :tests: R_ARMI_FLUX_GEOM_TRANSFORM\n        \"\"\"\n        call_order = []\n        mockGeometryTransform.side_effect = lambda *a, **kw: call_order.append(mockGeometryTransform)\n        mockExecute.side_effect = lambda *a, **kw: call_order.append(mockExecute)\n        gfi = self.gfi\n        gfi.interactBOC()\n        gfi.interactEveryNode(0, 0)\n        self.assertEqual([mockGeometryTransform, mockExecute], call_order)\n\n    def test_calculateKeff(self):\n        self.assertEqual(self.gfi.calculateKeff(), 1.05)  # set in mock\n\n    def test_getExecuterCls(self):\n        class0 = globalFluxInterface.GlobalFluxInterfaceUsingExecuters.getExecuterCls()\n        self.assertEqual(class0, globalFluxInterface.GlobalFluxExecuter)\n\n    def test_setTightCouplingDefaults(self):\n        \"\"\"Assert that tight coupling defaults are only set if cs[\"tightCoupling\"]=True.\"\"\"\n        self.assertIsNone(self.gfi.coupler)\n        self._setTightCouplingTrue()\n        self.assertEqual(self.gfi.coupler.parameter, \"keff\")\n        self._setTightCouplingFalse()\n\n    def test_getTightCouplingValue(self):\n        \"\"\"Test getTightCouplingValue returns the correct value for keff and type for power.\"\"\"\n        self._setTightCouplingTrue()\n        self.assertEqual(self.gfi.getTightCouplingValue(), 1.0)  # set in setUp\n        self.gfi.coupler.parameter = \"power\"\n        for a in self.r.core:\n            for b in a:\n                b.p.power = 10.0\n        self.assertEqual(\n            self.gfi.getTightCouplingValue(),\n            self._getCouplingPowerDistributions(self.r.core),\n        )\n        self._setTightCouplingFalse()\n\n    @staticmethod\n    def _getCouplingPowerDistributions(core):\n        scaledPowers = []\n        for a in core:\n            assemblyPower = sum(b.p.power 
for b in a)\n            scaledPowers.append([b.p.power / assemblyPower for b in a])\n\n        return scaledPowers\n\n    def _setTightCouplingTrue(self):\n        self.cs[\"tightCoupling\"] = True\n        self.gfi._setTightCouplingDefaults()\n\n    def _setTightCouplingFalse(self):\n        self.cs[\"tightCoupling\"] = False\n\n\nclass TestGFIWithExecutersNonUniform(unittest.TestCase):\n    \"\"\"Tests for global flux execution with non-uniform assemblies.\"\"\"\n\n    @classmethod\n    def setUpClass(cls):\n        cs = settings.Settings()\n        _o, cls.r = test_reactors.loadTestReactor(inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\")\n        cls.r.core.p.keff = 1.0\n        cls.gfi = MockGlobalFluxWithExecutersNonUniform(cls.r, cs)\n\n    @patch(\"armi.reactor.converters.uniformMesh.converterFactory\")\n    def test_executerInteractionNonUniformAssems(self, mockConverterFactory):\n        \"\"\"Run the global flux interface with non-uniform assemblies.\n\n        This will serve as a broad end-to-end test of the interface, and also\n        stress test the mesh issues with non-uniform assemblies.\n\n        .. 
test:: Run the global flux interface to show the geometry converter is called when the\n            nonuniform mesh option is used.\n            :id: T_ARMI_FLUX_GEOM_TRANSFORM_CONV\n            :tests: R_ARMI_FLUX_GEOM_TRANSFORM\n        \"\"\"\n        gfi = self.gfi\n        gfi.interactBOC()\n        gfi.interactEveryNode(0, 0)\n        self.assertTrue(gfi.getExecuterOptions().hasNonUniformAssems)\n        mockConverterFactory.assert_called()\n\n    def test_calculateKeff(self):\n        self.assertEqual(self.gfi.calculateKeff(), 1.05)  # set in mock\n\n    def test_getExecuterCls(self):\n        class0 = globalFluxInterface.GlobalFluxInterfaceUsingExecuters.getExecuterCls()\n        self.assertEqual(class0, globalFluxInterface.GlobalFluxExecuter)\n\n\nclass TestGlobalFluxResultMapper(unittest.TestCase):\n    \"\"\"\n    Test that global flux result mappings run.\n\n    Notes\n    -----\n    This does not test that the flux mapping is correct. That has to be done\n    at another level.\n    \"\"\"\n\n    def test_mapper(self):\n        # Switch to MC2v2 setting to make sure the isotopic/elemental expansions are compatible with\n        # actually doing some math using the ISOAA test microscopic library\n        o, r = test_reactors.loadTestReactor(\n            customSettings={CONF_XS_KERNEL: \"MC2v2\"},\n            inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\",\n        )\n        applyDummyFlux(r)\n        r.core.lib = isotxs.readBinary(ISOAA_PATH)\n        mapper = globalFluxInterface.GlobalFluxResultMapper(cs=o.cs)\n        mapper.r = r\n        mapper._renormalizeNeutronFluxByBlock(100)\n        self.assertAlmostEqual(r.core.calcTotalParam(\"power\", generationNum=2), 100)\n\n        mapper._updateDerivedParams()\n        self.assertGreater(r.core.p.maxPD, 0.0)\n        self.assertGreater(r.core.p.maxFlux, 0.0)\n\n        mapper.updateDpaRate()\n        block = r.core.getFirstBlock()\n        self.assertGreater(block.p.detailedDpaRate, 0)\n   
     self.assertEqual(block.p.detailedDpa, 0)\n\n        mapper.clearFlux()\n        self.assertEqual(len(block.p.mgFlux), 0)\n\n    def test_getDpaXs(self):\n        cs = settings.Settings()\n        mapper = globalFluxInterface.GlobalFluxResultMapper(cs=cs)\n\n        # test fuel block\n        b = HexBlock(\"fuel\", height=10.0)\n        vals = mapper.getDpaXs(b)\n        self.assertEqual(len(vals), 33)\n        self.assertAlmostEqual(vals[0], 2345.69, 1)\n\n        # build a grid plate block\n        b = HexBlock(\"grid_plate\", height=10.0)\n        b.p.flags = Flags.GRID_PLATE\n        self.assertTrue(b.hasFlags(Flags.GRID_PLATE))\n\n        # test grid plate block\n        mapper.cs[CONF_GRID_PLATE_DPA_XS_SET] = \"dpa_EBRII_PE16\"\n        vals = mapper.getDpaXs(b)\n        self.assertEqual(len(vals), 33)\n        self.assertAlmostEqual(vals[0], 2478.95, 1)\n\n        # test null case\n        mapper.cs[CONF_GRID_PLATE_DPA_XS_SET] = \"fake\"\n        with self.assertRaises(KeyError):\n            mapper.getDpaXs(b)\n\n    def test_getBurnupPeakingFactor(self):\n        cs = settings.Settings()\n        mapper = globalFluxInterface.GlobalFluxResultMapper(cs=cs)\n\n        # test fuel block\n        mapper.cs[\"burnupPeakingFactor\"] = 0.0\n        b = HexBlock(\"fuel\", height=10.0)\n        b.p.flux = 100.0\n        b.p.fluxPeak = 250.0\n        factor = mapper.getBurnupPeakingFactor(b)\n        self.assertEqual(factor, 2.5)\n\n    def test_getBurnupPeakingFactorZero(self):\n        cs = settings.Settings()\n        mapper = globalFluxInterface.GlobalFluxResultMapper(cs=cs)\n\n        # test fuel block without any peaking factor set\n        b = HexBlock(\"fuel\", height=10.0)\n        factor = mapper.getBurnupPeakingFactor(b)\n        self.assertEqual(factor, 0.0)\n\n\nclass TestGlobalFluxUtils(unittest.TestCase):\n    def test_calcReactionRates(self):\n        \"\"\"\n        Test that the reaction rate code executes and sets a param > 0.0.\n\n        .. 
test:: Return the reaction rates for a given ArmiObject.\n            :id: T_ARMI_FLUX_RX_RATES\n            :tests: R_ARMI_FLUX_RX_RATES\n        \"\"\"\n        b = test_blocks.loadTestBlock()\n        test_blocks.applyDummyData(b)\n        self.assertAlmostEqual(b.p.rateAbs, 0.0)\n        globalFluxInterface.calcReactionRates(b, 1.01, b.core.lib)\n        self.assertGreater(b.p.rateAbs, 0.0)\n        vfrac = b.getComponentAreaFrac(Flags.FUEL)\n        self.assertEqual(b.p.fisDens, b.p.rateFis / vfrac)\n        self.assertEqual(b.p.fisDensHom, b.p.rateFis)\n\n\ndef applyDummyFlux(r, ng=33):\n    \"\"\"Set arbitrary flux distribution on a Reactor.\"\"\"\n    for b in r.core.iterBlocks():\n        b.p.power = 1.0\n        b.p.mgFlux = np.arange(ng, dtype=np.float64)\n"
  },
  {
    "path": "armi/physics/neutronics/isotopicDepletion/__init__.py",
    "content": "# Copyright 2026 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This package houses helper tools that allow ARMI to communicate with external isotopic depletion programs.\"\"\"\n"
  },
  {
    "path": "armi/physics/neutronics/isotopicDepletion/crossSectionTable.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nModule containing the CrossSectionTable class.\n\nThe CrossSectionTable is useful for performing isotopic depletion analysis by storing one-group cross sections of\ninterest to such an analysis. This used to live alongside the isotopicDepletionInterface, but that proved to be an\nunpleasant coupling between the ARMI composite model and the physics code contained therein. Separating it out at least\nmeans that the composite model doesn't need to import the isotopicDepletionInterface to function.\n\"\"\"\n\nimport collections\nfrom typing import List\n\nimport numpy as np\n\nfrom armi.nucDirectory import nucDir\n\n\nclass CrossSectionTable(collections.OrderedDict):\n    \"\"\"\n    This is a set of one group cross sections for use with isotopicDepletion analysis.\n\n    It can also double as a reaction rate table.\n\n    XStable is indexed by nucNames (nG), (nF), (n2n), (nA), (nP) and (n3n) are expected the cross sections are returned\n    in barns.\n    \"\"\"\n\n    rateTypes = (\"nG\", \"nF\", \"n2n\", \"nA\", \"nP\", \"n3n\")\n\n    def __init__(self, *args, **kwargs):\n        collections.OrderedDict.__init__(self, *args, **kwargs)\n        self._name = None\n\n    def setName(self, name):\n        self._name = name\n\n    def getName(self):\n        return self._name\n\n    def add(self, nucName, nG=0.0, nF=0.0, n2n=0.0, nA=0.0, nP=0.0, n3n=0.0):\n    
    \"\"\"\n        Add one group cross sections to the table.\n\n        Parameters\n        ----------\n        nucName : str\n            nuclide name -- e.g. 'U235'\n        nG : float\n            (n,gamma) cross section in barns\n        nF : float\n            (n,fission) cross section in barns\n        n2n : float\n            (n,2n) cross section in barns\n        nA : float\n            (n,alpha) cross section in barns\n        nP : float\n            (n,proton) cross section in barns\n        n3n : float\n            (n,3n) cross section in barns\n        \"\"\"\n        xsData = {rateType: xs for rateType, xs in zip(self.rateTypes, [nG, nF, n2n, nA, nP, n3n])}\n        nb = nucDir.nuclideBases.byName[nucName]\n        mcnpNucName = int(nb.getMcnpId())\n        self[mcnpNucName] = xsData\n\n    def addMultiGroupXS(self, nucName, microMultiGroupXS, mgFlux, totalFlux=None):\n        \"\"\"\n        Perform group collapse to one group cross sections and add to table.\n\n        Parameters\n        ----------\n        nucName: str\n            nuclide name -- e.g. 'U235'\n        microMultiGroupXS: XSCollection\n            micro cross sections, typically a XSCollection from an ISOTXS\n        mgFlux: list like\n            The flux in each energy group\n        totalFlux: float\n            The total flux. 
Optional argument for increased speed if already available.\n        \"\"\"\n        totalFlux = totalFlux if totalFlux is not None else sum(mgFlux)\n        xsTypes = (\"nG\", \"nF\", \"n2n\", \"nA\", \"nP\")\n        mgCrossSections = (\n            microMultiGroupXS.nGamma,\n            microMultiGroupXS.fission,\n            microMultiGroupXS.n2n,\n            microMultiGroupXS.nalph,\n            microMultiGroupXS.np,\n        )\n\n        oneGroupXS = np.asarray(mgCrossSections).dot(mgFlux) / totalFlux\n\n        oneGroupXSbyName = {xsType: xs for xsType, xs in zip(xsTypes, oneGroupXS)}\n        oneGroupXSbyName[\"n3n\"] = 0.0\n\n        self.add(nucName, **oneGroupXSbyName)\n\n    def hasValues(self):\n        \"\"\"Determines if there are non-zero values in this cross section table.\"\"\"\n        return any(any(nuclideCrossSectionSet.values()) for nuclideCrossSectionSet in self.values())\n\n    def getXsecTable(\n        self,\n        headerFormat=\"$ xsecs for {}\",\n        tableFormat=\"\\n{{mcnpId}} {nG:.5e} {nF:.5e} {n2n:.5e} {n3n:.5e} {nA:.5e} {nP:.5e}\",\n    ):\n        \"\"\"\n        Make a cross section table for external depletion physics code input decks.\n\n        .. impl:: Generate a formatted cross section table.\n            :id: I_ARMI_DEPL_TABLES1\n            :implements: R_ARMI_DEPL_TABLES\n\n            Loops over the reaction rates stored as ``self`` to produce a string with the cross sections for each\n            nuclide in the block. Cross sections may be populated by ``makeReactionRateTable``.\n\n            The string will have a header with the table's name formatted according to ``headerFormat`` followed by rows\n            for each unique nuclide/reaction combination, where each line is formatted according to ``tableFormat``.\n\n        Parameters\n        ----------\n        headerFormat: string (optional)\n            This is the format in which the elements of the header with be returned -- i.e. 
if you use a .format() call\n            with the case name you'll return a formatted list of strings.\n        tableFormat: string (optional)\n            This is the format in which the elements of the table with be returned -- i.e. if you use a .format() call\n            with mcnpId, nG, nF, n2n, n3n, nA, and nP you'll get the format you want. If you use a .format() call with\n            the case name you'll return a formatted list of string elements\n\n        Results\n        -------\n        output: list\n            a list of string elements that together make a xsec card\n        \"\"\"\n        output = [headerFormat.format(self.getName())]\n        for mcnpNucName in sorted(self.keys()):\n            rxRates = self[mcnpNucName]\n            dataToWrite = {rateType: rxRates[rateType] for rateType in self.rateTypes}\n            if any(dataToWrite[rateType] for rateType in self.rateTypes):\n                dataToWrite[\"mcnpId\"] = mcnpNucName\n                output.append(tableFormat.format(**dataToWrite))\n\n        return output\n\n\ndef makeReactionRateTable(obj, nuclides: List = None):\n    \"\"\"\n    Generate a reaction rate table for given nuclides.\n\n    Often useful in support of depletion.\n\n    .. impl:: Generate a reaction rate table with entries for (nG), (nF), (n2n), (nA), and (nP) reactions.\n        :id: I_ARMI_DEPL_TABLES0\n        :implements: R_ARMI_DEPL_TABLES\n\n        For a given composite object ``obj`` and a list of nuclides ``nuclides`` in that object, call\n        ``obj.getReactionRates()`` for each nuclide with a ``nDensity`` parameter of 1.0. If ``nuclides`` is not\n        specified, use a list of all nuclides in ``obj``. This will reach upwards through the parents of ``obj`` to the\n        associated :py:class:`~armi.reactor.reactors.Core` object and pull the ISOTXS library that is stored there. 
If\n        ``obj`` does not belong to a ``Core``, a warning is printed.\n\n        For each child of ``obj``, use the ISOTXS library and the cross-section ID for the associated block to produce a\n        reaction rate dictionary in units of inverse seconds for the nuclide specified in the original call to\n        ``obj.getReactionRates()``. Because ``nDensity`` was originally specified as 1.0, this dictionary actually\n        represents the reaction rates per unit volume. If the nuclide is not in the ISOTXS library a warning is printed.\n\n        Combine the reaction rates for all nuclides into a combined dictionary by summing together reaction rates of the\n        same type on the same isotope from each of the children of ``obj``.\n\n        If ``obj`` has a non-zero multi-group flux, sum the group-wise flux into the total flux and normalize the\n        reaction rates by the total flux, producing a one-group macroscopic cross section for each reaction type on each\n        nuclide. 
Store these values in a ``CrossSectionTable``.\n\n    Parameters\n    ----------\n    nuclides : list, optional\n        list of nuclide names for which to generate the cross-section table.\n        If absent, use all nuclides obtained by self.getNuclides().\n\n    Notes\n    -----\n    This also used to do some caching on the block level but that has been removed and the calls to this may therefore\n    need to be re-optimized.\n\n    See Also\n    --------\n    armi.physics.neutronics.isotopicDepletion.isotopicDepletionInterface.CrossSectionTable\n    armi.reactor.composites.Composite.getReactionRates\n    \"\"\"\n    if nuclides is None:\n        nuclides = obj.getNuclides()\n\n    rxRates = {nucName: {rxName: 0 for rxName in CrossSectionTable.rateTypes} for nucName in nuclides}\n\n    for armiObject in obj:\n        for nucName in nuclides:\n            rxnRates = armiObject.getReactionRates(nucName, nDensity=1.0)\n            for rxName, rxRate in rxnRates.items():\n                rxRates[nucName][rxName] += rxRate\n\n    crossSectionTable = CrossSectionTable()\n    crossSectionTable.setName(obj.getName())\n\n    totalFlux = sum(obj.getIntegratedMgFlux())\n    if totalFlux:\n        for nucName, nucRxRates in rxRates.items():\n            xSecs = {rxName: rxRate / totalFlux for rxName, rxRate in nucRxRates.items()}\n            crossSectionTable.add(nucName, **xSecs)\n\n    return crossSectionTable\n"
  },
  {
    "path": "armi/physics/neutronics/isotopicDepletion/isotopicDepletionInterface.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"An abstract class for interfaces between ARMI and programs that simulate transmutation and decay.\"\"\"\n\nimport collections\n\nfrom armi import interfaces\nfrom armi.nucDirectory import nuclideBases\nfrom armi.nuclearDataIO import xsLibraries\nfrom armi.physics.neutronics.isotopicDepletion.crossSectionTable import (\n    CrossSectionTable,\n)\nfrom armi.reactor import composites\nfrom armi.reactor.flags import Flags\n\n\ndef isDepletable(obj: composites.ArmiObject):\n    \"\"\"\n    Return True if obj or any child is flagged as DEPLETABLE.\n\n    The DEPLETABLE flag is automatically set to True if any composition contains nuclides that are\n    in the active nuclides list, unless flags are specifically set and DEPLETABLE is left out.\n\n    This is often interpreted by depletion plugins as indicating which parts of the problem to apply\n    depletion to. Analysts may want to turn on and off depletion in certain problems.\n\n    For example, sometimes they want the control rods to deplete to figure out how often to replace\n    them.\n\n    Warning\n    -------\n    The ``DEPLETABLE`` flag is automatically added to compositions that have active nuclides. 
If you\n    explicitly define any flags at all, you must also manually include ``DEPLETABLE`` or else the\n    objects will silently not deplete.\n\n    Notes\n    -----\n    The auto-flagging of ``DEPLETABLE`` happens in the construction of blueprints\n    rather than in a plugin hook because the reactor is not available at the time\n    the plugin hook runs.\n\n    See Also\n    --------\n    armi.reactor.blueprints.componentBlueprint.insertDepletableNuclideKeys\n    \"\"\"\n    return obj.hasFlags(Flags.DEPLETABLE) or obj.containsAtLeastOneChildWithFlags(Flags.DEPLETABLE)\n\n\nclass AbstractIsotopicDepleter:\n    \"\"\"\n    Interact with a depletion code.\n\n    This interface and subClasses deplete under a flux defined outside this interface\n\n    The depletion in this analysis only depends on the flux, material vectors, nuclear data and continuous source and\n    loss objects.\n\n    The depleters derived from this abstract class use all the fission products ARMI can handle -- i.e. do not form\n    lumped fission products.\n\n    The class attribute _depleteByName contains a ARMI objects to deplete keyed by name.\n\n    .. impl:: ARMI provides a base class to deplete isotopes.\n        :id: I_ARMI_DEPL_ABC\n        :implements: R_ARMI_DEPL_ABC\n\n        This class provides some basic infrastructure typically needed in depletion calculations within the ARMI\n        framework. 
It stores a reactor, operator, and case settings object, and also defines methods to store and\n        retrieve the objects which should be depleted based on their names.\n    \"\"\"\n\n    name = None\n    purpose = \"depletion\"\n\n    def __init__(self, r=None, cs=None, o=None):\n        self.r = r\n        self.cs = cs\n        self.o = o\n\n        # ARMI objects to deplete keyed by name order is important for consistency in iterating through objects\n        self._depleteByName = collections.OrderedDict()\n\n        self.efpdToBurn = None\n        self.allNuclidesInProblem = r.blueprints.allNuclidesInProblem if r else []\n\n    def addToDeplete(self, armiObj):\n        \"\"\"Add the object to the group of objects to be depleted.\"\"\"\n        self._depleteByName[armiObj.getName()] = armiObj\n\n    def setToDeplete(self, armiObjects):\n        \"\"\"Change the group of objects to deplete to the specified group.\"\"\"\n        listOfTuples = [(obj.getName(), obj) for obj in armiObjects]\n        self._depleteByName = collections.OrderedDict(listOfTuples)\n\n    def getToDeplete(self):\n        \"\"\"Return objects to be depleted.\"\"\"\n        return list(self._depleteByName.values())\n\n    def run(self):\n        \"\"\"\n        Submit depletion case with external solver to the cluster.\n\n        In addition to running the physics kernel, this method calls the waitForJob method to wait for it job to finish.\n\n        comm = MPI.COMM_SELF.Spawn(sys.executable,args=['cpi.py'],maxprocs=5)\n        \"\"\"\n        raise NotImplementedError\n\n\ndef makeXsecTable(\n    compositeName,\n    xsType,\n    mgFlux,\n    isotxs,\n    headerFormat=\"$ xsecs for {}\",\n    tableFormat=\"\\n{mcnpId} {nG:.5e} {nF:.5e} {n2n:.5e} {n3n:.5e} {nA:.5e} {nP:.5e}\",\n):\n    \"\"\"\n    Make a cross section table for depletion physics input decks.\n\n    Parameters\n    ----------\n    armiObject: armiObject\n        an armi object --  batch or block -- with a .p.xsType and a 
getMgFlux method\n    activeNuclides: list\n        a list of the nucNames of active isotopes\n    isotxs: isotxs object\n    headerFormat: string (optional)\n        this is the format in which the elements of the header with be returned -- i.e. if you use a .format() call with\n        the case name you'll return a formatted list of string elements\n    tableFormat: string (optional)\n        This is the format in which the elements of the table with be returned -- i.e. if you use a .format() call with\n        mcnpId, nG, nF, n2n, n3n, nA, and nP you'll get the format you want. If you use a .format() call with the case\n        name you'll return a formatted list of strings.\n\n    Results\n    -------\n    output: list\n        a list of string elements that together make a xsec card\n\n    See Also\n    --------\n    crossSectionTable.makeCrossSectionTable\n        Makes a table for arbitrary ArmiObjects\n    \"\"\"\n    xsTable = CrossSectionTable()\n\n    if not xsType or not sum(mgFlux) > 0:\n        return []\n    xsTable.setName(compositeName)\n    totalFlux = sum(mgFlux)\n\n    for nucLabel, nuc in isotxs.items():\n        if xsType != xsLibraries.getSuffixFromNuclideLabel(nucLabel):\n            continue\n        nucName = nuc.name\n        nb = nuclideBases.byName[nucName]\n        if isinstance(nb, (nuclideBases.LumpNuclideBase, nuclideBases.DummyNuclideBase)):\n            continue\n        microMultiGroupXS = isotxs[nucLabel].micros\n        if not isinstance(nb, nuclideBases.NaturalNuclideBase):\n            xsTable.addMultiGroupXS(nucName, microMultiGroupXS, mgFlux, totalFlux)\n    return xsTable.getXsecTable(headerFormat=headerFormat, tableFormat=tableFormat)\n\n\nclass AbstractIsotopicDepletionReader(interfaces.OutputReader):\n    \"\"\"Read number density output produced by the isotopic depletion.\"\"\"\n\n    def read(self):\n        \"\"\"Read a isotopic depletion Output File and applies results to armi objects in the\n        ``ToDepletion`` 
attribute.\n        \"\"\"\n        raise NotImplementedError\n\n\nclass Csrc:\n    \"\"\"\n    Writes a continuous source term card in a depletion interface.\n\n    Notes\n    -----\n    The chemical vector is a dictionary of chemicals and their removal rate constant. This works like a decay constant.\n\n    The isotopic vector is used to make a source material in continuous source definitions.\n\n    This is also the base class for continuous loss cards.\n    \"\"\"\n\n    def __init__(self):\n        self._chemicalVector = {}\n        self._isotopicVector = {}\n        self.defaultVector = {\"0\": 0}\n\n    def setChemicalVector(self, chemicalVector):\n        self._chemicalVector = chemicalVector\n\n    def getChemicalVector(self):\n        return self._chemicalVector\n\n    def write(self):\n        \"\"\"Return a list of lines to write for a csrc card.\"\"\"\n        raise NotImplementedError\n"
  },
  {
    "path": "armi/physics/neutronics/latticePhysics/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Initialization of the interfaces for running lattice physics calculations.\"\"\"\n\n# ruff: noqa: F401\nimport os\n\nfrom armi import interfaces, settings\nfrom armi.physics import neutronics\nfrom armi.utils import pathTools\n\nORDER = interfaces.STACK_ORDER.CROSS_SECTIONS\n"
  },
  {
    "path": "armi/physics/neutronics/latticePhysics/latticePhysicsInterface.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nLattice Physics Interface.\n\nParent classes for codes responsible for generating broad-group cross sections.\n\"\"\"\n\nimport os\n\nfrom armi import interfaces, nuclearDataIO, runLog\nfrom armi.physics import neutronics\nfrom armi.physics.neutronics import LatticePhysicsFrequency\nfrom armi.physics.neutronics.const import CONF_CROSS_SECTION\nfrom armi.physics.neutronics.settings import (\n    CONF_CLEAR_XS,\n    CONF_GEN_XS,\n    CONF_LATTICE_PHYSICS_FREQUENCY,\n    CONF_TOLERATE_BURNUP_CHANGE,\n    CONF_XS_KERNEL,\n)\nfrom armi.utils import safeCopy\n\nLATTICE_PHYSICS = \"latticePhysics\"\n\n\ndef setBlockNeutronVelocities(r, neutronVelocities):\n    \"\"\"\n    Set the ``mgNeutronVelocity`` parameter for each block using the ``neutronVelocities`` dictionary data.\n\n    Parameters\n    ----------\n    r : Reactor\n        A Reactor object, that we want to modify.\n    neutronVelocities : dict\n        Dictionary that is keyed with the ``representativeBlock`` XS IDs with values of multigroup neutron velocity data\n        computed by MC2.\n\n    Raises\n    ------\n    ValueError\n        Multi-group neutron velocities was not computed during the cross section calculation.\n    \"\"\"\n    for b in r.core.iterBlocks():\n        xsID = b.getMicroSuffix()\n        if xsID not in neutronVelocities:\n            raise ValueError(\n                f\"Cannot 
assign multi-group neutron velocity to {b} because it does not exist in the neutron \"\n                f\"velocities dictionary with keys: {neutronVelocities.keys()}. The XS library does not contain data \"\n                f\"for the {xsID} xsid.\"\n            )\n        b.p.mgNeutronVelocity = neutronVelocities[b.getMicroSuffix()]\n\n\nclass LatticePhysicsInterface(interfaces.Interface):\n    \"\"\"Class for interacting with lattice physics codes.\"\"\"\n\n    purpose = LATTICE_PHYSICS\n\n    def __init__(self, r, cs):\n        interfaces.Interface.__init__(self, r, cs)\n\n        # Set to True by default, but should be disabled when perturbed cross sections are generated.\n        self._updateBlockNeutronVelocities = True\n        self._burnupTolerance = self.cs[CONF_TOLERATE_BURNUP_CHANGE]\n        self._oldXsIdsAndBurnup = {}\n        self.executablePath = self._getExecutablePath()\n        self.executableRoot = os.path.dirname(self.executablePath)\n        self.includeGammaXS = neutronics.gammaTransportIsRequested(cs) or neutronics.gammaXsAreRequested(cs)\n        self._latticePhysicsFrequency = LatticePhysicsFrequency[self.cs[CONF_LATTICE_PHYSICS_FREQUENCY]]\n\n    def _getExecutablePath(self):\n        raise NotImplementedError\n\n    def interactBOL(self, cycle=0):\n        \"\"\"\n        Run the lattice physics code if ``genXS`` is set and update burnup groups.\n\n        Generate new cross sections based off the case settings and the current state of the reactor if the lattice\n        physics frequency is BOL.\n        \"\"\"\n        if self._latticePhysicsFrequency == LatticePhysicsFrequency.BOL:\n            self.updateXSLibrary(cycle)\n\n    def interactBOC(self, cycle=0):\n        \"\"\"\n        Run the lattice physics code if ``genXS`` is set and update burnup groups.\n\n        Generate new cross sections based off the case settings and the current state of the reactor if the lattice\n        physics frequency is BOC.\n\n        Notes\n       
 -----\n        :py:meth:`armi.physics.fuelCycle.fuelHandlerInterface.FuelHandlerInterface.interactBOC` also calls this if the\n        ``runLatticePhysicsBeforeShuffling`` setting is True. This happens because branch searches may need XS.\n        \"\"\"\n        if self._latticePhysicsFrequency == LatticePhysicsFrequency.BOC:\n            self.updateXSLibrary(cycle)\n\n    def updateXSLibrary(self, cycle, node=None):\n        \"\"\"\n        Update the current XS library, either by creating or reloading one.\n\n        Parameters\n        ----------\n        cycle : int\n            The cycle that is being processed. Used to name the library.\n        node : int, optional\n            The node that is being processed. Used to name the library.\n\n        See Also\n        --------\n        computeCrossSections : run lattice physics on the current reactor state no matter weather needed or not.\n        \"\"\"\n        runLog.important(f\"Preparing XS for cycle {cycle}\")\n        representativeBlocks, xsIds = self._getBlocksAndXsIds()\n        if self._newLibraryShouldBeCreated(cycle, representativeBlocks, xsIds):\n            if self.cs[CONF_CLEAR_XS]:\n                self.clearXS()\n            self.computeCrossSections(blockList=representativeBlocks, xsLibrarySuffix=self._getSuffix(cycle))\n            self._renameExistingLibrariesForStatepoint(cycle, node)\n        else:\n            self.readExistingXSLibraries(cycle, node)\n\n        self._checkInputs()\n\n    def _renameExistingLibrariesForStatepoint(self, cycle, node):\n        \"\"\"Copy the existing neutron and/or gamma libraries into cycle-dependent files.\"\"\"\n        safeCopy(neutronics.ISOTXS, nuclearDataIO.getExpectedISOTXSFileName(cycle, node))\n        if self.includeGammaXS:\n            safeCopy(\n                neutronics.GAMISO,\n                nuclearDataIO.getExpectedGAMISOFileName(cycle=cycle, node=node, suffix=self._getSuffix(cycle)),\n            )\n            safeCopy(\n            
    neutronics.PMATRX,\n                nuclearDataIO.getExpectedPMATRXFileName(cycle=cycle, node=node, suffix=self._getSuffix(cycle)),\n            )\n\n    def _checkInputs(self):\n        pass\n\n    def readExistingXSLibraries(self, cycle, node):\n        raise NotImplementedError\n\n    def makeCycleXSFilesAsBaseFiles(self, cycle, node):\n        raise NotImplementedError\n\n    @staticmethod\n    def _copyLibraryFilesForCycle(cycle, libFiles):\n        runLog.extra(f\"Current library files: {libFiles}\")\n        for baseName, cycleName in libFiles.items():\n            if not os.path.exists(cycleName):\n                if not os.path.exists(baseName):\n                    raise ValueError(\n                        f\"Neither {cycleName} nor {baseName} libraries exist. Either the current cycle library for \"\n                        f\"cycle {cycle} should exist or a base library is required to continue.\"\n                    )\n                runLog.info(\n                    f\"Existing library {cycleName} for cycle {cycle} does not exist. 
The active library is {baseName}\"\n                )\n            else:\n                runLog.info(f\"Using {baseName} as an active library\")\n                if cycleName != baseName:\n                    safeCopy(cycleName, baseName)\n\n    def _readGammaBinaries(self, lib, gamisoFileName, pmatrxFileName):\n        raise NotImplementedError(f\"Gamma cross sections not implemented in {self.cs[CONF_XS_KERNEL]}\")\n\n    def _writeGammaBinaries(self, lib, gamisoFileName, pmatrxFileName):\n        raise NotImplementedError(f\"Gamma cross sections not implemented in {self.cs[CONF_XS_KERNEL]}\")\n\n    def _getSuffix(self, cycle):\n        return \"\"\n\n    def interactEveryNode(self, cycle=None, node=None):\n        \"\"\"\n        Run the lattice physics code if ``genXS`` is set and update burnup groups.\n\n        Generate new cross sections based off the case settings and the current state of the reactor if the lattice\n        physics frequency is at least everyNode.\n\n        If this is not a coupled calculation, or if cross sections are only being generated at everyNode, then we want\n        to regenerate all cross sections here. 
If it _is_ a coupled calculation, and we are generating cross sections at\n        coupled iterations, then keep the existing XS lib for now, adding any XS groups as necessary to ensure that all\n        XS groups are covered.\n        \"\"\"\n        if self._latticePhysicsFrequency >= LatticePhysicsFrequency.everyNode:\n            if not self.o.couplingIsActive() or self._latticePhysicsFrequency == LatticePhysicsFrequency.everyNode:\n                self.r.core.lib = None\n            self.updateXSLibrary(self.r.p.cycle, self.r.p.timeNode)\n\n    def interactCoupled(self, iteration):\n        \"\"\"\n        Runs on coupled iterations to generate cross sections that are updated with the temperature state.\n\n        Notes\n        -----\n        This accounts for changes in cross section data due to temperature changes, which are important for cross\n        section resonance effects and accurately characterizing Doppler constant and coefficient evaluations. For\n        Standard and Equilibrium run types, this coupling iteration is limited to when the time node is equal to zero.\n        The validity of this assumption lies in the expectation that these runs have consistent power, flow, and\n        temperature conditions at all time nodes. For Snapshot run types, this assumption, in general, is invalidated as\n        the requested reactor state may sufficiently differ from what exists on the database and where tight coupling is\n        needed to capture temperature effects.\n\n        .. 
warning::\n\n            For Standard and Equilibrium run types, if the reactor power, flow, and/or temperature state is expected to\n            vary over the lifetime of the simulation, as could be the case with\n            :ref:`detailed cycle histories <cycle-history>`, a custom subclass should be considered.\n\n        Parameters\n        ----------\n        iteration : int\n            This is unused since cross sections are generated on a per-cycle basis.\n        \"\"\"\n        # always run for snapshots to account for temp effect of different flow or power statepoint\n        targetFrequency = (\n            LatticePhysicsFrequency.firstCoupledIteration if iteration == 0 else LatticePhysicsFrequency.all\n        )\n        if self._latticePhysicsFrequency >= targetFrequency:\n            self.r.core.lib = None\n            self.updateXSLibrary(self.r.p.cycle, self.r.p.timeNode)\n\n    def clearXS(self):\n        raise NotImplementedError\n\n    def interactEOC(self, cycle=None):\n        \"\"\"\n        Interact at the end of a cycle.\n\n        Force updating cross sections at the start of the next cycle.\n        \"\"\"\n        self.r.core.lib = None\n\n    def computeCrossSections(self, baseList=None, forceSerial=False, xsLibrarySuffix=\"\", blockList=None):\n        \"\"\"\n        Prepare a batch of inputs, execute them, and store results on reactor library.\n\n        Parameters\n        ----------\n        baseList : list\n            a user-specified set of bases that will be run instead of calculating all of them\n        forceSerial : bool, optional\n            Will run on 1 processor in sequence instead of on many in parallel\n            Useful for optimization/batch runs where every processor is on a different branch\n        xsLibrarySuffix : str, optional\n            A book-keeping suffix used in Doppler calculations\n        blockList : list, optional\n            List of blocks for which to generate cross sections. 
If None, representative blocks will be determined.\n        \"\"\"\n        self.r.core.lib = self._generateXsLibrary(baseList, forceSerial, xsLibrarySuffix, blockList)\n\n    def _generateXsLibrary(\n        self,\n        baseList,\n        forceSerial,\n        xsLibrarySuffix,\n        blockList,\n        writers=None,\n        purgeFP=True,\n    ):\n        raise NotImplementedError\n\n    def _executeLatticePhysicsCalculation(self, returnedFromWriters, forceSerial):\n        raise NotImplementedError\n\n    def generateLatticePhysicsInputs(self, baseList, xsLibrarySuffix, blockList, xsWriters=None):\n        \"\"\"\n        Write input files for the generation of cross section libraries.\n\n        Parameters\n        ----------\n        baseList : list\n            A list of cross-section id strings (e.g. AA, BC) that will be generated. Default: all in reactor\n        xsLibrarySuffix : str\n            A suffix added to the end of the XS file names such as 'voided' for voided XS. 
Default: Empty\n        blockList : list\n            The blocks to write inputs for.\n        xsWriters : list, optional\n            The specified writers to write the input files\n\n        Returns\n        -------\n        returnedFromWriters: list\n            A list of what this specific writer instance returns for each representative block.\n            It is the responsibility of the subclassed interface to implement.\n            In many cases, it is the executing agent.\n        \"\"\"\n        returnedFromWriters = []\n        baseList = set(baseList or [])\n        representativeBlocks = blockList or self.getRepresentativeBlocks()\n        for repBlock in representativeBlocks:\n            xsId = repBlock.getMicroSuffix()\n            if not baseList or xsId in baseList:\n                # write the step number to the info log\n                runLog.info(\n                    \"Creating input writer(s) for {0} with {1:65s} BU (%FIMA): {2:10.2f}\".format(\n                        xsId, repBlock, repBlock.p.percentBu\n                    )\n                )\n                writers = self.getWriters(repBlock, xsLibrarySuffix, xsWriters)\n                for writer in writers:\n                    fromWriter = writer.write()\n                    returnedFromWriters.append(fromWriter)\n\n        return returnedFromWriters\n\n    def getWriters(self, representativeBlock, xsLibrarySuffix, writers=None):\n        \"\"\"\n        Return valid lattice physics writer subclass(es).\n\n        Parameters\n        ----------\n        representativeBlock : Block\n            A representative block object that can be created from a block collection.\n        xsLibrarySuffix : str\n            A suffix added to the end of the XS file names such as 'voided' for voided XS. 
Default: Empty\n        writers : list of lattice physics writer objects, optional\n            If the writers are known, they can be provided and constructed.\n\n        Returns\n        -------\n        writers : list\n            A list of writers for the provided representative block.\n        \"\"\"\n        xsID = representativeBlock.getMicroSuffix()\n        if writers:\n            # Construct the writers that are provided\n            writers = [\n                w(\n                    representativeBlock,\n                    r=self.r,\n                    externalCodeInterface=self,\n                    xsLibrarySuffix=xsLibrarySuffix,\n                )\n                for w in writers\n            ]\n        else:\n            geom = self.cs[CONF_CROSS_SECTION][xsID].geometry\n            writers = self._getGeomDependentWriters(representativeBlock, xsID, geom, xsLibrarySuffix)\n        return writers\n\n    def _getGeomDependentWriters(self, representativeBlock, xsID, geom, xsLibrarySuffix):\n        raise NotImplementedError\n\n    def getReader(self):\n        raise NotImplementedError\n\n    def _newLibraryShouldBeCreated(self, cycle, representativeBlockList, xsIDs):\n        \"\"\"\n        Determines whether the cross section generator should be executed at this cycle.\n\n        Criteria include:\n\n        #. CONF_GEN_XS setting is turned on\n        #. We are beyond any requested skipCycles (restart cycles)\n        #. The blocks have changed burnup beyond the burnup threshold\n        #. Lattice physics kernel (e.g. 
MC2) hasn't already been executed for this cycle\n           (possible if it runs during fuel handling)\n        \"\"\"\n        executeXSGen = bool(self.cs[CONF_GEN_XS] and cycle >= self.cs[\"skipCycles\"])\n        idsChangedBurnup = self._checkBurnupThresholds(representativeBlockList)\n        if executeXSGen and not idsChangedBurnup:\n            executeXSGen = False\n\n        if self.r.core.hasLib():\n            # justification=r.core.lib property can raise exception or load pre-generated ISOTXS, but the interface\n            # should have responsibility of loading XS's have already generated for this cycle (maybe during fuel\n            # management). Should we update due to changes that occurred during fuel management?\n            missing = set(xsIDs) - set(self.r.core.lib.xsIDs)\n            if missing and not executeXSGen:\n                runLog.info(\n                    f\"Although a XS library {self.r.core.lib} exists on {self.r.core}, there are missing XS IDs \"\n                    f\"{missing} required. The XS generation on cycle {cycle} is not enabled, but will be run to \"\n                    \"generate these missing cross sections.\"\n                )\n                executeXSGen = True\n            elif missing:\n                runLog.info(\n                    f\"Although a XS library {self.r.core.lib} exists on {self.r.core}, there are missing XS IDs \"\n                    f\"{missing} required. These will be generated on cycle {cycle}.\"\n                )\n                executeXSGen = True\n            else:\n                runLog.info(\n                    f\"A XS library {self.r.core.lib} exists on {self.r.core} and contains the required XS data for XS \"\n                    f\"IDs {self.r.core.lib.xsIDs}. 
The generation of XS will be skipped.\"\n                )\n                executeXSGen = False\n\n        if executeXSGen:\n            runLog.info(f\"Cross sections will be generated on cycle {cycle} for the following XS IDs: {xsIDs}\")\n        else:\n            runLog.info(\n                f\"Cross sections will not be generated on cycle {cycle}. The setting `{CONF_GEN_XS}` is \"\n                f\"{self.cs[CONF_GEN_XS]} and `skipCycles` is {self.cs['skipCycles']}\"\n            )\n\n        return executeXSGen\n\n    def _checkBurnupThresholds(self, blockList):\n        \"\"\"\n        Check to see if burnup has changed meaningfully.\n\n        If there are, then the xs sets should be regenerated. Otherwise then go ahead and skip xs generation.\n\n        This is motivated by the idea that during very long explicit equilibrium runs, it might save time to turn off xs\n        generation at a certain point.\n\n        Parameters\n        ----------\n        blockList: iterable\n            List of all blocks to examine\n\n        Returns\n        -------\n        idsChangedBurnup: bool\n            flag regarding whether or not burnup changed substantially\n        \"\"\"\n        idsChangedBurnup = True\n        if self._burnupTolerance > 0:\n            idsChangedBurnup = False\n            for b in blockList:\n                xsID = b.getMicroSuffix()\n\n                if xsID not in self._oldXsIdsAndBurnup:\n                    # Looks like a new ID was found that was not in the old ID's have to regenerate the cross-sections\n                    # this time around\n                    self._oldXsIdsAndBurnup[xsID] = b.p.percentBu\n                    idsChangedBurnup = True\n                else:\n                    # The id was found. 
Now it is time to compare the burnups to determine if there has been enough\n                    # meaningful change between the runs\n                    buOld = self._oldXsIdsAndBurnup[xsID]\n                    buNow = b.p.percentBu\n\n                    if abs(buOld - buNow) > self._burnupTolerance:\n                        idsChangedBurnup = True\n                        # update the oldXs burnup to be the about to be newly generated xsBurnup\n                        self._oldXsIdsAndBurnup[xsID] = buNow\n\n                        runLog.important(\n                            f\"Burnup has changed in xsID {xsID} from {buOld} to {buNow}. Recalculating Cross-sections\"\n                        )\n\n        return idsChangedBurnup\n\n    def _getProcessesPerNode(self):\n        raise NotImplementedError\n\n    def getRepresentativeBlocks(self):\n        \"\"\"Return a list of all blocks in the problem.\"\"\"\n        xsGroupManager = self.getInterface(\"xsGroups\")\n        return xsGroupManager.representativeBlocks.values()  # OrderedDict\n\n    def _getBlocksAndXsIds(self):\n        \"\"\"Return blocks and their xsIds.\"\"\"\n        blocks = self.getRepresentativeBlocks()\n        return blocks, [b.getMicroSuffix() for b in blocks]\n\n    def updatePhysicsCouplingControl(self):\n        \"\"\"\n        Disable XS update in equilibrium cases after a while.\n\n        Notes\n        -----\n        This is only relevant for equilibrium cases. We have to turn off XS updates after several cyclics or else the\n        number densities will never converge.\n        \"\"\"\n        if self.r.core.p.cyclics >= self.cs[\"numCyclicsBeforeStoppingXS\"]:\n            self.enabled(False)\n            runLog.important(f\"Disabling {self} because numCyclics={self.r.core.p.cyclics}\")\n"
  },
  {
    "path": "armi/physics/neutronics/latticePhysics/latticePhysicsWriter.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nLattice Physics Writer.\n\nParent class for lattice physics writers.\n\nSeeks to provide access to common methods used by general lattice physics codes.\n\"\"\"\n\nimport collections\nimport math\n\nimport numpy as np\nimport ordered_set\n\nfrom armi import interfaces, runLog\nfrom armi.nucDirectory import nuclideBases\nfrom armi.physics import neutronics\nfrom armi.physics.neutronics.const import CONF_CROSS_SECTION\nfrom armi.physics.neutronics.fissionProductModel.fissionProductModelSettings import (\n    CONF_FP_MODEL,\n)\nfrom armi.physics.neutronics.settings import (\n    CONF_GEN_XS,\n    CONF_MINIMUM_FISSILE_FRACTION,\n    CONF_MINIMUM_NUCLIDE_DENSITY,\n)\nfrom armi.reactor import components\nfrom armi.reactor.flags import Flags\nfrom armi.settings.fwSettings.globalSettings import CONF_DETAILED_AXIAL_EXPANSION\nfrom armi.utils.customExceptions import warn_when_root\n\n# number of decimal places to round temperatures to in _groupNuclidesByTemperature\n_NUM_DIGITS_ROUND_TEMPERATURE = 3\n# index of the temperature in the nuclide dictionary: {nuc: (density, temp, category)}\n_NUCLIDE_VALUES_TEMPERATURE_INDEX = 1\n\n\n@warn_when_root\ndef nuclideNameFoundMultipleTimes(nuclideName):\n    return \"Nuclide `{}' was found multiple times.\".format(nuclideName)\n\n\nclass LatticePhysicsWriter(interfaces.InputWriter):\n    \"\"\"\n    Parent class for creating 
the inputs for lattice physics codes.\n\n    Contains methods for extracting all nuclides for a given problem.\n    \"\"\"\n\n    _SPACE = \" \"\n    _SEPARATOR = \" | \"\n    # Nuclide categories\n    UNUSED_CATEGORY = \"Unused\" + 3 * _SPACE\n    FUEL_CATEGORY = \"Fuel\" + 5 * _SPACE\n    STRUCTURE_CATEGORY = \"Structure\"\n    COOLANT_CATEGORY = \"Coolant\" + 2 * _SPACE\n    FISSION_PRODUCT_CATEGORY = \"Fission Product\"\n    # Nuclide attributes\n    DEPLETABLE = \"Depletable\" + 4 * _SPACE\n    UNDEPLETABLE = \"Non-Depletable\"\n    REPRESENTED = \"Represented\" + 2 * _SPACE\n    INF_DILUTE = \"Inf Dilute\"\n\n    def __init__(\n        self,\n        representativeBlock,\n        r=None,\n        externalCodeInterface=None,\n        xsLibrarySuffix=\"\",\n        generateExclusiveGammaXS=False,\n    ):\n        interfaces.InputWriter.__init__(self, r=r, externalCodeInterface=externalCodeInterface)\n        self.cs = self.eci.cs\n        self.block = representativeBlock\n        if not isinstance(xsLibrarySuffix, str):\n            raise TypeError(\"xsLibrarySuffix should be a string; got {}\".format(type(xsLibrarySuffix)))\n        self.xsLibrarySuffix = xsLibrarySuffix\n        self.generateExclusiveGammaXS = generateExclusiveGammaXS\n        if self.generateExclusiveGammaXS and not neutronics.gammaXsAreRequested(self.cs):\n            raise ValueError(\"Invalid `{}` setting to generate gamma XS for {}.\".format(CONF_GEN_XS, self.block))\n        self.xsId = representativeBlock.getMicroSuffix()\n        self.xsSettings = self.cs[CONF_CROSS_SECTION][self.xsId]\n        self.mergeIntoClad = self.xsSettings.mergeIntoClad\n        self.mergeIntoFuel = self.xsSettings.mergeIntoFuel\n        self.driverXsID = self.xsSettings.driverID\n        self.numExternalRings = self.xsSettings.numExternalRings\n        self.criticalBucklingSearchActive = self.xsSettings.criticalBuckling\n        self.ductHeterogeneous = self.xsSettings.ductHeterogeneous\n        
self.traceIsotopeThreshold = self.xsSettings.traceIsotopeThreshold\n\n        self.executeExclusive = self.xsSettings.xsExecuteExclusive\n        self.priority = self.xsSettings.xsPriority\n        self.maxAtomNumberToModelInfDilute = (\n            self.xsSettings.xsMaxAtomNumber if self.xsSettings.xsMaxAtomNumber is not None else 999\n        )\n        # would prefer this in 1D but its used in 0D in _writeSourceComposition\n        self.minDriverDensity = self.xsSettings.minDriverDensity\n\n        blockNeedsFPs = representativeBlock.getLumpedFissionProductCollection() is not None\n\n        self.modelFissionProducts = blockNeedsFPs and self.cs[CONF_FP_MODEL] != \"noFissionProducts\"\n        self.explicitFissionProducts = self.cs[CONF_FP_MODEL] == \"explicitFissionProducts\"\n        self.diluteFissionProducts = blockNeedsFPs and self.cs[CONF_FP_MODEL] == \"infinitelyDilute\"\n        self.minimumNuclideDensity = self.cs[CONF_MINIMUM_NUCLIDE_DENSITY]\n        self.infinitelyDiluteDensity = self.minimumNuclideDensity\n        self._unusedNuclides = set()\n        self._allNuclideObjects = None\n\n    def __repr__(self):\n        suffix = \" with Suffix:`{}`\".format(self.xsLibrarySuffix) if self.xsLibrarySuffix else \"\"\n        if self.generateExclusiveGammaXS:\n            xsFlag = neutronics.GAMMA\n        elif neutronics.gammaXsAreRequested(self.cs) and self._isGammaXSGenerationEnabled:\n            xsFlag = neutronics.NEUTRONGAMMA\n        else:\n            xsFlag = neutronics.NEUTRON\n        return \"<{} - XS ID {} ({} XS){}>\".format(self.__class__.__name__, self.xsId, xsFlag, suffix)\n\n    def _writeTitle(self, fileObj):\n        self._writeComment(\n            fileObj,\n            \"ARMI generated case for caseTitle {}, block {}\\n\".format(self.cs.caseTitle, self.block),\n        )\n\n    def write(self):\n        raise NotImplementedError\n\n    @property\n    def _isSourceDriven(self):\n        return bool(self.driverXsID)\n\n    @property\n    
def _isGammaXSGenerationEnabled(self):\n        \"\"\"Gamma transport is not available generically across all lattice physic solvers.\"\"\"\n        return False\n\n    def _getAllNuclidesByTemperatureInC(self, component=None):\n        \"\"\"\n        Returns a dictionary where all nuclides in the block are grouped by temperature.\n\n        Some lattice physics codes, like ``SERPENT`` create mixtures of nuclides\n        at similar temperatures to construct a problem. The dictionary returned is of the form ::\n\n            {temp1: {n1: (d1, temp1, category1),\n                     n2: (d2, temp1, category2)}\n             temp2: {n3: (d3, temp2, category3),\n                     n4: (d4, temp2, category4)}\n             ...\n             }\n\n        \"\"\"\n        nuclides = self._getAllNuclideObjects(component)\n        return _groupNuclidesByTemperature(nuclides)\n\n    def _getAllNuclideObjects(self, component=None):\n        \"\"\"\n        Returns a single dictionary of all nuclides in the component.\n\n        Calls :py:meth:`_getAllNuclidesByCategory`, which returns two dictionaries:\n        one with just fission products and another with the remaining nuclides.\n        This method just updates ``self._allNuclideObjects`` to contain the fission\n        products as well.\n\n        The dictionaries are structured with :py:class:`armi.nucDirectory.nuclideBases.NuclideBase`\n        objects, with `(density, temperatureInC, and category)`` tuples for that nuclide object.\n\n        \"\"\"\n        nucs, fissProds = self._getAllNuclidesByCategory(component)\n        nucs.update(fissProds)\n        return nucs\n\n    def _getAllNuclidesByCategory(self, component=None):\n        \"\"\"\n        Determine number densities and temperatures for each nuclide.\n\n        Temperatures are a bit complex due to some special cases:\n            Nuclides that build up like Pu239 have zero density at BOL but need cross sections.\n            Nuclides like Mo99 are 
sometimes in structure and sometimes in lumped fission products. What temp to use?\n            Nuclides like B-10 are in control blocks but these aren't candidates for XS creation. What temperature?\n\n        To deal with this, we compute (flux-weighted) average temperatures of each nuclide based on its current\n        component temperatures.\n\n        \"\"\"\n        dfpDensities = self._getDetailedFPDensities()\n        (\n            coolantNuclides,\n            fuelNuclides,\n            structureNuclides,\n        ) = self.r.core.getNuclideCategories()\n        nucDensities = {}\n        subjectObject = component or self.block\n        depletableNuclides = nuclideBases.getDepletableNuclides(self.r.blueprints.activeNuclides, self.block)\n        objNuclides = subjectObject.getNuclides()\n\n        # If the explicit fission product model is enabled then the number densities\n        # on the components will already contain all the nuclides required to be\n        # modeled by the lattice physics writer. 
Otherwise, assume that `allNuclidesInProblem`\n        # should be modeled.\n        if self.explicitFissionProducts:\n            # If detailed axial expansion is active, mapping between blocks occurs on uniform mesh\n            # and this can cause blocks to have isotopes that they don't have cross sections for.\n            # Fix this by adding all isotopes so they are present in lattice physics.\n            if self.cs[CONF_DETAILED_AXIAL_EXPANSION]:\n                nuclides = self.r.blueprints.allNuclidesInProblem\n            else:\n                nuclides = ordered_set.OrderedSet(sorted(objNuclides))\n        else:\n            nuclides = self.r.blueprints.allNuclidesInProblem\n\n        nuclides = nuclides.union(self.r.blueprints.nucsToForceInXsGen)\n\n        numDensities = subjectObject.getNuclideNumberDensities(nuclides)\n\n        for nucName, dens in zip(nuclides, numDensities):\n            nuc = self.r.nuclideBases.byName[nucName]\n            if isinstance(nuc, nuclideBases.LumpNuclideBase):\n                continue  # skip LFPs here but add individual FPs below.\n\n            if isinstance(subjectObject, components.Component):\n                if self.ductHeterogeneous and \"Homogenized\" in subjectObject.name:\n                    # Nuclide temperatures representing heterogeneous model component temperatures\n                    nucTemperatureInC = self._getAvgNuclideTemperatureInC(nucName)\n                else:\n                    # Heterogeneous number densities and temperatures\n                    nucTemperatureInC = subjectObject.temperatureInC\n            else:\n                # Homogeneous number densities and temperatures\n                nucTemperatureInC = self._getAvgNuclideTemperatureInC(nucName)\n\n            density = max(dens, self.minimumNuclideDensity)\n            if nuc in nucDensities:\n                nuclideNameFoundMultipleTimes(nucName)\n                dens, nucTemperatureInC, nucCategory = nucDensities[nuc]\n    
            density = dens + density\n                nucDensities[nuc] = (density, nucTemperatureInC, nucCategory)\n                continue\n\n            nucCategory = \"\"\n            # Remove nuclides from detailed fission product dictionary if they are a part of the core materials\n            # (e.g., Zr in the U10Zr which is at fuel temperature and Mo in HT9 which is at structure temp)\n            if nuc in dfpDensities:\n                density += dfpDensities[nuc]\n                nucCategory += self.FISSION_PRODUCT_CATEGORY + self._SEPARATOR\n                del dfpDensities[nuc]\n            elif nucName in self._unusedNuclides:\n                nucCategory += self.UNUSED_CATEGORY + self._SEPARATOR\n            elif nucName in fuelNuclides:\n                nucCategory += self.FUEL_CATEGORY + self._SEPARATOR\n            elif nucName in coolantNuclides:\n                nucCategory += self.COOLANT_CATEGORY + self._SEPARATOR\n            elif nucName in structureNuclides:\n                nucCategory += self.STRUCTURE_CATEGORY + self._SEPARATOR\n\n            # Add additional `attributes` to the nuclide categories\n            if nucName in objNuclides:\n                nucCategory += self.REPRESENTED + self._SEPARATOR\n            else:\n                nucCategory += self.INF_DILUTE + self._SEPARATOR\n\n            if nucName in depletableNuclides:\n                nucCategory += self.DEPLETABLE\n            else:\n                nucCategory += self.UNDEPLETABLE\n\n            nucDensities[nuc] = (density, nucTemperatureInC, nucCategory)\n\n        if not self._isSourceDriven:\n            nucDensities = self._adjustPuFissileDensity(nucDensities)\n        fissionProductDensities = self._getDetailedFissionProducts(dfpDensities)\n\n        if self._unusedNuclides:\n            runLog.debug(\n                \"The following unused nuclides (defined in the loading file) are being added to {} at {} C: {}\".format(\n                    subjectObject,\n    
                self._getFuelTemperature(),\n                    list(self._unusedNuclides),\n                )\n            )\n\n        # the sortFunc makes orders the nucideDensities and fissionProductDensities by name.\n        sortFunc = lambda nb_data_tuple: nb_data_tuple[0].name\n        nucDensities = collections.OrderedDict(sorted(nucDensities.items(), key=sortFunc))\n        fissionProductDensities = collections.OrderedDict(sorted(fissionProductDensities.items(), key=sortFunc))\n        return nucDensities, fissionProductDensities\n\n    def _getAvgNuclideTemperatureInC(self, nucName):\n        \"\"\"Return the block fuel temperature and the nuclides average temperature in C.\"\"\"\n        # Get the temperature of the nuclide in the block\n        xsgm = self.getInterface(\"xsGroups\")\n        nucTemperatureInC = xsgm.getNucTemperature(self.xsId, nucName)\n        if not nucTemperatureInC or math.isnan(nucTemperatureInC):\n            # Assign the fuel temperature to the nuclide if it is None or NaN.\n            nucTemperatureInC = self._getFuelTemperature()  # NBD b/c the nuclide is not in problem.\n            self._unusedNuclides.add(nucName)\n\n        return nucTemperatureInC\n\n    def _getFuelTemperature(self):\n        fuelComponents = self.block.getComponents(Flags.FUEL)\n        if not fuelComponents:\n            fuelTemperatureInC = self.block.getAverageTempInC()\n        else:\n            fuelTemperatureInC = np.mean([fc.temperatureInC for fc in fuelComponents])\n        if not fuelTemperatureInC or math.isnan(fuelTemperatureInC):\n            raise ValueError(\n                \"The fuel temperature of block {0} is {1} and is not valid\".format(self.block, fuelTemperatureInC)\n            )\n        return fuelTemperatureInC\n\n    def _getDetailedFissionProducts(self, dfpDensities):\n        \"\"\"Return a dictionary of fission products not provided in the reactor blueprint nuclides.\n\n        Notes\n        -----\n        Assumes that 
all fission products are at the same temperature of the lumped fission product of U238 within the\n        block.\n        \"\"\"\n        if self.cs[CONF_FP_MODEL] != \"noFissionProducts\":\n            fissProductTemperatureInC = self._getAvgNuclideTemperatureInC(\"LFP38\")\n            return {\n                fp: (dens, fissProductTemperatureInC, self.FISSION_PRODUCT_CATEGORY)\n                for fp, dens in dfpDensities.items()\n            }\n        return {}\n\n    def _getDetailedFPDensities(self):\n        \"\"\"\n        Expands the nuclides in the LFP based on their yields.\n\n        Returns\n        -------\n        dfpDensities : dict\n            Detailed Fission Product Densities. keys are FP names, values are block number densities in atoms/bn-cm.\n\n        Raises\n        ------\n        IndexError\n            The lumped fission products were not initialized on the blocks.\n        \"\"\"\n        dfpDensities = {}\n        if not self.modelFissionProducts:\n            return dfpDensities\n        lfpCollection = self.block.getLumpedFissionProductCollection()\n        if self.diluteFissionProducts:\n            if lfpCollection is None:\n                raise ValueError(\"Lumped fission products are not initialized. 
Did interactAll BOL run?\")\n            dfps = lfpCollection.getAllFissionProductNuclideBases()\n            for individualFpBase in dfps:\n                dfpDensities[individualFpBase] = self.minimumNuclideDensity\n        else:\n            # expand densities and sum\n            dfpDensitiesByName = lfpCollection.getNumberDensities(self.block)\n            # now, go through the list and make sure that there aren't any values less than the\n            # minimumNuclideDensity; we need to keep trace amounts of nuclides in the problem\n            for fpName, fpDens in dfpDensitiesByName.items():\n                fp = self.r.nuclideBases.byName[fpName]\n                dfpDensities[fp] = max(fpDens, self.minimumNuclideDensity)\n        return dfpDensities\n\n    def _writeNuclide(self, fileObj, nuclide, density, nucTemperatureInC, category, xsIdSpecified=None):\n        raise NotImplementedError\n\n    @property\n    def _isCriticalBucklingSearchActive(self):\n        return self.criticalBucklingSearchActive\n\n    def _writeComment(self, fileObj, msg):\n        raise NotImplementedError()\n\n    def _writeGroupStructure(self, fileObj):\n        raise NotImplementedError()\n\n    def _adjustPuFissileDensity(self, nucDensities):\n        \"\"\"\n        Checks if the minimum fissile composition is lower than the allowed minimum fissile fraction and adds\n        additional Pu-239.\n\n        Notes\n        -----\n        We're going to increase the Pu-239 density to make the ratio of fissile mass to heavy metal mass equal to the\n        target ``CONF_MINIMUM_FISSILE_FRACTION``::\n\n            minFrac = (fiss - old + new) / (hm - old + new)\n            minFrac * (hm - old + new) = fiss - old + new\n            minFrac * (hm - old) + old - fiss = new * (1 - minFrac)\n            new = (minFrac * (hm - old) + old - fiss) / (1 - minFrac)\n\n        where::\n\n            minFrac = ``CONF_MINIMUM_FISSILE_FRACTION`` setting\n            fiss = fissile mass of block\n 
           hm = heavy metal mass of block\n            old = number density of Pu-239 before adjustment\n            new = number density of Pu-239 after adjustment\n\n        \"\"\"\n        minFrac = self.cs[CONF_MINIMUM_FISSILE_FRACTION]\n        fiss = sum(dens[0] for nuc, dens in nucDensities.items() if nuc.isFissile())\n        hm = sum(dens[0] for nuc, dens in nucDensities.items() if nuc.isHeavyMetal())\n\n        if fiss / hm < minFrac:\n            pu239 = self.r.nuclideBases.byName[\"PU239\"]\n            old, temp, msg = nucDensities[pu239]\n            new = (minFrac * (hm - old) + old - fiss) / (1 - minFrac)\n            nucDensities[pu239] = (new, temp, msg)\n            runLog.warning(\n                f\"Adjusting Pu-239 number densities in {self.block} from {old} to {new} \"\n                f\"to meet minimum fissile fraction of {minFrac}.\"\n            )\n        return nucDensities\n\n    def _getDriverBlock(self):\n        \"\"\"Return the block that is driving the representative block for this writer.\"\"\"\n        xsgm = self.getInterface(\"xsGroups\")\n        driverBlock = xsgm.representativeBlocks.get(self.driverXsID, None)\n        if self.driverXsID != \"\" and driverBlock is None:\n            msg = f\"No representativeBlock found for driver XS ID {self.driverXsID} to use in {self}!\"\n            runLog.error(msg)\n            raise ValueError(msg)\n        return driverBlock\n\n\ndef _groupNuclidesByTemperature(nuclides):\n    \"\"\"\n    Creates a dictionary of temperatures and nuclides at those temperatures.\n\n    Nuclides is a dictionary with ``NuclideBase`` objects as keys, and\n    the density, temperature, and category of those nuclides as values.\n\n    Notes\n    -----\n    The temperature will be rounded to a number of digits according to ``_NUM_DIGITS_ROUND_TEMPERATURE``,\n    because the average temperature for each nuclide can vary down to numerical precision,\n    i.e. 
873.15 and 873.15000000001\n\n    \"\"\"\n    tempDict = {}\n    for nuclide, values in nuclides.items():\n        temperature = round(values[_NUCLIDE_VALUES_TEMPERATURE_INDEX], _NUM_DIGITS_ROUND_TEMPERATURE)\n        if temperature not in tempDict:\n            tempDict[temperature] = {nuclide: values}\n        else:\n            tempDict[temperature][nuclide] = values\n    return tempDict\n"
  },
  {
    "path": "armi/physics/neutronics/latticePhysics/tests/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"
  },
  {
    "path": "armi/physics/neutronics/latticePhysics/tests/test_latticeInterface.py",
    "content": "# Copyright 2021 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Test the Lattice Interface.\"\"\"\n\nimport unittest\nfrom collections import OrderedDict\n\nfrom armi import settings\nfrom armi.nuclearDataIO.cccc import isotxs\nfrom armi.operators.operator import Operator\nfrom armi.physics.neutronics import LatticePhysicsFrequency\nfrom armi.physics.neutronics.crossSectionGroupManager import CrossSectionGroupManager\nfrom armi.physics.neutronics.latticePhysics.latticePhysicsInterface import (\n    LatticePhysicsInterface,\n)\nfrom armi.physics.neutronics.settings import CONF_GEN_XS, CONF_GLOBAL_FLUX_ACTIVE\nfrom armi.reactor.assemblies import (\n    HexAssembly,\n    grids,\n)\nfrom armi.reactor.reactors import Core, Reactor\nfrom armi.reactor.tests.test_blocks import buildSimpleFuelBlock\nfrom armi.tests import ISOAA_PATH, mockRunLogs\n\n\n# As an interface, LatticePhysicsInterface must be subclassed to be used\nclass LatticeInterfaceTester(LatticePhysicsInterface):\n    def __init__(self, r, cs):\n        self.name = \"LatticeInterfaceTester\"\n        super().__init__(r, cs)\n\n    def _getExecutablePath(self):\n        return \"/tmp/fake_path\"\n\n    def readExistingXSLibraries(self, cycle, node):\n        pass\n\n\nclass LatticeInterfaceTesterLibFalse(LatticeInterfaceTester):\n    \"\"\"Subclass setting _newLibraryShouldBeCreated = False.\"\"\"\n\n    def _newLibraryShouldBeCreated(self, cycle, 
representativeBlockList, xsIDs):\n        self.testVerification = True\n        return False\n\n\nclass TestLatticePhysicsInterfaceBase(unittest.TestCase):\n    @classmethod\n    def setUpClass(cls):\n        # create empty reactor core\n        cls.o = Operator(settings.Settings())\n        cls.o.r = Reactor(\"testReactor\", None)\n        cls.o.r.core = Core(\"testCore\")\n        # add an assembly with a single block\n        cls.assembly = HexAssembly(\"testAssembly\")\n        cls.assembly.spatialGrid = grids.AxialGrid.fromNCells(1)\n        cls.assembly.spatialGrid.armiObject = cls.assembly\n        cls.assembly.add(buildSimpleFuelBlock())\n        # init and add interfaces\n        cls.xsGroupInterface = CrossSectionGroupManager(cls.o.r, cls.o.cs)\n        cls.o.addInterface(cls.xsGroupInterface)\n\n\nclass TestLatticePhysicsInterface(TestLatticePhysicsInterfaceBase):\n    \"\"\"Test Lattice Physics Interface.\"\"\"\n\n    @classmethod\n    def setUpClass(cls):\n        super().setUpClass()\n        cls.latticeInterface = LatticeInterfaceTesterLibFalse(cls.o.r, cls.o.cs)\n        cls.o.addInterface(cls.latticeInterface)\n\n    def setUp(self):\n        self.o.r.core.lib = \"Nonsense\"\n        self.latticeInterface.testVerification = False\n\n    def test_includeGammaXS(self):\n        \"\"\"Test that we can correctly flip the switch to calculate gamma XS.\"\"\"\n        # The default operator here turns off Gamma XS generation\n        self.assertFalse(self.latticeInterface.includeGammaXS)\n        self.assertEqual(self.o.cs[CONF_GLOBAL_FLUX_ACTIVE], \"Neutron\")\n\n        # but we can create an operator that turns on Gamma XS generation\n        cs = settings.Settings().modified(newSettings={CONF_GLOBAL_FLUX_ACTIVE: \"Neutron and Gamma\"})\n        newOperator = Operator(cs)\n        newLatticeInterface = LatticeInterfaceTesterLibFalse(newOperator.r, cs)\n        self.assertTrue(newLatticeInterface.includeGammaXS)\n        
self.assertEqual(cs[CONF_GLOBAL_FLUX_ACTIVE], \"Neutron and Gamma\")\n\n    def test_latticePhysicsInterface(self):\n        \"\"\"Super basic test of the LatticePhysicsInterface.\"\"\"\n        self.assertEqual(self.latticeInterface._updateBlockNeutronVelocities, True)\n        self.assertEqual(self.latticeInterface.executablePath, \"/tmp/fake_path\")\n        self.assertEqual(self.latticeInterface.executableRoot, \"/tmp\")\n        self.latticeInterface.updateXSLibrary(0)\n        self.assertEqual(len(self.latticeInterface._oldXsIdsAndBurnup), 0)\n\n    def test_interactBOL(self):\n        \"\"\"\n        Test interactBOL() with different update frequencies.\n\n        Notes\n        -----\n        Unlike other interactions, self.o.r.core.lib is not set to None at BOC, so this test uses\n        self.testVerification instead.\n        \"\"\"\n        self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.never\n        self.latticeInterface.interactBOL()\n        self.assertFalse(self.latticeInterface.testVerification)\n        self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.everyNode\n        self.latticeInterface.interactBOL()\n        self.assertFalse(self.latticeInterface.testVerification)\n        self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.BOL\n        self.latticeInterface.interactBOL()\n        self.assertTrue(self.latticeInterface.testVerification)\n\n    def test_interactBOC(self):\n        \"\"\"\n        Test interactBOC() with different update frequencies.\n\n        Notes\n        -----\n        Unlike other interactions, self.o.r.core.lib is not set to None at BOC, so this test uses\n        self.testVerification instead.\n        \"\"\"\n        self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.BOL\n        self.latticeInterface.interactBOC()\n        self.assertFalse(self.latticeInterface.testVerification)\n        
self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.everyNode\n        self.latticeInterface.interactBOC()\n        self.assertFalse(self.latticeInterface.testVerification)\n        self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.BOC\n        self.latticeInterface.interactBOC()\n        self.assertTrue(self.latticeInterface.testVerification)\n\n    def test_interactEveryNode(self):\n        \"\"\"Test interactEveryNode() with different update frequencies.\"\"\"\n        self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.BOC\n        self.latticeInterface.interactEveryNode()\n        self.assertEqual(self.o.r.core.lib, \"Nonsense\")\n        self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.everyNode\n        self.latticeInterface.interactEveryNode()\n        self.assertIsNone(self.o.r.core.lib)\n\n    def test_interactEveryNodeWhenCoupled(self):\n        \"\"\"\n        Test that the XS lib is not cleared when coupled iterations are turned on\n        and XS will be generated during the coupled iterations.\n        \"\"\"\n        self.o.couplingIsActive = lambda: True\n        self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.firstCoupledIteration\n        self.latticeInterface.interactEveryNode()\n        self.assertEqual(self.o.r.core.lib, \"Nonsense\")\n\n        self.o.couplingIsActive = lambda: False\n        self.latticeInterface.interactEveryNode()\n        self.assertIsNone(self.o.r.core.lib)\n\n    def test_interactEveryNodeWhenCoupledButNot(self):\n        \"\"\"\n        Test that the XS lib is cleared when coupled iterations are turned on\n        but the lattice physics frequency is not high enough.\n        \"\"\"\n        self.o.couplingIsActive = lambda: True\n        self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.firstCoupledIteration\n        self.latticeInterface.interactEveryNode()\n        
self.assertEqual(self.o.r.core.lib, \"Nonsense\")\n\n        self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.everyNode\n        self.latticeInterface.interactEveryNode()\n        self.assertIsNone(self.o.r.core.lib)\n\n    def test_interactEveryNodeFirstCoupled(self):\n        \"\"\"Test interactEveryNode() with LatticePhysicsFrequency.firstCoupledIteration.\"\"\"\n        self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.firstCoupledIteration\n        self.latticeInterface.interactEveryNode()\n        self.assertIsNone(self.o.r.core.lib)\n\n    def test_interactEveryNodeAll(self):\n        \"\"\"Test interactEveryNode() with LatticePhysicsFrequency.all.\"\"\"\n        self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.all\n        self.latticeInterface.interactEveryNode()\n        self.assertIsNone(self.o.r.core.lib)\n\n    def test_interactFirstCoupledIteration(self):\n        \"\"\"Test interactCoupled() with different update frequencies on first iteration.\"\"\"\n        self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.everyNode\n        self.latticeInterface.interactCoupled(iteration=0)\n        self.assertEqual(self.o.r.core.lib, \"Nonsense\")\n        self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.firstCoupledIteration\n        self.latticeInterface.interactCoupled(iteration=0)\n        self.assertIsNone(self.o.r.core.lib)\n\n    def test_interactAll(self):\n        \"\"\"Test interactCoupled() with different update frequencies on non-first iteration.\"\"\"\n        self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.firstCoupledIteration\n        self.latticeInterface.interactCoupled(iteration=1)\n        self.assertEqual(self.o.r.core.lib, \"Nonsense\")\n        self.latticeInterface._latticePhysicsFrequency = LatticePhysicsFrequency.all\n        self.latticeInterface.interactCoupled(iteration=1)\n        
self.assertIsNone(self.o.r.core.lib)\n\n    def test_getSuffix(self):\n        self.assertEqual(self.latticeInterface._getSuffix(7), \"\")\n\n\nclass TestLatticePhysicsLibraryCreation(TestLatticePhysicsInterfaceBase):\n    \"\"\"Test variations of _newLibraryShouldBeCreated.\"\"\"\n\n    @classmethod\n    def setUpClass(cls):\n        super().setUpClass()\n        cls.latticeInterface = LatticeInterfaceTester(cls.o.r, cls.o.cs)\n        cls.o.addInterface(cls.latticeInterface)\n        cls.xsGroupInterface.representativeBlocks = OrderedDict({\"AA\": cls.assembly[0]})\n        cls.b, cls.xsIDs = cls.latticeInterface._getBlocksAndXsIds()\n\n    def setUp(self):\n        \"\"\"Reset representativeBlocks and CONF_GEN_XS.\"\"\"\n        self.xsGroupInterface.representativeBlocks = OrderedDict({\"AA\": self.assembly[0]})\n        self.assembly[0].p.xsType = \"A\"\n        self.o.cs[CONF_GEN_XS] = \"\"\n        self.o.r.core.lib = isotxs.readBinary(ISOAA_PATH)\n\n    def test_libCreation_NoGenXS(self):\n        \"\"\"No ISOTXS and xs gen not requested.\"\"\"\n        self.o.r.core.lib = None\n        with mockRunLogs.BufferLog() as mock:\n            xsGen = self.latticeInterface._newLibraryShouldBeCreated(1, self.b, self.xsIDs)\n            self.assertIn(\"Cross sections will not be generated on cycle 1.\", mock.getStdout())\n            self.assertFalse(xsGen)\n\n    def test_libCreation_GenXS(self):\n        \"\"\"No ISOTXS and xs gen requested.\"\"\"\n        self.o.cs[CONF_GEN_XS] = \"Neutron\"\n        self.o.r.core.lib = None\n        with mockRunLogs.BufferLog() as mock:\n            xsGen = self.latticeInterface._newLibraryShouldBeCreated(1, self.b, self.xsIDs)\n            self.assertIn(\n                \"Cross sections will be generated on cycle 1 for the following XS IDs: ['AA']\",\n                mock.getStdout(),\n            )\n            self.assertTrue(xsGen)\n\n    def test_libCreation_NoGenXS_2(self):\n        \"\"\"ISOTXS present and has all of the 
necessary information.\"\"\"\n        with mockRunLogs.BufferLog() as mock:\n            xsGen = self.latticeInterface._newLibraryShouldBeCreated(1, self.b, self.xsIDs)\n            self.assertIn(\n                \"The generation of XS will be skipped.\",\n                mock.getStdout(),\n            )\n            self.assertFalse(xsGen)\n\n    def test_libCreation_GenXS_2(self):\n        \"\"\"ISOTXS present and does not have all of the necessary information.\"\"\"\n        self.xsGroupInterface.representativeBlocks = OrderedDict({\"BB\": self.assembly[0]})\n        b, xsIDs = self._modifyXSType()\n        with mockRunLogs.BufferLog() as mock:\n            xsGen = self.latticeInterface._newLibraryShouldBeCreated(1, b, xsIDs)\n            self.assertIn(\n                \"is not enabled, but will be run to generate these missing cross sections.\",\n                mock.getStdout(),\n            )\n            self.assertTrue(xsGen)\n\n    def test_libCreation_GenXS_3(self):\n        \"\"\"ISOTXS present and does not have all of the necessary information.\"\"\"\n        self.o.cs[CONF_GEN_XS] = \"Neutron\"\n        b, xsIDs = self._modifyXSType()\n        with mockRunLogs.BufferLog() as mock:\n            xsGen = self.latticeInterface._newLibraryShouldBeCreated(1, b, xsIDs)\n            self.assertIn(\"These will be generated on cycle \", mock.getStdout())\n            self.assertTrue(xsGen)\n\n    def _modifyXSType(self):\n        self.xsGroupInterface.representativeBlocks = OrderedDict({\"BB\": self.assembly[0]})\n        self.assembly[0].p.xsType = \"B\"\n        return self.latticeInterface._getBlocksAndXsIds()\n"
  },
  {
    "path": "armi/physics/neutronics/latticePhysics/tests/test_latticeWriter.py",
    "content": "# Copyright 2021 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Test the Lattice Physics Writer.\"\"\"\n\nimport unittest\nfrom collections import defaultdict\n\nfrom armi.physics.neutronics.const import CONF_CROSS_SECTION\nfrom armi.physics.neutronics.fissionProductModel.fissionProductModelSettings import (\n    CONF_FP_MODEL,\n)\nfrom armi.physics.neutronics.latticePhysics.latticePhysicsInterface import (\n    setBlockNeutronVelocities,\n)\nfrom armi.physics.neutronics.latticePhysics.latticePhysicsWriter import (\n    LatticePhysicsWriter,\n)\nfrom armi.physics.neutronics.settings import (\n    CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION,\n    CONF_XS_BLOCK_REPRESENTATION,\n)\nfrom armi.testing import loadTestReactor\nfrom armi.tests import TEST_ROOT\n\n\nclass FakeLatticePhysicsWriter(LatticePhysicsWriter):\n    \"\"\"LatticePhysicsWriter is abstract, so it must be subclassed to be tested.\"\"\"\n\n    def __init__(self, block, r, eci):\n        self.testOut = \"\"\n        super(FakeLatticePhysicsWriter, self).__init__(block, r, eci, \"\", False)\n\n    def write(self):\n        pass\n\n    def _writeNuclide(self, fileObj, nuclide, density, nucTemperatureInC, category, xsIdSpecified=None):\n        pass\n\n    def _writeComment(self, fileObj, msg):\n        self.testOut += \"\\n\" + str(msg)\n\n    def _writeGroupStructure(self, fileObj):\n        pass\n\n\nclass TestLatticePhysicsWriter(unittest.TestCase):\n 
   \"\"\"Test Lattice Physics Writer.\"\"\"\n\n    def setUp(self):\n        self.o, self.r = loadTestReactor(TEST_ROOT)\n        self.cs = self.o.cs\n        self.cs[CONF_CROSS_SECTION].setDefaults(\n            self.cs[CONF_XS_BLOCK_REPRESENTATION],\n            self.cs[CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION],\n        )\n        self.block = self.r.core.getFirstBlock()\n        self.w = FakeLatticePhysicsWriter(self.block, self.r, self.o)\n\n    def test_setBlockNeutronVelocities(self):\n        d = defaultdict(float)\n        d[\"AA\"] = 10.0\n        setBlockNeutronVelocities(self.r, d)\n        tot = sum([b.p.mgNeutronVelocity for b in self.r.core.iterBlocks()])\n        self.assertGreater(tot, 3000.0)\n\n    def test_latticePhysicsWriter(self):\n        \"\"\"Super basic test of the LatticePhysicsWriter.\"\"\"\n        self.assertEqual(self.w.xsId, \"AA\")\n        self.assertFalse(self.w.modelFissionProducts)\n        self.assertEqual(self.w.driverXsID, \"\")\n        self.assertAlmostEqual(self.w.minimumNuclideDensity, 1e-15, delta=1e-16)\n\n        self.assertEqual(self.w.testOut, \"\")\n        self.assertEqual(str(self.w), \"<FakeLatticePhysicsWriter - XS ID AA (Neutron XS)>\")\n\n        self.w._writeTitle(None)\n        self.assertIn(\"ARMI generated case for caseTitle armiRun\", self.w.testOut)\n\n        nucs = self.w._getAllNuclidesByTemperatureInC(None)\n        self.assertEqual(len(nucs.keys()), 1)\n        self.assertAlmostEqual(list(nucs.keys())[0], 450.0, delta=0.1)\n\n    def test_writeTitle(self):\n        self.w._writeTitle(\"test_writeTitle\")\n        self.assertIn(\"ARMI generated case for caseTitle\", self.w.testOut)\n\n    def test_isSourceDriven(self):\n        self.assertFalse(self.w._isSourceDriven)\n        self.w.driverXsID = True\n        self.assertTrue(self.w._isSourceDriven)\n\n    def test_isGammaXSGenerationEnabled(self):\n        self.assertFalse(self.w._isGammaXSGenerationEnabled)\n\n    def 
test_getAllNuclidesByTemperatureInCNone(self):\n        nucsByTemp = self.w._getAllNuclidesByTemperatureInC(None)\n        keys0 = list(nucsByTemp.keys())\n        self.assertEqual(len(keys0), 1)\n        self.assertEqual(keys0[0], 450.0)\n        keys1 = nucsByTemp[keys0[0]]\n        self.assertGreater(len(keys1), 1)\n        names = [k.name for k in keys1]\n        self.assertIn(\"AM241\", names)\n        self.assertIn(\"U238\", names)\n\n    def test_getAllNuclidesByTemperatureInC(self):\n        self.w.explicitFissionProducts = False\n        c = self.r.core[0][0]\n        nucsByTemp = self.w._getAllNuclidesByTemperatureInC(c)\n        keys0 = list(nucsByTemp.keys())\n        self.assertEqual(len(keys0), 1)\n        self.assertEqual(keys0[0], 450.0)\n        keys1 = nucsByTemp[keys0[0]]\n        self.assertGreater(len(keys1), 1)\n        names = [k.name for k in keys1]\n        self.assertIn(\"AM241\", names)\n        self.assertIn(\"U238\", names)\n\n    def test_getAllNuclidesByTempInCExplicitFisProd(self):\n        self.w.explicitFissionProducts = True\n        c = self.r.core[0][0]\n        nucsByTemp = self.w._getAllNuclidesByTemperatureInC(c)\n        keys0 = list(nucsByTemp.keys())\n        self.assertEqual(len(keys0), 1)\n        self.assertEqual(keys0[0], 450.0)\n        keys1 = nucsByTemp[keys0[0]]\n        self.assertGreater(len(keys1), 1)\n        names = [k.name for k in keys1]\n        self.assertIn(\"AM241\", names)\n        self.assertIn(\"U238\", names)\n\n    def test_getAvgNuclideTemperatureInC(self):\n        temp = self.w._getAvgNuclideTemperatureInC(\"U238\")\n        self.assertAlmostEqual(temp, 450, delta=0.001)\n\n        temp = self.w._getAvgNuclideTemperatureInC(\"U235\")\n        self.assertAlmostEqual(temp, 450, delta=0.001)\n\n    def test_getFuelTemperature(self):\n        temp = self.w._getFuelTemperature()\n        self.assertAlmostEqual(temp, 450, delta=0.001)\n\n    def test_getDetailedFissionProducts(self):\n        dfpDen = 
defaultdict(int)\n        dfpDen[\"U238\"] = 1.2\n        dfpDen[\"U235\"] = 2.3\n        dfpDen[\"AM241\"] = 3.4\n        prods = self.w._getDetailedFissionProducts(dfpDen)\n        self.assertEqual(len(prods), 3)\n        self.assertIn(\"U238\", prods)\n        self.assertIn(\"U235\", prods)\n        self.assertIn(\"AM241\", prods)\n\n    def test_getDetailedFissionProductsPass(self):\n        self.cs[CONF_FP_MODEL] = \"noFissionProducts\"\n\n        prods = self.w._getDetailedFissionProducts({})\n        self.assertEqual(len(prods), 0)\n\n    def test_getDetailedFPDensities(self):\n        self.w.modelFissionProducts = False\n        dens = self.w._getDetailedFPDensities()\n        self.assertEqual(len(dens), 0)\n\n        self.w.modelFissionProducts = True\n        with self.assertRaises(AttributeError):\n            dens = self.w._getDetailedFPDensities()\n\n    def test_isCriticalBucklingSearchActive(self):\n        isActive = self.w._isCriticalBucklingSearchActive\n        self.assertTrue(isActive)\n\n    def test_getDriverBlock(self):\n        self.w.driverXsID = \"\"\n        b = self.w._getDriverBlock()\n        self.assertIsNone(b)\n        self.w.driverXsID = \"AA\"\n        with self.assertRaises(ValueError):\n            b = self.w._getDriverBlock()\n"
  },
  {
    "path": "armi/physics/neutronics/macroXSGenerationInterface.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"\nConverts microscopic cross sections to macroscopic cross sections by multiplying by number density.\n\n.. math::\n\n    \\Sigma_i = N_i \\sigma_i\n\n\"\"\"\n\nfrom armi import context, interfaces, mpiActions, runLog\nfrom armi.nuclearDataIO import xsCollections\nfrom armi.physics.neutronics.settings import CONF_MINIMUM_NUCLIDE_DENSITY\nfrom armi.utils import getBurnSteps, iterables\n\n\nclass MacroXSGenerator(mpiActions.MpiAction):\n    \"\"\"An action that can make macroscopic cross sections, even in parallel.\"\"\"\n\n    def __init__(\n        self,\n        blocks,\n        lib,\n        buildScatterMatrix,\n        libType,\n        minimumNuclideDensity=0.0,\n    ):\n        mpiActions.MpiAction.__init__(self)\n        self.buildScatterMatrix = buildScatterMatrix\n        self.libType = libType\n        self.lib = lib\n        self.blocks = blocks\n        self.minimumNuclideDensity = minimumNuclideDensity\n\n    def __reduce__(self):\n        # Prevent blocks and lib from being broadcast by passing None to ctor. Although lib must be broadcast, we need\n        # to do it explicitly to correctly deal with the default lib=None argument in buildMacros(), which utilizes this\n        # action. 
Default arguments make things more complicated.\n        return (\n            MacroXSGenerator,\n            (\n                None,\n                None,\n                self.buildScatterMatrix,\n                self.libType,\n                self.minimumNuclideDensity,\n            ),\n        )\n\n    def invokeHook(self):\n        # logic here gets messy due to all the default arguments in the calling method. There exists a large number of\n        # permutations to be handled.\n        if context.MPI_RANK == 0:\n            allBlocks = self.blocks\n            if allBlocks is None:\n                allBlocks = self.r.core.getBlocks()\n\n            lib = self.lib or self.r.core.lib\n        else:\n            allBlocks = []\n            lib = None\n\n        mc = xsCollections.MacroscopicCrossSectionCreator(self.buildScatterMatrix, self.minimumNuclideDensity)\n\n        if context.MPI_SIZE > 1:\n            myBlocks = self.scatterList(allBlocks)\n\n            lib = context.MPI_COMM.bcast(lib, root=0)\n\n            myMacros = [mc.createMacrosFromMicros(lib, b, libType=self.libType) for b in myBlocks]\n\n            allMacros = self.gatherList(myMacros)\n\n        else:\n            allMacros = [mc.createMacrosFromMicros(lib, b, libType=self.libType) for b in allBlocks]\n\n        if context.MPI_RANK == 0:\n            for b, macro in zip(allBlocks, allMacros):\n                b.macros = macro\n\n    @staticmethod\n    def scatterList(lst):\n        \"\"\"Helper functions for mpi communication.\"\"\"\n        if context.MPI_RANK == 0:\n            chunked = iterables.split(lst, context.MPI_SIZE)\n        else:\n            chunked = None\n\n        return context.MPI_COMM.scatter(chunked, root=0)\n\n    @staticmethod\n    def gatherList(localList):\n        \"\"\"Helper functions for mpi communication.\"\"\"\n        globalList = context.MPI_COMM.gather(localList, root=0)\n        if context.MPI_RANK == 0:\n            globalList = 
iterables.flatten(globalList)\n\n        return globalList\n\n\nclass MacroXSGenerationInterface(interfaces.Interface):\n    \"\"\"\n    Builds macroscopic cross sections on all Blocks.\n\n    Notes\n    -----\n    This probably should not be an interface since it has no interactXYZ methods. It should probably be converted to an\n    MpiAction.\n    \"\"\"\n\n    name = \"macroXsGen\"\n\n    def __init__(self, r, cs):\n        interfaces.Interface.__init__(self, r, cs)\n        self.macrosLastBuiltAt = None\n        self.minimumNuclideDensity = cs[CONF_MINIMUM_NUCLIDE_DENSITY]\n\n    def buildMacros(\n        self,\n        lib=None,\n        bListSome=None,\n        buildScatterMatrix=True,\n        libType=\"micros\",\n    ):\n        \"\"\"\n        Builds block-level macroscopic cross sections for making diffusion equation matrices.\n\n        This will use MPI if armi.context.MPI_SIZE > 1\n\n        Builds G-vectors of the basic XS ('nGamma','fission','nalph','np','n2n','nd','nt') Builds GxG matrices for\n        scatter matrices\n\n        .. impl:: Build macroscopic cross sections for blocks.\n            :id: I_ARMI_MACRO_XS\n            :implements: R_ARMI_MACRO_XS\n\n            This method builds macroscopic cross sections for a user-specified set of blocks using a specified\n            microscopic neutron or gamma cross section library. If no blocks are specified, cross sections are\n            calculated for all blocks in the core. If no library is specified, the existing r.core.lib is used. The\n            basic arithmetic involved in generating macroscopic cross sections consists of multiplying isotopic number\n            densities by isotopic microscopic cross sections and summing over all isotopes in a composition. The\n            calculation is implemented in :py:func:`computeMacroscopicGroupConstants\n            <armi.nuclearDataIO.xsCollections.computeMacroscopicGroupConstants>`. 
This method uses an\n            :py:class:`mpiAction <armi.mpiActions.MpiAction>` to distribute the work of calculating macroscopic cross\n            sections across the worker processes.\n\n        Parameters\n        ----------\n        lib : library object , optional\n            If lib is specified, then buildMacros will build macro XS using micro XS data from lib. If lib = None, then\n            buildMacros will use the existing library self.r.core.lib. If that does not exist, then buildMacros will use\n            a new nuclearDataIO.ISOTXS object.\n        buildScatterMatrix : Boolean, optional\n            If True, all macro XS will be built, including the time-consuming scatter matrix. If False, only the macro\n            XS that are needed for fluxRecon.computePinMGFluxAndPower will be built. These include 'transport',\n            'fission', and a few others. No ng x ng matrices (such as 'scatter' or 'chi') will be built. Essentially,\n            this option saves huge runtime for the fluxRecon module.\n        libType : str, optional\n            The block attribute containing the desired microscopic XS for this block: either \"micros\" for neutron XS or\n            \"gammaXS\" for gamma XS.\n        \"\"\"\n        cycle = self.r.p.cycle\n        burnSteps = getBurnSteps(self.cs)\n        self.macrosLastBuiltAt = sum([burnSteps[i] + 1 for i in range(cycle)]) + self.r.p.timeNode\n\n        runLog.important(\"Building macro XS\")\n        xsGen = MacroXSGenerator(\n            bListSome,\n            lib,\n            buildScatterMatrix,\n            libType,\n            self.minimumNuclideDensity,\n        )\n        xsGen.broadcast()\n        xsGen.invoke(self.o, self.r, self.cs)\n"
  },
  {
    "path": "armi/physics/neutronics/parameters.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nParameter definitions for the Neutronics Plugin.\n\nWe hope neutronics plugins that compute flux will use ``mgFlux``, etc., which will enable modular\nconstruction of apps.\n\"\"\"\n\nfrom armi.reactor import parameters\nfrom armi.reactor.blocks import Block\nfrom armi.reactor.parameters import ParamLocation\nfrom armi.reactor.parameters.parameterDefinitions import isNumpyArray\nfrom armi.reactor.reactors import Core\nfrom armi.utils import units\n\n\ndef getNeutronicsParameterDefinitions():\n    \"\"\"Return ParameterDefinitionCollections for each appropriate ArmiObject.\"\"\"\n    return {Block: _getNeutronicsBlockParams(), Core: _getNeutronicsCoreParams()}\n\n\ndef _getNeutronicsBlockParams():\n    pDefs = parameters.ParameterDefinitionCollection()\n    with pDefs.createBuilder() as pb:\n        pb.defParam(\n            \"axMesh\",\n            units=units.UNITLESS,\n            description=\"number of neutronics axial mesh points in this block\",\n            default=None,\n            categories=[parameters.Category.retainOnReplacement],\n        )\n\n        pb.defParam(\n            \"mgFlux\",\n            setter=isNumpyArray(\"mgFlux\"),\n            units=f\"n*{units.CM}/{units.SECONDS}\",\n            description=\"multigroup volume-integrated flux\",\n            location=ParamLocation.VOLUME_INTEGRATED,\n            saveToDB=True,\n          
  categories=[\n                parameters.Category.fluxQuantities,\n                parameters.Category.multiGroupQuantities,\n            ],\n            default=None,\n        )\n\n        pb.defParam(\n            \"adjMgFlux\",\n            units=f\"n*{units.CM}/{units.SECONDS}\",\n            description=\"multigroup adjoint neutron flux\",\n            location=ParamLocation.VOLUME_INTEGRATED,\n            saveToDB=True,\n            categories=[\n                parameters.Category.fluxQuantities,\n                parameters.Category.multiGroupQuantities,\n            ],\n            default=None,\n        )\n\n        pb.defParam(\n            \"lastMgFlux\",\n            units=f\"n*{units.CM}/{units.SECONDS}\",\n            description=\"multigroup volume-integrated flux used for averaging the latest and previous depletion step\",\n            location=ParamLocation.VOLUME_INTEGRATED,\n            saveToDB=False,\n            categories=[\n                parameters.Category.fluxQuantities,\n                parameters.Category.multiGroupQuantities,\n            ],\n            default=None,\n        )\n\n        pb.defParam(\n            \"mgFluxGamma\",\n            units=f\"#*{units.CM}/{units.SECONDS}\",\n            description=\"multigroup gamma flux\",\n            location=ParamLocation.VOLUME_INTEGRATED,\n            saveToDB=True,\n            categories=[\n                parameters.Category.fluxQuantities,\n                parameters.Category.multiGroupQuantities,\n                parameters.Category.gamma,\n            ],\n            default=None,\n        )\n\n        pb.defParam(\n            \"mgNeutronVelocity\",\n            units=f\"{units.CM}/{units.SECONDS}\",\n            description=\"multigroup neutron velocity\",\n            location=ParamLocation.AVERAGE,\n            saveToDB=True,\n            categories=[parameters.Category.multiGroupQuantities],\n            default=None,\n        )\n\n        pb.defParam(\n            
\"extSrc\",\n            units=f\"#/{units.CM}^3/{units.SECONDS}\",\n            description=\"multigroup external source\",\n            location=ParamLocation.AVERAGE,\n            saveToDB=False,\n            categories=[parameters.Category.multiGroupQuantities],\n            default=None,\n        )\n\n        pb.defParam(\n            \"mgGammaSrc\",\n            units=f\"#/{units.CM}^3/{units.SECONDS}\",\n            description=\"multigroup gamma source\",\n            location=ParamLocation.AVERAGE,\n            saveToDB=True,\n            categories=[\n                parameters.Category.multiGroupQuantities,\n                parameters.Category.gamma,\n            ],\n            default=None,\n        )\n\n        pb.defParam(\n            \"gammaSrc\",\n            units=f\"#/{units.CM}^3/{units.SECONDS}\",\n            description=\"gamma source\",\n            location=ParamLocation.AVERAGE,\n            saveToDB=True,\n            categories=[parameters.Category.gamma],\n            default=0.0,\n        )\n\n        # Not anointing the pin fluxes as a MG quantity, since it has an extra dimension, which\n        # could lead to issues, depending on how the multiGroupQuantities category gets used\n        pb.defParam(\n            \"pinMgFluxes\",\n            units=f\"n/{units.CM}^2/{units.SECONDS}\",\n            description=\"\"\"\n            The block-level pin multigroup fluxes. pinMgFluxes[i, g] represents the flux in group g\n            for pin i. Flux units are the standard n/cm^2/s. 
The \"ARMI pin ordering\" is used, which\n            is counter-clockwise from 3 o'clock.\n            \"\"\",\n            categories=[parameters.Category.pinQuantities],\n            saveToDB=True,\n            default=None,\n        )\n\n        pb.defParam(\n            \"pinMgFluxesAdj\",\n            units=units.UNITLESS,\n            description=\"should be a blank 3-D array, but re-defined later (nPins x ng x nAxialSegments)\",\n            categories=[parameters.Category.pinQuantities],\n            saveToDB=False,\n            default=None,\n        )\n\n        pb.defParam(\n            \"pinMgFluxesGamma\",\n            units=f\"#/{units.CM}^2/{units.SECONDS}\",\n            description=\"should be a blank 3-D array, but re-defined later (nPins x ng x nAxialSegments)\",\n            categories=[parameters.Category.pinQuantities, parameters.Category.gamma],\n            saveToDB=False,\n            default=None,\n        )\n\n        pb.defParam(\n            \"chi\",\n            units=units.UNITLESS,\n            description=\"Energy distribution of fission neutrons\",\n            location=ParamLocation.AVERAGE,\n            saveToDB=True,\n            default=None,\n        )\n\n        pb.defParam(\n            \"linPow\",\n            units=f\"{units.WATTS}/{units.METERS}\",\n            description=(\n                \"Pin-averaged linear heat rate, which is calculated by evaluating the block power \"\n                \"and dividing by the number of pins. If gamma transport is enabled, then this \"\n                \"represents the combined neutron and gamma heating. If gamma transport is disabled \"\n                \"then this represents the energy generation in the pin, where gammas are assumed to \"\n                \"deposit their energy locally. Note that this value does not implicitly account \"\n                \"for axial and radial peaking factors within the block. 
Use `linPowByPin` for \"\n                \"obtaining the pin linear heat rate with peaking factors included.\"\n            ),\n            location=ParamLocation.AVERAGE,\n            default=0.0,\n            categories=[\n                parameters.Category.detailedAxialExpansion,\n                parameters.Category.neutronics,\n            ],\n        )\n\n        pb.defParam(\n            \"linPowByPin\",\n            setter=isNumpyArray(\"linPowByPin\"),\n            units=f\"{units.WATTS}/{units.CM}\",\n            description=(\n                \"Pin linear heat rate, which is calculated through flux reconstruction and \"\n                \"accounts for axial and radial peaking factors. This differs from the `linPow` \"\n                \"parameter, which assumes no axial and radial peaking in the block as this \"\n                \"information is unavailable without detailed flux reconstruction. The same \"\n                \"application of neutron and gamma heating results applies.\"\n            ),\n            location=ParamLocation.CHILDREN,\n            categories=[parameters.Category.pinQuantities],\n            default=None,\n        )\n\n        # gamma category because linPowByPin is only split by neutron/gamma when gamma is activated\n        pb.defParam(\n            \"linPowByPinNeutron\",\n            setter=isNumpyArray(\"linPowByPinNeutron\"),\n            units=f\"{units.WATTS}/{units.CM}\",\n            description=\"Pin linear neutron heat rate. This is the neutron heating component of `linPowByPin`\",\n            location=ParamLocation.CHILDREN,\n            categories=[parameters.Category.pinQuantities, parameters.Category.gamma],\n            default=None,\n        )\n\n        pb.defParam(\n            \"linPowByPinGamma\",\n            setter=isNumpyArray(\"linPowByPinGamma\"),\n            units=f\"{units.WATTS}/{units.CM}\",\n            description=\"Pin linear gamma heat rate. 
This is the gamma heating component of `linPowByPin`\",\n            location=ParamLocation.CHILDREN,\n            categories=[parameters.Category.pinQuantities, parameters.Category.gamma],\n            default=None,\n        )\n\n        pb.defParam(\n            \"reactionRates\",\n            units=f\"#/{units.SECONDS}\",\n            description='List of reaction rates in specified by setting \"reactionsToDB\"',\n            location=ParamLocation.VOLUME_INTEGRATED,\n            categories=[parameters.Category.fluxQuantities],\n            default=None,\n        )\n\n    with pDefs.createBuilder(\n        saveToDB=True,\n        default=None,\n        location=ParamLocation.EDGES,\n        categories=[parameters.Category.detailedAxialExpansion, \"depletion\"],\n    ) as pb:\n        pb.defParam(\n            \"pointsEdgeFastFluxFr\",\n            units=units.UNITLESS,\n            description=\"Fraction of flux above 100keV at edges of the block\",\n        )\n\n        pb.defParam(\n            \"pointsEdgeDpa\",\n            setter=isNumpyArray(\"pointsEdgeDpa\"),\n            units=units.DPA,\n            description=\"displacements per atom at edges of the block\",\n            location=ParamLocation.EDGES | ParamLocation.BOTTOM,\n            categories=[\"cumulative\", \"detailedAxialExpansion\", \"depletion\"],\n        )\n\n        pb.defParam(\n            \"pointsEdgeDpaRate\",\n            setter=isNumpyArray(\"pointsEdgeDpaRate\"),\n            units=f\"{units.DPA}/{units.SECONDS}\",\n            description=\"Current time derivative of the displacement per atoms at edges of the block\",\n            location=ParamLocation.EDGES | ParamLocation.BOTTOM,\n        )\n\n    with pDefs.createBuilder(\n        saveToDB=True,\n        default=None,\n        location=ParamLocation.CORNERS,\n        categories=[\n            parameters.Category.detailedAxialExpansion,\n            parameters.Category.depletion,\n        ],\n    ) as pb:\n        
pb.defParam(\n            \"cornerFastFlux\",\n            units=f\"n/{units.CM}^2/{units.SECONDS}\",\n            description=\"Neutron flux above 100keV at hexagon block corners\",\n        )\n\n        pb.defParam(\n            \"pointsCornerFastFluxFr\",\n            units=units.UNITLESS,\n            description=\"Fraction of flux above 100keV at corners of the block\",\n        )\n\n        pb.defParam(\n            \"pointsCornerDpa\",\n            setter=isNumpyArray(\"pointsCornerDpa\"),\n            units=units.DPA,\n            description=\"displacements per atom at corners of the block\",\n            location=ParamLocation.CORNERS | ParamLocation.BOTTOM,\n            categories=[\"cumulative\", \"detailedAxialExpansion\", \"depletion\"],\n        )\n\n        pb.defParam(\n            \"pointsCornerDpaRate\",\n            setter=isNumpyArray(\"pointsCornerDpaRate\"),\n            units=f\"{units.DPA}/{units.SECONDS}\",\n            description=\"Current time derivative of the displacement per atoms at corners of the block\",\n            location=ParamLocation.CORNERS | ParamLocation.BOTTOM,\n        )\n\n    with pDefs.createBuilder(\n        default=0.0,\n        location=ParamLocation.AVERAGE,\n        categories=[parameters.Category.detailedAxialExpansion],\n    ) as pb:\n        # Neutronics reaction rate params that are not re-derived in mesh conversion\n        pb.defParam(\n            \"rateBalance\",\n            units=f\"1/{units.CM}^3/{units.SECONDS}\",\n            description=\"Numerical balance between particle production and destruction (should be small)\",\n        )\n\n        pb.defParam(\n            \"rateProdNet\",\n            units=f\"1/{units.CM}^3/{units.SECONDS}\",\n            description=\"The total neutron production including (n,2n) source and fission source.\",\n        )\n\n        pb.defParam(\n            \"capturePowerFrac\",\n            units=units.UNITLESS,\n            description=\"Fraction of the power 
produced through capture in a block.\",\n            saveToDB=\"True\",\n        )\n\n        pb.defParam(\n            \"fluence\",\n            units=f\"#/{units.CM}^2\",\n            description=\"Fluence\",\n            categories=[\"cumulative\"],\n        )\n\n        pb.defParam(\n            \"flux\",\n            units=f\"n/{units.CM}^2/{units.SECONDS}\",\n            description=\"neutron flux\",\n            categories=[\n                parameters.Category.retainOnReplacement,\n                parameters.Category.fluxQuantities,\n            ],\n        )\n\n        pb.defParam(\"fluxAdj\", units=units.UNITLESS, description=\"Adjoint flux\")\n\n        pb.defParam(\n            \"pdens\",\n            units=f\"{units.WATTS}/{units.CM}^3\",\n            description=\"Average volumetric power density\",\n            categories=[parameters.Category.neutronics],\n        )\n\n        pb.defParam(\n            \"pdensDecay\",\n            units=f\"{units.WATTS}/{units.CM}^3\",\n            description=\"Decay power density from decaying radionuclides\",\n        )\n\n        pb.defParam(\n            \"arealPd\",\n            units=f\"{units.MW}/{units.METERS}^2\",\n            description=\"Power divided by XY area\",\n        )\n\n        pb.defParam(\n            \"fisDens\",\n            units=f\"fissions/{units.CM}^3/{units.SECONDS}\",\n            description=\"Fission density in a pin (scaled up from homogeneous)\",\n        )\n\n        pb.defParam(\n            \"fisDensHom\",\n            units=f\"1/{units.CM}^3/{units.SECONDS}\",\n            description=\"Homogenized fissile density\",\n        )\n\n        pb.defParam(\n            \"fluxGamma\",\n            units=f\"#/{units.CM}^2/{units.SECONDS}\",\n            description=\"Gamma scalar flux\",\n            categories=[\n                parameters.Category.retainOnReplacement,\n                parameters.Category.fluxQuantities,\n            ],\n        )\n\n        pb.defParam(\n            
\"fluxPeak\",\n            units=f\"n/{units.CM}^2/{units.SECONDS}\",\n            description=\"Peak neutron flux calculated within the mesh\",\n            location=ParamLocation.MAX,\n        )\n\n        pb.defParam(\n            \"kInf\",\n            units=units.UNITLESS,\n            description=(\n                \"Neutron production rate in this block/neutron absorption rate in this \"\n                \"block. Not truly kinf but a reasonable approximation of reactivity.\"\n            ),\n        )\n\n        pb.defParam(\"medAbsE\", units=units.EV, description=\"Median neutron absorption energy\")\n\n        pb.defParam(\n            \"medFisE\",\n            units=units.EV,\n            description=\"Median energy of neutron causing fission\",\n        )\n\n        pb.defParam(\"medFlxE\", units=units.EV, description=\"Median neutron flux energy\")\n\n        pb.defParam(\n            \"pdensGamma\",\n            units=f\"{units.WATTS}/{units.CM}^3\",\n            description=\"Average volumetric gamma power density\",\n            categories=[parameters.Category.gamma],\n        )\n\n        # gamma category because pdens is only split by neutron/gamma when gamma is activated\n        pb.defParam(\n            \"pdensNeutron\",\n            units=f\"{units.WATTS}/{units.CM}^3\",\n            description=\"Average volumetric neutron power density\",\n            categories=[parameters.Category.gamma],\n        )\n\n        pb.defParam(\n            \"ppdens\",\n            units=f\"{units.WATTS}/{units.CM}^3\",\n            description=\"Peak power density\",\n            location=ParamLocation.MAX,\n        )\n\n        pb.defParam(\n            \"ppdensGamma\",\n            units=f\"{units.WATTS}/{units.CM}^3\",\n            description=\"Peak gamma density\",\n            categories=[parameters.Category.gamma],\n            location=ParamLocation.MAX,\n        )\n\n    # rx rate params that are derived during mesh conversion.\n    # We'd like all 
things that can be derived from flux and XS to be\n    # in this category to minimize numerical diffusion but it is a WIP.\n    with pDefs.createBuilder(\n        default=0.0,\n        location=ParamLocation.AVERAGE,\n    ) as pb:\n        pb.defParam(\n            \"rateAbs\",\n            units=f\"1/{units.CM}^3/{units.SECONDS}\",\n            description=\"Total absorption rate in this block (fisson + capture).\",\n        )\n\n        pb.defParam(\n            \"rateCap\",\n            units=f\"1/{units.CM}^3/{units.SECONDS}\",\n            description=\"Parasitic capture rate in this block.\",\n        )\n\n        pb.defParam(\n            \"rateProdN2n\",\n            units=f\"1/{units.CM}^3/{units.SECONDS}\",\n            description=\"Production rate of neutrons from n2n reactions.\",\n        )\n\n    with pDefs.createBuilder(\n        default=0.0,\n        location=ParamLocation.AVERAGE,\n        categories=[parameters.Category.detailedAxialExpansion],\n    ) as pb:\n        pb.defParam(\n            \"rateFis\",\n            units=f\"1/{units.CM}^3/{units.SECONDS}\",\n            description=\"Fission rate in this block.\",\n        )\n\n        pb.defParam(\n            \"rateProdFis\",\n            units=f\"1/{units.CM}^3/{units.SECONDS}\",\n            description=\"Production rate of neutrons from fission reactions (nu * fission source / k-eff)\",\n        )\n\n    with pDefs.createBuilder(\n        default=0.0,\n        location=ParamLocation.VOLUME_INTEGRATED,\n        categories=[parameters.Category.detailedAxialExpansion],\n    ) as pb:\n        pb.defParam(\n            \"powerGenerated\",\n            units=units.WATTS,\n            description=\"Generated power. 
Different than b.p.power only when gamma transport is activated.\",\n            categories=[parameters.Category.gamma],\n        )\n\n        pb.defParam(\n            \"power\",\n            units=units.WATTS,\n            description=\"Total power\",\n            categories=[parameters.Category.neutronics],\n        )\n\n        pb.defParam(\n            \"powerGamma\",\n            units=units.WATTS,\n            description=\"Total gamma power\",\n            categories=[parameters.Category.gamma],\n        )\n\n        # gamma category because power is only split by neutron/gamma when gamma is activated\n        pb.defParam(\n            \"powerNeutron\",\n            units=units.WATTS,\n            description=\"Total neutron power\",\n            categories=[parameters.Category.gamma],\n        )\n\n    with pDefs.createBuilder(default=0.0) as pb:\n        pb.defParam(\n            \"detailedDpaThisCycle\",\n            units=units.DPA,\n            location=ParamLocation.AVERAGE,\n            description=(\n                \"Displacement per atom accumulated during this cycle. 
This accumulates \"\n                \"over a cycle and resets to zero at BOC.\"\n            ),\n            categories=[\n                parameters.Category.cumulativeOverCycle,\n                parameters.Category.detailedAxialExpansion,\n            ],\n        )\n\n        pb.defParam(\n            \"detailedDpaPeakRate\",\n            units=f\"{units.DPA}/{units.SECONDS}\",\n            description=\"Peak DPA rate based on detailedDpaPeak\",\n            location=ParamLocation.MAX,\n            categories=[parameters.Category.cumulative, parameters.Category.neutronics],\n        )\n\n        pb.defParam(\n            \"enrichmentBOL\",\n            units=units.UNITLESS,\n            description=\"Enrichment during fabrication (mass fraction)\",\n        )\n\n        pb.defParam(\n            \"fastFlux\",\n            units=f\"1/{units.CM}^2/{units.SECONDS}\",\n            description=\"Neutron flux above 100keV\",\n            location=ParamLocation.AVERAGE,\n            categories=[\"detailedAxialExpansion\"],\n        )\n\n        pb.defParam(\n            \"fastFluxFr\",\n            units=units.UNITLESS,\n            description=\"Fraction of flux above 100keV\",\n            location=ParamLocation.AVERAGE,\n            categories=[\"detailedAxialExpansion\"],\n        )\n\n        pb.defParam(\n            \"pdensGenerated\",\n            units=f\"{units.WATTS}/{units.CM}^3\",\n            description=(\n                \"Volume-averaged generated power density. 
Different than b.p.pdens only \"\n                \"when gamma transport is activated.\"\n            ),\n            location=ParamLocation.AVERAGE,\n            categories=[parameters.Category.gamma],\n        )\n\n    return pDefs\n\n\ndef _getNeutronicsCoreParams():\n    pDefs = parameters.ParameterDefinitionCollection()\n\n    with pDefs.createBuilder(categories=[parameters.Category.neutronics]) as pb:\n        pb.defParam(\n            \"eigenvalues\",\n            units=units.UNITLESS,\n            description=\"All available lambda-eigenvalues of reactor.\",\n            default=None,  # will be a list though, can't set default to mutable type.\n            location=ParamLocation.AVERAGE,\n        )\n\n        pb.defParam(\n            \"kInf\",\n            units=units.UNITLESS,\n            description=\"k-infinity\",\n            default=0.0,\n            location=ParamLocation.AVERAGE,\n        )\n\n    return pDefs\n"
  },
  {
    "path": "armi/physics/neutronics/plugin.py",
    "content": "# Copyright 2025 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nA boilerplate entry for a neutronics physics plugin.\n\nThe ARMI Framework comes with a neutronics plugin that introduces two independent interfaces:\n\n:py:mod:`~armi.physics.neutronics.fissionProductModel`\n    Handles fission product modeling\n\n:py:mod:`~armi.physics.neutronics.crossSectionGroupManager`\n    Handles the management of different cross section \"groups\"\n\"\"\"\n\nimport numpy as np\n\nfrom armi import plugins, runLog\nfrom armi.physics.neutronics.const import CONF_CROSS_SECTION\nfrom armi.utils import tabulate\n\n\nclass NeutronicsPlugin(plugins.ArmiPlugin):\n    \"\"\"The built-in neutronics plugin with a few capabilities and a lot of state parameter definitions.\"\"\"\n\n    @staticmethod\n    @plugins.HOOKIMPL\n    def exposeInterfaces(cs):\n        \"\"\"Collect and expose all of the interfaces that live under the built-in neutronics package.\"\"\"\n        from armi.physics.neutronics import crossSectionGroupManager\n        from armi.physics.neutronics.fissionProductModel import fissionProductModel\n\n        interfaceInfo = []\n        for mod in (crossSectionGroupManager, fissionProductModel):\n            interfaceInfo += plugins.collectInterfaceDescriptions(mod, cs)\n\n        return interfaceInfo\n\n    @staticmethod\n    @plugins.HOOKIMPL\n    def defineParameters():\n        \"\"\"Define parameters for the plugin.\"\"\"\n  
      from armi.physics.neutronics import parameters as neutronicsParameters\n\n        return neutronicsParameters.getNeutronicsParameterDefinitions()\n\n    @staticmethod\n    @plugins.HOOKIMPL\n    def defineParameterRenames():\n        return {\"buGroup\": \"envGroup\", \"buGroupNum\": \"envGroupNum\"}\n\n    @staticmethod\n    @plugins.HOOKIMPL\n    def defineEntryPoints():\n        \"\"\"Define entry points for the plugin.\"\"\"\n        from armi.physics.neutronics import diffIsotxs\n\n        entryPoints = [diffIsotxs.CompareIsotxsLibraries]\n\n        return entryPoints\n\n    @staticmethod\n    @plugins.HOOKIMPL\n    def defineSettings():\n        \"\"\"Define settings for the plugin.\"\"\"\n        from armi.physics.neutronics import crossSectionSettings\n        from armi.physics.neutronics import settings as neutronicsSettings\n        from armi.physics.neutronics.fissionProductModel import (\n            fissionProductModelSettings,\n        )\n\n        settings = [\n            crossSectionSettings.XSSettingDef(\n                CONF_CROSS_SECTION,\n            )\n        ]\n        settings += neutronicsSettings.defineSettings()\n        settings += fissionProductModelSettings.defineSettings()\n\n        return settings\n\n    @staticmethod\n    @plugins.HOOKIMPL\n    def defineSettingsValidators(inspector):\n        \"\"\"Implementation of settings inspections for neutronics settings.\"\"\"\n        from armi.physics.neutronics.fissionProductModel.fissionProductModelSettings import (\n            getFissionProductModelSettingValidators,\n        )\n        from armi.physics.neutronics.settings import getNeutronicsSettingValidators\n\n        settingsValidators = getNeutronicsSettingValidators(inspector)\n        settingsValidators.extend(getFissionProductModelSettingValidators(inspector))\n        return settingsValidators\n\n    @staticmethod\n    @plugins.HOOKIMPL\n    def onProcessCoreLoading(core, cs, dbLoad):\n        \"\"\"Called whenever a 
Core object is newly built.\"\"\"\n        applyEffectiveDelayedNeutronFractionToCore(core, cs)\n\n    @staticmethod\n    @plugins.HOOKIMPL\n    def getReportContents(r, cs, report, stage, blueprint):\n        \"\"\"Generates the Report Content for the Neutronics Report.\"\"\"\n        from armi.physics.neutronics import reports\n\n        return reports.insertNeutronicsReport(r, cs, report, stage)\n\n\ndef applyEffectiveDelayedNeutronFractionToCore(core, cs):\n    \"\"\"Process the settings for the delayed neutron fraction and precursor decay constants.\"\"\"\n    # Verify and set the core beta parameters based on the user-supplied settings\n    beta = cs[\"beta\"]\n    decayConstants = cs[\"decayConstants\"]\n\n    # If beta is interpreted as a float, then assign it to the total delayed neutron fraction\n    # parameter. Otherwise, setup the group-wise delayed neutron fractions and precursor decay\n    # constants.\n    reportTableData = []\n    if isinstance(beta, float):\n        core.p.beta = beta\n        reportTableData.append((\"Total Delayed Neutron Fraction\", core.p.beta))\n\n    elif isinstance(beta, list) and isinstance(decayConstants, list):\n        if len(beta) != len(decayConstants):\n            raise ValueError(\n                f\"The values for `beta` ({beta}) and `decayConstants` ({decayConstants}) are not consistent lengths.\"\n            )\n\n        core.p.beta = sum(beta)\n        core.p.betaComponents = np.array(beta)\n        core.p.betaDecayConstants = np.array(decayConstants)\n\n        reportTableData.append((\"Total Delayed Neutron Fraction\", core.p.beta))\n        for i, betaComponent in enumerate(core.p.betaComponents):\n            reportTableData.append((f\"Group {i} Delayed Neutron Fractions\", betaComponent))\n        for i, decayConstant in enumerate(core.p.betaDecayConstants):\n            reportTableData.append((\"Group {i} Precursor Decay Constants\", decayConstant))\n\n    # Report to the user the values were not 
applied.\n    if not reportTableData and (beta is not None or decayConstants is not None):\n        runLog.warning(\n            f\"Delayed neutron fraction(s) - {beta} and decay constants - {decayConstants} have not been applied.\"\n        )\n    else:\n        runLog.extra(\n            tabulate.tabulate(\n                data=reportTableData,\n                headers=[\"Component\", \"Value\"],\n                tableFmt=\"armi\",\n            )\n        )\n"
  },
  {
    "path": "armi/physics/neutronics/settings.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Some generic neutronics-related settings.\"\"\"\n\nimport os\n\nfrom armi import runLog\nfrom armi.physics.neutronics import LatticePhysicsFrequency\nfrom armi.physics.neutronics.const import NEUTRON\nfrom armi.physics.neutronics.energyGroups import GROUP_STRUCTURE\nfrom armi.settings import setting, settingsValidation\nfrom armi.settings.fwSettings.globalSettings import (\n    CONF_DETAILED_AXIAL_EXPANSION,\n    CONF_NON_UNIFORM_ASSEM_FLAGS,\n    CONF_RUN_TYPE,\n)\nfrom armi.utils import directoryChangers\n\nCONF_BOUNDARIES = \"boundaries\"\nCONF_DPA_PER_FLUENCE = \"dpaPerFluence\"\nCONF_EIGEN_PROB = \"eigenProb\"\nCONF_EPS_EIG = \"epsEig\"\nCONF_EPS_FSAVG = \"epsFSAvg\"\nCONF_EPS_FSPOINT = \"epsFSPoint\"\nCONF_GEN_XS = \"genXS\"  # gamma stuff and neutronics plugin/lattice physics\nCONF_GLOBAL_FLUX_ACTIVE = \"globalFluxActive\"\nCONF_GROUP_STRUCTURE = \"groupStructure\"\nCONF_INNERS_ = \"inners\"\nCONF_LOADING_FILE = \"loadingFile\"\nCONF_MCNP_LIB_BASE = \"mcnpLibraryVersion\"\nCONF_NEUTRONICS_KERNEL = \"neutronicsKernel\"\nCONF_NEUTRONICS_TYPE = \"neutronicsType\"\nCONF_OUTERS_ = \"outers\"\nCONF_RESTART_NEUTRONICS = \"restartNeutronics\"\n\n# used by global flux interface\nCONF_ACLP_DOSE_LIMIT = \"aclpDoseLimit\"\nCONF_DPA_XS_SET = \"dpaXsSet\"\nCONF_GRID_PLATE_DPA_XS_SET = \"gridPlateDpaXsSet\"\nCONF_LOAD_PAD_ELEVATION = 
\"loadPadElevation\"\nCONF_LOAD_PAD_LENGTH = \"loadPadLength\"\nCONF_OPT_DPA = [\n    \"\",\n    \"dpa_EBRII_INC600\",\n    \"dpa_EBRII_INCX750\",\n    \"dpa_EBRII_HT9\",\n    \"dpa_EBRII_PE16\",\n    \"dpa_EBRII_INC625\",\n]\n\n# moved from xsSettings\nCONF_CLEAR_XS = \"clearXS\"\nCONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION = \"disableBlockTypeExclusionInXsGeneration\"\nCONF_LATTICE_PHYSICS_FREQUENCY = \"latticePhysicsFrequency\"\nCONF_MINIMUM_FISSILE_FRACTION = \"minimumFissileFraction\"\nCONF_MINIMUM_NUCLIDE_DENSITY = \"minimumNuclideDensity\"\nCONF_TOLERATE_BURNUP_CHANGE = \"tolerateBurnupChange\"\nCONF_XS_BLOCK_REPRESENTATION = \"xsBlockRepresentation\"\nCONF_XS_KERNEL = \"xsKernel\"\n\n\ndef defineSettings():\n    \"\"\"Standard function to define settings; for neutronics.\"\"\"\n    settings = [\n        setting.Setting(\n            CONF_GROUP_STRUCTURE,\n            default=\"ANL33\",\n            label=\"Number of Energy Groups\",\n            description=\"Energy group structure to use in neutronics simulations\",\n            options=[\n                \"ANL9\",\n                \"ANL33\",\n                \"ANL70\",\n                \"ANL116\",\n                \"ANL230\",\n                \"ANL703\",\n                \"ANL1041\",\n                \"ANL2082\",\n                \"ARMI33\",\n                \"ARMI45\",\n                \"CINDER63\",\n                \"348\",\n            ],\n        ),\n        setting.Setting(\n            CONF_GLOBAL_FLUX_ACTIVE,\n            default=\"Neutron\",\n            label=\"Global Flux Calculation\",\n            description=\"Calculate the global flux at each timestep for the selected particle \"\n            \"type(s) using the specified neutronics kernel.\",\n            options=[\"\", \"Neutron\", \"Neutron and Gamma\"],\n        ),\n        setting.Setting(\n            CONF_GEN_XS,\n            default=\"\",\n            label=\"Multigroup Cross Sections Generation\",\n            
description=\"Generate multigroup cross sections for the selected particle \"\n            \"type(s) using the specified lattice physics kernel (see Lattice Physics \"\n            \"tab). When not set, the XS library will be auto-loaded from an existing \"\n            \"ISOTXS in the working directory, but fail if there is no ISOTXS.\",\n            options=[\"\", \"Neutron\", \"Neutron and Gamma\"],\n        ),\n        setting.Setting(\n            CONF_DPA_PER_FLUENCE,\n            default=4.01568627451e-22,\n            label=\"DPA Per Fluence\",\n            description=\"A quick and dirty conversion that is used to get dpaPeak\",\n        ),\n        setting.Setting(\n            CONF_BOUNDARIES,\n            default=\"Extrapolated\",\n            label=\"Neutronic BCs\",\n            description=\"External Neutronic Boundary Conditions. Reflective does not include axial.\",\n            options=[\n                \"Extrapolated\",\n                \"Reflective\",\n                \"Infinite\",\n                \"ZeroSurfaceFlux\",\n                \"ZeroInwardCurrent\",\n                \"Generalized\",\n            ],\n            enforcedOptions=True,\n        ),\n        setting.Setting(\n            CONF_NEUTRONICS_KERNEL,\n            default=\"\",\n            label=\"Neutronics Kernel\",\n            description=\"The neutronics / depletion solver for global flux solve.\",\n            options=[],\n            enforcedOptions=True,\n        ),\n        setting.Setting(\n            CONF_MCNP_LIB_BASE,\n            default=\"ENDF/B-VII.1\",\n            label=\"ENDF data library version to use for MCNP Analysis\",\n            description=(\n                \"This setting controls the nuclides in the problem according to \"\n                \"the available nuclides in the selected library. For instance, \"\n                \"some MCNP libraries contain elemental nuclides while others do \"\n                f\"not. 
Only used when MCNP is selected as {CONF_NEUTRONICS_KERNEL}.\"\n            ),\n            options=[\"ENDF/B-V.0\", \"ENDF/B-VII.0\", \"ENDF/B-VII.1\", \"ENDF/B-VIII.0\"],\n        ),\n        setting.Setting(\n            CONF_NEUTRONICS_TYPE,\n            default=\"real\",\n            label=\"Neutronics Type\",\n            description=\"The type of neutronics solution that is desired.\",\n            options=[\"real\", \"adjoint\", \"both\"],\n        ),\n        setting.Setting(\n            CONF_EIGEN_PROB,\n            default=True,\n            label=\"Eigenvalue Problem\",\n            description=\"Is this a eigenvalue problem or a fixed source problem?\",\n        ),\n        setting.Setting(\n            CONF_EPS_EIG,\n            default=1e-07,\n            label=\"Eigenvalue Epsilon\",\n            description=\"Convergence criteria for calculating the eigenvalue in the global flux solver\",\n        ),\n        setting.Setting(\n            CONF_EPS_FSAVG,\n            default=1e-05,\n            label=\"FS Avg. epsilon\",\n            description=\"Convergence criteria for average fission source\",\n        ),\n        setting.Setting(\n            CONF_EPS_FSPOINT,\n            default=1e-05,\n            label=\"FS Point epsilon\",\n            description=\"Convergence criteria for point fission source\",\n        ),\n        setting.Setting(\n            CONF_LOAD_PAD_ELEVATION,\n            default=0.0,\n            label=\"Load pad elevation (cm)\",\n            description=(\n                \"The elevation of the bottom of the above-core load pad (ACLP) in cm from the bottom of the upper grid \"\n                \"plate. Used for calculating the load pad dose\"\n            ),\n        ),\n        setting.Setting(\n            CONF_LOAD_PAD_LENGTH,\n            default=0.0,\n            label=\"Load pad length (cm)\",\n            description=\"The length of the load pad. 
Used to compute average and peak dose.\",\n        ),\n        setting.Setting(\n            CONF_ACLP_DOSE_LIMIT,\n            default=80.0,\n            label=\"ALCP dose limit\",\n            description=\"Dose limit in dpa used to position the above-core load pad(if one exists)\",\n        ),\n        setting.Setting(\n            CONF_RESTART_NEUTRONICS,\n            default=False,\n            label=\"Restart neutronics\",\n            description=\"Restart global flux case using outputs from last time as a guess\",\n        ),\n        setting.Setting(\n            CONF_OUTERS_,\n            default=100,\n            label=\"Max Outer Iterations\",\n            description=\"XY and Axial partial current sweep max outer iterations.\",\n        ),\n        setting.Setting(\n            CONF_INNERS_,\n            default=0,\n            label=\"Inner Iterations\",\n            description=\"XY and Axial partial current sweep inner iterations. 0 lets the neutronics code pick a \"\n            \"default.\",\n        ),\n        setting.Setting(\n            CONF_GRID_PLATE_DPA_XS_SET,\n            default=\"dpa_EBRII_HT9\",\n            label=\"Grid plate DPA XS\",\n            description=(\"The cross sections to use for grid plate blocks DPA when computing displacements per atom.\"),\n            options=CONF_OPT_DPA,\n        ),\n        setting.Setting(\n            CONF_DPA_XS_SET,\n            default=\"dpa_EBRII_HT9\",\n            label=\"DPA Cross Sections\",\n            description=\"The cross sections to use when computing displacements per atom.\",\n            options=CONF_OPT_DPA,\n        ),\n        setting.Setting(\n            CONF_CLEAR_XS,\n            default=False,\n            label=\"Clear XS\",\n            description=\"Delete all cross section libraries before regenerating them.\",\n        ),\n        setting.Setting(\n            CONF_MINIMUM_FISSILE_FRACTION,\n            default=0.045,\n            label=\"Minimum Fissile 
Fraction\",\n            description=\"Minimum fissile fraction (fissile number densities / heavy metal number densities).\",\n            oldNames=[(\"mc2.minimumFissileFraction\", None)],\n        ),\n        setting.Setting(\n            CONF_MINIMUM_NUCLIDE_DENSITY,\n            default=1e-15,\n            label=\"Minimum nuclide density\",\n            description=\"Density to use for nuclides and fission products at infinite dilution. This is also used as \"\n            \"the minimum density considered for computing macroscopic cross sections.\",\n        ),\n        setting.Setting(\n            CONF_TOLERATE_BURNUP_CHANGE,\n            default=0.0,\n            label=\"Cross Section Burnup Group Tolerance\",\n            description=\"Burnup window for computing cross sections. If the prior \"\n            \"cross sections were computed within the window, new cross sections will \"\n            \"not be generated and the prior calculated cross sections will be used.\",\n        ),\n        setting.Setting(\n            CONF_XS_BLOCK_REPRESENTATION,\n            default=\"Average\",\n            label=\"Cross Section Block Averaging Method\",\n            description=\"The type of averaging to perform when creating cross sections for a group of blocks\",\n            options=[\n                \"Median\",\n                \"Average\",\n                \"FluxWeightedAverage\",\n                \"ComponentAverage1DSlab\",\n            ],\n        ),\n        setting.Setting(\n            CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION,\n            default=False,\n            label=\"Which Block types to merge together in XS Generation\",\n            description=\"Control which blocks get merged together by the XSGM. If set to ``None`` or ``True`` then all \"\n            \"block types in the XS ID will be considered. If set to ``False`` then a default of ['fuel'] will be used. 
\"\n            \"Can also be set to an exact list of strings for types to consider.\",\n        ),\n        setting.Setting(\n            CONF_XS_KERNEL,\n            default=\"MC2v3\",\n            label=\"Lattice Physics Kernel\",\n            description=\"Method to determine broad group cross sections for assemblies\",\n            options=[\"\", \"MC2v2\", \"MC2v3\", \"MC2v3-PARTISN\", \"SERPENT\"],\n        ),\n        setting.Setting(\n            CONF_LATTICE_PHYSICS_FREQUENCY,\n            default=\"BOC\",\n            label=\"Frequency of lattice physics updates\",\n            description=\"Define the frequency at which cross sections are updated with new lattice physics \"\n            \"interactions.\",\n            options=[opt.name for opt in list(LatticePhysicsFrequency)],\n            enforcedOptions=True,\n        ),\n    ]\n\n    return settings\n\n\ndef _blueprintsHasOldXSInput(inspector):\n    path = inspector.cs[CONF_LOADING_FILE]\n    with directoryChangers.DirectoryChanger(inspector.cs.inputDirectory):\n        with open(os.path.expandvars(path)) as f:\n            for line in f:\n                if line.startswith(\"cross sections:\"):\n                    return True\n\n    return False\n\n\ndef getNeutronicsSettingValidators(inspector):\n    \"\"\"The standard helper method, to provide validators to neutronics settings.\"\"\"\n    queries = []\n\n    def migrateXSOption(name0):\n        \"\"\"\n        The `genXS` and `globalFluxActive` settings used to take True/False as inputs,\n        this helper method migrates those to the new values.\n        \"\"\"\n        value = inspector.cs[name0]\n        if value == \"True\":\n            value = NEUTRON\n        elif value == \"False\":\n            value = \"\"\n\n        inspector.cs = inspector.cs.modified(newSettings={name0: value})\n\n    def migrateXSOptionGenXS():\n        \"\"\"pass-through to migrateXSOption(), because Query functions cannot take arguments.\"\"\"\n        
migrateXSOption(CONF_GEN_XS)\n\n    def migrateXSOptionGlobalFluxActive():\n        \"\"\"pass-through to migrateXSOption(), because Query functions cannot take arguments.\"\"\"\n        migrateXSOption(CONF_GLOBAL_FLUX_ACTIVE)\n\n    queries.append(\n        settingsValidation.Query(\n            lambda: inspector.cs[CONF_GEN_XS] in (\"True\", \"False\"),\n            \"The {0} setting cannot not take `True` or `False` as an exact value any more.\",\n            'Would you like to auto-correct {0} to the correct value? (\"\" or {1})'.format(CONF_GEN_XS, NEUTRON),\n            migrateXSOptionGenXS,\n        )\n    )\n\n    queries.append(\n        settingsValidation.Query(\n            lambda: inspector.cs[CONF_GLOBAL_FLUX_ACTIVE] in (\"True\", \"False\"),\n            \"The {0} setting cannot not take `True` or `False` as an exact value any more.\",\n            'Would you like to auto-correct {0} to the correct value? (\"\" or {1})'.format(\n                CONF_GLOBAL_FLUX_ACTIVE, NEUTRON\n            ),\n            migrateXSOptionGlobalFluxActive,\n        )\n    )\n\n    def migrateNormalBCSetting():\n        \"\"\"The `boundary` setting is migrated from `Normal` to `Extrapolated`.\"\"\"\n        inspector.cs = inspector.cs.modified(newSettings={CONF_BOUNDARIES: \"Extrapolated\"})\n\n    queries.append(\n        settingsValidation.Query(\n            lambda: inspector.cs[CONF_BOUNDARIES] == \"Normal\",\n            \"The {0} setting now takes `Extrapolated` instead of `Normal` as a value.\".format(CONF_BOUNDARIES),\n            \"Would you like to auto-correct {0} from `Normal` to `Extrapolated`?\".format(CONF_BOUNDARIES),\n            migrateNormalBCSetting,\n        )\n    )\n\n    def updateXSGroupStructure():\n        \"\"\"Trying to migrate to a valid XS group structure name.\"\"\"\n        value = inspector.cs[CONF_GROUP_STRUCTURE]\n        newValue = value.upper()\n\n        if newValue in GROUP_STRUCTURE:\n            runLog.info(\"Updating the cross 
section group structure from {} to {}\".format(value, newValue))\n        else:\n            newValue = inspector.cs.getSetting(CONF_GROUP_STRUCTURE).default\n            runLog.info(\n                \"Unable to automatically convert the {} setting of {}. Defaulting to {}\".format(\n                    CONF_GROUP_STRUCTURE, value, newValue\n                )\n            )\n\n        inspector.cs = inspector.cs.modified(newSettings={CONF_GROUP_STRUCTURE: newValue})\n\n    queries.append(\n        settingsValidation.Query(\n            lambda: inspector.cs[CONF_GROUP_STRUCTURE] not in GROUP_STRUCTURE,\n            \"The given group structure {0} was not recognized.\".format(inspector.cs[CONF_GROUP_STRUCTURE]),\n            \"Would you like to auto-correct the group structure value?\",\n            updateXSGroupStructure,\n        )\n    )\n\n    def migrateDpa(name0):\n        \"\"\"Migrating some common shortened names for dpa XS sets.\"\"\"\n        value = inspector.cs[name0]\n        if value == \"dpaHT9_33\":\n            value = \"dpaHT9_ANL33_TwrBol\"\n        elif value == \"dpa_SS316\":\n            value = \"dpaSS316_ANL33_TwrBol\"\n\n        inspector.cs = inspector.cs.modified(newSettings={name0: value})\n\n    def migrateDpaDpaXsSet():\n        \"\"\"Pass-through to migrateDpa(), because Query functions cannot take arguments.\"\"\"\n        migrateDpa(CONF_DPA_XS_SET)\n\n    def migrateDpaGridPlate():\n        \"\"\"Pass-through to migrateDpa(), because Query functions cannot take arguments.\"\"\"\n        migrateDpa(CONF_GRID_PLATE_DPA_XS_SET)\n\n    queries.append(\n        settingsValidation.Query(\n            lambda: inspector.cs[CONF_DPA_XS_SET] in (\"dpaHT9_33\", \"dpa_SS316\"),\n            \"It appears you are using a shortened version of the {0}.\".format(CONF_DPA_XS_SET),\n            \"Would you like to auto-correct this to the full name?\",\n            migrateDpaDpaXsSet,\n        )\n    )\n\n    queries.append(\n        
settingsValidation.Query(\n            lambda: inspector.cs[CONF_GRID_PLATE_DPA_XS_SET] in (\"dpaHT9_33\", \"dpa_SS316\"),\n            \"It appears you are using a shortened version of the {0}.\".format(CONF_GRID_PLATE_DPA_XS_SET),\n            \"Would you like to auto-correct this to the full name?\",\n            migrateDpaGridPlate,\n        )\n    )\n\n    queries.append(\n        settingsValidation.Query(\n            lambda: inspector.cs[CONF_DETAILED_AXIAL_EXPANSION] and inspector.cs[CONF_NON_UNIFORM_ASSEM_FLAGS],\n            f\"The use of {CONF_DETAILED_AXIAL_EXPANSION} and {CONF_NON_UNIFORM_ASSEM_FLAGS} is not supported.\",\n            \"Automatically set non-uniform assembly treatment to its default?\",\n            lambda: inspector._assignCS(\n                CONF_NON_UNIFORM_ASSEM_FLAGS,\n                inspector.cs.getSetting(CONF_NON_UNIFORM_ASSEM_FLAGS).default,\n            ),\n        )\n    )\n\n    queryMsg = (\n        \"A Snapshots case is selected but the `latticePhysicsFrequency` \"\n        \"{0} is less than `firstCoupledIteration`. `firstCoupledIteration`\"\n        \" or `all` is recommended for Snapshots when they involve large changes \"\n        \"in power or flow compared to the loaded state.\"\n    ).format(inspector.cs[CONF_LATTICE_PHYSICS_FREQUENCY])\n    queryPrompt = (\n        \"Would you like to update `latticePhysicsFrequency` from \"\n        f\"{inspector.cs[CONF_LATTICE_PHYSICS_FREQUENCY]} to `firstCoupledIteration`?\"\n    )\n    queries.append(\n        settingsValidation.Query(\n            lambda: inspector.cs[CONF_RUN_TYPE] == \"Snapshots\"\n            and not LatticePhysicsFrequency[inspector.cs[CONF_LATTICE_PHYSICS_FREQUENCY]]\n            >= LatticePhysicsFrequency.firstCoupledIteration,\n            queryMsg,\n            queryPrompt,\n            lambda: inspector._assignCS(CONF_LATTICE_PHYSICS_FREQUENCY, \"firstCoupledIteration\"),\n        )\n    )\n\n    return queries\n"
  },
  {
    "path": "armi/physics/neutronics/tests/ISOXA",
    "content": "Not a real cross section file; just a placeholder to unit test the file copying function.\n"
  },
  {
    "path": "armi/physics/neutronics/tests/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"
  },
  {
    "path": "armi/physics/neutronics/tests/rzmflxYA",
    "content": "Not a real flux spectrum file; just a placeholder to unit test the file copying function.\n"
  },
  {
    "path": "armi/physics/neutronics/tests/test_crossSectionManager.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nTest the cross section manager.\n\n:py:mod:`armi.physics.neutronics.crossSectionGroupManager`\n\"\"\"\n\nimport copy\nimport os\nimport pickle\nimport sys\nimport unittest\nfrom io import BytesIO\nfrom unittest.mock import MagicMock\n\nfrom armi import settings\nfrom armi.physics.neutronics import crossSectionGroupManager\nfrom armi.physics.neutronics.const import CONF_CROSS_SECTION\nfrom armi.physics.neutronics.crossSectionGroupManager import (\n    AverageBlockCollection,\n    BlockCollection,\n    CrossSectionGroupManager,\n    FluxWeightedAverageBlockCollection,\n    MedianBlockCollection,\n)\nfrom armi.physics.neutronics.crossSectionSettings import XSModelingOptions\nfrom armi.physics.neutronics.fissionProductModel.tests import test_lumpedFissionProduct\nfrom armi.physics.neutronics.settings import (\n    CONF_LATTICE_PHYSICS_FREQUENCY,\n    CONF_XS_BLOCK_REPRESENTATION,\n)\nfrom armi.reactor.blocks import HexBlock\nfrom armi.reactor.flags import Flags\nfrom armi.reactor.tests import test_blocks, test_reactors\nfrom armi.tests import TEST_ROOT, mockRunLogs\nfrom armi.utils import units\nfrom armi.utils.directoryChangers import TemporaryDirectoryChanger\n\nTHIS_DIR = os.path.dirname(os.path.abspath(__file__))\n\n\nclass TestBlockColl(unittest.TestCase):\n    def setUp(self):\n        self.blockList = makeBlocks()\n        self.bc = 
BlockCollection(self.blockList[0].core.r.blueprints.allNuclidesInProblem)\n        self.bc.extend(self.blockList)\n\n    def test_add(self):\n        self.bc.append(\"DummyBlock1\")\n        self.bc.extend([\"DB2\", \"DB3\"])\n        self.assertIn(\"DummyBlock1\", self.bc)\n        self.assertIn(\"DB2\", self.bc)\n        self.assertIn(\"DB3\", self.bc)\n\n    def test_getBlocksInGroup(self):\n        for b in self.blockList:\n            self.assertIn(b, self.bc)\n\n    def test_is_pickleable(self):\n        self.bc.weightingParam = \"test\"\n        buf = BytesIO()\n        pickle.dump(self.bc, buf)\n        buf.seek(0)\n        newBc = pickle.load(buf)\n        self.assertEqual(self.bc.weightingParam, newBc.weightingParam)\n\n\nclass TestBlockCollMedian(unittest.TestCase):\n    def setUp(self):\n        self.blockList = makeBlocks(5)\n        for bi, b in enumerate(self.blockList):\n            b.setType(\"fuel\")\n            b.p.percentBu = bi / 4.0 * 100\n\n        self.blockList[0], self.blockList[2] = self.blockList[2], self.blockList[0]\n        self.bc = MedianBlockCollection(self.blockList[0].core.r.blueprints.allNuclidesInProblem)\n        self.bc.extend(self.blockList)\n\n    def test_createRepresentativeBlock(self):\n        avgB = self.bc.createRepresentativeBlock()\n        self.assertAlmostEqual(avgB.p.percentBu, 50.0)\n\n    def test_getBlockNuclideTemperature(self):\n        # doesn't have to be in median block tests, but this is a simpler test\n        nuc = \"U235\"\n        testBlock = self.blockList[0]\n        amt, amtWeightedTemp = 0, 0\n        for c in testBlock:\n            dens = c.getNumberDensity(nuc)\n            if dens > 0:\n                thisAmt = dens * c.getVolume()\n                amt += thisAmt\n                amtWeightedTemp += thisAmt * c.temperatureInC\n        avgTemp = amtWeightedTemp / amt\n        self.assertAlmostEqual(avgTemp, crossSectionGroupManager.getBlockNuclideTemperature(testBlock, nuc))\n\n\nclass 
TestBlockCollAvg(unittest.TestCase):\n    @classmethod\n    def setUpClass(cls):\n        fpFactory = test_lumpedFissionProduct.getDummyLFPFile()\n        cls.blockList = makeBlocks(5)\n        for bi, b in enumerate(cls.blockList):\n            b.setType(\"fuel\")\n            b.p.percentBu = bi / 4.0 * 100\n            b.setLumpedFissionProducts(fpFactory.createLFPsFromFile())\n            # put some trace Fe-56 and Na-23 into the fuel\n            # zero out all fuel nuclides except U-235 (for mass-weighting of component temperature)\n            fuelComp = b.getComponent(Flags.FUEL)\n            for nuc in fuelComp.getNuclides():\n                b.setNumberDensity(nuc, 0.0)\n            b.setNumberDensity(\"U235\", bi)\n            fuelComp.setNumberDensity(\"FE56\", 1e-15)\n            fuelComp.setNumberDensity(\"NA23\", 1e-15)\n            b.p.gasReleaseFraction = bi * 2 / 8.0\n            for c in b:\n                if c.hasFlags(Flags.FUEL):\n                    c.temperatureInC = 600.0 + bi\n                elif c.hasFlags([Flags.CLAD, Flags.DUCT, Flags.WIRE]):\n                    c.temperatureInC = 500.0 + bi\n                elif c.hasFlags([Flags.BOND, Flags.COOLANT, Flags.INTERCOOLANT]):\n                    c.temperatureInC = 400.0 + bi\n\n    def setUp(self):\n        self.bc = AverageBlockCollection(self.blockList[0].core.r.blueprints.allNuclidesInProblem)\n        self.bc.extend(self.blockList)\n        self.bc.averageByComponent = True\n\n    def test_performAverageByComponent(self):\n        \"\"\"Check the averageByComponent attribute.\"\"\"\n        self.bc._checkBlockSimilarity = MagicMock(return_value=True)\n        self.assertTrue(self.bc._performAverageByComponent())\n        self.bc.averageByComponent = False\n        self.assertFalse(self.bc._performAverageByComponent())\n\n    def test_checkBlockSimilarity(self):\n        \"\"\"Check the block similarity test.\"\"\"\n        self.assertTrue(self.bc._checkBlockSimilarity())\n        
self.bc.append(test_blocks.loadTestBlock())\n        self.assertFalse(self.bc._checkBlockSimilarity())\n\n    def test_createRepresentativeBlock(self):\n        \"\"\"Test creation of a representative block.\n\n        .. test:: Create representative blocks using a volume-weighted averaging.\n            :id: T_ARMI_XSGM_CREATE_REPR_BLOCKS0\n            :tests: R_ARMI_XSGM_CREATE_REPR_BLOCKS\n        \"\"\"\n        avgB = self.bc.createRepresentativeBlock()\n        self.assertNotIn(avgB, self.bc)\n        # (0 + 1 + 2 + 3 + 4) / 5 = 10/5 = 2.0\n        # adjust for thermal expansion between input temp (600 C) and average temp (603 C)\n        fuelMat = avgB.getComponent(Flags.FUEL).material\n        expansion = (1.0 + fuelMat.linearExpansionPercent(Tc=603.0) / 100.0) / (\n            1.0 + fuelMat.linearExpansionPercent(Tc=600.0) / 100.0\n        )\n        self.assertAlmostEqual(avgB.getNumberDensity(\"U235\") / expansion**2, 2.0)\n        # (0 + 1/4 + 2/4 + 3/4 + 4/4) / 5 * 100.0 = 50.0\n        self.assertEqual(avgB.p.percentBu, 50.0)\n\n        # check that a new block collection of the representative block has right temperatures\n        # this is required for Doppler coefficient calculations\n        newBc = AverageBlockCollection(self.blockList[0].core.r.blueprints.allNuclidesInProblem)\n        newBc.append(avgB)\n        newBc.calcAvgNuclideTemperatures()\n        self.assertAlmostEqual(newBc.avgNucTemperatures[\"U235\"], 603.0)\n        self.assertAlmostEqual(newBc.avgNucTemperatures[\"FE56\"], 502.0)\n        self.assertAlmostEqual(newBc.avgNucTemperatures[\"NA23\"], 402.0)\n\n    def test_createRepresentativeBlockDissimilar(self):\n        \"\"\"Test creation of a representative block from a collection with dissimilar blocks.\"\"\"\n        uniqueBlock = test_blocks.loadTestBlock()\n        uniqueBlock.p.percentBu = 50.0\n        fpFactory = test_lumpedFissionProduct.getDummyLFPFile()\n        
uniqueBlock.setLumpedFissionProducts(fpFactory.createLFPsFromFile())\n        uniqueBlock.setNumberDensity(\"U235\", 2.0)\n        uniqueBlock.p.gasReleaseFraction = 1.0\n        for c in uniqueBlock:\n            if c.hasFlags(Flags.FUEL):\n                c.temperatureInC = 600.0\n            elif c.hasFlags([Flags.CLAD, Flags.DUCT, Flags.WIRE]):\n                c.temperatureInC = 500.0\n            elif c.hasFlags([Flags.BOND, Flags.COOLANT, Flags.INTERCOOLANT]):\n                c.temperatureInC = 400.0\n        self.bc.append(uniqueBlock)\n\n        with mockRunLogs.BufferLog() as mock:\n            avgB = self.bc.createRepresentativeBlock()\n            self.assertIn(\"Non-matching block in AverageBlockCollection\", mock.getStdout())\n\n        self.assertNotIn(avgB, self.bc)\n        # (0 + 1 + 2 + 3 + 4 + 2) / 6.0 = 12/6 = 2.0\n        self.assertAlmostEqual(avgB.getNumberDensity(\"U235\"), 2.0)\n        # (0 + 1/4 + 2/4 + 3/4 + 4/4) / 5 * 100.0 = 50.0\n        self.assertAlmostEqual(avgB.p.percentBu, 50.0)\n\n        # U35 has different average temperature because blocks have different U235 content\n        newBc = AverageBlockCollection(self.blockList[0].core.r.blueprints.allNuclidesInProblem)\n        newBc.append(avgB)\n        newBc.calcAvgNuclideTemperatures()\n        # temps expected to be proportional to volume-fraction weighted temperature\n        # this is a non-physical result, but it demonstrates a problem that exists in the code\n        # when dissimilar blocks are put together in a BlockCollection\n        structureVolume = sum(c.getVolume() for c in avgB.getComponents([Flags.CLAD, Flags.DUCT, Flags.WIRE]))\n        fuelVolume = avgB.getComponent(Flags.FUEL).getVolume()\n        coolantVolume = sum(c.getVolume() for c in avgB.getComponents([Flags.BOND, Flags.COOLANT, Flags.INTERCOOLANT]))\n        expectedIronTemp = (structureVolume * 500.0 + fuelVolume * 600.0) / (structureVolume + fuelVolume)\n        expectedSodiumTemp = (coolantVolume 
* 400.0 + fuelVolume * 600.0) / (coolantVolume + fuelVolume)\n        self.assertAlmostEqual(newBc.avgNucTemperatures[\"U235\"], 600.0)\n        self.assertAlmostEqual(newBc.avgNucTemperatures[\"FE56\"], expectedIronTemp)\n        self.assertAlmostEqual(newBc.avgNucTemperatures[\"NA23\"], expectedSodiumTemp)\n\n\nclass TestComponentAveraging(unittest.TestCase):\n    @classmethod\n    def setUpClass(cls):\n        fpFactory = test_lumpedFissionProduct.getDummyLFPFile()\n        cls.blockList = makeBlocks(3)\n        for bi, b in enumerate(cls.blockList):\n            b.setType(\"fuel\")\n            b.setLumpedFissionProducts(fpFactory.createLFPsFromFile())\n            # put some trace Fe-56 and Na-23 into the fuel\n            # zero out all fuel nuclides except U-235 (for mass-weighting of component temperature)\n            for nuc in b.getNuclides():\n                b.setNumberDensity(nuc, 0.0)\n            b.setNumberDensity(\"U235\", bi)\n            b.setNumberDensity(\"FE56\", bi / 2.0)\n            b.setNumberDensity(\"NA23\", bi / 3.0)\n            for c in b:\n                if c.hasFlags(Flags.FUEL):\n                    c.temperatureInC = 600.0 + bi\n                elif c.hasFlags([Flags.CLAD, Flags.DUCT, Flags.WIRE]):\n                    c.temperatureInC = 500.0 + bi\n                elif c.hasFlags([Flags.BOND, Flags.COOLANT, Flags.INTERCOOLANT]):\n                    c.temperatureInC = 400.0 + bi\n\n    def setUp(self):\n        self.bc = AverageBlockCollection(self.blockList[0].core.r.blueprints.allNuclidesInProblem)\n        blockCopies = [copy.deepcopy(b) for b in self.blockList]\n        self.bc.extend(blockCopies)\n\n    def test_getAverageComponentNumberDensities(self):\n        \"\"\"Test component number density averaging.\"\"\"\n        # because of the way densities are set up, the middle block (index 1 of 0-2) component\n        # densities are equivalent to the average\n        b = self.bc[1]\n        for compIndex, c in 
enumerate(b.getComponents()):\n            avgDensities = self.bc._getAverageComponentNumberDensities(compIndex)\n            compDensities = c.getNumberDensities()\n            for nuc in c.getNuclides():\n                self.assertAlmostEqual(\n                    compDensities[nuc],\n                    avgDensities[nuc],\n                    msg=f\"{nuc} density {compDensities[nuc]} not equal to {avgDensities[nuc]}!\",\n                )\n            self.assertEqual(len(compDensities), len(avgDensities))\n\n    def test_getAverageComponentTemperature(self):\n        \"\"\"Test mass-weighted component temperature averaging.\"\"\"\n        b = self.bc[0]\n        massWeightedIncrease = 5.0 / 3.0\n        baseTemps = [600, 400, 500, 500, 400, 500, 400]\n        expectedTemps = [t + massWeightedIncrease for t in baseTemps]\n        for compIndex, c in enumerate(b.getComponents()):\n            avgTemp = self.bc._getAverageComponentTemperature(compIndex)\n            self.assertAlmostEqual(\n                expectedTemps[compIndex],\n                avgTemp,\n                msg=f\"{c} avg temperature {avgTemp} not equal to expected {expectedTemps[compIndex]}!\",\n            )\n\n    def test_getAvgCompTempVariedWeights(self):\n        \"\"\"Test mass-weighted component temperature averaging with variable weights.\"\"\"\n        # make up a fake weighting with power param\n        self.bc.weightingParam = \"power\"\n        for i, b in enumerate(self.bc):\n            b.p.power = i\n        weightedIncrease = 1.8\n        baseTemps = [600, 400, 500, 500, 400, 500, 400]\n        expectedTemps = [t + weightedIncrease for t in baseTemps]\n        for compIndex, c in enumerate(b.getComponents()):\n            avgTemp = self.bc._getAverageComponentTemperature(compIndex)\n            self.assertAlmostEqual(\n                expectedTemps[compIndex],\n                avgTemp,\n                msg=f\"{c} avg temperature {avgTemp} not equal to expected 
{expectedTemps[compIndex]}!\",\n            )\n\n    def test_getAvgCompTempNoMass(self):\n        \"\"\"Test component temperature averaging when the components have no mass.\"\"\"\n        for b in self.bc:\n            for nuc in b.getNuclides():\n                b.setNumberDensity(nuc, 0.0)\n\n        unweightedIncrease = 1.0\n        baseTemps = [600, 400, 500, 500, 400, 500, 400]\n        expectedTemps = [t + unweightedIncrease for t in baseTemps]\n        for compIndex, c in enumerate(b.getComponents()):\n            avgTemp = self.bc._getAverageComponentTemperature(compIndex)\n            self.assertAlmostEqual(\n                expectedTemps[compIndex],\n                avgTemp,\n                msg=f\"{c} avg temperature {avgTemp} not equal to expected {expectedTemps[compIndex]}!\",\n            )\n\n\nclass TestBlockCollCompAvg(unittest.TestCase):\n    \"\"\"Test Block collection component averages.\"\"\"\n\n    def setUp(self):\n        r\"\"\"\n        First part of setup same as test_Cartesian.\n        Second part of setup builds lists/dictionaries of expected values to compare to.\n        has expected values for component isotopic atom density and component area.\n        \"\"\"\n        self.o, self.r = test_reactors.loadTestReactor(TEST_ROOT, inputFileName=\"zpprTest.yaml\")\n\n        #                    ndrawer1  lenFuelTypeD1  ndrawer2  lenFuelTypeD2\n        EuWeight = float(1 * 60 + 3 * 15)\n        otherEUWeight = float(1 * 15 + 3 * 45)\n        totalWeight = otherEUWeight + EuWeight\n        otherEUWeight /= totalWeight\n        EuWeight /= totalWeight\n        expectedRepBlanketBlock = [\n            {\"U238\": 0.045},  # DU\n            {\"NA23\": 0.02},  # Na\n            {\"U238\": 0.045},  # DU\n        ]\n        expectedRepFuelBlock = [\n            {\"U238\": 0.045 * EuWeight + 0.045 * otherEUWeight},  # DU\n            {\n                \"U235\": 0.025 * EuWeight + 0.0125 * otherEUWeight,\n                \"U238\": 0.02 * 
EuWeight + 0.01 * otherEUWeight,\n            },\n            {\"NA23\": 0.02},  # Na}\n            {\n                \"FE54\": 0.07 * 0.05845,\n                \"FE56\": 0.07 * 0.91754,\n                \"FE57\": 0.07 * 0.02119,\n                \"FE58\": 0.07 * 0.00282,\n            },  # Steel\n        ]\n        # later sorted by density so less massive block first\n        self.expectedBlockDensities = [\n            expectedRepBlanketBlock,\n            expectedRepFuelBlock,\n            expectedRepFuelBlock,\n        ]\n        self.expectedAreas = [[1, 6, 1], [1, 2, 1, 4]]\n\n    def test_ComponentAverageRepBlock(self):\n        \"\"\"Tests that the XS group manager calculates the expected component atom density\n        and component area correctly.\n\n        Order of components is also checked since in 1D cases the order of the components matters.\n        \"\"\"\n        xsgm = self.o.getInterface(\"xsGroups\")\n\n        for _xsID, xsOpt in self.o.cs[CONF_CROSS_SECTION].items():\n            self.assertEqual(xsOpt.blockRepresentation, None)\n\n        xsgm.interactBOL()\n\n        # Check that the correct defaults are propagated after the interactBOL\n        # from the cross section group manager is called.\n        for _xsID, xsOpt in self.o.cs[CONF_CROSS_SECTION].items():\n            self.assertEqual(xsOpt.blockRepresentation, self.o.cs[CONF_XS_BLOCK_REPRESENTATION])\n\n        xsgm.createRepresentativeBlocks()\n        representativeBlockList = list(xsgm.representativeBlocks.values())\n        representativeBlockList.sort(key=lambda repB: repB.getMass() / repB.getVolume())\n\n        self.assertEqual(len(representativeBlockList), len(self.expectedBlockDensities))\n        for b, componentDensities, areas in zip(\n            representativeBlockList, self.expectedBlockDensities, self.expectedAreas\n        ):\n            self.assertEqual(len(b), len(componentDensities))\n            self.assertEqual(len(b), len(areas))\n            for c, 
compDensity, compArea in zip(b, componentDensities, areas):\n                self.assertEqual(compArea, c.getArea())\n                cNucs = c.getNuclides()\n                self.assertEqual(len(cNucs), len(compDensity), (cNucs, compDensity))\n                for nuc in cNucs:\n                    self.assertAlmostEqual(c.getNumberDensity(nuc), compDensity[nuc])\n\n        self.assertIn(\n            \"AC\",\n            xsgm.representativeBlocks,\n            (\"Assemblies not in the core should still have XS groups, see _getMissingBlueprintBlocks()\"),\n        )\n\n\nclass TestBlockCollCompAvg1DCyl(unittest.TestCase):\n    \"\"\"Test Block collection component averages for 1D cylinder.\"\"\"\n\n    def setUp(self):\n        \"\"\"First part of setup same as test_Cartesian.\n\n        Second part of setup builds lists/dictionaries of expected values to compare to.\n        has expected values for component isotopic atom density and component area.\n        \"\"\"\n        self.o, self.r = test_reactors.loadTestReactor(TEST_ROOT)\n\n        sodiumDensity = {\"NA23\": 0.022166571826233578}\n        steelDensity = {\n            \"C\": 0.0007685664978992269,\n            \"V50\": 6.795562118653462e-07,\n            \"V51\": 0.0002711429285342731,\n            \"SI28\": 0.0003789374369638149,\n            \"SI29\": 1.924063709833714e-05,\n            \"SI30\": 1.268328992580968e-05,\n            \"CR50\": 0.0004532023742335746,\n            \"CR52\": 0.008739556775111474,\n            \"CR53\": 0.0009909955713678232,\n            \"CR54\": 0.000246679773317009,\n            \"MN55\": 0.0004200803669857142,\n            \"FE54\": 0.004101496663229472,\n            \"FE56\": 0.06438472483061823,\n            \"FE57\": 0.0014869241111006412,\n            \"FE58\": 0.00019788230265709334,\n            \"NI58\": 0.0002944487657779742,\n            \"NI60\": 0.00011342053328927859,\n            \"NI61\": 4.930763373747379e-06,\n            \"NI62\": 
1.571788956157717e-05,\n            \"NI64\": 4.005163933412346e-06,\n            \"MO92\": 7.140180476114493e-05,\n            \"MO94\": 4.4505841916481845e-05,\n            \"MO95\": 7.659816252004227e-05,\n            \"MO96\": 8.02548587207478e-05,\n            \"MO97\": 4.594927462728666e-05,\n            \"MO98\": 0.00011610009956095838,\n            \"MO100\": 4.6334190016834624e-05,\n            \"W182\": 3.663619370317025e-05,\n            \"W183\": 1.9783544599711936e-05,\n            \"W184\": 4.235973352562047e-05,\n            \"W186\": 3.9304414603061506e-05,\n        }\n        linerAdjustment = 1.014188527784268\n        cladDensity = {nuc: dens * linerAdjustment for nuc, dens in steelDensity.items()}\n        fuelDensity = {\n            \"AM241\": 2.3605999999999997e-05,\n            \"PU238\": 3.7387e-06,\n            \"PU239\": 0.0028603799999999996,\n            \"PU240\": 0.000712945,\n            \"PU241\": 9.823120000000004e-05,\n            \"PU242\": 2.02221e-05,\n            \"U235\": 0.00405533,\n            \"U238\": 0.0134125,\n        }\n        self.expectedComponentDensities = [\n            fuelDensity,\n            sodiumDensity,\n            cladDensity,\n            steelDensity,\n            sodiumDensity,\n            steelDensity,\n            sodiumDensity,\n        ]\n        self.expectedComponentAreas = [\n            99.54797488948871,\n            29.719913442616843,\n            30.07759373476877,\n            1.365897776727751,\n            63.184097853691235,\n            17.107013842808822,\n            1.9717608091694139,\n        ]\n\n    def test_ComponentAverage1DCylinder(self):\n        \"\"\"Tests that the cross-section group manager calculates the expected component atom density\n        and component area correctly.\n\n        Order of components is also checked since in 1D cases the order of the components matters.\n\n        .. 
test:: Create representative blocks using custom cylindrical averaging.\n            :id: T_ARMI_XSGM_CREATE_REPR_BLOCKS1\n            :tests: R_ARMI_XSGM_CREATE_REPR_BLOCKS\n        \"\"\"\n        xsgm = self.o.getInterface(\"xsGroups\")\n\n        xsgm.interactBOL()\n\n        # Check that the correct defaults are propagated after the interactBOL\n        # from the cross section group manager is called.\n        xsOpt = self.o.cs[CONF_CROSS_SECTION][\"ZA\"]\n        self.assertEqual(xsOpt.blockRepresentation, \"ComponentAverage1DCylinder\")\n\n        xsgm.createRepresentativeBlocks()\n        xsgm.updateNuclideTemperatures()\n\n        representativeBlockList = list(xsgm.representativeBlocks.values())\n        representativeBlockList.sort(key=lambda repB: repB.getMass() / repB.getVolume())\n        reprBlock = xsgm.representativeBlocks[\"ZA\"]\n        self.assertEqual(reprBlock.name, \"1D_CYL_AVG_ZA\")\n        self.assertEqual(reprBlock.p.percentBu, 0.0)\n\n        refTemps = {\"fuel\": 600.0, \"coolant\": 450.0, \"structure\": 462.4565}\n\n        for c, compDensity, compArea in zip(reprBlock, self.expectedComponentDensities, self.expectedComponentAreas):\n            self.assertEqual(compArea, c.getArea())\n            cNucs = c.getNuclides()\n            for nuc in cNucs:\n                self.assertAlmostEqual(c.getNumberDensity(nuc), compDensity.get(nuc, 0.0))\n                if \"fuel\" in c.getType():\n                    compTemp = refTemps[\"fuel\"]\n                elif any(sodium in c.getType() for sodium in [\"bond\", \"coolant\"]):\n                    compTemp = refTemps[\"coolant\"]\n                else:\n                    compTemp = refTemps[\"structure\"]\n                self.assertAlmostEqual(\n                    compTemp,\n                    xsgm.avgNucTemperatures[\"ZA\"][nuc],\n                    2,\n                    f\"{nuc} temperature does not match expected value of {compTemp}\",\n                )\n\n    def 
test_ComponentAverageDuctHet1DCylinder(self):\n        \"\"\"\n        Tests that the cross-section group manager calculates the expected component atom density,\n        component area, and average nuclide temperature correctly for a duct heterogeneous cylindrical\n        block collection.\n        \"\"\"\n        self.o.cs[CONF_CROSS_SECTION][\"ZA\"].ductHeterogeneous = True\n        xsgm = self.o.getInterface(\"xsGroups\")\n\n        xsgm.interactBOL()\n\n        # Check that the correct defaults are propagated after the interactBOL\n        # from the cross section group manager is called.\n        xsOpt = self.o.cs[CONF_CROSS_SECTION][\"ZA\"]\n        self.assertEqual(xsOpt.blockRepresentation, \"ComponentAverage1DCylinder\")\n\n        xsgm.createRepresentativeBlocks()\n        xsgm.updateNuclideTemperatures()\n\n        representativeBlockList = list(xsgm.representativeBlocks.values())\n        representativeBlockList.sort(key=lambda repB: repB.getMass() / repB.getVolume())\n        reprBlock = xsgm.representativeBlocks[\"ZA\"]\n        self.assertEqual(reprBlock.name, \"1D_CYL_DUCT_HET_AVG_ZA\")\n        self.assertEqual(reprBlock.p.percentBu, 0.0)\n\n        refTemps = {\"fuel\": 600.0, \"coolant\": 450.0, \"structure\": 462.4565}\n\n        for c, compDensity, compArea in zip(reprBlock, self.expectedComponentDensities, self.expectedComponentAreas):\n            self.assertEqual(compArea, c.getArea())\n            cNucs = c.getNuclides()\n            for nuc in cNucs:\n                self.assertAlmostEqual(c.getNumberDensity(nuc), compDensity.get(nuc, 0.0))\n                if \"fuel\" in c.getType():\n                    compTemp = refTemps[\"fuel\"]\n                elif any(sodium in c.getType() for sodium in [\"bond\", \"coolant\"]):\n                    compTemp = refTemps[\"coolant\"]\n                else:\n                    compTemp = refTemps[\"structure\"]\n\n                if any(comp in c.getType() for comp in [\"fuel\", \"bond\", 
\"coolant\"]):\n                    # only 1 fuel component, and bond and coolant are both at same temperature\n                    # the component temp should match the avg nuc temp\n                    self.assertAlmostEqual(\n                        compTemp,\n                        xsgm.avgNucTemperatures[\"ZA\"][nuc],\n                        2,\n                        f\"{nuc} temperature does not match expected value of {compTemp} for component {c}\",\n                    )\n                else:\n                    # steel components are at different temperatures\n                    # the temperatures should be different\n                    diff = abs(compTemp - xsgm.avgNucTemperatures[\"ZA\"][nuc])\n                    self.assertGreater(\n                        diff,\n                        1.0,\n                        f\"{nuc} temperature should be different from {compTemp} for component {c}\",\n                    )\n\n    def test_checkComponentConsistency(self):\n        xsgm = self.o.getInterface(\"xsGroups\")\n        xsgm.interactBOL()\n        blockCollectionsByXsGroup = xsgm.makeCrossSectionGroups()\n\n        blockCollection = blockCollectionsByXsGroup[\"ZA\"]\n        baseComponents = self.r.core.getFirstBlock(Flags.CONTROL).getComponents()\n        densities = {\n            \"control\": baseComponents[0].getNumberDensities(),\n            \"clad\": baseComponents[2].getNumberDensities(),\n            \"coolant\": baseComponents[4].getNumberDensities(),\n        }\n        controlComponent, cladComponent, coolantComponent = self._makeComponents(7, densities)\n\n        # reference block\n        refBlock = HexBlock(\"refBlock\")\n        refBlock.add(controlComponent)\n        refBlock.add(cladComponent)\n        refBlock.add(coolantComponent)\n\n        # matching block\n        matchingBlock = HexBlock(\"matchBlock\")\n        matchingBlock.add(controlComponent)\n        matchingBlock.add(cladComponent)\n        
matchingBlock.add(coolantComponent)\n\n        # unsorted block\n        unsortedBlock = HexBlock(\"unsortedBlock\")\n        unsortedBlock.add(cladComponent)\n        unsortedBlock.add(coolantComponent)\n        unsortedBlock.add(controlComponent)\n\n        # non-matching block length\n        nonMatchingLengthBlock = HexBlock(\"blockLengthDiff\")\n        nonMatchingLengthBlock.add(controlComponent)\n        nonMatchingLengthBlock.add(coolantComponent)\n\n        # non-matching component multiplicity\n        nonMatchingMultBlock = HexBlock(\"blockComponentDiff\")\n        control, clad, coolant = self._makeComponents(19, densities)\n        nonMatchingMultBlock.add(control)\n        nonMatchingMultBlock.add(clad)\n        nonMatchingMultBlock.add(coolant)\n\n        # different nuclides\n        nucDiffBlock = HexBlock(\"blockNucDiff\")\n        mixedDensities = {\n            \"clad\": baseComponents[0].getNumberDensities(),\n            \"coolant\": baseComponents[2].getNumberDensities(),\n            \"control\": baseComponents[4].getNumberDensities(),\n        }\n        control, clad, coolant = self._makeComponents(7, mixedDensities)\n        nucDiffBlock.add(control)\n        nucDiffBlock.add(clad)\n        nucDiffBlock.add(coolant)\n\n        # additional non-important nuclides\n        negligibleNucDiffBlock = HexBlock(\"blockNegligibleNucDiff\")\n        negligibleNuc = {\"N14\": 1.0e-5}\n        modControl = baseComponents[0].getNumberDensities()\n        modClad = baseComponents[2].getNumberDensities()\n        modCoolant = baseComponents[4].getNumberDensities()\n        modControl.update(negligibleNuc)\n        modClad.update(negligibleNuc)\n        modCoolant.update(negligibleNuc)\n        mixedDensities = {\n            \"control\": modControl,\n            \"clad\": modClad,\n            \"coolant\": modCoolant,\n        }\n        control, clad, coolant = self._makeComponents(7, mixedDensities)\n        negligibleNucDiffBlock.add(control)\n      
  negligibleNucDiffBlock.add(clad)\n        negligibleNucDiffBlock.add(coolant)\n\n        # nuclides at zero number density should be okay\n        zeroNucBlock = HexBlock(\"blockNucZero\")\n        mixedDensities = {\n            \"control\": baseComponents[0].getNumberDensities(),\n            \"clad\": baseComponents[2].getNumberDensities(),\n            \"coolant\": baseComponents[4].getNumberDensities(),\n        }\n        control, clad, coolant = self._makeComponents(7, mixedDensities)\n        # set some nuclide number densities to zero\n        control.setNumberDensity(\"U235\", 0.0)\n        control.setNumberDensity(\"O16\", 0.0)\n        clad.setNumberDensity(\"FE56\", 0.0)\n        coolant.setNumberDensity(\"NA23\", 0.0)\n        coolant.setNumberDensity(\"PU239\", 0.0)\n        zeroNucBlock.add(control)\n        zeroNucBlock.add(clad)\n        zeroNucBlock.add(coolant)\n\n        blockCollection._checkComponentConsistency(refBlock, matchingBlock)\n        blockCollection._checkComponentConsistency(refBlock, unsortedBlock)\n        blockCollection._checkComponentConsistency(refBlock, negligibleNucDiffBlock)\n        blockCollection._checkComponentConsistency(refBlock, zeroNucBlock)\n        for b in (nonMatchingMultBlock, nonMatchingLengthBlock, nucDiffBlock):\n            with self.assertRaises(ValueError):\n                blockCollection._checkComponentConsistency(refBlock, b)\n\n    def _makeComponents(self, multiplicity, densities):\n        from armi.reactor import components\n\n        baseComponents = self.r.core.getFirstBlock(Flags.CONTROL).getComponents()\n        controlComponent = components.Circle(\n            \"control\",\n            baseComponents[0].material,\n            100.0,\n            100.0,\n            id=0.0,\n            od=0.6,\n            mult=multiplicity,\n        )\n        cladComponent = components.Circle(\n            \"clad\",\n            baseComponents[2].material,\n            100.0,\n            100.0,\n       
     id=0.6,\n            od=0.7,\n            mult=multiplicity,\n        )\n        coolantComponent = components.Circle(\n            \"coolant\",\n            baseComponents[4].material,\n            100.0,\n            100.0,\n            id=0.7,\n            od=0.8,\n            mult=multiplicity,\n        )\n\n        controlComponent.setNumberDensities(densities[\"control\"])\n        cladComponent.setNumberDensities(densities[\"clad\"])\n        coolantComponent.setNumberDensities(densities[\"coolant\"])\n\n        return controlComponent, cladComponent, coolantComponent\n\n\nclass TestBlockCollFluxWeightAvg(unittest.TestCase):\n    @classmethod\n    def setUpClass(cls):\n        fpFactory = test_lumpedFissionProduct.getDummyLFPFile()\n        cls.blockList = makeBlocks(5)\n        for bi, b in enumerate(cls.blockList):\n            b.setType(\"fuel\")\n            b.p.percentBu = bi / 4.0 * 100\n            b.setLumpedFissionProducts(fpFactory.createLFPsFromFile())\n            b.setNumberDensity(\"U235\", bi)\n            b.p.gasReleaseFraction = bi * 2 / 8.0\n            b.p.flux = bi + 1\n\n    def setUp(self):\n        self.bc = FluxWeightedAverageBlockCollection(self.blockList[0].core.r.blueprints.allNuclidesInProblem)\n        self.bc.extend(self.blockList)\n\n    def test_createRepresentativeBlock(self):\n        self.bc[1].p.flux = 1e99  # only the 2nd block values should show up\n        avgB = self.bc.createRepresentativeBlock()\n        self.assertNotIn(avgB, self.bc)\n        self.assertAlmostEqual(avgB.getNumberDensity(\"U235\"), 1.0)\n        self.assertEqual(avgB.p.percentBu, 25.0)\n\n    def test_invalidWeights(self):\n        self.bc[0].p.flux = 0.0\n        with self.assertRaises(ValueError):\n            self.bc.createRepresentativeBlock()\n\n\nclass TestXSGM(unittest.TestCase):\n    def setUp(self):\n        cs = settings.Settings()\n        self.blockList = makeBlocks(20)\n        self.csm = 
CrossSectionGroupManager(self.blockList[0].core.r, cs)\n        for bi, b in enumerate(self.blockList):\n            b.p.percentBu = bi / 19.0 * 100\n        self.csm._setBuGroupBounds([3, 10, 30, 100])\n        self.csm.interactBOL()\n\n    def test_enableEnvGroupUpdates(self):\n        self.csm._envGroupUpdatesEnabled = False\n        self.csm.enableEnvGroupUpdates()\n        self.assertTrue(self.csm._envGroupUpdatesEnabled)\n        # test flipping again keeps true\n        self.csm.enableEnvGroupUpdates()\n        self.assertTrue(self.csm._envGroupUpdatesEnabled)\n\n    def test_disableEnvGroupUpdates(self):\n        self.csm._envGroupUpdatesEnabled = True\n        wasEnabled = self.csm.disableEnvGroupUpdates()\n        self.assertTrue(wasEnabled)\n        self.assertFalse(self.csm._envGroupUpdatesEnabled)\n        wasEnabled = self.csm.disableEnvGroupUpdates()\n        self.assertFalse(wasEnabled)\n        self.assertFalse(self.csm._envGroupUpdatesEnabled)\n\n    def test_updateBurnupGroups(self):\n        self.blockList[1].p.percentBu = 3.1\n        self.blockList[2].p.percentBu = 10.0\n\n        self.csm._updateEnvironmentGroups(self.blockList)\n\n        self.assertEqual(self.blockList[0].p.envGroup, \"A\")\n        self.assertEqual(self.blockList[1].p.envGroup, \"B\")\n        self.assertEqual(self.blockList[2].p.envGroup, \"B\")\n        self.assertEqual(self.blockList[-1].p.envGroup, \"D\")\n\n    def test_setBuGroupBounds(self):\n        self.assertAlmostEqual(self.csm._buGroupBounds[2], 30.0)\n\n        with self.assertRaises(ValueError):\n            self.csm._setBuGroupBounds([3, 10, 300])\n\n        with self.assertRaises(ValueError):\n            self.csm._setBuGroupBounds([-5, 3, 10, 30.0])\n\n        with self.assertRaises(ValueError):\n            self.csm._setBuGroupBounds([1, 5, 3])\n\n    def test_setTempGroupBounds(self):\n        # negative temps in C are allowed\n        self.csm._setTempGroupBounds([-5, 3, 10, 300])\n        
self.assertAlmostEqual(self.csm._tempGroupBounds[2], 10.0)\n\n        with self.assertRaises(ValueError):\n            self.csm._setTempGroupBounds([1, 5, 3])\n\n    def test_addXsGroupsFromBlocks(self):\n        blockCollectionsByXsGroup = {}\n        blockCollectionsByXsGroup = self.csm._addXsGroupsFromBlocks(blockCollectionsByXsGroup, self.blockList)\n        self.assertEqual(len(blockCollectionsByXsGroup), 4)\n        self.assertIn(\"AB\", blockCollectionsByXsGroup)\n\n    def test_getMissingBlueprintBlocks(self):\n        \"\"\"Test the function to get missing blueprints blocks.\"\"\"\n        self.csm._setTempGroupBounds([0, 100, 200])\n        blockCollectionsByXsGroup = {}\n        blockCollectionsByXsGroup = self.csm._addXsGroupsFromBlocks(blockCollectionsByXsGroup, self.blockList)\n        missingBlueprintBlocks = self.csm._getMissingBlueprintBlocks(blockCollectionsByXsGroup)\n        envGroups = set(b.p.envGroup for b in missingBlueprintBlocks)\n        self.assertGreater(len(envGroups), 1, \"Blueprint block environment groups were not updated!\")\n\n    def test_calcWeightedBurnup(self):\n        self.blockList[1].p.percentBu = 3.1\n        self.blockList[2].p.percentBu = 10.0\n        self.blockList[3].p.percentBu = 1.5\n        for b in self.blockList[4:]:\n            b.p.percentBu = 0.0\n        self.csm._updateEnvironmentGroups(self.blockList)\n        blockCollectionsByXsGroup = {}\n        blockCollectionsByXsGroup = self.csm._addXsGroupsFromBlocks(blockCollectionsByXsGroup, self.blockList)\n        ABcollection = blockCollectionsByXsGroup[\"AB\"]\n        self.assertEqual(blockCollectionsByXsGroup[\"AA\"]._calcWeightedBurnup(), 1 / 12.0)\n        self.assertEqual(\n            ABcollection.getWeight(self.blockList[1]),\n            ABcollection.getWeight(self.blockList[2]),\n            \"The two blocks in AB do not have the same weighting!\",\n        )\n        self.assertEqual(ABcollection._calcWeightedBurnup(), 6.55)\n\n    def 
test_getNextAvailableXsType(self):\n        blockCollectionsByXsGroup = {}\n        blockCollectionsByXsGroup = self.csm._addXsGroupsFromBlocks(blockCollectionsByXsGroup, self.blockList)\n        xsType1, xsType2, xsType3 = self.csm.getNextAvailableXsTypes(3)\n        self.assertEqual(\"B\", xsType1)\n        self.assertEqual(\"C\", xsType2)\n        self.assertEqual(\"D\", xsType3)\n\n        # verify that we can get lowercase letters\n        xsTypes = self.csm.getNextAvailableXsTypes(26)\n        self.assertEqual(\"Y\", xsTypes[-4])\n        self.assertEqual(\"a\", xsTypes[-3])\n        self.assertEqual(\"b\", xsTypes[-2])\n        self.assertEqual(\"c\", xsTypes[-1])\n\n        # verify that we can get lowercase letters\n        if sys.platform.startswith(\"win\"):\n            with mockRunLogs.BufferLog() as mock:\n                xsTypes = self.csm.getNextAvailableXsTypes(27)\n                self.assertIn(\"Mixing upper and lower-case XS\", mock.getStdout())\n\n    def test_getRepresentativeBlocks(self):\n        \"\"\"Test that we can create the representative blocks for a reactor.\n\n        .. test:: Build representative blocks for a reactor.\n            :id: T_ARMI_XSGM_CREATE_XS_GROUPS0\n            :tests: R_ARMI_XSGM_CREATE_XS_GROUPS\n        \"\"\"\n        _o, r = test_reactors.loadTestReactor(TEST_ROOT)\n        self.csm.r = r\n\n        # Assumption: All sodium in fuel blocks for this test is 450 C and this is the expected\n        # sodium temperature. These lines of code take the first sodium block and decrease the\n        # temperature of the block, but change the atom density to approximately zero. Checking\n        # later on the nuclide temperature of sodium is asserted to be still 450. This perturbation\n        # proves that altering the temperature of an component with near zero atom density does not\n        # affect the average temperature of the block collection. 
This demonstrates that the\n        # temperatures of a block collection are atom weighted rather than just the average\n        # temperature.\n        regularFuel = r.core.getFirstBlock(Flags.FUEL, exact=True)\n        intercoolant = regularFuel.getComponent(Flags.INTERCOOLANT)\n        intercoolant.setTemperature(100)  # just above melting\n        intercoolant.setNumberDensity(\"NA23\", units.TRACE_NUMBER_DENSITY)\n\n        self.csm.createRepresentativeBlocks()\n        blocks = list(self.csm.representativeBlocks.values())\n        self.assertGreater(len(blocks), 0)\n\n        # Test ability to get average nuclide temperature in block.\n        u235 = self.csm.getNucTemperature(\"AA\", \"U235\")\n        fe = self.csm.getNucTemperature(\"AA\", \"FE56\")\n        na = self.csm.getNucTemperature(\"AA\", \"NA23\")\n\n        self.assertAlmostEqual(na, 450.0, msg=\"Na temp was {}, not 450\".format(na))\n        self.assertGreater(u235, fe)\n        self.assertGreater(fe, na)\n        self.assertTrue(0.0 < na < fe)\n        # trace nuclides should also be at fuel temp.\n        self.assertAlmostEqual(self.csm.getNucTemperature(\"AA\", \"LFP35\"), u235)\n\n        # Test that retrieving temperatures fails if a representative block for a given XS ID does not exist\n        self.assertEqual(self.csm.getNucTemperature(\"Z\", \"U235\"), None)\n\n        # Test dimensions\n        self.assertEqual(blocks[0].getHeight(), 25.0)\n        self.assertEqual(blocks[1].getHeight(), 25.0)\n        self.assertAlmostEqual(blocks[0].getVolume(), 6074.356308731789)\n        self.assertAlmostEqual(blocks[1].getVolume(), 6074.356308731789)\n\n        # Number densities haven't been calculated yet\n        self.assertIsNone(blocks[0].p.detailedNDens)\n        self.assertIsNone(blocks[1].p.detailedNDens)\n\n    def test_checkForUnrepresentedXSIDs(self):\n        blockCollectionsByXsGroup = self.csm.makeCrossSectionGroups()\n        self.csm.createRepresentativeBlocks()\n\n        # set 
valid flags to something the fuel block would not have to trigger unrepresented block\n        fuelXStype = \"AD\"\n        blocksWithType = [b for b in self.csm.r.core.iterBlocks(Flags.FUEL) if b.getMicroSuffix() == fuelXStype]\n        fuelCollection = blockCollectionsByXsGroup[fuelXStype]\n        fuelCollection._validRepresentativeBlockTypes = Flags.CLAD\n\n        # check for unrepresented XS ID, assert that it is found\n        self.csm._checkForUnrepresentedXSIDs(blockCollectionsByXsGroup)\n        self.assertListEqual(self.csm._unrepresentedXSIDs, [fuelXStype])\n\n        # modify unrepresented XS ID, assert that first character is the same\n        self.csm._modifyUnrepresentedXSIDs(blockCollectionsByXsGroup)\n        for b in blocksWithType:\n            modifiedType = b.getMicroSuffix()\n            self.assertEqual(modifiedType[0], fuelXStype[0])\n            self.assertNotEqual(modifiedType[1], fuelXStype[1])\n\n    def _createRepresentativeBlocksUsingExistingBlocks(self, validBlockTypes):\n        \"\"\"Reusable code used in multiple unit tests.\"\"\"\n        o, r = test_reactors.loadTestReactor(TEST_ROOT, inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\")\n        # set a few random non-default settings on AA to be copied to the new BA group\n        o.cs[CONF_CROSS_SECTION].update(\n            {\n                \"AA\": XSModelingOptions(\n                    \"AA\",\n                    geometry=\"0D\",\n                    averageByComponent=True,\n                    xsMaxAtomNumber=60,\n                    criticalBuckling=False,\n                    xsPriority=2,\n                )\n            }\n        )\n        o.cs[CONF_CROSS_SECTION].setDefaults(crossSectionGroupManager.AVERAGE_BLOCK_COLLECTION, validBlockTypes)\n        aaSettings = o.cs[CONF_CROSS_SECTION][\"AA\"]\n        self.csm.cs = copy.deepcopy(o.cs)\n        self.csm.createRepresentativeBlocks()\n        unperturbedReprBlocks = 
copy.deepcopy(self.csm.representativeBlocks)\n        self.assertNotIn(\"BA\", unperturbedReprBlocks)\n        block = r.core.getFirstBlock()\n        blockXSID = block.getMicroSuffix()\n        blockList = [block]\n        (\n            _bCollect,\n            newRepresentativeBlocks,\n            origXSIDsFromNew,\n        ) = self.csm.createRepresentativeBlocksUsingExistingBlocks(blockList, unperturbedReprBlocks)\n        self.assertIn(\"BA\", newRepresentativeBlocks)\n        oldReprBlock = unperturbedReprBlocks[blockXSID]\n        newReprBlock = newRepresentativeBlocks[\"BA\"]\n        self.assertEqual(newReprBlock.getMicroSuffix(), \"BA\")\n        self.assertEqual(newReprBlock.getNumberDensities(), oldReprBlock.getNumberDensities())\n        self.assertEqual(origXSIDsFromNew[\"BA\"], \"AA\")\n\n        # check that settings were copied correctly\n        baSettings = self.csm.cs[CONF_CROSS_SECTION][\"BA\"]\n        self.assertEqual(baSettings.xsID, \"BA\")\n        for setting, baSettingValue in baSettings.__dict__.items():\n            if setting == \"xsID\":\n                continue\n            self.assertEqual(baSettingValue, aaSettings.__dict__[setting])\n\n    def test_createRepBlocksUsingExistingBlocks(self):\n        \"\"\"\n        Demonstrates that a new representative block can be generated from an existing representative block.\n\n        Notes\n        -----\n        This tests that the XS ID of the new representative block is correct and that the\n        compositions are identical between the original and the new representative blocks.\n        \"\"\"\n        self._createRepresentativeBlocksUsingExistingBlocks([\"fuel\"])\n\n    def test_createRepBlocksDisableValidBlockTypes(self):\n        \"\"\"\n        Demonstrates that a new representative block can be generated from an existing representative block.\n\n        Notes\n        -----\n        This tests that the XS ID of the new representative block is correct and that the\n        
compositions are identical between the original and the new representative blocks.\n        \"\"\"\n        self._createRepresentativeBlocksUsingExistingBlocks(True)\n\n    def test_interactBOL(self):\n        \"\"\"Test `BOL` lattice physics update frequency.\n\n        .. test:: The cross-section group manager frequency depends on the LPI frequency at BOL.\n            :id: T_ARMI_XSGM_FREQ0\n            :tests: R_ARMI_XSGM_FREQ\n        \"\"\"\n        self.assertFalse(self.csm.representativeBlocks)\n        self.blockList[0].core.r.p.timeNode = 0\n        self.csm.cs[CONF_LATTICE_PHYSICS_FREQUENCY] = \"BOL\"\n        self.csm.interactBOL()\n        self.assertTrue(self.csm.representativeBlocks)\n\n    def test_interactBOC(self):\n        \"\"\"Test `BOC` lattice physics update frequency.\n\n        .. test:: The cross-section group manager frequency depends on the LPI frequency at BOC.\n            :id: T_ARMI_XSGM_FREQ1\n            :tests: R_ARMI_XSGM_FREQ\n        \"\"\"\n        self.assertFalse(self.csm.representativeBlocks)\n        self.blockList[0].core.r.p.timeNode = 0\n        self.csm.cs[CONF_LATTICE_PHYSICS_FREQUENCY] = \"BOC\"\n        self.csm.interactBOL()\n        self.csm.interactBOC()\n        self.assertTrue(self.csm.representativeBlocks)\n\n    def test_interactEveryNode(self):\n        \"\"\"Test `everyNode` lattice physics update frequency.\n\n        .. 
test:: The cross-section group manager frequency depends on the LPI frequency at every\n            time node.\n            :id: T_ARMI_XSGM_FREQ2\n            :tests: R_ARMI_XSGM_FREQ\n        \"\"\"\n        self.csm.cs[CONF_LATTICE_PHYSICS_FREQUENCY] = \"BOC\"\n        self.csm.interactBOL()\n        self.csm.interactEveryNode()\n        self.assertFalse(self.csm.representativeBlocks)\n        self.csm.cs[CONF_LATTICE_PHYSICS_FREQUENCY] = \"everyNode\"\n        self.csm.interactBOL()\n        self.csm.interactEveryNode()\n        self.assertTrue(self.csm.representativeBlocks)\n\n    def test_interactFirstCoupledIteration(self):\n        \"\"\"Test `firstCoupledIteration` lattice physics update frequency.\n\n        .. test:: The cross-section group manager frequency depends on the LPI frequency during\n            first coupled iteration.\n            :id: T_ARMI_XSGM_FREQ3\n            :tests: R_ARMI_XSGM_FREQ\n        \"\"\"\n        self.csm.cs[CONF_LATTICE_PHYSICS_FREQUENCY] = \"everyNode\"\n        self.csm.interactBOL()\n        self.csm.interactCoupled(iteration=0)\n        self.assertFalse(self.csm.representativeBlocks)\n        self.csm.cs[CONF_LATTICE_PHYSICS_FREQUENCY] = \"firstCoupledIteration\"\n        self.csm.interactBOL()\n        self.csm.interactCoupled(iteration=0)\n        self.assertTrue(self.csm.representativeBlocks)\n\n    def test_interactAllCoupled(self):\n        \"\"\"Test `all` lattice physics update frequency.\n\n        .. 
test:: The cross-section group manager frequency depends on the LPI frequency during coupling.\n            :id: T_ARMI_XSGM_FREQ4\n            :tests: R_ARMI_XSGM_FREQ\n        \"\"\"\n        self.csm.cs[CONF_LATTICE_PHYSICS_FREQUENCY] = \"firstCoupledIteration\"\n        self.csm.interactBOL()\n        self.csm.interactCoupled(iteration=1)\n        self.assertFalse(self.csm.representativeBlocks)\n        self.csm.cs[CONF_LATTICE_PHYSICS_FREQUENCY] = \"all\"\n        self.csm.interactBOL()\n        self.csm.interactCoupled(iteration=1)\n        self.assertTrue(self.csm.representativeBlocks)\n\n    def test_xsgmIsRunBeforeXS(self):\n        \"\"\"Test that the XSGM is run before the cross sections are calculated.\n\n        .. test:: Test that the cross-section group manager is run before the cross sections are calculated.\n            :id: T_ARMI_XSGM_FREQ5\n            :tests: R_ARMI_XSGM_FREQ\n        \"\"\"\n        from armi.interfaces import STACK_ORDER\n\n        self.assertLess(crossSectionGroupManager.ORDER, STACK_ORDER.CROSS_SECTIONS)\n\n    def test_copyPregeneratedFiles(self):\n        \"\"\"\n        Tests copying pre-generated cross section and flux files using reactor that is built from a\n        case settings file.\n        \"\"\"\n        o, r = test_reactors.loadTestReactor(TEST_ROOT, inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\")\n        # Need to overwrite the relative paths with absolute\n        o.cs[CONF_CROSS_SECTION][\"XA\"].xsFileLocation = [os.path.join(THIS_DIR, \"ISOXA\")]\n        o.cs[CONF_CROSS_SECTION][\"YA\"].fluxFileLocation = os.path.join(THIS_DIR, \"rzmflxYA\")\n        csm = CrossSectionGroupManager(r, o.cs)\n\n        with TemporaryDirectoryChanger(root=THIS_DIR):\n            csm._copyPregeneratedXSFile(\"XA\")\n            csm._copyPregeneratedFluxSolutionFile(\"YA\")\n            self.assertTrue(os.path.exists(\"ISOXA\"))\n            self.assertTrue(os.path.exists(\"rzmflxYA\"))\n\n\nclass 
TestXSGMWithTempGrouping(unittest.TestCase):\n    def setUp(self):\n        cs = settings.Settings()\n        cs[\"tempGroups\"] = [300, 400, 500]\n        self.blockList = makeBlocks(11)\n        buAndTemps = (\n            (1, 340),\n            (2, 150),\n            (6, 410),\n            (10.5, 290),\n            (2.5, 360),\n            (4, 460),\n            (15, 370),\n            (16, 340),\n            (15, 700),\n            (14, 720),\n        )\n        for b, env in zip(self.blockList, buAndTemps):\n            bu, temp = env\n            comps = b.getComponents(Flags.FUEL)\n            self.assertEqual(len(comps), 1)\n            c = next(iter(comps))\n            c.setTemperature(temp)\n            b.p.percentBu = bu\n        core = self.blockList[0].core\n\n        def getBlocks(includeAll=True):\n            return self.blockList\n\n        # this sets XSGM to only analyze the blocks in the block list.\n        core.getBlocks = getBlocks\n\n        self.csm = CrossSectionGroupManager(self.blockList[0].core.r, cs)\n        self.csm._setBuGroupBounds([3, 10, 30, 100])\n        self.csm.interactBOL()\n\n    def test_updateEnvironmentGroups(self):\n        \"\"\"Test creation of a cross section groups with temperature grouping.\n\n        .. 
test:: Create representative blocks using temperature groups.\n            :id: T_ARMI_XSGM_CREATE_XS_GROUPS1\n            :tests: R_ARMI_XSGM_CREATE_XS_GROUPS, R_ARMI_XSGM_CREATE_REPR_BLOCKS\n        \"\"\"\n        self.csm.createRepresentativeBlocks()\n        BL = self.blockList\n        loners = [BL[1], BL[3]]\n\n        self.assertNotEqual(loners[0].getMicroSuffix(), loners[1].getMicroSuffix())\n        sameGroups = [(BL[0], BL[4]), (BL[2], BL[5]), (BL[6], BL[7]), (BL[8], BL[9])]\n\n        # check that likes have like and different are different\n        for group in sameGroups:\n            b1, b2 = group\n            xsSuffix = b1.getMicroSuffix()\n            self.assertEqual(xsSuffix, b2.getMicroSuffix())\n            for group in sameGroups:\n                newb1, newb2 = group\n                if b1 is newb1:\n                    continue\n                self.assertNotEqual(xsSuffix, newb1.getMicroSuffix())\n                self.assertNotEqual(xsSuffix, newb2.getMicroSuffix())\n            for lone in loners:\n                self.assertNotEqual(xsSuffix, lone.getMicroSuffix())\n        self.assertNotEqual(loners[0].getMicroSuffix(), loners[1].getMicroSuffix())\n\n        # calculated based on the average of buAndTemps\n        expectedIDs = [\"AF\", \"AA\", \"AL\", \"AC\", \"AH\", \"AR\"]\n        expectedTemps = [\n            (340 + 360) / 2,\n            150,\n            (410 + 460) / 2,\n            290,\n            (370 + 340) / 2,\n            (700 + 720) / 2,\n        ]\n        expectedBurnups = (1.75, 2, 5, 10.5, 15.5, 14.5)\n        for xsID, expectedTemp, expectedBurnup in zip(expectedIDs, expectedTemps, expectedBurnups):\n            b = self.csm.representativeBlocks[xsID]\n            thisTemp = self.csm.avgNucTemperatures[xsID][\"U238\"]\n            self.assertAlmostEqual(thisTemp, expectedTemp)\n            self.assertAlmostEqual(b.p.percentBu, expectedBurnup)\n\n\nclass TestXSNumberConverters(unittest.TestCase):\n    def 
test_conversion(self):\n        label = crossSectionGroupManager.getXSTypeLabelFromNumber(65)\n        self.assertEqual(label, \"A\")\n        num = crossSectionGroupManager.getXSTypeNumberFromLabel(\"A\")\n        self.assertEqual(num, 65)\n\n    def test_conversion_2digit(self):\n        label = crossSectionGroupManager.getXSTypeLabelFromNumber(6570)\n        self.assertEqual(label, \"AF\")\n        num = crossSectionGroupManager.getXSTypeNumberFromLabel(\"ZZ\")\n        self.assertEqual(num, 9090)\n\n\ndef makeBlocks(howMany=20):\n    _o, r = test_reactors.loadTestReactor(TEST_ROOT)\n    # shift y 3 to skip central assemblies 1/3 volume\n    return r.core.getBlocks(Flags.FUEL)[3 : howMany + 3]\n"
  },
  {
    "path": "armi/physics/neutronics/tests/test_crossSectionSettings.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"XS Settings tests.\"\"\"\n\nimport io\nimport unittest\n\nimport voluptuous as vol\nfrom ruamel.yaml import YAML\n\nfrom armi import settings\nfrom armi.physics.neutronics.const import CONF_CROSS_SECTION\nfrom armi.physics.neutronics.crossSectionSettings import (\n    CONF_BLOCK_REPRESENTATION,\n    CONF_GEOM,\n    XSModelingOptions,\n    XSSettingDef,\n    XSSettings,\n    xsSettingsValidator,\n)\nfrom armi.physics.neutronics.settings import (\n    CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION,\n    CONF_XS_BLOCK_REPRESENTATION,\n)\nfrom armi.physics.neutronics.tests.test_neutronicsPlugin import XS_EXAMPLE\nfrom armi.settings import caseSettings\n\n\nclass TestCrossSectionSettings(unittest.TestCase):\n    def test_crossSections(self):\n        xsModel = XSModelingOptions(\n            xsID=\"AA\",\n            geometry=\"0D\",\n            criticalBuckling=True,\n            blockRepresentation=\"Median\",\n            requiredRAM=20.0,\n        )\n        self.assertEqual(\"AA\", xsModel.xsID)\n        self.assertEqual(\"0D\", xsModel.geometry)\n        self.assertEqual(\"Median\", xsModel.blockRepresentation)\n        self.assertFalse(xsModel.fluxIsPregenerated)\n        self.assertFalse(xsModel.xsIsPregenerated)\n        self.assertTrue(xsModel.criticalBuckling)\n        self.assertEqual(20.0, xsModel.requiredRAM)\n\n    def 
test_pregeneratedCrossSections(self):\n        cs = settings.Settings()\n        xs = XSSettings()\n        xa = XSModelingOptions(\"XA\", xsFileLocation=[\"ISOXA\"])\n        xs[\"XA\"] = xa\n        self.assertEqual([\"ISOXA\"], xa.xsFileLocation)\n        self.assertNotIn(\"XB\", xs)\n        xs.setDefaults(\n            cs[CONF_XS_BLOCK_REPRESENTATION],\n            cs[CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION],\n        )\n        # Check that the file location of 'XB' still points to the same file location as 'XA'.\n        self.assertEqual(xa, xs[\"XB\"])\n        self.assertFalse(xa.fluxIsPregenerated)\n        self.assertTrue(xa.xsIsPregenerated)\n        self.assertFalse(xa.criticalBuckling)\n\n    def test_pregeneratedFluxInputs(self):\n        xsModel = XSModelingOptions(\n            xsID=\"AA\",\n            fluxFileLocation=\"ISOAA\",\n            geometry=\"0D\",\n            criticalBuckling=True,\n            blockRepresentation=\"Median\",\n        )\n        self.assertEqual(\"AA\", xsModel.xsID)\n        self.assertEqual(\"0D\", xsModel.geometry)\n        self.assertEqual(\"ISOAA\", xsModel.fluxFileLocation)\n        self.assertTrue(xsModel.fluxIsPregenerated)\n        self.assertTrue(xsModel.criticalBuckling)\n        self.assertEqual(\"Median\", xsModel.blockRepresentation)\n\n    def test_prioritization(self):\n        xsModel = XSModelingOptions(\n            xsID=\"AA\",\n            geometry=\"0D\",\n            criticalBuckling=True,\n            xsPriority=2,\n            xsExecuteExclusive=True,\n        )\n        self.assertEqual(\"AA\", xsModel.xsID)\n        self.assertEqual(True, xsModel.xsExecuteExclusive)\n        self.assertEqual(2, xsModel.xsPriority)\n\n        xsModel = XSModelingOptions(\n            xsID=\"AA\",\n            geometry=\"0D\",\n            criticalBuckling=True,\n        )\n        # defaults work\n        xsModel.setDefaults(\"Average\", False)\n        self.assertEqual(False, 
xsModel.xsExecuteExclusive)\n        self.assertEqual(5, xsModel.xsPriority)\n\n    def test_homogeneousXsDefaultSettingAssignment(self):\n        \"\"\"\n        Make sure the object can whip up an unspecified xsID by default.\n\n        This is used when user hasn't specified anything.\n        \"\"\"\n        cs = settings.Settings()\n        xsModel = XSSettings()\n        xsModel.setDefaults(\n            cs[CONF_XS_BLOCK_REPRESENTATION],\n            cs[CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION],\n        )\n        self.assertNotIn(\"YA\", xsModel)\n        self.assertEqual(xsModel[\"YA\"].geometry, \"0D\")\n        self.assertEqual(xsModel[\"YA\"].criticalBuckling, True)\n        self.assertEqual(xsModel[\"YA\"].ductHeterogeneous, False)\n        self.assertEqual(xsModel[\"YA\"].traceIsotopeThreshold, 0.0)\n        self.assertEqual(xsModel[\"YA\"].requiredRAM, 0.0)\n\n    def test_setDefSettingsByLowestEnvGroupHomog(self):\n        # Initialize some micro suffix in the cross sections\n        cs = settings.Settings()\n        xs = XSSettings()\n        jd = XSModelingOptions(\"JD\", geometry=\"0D\", criticalBuckling=False)\n        xs[\"JD\"] = jd\n        xs.setDefaults(\n            cs[CONF_XS_BLOCK_REPRESENTATION],\n            cs[CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION],\n        )\n\n        self.assertIn(\"JD\", xs)\n\n        # Check that new micro suffix `JF` with higher burn-up group gets assigned the same settings as `JD`\n        self.assertNotIn(\"JF\", xs)\n        self.assertEqual(xs[\"JD\"], xs[\"JF\"])\n        self.assertNotIn(\"JF\", xs)\n\n        # Check that new micro suffix `JG` with higher burn-up group gets assigned the same settings as `JD`\n        self.assertNotIn(\"JG\", xs)\n        self.assertEqual(xs[\"JG\"], xs[\"JD\"])\n\n        # Check that new micro suffix `JB` with lower burn-up group does NOT get assigned the same settings as `JD`\n        self.assertNotIn(\"JB\", xs)\n        
self.assertNotEqual(xs[\"JD\"], xs[\"JB\"])\n\n    def test_setDefSettingsByLowestEnvGroup1D(self):\n        # Initialize some micro suffix in the cross sections\n        cs = settings.Settings()\n        xsModel = XSSettings()\n        rq = XSModelingOptions(\n            \"RQ\",\n            geometry=\"1D cylinder\",\n            blockRepresentation=\"ComponentAverage1DCylinder\",\n            meshSubdivisionsPerCm=1.0,\n        )\n        xsModel[\"RQ\"] = rq\n        xsModel.setDefaults(\n            cs[CONF_XS_BLOCK_REPRESENTATION],\n            cs[CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION],\n        )\n\n        # Check that new micro suffix `RY` with higher burn-up group gets assigned the same settings as `RQ`\n        self.assertNotIn(\"RY\", xsModel)\n        self.assertEqual(xsModel[\"RY\"], xsModel[\"RQ\"])\n\n        # Check that new micro suffix `RZ` with higher burn-up group gets assigned the same settings as `RQ`\n        self.assertNotIn(\"RZ\", xsModel)\n        self.assertEqual(xsModel[\"RZ\"], xsModel[\"RQ\"])\n\n        # Check that new micro suffix `RA` with lower burn-up group does NOT get assigned the same settings as `RQ`\n        self.assertNotIn(\"RA\", xsModel)\n        self.assertNotEqual(xsModel[\"RA\"], xsModel[\"RQ\"])\n\n    def test_optionalKey(self):\n        \"\"\"Test that optional key shows up with default value.\"\"\"\n        cs = settings.Settings()\n        xsModel = XSSettings()\n        da = XSModelingOptions(\n            \"DA\",\n            geometry=\"1D cylinder\",\n            meshSubdivisionsPerCm=1.0,\n            ductHeterogeneous=True,\n            traceIsotopeThreshold=1.0e-5,\n        )\n        xsModel[\"DA\"] = da\n        xsModel.setDefaults(\n            cs[CONF_XS_BLOCK_REPRESENTATION],\n            cs[CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION],\n        )\n        self.assertEqual(xsModel[\"DA\"].mergeIntoClad, [\"gap\"])\n        self.assertEqual(xsModel[\"DA\"].meshSubdivisionsPerCm, 
1.0)\n        self.assertEqual(xsModel[\"DA\"].ductHeterogeneous, True)\n        self.assertEqual(xsModel[\"DA\"].traceIsotopeThreshold, 1.0e-5)\n        self.assertEqual(xsModel[\"DA\"].mergeIntoFuel, [])\n\n    def test_badCrossSections(self):\n        with self.assertRaises(TypeError):\n            # This will fail because it is not the required\n            # Dict[str: Dict] structure\n            xsSettingsValidator({CONF_GEOM: \"4D\"})\n\n        with self.assertRaises(vol.error.MultipleInvalid):\n            # This will fail because it has an invalid type for ``driverID``\n            xsSettingsValidator({\"AA\": {\"driverId\": 0.0}})\n\n        with self.assertRaises(vol.error.MultipleInvalid):\n            # This will fail because it has an invalid value for\n            # the ``blockRepresentation``\n            xsSettingsValidator({\"AA\": {CONF_BLOCK_REPRESENTATION: \"Invalid\"}})\n\n        with self.assertRaises(vol.error.MultipleInvalid):\n            # This will fail because the ``xsID`` is not one or two\n            # characters\n            xsSettingsValidator({\"AAA\": {CONF_BLOCK_REPRESENTATION: \"Average\"}})\n\n\nclass TestXSSettings(unittest.TestCase):\n    def test_yamlIO(self):\n        \"\"\"Ensure we can read/write this custom setting object to yaml.\"\"\"\n        yaml = YAML()\n        inp = yaml.load(io.StringIO(XS_EXAMPLE))\n        xs = XSSettingDef(\"TestSetting\")\n        xs.setValue(inp)\n        self.assertEqual(xs.value[\"BA\"].geometry, \"1D slab\")\n        outBuf = io.StringIO()\n        output = xs.dump()\n        yaml.dump(output, outBuf)\n        outBuf.seek(0)\n        inp2 = yaml.load(outBuf)\n        self.assertEqual(inp.keys(), inp2.keys())\n\n    def test_caseSettings(self):\n        \"\"\"\n        Test the setting of the cross section setting using the case settings object.\n\n        Notes\n        -----\n        The purpose of this test is to ensure that the cross sections sections can\n        be removed from 
an existing case settings object once they have been set.\n        \"\"\"\n\n        def _setInitialXSSettings():\n            cs = caseSettings.Settings()\n            cs[CONF_CROSS_SECTION] = XSSettings()\n            cs[CONF_CROSS_SECTION][\"AA\"] = XSModelingOptions(\"AA\", geometry=\"0D\")\n            cs[CONF_CROSS_SECTION][\"BA\"] = XSModelingOptions(\"BA\", geometry=\"0D\")\n            self.assertIn(\"AA\", cs[CONF_CROSS_SECTION])\n            self.assertIn(\"BA\", cs[CONF_CROSS_SECTION])\n            self.assertNotIn(\"CA\", cs[CONF_CROSS_SECTION])\n            self.assertNotIn(\"DA\", cs[CONF_CROSS_SECTION])\n            return cs\n\n        cs = _setInitialXSSettings()\n        cs[CONF_CROSS_SECTION] = {\"AA\": {}, \"BA\": {}}\n        self.assertDictEqual(cs[CONF_CROSS_SECTION], {})\n        self.assertTrue(isinstance(cs[CONF_CROSS_SECTION], XSSettings))\n\n        # Produce an error if the setting is set to\n        # a None value\n        cs = _setInitialXSSettings()\n        with self.assertRaises(TypeError):\n            cs[CONF_CROSS_SECTION] = None\n\n        cs = _setInitialXSSettings()\n        cs[CONF_CROSS_SECTION] = {\"AA\": None, \"BA\": {}}\n        self.assertDictEqual(cs[CONF_CROSS_SECTION], {})\n\n        # Test that a new XS setting can be added to an existing\n        # caseSetting using the ``XSModelingOptions`` or using\n        # a dictionary.\n        cs = _setInitialXSSettings()\n        cs[CONF_CROSS_SECTION].update({\"CA\": XSModelingOptions(\"CA\", geometry=\"0D\"), \"DA\": {CONF_GEOM: \"0D\"}})\n        self.assertIn(\"AA\", cs[CONF_CROSS_SECTION])\n        self.assertIn(\"BA\", cs[CONF_CROSS_SECTION])\n        self.assertIn(\"CA\", cs[CONF_CROSS_SECTION])\n        self.assertIn(\"DA\", cs[CONF_CROSS_SECTION])\n\n        # Clear out the settings by setting the value to a None.\n        # This will be interpreted as a empty dictionary.\n        cs[CONF_CROSS_SECTION] = {}\n        self.assertDictEqual(cs[CONF_CROSS_SECTION], 
{})\n        self.assertTrue(isinstance(cs[CONF_CROSS_SECTION], XSSettings))\n\n        # This will fail because the ``setDefaults`` method on the\n        # ``XSSettings`` has not yet been called.\n        with self.assertRaises(ValueError):\n            cs[CONF_CROSS_SECTION][\"AA\"]\n\n        cs[CONF_CROSS_SECTION].setDefaults(\n            blockRepresentation=cs[CONF_XS_BLOCK_REPRESENTATION],\n            validBlockTypes=cs[CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION],\n        )\n\n        cs[CONF_CROSS_SECTION][\"AA\"]\n        self.assertEqual(cs[CONF_CROSS_SECTION][\"AA\"].geometry, \"0D\")\n\n    def test_csBlockRepresentation(self):\n        \"\"\"\n        Test that the XS block representation is applied globally,\n        but only to XS modeling options where the blockRepresentation\n        has not already been assigned.\n        \"\"\"\n        cs = caseSettings.Settings()\n        cs[CONF_XS_BLOCK_REPRESENTATION] = \"FluxWeightedAverage\"\n        cs[CONF_CROSS_SECTION] = XSSettings()\n        cs[CONF_CROSS_SECTION][\"AA\"] = XSModelingOptions(\"AA\", geometry=\"0D\")\n        cs[CONF_CROSS_SECTION][\"BA\"] = XSModelingOptions(\"BA\", geometry=\"0D\", blockRepresentation=\"Average\")\n\n        self.assertEqual(cs[CONF_CROSS_SECTION][\"AA\"].blockRepresentation, None)\n        self.assertEqual(cs[CONF_CROSS_SECTION][\"BA\"].blockRepresentation, \"Average\")\n\n        cs[CONF_CROSS_SECTION].setDefaults(\n            cs[CONF_XS_BLOCK_REPRESENTATION],\n            cs[CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION],\n        )\n\n        self.assertEqual(cs[CONF_CROSS_SECTION][\"AA\"].blockRepresentation, \"FluxWeightedAverage\")\n        self.assertEqual(cs[CONF_CROSS_SECTION][\"BA\"].blockRepresentation, \"Average\")\n\n    def test_csBlockRepresentationFileLocation(self):\n        \"\"\"\n        Test that default blockRepresentation is applied correctly to a\n        XSModelingOption that has the ``xsFileLocation`` attribute defined.\n   
     \"\"\"\n        cs = caseSettings.Settings()\n        cs[CONF_XS_BLOCK_REPRESENTATION] = \"FluxWeightedAverage\"\n        cs[CONF_CROSS_SECTION] = XSSettings()\n        cs[CONF_CROSS_SECTION][\"AA\"] = XSModelingOptions(\"AA\", xsFileLocation=[])\n\n        # Check FluxWeightedAverage\n        cs[CONF_CROSS_SECTION].setDefaults(\n            cs[CONF_XS_BLOCK_REPRESENTATION],\n            cs[CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION],\n        )\n        self.assertEqual(cs[CONF_CROSS_SECTION][\"AA\"].blockRepresentation, \"FluxWeightedAverage\")\n\n        # Check Average\n        cs[CONF_XS_BLOCK_REPRESENTATION] = \"Average\"\n        cs[CONF_CROSS_SECTION][\"AA\"] = XSModelingOptions(\"AA\", xsFileLocation=[])\n        cs[CONF_CROSS_SECTION].setDefaults(\n            cs[CONF_XS_BLOCK_REPRESENTATION],\n            cs[CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION],\n        )\n        self.assertEqual(cs[CONF_CROSS_SECTION][\"AA\"].blockRepresentation, \"Average\")\n\n        # Check Median\n        cs[CONF_XS_BLOCK_REPRESENTATION] = \"Average\"\n        cs[CONF_CROSS_SECTION][\"AA\"] = XSModelingOptions(\"AA\", xsFileLocation=[], blockRepresentation=\"Median\")\n        cs[CONF_CROSS_SECTION].setDefaults(\n            cs[CONF_XS_BLOCK_REPRESENTATION],\n            cs[CONF_DISABLE_BLOCK_TYPE_EXCLUSION_IN_XS_GENERATION],\n        )\n        self.assertEqual(cs[CONF_CROSS_SECTION][\"AA\"].blockRepresentation, \"Median\")\n\n    def test_xsSettingsSetDefault(self):\n        \"\"\"Test the configuration options of the ``setDefaults`` method.\"\"\"\n        cs = caseSettings.Settings()\n        cs[CONF_XS_BLOCK_REPRESENTATION] = \"FluxWeightedAverage\"\n        cs[CONF_CROSS_SECTION].setDefaults(blockRepresentation=cs[CONF_XS_BLOCK_REPRESENTATION], validBlockTypes=None)\n        self.assertEqual(cs[CONF_CROSS_SECTION][\"AA\"].validBlockTypes, None)\n\n        cs[CONF_CROSS_SECTION].setDefaults(blockRepresentation=cs[CONF_XS_BLOCK_REPRESENTATION], 
validBlockTypes=True)\n        self.assertEqual(cs[CONF_CROSS_SECTION][\"AA\"].validBlockTypes, None)\n\n        cs[CONF_CROSS_SECTION].setDefaults(blockRepresentation=cs[CONF_XS_BLOCK_REPRESENTATION], validBlockTypes=False)\n        self.assertEqual(cs[CONF_CROSS_SECTION][\"AA\"].validBlockTypes, [\"fuel\"])\n\n        cs[CONF_CROSS_SECTION].setDefaults(\n            blockRepresentation=cs[CONF_XS_BLOCK_REPRESENTATION],\n            validBlockTypes=[\"control\", \"fuel\", \"plenum\"],\n        )\n        self.assertEqual(cs[CONF_CROSS_SECTION][\"AA\"].validBlockTypes, [\"control\", \"fuel\", \"plenum\"])\n"
  },
  {
    "path": "armi/physics/neutronics/tests/test_crossSectionTable.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for cross section table for depletion.\"\"\"\n\nimport unittest\n\nfrom armi.nuclearDataIO.cccc import isotxs\nfrom armi.physics.neutronics.isotopicDepletion import (\n    crossSectionTable,\n)\nfrom armi.physics.neutronics.isotopicDepletion import (\n    isotopicDepletionInterface as idi,\n)\nfrom armi.physics.neutronics.latticePhysics import ORDER\nfrom armi.reactor.tests.test_blocks import loadTestBlock\nfrom armi.settings import Settings\nfrom armi.testing import loadTestReactor\nfrom armi.tests import ISOAA_PATH\n\n\nclass TestCrossSectionTable(unittest.TestCase):\n    def test_makeTable(self):\n        \"\"\"Test making a cross section table.\n\n        .. 
test:: Generate cross section table.\n            :id: T_ARMI_DEPL_TABLES\n            :tests: R_ARMI_DEPL_TABLES\n        \"\"\"\n        obj = loadTestBlock()\n        obj.p.mgFlux = range(33)\n        core = obj.parent.parent\n        core.lib = isotxs.readBinary(ISOAA_PATH)\n        table = crossSectionTable.makeReactionRateTable(obj)\n\n        self.assertEqual(len(obj.getNuclides()), len(table))\n        self.assertEqual(obj.getName(), \"B0001-000\")\n\n        self.assertEqual(table.getName(), \"B0001-000\")\n        self.assertTrue(table.hasValues())\n\n        xSecTable = table.getXsecTable()\n        self.assertEqual(len(xSecTable), 11)\n        self.assertIn(\"xsecs\", xSecTable[0])\n        self.assertIn(\"mcnpId\", xSecTable[-1])\n\n    def test_isotopicDepletionInterface(self):\n        \"\"\"\n        Test isotopic depletion interface.\n\n        .. test:: ARMI provides a base class to deplete isotopes.\n            :id: T_ARMI_DEPL_ABC\n            :tests: R_ARMI_DEPL_ABC\n        \"\"\"\n        _o, r = loadTestReactor(inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\")\n        cs = Settings()\n\n        aid = idi.AbstractIsotopicDepleter(r, cs)\n        self.assertIsNone(aid.efpdToBurn)\n        self.assertEqual(len(aid._depleteByName), 0)\n\n        self.assertEqual(len(aid.getToDeplete()), 0)\n        self.assertEqual(ORDER, 5.0)\n"
  },
  {
    "path": "armi/physics/neutronics/tests/test_energyGroups.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Energy group tests.\"\"\"\n\nimport unittest\n\nfrom armi.physics.neutronics import energyGroups\n\n\nclass TestEnergyGroups(unittest.TestCase):\n    def test_invalidGroupStructureType(self):\n        \"\"\"Test that the reverse lookup fails on non-existent energy group bounds.\n\n        .. test:: Check the neutron energy group bounds logic fails correctly for the wrong structure.\n            :id: T_ARMI_EG_NE0\n            :tests: R_ARMI_EG_NE\n        \"\"\"\n        modifier = 1e-5\n        for groupStructureType in energyGroups.GROUP_STRUCTURE:\n            energyBounds = energyGroups.getGroupStructure(groupStructureType)\n            energyBounds[0] = energyBounds[0] * modifier\n            with self.assertRaises(ValueError):\n                energyGroups.getGroupStructureType(energyBounds)\n\n    def test_consistenciesBetweenGSAndGSType(self):\n        \"\"\"Test that the reverse lookup of the energy group structures work.\n\n        .. 
test:: Check the neutron energy group bounds for a given group structure.\n            :id: T_ARMI_EG_NE1\n            :tests: R_ARMI_EG_NE\n        \"\"\"\n        for groupStructureType in energyGroups.GROUP_STRUCTURE:\n            self.assertEqual(\n                groupStructureType,\n                energyGroups.getGroupStructureType(energyGroups.getGroupStructure(groupStructureType)),\n            )\n\n    def test_getFastFluxGroupCutoff(self):\n        \"\"\"Test ability to get the ARMI energy group index contained in energy threshold.\n\n        .. test:: Return the energy group index which contains a given energy threshold.\n            :id: T_ARMI_EG_FE\n            :tests: R_ARMI_EG_FE\n        \"\"\"\n        group, frac = energyGroups.getFastFluxGroupCutoff([100002, 100001, 100000, 99999, 0])\n\n        self.assertListEqual([group, frac], [2, 0])\n"
  },
  {
    "path": "armi/physics/neutronics/tests/test_macroXSGenerationInterface.py",
    "content": "# Copyright 2021 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"MacroXSGenerationInterface tests.\"\"\"\n\nimport unittest\nfrom collections import defaultdict\n\nfrom armi.nuclearDataIO import isotxs\nfrom armi.nuclearDataIO.xsCollections import XSCollection\nfrom armi.physics.neutronics.macroXSGenerationInterface import (\n    MacroXSGenerationInterface,\n)\nfrom armi.settings import Settings\nfrom armi.testing import loadTestReactor\nfrom armi.tests import ISOAA_PATH\n\n\nclass TestMacroXSGenerationInterface(unittest.TestCase):\n    def test_macroXSGenerationInterfaceBasics(self):\n        \"\"\"Test the macroscopic XS generating interfaces.\n\n        .. 
test:: Build macroscopic cross sections for all blocks in the reactor.\n            :id: T_ARMI_MACRO_XS\n            :tests: R_ARMI_MACRO_XS\n        \"\"\"\n        cs = Settings()\n        _o, r = loadTestReactor(inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\")\n\n        # Before: verify there are no macro XS on each block\n        for b in r.core.iterBlocks():\n            self.assertIsNone(b.macros)\n\n        # create the macro XS interface\n        i = MacroXSGenerationInterface(r, cs)\n        self.assertEqual(i.minimumNuclideDensity, 1e-15)\n        self.assertEqual(i.name, \"macroXsGen\")\n\n        # Mock up a nuclide library\n        mockLib = isotxs.readBinary(ISOAA_PATH)\n        mockLib.__dict__[\"_nuclides\"] = defaultdict(\n            lambda: mockLib.__dict__[\"_nuclides\"][\"CAA\"], mockLib.__dict__[\"_nuclides\"]\n        )\n\n        # This is the meat of it: build the macro XS\n        self.assertIsNone(i.macrosLastBuiltAt)\n        i.buildMacros(mockLib, buildScatterMatrix=False)\n        self.assertEqual(i.macrosLastBuiltAt, 0)\n\n        # After: verify there are macro XS on each block\n        for b in r.core.iterBlocks():\n            self.assertIsNotNone(b.macros)\n            self.assertTrue(isinstance(b.macros, XSCollection))\n"
  },
  {
    "path": "armi/physics/neutronics/tests/test_neutronicsPlugin.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Unit tests for the neutronics plugin.\"\"\"\n\nimport io\nimport unittest\n\nfrom ruamel.yaml import YAML\n\nfrom armi import getPluginManagerOrFail, settings, tests\nfrom armi.physics import neutronics\nfrom armi.physics.neutronics.const import CONF_CROSS_SECTION\nfrom armi.physics.neutronics.settings import (\n    CONF_BOUNDARIES,\n    CONF_DPA_XS_SET,\n    CONF_GEN_XS,\n    CONF_GLOBAL_FLUX_ACTIVE,\n    CONF_GRID_PLATE_DPA_XS_SET,\n    CONF_GROUP_STRUCTURE,\n    CONF_INNERS_,\n    CONF_LATTICE_PHYSICS_FREQUENCY,\n    CONF_NEUTRONICS_KERNEL,\n    CONF_OUTERS_,\n    getNeutronicsSettingValidators,\n)\nfrom armi.settings import caseSettings, settingsValidation\nfrom armi.settings.fwSettings.globalSettings import CONF_RUN_TYPE\nfrom armi.tests import TEST_ROOT\nfrom armi.tests.test_plugins import TestPlugin\nfrom armi.utils import directoryChangers\n\nXS_EXAMPLE = \"\"\"AA:\n    geometry: 0D\n    criticalBuckling: true\n    blockRepresentation: Median\nBA:\n    geometry: 1D slab\n    blockRepresentation: Median\n\"\"\"\n\n\nclass TestNeutronicsPlugin(TestPlugin):\n    plugin = neutronics.NeutronicsPlugin\n\n    def setUp(self):\n        self.td = directoryChangers.TemporaryDirectoryChanger()\n        self.td.__enter__()\n\n    def tearDown(self):\n        self.td.__exit__(None, None, None)\n\n    def test_customSettingObjectIO(self):\n        \"\"\"Check 
specialized settings can build objects as values and write.\"\"\"\n        cs = caseSettings.Settings()\n        yaml = YAML()\n        inp = yaml.load(io.StringIO(XS_EXAMPLE))\n        cs[CONF_CROSS_SECTION] = inp\n        self.assertEqual(cs[CONF_CROSS_SECTION][\"AA\"].geometry, \"0D\")\n        fname = \"test_setting_obj_io_.yaml\"\n        cs.writeToYamlFile(fname)\n        outText = open(fname, \"r\").read()\n        self.assertIn(\"geometry: 0D\", outText)\n\n    def test_customSettingRoundTrip(self):\n        \"\"\"Check specialized settings can go back and forth.\"\"\"\n        cs = caseSettings.Settings()\n        yaml = YAML()\n        inp = yaml.load(io.StringIO(XS_EXAMPLE))\n        cs[CONF_CROSS_SECTION] = inp\n        cs[CONF_CROSS_SECTION] = cs[CONF_CROSS_SECTION]\n        fname = \"test_setting_obj_io_round.yaml\"\n        cs.writeToYamlFile(fname)\n        outText = open(fname, \"r\").read()\n        self.assertIn(\"geometry: 0D\", outText)\n        self.assertIn(\"geometry: 1D\", outText)\n\n    def test_neutronicsSettingsLoaded(self):\n        \"\"\"Check that various special neutronics-specifics settings are loaded.\"\"\"\n        cs = caseSettings.Settings()\n\n        self.assertIn(CONF_INNERS_, cs)\n        self.assertIn(CONF_OUTERS_, cs)\n        self.assertIn(CONF_NEUTRONICS_KERNEL, cs)\n\n\nclass NeutronicsReactorTests(unittest.TestCase):\n    @classmethod\n    def setUpClass(cls):\n        # prepare the input files. 
This is important so the unit tests run from wherever\n        # they need to run from.\n        cls.directoryChanger = directoryChangers.DirectoryChanger(TEST_ROOT)\n        cls.directoryChanger.open()\n\n    @classmethod\n    def tearDownClass(cls):\n        cls.directoryChanger.close()\n\n    @staticmethod\n    def __getModifiedSettings(customSettings):\n        cs = settings.Settings()\n\n        newSettings = {}\n        for key, val in customSettings.items():\n            newSettings[key] = val\n\n        return cs.modified(newSettings=newSettings)\n\n    def test_kineticsParameterAssignment(self):\n        \"\"\"Test that the delayed neutron fraction and precursor decay constants are applied from settings.\"\"\"\n        r = tests.getEmptyHexReactor()\n        self.assertIsNone(r.core.p.beta)\n        self.assertIsNone(r.core.p.betaComponents)\n        self.assertIsNone(r.core.p.betaDecayConstants)\n\n        # Test that the group-wise beta and decay constants are assigned\n        # together given that they are the same length.\n        r = tests.getEmptyHexReactor()\n        cs = self.__getModifiedSettings(\n            customSettings={\n                \"beta\": [0.0] * 6,\n                \"decayConstants\": [1.0] * 6,\n            }\n        )\n        dbLoad = False\n        getPluginManagerOrFail().hook.onProcessCoreLoading(core=r.core, cs=cs, dbLoad=dbLoad)\n        r.core.setOptionsFromCs(cs)\n        self.assertEqual(r.core.p.beta, sum(cs[\"beta\"]))\n        self.assertListEqual(list(r.core.p.betaComponents), cs[\"beta\"])\n        self.assertListEqual(list(r.core.p.betaDecayConstants), cs[\"decayConstants\"])\n\n        # Test the assignment of total beta as a float\n        r = tests.getEmptyHexReactor()\n        cs = self.__getModifiedSettings(\n            customSettings={\"beta\": 0.00670},\n        )\n        getPluginManagerOrFail().hook.onProcessCoreLoading(core=r.core, cs=cs, dbLoad=dbLoad)\n        self.assertEqual(r.core.p.beta, 
cs[\"beta\"])\n        self.assertIsNone(r.core.p.betaComponents)\n        self.assertIsNone(r.core.p.betaDecayConstants)\n\n        # Test that nothing is assigned if the beta is specified as a list\n        # without a corresponding decay constants list.\n        r = tests.getEmptyHexReactor()\n        cs = self.__getModifiedSettings(\n            customSettings={\n                \"beta\": [0.0] * 6,\n            },\n        )\n        getPluginManagerOrFail().hook.onProcessCoreLoading(core=r.core, cs=cs, dbLoad=dbLoad)\n        self.assertIsNone(r.core.p.beta)\n        self.assertIsNone(r.core.p.betaComponents)\n        self.assertIsNone(r.core.p.betaDecayConstants)\n\n        # Test that 1 group beta components and decay constants can be assigned.\n        # Since beta is a list, ensure that it's assigned to the `betaComponents`\n        # parameter.\n        r = tests.getEmptyHexReactor()\n        cs = self.__getModifiedSettings(\n            customSettings={\"beta\": [0.0], \"decayConstants\": [1.0]},\n        )\n        getPluginManagerOrFail().hook.onProcessCoreLoading(core=r.core, cs=cs, dbLoad=dbLoad)\n        self.assertEqual(r.core.p.beta, sum(cs[\"beta\"]))\n        self.assertListEqual(list(r.core.p.betaComponents), cs[\"beta\"])\n        self.assertListEqual(list(r.core.p.betaDecayConstants), cs[\"decayConstants\"])\n\n        # Test that decay constants are not assigned without a corresponding\n        # group-wise beta input.\n        r = tests.getEmptyHexReactor()\n        cs = self.__getModifiedSettings(\n            customSettings={\"decayConstants\": [1.0] * 6},\n        )\n        getPluginManagerOrFail().hook.onProcessCoreLoading(core=r.core, cs=cs, dbLoad=dbLoad)\n        self.assertIsNone(r.core.p.beta)\n        self.assertIsNone(r.core.p.betaComponents)\n        self.assertIsNone(r.core.p.betaDecayConstants)\n\n        # Test that decay constants are not assigned without a corresponding\n        # group-wise beta input. 
This also demonstrates that the total beta\n        # is still assigned.\n        r = tests.getEmptyHexReactor()\n        cs = self.__getModifiedSettings(\n            customSettings={\"decayConstants\": [1.0] * 6, \"beta\": 0.0},\n        )\n        getPluginManagerOrFail().hook.onProcessCoreLoading(core=r.core, cs=cs, dbLoad=dbLoad)\n        self.assertEqual(r.core.p.beta, cs[\"beta\"])\n        self.assertIsNone(r.core.p.betaComponents)\n        self.assertIsNone(r.core.p.betaDecayConstants)\n\n        # Test the demonstrates that None values are acceptable\n        # and that nothing is assigned.\n        r = tests.getEmptyHexReactor()\n        cs = self.__getModifiedSettings(\n            customSettings={\"decayConstants\": None, \"beta\": None},\n        )\n        getPluginManagerOrFail().hook.onProcessCoreLoading(core=r.core, cs=cs, dbLoad=dbLoad)\n        self.assertEqual(r.core.p.beta, cs[\"beta\"])\n        self.assertIsNone(r.core.p.betaComponents)\n        self.assertIsNone(r.core.p.betaDecayConstants)\n\n        # Test that an error is raised if the decay constants\n        # and group-wise beta are inconsistent sizes\n        with self.assertRaises(ValueError):\n            r = tests.getEmptyHexReactor()\n            cs = self.__getModifiedSettings(\n                customSettings={\"decayConstants\": [1.0] * 6, \"beta\": [0.0]},\n            )\n            getPluginManagerOrFail().hook.onProcessCoreLoading(core=r.core, cs=cs, dbLoad=dbLoad)\n\n        # Test that an error is raised if the decay constants\n        # and group-wise beta are inconsistent sizes\n        with self.assertRaises(ValueError):\n            r = tests.getEmptyHexReactor()\n            cs = self.__getModifiedSettings(\n                customSettings={\"decayConstants\": [1.0] * 6, \"beta\": [0.0] * 5},\n            )\n            getPluginManagerOrFail().hook.onProcessCoreLoading(core=r.core, cs=cs, dbLoad=dbLoad)\n\n    @staticmethod\n    def 
__autoCorrectAllQueries(settingsValidator):\n        \"\"\"Force-Correct (resolve() to \"YES\") all queries in a Settings Validator.\"\"\"\n        for query in settingsValidator:\n            try:\n                query.correction()\n            except FileNotFoundError:\n                # to make testing easier, let's ignore settings that require input files\n                pass\n\n    def test_neutronicsSettingsValidators(self):\n        # grab the neutronics validators\n        cs = settings.Settings()\n        inspector = settingsValidation.Inspector(cs)\n        sv = getNeutronicsSettingValidators(inspector)\n        self.assertEqual(len(sv), 8)\n\n        # Test the Query: boundaries are now \"Extrapolated\", not \"Generalized\"\n        cs = cs.modified(newSettings={CONF_BOUNDARIES: \"Generalized\"})\n        inspector = settingsValidation.Inspector(cs)\n        sv = getNeutronicsSettingValidators(inspector)\n\n        self.__autoCorrectAllQueries(sv)\n        self.assertEqual(inspector.cs[CONF_BOUNDARIES], \"Extrapolated\")\n\n        # Test the Query: genXS are no longer True/False\n        cs = cs.modified(newSettings={CONF_GEN_XS: \"True\"})\n        inspector = settingsValidation.Inspector(cs)\n        sv = getNeutronicsSettingValidators(inspector)\n\n        self.__autoCorrectAllQueries(sv)\n        self.assertEqual(inspector.cs[CONF_GEN_XS], \"Neutron\")\n\n        cs = cs.modified(newSettings={CONF_GEN_XS: \"False\"})\n        inspector = settingsValidation.Inspector(cs)\n        sv = getNeutronicsSettingValidators(inspector)\n\n        self.__autoCorrectAllQueries(sv)\n        self.assertEqual(inspector.cs[CONF_GEN_XS], \"\")\n\n        # Test the Query: CONF_GLOBAL_FLUX_ACTIVE are no longer True/False\n        cs = cs.modified(newSettings={CONF_GLOBAL_FLUX_ACTIVE: \"True\"})\n        inspector = settingsValidation.Inspector(cs)\n        sv = getNeutronicsSettingValidators(inspector)\n\n        self.__autoCorrectAllQueries(sv)\n        
self.assertEqual(inspector.cs[CONF_GLOBAL_FLUX_ACTIVE], \"Neutron\")\n\n        cs = cs.modified(newSettings={CONF_GLOBAL_FLUX_ACTIVE: \"False\"})\n        inspector = settingsValidation.Inspector(cs)\n        sv = getNeutronicsSettingValidators(inspector)\n\n        self.__autoCorrectAllQueries(sv)\n        self.assertEqual(inspector.cs[CONF_GLOBAL_FLUX_ACTIVE], \"\")\n\n        # Test the Query: try to migrate the Group Structure name\n        cs = cs.modified(newSettings={CONF_GROUP_STRUCTURE: \"armi45\"})\n        inspector = settingsValidation.Inspector(cs)\n        sv = getNeutronicsSettingValidators(inspector)\n\n        self.__autoCorrectAllQueries(sv)\n        self.assertEqual(inspector.cs[CONF_GROUP_STRUCTURE], \"ARMI45\")\n\n        cs = cs.modified(newSettings={CONF_GROUP_STRUCTURE: \"bad_value\"})\n        inspector = settingsValidation.Inspector(cs)\n        sv = getNeutronicsSettingValidators(inspector)\n\n        self.__autoCorrectAllQueries(sv)\n        self.assertEqual(inspector.cs[CONF_GROUP_STRUCTURE], \"ANL33\")\n\n        # Test the Query: migrating some common shortened names for dpa XS sets\n        cs = cs.modified(newSettings={CONF_DPA_XS_SET: \"dpaHT9_33\"})\n        inspector = settingsValidation.Inspector(cs)\n        sv = getNeutronicsSettingValidators(inspector)\n\n        self.__autoCorrectAllQueries(sv)\n        self.assertEqual(inspector.cs[CONF_DPA_XS_SET], \"dpaHT9_ANL33_TwrBol\")\n\n        cs = cs.modified(newSettings={CONF_GRID_PLATE_DPA_XS_SET: \"dpa_SS316\"})\n        inspector = settingsValidation.Inspector(cs)\n        sv = getNeutronicsSettingValidators(inspector)\n\n        self.__autoCorrectAllQueries(sv)\n        self.assertEqual(inspector.cs[CONF_GRID_PLATE_DPA_XS_SET], \"dpaSS316_ANL33_TwrBol\")\n\n        cs = cs.modified(\n            newSettings={\n                CONF_RUN_TYPE: \"Snapshots\",\n                CONF_LATTICE_PHYSICS_FREQUENCY: \"BOC\",\n            }\n        )\n        inspector = 
settingsValidation.Inspector(cs)\n        sv = getNeutronicsSettingValidators(inspector)\n\n        self.__autoCorrectAllQueries(sv)\n        self.assertEqual(inspector.cs[CONF_LATTICE_PHYSICS_FREQUENCY], \"firstCoupledIteration\")\n"
  },
  {
    "path": "armi/physics/neutronics/tests/test_neutronicsSymmetry.py",
    "content": "# Copyright 2025 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nAudit symmetry-aware parameters in neutronics.\n\nSee Also\n--------\n    armi.testing.symmetryTesting\n\"\"\"\n\nfrom armi.physics.neutronics.parameters import getNeutronicsParameterDefinitions\nfrom armi.reactor.blocks import Block\nfrom armi.reactor.cores import Core\nfrom armi.testing import symmetryTesting\n\n\nclass TestNeutronicsParamSym(symmetryTesting.BasicArmiSymmetryTestHelper):\n    def setUp(self):\n        pluginParameters = getNeutronicsParameterDefinitions()\n        self.coreParamsToTest = pluginParameters[Core]\n        self.blockParamsToTest = pluginParameters[Block]\n        self.expectedSymmetricBlockParams = [\n            \"mgFlux\",\n            \"adjMgFlux\",\n            \"lastMgFlux\",\n            \"mgFluxGamma\",\n            \"reactionRates\",\n            \"power\",\n            \"powerGamma\",\n            \"powerNeutron\",\n            \"powerGenerated\",\n        ]\n        super().setUp()\n"
  },
  {
    "path": "armi/physics/safety/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Safety package for generic safety-related code.\"\"\"\n\nfrom armi import plugins\n\n\nclass SafetyPlugin(plugins.ArmiPlugin):\n    @staticmethod\n    @plugins.HOOKIMPL\n    def defineSettings():\n        \"\"\"Define settings for the plugin.\"\"\"\n        return []\n"
  },
  {
    "path": "armi/physics/tests/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"
  },
  {
    "path": "armi/physics/tests/test_executers.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This module provides tests for the generic Executers.\"\"\"\n\nimport os\nimport subprocess\nimport unittest\n\nfrom armi.physics import executers\nfrom armi.reactor import geometry\nfrom armi.utils import directoryChangers\n\n\nclass MockParams:\n    def __init__(self):\n        self.cycle = 1\n        self.timeNode = 2\n\n\nclass MockCore:\n    def __init__(self):\n        # just pick a random geomType\n        self.geomType = geometry.GeomType.CARTESIAN\n        self.symmetry = \"full\"\n        self.p = MockParams()\n\n\nclass MockReactor:\n    def __init__(self):\n        self.core = MockCore()\n        self.o = None\n        self.p = MockParams()\n\n\nclass TestExecutionOptions(unittest.TestCase):\n    def test_runningDirectoryPath(self):\n        \"\"\"\n        Test that the running directory path is set up correctly\n        based on the case title and label provided.\n        \"\"\"\n        e = executers.ExecutionOptions(label=None)\n        e.setRunDirFromCaseTitle(caseTitle=\"test\")\n        self.assertEqual(os.path.basename(e.runDir), \"508bc04f-0\")\n\n        e = executers.ExecutionOptions(label=\"label\")\n        e.setRunDirFromCaseTitle(caseTitle=\"test\")\n        self.assertEqual(os.path.basename(e.runDir), \"b07da087-0\")\n\n        e = executers.ExecutionOptions(label=\"label2\")\n        
e.setRunDirFromCaseTitle(caseTitle=\"test\")\n        self.assertEqual(os.path.basename(e.runDir), \"9c1c83cb-0\")\n\n\nclass TestExecuters(unittest.TestCase):\n    def setUp(self):\n        e = executers.ExecutionOptions(label=None)\n        self.executer = executers.DefaultExecuter(e, MockReactor())\n\n    def test_collectInputsAndOutputs(self):\n        \"\"\"Verify that the executer can select to not copy back output.\"\"\"\n        self.executer.options.inputFile = \"test.inp\"\n        self.executer.options.outputFile = \"test.out\"\n        self.executer.options.copyOutput = False\n        inputs, outputs = self.executer._collectInputsAndOutputs()\n        self.assertEqual(\"test.inp\", inputs[0], \"Input file was not successfully identified.\")\n        self.assertTrue(outputs == [], \"Outputs were returned erroneously!\")\n\n        self.executer.options.copyOutput = True\n        inputs, outputs = self.executer._collectInputsAndOutputs()\n        self.assertEqual(\"test.inp\", inputs[0], \"Input file was not successfully identified.\")\n        self.assertEqual(\"test.out\", outputs[0], \"Output file was not successfully identified.\")\n\n    def test_updateRunDir(self):\n        \"\"\"\n        Verify that runDir is updated when TemporaryDirectoryChanger is used and\n        not updated when ForcedCreationDirectoryChanger is used.\n        \"\"\"\n        self.assertEqual(self.executer.dcType, directoryChangers.TemporaryDirectoryChanger)\n        self.executer._updateRunDir(\"updatedRunDir\")\n        self.assertEqual(self.executer.options.runDir, \"updatedRunDir\")\n\n        # change directoryChanger type, runDir not updated\n        self.executer.options.runDir = \"runDir\"\n        self.executer.dcType = directoryChangers.ForcedCreationDirectoryChanger\n        self.executer._updateRunDir(\"notThisString\")\n        self.assertEqual(self.executer.options.runDir, \"runDir\")\n\n    def test_runExternalExecutable(self):\n        \"\"\"Run an external 
executable with an Executer.\n\n        .. test:: Run an external executable with an Executer.\n            :id: T_ARMI_EX\n            :tests: R_ARMI_EX\n        \"\"\"\n        filePath = \"test_runExternalExecutable.py\"\n        outFile = \"tmp.txt\"\n        label = \"printExtraStuff\"\n\n        class MockExecutionOptions(executers.ExecutionOptions):\n            pass\n\n        class MockExecuter(executers.Executer):\n            def run(self, args):\n                if self.options.label == label:\n                    subprocess.run([\"python\", filePath, \"extra stuff\"])\n                else:\n                    subprocess.run([\"python\", filePath, args])\n\n        with directoryChangers.TemporaryDirectoryChanger():\n            # build a mock external program (a little Python script)\n            self.__makeALittleTestProgram(filePath, outFile)\n\n            # make sure the output file doesn't exist yet\n            self.assertFalse(os.path.exists(outFile))\n\n            # set up an executer for our little test program\n            opts = MockExecutionOptions()\n            exe = MockExecuter(opts, None)\n            exe.run(\"\")\n\n            # make sure the output file exists now\n            self.assertTrue(os.path.exists(outFile))\n\n            # run the executer with options\n            testString = \"some options\"\n            exe.run(testString)\n\n            # make sure the output file exists now\n            self.assertTrue(os.path.exists(outFile))\n            newTxt = open(outFile, \"r\").read()\n            self.assertIn(testString, newTxt)\n\n            # now prove the options object can affect the execution\n            exe.options.label = label\n            exe.run(\"\")\n            newerTxt = open(outFile, \"r\").read()\n            self.assertIn(\"extra stuff\", newerTxt)\n\n    @staticmethod\n    def __makeALittleTestProgram(filePath, outFile):\n        \"\"\"Helper method to write a tiny Python script.\n\n        We need 
\"an external program\" for testing.\n        \"\"\"\n        txt = f\"\"\"import sys\n\ndef main():\n    with open(\"{outFile}\", \"w\") as f:\n        f.write(str(sys.argv))\n\nif __name__ == \"__main__\":\n    main()\n\"\"\"\n        with open(filePath, \"w\") as f:\n            f.write(txt)\n"
  },
  {
    "path": "armi/physics/thermalHydraulics/__init__.py",
    "content": "# Copyright 2020 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Thermal Hydraulics package.\"\"\"\n\nfrom armi.physics.thermalHydraulics.plugin import ThermalHydraulicsPlugin  # noqa: F401\n"
  },
  {
    "path": "armi/physics/thermalHydraulics/const.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\n# NOTE: This couldn't be packaged with the thermalHydraulics plugin because it\n# ends up getting imported by the ARMI framework before plugins get imported.\n\nORIFICE_SETTING_ZONE_MAP = \"zone map\"\n"
  },
  {
    "path": "armi/physics/thermalHydraulics/parameters.py",
    "content": "# Copyright 2020 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Parameter definitions for thermal hydraulic plugins.\"\"\"\n\nfrom armi.reactor import parameters\nfrom armi.reactor.assemblies import Assembly\nfrom armi.reactor.blocks import Block\nfrom armi.reactor.parameters import ParamLocation\nfrom armi.utils import units\n\n\ndef getParameterDefinitions():\n    \"\"\"Return ParameterDefinitionCollections for each appropriate ArmiObject.\"\"\"\n    return {Assembly: _getAssemblyParams(), Block: _getBlockParams()}\n\n\ndef _getAssemblyParams():\n    pDefs = parameters.ParameterDefinitionCollection()\n\n    with pDefs.createBuilder(default=0.0, categories=[\"thermal hydraulics\"]) as pb:\n        pb.defParam(\n            \"THmassFlowRate\",\n            units=f\"{units.KG}/{units.SECONDS}\",\n            description=\"The nominal assembly flow rate\",\n            categories=[\"broadcast\"],\n            location=ParamLocation.VOLUME_INTEGRATED,\n        )\n\n        pb.defParam(\n            \"THcoolantInletT\",\n            units=units.DEGC,\n            description=\"Assembly inlet temperature in C (cold temperature)\",\n        )\n\n    with pDefs.createBuilder(\n        default=0.0,\n        location=ParamLocation.AVERAGE,\n        saveToDB=True,\n        categories=[\"thermal hydraulics\"],\n    ) as pb:\n        pb.defParam(\n            \"THdeltaPTotal\",\n            units=units.PASCALS,\n            
description=\"Total pressure difference across the assembly\",\n            categories=[\"broadcast\"],\n        )\n\n    return pDefs\n\n\ndef _getBlockParams():\n    pDefs = parameters.ParameterDefinitionCollection()\n\n    with pDefs.createBuilder(default=0.0, categories=[\"thInterface\"], saveToDB=True) as pb:\n        pb.defParam(\n            \"THcoolantOutletT\",\n            units=units.DEGC,\n            description=\"Coolant temperature at the outlet of this block\",\n            location=ParamLocation.TOP,\n        )\n\n        pb.defParam(\n            \"THmassFlowRate\",\n            units=f\"{units.KG}/{units.SECONDS}\",\n            description=\"Mass flow rate\",\n            location=ParamLocation.VOLUME_INTEGRATED,\n        )\n\n        pb.defParam(\n            \"THcoolantInletT\",\n            units=units.DEGC,\n            description=\"The nominal average bulk coolant inlet temperature into the block.\",\n            location=ParamLocation.BOTTOM,\n        )\n\n        pb.defParam(\n            \"THdeltaPTotal\",\n            units=units.PASCALS,\n            description=\"Total pressure difference in a block\",\n            location=ParamLocation.AVERAGE,\n        )\n\n    with pDefs.createBuilder(default=None, categories=[\"thermal hydraulics\", \"mongoose\"], saveToDB=True) as pb:\n        pb.defParam(\n            \"THcornTemp\",\n            units=units.DEGC,\n            description=\"Mid-wall duct temperature for assembly corners\",\n            location=ParamLocation.BOTTOM | ParamLocation.CORNERS,\n        )\n\n        pb.defParam(\n            \"THedgeTemp\",\n            units=units.DEGC,\n            description=\"Mid-wall duct temperature for assembly edges\",\n            location=ParamLocation.BOTTOM | ParamLocation.EDGES,\n        )\n\n    return pDefs\n"
  },
  {
    "path": "armi/physics/thermalHydraulics/plugin.py",
    "content": "# Copyright 2020 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nGeneric Thermal/Hydraulics Plugin.\n\nThermal/hydraulics is concerned with temperatures, flows, pressures, and heat transfer.\n\"\"\"\n\nfrom armi import interfaces, plugins\n\nORDER = interfaces.STACK_ORDER.THERMAL_HYDRAULICS\n\n\nclass ThermalHydraulicsPlugin(plugins.ArmiPlugin):\n    \"\"\"Plugin for thermal/hydraulics.\"\"\"\n\n    @staticmethod\n    @plugins.HOOKIMPL\n    def defineParameters():\n        \"\"\"Define additional parameters for the reactor data model.\"\"\"\n        from armi.physics.thermalHydraulics import parameters\n\n        return parameters.getParameterDefinitions()\n"
  },
  {
    "path": "armi/physics/thermalHydraulics/tests/__init__.py",
    "content": "# Copyright 2024 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"
  },
  {
    "path": "armi/physics/thermalHydraulics/tests/test_thermalHydraulicsSymmetry.py",
    "content": "# Copyright 2025 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nAudit symmetry-aware parameters in thermal hydraulics.\n\nSee Also\n--------\n    armi.testing.symmetryTesting\n\"\"\"\n\nfrom armi.physics.thermalHydraulics.parameters import getParameterDefinitions\nfrom armi.reactor.blocks import Block\nfrom armi.testing import symmetryTesting\n\n\nclass TestTHParamSymmetry(symmetryTesting.BasicArmiSymmetryTestHelper):\n    def setUp(self):\n        pluginParameters = getParameterDefinitions()\n        self.blockParamsToTest = pluginParameters[Block]\n        self.expectedSymmetricBlockParams = [\"THmassFlowRate\"]\n        super().setUp()\n"
  },
  {
    "path": "armi/pluginManager.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Slightly customized version of the stock pluggy ``PluginManager``.\"\"\"\n\nimport pluggy\n\n\nclass ArmiPluginManager(pluggy.PluginManager):\n    \"\"\"\n    PluginManager implementation with ARMI-specific features.\n\n    The main point of this subclass is to make it possible to detect when the plugin\n    manager has been mutated, allowing for safe caching of expensive results derived\n    from the set of registered plugins. This is done by exposing a counter that is\n    incremented any time the set of registered plugins is modified. If a client caches\n    any results derived from calling plugin hooks, caching this counter along with that\n    data allows for cheaply testing that the cached results are still valid.\n    \"\"\"\n\n    def __init__(self, *args, **kwargs):\n        pluggy.PluginManager.__init__(self, *args, **kwargs)\n\n        self._counter = 0\n\n    @property\n    def counter(self):\n        return self._counter\n\n    def register(self, *args, **kwargs):\n        self._counter += 1\n        pluggy.PluginManager.register(self, *args, **kwargs)\n\n    def unregister(self, *args, **kwargs):\n        self._counter += 1\n        pluggy.PluginManager.unregister(self, *args, **kwargs)\n"
  },
  {
    "path": "armi/plugins.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"\nPlugins allow various built-in or external functionality to be brought into the ARMI ecosystem.\n\nThis module defines the hooks that may be defined within plugins. Plugins are ultimately incorporated into a\n:py:class:`armi.pluginManager.ArmiPluginManager`, which live inside of a :py:class:`armi.apps.App` object.\n\nThe ``ArmiPluginManager`` is derived from the ``PluginManager`` class provided by the ``pluggy`` package, which provides\na registry of known plugins. Rather than create one directly, we use the :py:func:`armi.plugins.getNewPluginManager()`\nfunction, which handles some of the setup for us.\n\nFrom a high-altitude perspective, the plugins provide numerous \"hooks\", which allow for ARMI to be extended in various\nways. Some of these extensions are subtle and play a part in how certain ARMI components are initialized or defined. As\nsuch, it is necessary to register most plugins before some parts of ARMI are imported or exercised in a meaningful way.\nThese requirements are in flux, and will ultimately constitute part of the specification of the ARMI plugin\narchitecture. For now, to be safe, plugins should be registered as soon as possible.\n\nAfter forming the ``PluginManager``, the plugin hooks can be accessed through the ``hook`` attribute. 
E.g.::\n\n    >>> armi.getPluginManagerOrFail().hook.exposeInterfaces(cs=cs)\n\nDon't forget to use the keyword argument form for all arguments to hooks; ``pluggy`` requires them to enforce hook\nspecifications.\n\nThe :py:class:`armi.apps.App` class serves as the primary storage location of the PluginManager, and also provides some\nmethods to get data out of the plugins more ergonomically than through the hooks themselves.\n\nSome things you may want to bring in via a plugin includes:\n\n- :py:mod:`armi.settings` and their validators\n- :py:mod:`armi.reactor.components` for custom geometry\n- :py:mod:`armi.reactor.flags` for custom reactor components\n- :py:mod:`armi.interfaces` to define new calculation sequences and interactions with new codes\n- :py:mod:`armi.reactor.parameters` to represent new physical state on the reactor\n- :py:mod:`armi.materials` for custom materials\n- Elements of the :py:mod:`armi.gui`\n- :py:mod:`armi.operators` for adding new operations on reactor models\n- :py:mod:`armi.cli` for adding new operations on input files\n\nWarning\n-------\nThe plugin system was developed to support improved collaboration.  It is new and should be considered under\ndevelopment. The API is subject to change as the version of the ARMI framework approaches 1.0.\n\nNotes\n-----\nDue to the nature of some of these components, there are a couple of restrictions on the order in which things can be\nimported (lest we endeavor to redesign them considerably). Examples:\n\n  - Parameters: All parameter definitions must be present before any ``ArmiObject`` objects are instantiated. This is\n    mostly by choice, but also makes the most sense, because the ``ParameterCollection`` s are instance attributes of an\n    ``ArmiObject``, which in turn use ``Parameter`` objects as *class* attributes. 
We should know what class attributes\n    we have before making instances.\n\n  - Blueprints: Since blueprints should be extendable with new sections, we must also be able to provide new *class*\n    attributes to extend their behavior. This is because blueprints use the yamlize package, which uses class attributes\n    to define much of the class's behavior through metaclassing. Therefore, we need to be able to import all plugins\n    *before* importing blueprints.\n\nPlugins are currently stateless. They do not have ``__init__()`` methods, and when they are registered with the\nPluginManager, the PluginManager gets the Plugin's class object rather than an instance of that class. Also notice that\nall of the hooks are ``@staticmethod``\\ s. As a result, they can be called directly off of the class object, and only\nhave access to the state passed into them to perform their function. This is a deliberate design choice to keep the\nplugin system simple and to preclude a large class of potential bugs. At some point it may make sense to revisit this.\n\n**Other customization points**\n\nWhile the Plugin API is the main place for ARMI framework customization, there are several other areas where ARMI may be\nextended or customized. These typically pre-dated the Plugin-based architecture, and as the need arises may be migrated\nto here.\n\n    - Component types: Component types are registered dynamically through some metaclass magic, found in\n      :py:class:`armi.reactor.components.component.ComponentType` and\n      :py:class:`armi.reactor.composites.CompositeModelType`. Simply defining a new Component subclass should register\n      it with the appropriate ARMI systems. 
While this is convenient, it does lead to potential issues, as the behavior\n      of ARMI becomes sensitive to module import order and the like; the containing module needs to be imported before\n      the registration occurs, which can be surprising.\n\n    - Interface input files: Interfaces used to be discovered dynamically, rather than explicitly as they are now in the\n      :py:meth:`armi.plugins.ArmiPlugin.exposeInterfaces` plugin hook. Essentially they functioned as ersatz plugins.\n      One of the ways that they would customize ARMI behavior is through the\n      :py:meth:`armi.physics.interface.Interface.specifyInputs` static method, which is still used to determine inter-\n      Case dependencies and support cloning and hashing Case inputs. Going forward, this approach will likely be\n      deprecated in favor of a plugin hook.\n\n    - Fuel handler logic: The :py:class:`armi.physics.fuelCycle.fuelHandlers.FuelHandlerInterface` supports\n      customization through the dynamic loading of fuel handler logic modules, based on user settings. This also\n      predated the plugin infrastructure, and may one day be replaced with plugin-based fuel handler logic.\n\n\"\"\"\n\nfrom typing import TYPE_CHECKING, Callable, Dict, List, Union\n\nimport pluggy\n\nfrom armi import pluginManager\nfrom armi.utils import flags\n\nif TYPE_CHECKING:\n    from armi.reactor.composites import Composite\n    from armi.reactor.converters.axialExpansionChanger import AxialExpansionChanger\n\n\nHOOKSPEC = pluggy.HookspecMarker(\"armi\")\nHOOKIMPL = pluggy.HookimplMarker(\"armi\")\n\n\nclass ArmiPlugin:\n    \"\"\"\n    An ArmiPlugin exposes a collection of hooks that allow users to add a variety of things to their ARMI application:\n    Interfaces, parameters, settings, flags, and much more.\n\n    .. 
impl:: Plugins add code to the application through interfaces.\n        :id: I_ARMI_PLUGIN\n        :implements: R_ARMI_PLUGIN\n\n        Each plugin has the option of implementing the ``exposeInterfaces`` method, and this will be used as a plugin\n        hook to add one or more Interfaces to the ARMI Application. Interfaces can wrap external executables with\n        nuclear modeling codes in them, or directly implement their logic in Python. But because Interfaces are Python\n        code, they have direct access to read and write from ARMI's reactor data model. This Plugin to multiple\n        Interfaces to reactor data model connection is the primary way that developers add code to an ARMI application\n        and simulation.\n    \"\"\"\n\n    @staticmethod\n    @HOOKSPEC\n    def exposeInterfaces(cs) -> List:\n        \"\"\"\n        Function for exposing interface(s) to other code.\n\n        .. impl:: Plugins can add interfaces to the operator.\n            :id: I_ARMI_PLUGIN_INTERFACES\n            :implements: R_ARMI_PLUGIN_INTERFACES\n\n            This method takes in a Settings object and returns a list of Interfaces, the position of each Interface in\n            the Interface stack, and a list of arguments to pass to the Interface when initializing it later. These\n            Interfaces can then be used to add code to a simulation.\n\n        Returns\n        -------\n        list\n            Tuples containing:\n\n            - The insertion order to use when building an interface stack,\n            - an implementation of the Interface class\n            - a dictionary of kwargs to pass to an Operator when adding an instance of the interface class\n\n            If no Interfaces should be active given the passed case settings, this should return an empty list.\n        \"\"\"\n\n    @staticmethod\n    @HOOKSPEC\n    def defineParameters() -> Dict:\n        \"\"\"\n        Define additional parameters for the reactor data model.\n\n        .. 
impl:: Plugins can add parameters to the reactor data model.\n            :id: I_ARMI_PLUGIN_PARAMS\n            :implements: R_ARMI_PLUGIN_PARAMS\n\n            Through this method, plugin developers can create new Parameters. A parameter can represent any physical\n            property an analyst might want to track. And they can be added at any level of the reactor data model.\n            Through this, the developers can extend ARMI and what physical properties of the reactor they want to\n            calculate, track, and store to the database.\n\n        .. impl:: Define an arbitrary physical parameter.\n            :id: I_ARMI_PARAM0\n            :implements: R_ARMI_PARAM\n\n            Through this method, plugin developers can create new Parameters. A parameter can represent any physical\n            property an analyst might want to track. For example, through this method, a plugin developer can add a new\n            thermodynamic property that adds a thermodynamic parameter to every block in the reactor. Or they could add\n            a neutronics parameter to every fuel assembly. A parameter is quite generic. But these parameters will be\n            tracked in the reactor data model, extend what developers can do with ARMI, and will be saved to the output\n            database.\n\n        Returns\n        -------\n        dict\n            Keys should be subclasses of ArmiObject, values being a ParameterDefinitionCollection should be added to the\n            key's parameter definitions.\n\n        Example\n        -------\n        >>> pluginBlockParams = parameters.ParameterDefinitionCollection()\n        >>> with pluginBlockParams.createBuilder() as pb:\n        ...     pb.defParam(\"plugBlkP1\", ...)\n        ...     # ...\n        >>> pluginAssemParams = parameters.ParameterDefinitionCollection()\n        >>> with pluginAssemParams.createBuilder() as pb:\n        ...     pb.defParam(\"plugAsmP1\", ...)\n        ...     
# ...\n        >>> return {blocks.Block: pluginBlockParams, assemblies.Assembly: pluginAssemParams}\n        \"\"\"\n\n    @staticmethod\n    @HOOKSPEC\n    def afterConstructionOfAssemblies(assemblies, cs) -> None:\n        \"\"\"\n        Function to call after a set of assemblies are constructed.\n\n        This hook can be used to:\n\n        - Verify that all assemblies satisfy constraints imposed by active interfaces and plugins\n        - Apply modifications to Assemblies based on modeling options and active interfaces\n\n        Implementers may alter the state of the passed Assembly objects.\n\n        Returns\n        -------\n        None\n        \"\"\"\n\n    @staticmethod\n    @HOOKSPEC\n    def onProcessCoreLoading(core, cs, dbLoad) -> None:\n        \"\"\"\n        Function to call whenever a Core object is newly built.\n\n        This is usually used to set initial parameter values from inputs, either after constructing a Core from\n        Blueprints, or after loading it from a database.\n        \"\"\"\n\n    @staticmethod\n    @HOOKSPEC\n    def beforeReactorConstruction(cs) -> None:\n        \"\"\"Function to call before the reactor is constructed.\"\"\"\n\n    @staticmethod\n    @HOOKSPEC\n    def defineFlags() -> Dict[str, Union[int, flags.auto]]:\n        \"\"\"\n        Add new flags to the reactor data model, and the simulation.\n\n        .. impl:: Plugins can define new, unique flags to the system.\n            :id: I_ARMI_FLAG_EXTEND1\n            :implements: R_ARMI_FLAG_EXTEND\n\n            This method allows plugin developers to provide novel values for the Flags system. This method returns a\n            dictionary mapping flag names to their desired numerical values. In most cases, no specific value is needed,\n            one can be automatically generated using :py:class:`armi.utils.flags.auto`. 
(For more information, see\n            :py:mod:`armi.reactor.flags`.)\n\n        See Also\n        --------\n        armi.reactor.flags\n\n        Example\n        -------\n        >>> def defineFlags():\n        ...     return {\"FANCY\": armi.utils.flags.auto()}\n        \"\"\"\n\n    @staticmethod\n    @HOOKSPEC\n    def defineBlockTypes() -> List:\n        \"\"\"\n        Function for providing novel Block types from a plugin.\n\n        This should return a list of tuples containing ``(compType, blockType)``, where ``blockType`` is a new ``Block``\n        subclass to register, and ``compType`` is the corresponding ``Component`` type that should activate it. For\n        instance a ``HexBlock`` would be created when the largest component is a ``Hexagon``::\n\n            [(Hexagon, HexBlock)]\n\n        Returns\n        -------\n        list\n            ``[(compType, BlockType), ...]``\n        \"\"\"\n\n    @staticmethod\n    @HOOKSPEC\n    def defineAssemblyTypes() -> List:\n        \"\"\"\n        Function for providing novel Assembly types from a plugin.\n\n        This should return a list of tuples containing ``(blockType, assemType)``, where ``assemType`` is a new\n        ``Assembly`` subclass to register, and ``blockType`` is the corresponding ``Block`` subclass that, if present in\n        the assembly, should trigger it to be of the corresponding ``assemType``.\n\n        Warning\n        -------\n        There is no guarantee that you will find subclassing ``Assembly`` useful.\n\n        Example\n        -------\n        .. 
code::\n\n            [\n                (HexBlock, HexAssembly),\n                (CartesianBlock, CartesianAssembly),\n                (ThRZBlock, ThRZAssembly),\n            ]\n\n        Returns\n        -------\n        list\n            List of new Block&Assembly types\n        \"\"\"\n\n    @staticmethod\n    @HOOKSPEC\n    def defineBlueprintsSections() -> List:\n        \"\"\"\n        Return new sections for the blueprints input method.\n\n        This hook allows plugins to extend the blueprints functionality with their own sections.\n\n        Returns\n        -------\n        list\n            (name, section, resolutionMethod) tuples, where:\n\n            - name : The name of the attribute to add to the Blueprints class; this should be a valid Python identifier.\n\n            - section : An instance of ``yaml.Attribute`` defining the data that is described by the Blueprints section.\n\n            - resolutionMethod : A callable that takes a Blueprints object and case settings as arguments. This will be\n              called like an unbound instance method on the passed Blueprints object to initialize the state of the new\n              Blueprints section.\n\n        Notes\n        -----\n        Most of the sections that a plugin would want to add may be better served as settings, rather than blueprints\n        sections. These sections were added to the blueprints mainly because the schema is more flexible, allowing\n        namespaces and hierarchical collections of settings. Perhaps in the near future it would make sense to enhance\n        the settings system to support these features, moving the blueprints extensions out into settings. 
This is\n        discussed in more detail in T1671.\n        \"\"\"\n\n    @staticmethod\n    @HOOKSPEC\n    def defineEntryPoints() -> List:\n        \"\"\"\n        Return new entry points for the ARMI CLI.\n\n        This hook allows plugins to provide their own ARMI entry points, which each serve as a command in the command-\n        line interface.\n\n        Returns\n        -------\n        list\n            class objects which derive from the base EntryPoint class.\n        \"\"\"\n\n    @staticmethod\n    @HOOKSPEC\n    def defineSettings() -> List:\n        \"\"\"\n        Define configuration settings for this plugin.\n\n        .. impl:: Plugins can add settings to the run.\n            :id: I_ARMI_PLUGIN_SETTINGS\n            :implements: R_ARMI_PLUGIN_SETTINGS\n\n            This hook allows plugin developers to provide their own configuration settings, which can participate in the\n            :py:class:`armi.settings.caseSettings.Settings`. Plugins may provide entirely new settings to what are\n            already provided by ARMI, as well as new options or default values for existing settings. For instance, the\n            framework provides a ``neutronicsKernel`` setting for selecting which global physics solver to use. Since we\n            wish to enforce that the user specify a valid kernel, the settings validator will check to make sure\n            that the user's requested kernel is among the available options. 
If a plugin were to provide a new\n            neutronics kernel (let's say MCNP), it should also define a new option to tell the settings system that\n            ``\"MCNP\"`` is a valid option.\n\n        Returns\n        -------\n        list\n            A list of Settings, Options, or Defaults to be registered.\n\n        See Also\n        --------\n        armi.physics.neutronics.NeutronicsPlugin.defineSettings\n        armi.settings.setting.Setting\n        armi.settings.setting.Option\n        armi.settings.setting.Default\n        \"\"\"\n        return []\n\n    @staticmethod\n    @HOOKSPEC\n    def defineSettingsValidators(inspector) -> List:\n        \"\"\"\n        Define the high-level settings input validators by adding them to an inspector.\n\n        Parameters\n        ----------\n        inspector : :py:class:`armi.settings.settingsValidation.Inspector` instance\n            The inspector to add queries to. See note below, this is not ideal.\n\n        Notes\n        -----\n        These are higher-level than the input-level SCHEMA defined in :py:meth:`defineSettings` and are intended to be\n        used for more complex cross-plugin info.\n\n        We would prefer to not manipulate objects passed in directly, but rather have the inspection happen in a\n        measurable hook. This would help find misbehaving plugins.\n\n        See Also\n        --------\n        armi.settings.settingsValidation.Inspector : Runs the queries\n\n        Returns\n        -------\n        list\n            Query objects to attach\n        \"\"\"\n        return []\n\n    @staticmethod\n    @HOOKSPEC\n    def defineCaseDependencies(case, suite):\n        r\"\"\"\n        Function for defining case dependencies.\n\n        Some Cases depend on the results of other ``Case``\\ s in the same ``CaseSuite``. Which dependencies exist, and\n        how they are discovered depends entirely on the type of analysis and active interfaces, etc. 
This function\n        allows a plugin to inspect settings and declare dependencies between the passed ``case`` and any other cases in\n        the passed ``suite``.\n\n        Parameters\n        ----------\n        case : Case\n            The specific case for which we want to find dependencies.\n        suite : CaseSuite\n            A CaseSuite object to which the Case and other potential dependencies belong.\n\n        Returns\n        -------\n        dependencies : set of Cases\n            This should return a set containing ``Case`` objects that are considered dependencies of the passed\n            ``case``. They should be members of the passed ``suite``.\n        \"\"\"\n\n    @staticmethod\n    @HOOKSPEC\n    def defineGuiWidgets() -> List:\n        \"\"\"\n        Define which settings should go in the GUI.\n\n        Rather than making widgets here, this simply returns metadata as a nested dictionary saying which tab to put\n        which settings on, and a little bit about how to group them.\n\n        Returns\n        -------\n        widgetData : list of dict\n            Each dict is nested. First level contains the tab name (e.g. 'Global Flux'). Second level contains a box\n            name. Third level contains help and a list of setting names\n\n        See Also\n        --------\n        armi.gui.submitter.layout.abstractTab.AbstractTab.addSectionsFromPlugin : uses data structure\n\n        Example\n        -------\n        >>> widgets = {\n        ...     'Global Flux': {\n        ...         'MCNP Solver Settings': {\n        ...             'help': \"Help message\"\n        ...             'settings': [\n        ...                 \"mcnpAddTallies\",\n        ...                 \"useSrctp\",\n        ...             ]\n        ...         }\n        ...     }\n        ... 
}\n        \"\"\"\n\n    @staticmethod\n    @HOOKSPEC\n    def getOperatorClassFromRunType(runType: str):\n        \"\"\"Return an Operator subclass if the runType is recognized by this plugin.\"\"\"\n\n    @staticmethod\n    @HOOKSPEC\n    def defineParameterRenames() -> Dict:\n        \"\"\"\n        Return a mapping from old parameter names to new parameter names.\n\n        Occasionally, it may become necessary to alter the name of an existing parameter. This can lead to frustration\n        when attempting to load from old database files that use the previous name. This hook allows a plugin to define\n        mappings from the old name to the new name, allowing the old database to be read in and translated to the new\n        parameter name.\n\n        The following rules are followed when applying these renames:\n\n        * When state is loaded from a database, if the parameter name in the database file is found in the rename\n          dictionary, it will be mapped to that renamed parameter.\n        * If the renamed parameter is found in the renames, then it will be mapped again to new parameter name. This\n          process is repeated until there are no more renames left. 
This allows for parameters to be renamed multiple\n          times, and for a database from several generations prior to still be readable, so long as the history of\n          renames is intact.\n        * If at the end of the above process, the parameter name is not a defined parameter for the appropriate\n          ``ArmiObject`` type, an exception is raised.\n        * If any of the ``renames`` keys match any currently-defined parameters, an exception is raised.\n        * If any of the ``renames`` collide with another plugin's ``renames``, an exception is raised.\n\n        Returns\n        -------\n        renames : dict\n            Keys should be an old parameter name, where the corresponding values are the new parameter name.\n\n        Example\n        -------\n        The following would allow databases with values for either ``superOldParam`` or ``oldParam`` to be read into\n        ``currentParam``::\n\n            return {\"superOldParam\": \"oldParam\", \"oldParam\": \"currentParam\"}\n        \"\"\"\n\n    @staticmethod\n    @HOOKSPEC\n    def mpiActionRequiresReset(cmd) -> bool:\n        \"\"\"\n        Flag indicating when a reactor reset is required.\n\n        Commands are sent through operators either as strings (old) or as MpiActions (newer). After some are sent, the\n        reactor must be reset. This hook says when to reset. 
The reset operation is a (arguably suboptimal) response to\n        some memory issues in very large and long-running cases.\n\n        Parameters\n        ----------\n        cmd :  str or MpiAction\n            The ARMI mpi command being sent\n\n        Returns\n        -------\n        bool\n\n        See Also\n        --------\n        armi.operators.operatorMPI.OperatorMPI.workerOperate : Handles these flags\n        \"\"\"\n\n    @staticmethod\n    @HOOKSPEC\n    def getReportContents(r, cs, report, stage, blueprint) -> None:\n        \"\"\"\n        To generate a report.\n\n        Parameters\n        ----------\n        r : Reactor\n        cs : Settings\n        report : ReportContent\n            Report object to add contents to\n        stage : ReportStage\n            begin/standard/or end (stage of the report for when inserting BOL vs. EOL content)\n        blueprint : Blueprint, optional\n            for a reactor (if None, only partial contents created)\n        \"\"\"\n\n    @staticmethod\n    @HOOKSPEC\n    def defineSystemBuilders() -> Dict[str, Callable[[str], \"Composite\"]]:\n        \"\"\"\n        Convert a user-string from the systems section into a valid composite builder.\n\n        Parameters\n        ----------\n        name : str\n            Name of the system type defined by the user, e.g., ``\"core\"``\n\n        Returns\n        -------\n        dict\n            Dictionary that maps a grid type from the input file (e.g., ``\"core\"``)\n            to a function responsible for building a grid of that type, e.g.,\n\n            .. 
code::\n\n                {\n                    \"core\": armi.reactor.reactors.Core,\n                    \"excore\": armi.reactor.excoreStructure.ExcoreStructure,\n                    \"sfp\": armi.reactor.spentFuelPool.SpentFuelPool,\n                }\n\n        Notes\n        -----\n        The default :class:`~armi.reactor.ReactorPlugin` defines a ``\"core\"`` lookup and a ``\"sfp\"`` lookup, triggered\n        to run after all other hooks have been run.\n        \"\"\"\n\n    @staticmethod\n    @HOOKSPEC(firstresult=True)\n    def getAxialExpansionChanger() -> type[\"AxialExpansionChanger\"]:\n        \"\"\"Produce the class responsible for performing axial expansion.\n\n        Plugins can provide this hook to override or negate axial expansion. Will be used during initial construction of\n        the core and assemblies, and can be a class to perform custom axial expansion routines.\n\n        The first object returned that is not ``None`` will be used. Plugins are encouraged to add the ``tryfirst=True``\n        arguments to their ``HOOKIMPL`` invocations to make sure their specific are earlier in the hook call sequence.\n\n        Returns\n        -------\n        type of :class:`armi.reactor.converters.axialExpansionChanger.AxialExpansionChanger`\n\n        Notes\n        -----\n        This hook **should not** provide an instance of the class. The construction of the changer will be handled by\n        applications and plugins that need it.\n\n        This hook should only be provided by one additional plugin in your application. Otherwise the `order of hook\n        execution <https://pluggy.readthedocs.io/en/stable/index.html#call-time-order>`_ may not provide the behavior\n        you expect.\n\n        Examples\n        --------\n        >>> class MyPlugin(ArmiPlugin):\n        ...     @staticmethod\n        ...     @HOOKIMPL(tryfirst=True)\n        ...     def getAxialExpansionChanger():\n        ...         
from myproject.physics import BespokeAxialExpansion\n        ...\n        ...         return BespokeAxialExpansion\n\n        \"\"\"\n\n\nclass UserPlugin(ArmiPlugin):\n    \"\"\"\n    A variation on the ArmiPlugin meant to be created at runtime, from the ``userPlugins`` setting.\n\n    This is obviously a more limited use-case than the usual ArmiPlugin, as those are meant to be defined at import\n    time, instead of run time. As such, this class has some built-in tooling to limit how these run-time plugins are\n    used. They are meant to be more limited.\n\n    Notes\n    -----\n    The usual ArmiPlugin is much more flexible, if the UserPlugin does not support what you want to do, just use an\n    ArmiPlugin.\n    \"\"\"\n\n    def __init__(self, *args, **kwargs):\n        ArmiPlugin.__init__(self, *args, **kwargs)\n        self.__enforceLimitations()\n\n    def __enforceLimitations(self):\n        \"\"\"\n        This method enforces that UserPlugins are more limited than regular ArmiPlugins.\n\n        UserPlugins are different from regular plugins in that they can be defined during a run, and as such, we want to\n        limit how flexible they are, so we can correctly corral their side effects during a run.\n        \"\"\"\n        if issubclass(self.__class__, UserPlugin):\n            assert len(self.__class__.defineParameters()) == 0, (\n                \"UserPlugins cannot define parameters, consider using an ArmiPlugin.\"\n            )\n            assert len(self.__class__.defineParameterRenames()) == 0, (\n                \"UserPlugins cannot define parameter renames, consider using an ArmiPlugin.\"\n            )\n            assert len(self.__class__.defineSettings()) == 0, (\n                \"UserPlugins cannot define new Settings, consider using an ArmiPlugin.\"\n            )\n            # NOTE: These are the methods that we are staunchly _not_ allowing people to change in this class. 
If you\n            # need these, please use a regular ArmiPlugin.\n            self.defineParameterRenames = lambda: {}\n            self.defineSettings = lambda: []\n            self.defineSettingsValidators = lambda: []\n\n    @staticmethod\n    @HOOKSPEC\n    def defineParameters():\n        \"\"\"\n        Prevents defining additional parameters.\n\n        .. warning:: This is not overridable.\n\n        Notes\n        -----\n        It is a designed limitation of user plugins that they not define parameters. Parameters are defined when the\n        App() is read in, which is LONG before the settings file has been read. So the parameters are defined before we\n        discover the user plugin. If this is a feature you need, just use an ArmiPlugin.\n        \"\"\"\n        return {}\n\n    @staticmethod\n    @HOOKSPEC\n    def defineParameterRenames():\n        \"\"\"\n        Prevents parameter renames.\n\n        Warning\n        -------\n        This is not overridable.\n\n        Notes\n        -----\n        It is a designed limitation of user plugins that they not generate parameter renames, Parameters are defined\n        when the App() is read in, which is LONG before the settings file has been read. So the parameters are defined\n        before we discover the user plugin. If this is a feature you need, just use a normal Plugin.\n        \"\"\"\n        return {}\n\n    @staticmethod\n    @HOOKSPEC\n    def defineSettings():\n        \"\"\"\n        Prevents new settings.\n\n        Warning\n        -------\n        This is not overridable.\n\n        Notes\n        -----\n        It is a designed limitation of user plugins that they not define new settings, so that they are able to be added\n        to the plugin stack during run time.\n        \"\"\"\n        return []\n\n    @staticmethod\n    @HOOKSPEC\n    def defineSettingsValidators(inspector):\n        \"\"\"\n        Prevents new settings validators.\n\n        .. 
warning:: This is not overridable.\n\n        Notes\n        -----\n        It is a designed limitation of user plugins that they not define new settings, so that they are able to be added\n        to the plugin stack during run time.\n        \"\"\"\n        return []\n\n\ndef getNewPluginManager() -> pluginManager.ArmiPluginManager:\n    \"\"\"Return a new plugin manager with all of the hookspecs pre-registered.\"\"\"\n    pm = pluginManager.ArmiPluginManager(\"armi\")\n    pm.add_hookspecs(ArmiPlugin)\n    return pm\n\n\ndef collectInterfaceDescriptions(mod, cs):\n    \"\"\"\n    Adapt old-style ``describeInterfaces`` to the new plugin interface.\n\n    Old describeInterfaces implementations would return an interface class and kwargs for adding to an operator. Now we\n    expect an ORDER as well. This takes a module and case settings and staples the module's ORDER attribute to the tuple\n    and checks to make sure that a None is replaced by an empty list.\n    \"\"\"\n    from armi import interfaces\n\n    val = mod.describeInterfaces(cs)\n\n    if val is None:\n        return []\n    if isinstance(val, list):\n        return [interfaces.InterfaceInfo(mod.ORDER, klass, kwargs) for klass, kwargs in val]\n\n    klass, kwargs = val\n    return [interfaces.InterfaceInfo(mod.ORDER, klass, kwargs)]\n\n\nclass PluginError(RuntimeError):\n    \"\"\"\n    Special exception class for use when a plugin appears to be non-conformant.\n\n    These should always come from some form of programmer error, and indicates conditions such as:\n\n    - A plugin improperly implementing a hook, when possible to detect.\n    - A collision between components provided by plugins (e.g. two plugins providing the same Blueprints section)\n    \"\"\"\n"
  },
  {
    "path": "armi/reactor/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"\nThe reactor package houses the data model used in ARMI to represent the reactor during its\nsimulation. It contains definitions of the reactor, assemblies, blocks, components, etc.\n\nSee :doc:`/developer/index`.\n\"\"\"\n\nfrom typing import TYPE_CHECKING, Callable, Dict, Union\n\nfrom armi import materials, plugins\n\nif TYPE_CHECKING:\n    from armi.reactor.excoreStructure import ExcoreStructure\n    from armi.reactor.reactors import Core\n    from armi.reactor.spentFuelPool import SpentFuelPool\n\n\nclass ReactorPlugin(plugins.ArmiPlugin):\n    \"\"\"Plugin exposing built-in reactor components, blocks, assemblies, etc.\"\"\"\n\n    @staticmethod\n    @plugins.HOOKIMPL\n    def beforeReactorConstruction(cs) -> None:\n        \"\"\"Just before reactor construction, update the material \"registry\" with user settings,\n        if it is set. 
Often it is set by the application.\n        \"\"\"\n        from armi.settings.fwSettings.globalSettings import (\n            CONF_MATERIAL_NAMESPACE_ORDER,\n        )\n\n        if cs[CONF_MATERIAL_NAMESPACE_ORDER]:\n            materials.setMaterialNamespaceOrder(cs[CONF_MATERIAL_NAMESPACE_ORDER])\n\n    @staticmethod\n    @plugins.HOOKIMPL\n    def defineBlockTypes():\n        from armi.reactor import blocks\n        from armi.reactor.components.basicShapes import Hexagon, Rectangle\n        from armi.reactor.components.volumetricShapes import RadialSegment\n\n        return [\n            (Rectangle, blocks.CartesianBlock),\n            (RadialSegment, blocks.ThRZBlock),\n            (Hexagon, blocks.HexBlock),\n        ]\n\n    @staticmethod\n    @plugins.HOOKIMPL\n    def defineAssemblyTypes():\n        from armi.reactor.assemblies import CartesianAssembly, HexAssembly, ThRZAssembly\n        from armi.reactor.blocks import CartesianBlock, HexBlock, ThRZBlock\n\n        return [\n            (HexBlock, HexAssembly),\n            (CartesianBlock, CartesianAssembly),\n            (ThRZBlock, ThRZAssembly),\n        ]\n\n    @staticmethod\n    @plugins.HOOKIMPL(trylast=True)\n    def defineSystemBuilders() -> Dict[str, Callable[[str], Union[\"Core\", \"ExcoreStructure\", \"SpentFuelPool\"]]]:\n        from armi.reactor.excoreStructure import ExcoreStructure\n        from armi.reactor.reactors import Core\n        from armi.reactor.spentFuelPool import SpentFuelPool\n\n        return {\n            \"core\": Core,\n            \"excore\": ExcoreStructure,\n            \"sfp\": SpentFuelPool,\n        }\n\n    @staticmethod\n    @plugins.HOOKIMPL(trylast=True)\n    def getAxialExpansionChanger():\n        from armi.reactor.converters.axialExpansionChanger import AxialExpansionChanger\n\n        return AxialExpansionChanger\n"
  },
  {
    "path": "armi/reactor/assemblies.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nAssemblies are collections of Blocks.\n\nGenerally, Blocks are stacked from bottom to top.\n\"\"\"\n\nimport copy\nimport math\nimport pickle\nfrom collections.abc import Iterable\nfrom random import randint\nfrom typing import ClassVar, Optional, Type\n\nimport numpy as np\nfrom scipy import interpolate\n\nfrom armi import runLog\nfrom armi.materials.material import Fluid\nfrom armi.reactor import assemblyParameters, blocks, composites, grids\nfrom armi.reactor.flags import Flags, TypeSpec\nfrom armi.reactor.parameters import ParamLocation\nfrom armi.reactor.spentFuelPool import SpentFuelPool\n\n\nclass Assembly(composites.Composite):\n    \"\"\"\n    A single assembly in a reactor made up of blocks built from the bottom up.\n    Append blocks to add them up. Index blocks with 0 being the bottom.\n    \"\"\"\n\n    _BLOCK_TYPE: ClassVar[Optional[Type[blocks.Block]]] = None\n    pDefs = assemblyParameters.getAssemblyParameterDefinitions()\n\n    # For assemblies coming in from the database, waiting to be loaded to their old\n    # position. 
This is a necessary distinction, since we need to make sure that a bunch\n    # of fuel management stuff doesn't treat its re-placement into the core as a new move\n    DATABASE = \"database\"\n    LOAD_QUEUE = \"LoadQueue\"\n    SPENT_FUEL_POOL = \"SFP\"\n    DELETE = \"Delete\"\n    NOT_CREATED_YET = \"NotCreatedYet\"  # used in assembly location history tracking\n    NOT_IN_CORE = [LOAD_QUEUE, SPENT_FUEL_POOL, DELETE, NOT_CREATED_YET]\n\n    def __init__(self, typ, assemNum=None):\n        \"\"\"\n        Parameters\n        ----------\n        typ : str\n            Name of assembly design (e.g. the name from the blueprints input file).\n        assemNum : int, optional\n            The unique ID number of this assembly. If None is provided, we generate a\n            random int. This makes it clear that it is a placeholder. When an assembly with\n            a negative ID is placed into a Reactor, it will be given a new, positive ID.\n        \"\"\"\n        # If no assembly number is provided, generate a random number as a placeholder.\n        if assemNum is None:\n            assemNum = randint(-9000000000000, -1)\n        name = self.makeNameFromAssemNum(assemNum)\n        composites.Composite.__init__(self, name)\n        self.p.assemNum = assemNum\n        self.setType(typ)\n        self._current = 0  # for iterating\n        self.p.buLimit = self.getMaxParam(\"buLimit\")\n        self.lastLocationLabel = self.LOAD_QUEUE\n        self.p.orientation = np.array((0.0, 0.0, 0.0))\n        self.p.ringPosHist = []\n\n    def __repr__(self):\n        msg = \"<{typeName} Assembly {name} at {loc}>\".format(\n            name=self.getName(), loc=self.getLocation(), typeName=self.getType()\n        )\n        return msg\n\n    def __lt__(self, other):\n        \"\"\"\n        Compare two assemblies by location.\n\n        Notes\n        -----\n        As with other ArmiObjects, Assemblies are sorted based on location. 
Assemblies are more\n        permissive in the grid consistency checks to accommodate situations where assemblies might be\n        children of the same Core, but not in the same grid as each other (like in the spent fuel\n        pool). In these situations, the operator returns ``False``.  This behavior may lead to some\n        strange sorting behavior when two or more Assemblies are being compared that do not live in\n        the same grid.\n\n        See Also\n        --------\n        armi.reactor.composites.ArmiObject.__lt__\n        \"\"\"\n        try:\n            return composites.ArmiObject.__lt__(self, other)\n        except ValueError:\n            return False\n\n    def renameBlocksAccordingToAssemblyNum(self):\n        \"\"\"\n        Updates the names of all blocks to comply with the assembly number.\n\n        Useful after an assembly number/name has been loaded from a snapshot and you\n        want to update all block names to be consistent.\n\n        It may be better to store block numbers on each block as params. 
A database that\n        can hold strings would be even better.\n\n        Notes\n        -----\n        You must run armi.reactor.reactors.Reactor.regenAssemblyLists after calling this.\n        \"\"\"\n        assemNum = self.getNum()\n        for bi, b in enumerate(self):\n            b.setName(b.makeName(assemNum, bi))\n\n    @staticmethod\n    def makeNameFromAssemNum(assemNum):\n        \"\"\"\n        Set the name of this assembly (and the containing blocks) based on an assemNum.\n\n        AssemNums are like serial numbers for assemblies.\n        \"\"\"\n        return \"A{0:04d}\".format(int(assemNum))\n\n    def renumber(self, newNum):\n        \"\"\"\n        Change the assembly number of this assembly.\n\n        And handle the downstream impacts of changing the name of this Assembly and all\n        of the Blocks within this Assembly.\n\n        Parameters\n        ----------\n        newNum : int\n            The new Assembly number.\n        \"\"\"\n        self.p.assemNum = int(newNum)\n        self.name = self.makeNameFromAssemNum(self.p.assemNum)\n        self.renameBlocksAccordingToAssemblyNum()\n\n    def makeUnique(self):\n        \"\"\"\n        Function to make an assembly unique by getting a new assembly number.\n\n        This also adjusts the assembly's blocks IDs. 
This is necessary when using\n        ``deepcopy`` to get a unique ``assemNum`` since a deepcopy implies it would\n        otherwise have been the same object.\n        \"\"\"\n        # Default to a random negative assembly number (unique enough)\n        self.p.assemNum = randint(-9000000000000, -1)\n        self.renumber(self.p.assemNum)\n\n    def _checkPotentialChild(self, obj: blocks.Block, action: str = \"add\"):\n        \"\"\"An internal helper method to ensure the Block type is valid for this Assembly.\"\"\"\n        if self._BLOCK_TYPE is None or isinstance(obj, self._BLOCK_TYPE):\n            # this is the right Block, pass on\n            return\n\n        # if we got here, this Block is not the right type for this Assembly\n        msg = f\"Cannot {action} {obj} to this Assembly, it is not a {self._BLOCK_TYPE}.\"\n        runLog.error(msg)\n        raise TypeError(msg)\n\n    def add(self, obj: blocks.Block):\n        \"\"\"\n        Add an object to this assembly.\n\n        The simple act of adding a block to an assembly fully defines the location of\n        the block in 3-D.\n\n        .. impl:: Assemblies are made up of type Block.\n            :id: I_ARMI_ASSEM_BLOCKS\n            :implements: R_ARMI_ASSEM_BLOCKS\n\n            Adds a unique Block to the top of the Assembly. If the Block already\n            exists in the Assembly, an error is raised in\n            :py:meth:`armi.reactor.composites.Composite.add`.\n            The spatialLocator of the Assembly is updated to account for\n            the new Block. In ``reestablishBlockOrder``, the Assembly spatialGrid\n            is reinitialized and Block-wise spatialLocator and name objects\n            are updated. 
The axial mesh and other Block geometry parameters are\n            updated in ``calculateZCoords``.\n        \"\"\"\n        self._checkPotentialChild(obj, \"add\")\n        composites.Composite.add(self, obj)\n        obj.spatialLocator = self.spatialGrid[0, 0, len(self) - 1]\n\n        # more work is needed, make a new mesh\n        self.reestablishBlockOrder()\n        self.calculateZCoords()\n\n    def insert(self, index, obj):\n        \"\"\"Insert an object at a given index position with the assembly.\"\"\"\n        self._checkPotentialChild(obj, \"insert\")\n        composites.Composite.insert(self, index, obj)\n        obj.spatialLocator = self.spatialGrid[0, 0, index]\n\n    def moveTo(self, locator):\n        \"\"\"Move an assembly somewhere else.\"\"\"\n        oldSymmetryFactor = self.getSymmetryFactor()\n        composites.Composite.moveTo(self, locator)\n        if self.lastLocationLabel != self.DATABASE:\n            self.p.numMoves += 1\n            self.p.daysSinceLastMove = 0.0\n        self.parent.childrenByLocator[locator] = self\n        # symmetry may have changed (either moving on or off of symmetry line)\n        self.clearCache()\n        self.scaleParamsToNewSymmetryFactor(oldSymmetryFactor)\n\n    def scaleParamsToNewSymmetryFactor(self, oldSymmetryFactor):\n        scalingFactor = oldSymmetryFactor / self.getSymmetryFactor()\n        if scalingFactor == 1:\n            return\n\n        blockVolIntegratedParamsToScale = self[0].p.paramDefs.atLocation(ParamLocation.VOLUME_INTEGRATED)\n        for b in self:\n            self._scaleParams(b, blockVolIntegratedParamsToScale, scalingFactor)\n        assemblyVolIntegratedParamsToScale = self.p.paramDefs.atLocation(ParamLocation.VOLUME_INTEGRATED)\n        self._scaleParams(self, assemblyVolIntegratedParamsToScale, scalingFactor)\n\n    @staticmethod\n    def _scaleParams(obj, params, scalingFactor):\n        for param in params:\n            name = param.name\n            if obj.p[name] is 
None or isinstance(obj.p[name], str):\n                continue\n            elif isinstance(obj.p[name], Iterable):\n                obj.p[name] = [value * scalingFactor for value in obj.p[name]]\n            else:\n                # numpy array or other\n                obj.p[name] = obj.p[name] * scalingFactor\n\n    def getNum(self):\n        \"\"\"Return unique integer for this assembly.\"\"\"\n        return int(self.p.assemNum)\n\n    def getLocation(self):\n        \"\"\"\n        Get string label representing this object's location.\n\n        .. impl:: Assembly location is retrievable.\n            :id: I_ARMI_ASSEM_POSI0\n            :implements: R_ARMI_ASSEM_POSI\n\n            This method returns a string label indicating the location\n            of an Assembly. There are three options: 1) the Assembly\n            is not within a Core object and is interpreted as in the\n            \"load queue\"; 2) the Assembly is within the spent fuel pool;\n            3) the Assembly is within a Core object, so it has a physical\n            location within the Core.\n        \"\"\"\n        # just use ring and position, not axial (which is 0)\n        if not self.parent:\n            return self.LOAD_QUEUE\n        elif isinstance(self.parent, SpentFuelPool):\n            return self.SPENT_FUEL_POOL\n        return self.parent.spatialGrid.getLabel(self.spatialLocator.getCompleteIndices()[:2])\n\n    def coords(self):\n        \"\"\"Return the location of the assembly in the plane using cartesian global coordinates.\n\n        .. 
impl:: Assembly coordinates are retrievable.\n            :id: I_ARMI_ASSEM_POSI1\n            :implements: R_ARMI_ASSEM_POSI\n\n            In this method, the spatialLocator of an Assembly is leveraged to return its physical\n            (x,y) coordinates in cm.\n        \"\"\"\n        x, y, _z = self.spatialLocator.getGlobalCoordinates()\n        return (x, y)\n\n    def getArea(self):\n        \"\"\"\n        Return the area of the assembly by looking at its first block.\n\n        The assumption is that all blocks in an assembly have the same area. Calculate the total\n        assembly volume in cm^3.\n        \"\"\"\n        try:\n            return self[0].getArea()\n        except IndexError:\n            runLog.warning(f\"{self} has no blocks and therefore no area.\")\n            return None\n\n    def getVolume(self):\n        \"\"\"Calculate the total assembly volume in cm^3.\"\"\"\n        return self.getArea() * self.getTotalHeight()\n\n    def getPinPlenumVolumeInCubicMeters(self) -> float:\n        \"\"\"\n        Return the total volume of the plenum for an assembly in m^3.\n\n        Notes\n        -----\n        If there is no plenum blocks in the assembly, a plenum volume of 0.0 is returned.\n\n        Warning\n        -------\n        This is a bit design-specific for pinned assemblies.\n\n        Returns\n        -------\n        float: Total plenum volume for an assembly.\n        \"\"\"\n        plenumVolume = 0.0\n        for b in self.iterChildrenWithFlags(Flags.PLENUM):\n            length = b.getHeight()\n            for c in b.iterChildrenWithFlags(Flags.CLAD):\n                cladId = c.getDimension(\"id\")\n                plenumVolume += math.pi * (cladId / 2.0) ** 2.0 * length\n        # convert vol from cm^3 to m^3\n        plenumVolume *= 1e-6\n        return plenumVolume\n\n    def getAveragePlenumTemperature(self):\n        \"\"\"Return the average of the plenum block outlet temperatures.\"\"\"\n        plenumBlocks = 
self.iterChildrenWithFlags(Flags.PLENUM)\n        plenumTemps = [b.p.THcoolantOutletT for b in plenumBlocks]\n\n        # no plenum blocks, use the top block of the assembly for plenum temperature\n        if not plenumTemps:\n            runLog.warning(\"No plenum blocks exist. Using outlet coolant temperature.\")\n            plenumTemps = [self[-1].p.THcoolantOutletT]\n\n        return sum(plenumTemps) / len(plenumTemps)\n\n    def adjustResolution(self, refA):\n        \"\"\"Split the blocks in this assembly to have the same mesh structure as refA.\"\"\"\n        newBlockStack = []\n\n        newBlocks = 0  # number of new blocks we've added so far.\n        for i, b in enumerate(self):\n            refB = refA[i + newBlocks]  # pick the block that is \"supposed to\" line up with refB.\n\n            if refB.getHeight() == b.getHeight():\n                # these blocks line up\n                newBlockStack.append(b)\n                continue\n            elif refB.getHeight() > b.getHeight():\n                raise RuntimeError(\n                    \"can't split {0} ({1}cm) into larger blocks to match ref block {2} ({3}cm)\".format(\n                        b, b.getHeight(), refB, refB.getHeight()\n                    )\n                )\n            else:\n                # b is larger than refB. Split b up by splitting it into several smaller blocks of\n                # refBs\n                heightToChop = b.getHeight()\n                heightChopped = 0.0\n                while abs(heightChopped - heightToChop) > 1e-5:  # stop when they are equal. 
floating point.\n                    # update which ref block we're on (does nothing on the first pass)\n                    refB = refA[i + newBlocks]\n                    newB = copy.deepcopy(b)\n                    newB.setHeight(refB.getHeight())  # make block match ref mesh\n                    newBlockStack.append(newB)\n                    heightChopped += refB.getHeight()\n                    newBlocks += 1\n                    runLog.important(f\"Added a new block {newB} of height {newB.getHeight()}\")\n                    runLog.important(f\"Chopped {heightChopped} of {heightToChop}\")\n\n                # subtract one because we eliminated the original b completely.\n                newBlocks -= 1\n\n        self.removeAll()\n        self.spatialGrid = grids.AxialGrid.fromNCells(len(newBlockStack))\n        for b in newBlockStack:\n            self.add(b)\n        self.reestablishBlockOrder()\n\n    def getAxialMesh(self, centers=False, zeroAtFuel=False):\n        \"\"\"\n        Make a list of the block z-mesh tops from bottom to top in cm.\n\n        Parameters\n        ----------\n        centers : bool, optional\n            Return centers instead of tops. 
If centers and zeroesAtFuel the zero point will be\n            center of first fuel.\n\n        zeroAtFuel : bool, optional\n            If true will make the (bottom or center depending on centers) of the first fuel block be\n            the zero point instead of the bottom of the first block.\n\n        See Also\n        --------\n        armi.reactor.assemblies.Assembly.makeAxialSnapList : makes index-based lookup of axial mesh\n\n        armi.reactor.reactors.Reactor.findAllAxialMeshPoints : gets a global list of all of these,\n        plus finer res.\n        \"\"\"\n        bottom = 0.0\n        meshVals = []\n        fuelIndex = None\n        for bi, b in enumerate(self):\n            top = bottom + b.getHeight()\n            if centers:\n                center = bottom + (top - bottom) / 2.0\n                meshVals.append(center)\n            else:\n                meshVals.append(top)\n            bottom = top\n            if fuelIndex is None and b.isFuel():\n                fuelIndex = bi\n\n        if zeroAtFuel:\n            # adjust the mesh to put zero at the first fuel block.\n            zeroVal = meshVals[fuelIndex]\n            meshVals = [mv - zeroVal for mv in meshVals]\n\n        return meshVals\n\n    def calculateZCoords(self):\n        \"\"\"\n        Set the center z-coords of each block and the params for axial expansion.\n\n        See Also\n        --------\n        reestablishBlockOrder\n        \"\"\"\n        bottom = 0.0\n        mesh = [bottom]\n        for bi, b in enumerate(self):\n            b.p.z = bottom + (b.getHeight() / 2.0)\n            b.p.zbottom = bottom\n            top = bottom + b.getHeight()\n            b.p.ztop = top\n            mesh.append(top)\n            bottom = top\n            b.spatialLocator = self.spatialGrid[0, 0, bi]\n\n        # also update the 1-D axial assembly level grid (this is intended to replace z,\n        # ztop, zbottom, etc.)\n\n        # length of this is numBlocks + 1\n        bounds 
= list(self.spatialGrid._bounds)\n        bounds[2] = np.array(mesh)\n        self.spatialGrid._bounds = tuple(bounds)\n\n    def getTotalHeight(self, typeSpec=None):\n        \"\"\"\n        Determine the height of this assembly in cm.\n\n        Parameters\n        ----------\n        typeSpec : See :py:meth:`armi.composites.Composite.hasFlags`\n\n        Returns\n        -------\n        height : float\n            the height in cm\n        \"\"\"\n        h = 0.0\n        for b in self:\n            if b.hasFlags(typeSpec):\n                h += b.getHeight()\n        return h\n\n    def getHeight(self, typeSpec=None):\n        return self.getTotalHeight(typeSpec)\n\n    def getReactiveHeight(self, enrichThresh=0.02):\n        \"\"\"\n        Returns the zBottom and total height in cm that has fissile enrichment over\n        enrichThresh.\n        \"\"\"\n        reactiveH = 0.0\n        zBot = None\n        z = 0.0\n        for b in self:\n            h = b.getHeight()\n            if b.getFissileMass() > 0.01 and b.getFissileMassEnrich() > enrichThresh:\n                if zBot is None:\n                    zBot = z\n                reactiveH += h\n            z += h\n\n        return zBot, reactiveH\n\n    def getElevationBoundariesByBlockType(self, blockType=None):\n        \"\"\"\n        Gets of list of elevations, ordered from bottom to top of all boundaries of the block of specified type.\n\n        Useful for determining location of the top of the upper grid plate or active\n        fuel, etc by using [0] to get the lowest boundary and [-1] to get highest\n\n        Notes\n        -----\n        The list will have duplicates when blocks of the same type share a boundary.\n        this is intentional. It makes it easy to grab pairs off the list and know that\n        the first item in a pair is the bottom boundary and the second is the top.\n\n        Parameters\n        ----------\n        blockType : str\n            Block type to find. 
empty accepts all\n\n        Returns\n        -------\n        elevation : list of floats\n            Every float in the list is an elevation of a block boundary for the block\n            type specified (has duplicates)\n        \"\"\"\n        elevation, elevationsWithBlockBoundaries = 0.0, []\n\n        # loop from bottom to top, stopping at the first instance of blockType\n        for b in self:\n            if b.hasFlags(blockType):\n                elevationsWithBlockBoundaries.append(elevation)  # bottom Boundary\n                elevationsWithBlockBoundaries.append(elevation + b.getHeight())  # top Boundary\n            elevation += b.getHeight()\n\n        return elevationsWithBlockBoundaries\n\n    def getElevationsMatchingParamValue(self, param, value):\n        \"\"\"\n        Return the elevations (z-coordinates) where the specified param takes the\n        specified value.\n\n        Uses linear interpolation, assuming params correspond to block centers\n\n        Parameters\n        ----------\n        param : str\n            Name of param to try and match\n\n        value: float\n\n        Returns\n        -------\n        heights : list\n            z-coordinates where the specified param takes the specified value\n        \"\"\"\n        heights = []\n        # loop from bottom to top\n        for i in range(0, len(self) - 1):\n            diff1 = self[i].p[param] - value\n            diff2 = self[i + 1].p[param] - value\n            z1 = (self[i].p.zbottom + self[i].p.ztop) / 2\n            z2 = (self[i + 1].p.zbottom + self[i + 1].p.ztop) / 2\n\n            if diff1 == diff2:  # params are flat\n                if diff1 != 0:  # no match\n                    continue\n                else:\n                    if z1 not in heights:\n                        heights.append(z1)\n                    if z2 not in heights:\n                        heights.append(z2)\n\n            # check if param is bounded by two adjacent blocks\n            elif 
diff1 * diff2 <= 0:\n                tie = diff1 / (diff1 - diff2)\n                z = z1 + tie * (z2 - z1)\n                if z not in heights:  # avoid duplicates\n                    heights.append(z)\n\n        return heights\n\n    def getAge(self):\n        \"\"\"Gets a height-averaged residence time of this assembly in days.\"\"\"\n        at = 0.0\n        for b in self:\n            at += b.p.residence * b.getHeight()\n        return at / self.getTotalHeight()\n\n    def makeAxialSnapList(self, refAssem=None, refMesh=None, force=False):\n        \"\"\"\n        Creates a list of block indices that should track axially with refAssem's.\n\n        When axially expanding, the control rods, shields etc. need to maintain mesh\n        lines with the rest of the core. To do this, we'll just keep track of which\n        indices of a reference assembly we should stick with. This method writes the\n        indices of the top of a block to settings as topIndex.\n\n        Keep in mind that assemblies can have different number of blocks. This is why\n        this function is useful. So this makes a list of reference indices that\n        correspond to different axial mesh points on this assembly.\n\n        This is the depletion mesh we're returning, useful for snapping after axial\n        extension. 
Note that the neutronics mesh on rebusOutputs might be different.\n\n        See Also\n        --------\n        setBlockMesh : applies a snap.\n\n        \"\"\"\n        if not force and self[-1].p.topIndex > 0:\n            return\n\n        refMesh = refAssem.getAxialMesh() if refMesh is None else refMesh\n        selfMesh = self.getAxialMesh()\n        # make a list relating this assemblies axial mesh points to indices of the\n        # reference assembly\n        z = 0.0\n        for b in self:\n            top = z + b.getHeight()\n            try:\n                b.p.topIndex = np.where(np.isclose(refMesh, top))[0].tolist()[0]\n            except IndexError:\n                runLog.error(\n                    \"Height {0} in this assembly ({1} in {4}) is not in the reactor mesh \"\n                    \"list from  {2}\\nThis has: {3}\\nIf you want to run \"\n                    \"a case with non-uniform axial mesh, activate the `detailedAxialExpansion` \"\n                    \"setting\".format(top, self, refMesh, selfMesh, self.parent)\n                )\n                raise\n            z = top\n\n    def _shouldMassBeConserved(self, belowFuelColumn, b):\n        \"\"\"\n        Determine from a rule set if the mass of a block component should be conserved during axial expansion.\n\n        Parameters\n        ----------\n        belowFuelColumn : boolean\n            Determines whether a block is below the fuel column or not in fuel\n            assemblies\n\n        b : armi block\n            The block that is being examined for modification\n\n        Returns\n        -------\n        conserveMass : boolean\n            Should the mass be conserved in this block\n\n        conserveComponents : list of components\n            What components should have their mass conserved (if any)\n\n        belowFuelColumn : boolean\n            Update whether the block is above or below a fuel column\n\n        See Also\n        --------\n        
armi.assemblies.Assembly.setBlockMesh\n\n        \"\"\"\n        if b.hasFlags(Flags.FUEL):\n            # fuel block\n            conserveMass = True\n            conserveComponents = b.getComponents(Flags.FUEL)\n        elif self.hasFlags(Flags.FUEL):\n            # non-fuel block of a fuel assembly.\n            if belowFuelColumn:\n                # conserve mass of everything below the fuel so as to not invalidate\n                # grid-plate dose calcs.\n                conserveMass = True\n                # conserve mass of everything except fluids.\n                conserveComponents = [comp for comp in b.getComponents() if not isinstance(comp.material, Fluid)]\n            else:\n                # plenum or above block in fuel assembly. don't conserve mass.\n                conserveMass = False\n                conserveComponents = []\n        else:\n            # non fuel block in non-fuel assem. Don't conserve mass.\n            conserveMass = False\n            conserveComponents = []\n\n        return conserveMass, conserveComponents\n\n    def setBlockMesh(self, blockMesh, conserveMassFlag=False):\n        \"\"\"\n        Snaps the axial mesh points of this assembly to correspond with the reference mesh.\n\n        Notes\n        -----\n        This function only conserves mass on certain conditions:\n            1) Fuel Assembly\n                a) Structural material below the assembly conserves mass to accurate\n                   depict grid plate shielding Sodium is not conserved.\n                b) Fuel blocks only conserve mass of the fuel, not the structure since\n                   the fuel slides up through the cladding (thus fuel/cladding should be\n                   reduced).\n                c) Structure above the assemblies (expected to be plenum) do not\n                   conserve mass since plenum regions have their height reduced to\n                   conserve the total structure mass when the fuel grows in the\n                  
 cladding.  See b)\n            2) Reflectors, shields, and control rods\n                a) These assemblies do not conserve mass since they should remain\n                   uniform to keep radial shielding accurate. This approach should be\n                   conservative.\n                b) Control rods do not have their mass conserved and the control rod\n                   interface is required to be run after this function is called to\n                   correctly place mass of poison axially.\n\n        Parameters\n        ----------\n        blockMesh : iterable\n            A list of floats describing the upper mesh points of each block in cm.\n\n        conserveMassFlag : bool or str\n            Option for how to treat mass conservation when the block mesh changes.\n            Conservation of mass for fuel components is enabled by\n            conserveMassFlag=\"auto\". If not auto, a boolean value should be\n            passed. The default is False, which does not conserve any masses.\n            True conserves mass for all components.\n\n        See Also\n        --------\n        makeAxialSnapList : Builds the lookup table used by this method\n        getAxialMesh : builds a mesh compatible with this\n        \"\"\"\n        # Just adjust the heights and everything else will fall into place\n        zBottom = 0.0\n        belowFuelColumn = True\n\n        if self[-1].p.topIndex == 0:\n            runLog.warning(\n                \"Reference uniform mesh not being applied to {}. It was likely \"\n                \"excluded through the setting `nonUniformAssemFlags`.\".format(self.p.type)\n            )\n            return\n\n        for b in self:\n            if b.isFuel():\n                belowFuelColumn = False\n\n            topIndex = b.p.topIndex\n\n            if not 0 <= topIndex < len(blockMesh):\n                runLog.warning(\n                    \"index {0} does not exist in topvals (len:{1}). 0D case? 
Skipping snap\".format(\n                        topIndex, len(blockMesh)\n                    )\n                )\n                return\n\n            newTop = blockMesh[topIndex]\n\n            if newTop is None:\n                runLog.warning(\"Skipping axial snapping on {0}\".format(self), 1)\n                return\n\n            if conserveMassFlag == \"auto\":\n                conserveMass, conserveComponents = self._shouldMassBeConserved(belowFuelColumn, b)\n            else:\n                conserveMass = conserveMassFlag\n                conserveComponents = b.getComponents()\n\n            oldBlockHeight = b.getHeight()\n            b.setHeight(newTop - zBottom)\n            if conserveMass:\n                heightRatio = oldBlockHeight / b.getHeight()\n                for c in conserveComponents:\n                    c.changeNDensByFactor(heightRatio)\n            zBottom = newTop\n\n        self.calculateZCoords()\n\n    def setBlockHeights(self, blockHeights):\n        \"\"\"Set the block heights of all blocks in the assembly.\"\"\"\n        mesh = np.cumsum(blockHeights)\n        self.setBlockMesh(mesh)\n\n    def dump(self, fName=None):\n        \"\"\"Pickle the assembly and write it to a file.\"\"\"\n        if not fName:\n            fName = self.getName() + \".dump.pkl\"\n\n        with open(fName, \"w\") as pkl:\n            pickle.dump(self, pkl)\n\n    def iterBlocks(self, typeSpec=None, exact=False):\n        \"\"\"Produce an iterator over all blocks in this assembly from bottom to top.\n\n        Parameters\n        ----------\n        typeSpec : Flags or list of Flags, optional\n            Restrict returned blocks to have these flags.\n        exact : bool, optional\n            If true, only produce blocks that have those exact flags.\n\n        Returns\n        -------\n        iterable of Block\n\n        See Also\n        --------\n        * :meth:`__iter__` - if no type spec provided, assemblies can be\n          naturally 
iterated upon.\n        * :meth:`iterChildrenWithFlags` - alternative if you know you have\n           a type spec that isn't ``None``.\n        \"\"\"\n        if typeSpec is None:\n            return iter(self)\n        return self.iterChildrenWithFlags(typeSpec, exact)\n\n    def getBlocks(self, typeSpec=None, exact=False):\n        \"\"\"\n        Get blocks in an assembly from bottom to top.\n\n        Parameters\n        ----------\n        typeSpec : Flags or list of Flags, optional\n            Restrict returned blocks to those of this type.\n        exact : bool, optional\n            If true, will only return if there's an exact match in typeSpec\n\n        Returns\n        -------\n        blocks : list\n            List of blocks.\n        \"\"\"\n        return list(self.iterBlocks(typeSpec, exact))\n\n    def getBlocksAndZ(self, typeSpec=None, returnBottomZ=False, returnTopZ=False):\n        \"\"\"\n        Get blocks and their z-coordinates from bottom to top.\n\n        This method is useful when you need to know the z-coord of a block.\n\n        Parameters\n        ----------\n        typeSpec : Flags or list of Flags, optional\n            Block type specification to restrict to\n\n        returnBottomZ : bool, optional\n            If true, will return bottom coordinates instead of centers.\n\n        Returns\n        -------\n        blocksAndCoords, list\n            (block, zCoord) tuples\n\n        Examples\n        --------\n            for block, bottomZ in a.getBlocksAndZ(returnBottomZ=True):\n                print({0}'s bottom mesh point is {1}'.format(block, bottomZ))\n        \"\"\"\n        if returnBottomZ and returnTopZ:\n            raise ValueError(\"Both returnTopZ and returnBottomZ are set to `True`\")\n\n        blocks, zCoords = [], []\n        bottom = 0.0\n        for b in self:\n            top = bottom + b.getHeight()\n            mid = (bottom + top) / 2.0\n            if b.hasFlags(typeSpec):\n                
blocks.append(b)\n                if returnBottomZ:\n                    val = bottom\n                elif returnTopZ:\n                    val = top\n                else:\n                    val = mid\n                zCoords.append(val)\n            bottom = top\n\n        return zip(blocks, zCoords)\n\n    def hasContinuousCoolantChannel(self):\n        return all(b.containsAtLeastOneChildWithFlags(Flags.COOLANT) for b in self)\n\n    def getFirstBlock(self, typeSpec: TypeSpec = None, exact: bool = False) -> Optional[blocks.Block]:\n        \"\"\"Find the first block that matches the spec.\n\n        Parameters\n        ----------\n        typeSpec\n            Specification to require on the returned block.\n        exact\n            Require block to exactly match ``typeSpec``\n\n        Returns\n        -------\n        Block or None\n            First block that matches if such a block could be found.\n        \"\"\"\n        if typeSpec is None:\n            items = iter(self)\n        else:\n            items = self.iterChildrenWithFlags(typeSpec, exact)\n        try:\n            # Create an iterator and attempt to advance it to the first value.\n            return next(items)\n        except StopIteration:\n            # No items found in the iteration -> no blocks match the request\n            return None\n\n    def getFirstBlockByType(self, typeName: str) -> Optional[blocks.Block]:\n        blocks = filter(lambda b: b.getType() == typeName, self)\n        try:\n            return next(blocks)\n        except StopIteration:\n            return None\n\n    def getBlockAtElevation(self, elevation: float) -> Optional[blocks.Block]:\n        \"\"\"\n        Returns the block at a specified axial dimension elevation (given in cm).\n\n        If height matches the exact top of the block, the block is considered at that\n        height.\n\n        Parameters\n        ----------\n        elevation : float\n            The elevation of interest to grab a 
block (cm)\n\n        Returns\n        -------\n        targetBlock : block or None\n            The block that exists at the specified height in the reactor. ``None``\n            if a block was not found.\n        \"\"\"\n        bottomOfBlock = 0.0\n        for b in self:\n            topOfBlock = bottomOfBlock + b.getHeight()\n            if (\n                topOfBlock > elevation or abs(topOfBlock - elevation) / elevation < 1e-10\n            ) and bottomOfBlock < elevation:\n                return b\n            bottomOfBlock = topOfBlock\n        return None\n\n    def getBIndexFromZIndex(self, zIndex):\n        \"\"\"\n        Returns the ARMI block axial index corresponding to a DIF3D node axial index.\n\n        Parameters\n        ----------\n        zIndex : float\n            The axial index (beginning with 0) of a DIF3D node.\n\n        Returns\n        -------\n        bIndex : int\n            The axial index (beginning with 0) of the ARMI block containing the\n            DIF3D node corresponding to zIndex.\n        \"\"\"\n        zIndexTot = -1\n        for bIndex, b in enumerate(self):\n            zIndexTot += b.p.axMesh\n            if zIndexTot >= zIndex:\n                return bIndex\n        return -1  # no block index found\n\n    def getBlocksBetweenElevations(self, zLower, zUpper, eps=1e-10):\n        \"\"\"\n        Return block(s) between two axial elevations and their corresponding heights.\n\n        Parameters\n        ----------\n        zLower, zUpper : float\n            Elevations in cm where blocks should be found.\n        eps : float, optional\n            Lower bound for relative block height fraction that we care about.\n            Below this bound, small slivers of overlapping block are ignored.\n\n        Returns\n        -------\n        blockInfo : list\n            list of (blockObj, overlapHeightInCm) tuples\n\n        Examples\n        --------\n        If the block structure looks like:\n        50.0 to 100.0 
Block3\n        25.0 to 50.0  Block2\n        0.0 to 25.0   Block1\n\n        Then,\n\n        >>> a.getBlocksBetweenElevations(0, 50)\n        [(Block1, 25.0), (Block2, 25.0)]\n\n        >>> a.getBlocksBetweenElevations(0, 30)\n        [(Block1, 25.0), (Block2, 5.0)]\n\n        \"\"\"\n        blocksHere = []\n        for b in self:\n            if b.p.ztop >= zLower and b.p.zbottom <= zUpper:\n                # at least some of this block overlaps the window of interest\n                top = min(b.p.ztop, zUpper)\n                bottom = max(b.p.zbottom, zLower)\n                heightHere = top - bottom\n\n                # Filter out blocks that have an extremely small height fraction\n                if heightHere / b.getHeight() > eps:\n                    blocksHere.append((b, heightHere))\n\n        return blocksHere\n\n    def getParamValuesAtZ(self, param, elevations, interpType=\"linear\", fillValue=np.nan):\n        \"\"\"\n        Interpolates a param axially to find it at any value of elevation z.\n\n        By default, assumes that all parameters are for the center of a block. So for\n        parameters such as THoutletTemperature that are defined on the top, this may be\n        off. See the paramDefinedAt parameters.\n\n        Defaults to linear interpolations.\n\n        Notes\n        -----\n        This caches interpolators for each param and must be cleared if new params are\n        set or new heights are set.\n\n        Warning\n        -------\n        Fails when requested to extrapolate. With higher order splines it is possible to interpolate\n        non-physical values, for example, a negative flux or dpa. 
Please use caution when going off\n        default in interpType and be certain that interpolated values are physical.\n\n        Parameters\n        ----------\n        param : str\n            the parameter to interpolate\n        elevations : array of float\n            the elevations from the bottom of the assembly in cm at which you want the point.\n        interpType: str or int\n            used in interp1d. interp1d documentation: Specifies the kind of interpolation\n            as a string ('linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'\n            where 'slinear', 'quadratic' and 'cubic' refer to a spline interpolation of\n            first, second or third order) or as an integer specifying the order of the\n            spline interpolator to use. Default is 'linear'.\n        fillValue: str\n            Rough pass through to scipy.interpolate.interp1d. If 'extend', then the\n            lower and upper bounds are used as the extended value. If 'extrapolate',\n            then extrapolation is permitted.\n\n        Returns\n        -------\n        valAtZ : np.ndarray\n            This will be of the shape (z,data-shape)\n        \"\"\"\n        interpolator = self.getParamOfZFunction(param, interpType=interpType, fillValue=fillValue)\n        return interpolator(elevations)\n\n    def getParamOfZFunction(self, param, interpType=\"linear\", fillValue=np.nan):\n        \"\"\"\n        Interpolates a param axially to find it at any value of elevation z.\n\n        By default, assumes that all parameters are for the center of a block. So for\n        parameters such as THoutletTemperature that are defined on the top, this may be\n        off. 
See the paramDefinedAt parameters.\n\n        Defaults to linear interpolations.\n\n        Notes\n        -----\n        This caches interpolators for each param and must be cleared if new params are\n        set or new heights are set.\n\n        Warning\n        -------\n        Fails when requested to extrapololate. With higher order splines it is possible to\n        interpolate nonphysical values, for example, a negative flux or dpa. Please use caution when\n        going off default in interpType and be certain that interpolated values are physical.\n\n        Parameters\n        ----------\n        param : str\n            the parameter to interpolate\n        interpType: str or int\n            used in interp1d. interp1d documentation: Specifies the kind of interpolation\n            as a string ('linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'\n            where 'slinear', 'quadratic' and 'cubic' refer to a spline interpolation of\n            first, second or third order) or as an integer specifying the order of the\n            spline interpolator to use. Default is 'linear'.\n        fillValue: float\n            Rough pass through to scipy.interpolate.interp1d. If 'extend', then the\n            lower and upper bounds are used as the extended value. If 'extrapolate',\n            then extrapolation is permitted.\n\n        Returns\n        -------\n        valAtZ : np.ndarray\n            This will be of the shape (z,data-shape)\n        \"\"\"\n        paramDef = self[0].p.paramDefs[param]\n\n        if not isinstance(paramDef.location, ParamLocation):\n            raise Exception(\n                \"Cannot interpolate on `{}`. 
The ParamDefinition does not define a \"\n                \"valid location `{}`.\\nValid locations are {}\".format(\n                    param,\n                    paramDef.location,\n                    \", \".join([str(pl) for pl in ParamLocation]),\n                )\n            )\n        atCenter = bool(paramDef.location & (ParamLocation.CENTROID | ParamLocation.VOLUME_INTEGRATED))\n        z = self.getAxialMesh(atCenter)\n\n        if paramDef.location & ParamLocation.BOTTOM:\n            z.insert(0, 0.0)\n            z.pop(-1)\n\n        z = np.asarray(z)\n\n        values = self.getChildParamValues(param).transpose()\n\n        boundsError = None\n        if fillValue == \"extend\":\n            boundsError = False\n            if values.ndim == 1:\n                fillValue = values[0], values[-1]\n            elif values.ndim == 2:\n                fillValue = values[:, 0], values[:, 1]\n            else:\n                raise Exception(\n                    'Unsupported shape ({}) returned from getChildParamValues(\"{}\").'\n                    \"Shape must be 1 or 2 dimensions\".format(values.shape, param)\n                )\n        interpolater = interpolate.interp1d(\n            z,\n            values,\n            kind=interpType,\n            fill_value=fillValue,\n            assume_sorted=True,\n            bounds_error=boundsError,\n        )\n        return interpolater\n\n    def reestablishBlockOrder(self):\n        \"\"\"\n        The block ordering has changed, so the spatialGrid and Block-wise spatialLocator and name objects need updating.\n\n        See Also\n        --------\n        calculateZCoords : updates the ztop/zbottom params on each block after\n            reordering.\n        \"\"\"\n        # replace grid with one that has the right number of locations\n        self.spatialGrid = grids.AxialGrid.fromNCells(len(self))\n        self.spatialGrid.armiObject = self\n        for zi, b in enumerate(self):\n            
b.spatialLocator = self.spatialGrid[0, 0, zi]\n            # update the name too. NOTE: You must update the history tracker.\n            b.setName(b.makeName(self.p.assemNum, zi))\n\n    def countBlocksWithFlags(self, blockTypeSpec=None):\n        \"\"\"\n        Returns the number of blocks of a specified type.\n\n        blockTypeSpec : Flags or list\n            Restrict to only these types of blocks. typeSpec is None, return all of the\n            blocks\n\n        Returns\n        -------\n        blockCounter : int\n            number of blocks of this type\n        \"\"\"\n        return sum(1 for _ in self.iterBlocks(blockTypeSpec))\n\n    def getDim(self, typeSpec, dimName):\n        \"\"\"\n        With a preference for fuel blocks, find the first component in the Assembly with\n        flags that match ``typeSpec`` and return dimension as specified by ``dimName``.\n\n        Example: getDim(Flags.WIRE, 'od') will return a wire's OD in cm.\n        \"\"\"\n        # prefer fuel blocks.\n        bList = self.getBlocks(Flags.FUEL)\n        if not bList:\n            # no fuel blocks. 
take first block.\n            bList = self\n\n        for b in bList:\n            dim = b.getDim(typeSpec, dimName)\n            if dim:\n                return dim\n\n        # return none if there is nothing to return\n        return None\n\n    def getSymmetryFactor(self):\n        \"\"\"Return the symmetry factor of this assembly.\"\"\"\n        return self[0].getSymmetryFactor()\n\n    def rotate(self, rad):\n        \"\"\"Rotates the spatial variables on an assembly by the specified angle.\n\n        Each Block on the Assembly is rotated in turn.\n\n        Parameters\n        ----------\n        rad : float\n            number (in radians) specifying the angle of counter clockwise rotation\n        \"\"\"\n        self.p.orientation[2] += math.degrees(rad)\n\n        for b in self:\n            b.rotate(rad)\n\n    def isOnWhichSymmetryLine(self):\n        grid = self.parent.spatialGrid\n        return grid.overlapsWhichSymmetryLine(self.spatialLocator.getCompleteIndices())\n\n    def orientBlocks(self, parentSpatialGrid):\n        \"\"\"Add special grids to the blocks inside this Assembly, respecting their orientation.\n\n        Parameters\n        ----------\n        parentSpatialGrid : Grid\n            Spatial Grid of the parent of this Assembly (probably a system-level grid).\n        \"\"\"\n        for b in self:\n            if b.spatialGrid is None:\n                try:\n                    b.autoCreateSpatialGrids(parentSpatialGrid)\n                except (ValueError, NotImplementedError) as e:\n                    runLog.extra(str(e), single=True)\n            # Do more grid initializations from a manual or auto created grid\n            if b.spatialGrid is not None:\n                b.assignPinIndices()\n\n\nclass HexAssembly(Assembly):\n    \"\"\"An assembly that is hexagonal in cross-section.\"\"\"\n\n    _BLOCK_TYPE = blocks.HexBlock\n\n    def rotate(self, rad: float):\n        \"\"\"Rotate an assembly and its children.\n\n        .. 
impl:: A hexagonal assembly shall support rotating around the z-axis in 60 degree increments.\n            :id: I_ARMI_ROTATE_HEX_ASSEM\n            :implements: R_ARMI_ROTATE_HEX\n\n            This method loops through every ``Block`` in this ``HexAssembly`` and rotates it by a\n            given angle (in radians). The rotation angle is positive in the counter-clockwise\n            direction. To perform the ``Block`` rotation, the\n            :meth:`armi.reactor.blocks.HexBlock.rotate` method is called.\n\n        Parameters\n        ----------\n        rad : float\n            Counter clockwise rotation in radians. **MUST** be in increments of 60 degrees (PI / 3)\n\n        Raises\n        ------\n        ValueError\n            If rotation is not divisible by pi / 3.\n        \"\"\"\n        if math.isclose(rad % (math.pi / 3), 0, abs_tol=1e-12):\n            return super().rotate(rad)\n\n        msg = f\"Rotation must be in 60 degree increments, got {math.degrees(rad)} degrees ({rad} radians).\"\n        runLog.error(msg)\n        raise ValueError(msg)\n\n\nclass CartesianAssembly(Assembly):\n    \"\"\"An assembly that is rectangular in cross-section.\"\"\"\n\n    _BLOCK_TYPE = blocks.CartesianBlock\n\n\nclass RZAssembly(Assembly):\n    \"\"\"\n    RZAssembly are assemblies in RZ geometry; they need to be different objects than HexAssembly\n    because they use different locations and need to have Radial Meshes in their setting.\n\n    Notes\n    -----\n    ThRZAssemblies should be a subclass of Assemblies because they should have a common place to put\n    information about subdividing the global mesh for transport. 
This is similar to how blocks have\n    'AxialMesh' in their blocks.\n    \"\"\"\n\n    def __init__(self, name, assemNum=None):\n        Assembly.__init__(self, name, assemNum)\n        self.p.RadMesh = 1\n\n    def radialOuter(self):\n        \"\"\"Returns the outer radial boundary of this assembly.\"\"\"\n        return self[0].radialOuter()\n\n    def radialInner(self):\n        \"\"\"Returns the inner radial boundary of this assembly.\"\"\"\n        return self[0].radialInner()\n\n    def thetaOuter(self):\n        \"\"\"Returns the outer azimuthal boundary of this assembly.\"\"\"\n        return self[0].thetaOuter()\n\n    def thetaInner(self):\n        \"\"\"Returns the outer azimuthal boundary of this assembly.\"\"\"\n        return self[0].thetaInner()\n\n\nclass ThRZAssembly(RZAssembly):\n    \"\"\"\n    ThRZAssembly are assemblies in ThetaRZ geometry, they need to be different objects\n    than HexAssembly because they use different locations and need to have Radial Meshes\n    in their setting.\n    \"\"\"\n\n    def __init__(self, assemType, assemNum=None):\n        RZAssembly.__init__(self, assemType, assemNum)\n        self.p.AziMesh = 1\n"
  },
  {
    "path": "armi/reactor/assemblyParameters.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Assembly Parameter Definitions.\"\"\"\n\nfrom armi import runLog\nfrom armi.reactor import parameters\nfrom armi.reactor.parameters import ParamLocation\nfrom armi.reactor.parameters.parameterDefinitions import isNumpyArray\nfrom armi.utils import units\n\n\ndef getAssemblyParameterDefinitions():\n    pDefs = parameters.ParameterDefinitionCollection()\n\n    with pDefs.createBuilder(location=ParamLocation.CENTROID) as pb:\n        pb.defParam(\n            \"orientation\",\n            units=units.DEGREES,\n            description=(\n                \"Triple representing rotations counterclockwise around each spatial axis. \"\n                \"For example, a hex assembly rotated by 1/6th has orientation (0, 0, 60.0)\"\n            ),\n            default=None,\n        )\n\n    with pDefs.createBuilder(location=ParamLocation.AVERAGE, default=0.0) as pb:\n        pb.defParam(\n            \"arealPd\",\n            units=f\"{units.MW}/{units.METERS}^2\",\n            description=\"Power in assembly divided by its XY cross-sectional area. 
Related to PCT.\",\n        )\n\n        pb.defParam(\n            \"buLimit\",\n            units=units.UNITLESS,\n            description=\"buLimit\",\n            default=parameters.NoDefault,\n        )\n\n        pb.defParam(\n            \"chargeBu\",\n            units=units.PERCENT_FIMA,\n            description=\"Max block-average burnup in this assembly when it most recently entered \"\n            \"the core. If the assembly was discharged and then re-charged, this value will only \"\n            \"reflect the most recent charge.\",\n        )\n\n        pb.defParam(\n            \"chargeCycle\",\n            units=units.UNITLESS,\n            description=\"Cycle number that this assembly most recently entered the core. If the \"\n            \"assembly was discharged and then re-charged, this value will only reflect the most \"\n            \"recent charge.\",\n        )\n\n        pb.defParam(\n            \"chargeFis\",\n            units=units.KG,\n            description=\"Fissile mass in assembly when it most recently entered the core. If the \"\n            \"assembly was discharged and then re-charged, this value will only reflect the most \"\n            \"recent charge.\",\n        )\n\n        pb.defParam(\n            \"chargeTime\",\n            units=units.YEARS,\n            description=\"Time at which this assembly most recently entered the core. If the \"\n            \"assembly was discharged and then re-charged, this value will only reflect the most \"\n            \"recent charge.\",\n            default=parameters.NoDefault,\n        )\n\n        pb.defParam(\n            \"multiplicity\",\n            units=units.UNITLESS,\n            description=\"The number of physical assemblies that the associated object represents. \"\n            \"This is typically 1, but may need to change when the assembly is moved between \"\n            \"containers with different types of symmetry. 
For instance, if an assembly moves from \"\n            \"a Core with 1/3rd symmetry into a spent-fuel pool with full symmetry, rather than \"\n            \"splitting the assembly into 3, the multiplicity can be set to 3. For now, this is a \"\n            \"bit of a hack to make fuel handling work; multiplicity in the 1/3 core should be 3 to \"\n            \"begin with, in which case this parameter could be used as the primary means of \"\n            \"handling symmetry and fractional domains throughout ARMI. We will probably roll that \"\n            \"out once the dust settles on some of this SFP work. For now, the Core stores \"\n            \"multiplicity as 1 always, since the powerMultiplier to adjust to full-core \"\n            \"quantities.\",\n            default=1,\n        )\n\n        pb.defParam(\"daysSinceLastMove\", units=units.UNITLESS, description=\"daysSinceLastMove\")\n\n        pb.defParam(\"kInf\", units=units.UNITLESS, description=\"kInf\")\n\n        pb.defParam(\"maxDpaPeak\", units=units.DPA, description=\"maxDpaPeak\")\n\n        pb.defParam(\"maxPercentBu\", units=units.PERCENT, description=\"maxPercentBu\")\n\n        pb.defParam(\"numMoves\", units=units.UNITLESS, description=\"numMoves\")\n\n        pb.defParam(\"timeToLimit\", units=units.DAYS, description=\"timeToLimit\", default=1e6)\n\n        pb.defParam(\n            \"guideTubeTopElevation\",\n            units=units.CM,\n            description=(\"Elevation of the top of the guide tube relative to the bottom of the duct.\"),\n            categories=[parameters.Category.assignInBlueprints],\n            saveToDB=True,\n        )\n\n    with pDefs.createBuilder(location=ParamLocation.AVERAGE) as pb:\n        pb.defParam(\n            \"detailedNDens\",\n            setter=isNumpyArray(\"detailedNDens\"),\n            units=f\"atoms/(bn*{units.CM})\",\n            description=(\n                \"High-fidelity number density vector with up to thousands of nuclides. 
\"\n                \"Used in high-fi depletion runs where low-fi depletion may also be occurring. \"\n                \"This param keeps the hi-fi and low-fi depletion values from interfering.\"\n            ),\n            saveToDB=True,\n            default=None,\n        )\n\n        def _enforceNotesRestrictions(self, value):\n            \"\"\"Enforces that notes can only be of type str with max length of 1000.\"\"\"\n            if type(value) is not str:\n                runLog.error(\"Values stored in the `notes` parameter must be strings of less than 1000 characters!\")\n                raise ValueError\n            elif len(value) > 1000:\n                runLog.warning(\n                    \"Strings stored in the `notes` parameter must be less than 1000 characters. \"\n                    f\"Truncating the note starting with {value[0:15]}... at 1000 characters!\"\n                )\n                self._p_notes = value[0:1000]\n            else:\n                self._p_notes = value\n\n        pb.defParam(\n            \"notes\",\n            units=units.UNITLESS,\n            description=\"A string with notes about the assembly, limited to 1000 characters. This \"\n            \"parameter is not meant to store data. Needlessly storing large strings on this \"\n            \"parameter for every assembly is potentially unwise from a memory perspective.\",\n            saveToDB=True,\n            default=\"\",\n            setter=_enforceNotesRestrictions,\n        )\n\n    with pDefs.createBuilder(location=ParamLocation.NA, default=0.0, categories=[\"control rods\"]) as pb:\n        pb.defParam(\n            \"crCriticalFraction\",\n            units=units.UNITLESS,\n            description=(\n                \"The insertion fraction when the control rod assembly is in its critical \"\n                \"configuration. 
Note that the default of -1.0 is a trigger for this value not \"\n                \"being set yet.\"\n            ),\n            saveToDB=True,\n            default=-1.0,\n        )\n\n        pb.defParam(\n            \"crCurrentElevation\",\n            units=units.CM,\n            description=\"The current elevation of the bottom of the moveable section of a control rod assembly.\",\n            categories=[parameters.Category.assignInBlueprints],\n            saveToDB=True,\n        )\n\n        pb.defParam(\n            \"crInsertedElevation\",\n            units=units.CM,\n            description=(\n                \"The elevation of the furthest-most insertion point of a control rod assembly. For \"\n                \"a control rod assembly inserted from the top, this will be the lower tip of the \"\n                \"bottom-most moveable section in the assembly when fully inserted.\"\n            ),\n            categories=[parameters.Category.assignInBlueprints],\n            saveToDB=True,\n        )\n\n        pb.defParam(\n            \"crRodLength\",\n            units=units.CM,\n            description=\"length of the control material within the control rod\",\n            saveToDB=True,\n        )\n\n        pb.defParam(\n            \"crWithdrawnElevation\",\n            units=units.CM,\n            description=(\n                \"The elevation of the tip of a control rod assembly when it is fully withdrawn. 
\"\n                \"For a control rod assembly inserted from the top, this will be the lower tip of \"\n                \"the bottom-most moveable section in the assembly when fully withdrawn.\"\n            ),\n            categories=[parameters.Category.assignInBlueprints],\n            saveToDB=True,\n        )\n\n    with pDefs.createBuilder(location=ParamLocation.AVERAGE, default=0.0, categories=[\"thermal hydraulics\"]) as pb:\n        pb.defParam(\n            \"THcoolantOutletT\",\n            units=units.DEGC,\n            description=\"The nominal average bulk coolant outlet temperature out of the block.\",\n            categories=[\"broadcast\"],\n        )\n\n    with pDefs.createBuilder() as pb:\n        pb.defParam(\n            \"type\",\n            units=units.UNITLESS,\n            description=\"The name of the assembly input on the blueprints input\",\n            default=\"defaultAssemType\",\n            saveToDB=True,\n        )\n\n        pb.defParam(\n            \"ringPosHist\",\n            units=units.UNITLESS,\n            description=(\n                \"Ring and position history for this assembly written at BOC. 
Index 1 corresponds to position at BOC1.\"\n            ),\n            default=None,\n            saveToDB=True,\n        )\n\n        pb.defParam(\n            \"nozzleType\",\n            units=units.UNITLESS,\n            description=\"nozzle type for assembly\",\n            default=\"Default\",\n            saveToDB=True,\n            categories=[parameters.Category.assignInBlueprints],\n        )\n\n    with pDefs.createBuilder(default=0.0) as pb:\n        pb.defParam(\"assemNum\", units=units.UNITLESS, description=\"Assembly number\")\n\n        pb.defParam(\n            \"dischargeTime\",\n            units=units.YEARS,\n            description=\"Time the Assembly was removed from the Reactor.\",\n        )\n\n        pb.defParam(\n            \"hotChannelFactors\",\n            units=units.UNITLESS,\n            description=\"Definition of set of HCFs to be applied to assembly.\",\n            default=\"Default\",\n            saveToDB=True,\n            categories=[parameters.Category.assignInBlueprints],\n        )\n\n    with pDefs.createBuilder(categories=[\"radialGeometry\"]) as pb:\n        pb.defParam(\n            \"AziMesh\",\n            units=units.UNITLESS,\n            description=\"Number of points in the Azimuthal mesh.\",\n            saveToDB=False,\n            default=1,\n        )\n\n        pb.defParam(\n            \"RadMesh\",\n            units=units.UNITLESS,\n            description=\"Number of points in the Radial mesh.\",\n            saveToDB=False,\n            default=1,\n        )\n\n        return pDefs\n"
  },
  {
    "path": "armi/reactor/blockParameters.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Parameter definitions for Blocks.\"\"\"\n\nfrom armi import runLog\nfrom armi.physics.neutronics import crossSectionGroupManager\nfrom armi.reactor import parameters\nfrom armi.reactor.parameters import ParamLocation\nfrom armi.reactor.parameters.parameterDefinitions import isNumpyArray\nfrom armi.utils import units\nfrom armi.utils.units import ASCII_LETTER_A, ASCII_LETTER_Z, ASCII_LETTER_a\n\n\ndef getBlockParameterDefinitions():\n    pDefs = parameters.ParameterDefinitionCollection()\n\n    with pDefs.createBuilder(location=ParamLocation.CENTROID) as pb:\n        pb.defParam(\n            \"orientation\",\n            units=units.DEGREES,\n            description=(\n                \"Triple representing rotations counterclockwise around each spatial axis. For \"\n                \"example, a hex assembly rotated by 1/6th has orientation (0, 0, 60.0)\"\n            ),\n            default=None,\n        )\n\n        pb.defParam(\n            \"detailedNDens\",\n            setter=isNumpyArray(\"detailedNDens\"),\n            units=f\"atoms/(bn*{units.CM})\",\n            description=(\n                \"High-fidelity number density vector with up to thousands of nuclides. \"\n                \"Used in high-fi depletion runs where low-fi depletion may also be occurring. 
\"\n                \"This param keeps the hi-fi and low-fi depletion values from interfering.\"\n            ),\n            location=ParamLocation.AVERAGE,\n            saveToDB=False,\n            default=None,\n        )\n\n    with pDefs.createBuilder(default=0.0, location=ParamLocation.AVERAGE, categories=[\"depletion\"]) as pb:\n        pb.defParam(\n            \"newDPA\",\n            units=units.DPA,\n            description=\"Dose in DPA accrued during the current time step\",\n        )\n\n        pb.defParam(\n            \"percentBu\",\n            units=units.PERCENT_FIMA,\n            description=\"Percentage of the initial heavy metal atoms that have been fissioned\",\n            categories=[\"cumulative\"],\n        )\n\n        pb.defParam(\n            \"percentBuByPin\",\n            units=units.PERCENT_FIMA,\n            description=\"Percent burnup of the initial heavy metal atoms that have been fissioned for each pin\",\n            default=None,\n            saveToDB=False,\n            location=ParamLocation.CHILDREN,\n        )\n\n        pb.defParam(\n            \"residence\",\n            units=units.DAYS,\n            description=(\n                \"Duration that a block has been in the core multiplied by the fraction \"\n                \"of full power generated in that time.\"\n            ),\n            categories=[\"cumulative\"],\n        )\n\n    with pDefs.createBuilder(default=0.0, location=ParamLocation.VOLUME_INTEGRATED, categories=[\"depletion\"]) as pb:\n        pb.defParam(\n            \"molesHmNow\",\n            units=f\"{units.MOLES}\",\n            description=\"Total number of atoms of heavy metal\",\n        )\n\n        pb.defParam(\n            \"molesHmBOL\",\n            units=f\"{units.MOLES}\",\n            description=\"Total number of atoms of heavy metal at BOL.\",\n        )\n\n        pb.defParam(\n            \"massHmBOL\",\n            units=units.GRAMS,\n            description=\"Mass of heavy 
metal at BOL\",\n        )\n\n        pb.defParam(\n            \"initialB10ComponentVol\",\n            units=f\"{units.CM}^3\",\n            description=(\n                \"cc's of un-irradiated, cold B10 containing component (includes full volume of any components with B10)\"\n            ),\n        )\n\n    with pDefs.createBuilder(default=0.0, location=ParamLocation.AVERAGE) as pb:\n\n        def envGroup(self, envGroupChar):\n            if isinstance(envGroupChar, (int, float)):\n                intValue = int(envGroupChar)\n                runLog.warning(\n                    f\"Attempting to set `b.p.envGroup` to int value ({envGroupChar}).\"\n                    \"Possibly loading from old database\",\n                    single=True,\n                    label=\"env group as int \" + str(intValue),\n                )\n                self.envGroupNum = intValue\n                return\n            elif not isinstance(envGroupChar, str):\n                raise Exception(f\"Wrong type for envGroupChar {envGroupChar}: {type(envGroupChar)}\")\n\n            if envGroupChar.islower():\n                # if lower case find the distance from lowercase a and add the span of A to Z\n                lowerCaseOffset = ASCII_LETTER_Z - ASCII_LETTER_A + 1  # 26\n                envGroupNum = ord(envGroupChar) - ASCII_LETTER_a + lowerCaseOffset\n            else:\n                envGroupNum = ord(envGroupChar) - ASCII_LETTER_A\n            self._p_envGroup = envGroupChar\n            self._p_envGroupNum = envGroupNum\n            envGroupNumDef = parameters.ALL_DEFINITIONS[\"envGroupNum\"]\n            envGroupNumDef.assigned = parameters.SINCE_ANYTHING\n\n        pb.defParam(\n            \"envGroup\",\n            units=units.UNITLESS,\n            description=\"The environment group letter of this block\",\n            default=\"A\",\n            setter=envGroup,\n        )\n\n        def envGroupNum(self, envGroupNum):\n            # support capital and 
lowercase alpha chars (52= 26*2)\n            if envGroupNum > 52:\n                raise RuntimeError(\"Invalid env group number ({}): too many groups. 52 is the max.\".format(envGroupNum))\n            self._p_envGroupNum = envGroupNum\n            lowerCaseOffset = ASCII_LETTER_Z - ASCII_LETTER_A\n            if envGroupNum > lowerCaseOffset:\n                envGroupNum = envGroupNum - (lowerCaseOffset + 1)\n                self._p_envGroup = chr(envGroupNum + ASCII_LETTER_a)\n            else:\n                self._p_envGroup = chr(envGroupNum + ASCII_LETTER_A)\n            envGroupDef = parameters.ALL_DEFINITIONS[\"envGroup\"]\n            envGroupDef.assigned = parameters.SINCE_ANYTHING\n\n        pb.defParam(\n            \"envGroupNum\",\n            units=units.UNITLESS,\n            description=\"An integer representation of the environment group \"\n            \"(burnup/temperature/etc.). linked to envGroup.\",\n            default=0,\n            setter=envGroupNum,\n        )\n\n        pb.defParam(\n            \"buRate\",\n            units=f\"{units.PERCENT_FIMA}/{units.DAYS}\",\n            # This is very related to power, but normalized to %FIMA.\n            description=(\n                \"Current rate of burnup accumulation. 
Useful for estimating times when burnup limits may be exceeded.\"\n            ),\n        )\n\n        pb.defParam(\n            \"buRatePeak\",\n            units=f\"{units.PERCENT_FIMA}/{units.DAYS}\",\n            description=\"Current rate of burnup accumulation at peak location\",\n            location=ParamLocation.MAX,\n        )\n\n        pb.defParam(\n            \"detailedDpa\",\n            units=units.DPA,\n            description=\"displacements per atom\",\n            categories=[\"cumulative\", \"detailedAxialExpansion\", \"depletion\"],\n        )\n\n        pb.defParam(\n            \"detailedDpaPeak\",\n            units=units.DPA,\n            description=\"displacements per atom with peaking factor\",\n            categories=[\"cumulative\", \"detailedAxialExpansion\", \"depletion\"],\n            location=ParamLocation.MAX,\n        )\n\n        pb.defParam(\n            \"detailedDpaRate\",\n            units=f\"{units.DPA}/{units.SECONDS}\",\n            description=\"Current time derivative of average detailed DPA\",\n            categories=[\"detailedAxialExpansion\", \"depletion\"],\n        )\n\n        pb.defParam(\n            \"displacementX\",\n            units=units.METERS,\n            description=\"Assembly displacement in the x direction\",\n        )\n\n        pb.defParam(\n            \"displacementY\",\n            units=units.METERS,\n            description=\"Assembly displacement in the y direction\",\n        )\n\n        pb.defParam(\n            \"heliumInB4C\",\n            units=f\"He/{units.SECONDS}/{units.CM}^3\",\n            description=\"Alpha particle production rate in B4C control and shield material.\",\n            location=ParamLocation.AVERAGE,\n        )\n\n        pb.defParam(\n            \"powerRx\",\n            units=f\"{units.WATTS}/{units.CM}^3\",\n            description=\"Power density of the reactor\",\n            location=ParamLocation.AVERAGE,\n        )\n\n        pb.defParam(\n            
\"timeToLimit\",\n            units=units.DAYS,\n            description=\"Time unit block violates its burnup limit.\",\n        )\n\n        pb.defParam(\n            \"zbottom\",\n            units=units.CM,\n            description=\"Axial position of the bottom of this block\",\n            categories=[parameters.Category.retainOnReplacement],\n        )\n\n        pb.defParam(\n            \"ztop\",\n            units=units.CM,\n            description=\"Axial position of the top of this block\",\n            categories=[parameters.Category.retainOnReplacement],\n        )\n\n        pb.defParam(\n            \"nHMAtBOL\",\n            units=f\"atoms/(bn*{units.CM})\",\n            description=\"Ndens of heavy metal at BOL\",\n            saveToDB=False,\n        )\n\n        pb.defParam(\n            \"z\",\n            units=units.CM,\n            description=\"Center axial dimension of this block\",\n            categories=[parameters.Category.retainOnReplacement],\n        )\n\n    with pDefs.createBuilder() as pb:\n        pb.defParam(\n            \"axialExpTargetComponent\",\n            units=units.UNITLESS,\n            description=(\n                \"The name of the target component used for axial expansion and contraction of solid components.\"\n            ),\n            default=\"\",\n            saveToDB=True,\n        )\n\n        pb.defParam(\n            \"topIndex\",\n            units=units.UNITLESS,\n            description=(\n                \"the axial block index within its parent assembly (0 is bottom block). This index with regard to the \"\n                \"mesh of the reference assembly so it does not increase by 1 for each block. It is used to keep the \"\n                \"mesh of the assemblies uniform with axial expansion. 
See setBlockMesh, makeAxialSnapList\",\n            ),\n            default=0,\n            saveToDB=True,\n            categories=[parameters.Category.retainOnReplacement],\n        )\n\n        pb.defParam(\n            \"eqRegion\",\n            units=units.UNITLESS,\n            description=\"Equilibrium shuffling region. Corresponds to how many full cycles fuel here has gone through.\",\n            default=0.0,\n        )\n\n        pb.defParam(\n            \"id\",\n            units=units.UNITLESS,\n            description=\"Inner diameter of the Block.\",\n            default=None,\n        )\n\n        pb.defParam(\n            \"height\",\n            units=units.CM,\n            description=\"the block height\",\n            default=None,\n            categories=[parameters.Category.retainOnReplacement],\n        )\n\n        def xsType(self, value):\n            self._p_xsType = value\n            self._p_xsTypeNum = crossSectionGroupManager.getXSTypeNumberFromLabel(value)\n            xsTypeNumDef = parameters.ALL_DEFINITIONS[\"xsTypeNum\"]\n            xsTypeNumDef.assigned = parameters.SINCE_ANYTHING\n\n        pb.defParam(\n            \"xsType\",\n            units=units.UNITLESS,\n            description=\"The xs group letter of this block\",\n            default=\"A\",\n            setter=xsType,\n        )\n\n        def xsTypeNum(self, value):\n            self._p_xsTypeNum = value\n            self._p_xsType = crossSectionGroupManager.getXSTypeLabelFromNumber(value)\n            xsTypeDef = parameters.ALL_DEFINITIONS[\"xsType\"]\n            xsTypeDef.assigned = parameters.SINCE_ANYTHING\n\n        pb.defParam(\n            \"xsTypeNum\",\n            units=units.UNITLESS,\n            description=\"An integer representation of the cross section type, linked to xsType.\",\n            default=65,  # NOTE: buGroupNum actually starts at 0\n            setter=xsTypeNum,\n        )\n\n        pb.defParam(\n            \"type\",\n            
units=units.UNITLESS,\n            description=\"string name of the input block\",\n            default=\"defaultType\",\n            saveToDB=True,\n        )\n\n    with pDefs.createBuilder(default=0.0) as pb:\n        pb.defParam(\n            \"assemNum\",\n            units=units.UNITLESS,\n            description=\"Index that refers, nominally, to the assemNum parameter of the containing \"\n            \"Assembly object. This is stored on the Block to aid in visualizing shuffle patterns \"\n            \"and the like, and should not be used within the code. These are not guaranteed to be \"\n            \"consistent with the containing Assembly, so they should not be used as a reliable \"\n            \"means to reconstruct the model.\",\n            categories=[parameters.Category.retainOnReplacement],\n        )\n\n        pb.defParam(\n            \"breedRatio\",\n            units=units.UNITLESS,\n            description=\"Breeding ratio\",\n            categories=[\"detailedAxialExpansion\"],\n            location=ParamLocation.AVERAGE,\n        )\n\n        pb.defParam(\"buLimit\", units=units.PERCENT_FIMA, description=\"Burnup limit\")\n\n        pb.defParam(\n            \"heightBOL\",\n            units=units.CM,\n            description=\"As-fabricated height of this block (as input). Used in fuel performance. 
Should be constant.\",\n            location=ParamLocation.AVERAGE,\n            categories=[parameters.Category.retainOnReplacement],\n        )\n\n        pb.defParam(\n            \"intrinsicSource\",\n            units=units.UNITLESS,\n            description=\"Intrinsic neutron source from spontaneous fissions before a decay period\",\n            location=ParamLocation.AVERAGE,\n        )\n\n        pb.defParam(\n            \"kgFis\",\n            units=units.KG,\n            description=\"Mass of fissile material in block\",\n            location=ParamLocation.VOLUME_INTEGRATED,\n        )\n\n        pb.defParam(\n            \"kgHM\",\n            units=units.KG,\n            description=\"Mass of heavy metal in block\",\n            location=ParamLocation.VOLUME_INTEGRATED,\n        )\n\n        pb.defParam(\"nPins\", units=units.UNITLESS, description=\"Number of pins\")\n\n        pb.defParam(\n            \"percentBuPeak\",\n            units=units.PERCENT_FIMA,\n            description=\"Peak percentage of the initial heavy metal atoms that have been fissioned\",\n            location=ParamLocation.MAX,\n            categories=[\"cumulative\", \"eq cumulative shift\"],\n        )\n\n        pb.defParam(\n            \"puFrac\",\n            units=units.UNITLESS,\n            description=\"Current Pu number density relative to HM at BOL\",\n            location=ParamLocation.AVERAGE,\n        )\n\n        pb.defParam(\n            \"smearDensity\",\n            units=units.UNITLESS,\n            description=(\n                \"Smear density of fuel pins in this block. Defined as the ratio of fuel \"\n                \"area to total space inside cladding.\"\n            ),\n            location=ParamLocation.AVERAGE,\n        )\n\n    return pDefs\n"
  },
  {
    "path": "armi/reactor/blocks/__init__.py",
    "content": "# Copyright 2026 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nARMI provides several different Block types for downstream users.\n\nThe generic Block is meant to be a base class. And then ARMI provides different geometries that might be interesting or\nuseful, such as hexagonal or cartesian blocks.\n\nARMI encourages you to build your own subclass of an ARMI Block type, to simplify your reactor blueprints.\n\"\"\"\n\n# ruff: noqa: F401\nfrom armi.reactor.blocks.block import PIN_COMPONENTS, Block\nfrom armi.reactor.blocks.cartesianBlock import CartesianBlock\nfrom armi.reactor.blocks.hexBlock import HexBlock\nfrom armi.reactor.blocks.thRZBlock import ThRZBlock\n"
  },
  {
    "path": "armi/reactor/blocks/block.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThe generic Block base class. This is meant to be the basis of all Blocks you use in your modeling. ARMI encourages you\nto build your own subclass of an ARMI Block type, to simplify your reactor blueprints.\n\nBlocks are axial chunks of assemblies. They contain most of the state variables, including power, flux, and homogenized\nnumber densities. Blocks are further divided into components.\n\"\"\"\n\nimport collections\nimport copy\nimport math\nfrom typing import ClassVar, Optional, Tuple, Type\n\nimport numpy as np\n\nfrom armi import runLog\nfrom armi.bookkeeping import report\nfrom armi.nuclearDataIO import xsCollections\nfrom armi.reactor import (\n    blockParameters,\n    components,\n    composites,\n    grids,\n    parameters,\n)\nfrom armi.reactor.components import basicShapes\nfrom armi.reactor.flags import Flags\nfrom armi.utils import densityTools, units\nfrom armi.utils.plotting import plotBlockFlux\nfrom armi.utils.units import TRACE_NUMBER_DENSITY\n\nPIN_COMPONENTS = [\n    Flags.CONTROL,\n    Flags.PLENUM,\n    Flags.SHIELD,\n    Flags.FUEL,\n    Flags.CLAD,\n    Flags.PIN,\n    Flags.WIRE,\n]\n\n_PitchDefiningComponent = Optional[Tuple[Type[components.Component], ...]]\n\n\nclass Block(composites.Composite):\n    \"\"\"\n    An axial slice of an assembly.\n\n    Blocks are Composite objects with extra parameter bindings, and utility 
methods that let them\n    play nicely with their containing Assembly.\n    \"\"\"\n\n    uniqID = 0\n\n    # dimension used to determine which component defines the block's pitch\n    PITCH_DIMENSION = \"op\"\n\n    # component type that can be considered a candidate for providing pitch\n    PITCH_COMPONENT_TYPE: ClassVar[_PitchDefiningComponent] = None\n\n    pDefs = blockParameters.getBlockParameterDefinitions()\n\n    def __init__(self, name: str, height: float = 1.0):\n        \"\"\"\n        Builds a new ARMI block.\n\n        name : str\n            The name of this block\n        height : float, optional\n            The height of the block in cm. Defaults to 1.0 so that ``getVolume`` assumes unit height.\n        \"\"\"\n        composites.Composite.__init__(self, name)\n        self.p.height = height\n        self.p.heightBOL = height\n        self.p.orientation = np.array((0.0, 0.0, 0.0))\n\n        self.points = []\n        self.macros = None\n\n        # flag to indicate when DerivedShape children must be updated.\n        self.derivedMustUpdate = False\n\n        # which component to use to determine block pitch, along with its 'op'\n        self._pitchDefiningComponent = (None, 0.0)\n\n        # Manually set some parameters at BOL\n        for problemParam in [\"THcornTemp\", \"THedgeTemp\"]:\n            self.p[problemParam] = []\n\n    def __repr__(self):\n        # be warned, changing this might break unit tests on input file generations\n        return \"<{type} {name} at {loc} XS: {xs} ENV GP: {env}>\".format(\n            type=self.getType(),\n            name=self.getName(),\n            xs=self.p.xsType,\n            env=self.p.envGroup,\n            loc=self.getLocation(),\n        )\n\n    def __deepcopy__(self, memo):\n        \"\"\"\n        Custom deepcopy behavior to prevent duplication of macros and _lumpedFissionProducts.\n\n        We detach the recursive links to the parent and the reactor to prevent blocks carrying large\n        
independent copies of stale reactors in memory. If you make a new block, you must add it to\n        an assembly and a reactor.\n        \"\"\"\n        # add self to memo to prevent child objects from duplicating the parent block\n        memo[id(self)] = b = self.__class__.__new__(self.__class__)\n\n        # use __getstate__ and __setstate__ pickle-methods to initialize\n        state = self.__getstate__()  # __getstate__ removes parent\n        del state[\"macros\"]\n        del state[\"_lumpedFissionProducts\"]\n        b.__setstate__(copy.deepcopy(state, memo))\n\n        # assign macros and LFP\n        b.macros = self.macros\n        b._lumpedFissionProducts = self._lumpedFissionProducts\n\n        return b\n\n    def createHomogenizedCopy(self, pinSpatialLocators=False):\n        \"\"\"\n        Create a copy of a block.\n\n        Notes\n        -----\n        Used to implement a copy function for specific block types that can be much faster than a\n        deepcopy by glossing over details that may be unnecessary in certain contexts.\n\n        This base class implementation is just a deepcopy of the block, in full detail (not\n        homogenized).\n        \"\"\"\n        return copy.deepcopy(self)\n\n    @property\n    def core(self):\n        from armi.reactor.reactors import Core\n\n        c = self.getAncestor(lambda c: isinstance(c, Core))\n        return c\n\n    def makeName(self, assemNum, axialIndex):\n        \"\"\"\n        Generate a standard block from assembly number.\n\n        This also sets the block-level assembly-num param.\n\n        Once, we used an axial-character suffix to represent the axial index, but this is inherently limited so we\n        switched to a numerical name. 
The axial suffix needs can be brought in to plugins that require them.\n\n        Examples\n        --------\n        >>> makeName(120, 5)\n        'B0120-005'\n        \"\"\"\n        self.p.assemNum = assemNum\n        return \"B{0:04d}-{1:03d}\".format(assemNum, axialIndex)\n\n    def getSmearDensity(self, cold=True):\n        \"\"\"\n        Compute the smear density of pins in this block.\n\n        Smear density is the area of the fuel divided by the area of the space available for fuel inside the cladding.\n        Other space filled with solid materials is not considered available. If all the area is fuel, it has 100% smear\n        density. Lower smear density allows more room for swelling.\n\n        Warning\n        -------\n        This requires circular fuel and circular cladding. Designs that vary from this will be wrong. It may make sense\n        in the future to put this somewhere a bit more design specific.\n\n        Notes\n        -----\n        This only considers circular objects. If you have a cladding that is not a circle, it will be ignored.\n\n        Negative areas can exist for void gaps in the fuel pin. A negative area in a gap represents overlap area between\n        two solid components. 
To account for this additional space within the pin cladding the abs(negativeArea) is\n        added to the inner cladding area.\n\n        Parameters\n        ----------\n        cold : bool, optional\n            If false, returns the smear density at hot temperatures\n\n        Returns\n        -------\n        float\n            The smear density as a fraction.\n        \"\"\"\n        fuels = self.getComponents(Flags.FUEL)\n        if not fuels:\n            # smear density is not computed for non-fuel blocks\n            return 0.0\n        elif not self.getNumPins():\n            # smear density is only defined for pinned blocks\n            return 0.0\n\n        circles = self.getComponentsOfShape(components.Circle)\n        if not circles:\n            raise ValueError(f\"Cannot get smear density of {self}. There are no circular components.\")\n\n        clads = set(self.getComponents(Flags.CLAD)).intersection(set(circles))\n        if not clads:\n            raise ValueError(f\"Cannot get smear density of {self}. 
There are no clad components.\")\n\n        # Compute component areas\n        innerCladdingArea = sum(\n            math.pi * clad.getDimension(\"id\", cold=cold) ** 2 / 4.0 * clad.getDimension(\"mult\") for clad in clads\n        )\n        sortedClads = sorted(clads)\n        sortedCompsInsideClad = self.getSortedComponentsInsideOfComponent(sortedClads.pop())\n\n        return self.computeSmearDensity(innerCladdingArea, sortedCompsInsideClad, cold)\n\n    @staticmethod\n    def computeSmearDensity(innerCladdingArea: float, sortedCompsInsideClad: list[components.Component], cold: bool):\n        \"\"\"Compute the smear density for a sorted list of components.\n\n        Parameters\n        ----------\n        innerCladdingArea : float\n            Circular area inside the cladding.\n        sortedCompsInsideClad : list\n            A sorted list of Components inside the cladding.\n        cold : bool\n            If false, returns the smear density at hot temperatures\n\n        Returns\n        -------\n        float\n            The smear density as a fraction.\n        \"\"\"\n        fuelComponentArea = 0.0\n        unmovableComponentArea = 0.0\n        negativeArea = 0.0\n        for c in sortedCompsInsideClad:\n            componentArea = c.getArea(cold=cold)\n            if c.isFuel():\n                fuelComponentArea += componentArea\n            elif c.hasFlags(Flags.CLAD):\n                # this is another component's clad; don't count it towards unmoveable area\n                pass\n            elif c.hasFlags([Flags.SLUG, Flags.DUMMY]):\n                # this flag designates that this clad/slug combination isn't fuel and shouldn't be in the average\n                pass\n            else:\n                if c.containsSolidMaterial():\n                    unmovableComponentArea += componentArea\n                elif c.containsVoidMaterial() and componentArea < 0.0:\n                    if cold:  # will error out soon\n                        
runLog.error(\n                            \"{} with id {} and od {} has negative area at cold dimensions\".format(\n                                c,\n                                c.getDimension(\"id\", cold=True),\n                                c.getDimension(\"od\", cold=True),\n                            )\n                        )\n                    negativeArea += abs(componentArea)\n\n        if cold and negativeArea:\n            raise ValueError(\n                \"Negative component areas found. Check the cold dimensions are properly aligned and no components \"\n                \"overlap.\"\n            )\n\n        innerCladdingArea += negativeArea  # See note 2 of self.getSmearDensity\n        totalMovableArea = innerCladdingArea - unmovableComponentArea\n        if totalMovableArea <= 0.0:\n            return 0.0\n        else:\n            return fuelComponentArea / totalMovableArea\n\n    def autoCreateSpatialGrids(self, systemSpatialGrid=None):\n        \"\"\"\n        Creates a spatialGrid for a Block.\n\n        Blocks do not always have a spatialGrid from Blueprints, but some Blocks can have their\n        spatialGrids inferred based on the multiplicity of their components. 
This would add the\n        ability to create a spatialGrid for a Block and give its children the corresponding\n        spatialLocators if certain conditions are met.\n\n        Parameters\n        ----------\n        systemSpatialGrid : Grid, optional\n            Spatial Grid of the system-level parent of this Assembly that contains this Block.\n\n        Raises\n        ------\n        ValueError\n            If the multiplicities of the block are not only 1 or N or if generated ringNumber leads\n            to more positions than necessary.\n        \"\"\"\n        if self.spatialGrid is None:\n            self.spatialGrid = systemSpatialGrid\n\n    def assignPinIndices(self):\n        pass\n\n    def getMgFlux(self, adjoint=False, average=False, gamma=False):\n        \"\"\"\n        Returns the multigroup neutron flux in [n/cm^2/s].\n\n        The first entry is the first energy group (fastest neutrons). Each additional group is the next energy group, as\n        set in the ISOTXS library.\n\n        It is stored integrated over volume on self.p.mgFlux\n\n        Parameters\n        ----------\n        adjoint : bool, optional\n            Return adjoint flux instead of real\n        average : bool, optional\n            If true, will return average flux between latest and previous. 
Doesn't work for pin detailed yet.\n        gamma : bool, optional\n            Whether to return the neutron flux or the gamma flux.\n\n        Returns\n        -------\n        flux : multigroup neutron flux in [n/cm^2/s]\n        \"\"\"\n        flux = composites.ArmiObject.getMgFlux(self, adjoint=adjoint, average=False, gamma=gamma)\n        if average and np.any(self.p.lastMgFlux):\n            volume = self.getVolume()\n            lastFlux = self.p.lastMgFlux / volume\n            flux = (flux + lastFlux) / 2.0\n\n        return flux\n\n    def setPinMgFluxes(self, fluxes, adjoint=False, gamma=False):\n        \"\"\"\n        Store the pin-detailed multi-group neutron flux.\n\n        Parameters\n        ----------\n        fluxes : np.ndarray\n            The block-level pin multigroup fluxes. ``fluxes[i, g]`` represents the flux in group g for\n            pin ``i`` located at ``self.getPinLocations()[i]``. Flux units are the standard n/cm^2/s.\n        adjoint : bool, optional\n            Whether to set real or adjoint data.\n        gamma : bool, optional\n            Whether to set gamma or neutron data.\n        \"\"\"\n        if gamma:\n            if adjoint:\n                raise ValueError(\"Adjoint gamma flux is currently unsupported.\")\n            else:\n                self.p.pinMgFluxesGamma = fluxes\n        else:\n            if adjoint:\n                self.p.pinMgFluxesAdj = fluxes\n            else:\n                self.p.pinMgFluxes = fluxes\n\n    def getMicroSuffix(self):\n        \"\"\"\n        Returns the microscopic library suffix (e.g. 'AB') for this block.\n\n        DIF3D and MC2 are limited to 6 character nuclide labels. ARMI by convention uses the first 4\n        for nuclide name (e.g. U235, PU39, etc.) and then uses the 5th character for cross-section\n        type and the 6th for burnup group. 
This allows a variety of XS sets to be built modeling\n        substantially different blocks.\n\n        Notes\n        -----\n        The single-letter use for xsType and envGroup limit users to 52 groups of each. ARMI will\n        allow 2-letter xsType designations if and only if the `envGroup` setting has length 1 (i.e.\n        no burnup/temp groups are defined). This is useful for high-fidelity XS modeling.\n        \"\"\"\n        env = self.p.envGroup\n        if not env:\n            raise RuntimeError(\n                \"Cannot get MicroXS suffix because {0} in {1} does not have a environment(env) group\".format(\n                    self, self.parent\n                )\n            )\n\n        xsType = self.p.xsType\n        if len(xsType) == 1:\n            return xsType + env\n        elif len(xsType) == 2 and ord(env) != ord(\"A\"):\n            # default is \"A\" so if we got an off default 2 char, there is no way to resolve.\n            raise ValueError(\"Use of non-default env groups is not allowed with multi-character xs groups!\")\n        else:\n            # ignore env group, multi Char XS type to support assigning 2 chars in blueprints\n            return xsType\n\n    def getHeight(self):\n        \"\"\"Return the block height.\"\"\"\n        return self.p.height\n\n    def setHeight(self, modifiedHeight, conserveMass=False, adjustList=None):\n        \"\"\"\n        Set a new height of the block.\n\n        Parameters\n        ----------\n        modifiedHeight : float\n            The height of the block in cm\n\n        conserveMass : bool, optional\n            Conserve mass of nuclides in ``adjustList``.\n\n        adjustList : list, optional\n            Nuclides that will be conserved in conserving mass in the block. 
It is recommended to\n            pass a list of all nuclides in the block.\n\n        Notes\n        -----\n        There is a coupling between block heights, the parent assembly axial mesh, and the\n        ztop/zbottom/z params of the sibling blocks. When you set a height, all those things are\n        invalidated. Thus, this method has to go through and update them via\n        ``parent.calculateZCoords``.\n\n        See Also\n        --------\n        armi.reactor.reactors.Core.updateAxialMesh\n            May need to be called after this.\n        armi.reactor.assemblies.Assembly.calculateZCoords\n            Recalculates z-coords, automatically called by this.\n        \"\"\"\n        originalHeight = self.getHeight()  # get before modifying\n        if modifiedHeight < 0.0:\n            raise ValueError(f\"Cannot set height of block {self} to height of {modifiedHeight} cm\")\n        self.p.height = modifiedHeight\n        self.clearCache()\n        if conserveMass:\n            if originalHeight != modifiedHeight:\n                if not adjustList:\n                    raise ValueError(\"Nuclides in ``adjustList`` must be provided to conserve mass.\")\n                self.adjustDensity(originalHeight / modifiedHeight, adjustList)\n        if self.parent:\n            self.parent.calculateZCoords()\n\n    def getWettedPerimeter(self):\n        raise NotImplementedError\n\n    def getFlowAreaPerPin(self):\n        \"\"\"\n        Return the flowing coolant area of the block in cm^2, normalized to the number of pins in the block.\n\n        NumPins looks for max number of fuel, clad, control, etc.\n\n        See Also\n        --------\n        armi.reactor.blocks.Block.getNumPins\n            figures out numPins\n        \"\"\"\n        numPins = self.getNumPins()\n        try:\n            return self.getComponent(Flags.COOLANT, exact=True).getArea() / numPins\n        except ZeroDivisionError:\n            raise ZeroDivisionError(\n                
f\"Block {self} has 0 pins (fuel, clad, control, shield, etc.). Thus, its flow area \"\n                \"per pin is undefined.\"\n            )\n\n    def getHydraulicDiameter(self):\n        raise NotImplementedError\n\n    def adjustUEnrich(self, newEnrich):\n        \"\"\"\n        Adjust U-235/U-238 mass ratio to a mass enrichment.\n\n        Parameters\n        ----------\n        newEnrich : float\n            New U-235 enrichment in mass fraction\n\n        Notes\n        -----\n        completeInitialLoading must be run because adjusting the enrichment actually changes the\n        mass slightly and you can get negative burnups, which you do not want.\n        \"\"\"\n        fuels = self.getChildrenWithFlags(Flags.FUEL)\n\n        if fuels:\n            for fuel in fuels:\n                fuel.adjustMassEnrichment(newEnrich)\n        else:\n            # no fuel in this block\n            tU = self.getNumberDensity(\"U235\") + self.getNumberDensity(\"U238\")\n            if tU:\n                self.setNumberDensity(\"U235\", tU * newEnrich)\n                self.setNumberDensity(\"U238\", tU * (1.0 - newEnrich))\n\n        self.completeInitialLoading()\n\n    def getLocation(self):\n        \"\"\"Return a string representation of the location.\n\n        .. impl:: Location of a block is retrievable.\n            :id: I_ARMI_BLOCK_POSI0\n            :implements: R_ARMI_BLOCK_POSI\n\n            If the block does not have its ``core`` attribute set, if the block's parent does not\n            have a ``spatialGrid`` attribute, or if the block does not have its location defined by\n            its ``spatialLocator`` attribute, return a string indicating that it is outside of the\n            core.\n\n            Otherwise, use the :py:class:`~armi.reactor.grids.Grid.getLabel` static method to\n            convert the block's indices into a string like \"XXX-YYY-ZZZ\". 
For hexagonal geometry,\n            \"XXX\" is the zero-padded hexagonal core ring, \"YYY\" is the zero-padded position in that\n            ring, and \"ZZZ\" is the zero-padded block axial index from the bottom of the core.\n        \"\"\"\n        if self.core and self.parent.spatialGrid and self.spatialLocator:\n            return self.core.spatialGrid.getLabel(self.spatialLocator.getCompleteIndices())\n        else:\n            return \"ExCore\"\n\n    def coords(self):\n        \"\"\"\n        Returns the coordinates of the block.\n\n        .. impl:: Coordinates of a block are queryable.\n            :id: I_ARMI_BLOCK_POSI1\n            :implements: R_ARMI_BLOCK_POSI\n\n            Calls to the :py:meth:`~armi.reactor.grids.locations.IndexLocation.getGlobalCoordinates`\n            method of the block's ``spatialLocator`` attribute, which recursively calls itself on\n            all parents of the block to get the coordinates of the block's centroid in 3D cartesian\n            space.\n        \"\"\"\n        return self.spatialLocator.getGlobalCoordinates()\n\n    def setBuLimitInfo(self):\n        \"\"\"Sets burnup limit based on igniter, feed, etc.\"\"\"\n        if self.p.buRate == 0:\n            # might be cycle 1 or a non-burning block\n            self.p.timeToLimit = 0.0\n        else:\n            timeLimit = (self.p.buLimit - self.p.percentBu) / self.p.buRate + self.p.residence\n            self.p.timeToLimit = (timeLimit - self.p.residence) / units.DAYS_PER_YEAR\n\n    def getMaxArea(self):\n        raise NotImplementedError\n\n    def getArea(self, cold=False):\n        \"\"\"\n        Return the area of a block for a full core or a 1/3 core model.\n\n        Area is consistent with the area in the model, so if you have a central assembly in a 1/3\n        symmetric model, this will return 1/3 of the total area of the physical assembly. 
This way,\n        if you take the sum of the areas in the core (or count the atoms in the core, etc.), you\n        will have the proper number after multiplying by the model symmetry.\n\n        Parameters\n        ----------\n        cold : bool\n            flag to indicate that cold (as input) dimensions are required\n\n        Notes\n        -----\n        This might not work for a 1/6 core model (due to symmetry line issues).\n\n        Returns\n        -------\n        area : float (cm^2)\n\n        See Also\n        --------\n        armi.reactor.blocks.Block.getMaxArea\n            return the full area of the physical assembly disregarding model symmetry\n        \"\"\"\n        # this caching requires that you clear the cache every time you adjust anything including\n        # temperature and dimensions.\n        area = self._getCached(\"area\")\n        if area:\n            return area\n\n        a = 0.0\n        for c in self:\n            myArea = c.getArea(cold=cold)\n            a += myArea\n        fullArea = a\n\n        # correct the fullHexArea by the symmetry factor this factor determines if the hex has been\n        # clipped by symmetry lines\n        area = fullArea / self.getSymmetryFactor()\n\n        self._setCache(\"area\", area)\n        return area\n\n    def getVolume(self):\n        \"\"\"\n        Return the volume of a block.\n\n        Returns\n        -------\n        volume : float\n            Block or component volume in cm^3\n        \"\"\"\n        # use symmetryFactor in case the assembly is sitting on a boundary and needs to be cut in half, etc.\n        vol = sum(c.getVolume() for c in self)\n        return vol / self.getSymmetryFactor()\n\n    def getSymmetryFactor(self):\n        \"\"\"\n        Return a scaling factor due to symmetry on the area of the block or its components.\n\n        Takes into account assemblies that are bisected or trisected by symmetry lines\n\n        In 1/3 symmetric cases, the central 
assembly is 1/3 a full area. If edge assemblies are\n        included in a model, the symmetry factor along both edges for overhanging assemblies should\n        be 2.0. However, ARMI runs in most scenarios with those assemblies on the 120-edge removed,\n        so the symmetry factor should generally be just 1.0.\n\n        See Also\n        --------\n        armi.reactor.converters.geometryConverter.EdgeAssemblyChanger.scaleParamsRelatedToSymmetry\n        \"\"\"\n        return 1.0\n\n    def adjustDensity(self, frac, adjustList, returnMass=False):\n        \"\"\"\n        Adjusts the total density of each nuclide in adjustList by frac.\n\n        Parameters\n        ----------\n        frac : float\n            The fraction of the current density that will remain after this operation\n\n        adjustList : list\n            List of nuclide names that will be adjusted.\n\n        returnMass : bool\n            If true, will return mass difference.\n\n        Returns\n        -------\n        mass : float\n            Mass difference in grams. 
If you subtract mass, mass will be negative.\n            If returnMass is False (default), this will always be zero.\n        \"\"\"\n        self._updateDetailedNdens(frac, adjustList)\n\n        mass = 0.0\n        if returnMass:\n            # do this with a flag to enable faster operation when mass is not needed.\n            volume = self.getVolume()\n\n        numDensities = self.getNuclideNumberDensities(adjustList)\n\n        for nuclideName, dens in zip(adjustList, numDensities):\n            if not dens:\n                # don't modify zeros.\n                continue\n            newDens = dens * frac\n            # add a little so components remember\n            self.setNumberDensity(nuclideName, newDens + TRACE_NUMBER_DENSITY)\n            if returnMass:\n                mass += densityTools.getMassInGrams(nuclideName, volume, newDens - dens)\n\n        return mass\n\n    def _updateDetailedNdens(self, frac, adjustList):\n        \"\"\"\n        Update detailed number density which is used by hi-fi depleters such as ORIGEN.\n\n        Notes\n        -----\n        This will perturb all number densities so it is assumed that if one of the active densities\n        is perturbed, all of them are perturbed.\n        \"\"\"\n        if self.p.detailedNDens is None:\n            # BOL assems get expanded to a reference so the first check is needed so it won't call\n            # .blueprints on None since BOL assems don't have a core/r\n            return\n        if any(nuc in self.core.r.blueprints.activeNuclides for nuc in adjustList):\n            self.p.detailedNDens *= frac\n            # Other power densities do not need to be updated as they are calculated in the global\n            # flux interface, which occurs after axial expansion on the interface stack.\n            self.p.pdensDecay *= frac\n\n    def completeInitialLoading(self, bolBlock=None):\n        \"\"\"\n        Does some BOL bookkeeping to track things like BOL HM density for burnup 
tracking.\n\n        This should run after this block is loaded up at BOC (called from Reactor.initialLoading).\n\n        The original purpose of this was to get the moles HM at BOC for the moles Pu/moles HM at BOL\n        calculation.\n\n        This also must be called after modifying something like the smear density or zr fraction in\n        an optimization case. In ECPT cases, a BOL block must be passed or else the burnup will try\n        to get based on a pre-burned value.\n\n        Parameters\n        ----------\n        bolBlock : Block, optional\n            A BOL-state block of this block type, required for perturbed equilibrium cases.\n            Must have the same enrichment as this block!\n\n        Returns\n        -------\n        hmDens : float\n            The heavy metal number density of this block.\n\n        See Also\n        --------\n        Reactor.importGeom\n        depletion._updateBlockParametersAfterDepletion\n        \"\"\"\n        if bolBlock is None:\n            bolBlock = self\n\n        hmDens = bolBlock.getHMDens()  # total homogenized heavy metal number density\n        self.p.nHMAtBOL = hmDens\n        self.p.molesHmBOL = self.getHMMoles()\n        self.p.puFrac = self.getPuMoles() / self.p.molesHmBOL if self.p.molesHmBOL > 0.0 else 0.0\n\n        try:\n            # non-pinned reactors (or ones without cladding) will not use smear density\n            self.p.smearDensity = self.getSmearDensity()\n        except ValueError:\n            pass\n\n        self.p.enrichmentBOL = self.getFissileMassEnrich()\n        massHmBOL = 0.0\n        for child in self:\n            hmMass = child.getHMMass()\n            massHmBOL += hmMass\n            # Components have the following parameters but not every composite will massHmBOL,\n            # molesHmBOL, puFrac, enrichmentBOL\n            if isinstance(child, components.Component):\n                child.p.massHmBOL = hmMass\n                child.p.molesHmBOL = 
child.getHMMoles()\n                if child.p.molesHmBOL:\n                    child.p.enrichmentBOL = child.getFissileMassEnrich()\n\n        self.p.massHmBOL = massHmBOL\n\n        return hmDens\n\n    def setB10VolParam(self, heightHot):\n        \"\"\"\n        Set the b.p.initialB10ComponentVol param according to the volume of boron-10 containing components.\n\n        Parameters\n        ----------\n        heightHot : Boolean\n            True if self.height() is cold height\n        \"\"\"\n        # exclude fuel components since they could have slight B10 impurity and\n        # this metric is not relevant for fuel.\n        b10Comps = [c for c in self if c.getNumberDensity(\"B10\") and not c.isFuel()]\n        if not b10Comps:\n            return\n\n        # get the highest density comp dont want to sum all because some comps might have very small\n        # impurities of boron and adding this volume won't be conservative for captures per cc.\n        b10Comp = sorted(b10Comps, key=lambda x: x.getNumberDensity(\"B10\"))[-1]\n\n        if len(b10Comps) > 1:\n            runLog.warning(\n                f\"More than one boron10-containing component found in {self.name}. Only {b10Comp} \"\n                f\"will be considered for calculation of initialB10ComponentVol Since adding \"\n                f\"multiple volumes is not conservative for captures. All compos found {b10Comps}\",\n                single=True,\n            )\n        if self.isFuel():\n            runLog.warning(\n                f\"{self.name} has both fuel and initial b10. 
b10 volume may not be conserved with axial expansion.\",\n                single=True,\n            )\n\n        # calc volume of boron components\n        coldArea = b10Comp.getArea(cold=True)\n        coldFactor = b10Comp.getThermalExpansionFactor() if heightHot else 1\n        coldHeight = self.getHeight() / coldFactor\n        self.p.initialB10ComponentVol = coldArea * coldHeight\n\n    def replaceBlockWithBlock(self, bReplacement):\n        \"\"\"\n        Replace the current block with the replacementBlock.\n\n        Typically used in the insertion of control rods.\n        \"\"\"\n        paramsToSkip = set(self.p.paramDefs.inCategory(parameters.Category.retainOnReplacement).names)\n\n        tempBlock = copy.deepcopy(bReplacement)\n        oldParams = self.p\n        newParams = self.p = tempBlock.p\n        for paramName in paramsToSkip:\n            newParams[paramName] = oldParams[paramName]\n\n        # update synchronization information\n        self.p.assigned = parameters.SINCE_ANYTHING\n        paramDefs = self.p.paramDefs\n        for paramName in set(newParams.keys()) - paramsToSkip:\n            paramDefs[paramName].assigned = parameters.SINCE_ANYTHING\n\n        newComponents = tempBlock.getChildren()\n        self.setChildren(newComponents)\n        self.clearCache()\n\n    @staticmethod\n    def plotFlux(core, fName=None, bList=None, peak=False, adjoint=False, bList2=[]):\n        \"\"\"A simple pass-through method to a utils plotting function. This is here to preserve the API.\"\"\"\n        plotBlockFlux(core, fName, bList, peak, adjoint, bList2)\n\n    def _updatePitchComponent(self, c):\n        \"\"\"\n        Update the component that defines the pitch.\n\n        Given a Component, compare it to the current component that defines the pitch of the Block.\n        If bigger, replace it. 
We need different implementations of this to support different logic\n        for determining the form of pitch and the concept of \"larger\".\n\n        See Also\n        --------\n        CartesianBlock._updatePitchComponent\n        \"\"\"\n        # Some block types don't have a clearly defined pitch (e.g. ThRZ)\n        if self.PITCH_COMPONENT_TYPE is None:\n            return\n\n        if not isinstance(c, self.PITCH_COMPONENT_TYPE):\n            return\n\n        try:\n            componentPitch = c.getDimension(self.PITCH_DIMENSION)\n        except parameters.UnknownParameterError:\n            # some components dont have the appropriate parameter\n            return\n\n        if componentPitch and (componentPitch > self._pitchDefiningComponent[1]):\n            self._pitchDefiningComponent = (c, componentPitch)\n\n    def add(self, c):\n        composites.Composite.add(self, c)\n\n        self.derivedMustUpdate = True\n        self.clearCache()\n        try:\n            mult = int(c.getDimension(\"mult\"))\n            if self.p.percentBuByPin is None or len(self.p.percentBuByPin) < mult:\n                # this may be a little wasteful, but we can fix it later...\n                self.p.percentBuByPin = [0.0] * mult\n        except AttributeError:\n            # maybe adding a Composite of components rather than a single\n            pass\n        self._updatePitchComponent(c)\n\n    def removeAll(self, recomputeAreaFractions=True):\n        for c in list(self):\n            self.remove(c, recomputeAreaFractions=False)\n        if recomputeAreaFractions:  # only do this once\n            self.getVolumeFractions()\n\n    def remove(self, c, recomputeAreaFractions=True):\n        composites.Composite.remove(self, c)\n        self.clearCache()\n\n        if c is self._pitchDefiningComponent[0]:\n            self._pitchDefiningComponent = (None, 0.0)\n            pc = self.getLargestComponent(self.PITCH_DIMENSION)\n            if pc is not None:\n          
      self._updatePitchComponent(pc)\n\n        if recomputeAreaFractions:\n            self.getVolumeFractions()\n\n    def getComponentsThatAreLinkedTo(self, comp, dim):\n        \"\"\"\n        Determine which dimensions of which components are linked to a specific dimension of a\n        particular component.\n\n        Useful for breaking fuel components up into individuals and making sure anything that was\n        linked to the fuel mult (like the cladding mult) stays correct.\n\n        Parameters\n        ----------\n        comp : Component\n            The component that the results are linked to\n        dim : str\n            The name of the dimension that the results are linked to\n\n        Returns\n        -------\n        linkedComps : list\n            A list of (components,dimName) that are linked to this component, dim.\n        \"\"\"\n        linked = []\n        for c in self.iterComponents():\n            for dimName, val in c.p.items():\n                if c.dimensionIsLinked(dimName):\n                    requiredComponent = val[0]\n                    if requiredComponent is comp and val[1] == dim:\n                        linked.append((c, dimName))\n        return linked\n\n    def getComponentsInLinkedOrder(self, componentList=None):\n        \"\"\"\n        Return a list of the components in order of their linked-dimension dependencies.\n\n        Parameters\n        ----------\n        components : list, optional\n            A list of components to consider. 
If None, this block's components will be used.\n\n        Notes\n        -----\n        This means that components other components are linked to come first.\n        \"\"\"\n        if componentList is None:\n            componentList = self.getComponents()\n        cList = collections.deque(componentList)\n        orderedComponents = []\n        # Loop through the components until there are none left.\n        counter = 0\n        while cList:\n            candidate = cList.popleft()  # take first item in list\n            cleared = True  # innocent until proven guilty\n            # loop through all dimensions in this component to determine its dependencies\n            for dimName, val in candidate.p.items():\n                if candidate.dimensionIsLinked(dimName):\n                    # In linked dimensions, val = (component, dimName)\n                    requiredComponent = val[0]\n                    if requiredComponent not in orderedComponents:\n                        # this component depends on one that is not in the ordered list yet.\n                        # do not add it.\n                        cleared = False\n                        break  # short circuit. One failed lookup is enough to flag this component as dirty.\n            if cleared:\n                # this candidate is free of dependencies and is ready to be added.\n                orderedComponents.append(candidate)\n            else:\n                cList.append(candidate)\n\n            counter += 1\n            if counter > 1000:\n                cList.append(candidate)\n                runLog.error(\n                    \"The component {0} in {1} contains a dimension that is linked to another component, \"\n                    \" but the required component is not present in the block. They may also be other dependency fails. 
\"\n                    \"The component dims are {2}\".format(cList[0], self, cList[0].p)\n                )\n                raise RuntimeError(\"Cannot locate linked component.\")\n        return orderedComponents\n\n    def getSortedComponentsInsideOfComponent(self, component):\n        \"\"\"\n        Returns a list of components inside of the given component sorted from innermost to outermost.\n\n        Parameters\n        ----------\n        component : object\n            Component to look inside of.\n\n        Notes\n        -----\n        If you just want sorted components in this block, use ``sorted(self)``. This will never\n        include any ``DerivedShape`` objects. Since they have a derived area they don't have a well-\n        defined dimension. For now we just ignore them. If they are desired in the future some\n        knowledge of their dimension will be required while they are being derived.\n        \"\"\"\n        sortedComponents = sorted(self)\n        componentIndex = sortedComponents.index(component)\n        sortedComponents = sortedComponents[:componentIndex]\n        return sortedComponents\n\n    def getNumPins(self):\n        \"\"\"Return the number of pins in this block.\n\n        .. impl:: Get the number of pins in a block.\n            :id: I_ARMI_BLOCK_NPINS\n            :implements: R_ARMI_BLOCK_NPINS\n\n            Uses some simple criteria to infer the number of pins in the block.\n\n            For every flag in the module list :py:data:`~armi.reactor.blocks.PIN_COMPONENTS`, loop\n            over all components of that type in the block. 
If the component is an instance of\n            :py:class:`~armi.reactor.components.basicShapes.Circle`, add its multiplicity to a list,\n            and sum that list over all components with each given flag.\n\n            After looping over all possibilities, return the maximum value returned from the process\n            above, or if no compatible components were found, return zero.\n        \"\"\"\n        nPins = [\n            sum(\n                [\n                    (int(c.getDimension(\"mult\")) if isinstance(c, basicShapes.Circle) else 0)\n                    for c in self.iterComponents(compType)\n                ]\n            )\n            for compType in PIN_COMPONENTS\n        ]\n        return 0 if not nPins else max(nPins)\n\n    def mergeWithBlock(self, otherBlock, fraction):\n        \"\"\"\n        Turns this block into a mixture of this block and some other block.\n\n        Parameters\n        ----------\n        otherBlock : Block\n            The block to mix this block with. The other block will not be modified.\n\n        fraction : float\n            Fraction of the other block to mix in with this block. If 0.1 is passed in, this block\n            will become 90% what it originally was and 10% what the other block is.\n\n        Notes\n        -----\n        This merges on a high level (using number densities). Components will not be merged.\n\n        This is used e.g. for inserting a control block partially to get a very tight criticality\n        control. In this case, a control block would be merged with a duct block. 
It is also used\n        when a control rod is specified as a certain length but that length does not fit exactly\n        into a full block.\n        \"\"\"\n        numDensities = self.getNumberDensities()\n        otherBlockDensities = otherBlock.getNumberDensities()\n        newDensities = {}\n\n        # Make sure to hit all nuclides in union of blocks\n        for nucName in set(numDensities.keys()).union(otherBlockDensities.keys()):\n            newDensities[nucName] = (1.0 - fraction) * numDensities.get(\n                nucName, 0.0\n            ) + fraction * otherBlockDensities.get(nucName, 0.0)\n\n        self.setNumberDensities(newDensities)\n\n    def getComponentAreaFrac(self, typeSpec):\n        \"\"\"\n        Returns the area fraction of the specified component(s) among all components in the block.\n\n        Parameters\n        ----------\n        typeSpec : Flags or list of Flags\n            Component types to look up\n\n        Examples\n        --------\n        >>> b.getComponentAreaFrac(Flags.CLAD)\n        0.15\n\n        Returns\n        -------\n        float\n            The area fraction of the component.\n        \"\"\"\n        tFrac = sum(f for (c, f) in self.getVolumeFractions() if c.hasFlags(typeSpec))\n\n        if tFrac:\n            return tFrac\n        else:\n            runLog.warning(\n                f\"No component {typeSpec} exists on {self}, so area fraction is zero.\",\n                single=True,\n                label=f\"{typeSpec} areaFrac is zero\",\n            )\n            return 0.0\n\n    def verifyBlockDims(self):\n        \"\"\"Optional dimension checking.\"\"\"\n        return\n\n    def getDim(self, typeSpec, dimName):\n        \"\"\"\n        Search through blocks in this assembly and find the first component of compName.\n        Then, look on that component for dimName.\n\n        Parameters\n        ----------\n        typeSpec : Flags or list of Flags\n            Component name, e.g. 
Flags.FUEL, Flags.CLAD, Flags.COOLANT, ...\n        dimName : str\n            Dimension name, e.g. 'od', ...\n\n        Returns\n        -------\n        dimVal : float\n            The dimension in cm.\n\n        Examples\n        --------\n        >>> getDim(Flags.WIRE, \"od\")\n        0.01\n        \"\"\"\n        for c in self:\n            if c.hasFlags(typeSpec):\n                return c.getDimension(dimName.lower())\n\n        raise ValueError(f\"Cannot get Dimension because Flag not found: {typeSpec}\")\n\n    def getPinCenterFlatToFlat(self, cold=False):\n        \"\"\"Return the flat-to-flat distance between the centers of opposing pins in the outermost ring.\"\"\"\n        raise NotImplementedError  # no geometry can be assumed\n\n    def getWireWrapCladGap(self, cold=False):\n        \"\"\"Return the gap between the wire wrap and the clad.\"\"\"\n        clad = self.getComponent(Flags.CLAD)\n        wire = self.getComponent(Flags.WIRE)\n        wireOuterRadius = wire.getBoundingCircleOuterDiameter(cold=cold) / 2.0\n        wireInnerRadius = wireOuterRadius - wire.getDimension(\"od\", cold=cold)\n        cladOuterRadius = clad.getDimension(\"od\", cold=cold) / 2.0\n        return wireInnerRadius - cladOuterRadius\n\n    def getPlenumPin(self):\n        \"\"\"Return the plenum pin if it exists.\"\"\"\n        for c in self.iterComponents(Flags.GAP):\n            if self.isPlenumPin(c):\n                return c\n        return None\n\n    def isPlenumPin(self, c):\n        \"\"\"Return True if the specified component is a plenum pin.\"\"\"\n        # This assumes that anything with the GAP flag will have a valid 'id' dimension.\n        cIsCenterGapGap = isinstance(c, components.Component) and c.hasFlags(Flags.GAP) and c.getDimension(\"id\") == 0\n        return self.hasFlags([Flags.PLENUM, Flags.ACLP]) and cIsCenterGapGap\n\n    def getPitch(self, returnComp=False):\n        \"\"\"\n        Return the center-to-center hex pitch of this block.\n\n      
  Parameters\n        ----------\n        returnComp : bool, optional\n            If true, will return the component that has the maximum pitch as well\n\n        Returns\n        -------\n        pitch : float or None\n            Hex pitch in cm, if well-defined. If there is no clear component for determining pitch, returns None\n        component : Component or None\n            Component that has the max pitch, if returnComp == True. If no component is found to define the pitch,\n            returns None.\n\n        Notes\n        -----\n        The block stores a reference to the component that defines the pitch, making the assumption that while the\n        dimensions can change, the component containing the largest dimension will not. This lets us skip the search for\n        largest component. We still need to ask the largest component for its current dimension in case its temperature\n        changed, or was otherwise modified.\n\n        See Also\n        --------\n        setPitch : sets pitch\n        \"\"\"\n        c, _p = self._pitchDefiningComponent\n        if c is None:\n            raise ValueError(\"{} has no valid pitch defining component\".format(self))\n\n        # ask component for dimensions, since they could have changed due to temperature\n        p = c.getPitchData()\n        return (p, c) if returnComp else p\n\n    def hasPinPitch(self):\n        \"\"\"Return True if the block has enough information to calculate pin pitch.\"\"\"\n        return self.spatialGrid is not None\n\n    def getPinPitch(self, cold=False):\n        \"\"\"\n        Return sub-block pitch in blocks.\n\n        This assumes the spatial grid is defined by unit steps\n        \"\"\"\n        return self.spatialGrid.pitch\n\n    def getDimensions(self, dimension):\n        \"\"\"Return dimensional values of the specified dimension.\"\"\"\n        dimVals = set()\n        for c in self:\n            try:\n                dimVal = c.getDimension(dimension)\n           
 except parameters.ParameterError:\n                continue\n            if dimVal is not None:\n                dimVals.add(dimVal)\n        return dimVals\n\n    def getLargestComponent(self, dimension):\n        \"\"\"\n        Find the component with the largest dimension of the specified type.\n\n        Parameters\n        ----------\n        dimension: str\n            The name of the dimension to find the largest component of.\n\n        Returns\n        -------\n        largestComponent: armi.reactor.components.Component\n            The component with the largest dimension of the specified type.\n        \"\"\"\n        maxDim = -float(\"inf\")\n        largestComponent = None\n        for c in self:\n            try:\n                dimVal = c.getDimension(dimension)\n            except parameters.ParameterError:\n                continue\n            if dimVal is not None and dimVal > maxDim:\n                maxDim = dimVal\n                largestComponent = c\n        return largestComponent\n\n    def setPitch(self, val, updateBolParams=False):\n        \"\"\"\n        Sets outer pitch to some new value.\n\n        This sets the settingPitch and actually sets the dimension of the outer hexagon.\n\n        During a load (importGeom), the setDimension doesn't usually do anything except set the\n        setting See Issue 034\n\n        But during a actual case modification (e.g. 
in an optimization sweep, then the dimension has\n        to be set as well.\n\n        See Also\n        --------\n        getPitch : gets the pitch\n        \"\"\"\n        c, _p = self._pitchDefiningComponent\n        if c:\n            c.setDimension(\"op\", val)\n            self._pitchDefiningComponent = (c, val)\n        else:\n            raise RuntimeError(\"No pitch-defining component on block {}\".format(self))\n\n        if updateBolParams:\n            self.completeInitialLoading()\n\n    def getMfp(self, gamma=False):\n        r\"\"\"\n        Calculate the mean free path for neutron or gammas in this block.\n\n        .. math::\n\n            <\\Sigma> = \\frac{\\sum_E(\\phi_e \\Sigma_e dE)}{\\sum_E (\\phi_e dE)}  =\n            \\frac{\\sum_E(\\phi_e N \\sum_{\\text{type}}(\\sigma_e)  dE}{\\sum_E (\\phi_e dE))}\n\n        Block macro is the sum of macros of all nuclides.\n\n        phi_g = flux*dE already in multigroup method.\n\n        Returns\n        -------\n        mfp, mfpAbs, diffusionLength : tuple(float, float float)\n        \"\"\"\n        lib = self.core.lib\n        flux = self.getMgFlux(gamma=gamma)\n        flux = [fi / max(flux) for fi in flux]\n        mfpNumerator = np.zeros(len(flux))\n        absMfpNumerator = np.zeros(len(flux))\n        transportNumerator = np.zeros(len(flux))\n\n        numDensities = self.getNumberDensities()\n\n        for nucName, nDen in numDensities.items():\n            nucMc = self.nuclideBases.byName[nucName].label + self.getMicroSuffix()\n            if gamma:\n                micros = lib[nucMc].gammaXS\n            else:\n                micros = lib[nucMc].micros\n            total = micros.total[:, 0]  # 0th order\n            transport = micros.transport[:, 0]  # 0th order, [bn]\n            absorb = sum(micros.getAbsorptionXS())\n            mfpNumerator += nDen * total  # [cm]\n            absMfpNumerator += nDen * absorb\n            transportNumerator += nDen * transport\n\n        denom = 
sum(flux)\n        mfp = 1.0 / (sum(mfpNumerator * flux) / denom)\n        sigmaA = sum(absMfpNumerator * flux) / denom\n        sigmaTr = sum(transportNumerator * flux) / denom\n        diffusionCoeff = 1 / (3.0 * sigmaTr)\n        mfpAbs = 1 / sigmaA\n        diffusionLength = math.sqrt(diffusionCoeff / sigmaA)\n        return mfp, mfpAbs, diffusionLength\n\n    def setAreaFractionsReport(self):\n        for c, frac in self.getVolumeFractions():\n            report.setData(\n                c.getName(),\n                [\"{0:10f}\".format(c.getArea()), \"{0:10f}\".format(frac)],\n                report.BLOCK_AREA_FRACS,\n            )\n\n        # return the group the information went to\n        return report.ALL[report.BLOCK_AREA_FRACS]\n\n    def getBlocks(self):\n        \"\"\"\n        This method returns all the block(s) included in this block its implemented so that methods\n        could iterate over reactors, assemblies or single blocks without checking to see what the\n        type of the reactor-family object is.\n        \"\"\"\n        return [self]\n\n    def updateComponentDims(self):\n        \"\"\"\n        This method updates all the dimensions of the components.\n\n        Notes\n        -----\n        This is VERY useful for defining a ThRZ core out of differentialRadialSegements whose\n        dimensions are connected together some of these dimensions are derivative and can be updated\n        by changing dimensions in a Parameter Component or other linked components\n\n        See Also\n        --------\n        armi.reactor.components.DifferentialRadialSegment.updateDims\n        armi.reactor.components.Parameters\n        armi.physics.optimize.OptimizationInterface.modifyCase (look up 'ThRZReflectorThickness')\n        \"\"\"\n        for c in self.getComponentsInLinkedOrder():\n            try:\n                c.updateDims()\n            except NotImplementedError:\n                runLog.warning(\"{0} has no updatedDims method -- 
skipping\".format(c))\n\n    def getIntegratedMgFlux(self, adjoint=False, gamma=False):\n        \"\"\"\n        Return the volume integrated multigroup neutron tracklength in [n-cm/s].\n\n        The first entry is the first energy group (fastest neutrons). Each additional group is the\n        next energy group, as set in the ISOTXS library.\n\n        Parameters\n        ----------\n        adjoint : bool, optional\n            Return adjoint flux instead of real\n\n        gamma : bool, optional\n            Whether to return the neutron flux or the gamma flux.\n\n        Returns\n        -------\n        integratedFlux : np.ndarray\n            multigroup neutron tracklength in [n-cm/s]\n        \"\"\"\n        if adjoint:\n            if gamma:\n                raise ValueError(\"Adjoint gamma flux is currently unsupported.\")\n            integratedFlux = self.p.adjMgFlux\n        elif gamma:\n            integratedFlux = self.p.mgFluxGamma\n        else:\n            integratedFlux = self.p.mgFlux\n\n        return np.array(integratedFlux)\n\n    def getLumpedFissionProductCollection(self):\n        \"\"\"\n        Get collection of LFP objects. 
Will work for global or block-level LFP models.\n\n        Returns\n        -------\n        lfps : LumpedFissionProduct\n            lfpName keys , lfp object values\n\n        See Also\n        --------\n        armi.physics.neutronics.fissionProductModel.lumpedFissionProduct.LumpedFissionProduct : LFP object\n        \"\"\"\n        return composites.ArmiObject.getLumpedFissionProductCollection(self)\n\n    def rotate(self, rad):\n        \"\"\"Function for rotating a block's spatially varying variables by a specified angle (radians).\n\n        Parameters\n        ----------\n        rad: float\n            Number (in radians) specifying the angle of counter clockwise rotation.\n        \"\"\"\n        raise NotImplementedError\n\n    def setAxialExpTargetComp(self, targetComponent):\n        \"\"\"Sets the targetComponent for the axial expansion changer.\n\n        .. impl:: Set the target axial expansion components on a given block.\n            :id: I_ARMI_MANUAL_TARG_COMP\n            :implements: R_ARMI_MANUAL_TARG_COMP\n\n            Sets the ``axialExpTargetComponent`` parameter on the block to the name of the Component\n            which is passed in. 
This is then used by the\n            :py:class:`~armi.reactor.converters.axialExpansionChanger.AxialExpansionChanger`\n            class during axial expansion.\n\n            This method is typically called from within\n            :py:meth:`~armi.reactor.blueprints.blockBlueprint.BlockBlueprint.construct` during the\n            process of building a Block from the blueprints.\n\n        Parameter\n        ---------\n        targetComponent: :py:class:`Component <armi.reactor.components.component.Component>` object\n            Component specified to be target component for axial expansion changer\n        \"\"\"\n        self.p.axialExpTargetComponent = targetComponent.name\n\n    def getPinLocations(self) -> list[grids.IndexLocation]:\n        \"\"\"Produce all the index locations for pins in the block.\n\n        Returns\n        -------\n        list[grids.IndexLocation]\n            Integer locations where pins can be found in the block.\n\n        Notes\n        -----\n        Only components with ``Flags.CLAD`` are considered to define a pin's location.\n\n        See Also\n        --------\n        :meth:`getPinCoordinates` - companion for this method.\n        \"\"\"\n        items = []\n        for clad in self.iterChildrenWithFlags(Flags.CLAD):\n            if isinstance(clad.spatialLocator, grids.MultiIndexLocation):\n                items.extend(clad.spatialLocator)\n            else:\n                items.append(clad.spatialLocator)\n        return items\n\n    def getPinCoordinates(self) -> np.ndarray:\n        \"\"\"\n        Compute the local centroid coordinates of any pins in this block.\n\n        The pins must have a CLAD-flagged component for this to work.\n\n        Returns\n        -------\n        localCoords : numpy.ndarray\n            ``(N, 3)`` array of coordinates for pins locations. ``localCoords[i]`` contains a triplet of\n            the x, y, z location for pin ``i``. 
Ordered according to how they are listed as children\n\n        See Also\n        --------\n        :meth:`getPinLocations` - companion for this method\n        \"\"\"\n        indices = self.getPinLocations()\n        coords = [location.getLocalCoordinates() for location in indices]\n        return np.array(coords)\n\n    def getTotalEnergyGenerationConstants(self):\n        \"\"\"\n        Get the total energy generation group constants for a block.\n\n        Gives the total energy generation rates when multiplied by the multigroup flux.\n\n        Returns\n        -------\n        totalEnergyGenConstant: np.ndarray\n            Total (fission + capture) energy generation group constants (Joules/cm)\n        \"\"\"\n        return self.getFissionEnergyGenerationConstants() + self.getCaptureEnergyGenerationConstants()\n\n    def getFissionEnergyGenerationConstants(self):\n        \"\"\"\n        Get the fission energy generation group constants for a block.\n\n        Gives the fission energy generation rates when multiplied by the multigroup flux.\n\n        Returns\n        -------\n        fissionEnergyGenConstant: np.ndarray\n            Energy generation group constants (Joules/cm)\n\n        Raises\n        ------\n        RuntimeError:\n            Reports if a cross section library is not assigned to a reactor.\n        \"\"\"\n        if not self.core.lib:\n            raise RuntimeError(\n                \"Cannot compute energy generation group constants without a library. 
Please ensure a library exists.\"\n            )\n\n        return xsCollections.computeFissionEnergyGenerationConstants(\n            self.getNumberDensities(), self.core.lib, self.getMicroSuffix()\n        )\n\n    def getCaptureEnergyGenerationConstants(self):\n        \"\"\"\n        Get the capture energy generation group constants for a block.\n\n        Gives the capture energy generation rates when multiplied by the multigroup flux.\n\n        Returns\n        -------\n        fissionEnergyGenConstant: np.ndarray\n            Energy generation group constants (Joules/cm)\n\n        Raises\n        ------\n        RuntimeError:\n            Reports if a cross section library is not assigned to a reactor.\n        \"\"\"\n        if not self.core.lib:\n            raise RuntimeError(\n                \"Cannot compute energy generation group constants without a library. Please ensure a library exists.\"\n            )\n\n        return xsCollections.computeCaptureEnergyGenerationConstants(\n            self.getNumberDensities(), self.core.lib, self.getMicroSuffix()\n        )\n\n    def getNeutronEnergyDepositionConstants(self):\n        \"\"\"\n        Get the neutron energy deposition group constants for a block.\n\n        Returns\n        -------\n        energyDepConstants: np.ndarray\n            Neutron energy generation group constants (in Joules/cm)\n\n        Raises\n        ------\n        RuntimeError:\n            Reports if a cross section library is not assigned to a reactor.\n        \"\"\"\n        if not self.core.lib:\n            raise RuntimeError(\n                \"Cannot get neutron energy deposition group constants without \"\n                \"a library. 
Please ensure a library exists.\"\n            )\n\n        return xsCollections.computeNeutronEnergyDepositionConstants(\n            self.getNumberDensities(), self.core.lib, self.getMicroSuffix()\n        )\n\n    def getGammaEnergyDepositionConstants(self):\n        \"\"\"\n        Get the gamma energy deposition group constants for a block.\n\n        Returns\n        -------\n        energyDepConstants: np.ndarray\n            Energy generation group constants (in Joules/cm)\n\n        Raises\n        ------\n        RuntimeError:\n            Reports if a cross section library is not assigned to a reactor.\n        \"\"\"\n        if not self.core.lib:\n            raise RuntimeError(\n                \"Cannot get gamma energy deposition group constants without a library. Please ensure a library exists.\"\n            )\n\n        return xsCollections.computeGammaEnergyDepositionConstants(\n            self.getNumberDensities(), self.core.lib, self.getMicroSuffix()\n        )\n\n    def getBoronMassEnrich(self):\n        \"\"\"Return B-10 mass fraction.\"\"\"\n        b10 = self.getMass(\"B10\")\n        b11 = self.getMass(\"B11\")\n        total = b11 + b10\n        if total == 0.0:\n            return 0.0\n        return b10 / total\n\n    def getUraniumMassEnrich(self):\n        \"\"\"Returns fissile mass fraction of uranium.\"\"\"\n        totalU = self.getMass(\"U\")\n        if totalU < 1e-10:\n            return 0.0\n\n        fissileU = self.getMass([\"U233\", \"U235\"])\n        return fissileU / totalU\n\n    def getInputHeight(self) -> float:\n        \"\"\"Determine the input height from blueprints.\n\n        Returns\n        -------\n        float\n            Height for this block pulled from the blueprints.\n\n        Raises\n        ------\n        AttributeError\n            If no ancestor of this block contains the input blueprints. 
Blueprints are usually\n            stored on the reactor object, which is typically an ancestor of the block\n            (block -> assembly -> core -> reactor). However, this may be the case when creating\n            blocks from scratch in testing where the entire composite tree may not exist.\n        \"\"\"\n        ancestorWithBp = self.getAncestor(lambda o: getattr(o, \"blueprints\", None) is not None)\n        if ancestorWithBp is not None:\n            bp = ancestorWithBp.blueprints\n            assemDesign = bp.assemDesigns[self.parent.getType()]\n            heights = assemDesign.height\n            myIndex = self.parent.index(self)\n            return heights[myIndex]\n\n        raise AttributeError(f\"No ancestor of {self} has blueprints\")\n\n    def sort(self):\n        \"\"\"Sort the children on this block.\n\n        If there is a spatial grid, the previous pin indices on the components\n        is now invalid because the ordering of :meth:`getPinLocations` has maybe\n        changed since the ordering of components has changed. Reassign the pin\n        indices via :meth:`assignPinIndices` accordingly.\n        \"\"\"\n        super().sort()\n        if self.spatialGrid is not None:\n            self.assignPinIndices()\n"
  },
  {
    "path": "armi/reactor/blocks/cartesianBlock.py",
    "content": "# Copyright 2026 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Cartesian blocks can be square or more generically rectangular in cross section.\"\"\"\n\nimport math\n\nfrom armi.reactor import components\nfrom armi.reactor.blocks.block import Block\nfrom armi.reactor.flags import Flags\n\n\nclass CartesianBlock(Block):\n    \"\"\"\n    A Cartesian Block is a vertical slice of an Assembly which is laid out on a Cartesian grid. That is, a grid that is\n    square or rectangular.\n\n    A Cartesian grid can have an origin that is in the middle of a grid cell:\n\n    +---------+--------+--------+\n    |         |        |        |\n    | (-1,1)  | (0,1)  | (1,1)  |\n    |         |        |        |\n    +---------+--------+--------+\n    |         |        |        |\n    | (-1,0)  | (0,0)  | (1,0)  |\n    |         |        |        |\n    +---------+--------+--------+\n    |         |        |        |\n    | (-1,-1) | (0,-1) | (1,-1) |\n    |         |        |        |\n    +---------+--------+--------+\n\n    Or the grid cells can be aligned so the origin is between the grid cells:\n\n    +---------+---------+--------+--------+\n    |         |         |        |        |\n    | (-2,1)  | (-1,1)  | (0,1)  | (1,1)  |\n    |         |         |        |        |\n    +---------+---------+--------+--------+\n    |         |         |        |        |\n    | (-2,0)  | (-1,0)  | (0,0)  | (1,0)  |\n    |         |         
|        |        |\n    +---------+---------+--------+--------+\n    |         |         |        |        |\n    | (-2,-1) | (-1,-1) | (0,-1) | (1,-1) |\n    |         |         |        |        |\n    +---------+---------+--------+--------+\n    |         |         |        |        |\n    | (-2,-2) | (-1,-2) | (0,-2) | (1,-2) |\n    |         |         |        |        |\n    +---------+---------+--------+--------+\n    \"\"\"\n\n    PITCH_DIMENSION = \"widthOuter\"\n    PITCH_COMPONENT_TYPE = components.Rectangle\n\n    def getMaxArea(self):\n        \"\"\"Get area of this block if it were totally full.\"\"\"\n        xw, yw = self.getPitch()\n        return xw * yw\n\n    def setPitch(self, val, updateBolParams=False):\n        raise NotImplementedError(\"Directly setting the pitch of a cartesian block is currently not supported.\")\n\n    def getSymmetryFactor(self):\n        \"\"\"Return a factor between 1 and N where 1/N is how much cut-off by symmetry lines this mesh cell is.\"\"\"\n        if self.core is not None:\n            indices = self.spatialLocator.getCompleteIndices()\n            if self.core.symmetry.isThroughCenterAssembly:\n                if indices[0] == 0 and indices[1] == 0:\n                    # central location\n                    return 4.0\n                elif indices[0] == 0 or indices[1] == 0:\n                    # edge location\n                    return 2.0\n\n        return 1.0\n\n    def getPinCenterFlatToFlat(self, cold=False):\n        \"\"\"Return the flat-to-flat distance between the centers of opposing pins (corner-2-corner) in the outer ring.\"\"\"\n        clad = self.getComponent(Flags.CLAD)\n        nRings = self.numRingsToHoldNumCells(clad.getDimension(\"mult\"))\n        pinPitch = self.getPinPitch(cold=cold)\n        pinPitchDist = math.sqrt(pinPitch[0] ** 2 + pinPitch[1] ** 2)\n\n        if self.core.symmetry.isThroughCenterAssembly:\n            return 2 * (nRings - 1) * pinPitchDist\n        else:\n       
     return ((2 * nRings) - 1) * pinPitchDist\n\n    def getNumCellsGivenRings(self, nRings: int):\n        \"\"\"Calculate the number of cells in a Cartesian grid with a given number of rings.\n\n        The logic here is separated out into two scenarios: one for when the origin is inside the center grid cell and\n        one where the origin is on the line between grid cells.\n        \"\"\"\n        if self.core.symmetry.isThroughCenterAssembly:\n            return (2 * nRings - 1) ** 2\n        else:\n            return (2 * nRings) ** 2\n\n    def numRingsToHoldNumCells(self, nCells: int):\n        \"\"\"Calculate the number of rings needed in a Cartesian grid to hold a given number of cells.\n\n        The logic here is separated out into two scenarios: one for when the origin is inside the center grid cell and\n        one where the origin is on the line between grid cells.\n        \"\"\"\n        if self.core.symmetry.isThroughCenterAssembly:\n            return math.ceil((math.sqrt(nCells) + 1) / 2.0)\n        else:\n            return math.ceil(math.sqrt(nCells) / 2.0)\n"
  },
  {
    "path": "armi/reactor/blocks/hexBlock.py",
    "content": "# Copyright 2026 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"The HexBlock is a vertical slice of a hexagon-shaped assembly. This is a common geometry in reactor design.\"\"\"\n\nimport copy\nimport functools\nimport math\nimport operator\nfrom typing import Callable, ClassVar, Optional, Tuple, Type\n\nimport numpy as np\n\nfrom armi import runLog\nfrom armi.physics.neutronics import GAMMA, NEUTRON\nfrom armi.reactor import components, geometry, grids\nfrom armi.reactor.blocks.block import Block\nfrom armi.reactor.components.basicShapes import Circle, Hexagon\nfrom armi.reactor.components.complexShapes import Helix\nfrom armi.reactor.flags import Flags\nfrom armi.reactor.parameters import ParamLocation\nfrom armi.utils import hexagon, iterables, units\n\n_PitchDefiningComponent = Optional[Tuple[Type[components.Component], ...]]\n\n\nclass HexBlock(Block):\n    \"\"\"\n    Defines a Block shaped like a hexagon.\n\n    .. impl:: ARMI has the ability to create hex shaped blocks.\n        :id: I_ARMI_BLOCK_HEX\n        :implements: R_ARMI_BLOCK_HEX\n\n        This class defines hexagonal-shaped Blocks. 
It inherits functionality from the parent class,\n        Block, and defines hexagonal-specific methods including, but not limited to, querying pin\n        pitch, pin linear power densities, hydraulic diameter, and retrieving inner and outer pitch.\n    \"\"\"\n\n    PITCH_COMPONENT_TYPE: ClassVar[_PitchDefiningComponent] = (components.Hexagon,)\n\n    def __init__(self, name, height=1.0):\n        Block.__init__(self, name, height)\n\n    def coords(self):\n        \"\"\"\n        Returns the coordinates of the block.\n\n        .. impl:: Coordinates of a block are queryable.\n            :id: I_ARMI_BLOCK_POSI2\n            :implements: R_ARMI_BLOCK_POSI\n\n            Calls to the :py:meth:`~armi.reactor.grids.locations.IndexLocation.getGlobalCoordinates`\n            method of the block's ``spatialLocator`` attribute, which recursively calls itself on\n            all parents of the block to get the coordinates of the block's centroid in 3D cartesian\n            space.\n\n            Will additionally adjust the x and y coordinates based on the block parameters\n            ``displacementX`` and ``displacementY``.\n        \"\"\"\n        x, y, _z = self.spatialLocator.getGlobalCoordinates()\n        x += self.p.displacementX * 100.0\n        y += self.p.displacementY * 100.0\n        return (\n            round(x, units.FLOAT_DIMENSION_DECIMALS),\n            round(y, units.FLOAT_DIMENSION_DECIMALS),\n        )\n\n    def createHomogenizedCopy(self, pinSpatialLocators=False):\n        \"\"\"\n        Create a new homogenized copy of a block that is less expensive than a full deepcopy.\n\n        .. impl:: Block compositions can be homogenized.\n            :id: I_ARMI_BLOCK_HOMOG\n            :implements: R_ARMI_BLOCK_HOMOG\n\n            This method creates and returns a homogenized representation of itself in the form of a\n            new Block. The homogenization occurs in the following manner. 
A single Hexagon Component\n            is created and added to the new Block. This Hexagon Component is given the\n            :py:class:`armi.materials.mixture._Mixture` material and a volume averaged temperature\n            (``getAverageTempInC``). The number densities of the original Block are also stored on\n            this new Component (:need:`I_ARMI_CMP_GET_NDENS`). Several parameters from the original\n            block are copied onto the homogenized block (e.g., macros, lumped fission products,\n            burnup group, number of pins, and spatial grid).\n\n        Notes\n        -----\n        This can be used to improve performance when a new copy of a reactor needs to be built, but\n        the full detail of the block (including component geometry, material, number density, etc.)\n        is not required for the targeted physics solver being applied to the new reactor model.\n\n        The main use case is for the uniform mesh converter (UMC). Frequently, a deterministic\n        neutronics solver will require a uniform mesh reactor, which is produced by the UMC. Many\n        deterministic solvers for fast spectrum reactors will also treat the individual blocks as\n        homogenized mixtures. Since the neutronics solver does not need to know about the geometric\n        and material details of the individual child components within a block, we can save\n        significant effort while building the uniform mesh reactor with the UMC by omitting this\n        detailed data and only providing the necessary level of detail for the uniform mesh reactor:\n        number densities on each block.\n\n        Individual components within a block can have different temperatures, and this can affect\n        cross sections. This temperature variation is captured by the lattice physics module. 
As\n        long as temperature distribution is correctly captured during cross section generation, it\n        does not need to be transferred to the neutronics solver directly through this copy\n        operation.\n\n        If you make a new block, you must add it to an assembly and a reactor.\n\n        Returns\n        -------\n        b : A homogenized block containing a single Hexagon Component that contains an average\n            temperature and the number densities from the original block.\n\n        See Also\n        --------\n        armi.reactor.converters.uniformMesh.UniformMeshGeometryConverter.makeAssemWithUniformMesh\n        \"\"\"\n        b = self.__class__(self.getName(), height=self.getHeight())\n        b.setType(self.getType(), self.p.flags)\n\n        # assign macros and LFP\n        b.macros = self.macros\n        b._lumpedFissionProducts = self._lumpedFissionProducts\n        b.p.envGroup = self.p.envGroup\n\n        hexComponent = Hexagon(\n            \"homogenizedHex\",\n            \"_Mixture\",\n            self.getAverageTempInC(),\n            self.getAverageTempInC(),\n            self._pitchDefiningComponent[1],\n        )\n        hexComponent.setNumberDensities(self.getNumberDensities())\n        b.add(hexComponent)\n\n        b.p.nPins = self.p.nPins\n        if pinSpatialLocators:\n            # create a null component with cladding flags and spatialLocator from source block's\n            # clad components in case pin locations need to be known for physics solver\n            if self.hasComponents(Flags.CLAD):\n                cladComponents = self.getComponents(Flags.CLAD)\n                for i, clad in enumerate(cladComponents):\n                    pinComponent = Circle(\n                        f\"voidPin{i}\",\n                        \"Void\",\n                        self.getAverageTempInC(),\n                        self.getAverageTempInC(),\n                        0.0,\n                    )\n                    
pinComponent.setType(\"pin\", Flags.CLAD)\n                    pinComponent.spatialLocator = copy.deepcopy(clad.spatialLocator)\n                    if isinstance(pinComponent.spatialLocator, grids.MultiIndexLocation):\n                        for i1, i2 in zip(list(pinComponent.spatialLocator), list(clad.spatialLocator)):\n                            i1.associate(i2.grid)\n                    pinComponent.setDimension(\"mult\", clad.getDimension(\"mult\"))\n                    b.add(pinComponent)\n\n        if self.spatialGrid is not None:\n            b.spatialGrid = self.spatialGrid\n\n        return b\n\n    def getMaxArea(self):\n        \"\"\"Compute the max area of this block if it was totally full.\"\"\"\n        pitch = self.getPitch()\n        if not pitch:\n            return 0.0\n        return hexagon.area(pitch)\n\n    def getDuctIP(self):\n        \"\"\"Returns the duct IP dimension.\"\"\"\n        duct = self.getComponent(Flags.DUCT, exact=True)\n        return duct.getDimension(\"ip\")\n\n    def getDuctOP(self):\n        \"\"\"Returns the duct OP dimension.\"\"\"\n        duct = self.getComponent(Flags.DUCT, exact=True)\n        return duct.getDimension(\"op\")\n\n    def setPinPowers(self, powers, powerKeySuffix=\"\"):\n        \"\"\"\n        Updates the pin linear power densities of this block.\n\n        The linear densities are represented by the ``linPowByPin`` parameter.\n\n        It is expected that the ordering of ``powers`` is consistent with :meth:`getPinLocations`. That helps ensure\n        alignment with component-level look ups like :meth:`~armi.reactor.components.Circle.getPinIndices`.\n\n        The ``linPowByPin`` parameter can be directly assigned to instead of using this method if the multiplicity of\n        the pins in the block is equal to the number of pins in the block.\n\n        Parameters\n        ----------\n        powers : list of floats, required\n            The block-level pin linear power densities. 
``powers[i]`` represents the average linear power density of pin\n            ``i`` location at ``self.getPinLocations()[i]``. The units of linear power density is watts/cm (i.e., watts\n            produced per cm of pin length).\n        powerKeySuffix: str, optional\n            Must be either an empty string, :py:const:`NEUTRON <armi.physics.neutronics.const.NEUTRON>`, or\n            :py:const:`GAMMA <armi.physics.neutronics.const.GAMMA>`. Defaults to empty string.\n        \"\"\"\n        numPins = self.getNumPins()\n        if not numPins or numPins != len(powers):\n            raise ValueError(\n                f\"Invalid power data for {self} with {numPins} pins. Got {len(powers)} entries in powers: {powers}\"\n            )\n\n        powerKey = f\"linPowByPin{powerKeySuffix}\"\n        self.p[powerKey] = powers\n\n        # If using the *powerKeySuffix* parameter, we also need to set total power, which is sum of neutron and gamma\n        # powers. We assume that a solo gamma calculation to set total power does not make sense.\n        if powerKeySuffix:\n            if powerKeySuffix == GAMMA:\n                if self.p[f\"linPowByPin{NEUTRON}\"] is None:\n                    msg = f\"Neutron power has not been set yet. Cannot set total power for {self}.\"\n                    raise UnboundLocalError(msg)\n                self.p.linPowByPin = self.p[f\"linPowByPin{NEUTRON}\"] + self.p[powerKey]\n            else:\n                self.p.linPowByPin = self.p[powerKey]\n\n    def rotate(self, rad: float):\n        \"\"\"\n        Rotates a block's spatially varying parameters by a specified angle in the counter-clockwise direction.\n\n        The parameters must have a ParamLocation of either CORNERS or EDGES and must be a Python list of length 6 in\n        order to be eligible for rotation; all parameters that do not meet these two criteria are not rotated.\n\n        .. 
impl:: Rotating a hex block updates parameters on the boundary, the orientation\n            parameter, and the spatial coordinates on contained objects.\n            :id: I_ARMI_ROTATE_HEX_BLOCK\n            :implements: R_ARMI_ROTATE_HEX\n\n            This method rotates a block on a hexagonal grid, conserving the 60-degree symmetry of the grid. It first\n            determines how many rotations the block will undergo based on the 60-degree hex grid. Then it uses that\n            \"rotation number\" to do a few things: reset the orientation parameter, rotate the children, and rotate the\n            boundary parameters. It also sets the \"displacement in X\" and \"displacement in Y\" parameters.\n\n        Parameters\n        ----------\n        rad: float, required\n            Angle of counter-clockwise rotation in units of radians. Rotations must be in 60-degree increments\n            (i.e., PI/3, 2 * PI/3, PI, 4 * PI/3, 5 * PI/3, and 2 * PI).\n        \"\"\"\n        rotNum = round((rad % (2 * math.pi)) / math.radians(60))\n        self._rotateChildLocations(rad, rotNum)\n        if self.p.orientation is None:\n            self.p.orientation = np.array([0.0, 0.0, 0.0])\n        self.p.orientation[2] += rotNum * 60.0\n        self._rotateBoundaryParameters(rotNum)\n        self._rotateDisplacement(rad)\n\n    def _rotateChildLocations(self, radians: float, rotNum: int):\n        \"\"\"Update spatial locators for children.\"\"\"\n        if self.spatialGrid is None:\n            return\n\n        locationRotator = functools.partial(self.spatialGrid.rotateIndex, rotations=rotNum)\n        rotationMatrix = np.array([[math.cos(radians), -math.sin(radians)], [math.sin(radians), math.cos(radians)]])\n        for c in self:\n            if isinstance(c.spatialLocator, grids.MultiIndexLocation):\n                newLocations = list(map(locationRotator, c.spatialLocator))\n                c.spatialLocator = grids.MultiIndexLocation(self.spatialGrid)\n               
 c.spatialLocator.extend(newLocations)\n            elif isinstance(c.spatialLocator, grids.CoordinateLocation):\n                oldCoords = c.spatialLocator.getLocalCoordinates()\n                newXY = rotationMatrix.dot(oldCoords[:2])\n                newLocation = grids.CoordinateLocation(newXY[0], newXY[1], oldCoords[2], self.spatialGrid)\n                c.spatialLocator = newLocation\n            elif isinstance(c.spatialLocator, grids.IndexLocation):\n                c.spatialLocator = locationRotator(c.spatialLocator)\n            elif c.spatialLocator is not None:\n                msg = f\"{c} on {self} has an invalid spatial locator for rotation: {c.spatialLocator}\"\n                runLog.error(msg)\n                raise TypeError(msg)\n\n    def _rotateBoundaryParameters(self, rotNum: int):\n        \"\"\"Rotate any parameters defined on the corners or edge of bounding hexagon.\n\n        Parameters\n        ----------\n        rotNum : int\n            Rotation number between zero and five, inclusive, specifying how many rotations have taken place.\n        \"\"\"\n        names = self.p.paramDefs.atLocation(ParamLocation.CORNERS).names\n        names += self.p.paramDefs.atLocation(ParamLocation.EDGES).names\n        for name in names:\n            original = self.p[name]\n            if isinstance(original, (list, np.ndarray)):\n                if len(original) == 6:\n                    # Rotate by making the -rotNum item be first\n                    self.p[name] = iterables.pivot(original, -rotNum)\n                elif len(original) == 0:\n                    # Hasn't been defined yet, no warning needed.\n                    pass\n                else:\n                    msg = (\n                        \"No rotation method defined for spatial parameters that aren't defined \"\n                        f\"once per hex edge/corner. 
No rotation performed on {name}\"\n                    )\n                    runLog.warning(msg)\n            elif isinstance(original, (int, float)):\n                # this is a scalar and there shouldn't be any rotation.\n                pass\n            elif original is None:\n                # param is not set yet. no rotations as well.\n                pass\n            else:\n                raise TypeError(\n                    f\"b.rotate() method received unexpected data type for {name} on block {self}\\n\"\n                    + f\"expected list, np.ndarray, int, or float. received {original}\"\n                )\n\n    def _rotateDisplacement(self, rad: float):\n        # This specifically uses the .get() functionality to avoid an error if this parameter does not exist.\n        dispx = self.p.get(\"displacementX\")\n        dispy = self.p.get(\"displacementY\")\n        if (dispx is not None) and (dispy is not None):\n            self.p.displacementX = dispx * math.cos(rad) - dispy * math.sin(rad)\n            self.p.displacementY = dispx * math.sin(rad) + dispy * math.cos(rad)\n\n    def verifyBlockDims(self):\n        \"\"\"Perform some checks on this type of block before it is assembled.\"\"\"\n        try:\n            wireComp = self.getComponent(Flags.WIRE, quiet=True)  # Quiet because None case is checked for below\n            ductComps = self.getComponents(Flags.DUCT)\n            cladComp = self.getComponent(Flags.CLAD, quiet=True)  # Quiet because None case is checked for below\n        except ValueError:\n            # there is probably more than one clad/wire, so we really don't know what this block looks like\n            runLog.info(f\"Block design {self} is too complicated to verify dimensions. 
Make sure they are correct!\")\n            return\n\n        # check wire wrap in contact with clad\n        if cladComp is not None and wireComp is not None:\n            wwCladGap = self.getWireWrapCladGap(cold=True)\n            if round(wwCladGap, 6) != 0.0:\n                runLog.warning(\n                    \"The gap between wire wrap and clad in block {} was {} cm. Expected 0.0.\".format(self, wwCladGap),\n                    single=True,\n                )\n\n        # check clad duct overlap\n        pinToDuctGap = self.getPinToDuctGap(cold=True)\n        # Allow for some tolerance; user input precision may lead to slight negative gaps\n        if pinToDuctGap is not None and pinToDuctGap < -0.005:\n            raise ValueError(\n                \"Gap between pins and duct is {0:.4f} cm in {1}. Make more room.\".format(pinToDuctGap, self)\n            )\n        elif pinToDuctGap is None:\n            # only produce a warning if pin or clad are found, but not all of pin, clad and duct. 
We may need to tune\n            # this logic a bit\n            ductComp = next(iter(ductComps), None)\n            if (cladComp is not None or wireComp is not None) and any(\n                [c is None for c in (wireComp, cladComp, ductComp)]\n            ):\n                runLog.warning(\"Some component was missing in {} so pin-to-duct gap not calculated\".format(self))\n\n    def getPinToDuctGap(self, cold=False):\n        \"\"\"\n        Returns the distance in cm between the outer most pin and the duct in a block.\n\n        Parameters\n        ----------\n        cold : boolean\n            Determines whether the results should be cold or hot dimensions.\n\n        Returns\n        -------\n        pinToDuctGap : float\n            Returns the diameteral gap between the outer most pins in a hex pack to the duct inner face to face in cm.\n        \"\"\"\n        wire = self.getComponent(Flags.WIRE, quiet=True)  # Quiet because None case is checked for below\n        ducts = sorted(self.getChildrenWithFlags(Flags.DUCT))\n        duct = None\n        if any(ducts):\n            duct = ducts[0]\n            if not isinstance(duct, components.Hexagon):\n                # getPinCenterFlatToFlat only works for hexes\n                # inner most duct might be circle or some other shape\n                duct = None\n            elif isinstance(duct, components.HoledHexagon):\n                # has no ip and is circular on inside so following\n                # code will not work\n                duct = None\n        clad = self.getComponent(Flags.CLAD, quiet=True)  # Quiet because None case is checked for below\n        if any(c is None for c in (duct, wire, clad)):\n            return None\n\n        # NOTE: If nRings was a None, this could be for a non-hex packed fuel assembly see thermal hydraulic design\n        # basis for description of equation\n        pinCenterFlatToFlat = self.getPinCenterFlatToFlat(cold=cold)\n        pinOuterFlatToFlat = (\n            
pinCenterFlatToFlat + clad.getDimension(\"od\", cold=cold) + 2.0 * wire.getDimension(\"od\", cold=cold)\n        )\n        ductMarginToContact = duct.getDimension(\"ip\", cold=cold) - pinOuterFlatToFlat\n        pinToDuctGap = ductMarginToContact / 2.0\n\n        return pinToDuctGap\n\n    def getRotationNum(self) -> int:\n        \"\"\"Get index 0 through 5 indicating number of rotations counterclockwise around the z-axis.\"\"\"\n        # assume rotation only in Z\n        return np.rint(self.p.orientation[2] / 360.0 * 6) % 6\n\n    def setRotationNum(self, rotNum: int):\n        \"\"\"\n        Set orientation based on a number 0 through 5 indicating number of rotations\n        counterclockwise around the z-axis.\n        \"\"\"\n        self.p.orientation[2] = 60.0 * rotNum\n\n    def getSymmetryFactor(self):\n        \"\"\"\n        Return a factor between 1 and N where 1/N is how much cut-off by symmetry lines this mesh cell is.\n\n        Reactor-level meshes have symmetry information so we have a reactor for this to work. That is why it is not\n        implemented on the grid/locator level.\n\n        When edge-assemblies are included on both edges (i.e. MCNP or DIF3D-FD 1/3-symmetric cases), the edge assemblies\n        have symmetry factors of 2.0. 
Otherwise (DIF3D-nodal) there's a full assembly on the bottom edge (overhanging)\n        and no assembly at the top edge so the ones at the bottom are considered full (symmetryFactor=1).\n\n        If this block is not in any grid at all, then there can be no symmetry so return 1.\n        \"\"\"\n        try:\n            symmetry = self.parent.spatialLocator.grid.symmetry\n        except Exception:\n            return 1.0\n        if symmetry.domain == geometry.DomainType.THIRD_CORE and symmetry.boundary == geometry.BoundaryType.PERIODIC:\n            indices = self.spatialLocator.getCompleteIndices()\n            if indices[0] == 0 and indices[1] == 0:\n                # central location\n                return 3.0\n            else:\n                symmetryLine = self.core.spatialGrid.overlapsWhichSymmetryLine(indices)\n                # Detect if upper edge assemblies are included. Doing this is the only way to know definitively whether\n                # or not the edge assemblies are half-assems or full. Seeing the first one is the easiest way to detect\n                # them. Check it last in the and statement so we don't waste time doing it.\n                upperEdgeLoc = self.core.spatialGrid[-1, 2, 0]\n                if symmetryLine in [\n                    grids.BOUNDARY_0_DEGREES,\n                    grids.BOUNDARY_120_DEGREES,\n                ] and bool(self.core.childrenByLocator.get(upperEdgeLoc)):\n                    return 2.0\n        return 1.0\n\n    def autoCreateSpatialGrids(self, systemSpatialGrid=None):\n        \"\"\"\n        Given a block without a spatialGrid, create a spatialGrid and give its children the corresponding\n        spatialLocators (if it is a simple block).\n\n        In this case, a simple block would be one that has either multiplicity of components equal to 1 or N but no\n        other multiplicities. 
Also, this should only happen when N fits exactly into a given number of hex rings.\n        Otherwise, do not create a grid for this block.\n\n        Parameters\n        ----------\n        systemSpatialGrid : Grid, optional\n            Spatial Grid of the system-level parent of this Assembly that contains this Block.\n\n        Notes\n        -----\n        When a hex grid has another hex grid nested inside it, the nested grid has the opposite orientation (corners vs\n        flats up). This method takes care of that.\n\n        If components inside this block are multiplicity 1, they get a single locator at the center of the grid cell. If\n        the multiplicity is greater than 1, all the components are added to a multiIndexLocation on the hex grid.\n\n        Raises\n        ------\n        ValueError\n            If the multiplicities of the block are not only 1 or N or if generated ringNumber leads to more positions\n            than necessary.\n        \"\"\"\n        # not necessary\n        if self.spatialGrid is not None:\n            return\n\n        # Check multiplicities\n        mults = {c.getDimension(\"mult\") for c in self.iterComponents()}\n\n        # Do some validation: Should we try to create a spatial grid?\n        multz = {float(m) for m in mults}\n        if len(multz) == 1 and 1.0 in multz:\n            runLog.extra(\n                f\"Block {self.p.type} does not need a spatial grid: multiplicities are all 1.\",\n                single=True,\n            )\n            return\n        elif len(multz) != 2 or 1.0 not in multz:\n            runLog.extra(\n                f\"Could not create a spatialGrid for block {self.p.type}, multiplicities are not {{1, N}} \"\n                f\"they are {mults}\",\n                single=True,\n            )\n            return\n\n        # build the grid, from pitch and orientation\n        if isinstance(systemSpatialGrid, grids.HexGrid):\n            cornersUp = not 
systemSpatialGrid.cornersUp\n        else:\n            cornersUp = False\n\n        grid = grids.HexGrid.fromPitch(\n            self.getPinPitch(cold=True),\n            numRings=0,\n            armiObject=self,\n            cornersUp=cornersUp,\n        )\n\n        ringNumber = hexagon.numRingsToHoldNumCells(self.getNumPins())\n        numLocations = 0\n        for ring in range(ringNumber):\n            numLocations = numLocations + hexagon.numPositionsInRing(ring + 1)\n\n        if numLocations != self.getNumPins():\n            raise ValueError(\n                \"Cannot create spatialGrid, number of locations in rings {} not equal to pin number {}\".format(\n                    numLocations, self.getNumPins()\n                )\n            )\n\n        # set the spatial position of the sub-block components\n        spatialLocators = grids.MultiIndexLocation(grid=grid)\n        for ring in range(ringNumber):\n            for pos in range(grid.getPositionsInRing(ring + 1)):\n                i, j = grid.getIndicesFromRingAndPos(ring + 1, pos + 1)\n                spatialLocators.append(grid[i, j, 0])\n\n        # finally, fill the spatial grid, and put the sub-block components on it\n        if self.spatialGrid is None:\n            self.spatialGrid = grid\n            for c in self:\n                if c.getDimension(\"mult\") > 1:\n                    c.spatialLocator = spatialLocators\n                elif c.getDimension(\"mult\") == 1:\n                    c.spatialLocator = grids.CoordinateLocation(0.0, 0.0, 0.0, grid)\n\n    def assignPinIndices(self):\n        \"\"\"Assign pin indices for pin components on the block.\"\"\"\n        if self.spatialGrid is None:\n            return\n        locations = self.getPinLocations()\n        if not locations:\n            return\n        # Clear out any previous values. 
If your block is built with one ordering\n        # and then sorted, things that used to have pin indices may now have invalid\n        # pin indices. Wipe them out just to be safe\n        for c in self:\n            c.p.pinIndices = None\n        ijGetter = operator.attrgetter(\"i\", \"j\")\n        allIJ: tuple[tuple[int, int]] = tuple(map(ijGetter, locations))\n        # Flags for components that we want to set this parameter\n        # Usually things are linked to one of these \"important\" flags, like\n        # a cladding component having linked dimensions to a fuel component\n        primaryFlags = (Flags.FUEL, Flags.CONTROL, Flags.SHIELD)\n        withPinIndices: list[components.Component] = []\n        for c in self.iterChildrenWithFlags(primaryFlags):\n            if self._setPinIndices(c, ijGetter, allIJ):\n                withPinIndices.append(c)\n        # Iterate over every other thing on the grid and make sure\n        # 1) it share a lattice site with something that has pin indices, or\n        # 2) it itself declares the pin indices\n        for c in self:\n            if c.p.pinIndices is not None:\n                continue\n            # Does anything with pin indices share this lattice site?\n            if any(other.spatialLocator == c.spatialLocator for other in withPinIndices):\n                continue\n            if self._setPinIndices(c, ijGetter, allIJ):\n                withPinIndices.append(c)\n\n    @staticmethod\n    def _setPinIndices(\n        c: components.Component, ijGetter: Callable[[grids.IndexLocation], tuple[int, int]], allIJ: tuple[int, int]\n    ):\n        localLocations = c.spatialLocator\n        if isinstance(localLocations, grids.MultiIndexLocation):\n            localIJ = list(map(ijGetter, localLocations))\n        # CoordinateLocations do not live on the grid, by definition\n        elif isinstance(localLocations, grids.CoordinateLocation):\n            return False\n        elif isinstance(localLocations, 
grids.IndexLocation):\n            localIJ = [ijGetter(localLocations)]\n        else:\n            return False\n        localIndices = list(map(allIJ.index, localIJ))\n        c.p.pinIndices = localIndices\n        return True\n\n    def getPinCenterFlatToFlat(self, cold=False):\n        \"\"\"Return the flat-to-flat distance between the centers of opposing pins in the outermost ring.\"\"\"\n        clad = self.getComponent(Flags.CLAD)\n        nRings = hexagon.numRingsToHoldNumCells(clad.getDimension(\"mult\"))\n        pinPitch = self.getPinPitch(cold=cold)\n        pinCenterCornerToCorner = 2 * (nRings - 1) * pinPitch\n        pinCenterFlatToFlat = math.sqrt(3.0) / 2.0 * pinCenterCornerToCorner\n        return pinCenterFlatToFlat\n\n    def hasPinPitch(self):\n        \"\"\"Return True if the block has enough information to calculate pin pitch.\"\"\"\n        try:\n            return (self.getComponent(Flags.CLAD, quiet=True) is not None) and (\n                self.getComponent(Flags.WIRE, quiet=True) is not None\n            )\n        except ValueError:\n            # not well defined pitch due to multiple pin and/or wire components\n            return False\n\n    def getPinPitch(self, cold=False):\n        \"\"\"\n        Get the pin pitch in cm.\n\n        Assumes that the pin pitch is defined entirely by contacting cladding tubes and wire wraps.\n        Grid spacers not yet supported.\n\n        Parameters\n        ----------\n        cold : boolean\n            Determines whether the dimensions should be cold or hot\n\n        Returns\n        -------\n        pinPitch : float\n            pin pitch in cm\n        \"\"\"\n        try:\n            clad = self.getComponent(Flags.CLAD, quiet=True)  # Quiet because None case is checked for below\n            wire = self.getComponent(Flags.WIRE, quiet=True)  # Quiet because None case is checked for below\n        except ValueError:\n            raise ValueError(f\"Block {self} has multiple clad and wire 
components, so pin pitch is not well-defined.\")\n\n        if wire and clad:\n            return clad.getDimension(\"od\", cold=cold) + wire.getDimension(\"od\", cold=cold)\n        else:\n            raise ValueError(f\"Cannot get pin pitch in {self} because it does not have a wire and a clad\")\n\n    def getWettedPerimeter(self):\n        \"\"\"\n        Return the total wetted perimeter of the block in cm.\n\n        Notes\n        -----\n        Please be aware that this method is specific to Fast Reactors, and probably even Sodium Fast Reactors. This is\n        obviously an awkward design choice, and we hope to improve upon it soon.\n        \"\"\"\n        # flags pertaining to hexagon components where the interior of the hexagon is wetted\n        wettedHollowHexagonComponentFlags = (\n            Flags.DUCT,\n            Flags.GRID_PLATE,\n            Flags.INLET_NOZZLE,\n            Flags.HANDLING_SOCKET,\n            Flags.DUCT | Flags.DEPLETABLE,\n            Flags.GRID_PLATE | Flags.DEPLETABLE,\n            Flags.INLET_NOZZLE | Flags.DEPLETABLE,\n            Flags.HANDLING_SOCKET | Flags.DEPLETABLE,\n        )\n\n        # flags pertaining to circular pin components where the exterior of the circle is wetted\n        wettedPinComponentFlags = (\n            Flags.CLAD,\n            Flags.WIRE,\n        )\n\n        # flags pertaining to components where both the interior and exterior are wetted\n        wettedHollowComponentFlags = (\n            Flags.DUCT | Flags.INNER,\n            Flags.DUCT | Flags.INNER | Flags.DEPLETABLE,\n        )\n\n        # obtain all wetted components based on type\n        wettedHollowHexagonComponents = []\n        for flag in wettedHollowHexagonComponentFlags:\n            c = self.getComponent(flag, exact=True)\n            wettedHollowHexagonComponents.append(c) if c else None\n\n        wettedPinComponents = []\n        for flag in wettedPinComponentFlags:\n            comps = self.getComponents(flag)\n            
wettedPinComponents.extend(comps)\n\n        wettedHollowCircleComponents = []\n        wettedHollowHexComponents = []\n        for flag in wettedHollowComponentFlags:\n            c = self.getComponent(flag, exact=True)\n            if isinstance(c, Hexagon):\n                wettedHollowHexComponents.append(c) if c else None\n            else:\n                wettedHollowCircleComponents.append(c) if c else None\n\n        # calculate wetted perimeters according to their geometries\n        # hollow hexagon = 6 * ip / sqrt(3)\n        wettedHollowHexagonPerimeter = 0.0\n        for c in wettedHollowHexagonComponents:\n            wettedHollowHexagonPerimeter += 6 * c.getDimension(\"ip\") / math.sqrt(3) if c else 0.0\n\n        # solid circle = NumPins * pi * (Comp Diam + Wire Diam)\n        wettedPinPerimeter = 0.0\n        for c in wettedPinComponents:\n            correctionFactor = 1.0\n            if isinstance(c, Helix):\n                # account for the helical wire wrap\n                correctionFactor = np.hypot(\n                    1.0,\n                    math.pi * c.getDimension(\"helixDiameter\") / c.getDimension(\"axialPitch\"),\n                )\n            compWettedPerim = c.getDimension(\"od\") * correctionFactor * c.getDimension(\"mult\") * math.pi\n            wettedPinPerimeter += compWettedPerim\n\n        # hollow circle = (id + od) * pi\n        wettedHollowCirclePerimeter = 0.0\n        for c in wettedHollowCircleComponents:\n            wettedHollowCirclePerimeter += c.getDimension(\"id\") + c.getDimension(\"od\") if c else 0.0\n        wettedHollowCirclePerimeter *= math.pi\n\n        # hollow hexagon = 6 * (ip + op) / sqrt(3)\n        wettedHollowHexPerimeter = 0.0\n        for c in wettedHollowHexComponents:\n            wettedHollowHexPerimeter += c.getDimension(\"ip\") + c.getDimension(\"op\") if c else 0.0\n        wettedHollowHexPerimeter *= 6 / math.sqrt(3)\n\n        return (\n            wettedHollowHexagonPerimeter + 
wettedPinPerimeter + wettedHollowCirclePerimeter + wettedHollowHexPerimeter\n        )\n\n    def getFlowArea(self):\n        \"\"\"Return the total flowing coolant area of the block in cm^2.\"\"\"\n        area = self.getComponent(Flags.COOLANT, exact=True).getArea()\n        for c in self.getComponents(Flags.INTERDUCTCOOLANT, exact=True):\n            area += c.getArea()\n\n        return area\n\n    def getHydraulicDiameter(self):\n        \"\"\"\n        Return the hydraulic diameter in this block in cm.\n\n        Hydraulic diameter is 4A/P where A is the flow area and P is the wetted perimeter. In a hex assembly, the wetted\n        perimeter includes the cladding, the wire wrap, and the inside of the duct. The flow area is the inner area of\n        the duct minus the area of the pins and the wire.\n        \"\"\"\n        return 4.0 * self.getFlowArea() / self.getWettedPerimeter()\n"
  },
  {
    "path": "armi/reactor/blocks/thRZBlock.py",
    "content": "# Copyright 2026 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A simple base class to help define blocks in a Theta-R-Z geometry.\"\"\"\n\nfrom armi.reactor.blocks.block import Block\n\n\nclass ThRZBlock(Block):\n    def getMaxArea(self):\n        \"\"\"Return the area of the Theta-R-Z block if it was totally full.\"\"\"\n        raise NotImplementedError(\"Cannot get max area of a TRZ block. Fully specify your geometry.\")\n\n    def radialInner(self):\n        \"\"\"Return a smallest radius of all the components.\"\"\"\n        innerRadii = self.getDimensions(\"inner_radius\")\n        smallestInner = min(innerRadii) if innerRadii else None\n        return smallestInner\n\n    def radialOuter(self):\n        \"\"\"Return a largest radius of all the components.\"\"\"\n        outerRadii = self.getDimensions(\"outer_radius\")\n        largestOuter = max(outerRadii) if outerRadii else None\n        return largestOuter\n\n    def thetaInner(self):\n        \"\"\"Return a smallest theta of all the components.\"\"\"\n        innerTheta = self.getDimensions(\"inner_theta\")\n        smallestInner = min(innerTheta) if innerTheta else None\n        return smallestInner\n\n    def thetaOuter(self):\n        \"\"\"Return a largest theta of all the components.\"\"\"\n        outerTheta = self.getDimensions(\"outer_theta\")\n        largestOuter = max(outerTheta) if outerTheta else None\n        return largestOuter\n\n    def 
axialInner(self):\n        \"\"\"Return the lower z-coordinate.\"\"\"\n        return self.getDimensions(\"inner_axial\")\n\n    def axialOuter(self):\n        \"\"\"Return the upper z-coordinate.\"\"\"\n        return self.getDimensions(\"outer_axial\")\n\n    def verifyBlockDims(self):\n        \"\"\"Perform dimension checks related to ThetaRZ blocks.\"\"\"\n        return\n"
  },
  {
    "path": "armi/reactor/blueprints/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nBlueprints describe the geometric and composition details of the objects in the reactor\n(e.g. fuel assemblies, control rods, etc.).\n\nInputs captured within this blueprints module pertain to major design criteria like\ncustom material properties or basic structures like the assemblies in use.\n\nThis is essentially a wrapper for a yaml loader.\nThe given yaml file is expected to rigidly adhere to given key:value pairings.\n\nSee the :ref:`blueprints documentation <bp-input-file>` for more details.\n\nThe file structure is expectation is::\n\n    nuclide flags:\n        AM241: {burn: true, xs: true}\n        ...\n\n    custom isotopics: {} # optional\n\n    blocks:\n        name:\n            component name:\n                component dimensions\n        ...\n\n    assemblies:\n        name:\n            specifier: ABC\n            blocks: [...]\n            height: [...]\n            axial mesh points: [...]\n            xs types: [...]\n\n            # optional\n            myMaterialModification1: [...]\n            myMaterialModification2: [...]\n\n            # optionally extra settings (note this is probably going to be a removed feature)\n            #    hotChannelFactors: TWRPclad\n\nExamples\n--------\n>>> design = blueprints.Blueprints.load(self.yamlString)\n>>> print(design.gridDesigns)\n\nNotes\n-----\nThe blueprints system was built to 
enable round trip translations between\ntext representations of input and objects in the code.\n\"\"\"\n\nimport copy\nimport io\nimport math\nimport pathlib\nimport traceback\nimport typing\n\nimport h5py\nimport ordered_set\nimport yamlize\nimport yamlize.objects\nfrom ruamel.yaml import RoundTripLoader\n\nfrom armi import (\n    context,\n    getPluginManager,\n    getPluginManagerOrFail,\n    migration,\n    plugins,\n    runLog,\n)\nfrom armi.nucDirectory import nuclideBases\nfrom armi.physics.neutronics.settings import CONF_LOADING_FILE\nfrom armi.reactor import assemblies\nfrom armi.reactor.blueprints import isotopicOptions\nfrom armi.reactor.blueprints.assemblyBlueprint import AssemblyKeyedList\nfrom armi.reactor.blueprints.blockBlueprint import BlockKeyedList\nfrom armi.reactor.blueprints.componentBlueprint import (\n    ComponentGroups,\n    ComponentKeyedList,\n)\nfrom armi.reactor.blueprints.gridBlueprint import Grids, Triplet\nfrom armi.reactor.blueprints.reactorBlueprint import SystemBlueprint, Systems\nfrom armi.reactor.converters import axialExpansionChanger\nfrom armi.reactor.flags import Flags\nfrom armi.settings.fwSettings.globalSettings import (\n    CONF_ACCEPTABLE_BLOCK_AREA_ERROR,\n    CONF_ASSEM_FLAGS_SKIP_AXIAL_EXP,\n    CONF_DETAILED_AXIAL_EXPANSION,\n    CONF_INPUT_HEIGHTS_HOT,\n    CONF_NON_UNIFORM_ASSEM_FLAGS,\n)\nfrom armi.utils import tabulate, textProcessors\nfrom armi.utils.customExceptions import InputError\n\ncontext.BLUEPRINTS_IMPORTED = True\ncontext.BLUEPRINTS_IMPORT_CONTEXT = \"\".join(traceback.format_stack())\n\n\ndef loadFromCs(cs, roundTrip=False):\n    \"\"\"Function to load Blueprints based on supplied ``Settings``.\"\"\"\n    from armi.utils import directoryChangers\n\n    with directoryChangers.DirectoryChanger(cs.inputDirectory, dumpOnException=False):\n        bpPath = pathlib.Path(cs[CONF_LOADING_FILE])\n        if bpPath.suffix.lower() in (\".h5\", \".hdf5\"):\n            # This is a case settings from a database 
so the blueprints are also in the database.\n            try:\n                db = h5py.File(bpPath, \"r\")\n                bpString = db[\"inputs/blueprints\"].asstr()[()]\n                stream = io.StringIO(bpString)\n                stream = Blueprints.migrate(stream)\n                bp = Blueprints.load(stream)\n            except KeyError:\n                # not all reactors need to be created from blueprints, so they may not exist\n                bp = None\n        else:\n            with open(cs[CONF_LOADING_FILE], \"r\") as bpYaml:\n                root = bpPath.parent.absolute()\n                bpYaml = textProcessors.resolveMarkupInclusions(bpYaml, root)\n                try:\n                    bp = Blueprints.load(bpYaml, roundTrip=roundTrip)\n                except yamlize.yamlizing_error.YamlizingError as err:\n                    if \"cross sections\" in err.args[0]:\n                        runLog.error(\n                            \"The loading file {} contains invalid `cross sections` input. 
\"\n                            \"Please run the `modify` entry point on this case to automatically convert.\"\n                            \"\".format(cs[CONF_LOADING_FILE])\n                        )\n                    raise\n    return bp\n\n\nclass _BlueprintsPluginCollector(yamlize.objects.ObjectType):\n    \"\"\"\n    Simple metaclass for adding yamlize.Attributes from plugins to Blueprints.\n\n    This calls the defineBlueprintsSections() plugin hook to discover new class\n    attributes to add before the yamlize code fires off to make the root yamlize.Object.\n    Since yamlize.Object itself uses a metaclass to define the attributes to turn into\n    yamlize.Attributes, these need to be folded in early.\n    \"\"\"\n\n    def __new__(mcs, name, bases, attrs):\n        pm = getPluginManager()\n        if pm is None:\n            runLog.warning(\n                \"Blueprints were instantiated before the framework was \"\n                \"configured with plugins. Blueprints cannot be imported before \"\n                \"ARMI has been configured.\"\n            )\n        else:\n            pluginSections = pm.hook.defineBlueprintsSections()\n            for plug in pluginSections:\n                for attrName, section, resolver in plug:\n                    assert isinstance(section, yamlize.Attribute)\n                    if attrName in attrs:\n                        raise plugins.PluginError(\n                            \"There is already a section called '{}' in the reactor blueprints\".format(attrName)\n                        )\n                    attrs[attrName] = section\n                    attrs[\"_resolveFunctions\"].append(resolver)\n\n        newType = yamlize.objects.ObjectType.__new__(mcs, name, bases, attrs)\n\n        return newType\n\n\nclass Blueprints(yamlize.Object, metaclass=_BlueprintsPluginCollector):\n    \"\"\"Base Blueprintsobject representing all the subsections in the input file.\"\"\"\n\n    nuclideFlags = 
yamlize.Attribute(key=\"nuclide flags\", type=isotopicOptions.NuclideFlags, default=None)\n    customIsotopics = yamlize.Attribute(key=\"custom isotopics\", type=isotopicOptions.CustomIsotopics, default=None)\n    blockDesigns = yamlize.Attribute(key=\"blocks\", type=BlockKeyedList, default=None)\n    assemDesigns = yamlize.Attribute(key=\"assemblies\", type=AssemblyKeyedList, default=None)\n    systemDesigns = yamlize.Attribute(key=\"systems\", type=Systems, default=None)\n    gridDesigns = yamlize.Attribute(key=\"grids\", type=Grids, default=None)\n    componentDesigns = yamlize.Attribute(key=\"components\", type=ComponentKeyedList, default=None)\n    componentGroups = yamlize.Attribute(key=\"component groups\", type=ComponentGroups, default=None)\n\n    # These are used to set up new attributes that come from plugins.\n    _resolveFunctions = []\n\n    def __new__(cls):\n        # yamlizable does not call __init__, so attributes that are not defined above need to be\n        # initialized here\n        self = yamlize.Object.__new__(cls)\n        self.assemblies = {}\n        self._prepped = False\n        self._assembliesBySpecifier = {}\n\n        # Better for performance since these are used for lookups\n        self.allNuclidesInProblem = ordered_set.OrderedSet()\n        self.activeNuclides = ordered_set.OrderedSet()\n        self.inertNuclides = ordered_set.OrderedSet()\n        self.nucsToForceInXsGen = ordered_set.OrderedSet()\n        self.elementsToExpand = []\n        return self\n\n    def __init__(self):\n        # Yamlize does not call __init__, instead we use Blueprints.load which creates and instance\n        # of a Blueprints object and initializes it with valuesconstructAssemusing setattr.\n        self._assembliesBySpecifier = {}\n        self._prepped = False\n        self.systemDesigns = Systems()\n        self.assemDesigns = AssemblyKeyedList()\n        self.blockDesigns = BlockKeyedList()\n        self.assemblies = {}\n        self.grids = 
Grids()\n        self.elementsToExpand = []\n\n    def __repr__(self):\n        return f\"<{self.__class__.__name__} Assemblies:{len(self.assemDesigns)} Blocks:{len(self.blockDesigns)}>\"\n\n    def constructAssem(self, cs, name=None, specifier=None, orientation=0.0):\n        \"\"\"\n        Construct a new assembly instance from the assembly designs in this Blueprints object.\n\n        Parameters\n        ----------\n        cs : Settings\n            Used to apply various modeling options when constructing an assembly.\n        name : str (optional, and should be exclusive with specifier)\n            Name of the assembly to construct. This should match the key that was used to define the\n            assembly in the Blueprints YAML file.\n        specifier : str (optional, and should be exclusive with name)\n            Identifier of the assembly to construct. This should match the identifier that was used\n            to define the assembly in the Blueprints YAML file.\n        orientation : float (optional, is usually just zero)\n            Rotate the Assembly at creation.\n\n        Raises\n        ------\n        ValueError\n            If neither name nor specifier are passed\n\n        Notes\n        -----\n        There is some possibility for \"compiling\" the logic with closures to make constructing an\n        assembly / block / component faster. At this point is is pretty much irrelevant because we\n        are currently just deepcopying already constructed assemblies.\n\n        Currently, this method is backward compatible with other code in ARMI and generates the\n        `.assemblies` attribute (the BOL assemblies). 
Eventually, this should be removed.\n        \"\"\"\n        self._prepConstruction(cs)\n\n        if name is not None:\n            assem = self.assemblies[name]\n        elif specifier is not None:\n            assem = self._assembliesBySpecifier[specifier]\n        else:\n            raise ValueError(\"Must supply assembly name or specifier to construct\")\n\n        a = copy.deepcopy(assem)\n        # since a deepcopy has the same assembly numbers and block id's, we need to make it unique\n        a.makeUnique()\n\n        if orientation:\n            a.rotate(math.radians(orientation))\n        return a\n\n    def _prepConstruction(self, cs):\n        \"\"\"\n        This method initializes a bunch of information within a Blueprints object such as assigning\n        assembly and block type numbers, resolving the nuclides in the problem, and pre-populating\n        assemblies.\n\n        Ideally, it would not be necessary at all, but the ``cs`` currently contains a bunch of\n        information necessary to create the applicable model. 
If it were possible, it would be\n        terrific to override the Yamlizable.from_yaml method to run this code after the instance has\n        been created, but we need additional information in order to build the assemblies that is\n        not within the YAML file.\n\n        This method should not be called directly, but it is used in testing.\n        \"\"\"\n        if not self._prepped:\n            self._assignTypeNums()\n            for func in self._resolveFunctions:\n                func(self, cs)\n            self._resolveNuclides(cs)\n            self._assembliesBySpecifier.clear()\n            self.assemblies.clear()\n\n            for aDesign in self.assemDesigns:\n                a = aDesign.construct(cs, self)\n                self._assembliesBySpecifier[aDesign.specifier] = a\n                self.assemblies[aDesign.name] = a\n\n            runLog.header(\"=========== Verifying Assembly Configurations ===========\")\n            self._checkAssemblyAreaConsistency(cs)\n\n            if not cs[CONF_DETAILED_AXIAL_EXPANSION]:\n                # this is required to set up assemblies so they know how to snap to the reference\n                # mesh. 
They won't know the mesh to conform to otherwise....\n                axialExpansionChanger.makeAssemsAbleToSnapToUniformMesh(\n                    self.assemblies.values(), cs[CONF_NON_UNIFORM_ASSEM_FLAGS]\n                )\n\n            if not cs[CONF_INPUT_HEIGHTS_HOT]:\n                runLog.header(\"=========== Axially expanding all assemblies from Tinput to Thot ===========\")\n                # expand axial heights from cold to hot so dims and masses are consistent with\n                # specified component hot temperatures.\n                assemsToSkip = [Flags.fromStringIgnoreErrors(t) for t in cs[CONF_ASSEM_FLAGS_SKIP_AXIAL_EXP]]\n                assemsToExpand = list(\n                    a for a in list(self.assemblies.values()) if not any(a.hasFlags(f) for f in assemsToSkip)\n                )\n                axialExpander = getPluginManagerOrFail().hook.getAxialExpansionChanger()\n                if axialExpander is not None:\n                    axialExpander.expandColdDimsToHot(\n                        assemsToExpand,\n                        cs[CONF_DETAILED_AXIAL_EXPANSION],\n                    )\n\n            getPluginManagerOrFail().hook.afterConstructionOfAssemblies(assemblies=self.assemblies.values(), cs=cs)\n\n        self._prepped = True\n\n    def _assignTypeNums(self):\n        if self.blockDesigns is None:\n            # this happens when directly defining assemblies.\n            self.blockDesigns = BlockKeyedList()\n            for aDesign in self.assemDesigns:\n                for bDesign in aDesign.blocks:\n                    if bDesign not in self.blockDesigns:\n                        self.blockDesigns.add(bDesign)\n\n    def _resolveNuclides(self, cs):\n        \"\"\"\n        Process elements and determine how to expand them to natural isotopics.\n\n        Also builds meta-data about which nuclides are in the problem.\n\n        This system works by building a dictionary in the ``elementsToExpand`` attribute with\n      
  ``Element`` keys and list of ``NuclideBase`` values.\n\n        The actual expansion of elementals to isotopics occurs during\n        :py:meth:`Component construction <armi.reactor.blueprints.componentBlueprint.\n        ComponentBlueprint._constructMaterial>`.\n        \"\"\"\n        from armi import utils\n\n        actives = set()\n        inerts = set()\n\n        nuclideFlags = self.nuclideFlags or isotopicOptions.genDefaultNucFlags()\n\n        nucsToForceInXsGen = set()\n        # just expanding flags now. ndense gets expanded in comp blueprints\n        self.elementsToExpand = []\n        for nucFlag in nuclideFlags:\n            # this returns any nuclides that are flagged specifically for expansion by input\n            (\n                expandedElements,\n                undefBurnChainActiveNuclides,\n            ) = nucFlag.fileAsActiveOrInert(\n                actives,\n                inerts,\n            )\n            self.elementsToExpand.extend(expandedElements)\n\n        inerts -= actives\n        self.customIsotopics = self.customIsotopics or isotopicOptions.CustomIsotopics()\n        eleKeep, eleExpand = isotopicOptions.eleExpandInfoBasedOnCodeENDF(cs)\n\n        # Flag all elementals for expansion unless they've been flagged otherwise by\n        # user input or automatic lattice/datalib rules.\n        for nucBase in nuclideBases.instances:\n            isAlreadyIsotopic = not isinstance(nucBase, nuclideBases.NaturalNuclideBase)\n            if isAlreadyIsotopic:\n                # `elemental` may be a NaturalNuclideBase or a NuclideBase\n                # skip all NuclideBases (isotopics)\n                continue\n\n            # we now know its an elemental\n            elemental = nucBase\n            if elemental in eleKeep:\n                continue\n\n            if elemental.name in actives:\n                currentSet = actives\n            elif elemental.name in inerts:\n                currentSet = inerts\n            else:\n 
               # This was not specified in the nuclide flags at all as burn or xs.\n                # If a material with this in its composition is brought in it's nice from a user\n                # perspective to allow it.\n                # But current behavior is that all nuclides in problem must be declared up front.\n                continue\n\n            self.elementsToExpand.append(elemental.element)\n\n            if elemental.name in nuclideFlags and nuclideFlags[elemental.element.symbol].expandTo:\n                # user-input expandTo has precedence\n                newNuclides = [nuclideBases.byName[nn] for nn in nuclideFlags[elemental.element.symbol].expandTo]\n            elif elemental in eleExpand and elemental.element.symbol in nuclideFlags:\n                # code-specific expansion required based on code and ENDF\n                newNuclides = eleExpand[elemental]\n                # Overlay code details onto nuclideFlags for other parts of the code that use them.\n                # Also, if this element is not in nuclideFlags at all, we just don't add it.\n                nuclideFlags[elemental.element.symbol].expandTo = [nb.name for nb in newNuclides]\n            else:\n                # expand to all possible natural isotopics\n                newNuclides = elemental.element.getNaturalIsotopics()\n\n            # remove the elemental and add the isotopic\n            currentSet.remove(elemental.name)\n            for nb in newNuclides:\n                currentSet.add(nb.name)\n\n        # force everything asked for in xsGen\n        nucsToForceInXsGen = ordered_set.OrderedSet(sorted(actives.union(inerts)))\n\n        # add all detailed isotopes in ENDF if requested\n        isotopicOptions.autoUpdateNuclideFlags(cs, nuclideFlags, inerts)\n        self.nuclideFlags = nuclideFlags\n\n        if self.elementsToExpand:\n            runLog.info(\n                \"Will expand {} elementals to have natural isotopics\".format(\n                    
\", \".join(element.symbol for element in self.elementsToExpand)\n                )\n            )\n\n        self.activeNuclides = ordered_set.OrderedSet(sorted(actives))\n        self.inertNuclides = ordered_set.OrderedSet(sorted(inerts))\n        self.allNuclidesInProblem = ordered_set.OrderedSet(sorted(actives.union(inerts)))\n        self.nucsToForceInXsGen = ordered_set.OrderedSet(sorted(nucsToForceInXsGen))\n\n        # Inform user which nuclides are truncating the burn chain.\n        if undefBurnChainActiveNuclides and nuclideBases.burnChainImposed:\n            runLog.info(\n                tabulate.tabulate(\n                    [\n                        [\n                            \"Nuclides truncating the burn-chain:\",\n                            utils.createFormattedStrWithDelimiter(list(undefBurnChainActiveNuclides)),\n                        ]\n                    ],\n                    tableFmt=\"plain\",\n                ),\n                single=True,\n            )\n\n    def _checkAssemblyAreaConsistency(self, cs):\n        references = None\n        for a in self.assemblies.values():\n            if references is None:\n                references = (a, a.getArea())\n                continue\n\n            assemblyArea = a.getArea()\n            if isinstance(a, assemblies.RZAssembly):\n                # R-Z assemblies by definition have different areas, so skip the check\n                continue\n            if abs(references[1] - assemblyArea) > 1e-9:\n                runLog.error(\"REFERENCE COMPARISON ASSEMBLY:\")\n                references[0][0].printContents()\n                runLog.error(\"CURRENT COMPARISON ASSEMBLY:\")\n                a[0].printContents()\n                raise InputError(\n                    \"Assembly {} has a different area {} than assembly {} {}.  
Check inputs for accuracy\".format(\n                        a, assemblyArea, references[0], references[1]\n                    )\n                )\n\n            blockArea = a[0].getArea()\n            for b in a[1:]:\n                if abs(b.getArea() - blockArea) / blockArea > cs[CONF_ACCEPTABLE_BLOCK_AREA_ERROR]:\n                    runLog.error(\"REFERENCE COMPARISON BLOCK:\")\n                    a[0].printContents(includeNuclides=False)\n                    runLog.error(\"CURRENT COMPARISON BLOCK:\")\n                    b.printContents(includeNuclides=False)\n\n                    for c in b:\n                        runLog.error(\n                            \"{0} area {1} effective area {2}\".format(c, c.getArea(), c.getVolume() / b.getHeight())\n                        )\n\n                    raise InputError(\n                        \"Block {} has a different area {} than block {} {}. Check inputs for accuracy\".format(\n                            b, b.getArea(), a[0], blockArea\n                        )\n                    )\n\n    @classmethod\n    def migrate(cls, inp: typing.TextIO):\n        \"\"\"Given a stream representation of a blueprints file, migrate it.\n\n        Parameters\n        ----------\n        inp : typing.TextIO\n            Input stream to migrate.\n        \"\"\"\n        for migI in migration.ACTIVE_MIGRATIONS:\n            if issubclass(migI, migration.base.BlueprintsMigration):\n                mig = migI(stream=inp)\n                inp = mig.apply()\n        return inp\n\n    @classmethod\n    def load(cls, stream, roundTrip=False):\n        \"\"\"This method is a wrapper around the `yamlize.Object.load()` method.\"\"\"\n        # With the release of ruamel.yaml 0.19.1, we began getting the following error:\n        # AttributeError: 'RoundTripLoader' object has no attribute 'max_depth'\n        # Setting that attribute to `None` solved the issue. 
However, it would be prudent to rework blueprints loading\n        # to side step the issue entirely. This occurs because of the way `yamlize` works when it calls\n        # `get_single_node`.\n        RoundTripLoader.max_depth = None\n        return super().load(stream, Loader=RoundTripLoader)\n\n    def addDefaultSFP(self):\n        \"\"\"Create a default SFP if it's not in the blueprints.\"\"\"\n        if self.systemDesigns is not None:\n            if not any(structure.typ == \"sfp\" for structure in self.systemDesigns):\n                sfp = SystemBlueprint(\"Spent Fuel Pool\", \"sfp\", Triplet())\n                sfp.typ = \"sfp\"\n                self.systemDesigns[\"Spent Fuel Pool\"] = sfp\n        else:\n            runLog.warning(f\"Can't add default SFP to {self}, there are no systemDesigns!\")\n\n\ndef migrate(bp: Blueprints, cs):\n    \"\"\"\n    Apply migrations to the input structure.\n\n    This is a good place to perform migrations that address changes to the system design description\n    (settings, blueprints). We have access both here, so we can even move stuff between files.\n    \"\"\"\n    from armi.reactor.blueprints import gridBlueprint\n\n    if bp.systemDesigns is None:\n        bp.systemDesigns = Systems()\n    if bp.gridDesigns is None:\n        bp.gridDesigns = gridBlueprint.Grids()\n\n    if \"core\" in [rd.name for rd in bp.gridDesigns]:\n        raise ValueError(\"Cannot auto-create a 2nd `core` grid. Adjust input.\")\n\n    if \"core\" in [rd.name for rd in bp.systemDesigns]:\n        raise ValueError(\"Cannot auto-create a 2nd `core` grid. Adjust input.\")\n\n    bp.systemDesigns[\"core\"] = SystemBlueprint(\"core\", \"core\", Triplet())\n"
  },
  {
    "path": "armi/reactor/blueprints/assemblyBlueprint.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis module defines the blueprints input object for assemblies.\n\nIn addition to defining the input format, the ``AssemblyBlueprint`` class is responsible for\nconstructing ``Assembly`` objects. An attempt has been made to decouple ``Assembly`` construction\nfrom the rest of ARMI as much as possible. For example, an assembly does not require a reactor to be\nconstructed, or a geometry file (but uses contained Block geometry type as a surrogate).\n\"\"\"\n\nimport yamlize\n\nfrom armi import getPluginManagerOrFail, runLog\nfrom armi.reactor import assemblies, grids, parameters\nfrom armi.reactor.blueprints import blockBlueprint\nfrom armi.reactor.flags import Flags\nfrom armi.settings.fwSettings.globalSettings import CONF_INPUT_HEIGHTS_HOT\n\n\ndef _configureAssemblyTypes():\n    assemTypes = dict()\n    pm = getPluginManagerOrFail()\n    for pluginAssemTypes in pm.hook.defineAssemblyTypes():\n        for blockType, assemType in pluginAssemTypes:\n            assemTypes[blockType] = assemType\n\n    return assemTypes\n\n\nclass Modifications(yamlize.Map):\n    \"\"\"\n    The names of material modifications and lists of the modification values for each block in the\n    assembly.\n    \"\"\"\n\n    key_type = yamlize.Typed(str)\n    value_type = yamlize.Sequence\n\n\nclass ByComponentModifications(yamlize.Map):\n    \"\"\"The name of a component within 
the block and an associated Modifications object.\"\"\"\n\n    key_type = yamlize.Typed(str)\n    value_type = Modifications\n\n\nclass MaterialModifications(yamlize.Map):\n    \"\"\"\n    A yamlize map for reading and holding material modifications.\n\n    A user may specify material modifications directly as keys/values on this class, in which case\n    these material modifications will be blanket applied to the entire block.\n\n    If the user wishes to specify material modifications specific to a component within the block,\n    they should use the `by component` attribute, specifying the keys/values underneath the name of\n    a specific component in the block.\n\n    .. impl:: User-impact on material definitions.\n        :id: I_ARMI_MAT_USER_INPUT0\n        :implements: R_ARMI_MAT_USER_INPUT\n\n        Defines a yaml map attribute for the assembly portion of the blueprints (see\n        :py:class:`~armi.blueprints.assemblyBlueprint.AssemblyBlueprint`) that allows users to\n        specify material attributes as lists corresponding to each axial block in the assembly. Two\n        types of specifications can be made:\n\n            1. Key-value pairs can be specified directly, where the key is the name of the\n            modification and the value is the list of block values.\n\n            2. The \"by component\" attribute can be used, in which case the user can specify material\n            attributes that are specific to individual components in each block. 
This is enabled\n            through the\n            :py:class:`~armi.reactor.blueprints.assemblyBlueprint.ByComponentModifications` class,\n            which basically just allows for one additional layer of attributes corresponding to the\n            component names.\n\n        These material attributes can be used during the resolution of material classes during core\n        instantiation (see\n        :py:meth:`~armi.reactor.blueprints.blockBlueprint.BlockBlueprint.construct` and\n        :py:meth:`~armi.reactor.blueprints.componentBlueprint.ComponentBlueprint.construct`).\n    \"\"\"\n\n    key_type = yamlize.Typed(str)\n    value_type = yamlize.Sequence\n    byComponent = yamlize.Attribute(\n        key=\"by component\",\n        type=ByComponentModifications,\n        default=ByComponentModifications(),\n    )\n\n\nclass AssemblyBlueprint(yamlize.Object):\n    \"\"\"\n    A data container for holding information needed to construct an ARMI assembly.\n\n    This class utilizes ``yamlize`` to enable serialization to and from the blueprints YAML file.\n\n    .. 
impl:: Create assembly from blueprint file.\n        :id: I_ARMI_BP_ASSEM\n        :implements: R_ARMI_BP_ASSEM\n\n        Defines a yaml construct that allows the user to specify attributes of an\n        assembly from within their blueprints file, including a name, flags, specifier\n        for use in defining a core map, a list of blocks, a list of block heights,\n        a list of axial mesh points in each block, a list of cross section identifiers\n        for each block, and material options (see :need:`I_ARMI_MAT_USER_INPUT0`).\n\n        Relies on the underlying infrastructure from the ``yamlize`` package for\n        reading from text files, serialization, and internal storage of the data.\n\n        Is implemented as part of a blueprints file by being imported and used\n        as an attribute within the larger :py:class:`~armi.reactor.blueprints.Blueprints`\n        class.\n\n        Includes a ``construct`` method, which instantiates an instance of\n        :py:class:`~armi.reactor.assemblies.Assembly` with the characteristics\n        as specified in the blueprints.\n    \"\"\"\n\n    name = yamlize.Attribute(type=str)\n    flags = yamlize.Attribute(type=str, default=None)\n    specifier = yamlize.Attribute(type=str)\n    blocks = yamlize.Attribute(type=blockBlueprint.BlockList)\n    height = yamlize.Attribute(type=yamlize.FloatList)\n    axialMeshPoints = yamlize.Attribute(key=\"axial mesh points\", type=yamlize.IntList)\n    radialMeshPoints = yamlize.Attribute(key=\"radial mesh points\", type=int, default=None)\n    azimuthalMeshPoints = yamlize.Attribute(key=\"azimuthal mesh points\", type=int, default=None)\n    materialModifications = yamlize.Attribute(\n        key=\"material modifications\",\n        type=MaterialModifications,\n        default=MaterialModifications(),\n    )\n    xsTypes = yamlize.Attribute(key=\"xs types\", type=yamlize.StrList)\n    # note: yamlizable does not call an __init__ method, instead it uses __new__ and setattr\n\n  
  _assemTypes = _configureAssemblyTypes()\n\n    @classmethod\n    def getAssemClass(cls, blocks):\n        \"\"\"\n        Get the ARMI ``Assembly`` class for the specified blocks.\n\n        Parameters\n        ----------\n        blocks : list of Blocks\n            Blocks for which to determine appropriate containing Assembly type\n        \"\"\"\n        blockClasses = {b.__class__ for b in blocks}\n        for bType, aType in cls._assemTypes.items():\n            if bType in blockClasses:\n                return aType\n        raise ValueError('Unsupported block geometries in {}: \"{}\"'.format(cls.name, blocks))\n\n    def construct(self, cs, blueprint):\n        \"\"\"\n        Construct an instance of this specific assembly blueprint.\n\n        Parameters\n        ----------\n        cs : Settings\n            Settings object which containing relevant modeling options.\n        blueprint : Blueprint\n            Root blueprint object containing relevant modeling options.\n        \"\"\"\n        runLog.info(\"Constructing assembly `{}`\".format(self.name))\n        self._checkParamConsistency()\n        a = self._constructAssembly(cs, blueprint)\n        a.calculateZCoords()\n        return a\n\n    def _constructAssembly(self, cs, blueprint):\n        \"\"\"Construct the current assembly.\"\"\"\n        blocks = []\n        for axialIndex, bDesign in enumerate(self.blocks):\n            b = self._createBlock(cs, blueprint, bDesign, axialIndex)\n            blocks.append(b)\n\n        assemblyClass = self.getAssemClass(blocks)\n        a = assemblyClass(self.name)\n        flags = None\n        if self.flags is not None:\n            flags = Flags.fromString(self.flags)\n            a.p.flags = flags\n\n        # set a basic grid with the right number of blocks with bounds to be adjusted.\n        a.spatialGrid = grids.AxialGrid.fromNCells(len(blocks))\n        a.spatialGrid.armiObject = a\n\n        # init submeshes\n        radMeshPoints = 
self.radialMeshPoints or 1\n        a.p.RadMesh = radMeshPoints\n        aziMeshPoints = self.azimuthalMeshPoints or 1\n        a.p.AziMesh = aziMeshPoints\n\n        # Loop a second time because we needed all the blocks before choosing the assembly class.\n        for axialIndex, b in enumerate(blocks):\n            b.name = b.makeName(a.p.assemNum, axialIndex)\n            a.add(b)\n\n        # Assign values for the parameters if they are defined on the blueprints\n        for paramDef in a.p.paramDefs.inCategory(parameters.Category.assignInBlueprints):\n            val = getattr(self, paramDef.name)\n            if val is not None:\n                a.p[paramDef.name] = val\n\n        return a\n\n    @staticmethod\n    def _shouldMaterialModiferBeApplied(value) -> bool:\n        \"\"\"Determine if a material modifier entry is applicable.\n\n        Two exceptions:\n\n        1. Modifiers that are empty strings are not applied.\n        2. Modifiers that are ``None`` are not applied\n\n        Parameters\n        ----------\n        value : object\n            Entry in a material modifications array\n\n        Returns\n        -------\n        bool: Result of the check\n        \"\"\"\n        return bool(value != \"\" and value is not None)\n\n    def _createBlock(self, cs, blueprint, bDesign, axialIndex):\n        \"\"\"Create a block based on the block design and the axial index.\"\"\"\n        meshPoints = self.axialMeshPoints[axialIndex]\n        height = self.height[axialIndex]\n        xsType = self.xsTypes[axialIndex]\n\n        materialInput = {}\n\n        for key, mod in {\n            \"byBlock\": {**self.materialModifications},\n            **self.materialModifications.byComponent,\n        }.items():\n            materialInput[key] = {\n                modName: modList[axialIndex]\n                for modName, modList in mod.items()\n                if self._shouldMaterialModiferBeApplied(modList[axialIndex])\n            }\n\n        b = 
bDesign.construct(cs, blueprint, axialIndex, meshPoints, height, xsType, materialInput)\n\n        b.completeInitialLoading()\n\n        # set b10 volume cc since its a cold dim param\n        b.setB10VolParam(cs[CONF_INPUT_HEIGHTS_HOT])\n        return b\n\n    def _checkParamConsistency(self) -> None:\n        \"\"\"Check that the number of block params specified is equal to the number of blocks specified.\"\"\"\n        # general things to check\n        paramsToCheck = {\n            \"mesh points\": self.axialMeshPoints,\n            \"heights\": self.height,\n            \"xs types\": self.xsTypes,\n        }\n\n        # check by-block mat mods\n        for modName, modList in self.materialModifications.items():\n            paramName = f\"mat mod for {modName}\"\n            paramsToCheck[paramName] = modList\n\n        # check by-component mat mods\n        for comp in self.materialModifications.byComponent.values():\n            for modName, modList in comp.items():\n                paramName = f\"material modifications for {modName}\"\n                paramsToCheck[paramName] = modList\n\n        # perform the check\n        for paramName, blockVals in paramsToCheck.items():\n            if len(self.blocks) != len(blockVals):\n                msg = (\n                    f\"Assembly {self.name} had {len(self.blocks)} block(s), but {len(blockVals)} \"\n                    f\"'{paramName}'. These numbers should be equal. 
Check input for errors.\"\n                )\n                runLog.error(msg)\n                raise ValueError(msg)\n\n\nfor paramDef in parameters.forType(assemblies.Assembly).inCategory(parameters.Category.assignInBlueprints):\n    setattr(\n        AssemblyBlueprint,\n        paramDef.name,\n        yamlize.Attribute(name=paramDef.name, default=None),\n    )\n\n\nclass AssemblyKeyedList(yamlize.KeyedList):\n    \"\"\"\n    Effectively and OrderedDict of assembly items, keyed on the assembly name.\n\n    This uses yamlize KeyedList for YAML serialization.\n    \"\"\"\n\n    item_type = AssemblyBlueprint\n    key_attr = AssemblyBlueprint.name\n    heights = yamlize.Attribute(type=yamlize.FloatList, default=None)\n    axialMeshPoints = yamlize.Attribute(key=\"axial mesh points\", type=yamlize.IntList, default=None)\n\n    # NOTE: yamlize does not call an __init__ method, instead it uses __new__ and setattr\n\n    @property\n    def bySpecifier(self):\n        \"\"\"Used by the reactor to ``_loadComposites`` later, specifiers are two character strings.\"\"\"\n        return {aDesign.specifier: aDesign for aDesign in self}\n"
  },
  {
    "path": "armi/reactor/blueprints/blockBlueprint.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This module defines the ARMI input for a block definition, and code for constructing an ARMI ``Block``.\"\"\"\n\nimport collections\nfrom inspect import signature\nfrom typing import Iterable, Iterator, Set\n\nimport yamlize\n\nfrom armi import getPluginManagerOrFail, runLog\nfrom armi.materials.material import Material\nfrom armi.reactor import blocks, parameters\nfrom armi.reactor.blueprints import componentBlueprint\nfrom armi.reactor.components.component import Component\nfrom armi.reactor.composites import Composite\nfrom armi.reactor.converters import blockConverters\nfrom armi.reactor.flags import Flags\nfrom armi.settings.fwSettings.globalSettings import CONF_INPUT_HEIGHTS_HOT\n\n\ndef _configureGeomOptions():\n    blockTypes = dict()\n    pm = getPluginManagerOrFail()\n    for pluginBlockTypes in pm.hook.defineBlockTypes():\n        for compType, blockType in pluginBlockTypes:\n            blockTypes[compType] = blockType\n\n    return blockTypes\n\n\nclass BlockBlueprint(yamlize.KeyedList):\n    \"\"\"Input definition for Block.\n\n    .. 
impl:: Create a Block from blueprint file.\n        :id: I_ARMI_BP_BLOCK\n        :implements: R_ARMI_BP_BLOCK\n\n        Defines a yaml construct that allows the user to specify attributes of a block from within\n        their blueprints file, including a name, flags, a radial grid to specify locations of pins,\n        and the name of a component which drives the axial expansion of the block (see\n        :py:mod:`~armi.reactor.converters.axialExpansionChanger`).\n\n        In addition, the user may specify key-value pairs to specify the components contained within\n        the block, where the keys are component names and the values are component blueprints (see\n        :py:class:`~armi.reactor.blueprints.ComponentBlueprint.ComponentBlueprint`).\n\n        Relies on the underlying infrastructure from the ``yamlize`` package for reading from text\n        files, serialization, and internal storage of the data.\n\n        Is implemented into a blueprints file by being imported and used as an attribute within the\n        larger :py:class:`~armi.reactor.blueprints.Blueprints` class.\n\n        Includes a ``construct`` method, which instantiates an instance of\n        :py:class:`~armi.reactor.blocks.Block` with the characteristics as specified in the\n        blueprints.\n    \"\"\"\n\n    item_type = componentBlueprint.ComponentBlueprint\n    key_attr = componentBlueprint.ComponentBlueprint.name\n    name = yamlize.Attribute(key=\"name\", type=str)\n    gridName = yamlize.Attribute(key=\"grid name\", type=str, default=None)\n    flags = yamlize.Attribute(type=str, default=None)\n    axialExpTargetComponent = yamlize.Attribute(key=\"axial expansion target component\", type=str, default=None)\n    _geomOptions = _configureGeomOptions()\n\n    def _getBlockClass(self, outerComponent):\n        \"\"\"\n        Get the ARMI ``Block`` class for the specified outerComponent.\n\n        Parameters\n        ----------\n        outerComponent : Component\n            
Largest component in block.\n        \"\"\"\n        for compCls, blockCls in self._geomOptions.items():\n            if isinstance(outerComponent, compCls):\n                return blockCls\n\n        raise ValueError(\n            \"Block input for {} has outer component {} which is \"\n            \" not a supported Block geometry subclass. Update geometry.\"\n            \"\".format(self.name, outerComponent)\n        )\n\n    def construct(self, cs, blueprint, axialIndex, axialMeshPoints, height, xsType, materialInput):\n        \"\"\"\n        Construct an ARMI ``Block`` to be placed in an ``Assembly``.\n\n        Parameters\n        ----------\n        cs : Settings\n            Settings object for the appropriate simulation.\n\n        blueprint : Blueprints\n            Blueprints object containing various detailed information, such as nuclides to model\n\n        axialIndex : int\n            The Axial index this block exists within the parent assembly\n\n        axialMeshPoints : int\n            number of mesh points for use in the neutronics kernel\n\n        height : float\n            initial height of the block\n\n        xsType : str\n            String representing the xsType of this block.\n\n        materialInput : dict\n            Double-layered dict.\n            Top layer groups the by-block material modifications under the `byBlock` key\n            and the by-component material modifications under the component's name.\n            The inner dict under each key contains material modification names and values.\n        \"\"\"\n        runLog.debug(\"Constructing block {}\".format(self.name))\n        components = collections.OrderedDict()\n        # build grid before components so you can load\n        # the components into the grid.\n        gridDesign = self._getGridDesign(blueprint)\n        if gridDesign:\n            spatialGrid = gridDesign.construct()\n        else:\n            spatialGrid = None\n\n        
self._checkByComponentMaterialInput(materialInput)\n\n        allLatticeIds = set()\n        for componentDesign in self:\n            filteredMaterialInput, byComponentMatModKeys = self._filterMaterialInput(materialInput, componentDesign)\n            c = componentDesign.construct(\n                blueprint,\n                filteredMaterialInput,\n                cs[CONF_INPUT_HEIGHTS_HOT],\n            )\n            components[c.name] = c\n\n            # check that the mat mods for this component are valid options\n            # this will only examine by-component mods, block mods are done later\n            if isinstance(c, Component):\n                # there are other things like composite groups that don't get\n                # material modifications -- skip those\n                validMatModOptions = self._getMaterialModsFromBlockChildren(c)\n                for key in byComponentMatModKeys:\n                    if key not in validMatModOptions:\n                        raise ValueError(f\"{c} in block {self.name} has invalid material modification: {key}\")\n\n            if spatialGrid:\n                componentLocators = gridDesign.getMultiLocator(spatialGrid, componentDesign.latticeIDs)\n                if componentLocators:\n                    # this component is defined in the block grid\n                    # We can infer the multiplicity from the grid.\n                    # Otherwise it's a component that is in a block\n                    # with grids but that's not in the grid itself.\n                    c.spatialLocator = componentLocators\n                    mult = c.getDimension(\"mult\")\n                    if mult and mult != 1.0 and mult != len(c.spatialLocator):\n                        raise ValueError(\n                            f\"For {c} in {self.name} there is a conflicting ``mult`` input ({mult}) \"\n                            f\"and number of lattice positions ({len(c.spatialLocator)}). 
\"\n                            \"Recommend leaving off ``mult`` input when using grids.\"\n                        )\n                    elif not mult or mult == 1.0:\n                        # learn mult from grid definition\n                        c.setDimension(\"mult\", len(c.spatialLocator))\n\n                idsInGrid = list(gridDesign.gridContents.values())\n                if componentDesign.latticeIDs:\n                    for latticeID in componentDesign.latticeIDs:\n                        allLatticeIds.add(str(latticeID))\n                        # the user has given this component latticeIDs. check that\n                        # each of the ids appears in the grid, otherwise\n                        # their blueprints are probably wrong\n                        if len([i for i in idsInGrid if i == str(latticeID)]) == 0:\n                            raise ValueError(\n                                f\"latticeID {latticeID} in block blueprint '{self.name}' is expected \"\n                                \"to be present in the associated block grid. \"\n                                \"Check that the component's latticeIDs align with the block's grid.\"\n                            )\n\n        # for every id in grid, confirm that at least one component had it\n        if gridDesign:\n            idsInGrid = list(gridDesign.gridContents.values())\n            for idInGrid in idsInGrid:\n                if str(idInGrid) not in allLatticeIds:\n                    raise ValueError(\n                        f\"ID {idInGrid} in grid {gridDesign.name} is not in any components of block {self.name}. 
\"\n                        \"All IDs in the grid must appear in at least one component.\"\n                    )\n\n        # check that the block level mat mods use valid options in the same way\n        # as we did for the by-component mods above\n        validMatModOptions = self._getBlockwiseMaterialModifierOptions(components.values())\n\n        if \"byBlock\" in materialInput:\n            for key in materialInput[\"byBlock\"]:\n                if key not in validMatModOptions:\n                    raise ValueError(f\"Block {self.name} has invalid material modification key: {key}\")\n\n        # Resolve linked dims after all components in the block are created\n        for c in components.values():\n            c.resolveLinkedDims(components)\n\n        boundingComp = sorted(components.values())[-1]\n        # give a temporary name (will be updated by b.makeName as real blocks populate systems)\n        b = self._getBlockClass(boundingComp)(name=f\"block-bol-{axialIndex:03d}\")\n\n        for paramDef in b.p.paramDefs.inCategory(parameters.Category.assignInBlueprints):\n            val = getattr(self, paramDef.name)\n            if val is not None:\n                b.p[paramDef.name] = val\n\n        flags = None\n        if self.flags is not None:\n            flags = Flags.fromString(self.flags)\n\n        b.setType(self.name, flags)\n\n        if self.axialExpTargetComponent is not None:\n            try:\n                b.setAxialExpTargetComp(components[self.axialExpTargetComponent])\n            except KeyError as noMatchingComponent:\n                raise RuntimeError(\n                    f\"Block {b} --> axial expansion target component {self.axialExpTargetComponent} \"\n                    \"specified in the blueprints does not match any component names. 
\"\n                    \"Revise axial expansion target component in blueprints \"\n                    \"to match the name of a component and retry.\"\n                ) from noMatchingComponent\n\n        for c in components.values():\n            b.add(c)\n        b.p.nPins = b.getNumPins()\n        b.p.axMesh = _setBlueprintNumberOfAxialMeshes(axialMeshPoints, cs[\"axialMeshRefinementFactor\"])\n        b.p.height = height\n        b.p.heightBOL = height  # for fuel performance\n        b.p.xsType = xsType\n        b.setBuLimitInfo()\n        b = self._mergeComponents(b)\n        b.verifyBlockDims()\n        b.spatialGrid = spatialGrid\n\n        return b\n\n    def _getBlockwiseMaterialModifierOptions(self, children: Iterable[Composite]) -> Set[str]:\n        \"\"\"Collect all the material modifiers that exist on a block.\"\"\"\n        validMatModOptions = set()\n        for c in children:\n            perChildModifiers = self._getMaterialModsFromBlockChildren(c)\n            validMatModOptions.update(perChildModifiers)\n        return validMatModOptions\n\n    def _getMaterialModsFromBlockChildren(self, c: Composite) -> Set[str]:\n        \"\"\"Collect all the material modifiers from a child of a block.\"\"\"\n        perChildModifiers = set()\n        for material in self._getMaterialsInComposite(c):\n            for materialParentClass in material.__class__.__mro__:\n                # we must loop over parents as well, since applyInputParams\n                # could call to Parent.applyInputParams()\n                if issubclass(materialParentClass, Material):\n                    perChildModifiers.update(signature(materialParentClass.applyInputParams).parameters.keys())\n        # self is a parameter to methods, so it gets picked up here\n        # but that's obviously not a real material modifier\n        perChildModifiers.discard(\"self\")\n        return perChildModifiers\n\n    def _getMaterialsInComposite(self, child: Composite) -> 
Iterator[Material]:\n        \"\"\"Collect all the materials in a composite.\"\"\"\n        # Leaf node, no need to traverse further down\n        if isinstance(child, Component):\n            yield child.material\n            return\n        # Don't apply modifications to other things that could reside\n        # in a block e.g., component groups\n\n    def _checkByComponentMaterialInput(self, materialInput):\n        for component in materialInput:\n            if component != \"byBlock\":\n                if component not in [componentDesign.name for componentDesign in self]:\n                    if materialInput[component]:  # ensure it is not empty\n                        raise ValueError(\n                            f\"The component '{component}' used to specify a by-component\"\n                            f\" material modification is not in block '{self.name}'.\"\n                        )\n\n    @staticmethod\n    def _filterMaterialInput(materialInput, componentDesign):\n        \"\"\"\n        Get the by-block material modifications and those specifically for this\n        component.\n\n        If a material modification is specified both by-block and by-component\n        for a given component, the by-component value will be used.\n        \"\"\"\n        filteredMaterialInput = {}\n        byComponentMatModKeys = set()\n\n        # first add the by-block modifications without question\n        if \"byBlock\" in materialInput:\n            for modName, modVal in materialInput[\"byBlock\"].items():\n                filteredMaterialInput[modName] = modVal\n\n        # then get the by-component modifications as appropriate\n        for component, mod in materialInput.items():\n            if component == \"byBlock\":\n                pass  # we already added these\n            else:\n                # these are by-component mods, first test if the component matches\n                # before adding. 
if component matches, add the modifications,\n                # overwriting any by-block modifications of the same type\n                if component == componentDesign.name:\n                    for modName, modVal in mod.items():\n                        byComponentMatModKeys.add(modName)\n                        filteredMaterialInput[modName] = modVal\n\n        return filteredMaterialInput, byComponentMatModKeys\n\n    def _getGridDesign(self, blueprint):\n        \"\"\"\n        Get the appropriate grid design.\n\n        This happens when a lattice input is provided on the block. Otherwise all\n        components are ambiguously defined in the block.\n        \"\"\"\n        if self.gridName:\n            if self.gridName not in blueprint.gridDesigns:\n                raise KeyError(\n                    f\"Lattice {self.gridName} defined on {self} is not defined in the blueprints `lattices` section.\"\n                )\n            return blueprint.gridDesigns[self.gridName]\n        return None\n\n    @staticmethod\n    def _mergeComponents(b):\n        solventNamesToMergeInto = set(c.p.mergeWith for c in b.iterComponents() if c.p.mergeWith)\n\n        if solventNamesToMergeInto:\n            runLog.warning(\n                \"Component(s) {} in block {} has merged components inside it. The merge was valid at hot \"\n                \"temperature, but the merged component only has the basic thermal expansion factors \"\n                \"of the component(s) merged into. Expansion properties or dimensions of non hot  \"\n                \"temperature may not be representative of how the original components would have acted had \"\n                \"they not been merged. 
It is recommended that merging happen right before \"\n                \"a physics calculation using a block converter to avoid this.\"\n                \"\".format(solventNamesToMergeInto, b.name),\n                single=True,\n            )\n\n        for solventName in solventNamesToMergeInto:\n            soluteNames = []\n\n            for c in b:\n                if c.p.mergeWith == solventName:\n                    soluteNames.append(c.name)\n\n            converter = blockConverters.MultipleComponentMerger(b, soluteNames, solventName)\n            b = converter.convert()\n\n        return b\n\n\nfor paramDef in parameters.forType(blocks.Block).inCategory(parameters.Category.assignInBlueprints):\n    setattr(\n        BlockBlueprint,\n        paramDef.name,\n        yamlize.Attribute(name=paramDef.name, default=None),\n    )\n\n\ndef _setBlueprintNumberOfAxialMeshes(meshPoints, factor):\n    \"\"\"Set the blueprint number of axial mesh based on the axial mesh refinement factor.\"\"\"\n    if factor <= 0:\n        raise ValueError(f\"A positive axial mesh refinement factor must be provided. A value of {factor} is invalid.\")\n\n    if factor != 1:\n        runLog.important(\n            \"An axial mesh refinement factor of {} is applied to blueprint based on setting specification.\".format(\n                factor\n            ),\n            single=True,\n        )\n    return int(meshPoints) * factor\n\n\nclass BlockKeyedList(yamlize.KeyedList):\n    \"\"\"\n    An OrderedDict of BlockBlueprints keyed on the name. Utilizes yamlize for serialization to and from YAML.\n\n    This is used within the ``blocks:`` main entry of the blueprints.\n    \"\"\"\n\n    item_type = BlockBlueprint\n    key_attr = BlockBlueprint.name\n\n\nclass BlockList(yamlize.Sequence):\n    \"\"\"\n    A list of BlockBlueprints keyed on the name. 
Utilizes yamlize for serialization to and from YAML.\n\n    This is used to define the ``blocks:`` attribute of the assembly definitions.\n    \"\"\"\n\n    item_type = BlockBlueprint\n"
  },
  {
    "path": "armi/reactor/blueprints/componentBlueprint.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis module defines the ARMI input for a component definition, and code for constructing an ARMI ``Component``.\n\nSpecial logic is required for handling component links.\n\"\"\"\n\nimport yamlize\n\nfrom armi import materials, runLog\nfrom armi.nucDirectory import nuclideBases\nfrom armi.reactor import components, composites\nfrom armi.reactor.flags import Flags\nfrom armi.utils import densityTools\n\nCOMPONENT_GROUP_SHAPE = \"group\"\n\n\nclass ComponentDimension(yamlize.Object):\n    \"\"\"\n    Dummy object for ensuring well-formed component links are specified within the YAML input.\n\n    This can be either a number (float or int), or a conformation string (``name.dimension``).\n    \"\"\"\n\n    def __init__(self, value):\n        # note: yamlizable does not call an __init__ method, instead it uses __new__ and setattr\n        self.value = value\n        if isinstance(value, str):\n            if not components.COMPONENT_LINK_REGEX.search(value):\n                raise ValueError(f\"Bad component link `{value}`, must be in form `name.dimension`\")\n\n    def __repr__(self):\n        return f\"<ComponentDimension value: {self.value}>\"\n\n    @classmethod\n    def from_yaml(cls, loader, node, _rtd=None):\n        \"\"\"\n        Override the ``Yamlizable.from_yaml`` to inject custom interpretation of component dimension.\n\n        This allows us to 
create a new object with either a string or numeric value.\n        \"\"\"\n        try:\n            val = loader.construct_object(node)\n            self = ComponentDimension(val)\n            loader.constructed_objects[node] = self\n            return self\n        except ValueError as ve:\n            raise yamlize.YamlizingError(str(ve), node)\n\n    @classmethod\n    def to_yaml(cls, dumper, self, _rtd=None):\n        \"\"\"\n        Override the ``Yamlizable.to_yaml`` to remove the object-like behavior, otherwise we'd end up with a\n        ``{value: ...}`` dictionary.\n\n        This allows someone to programmatically edit the component dimensions without using the ``ComponentDimension``\n        class.\n        \"\"\"\n        if not isinstance(self, cls):\n            self = cls(self)\n        node = dumper.represent_data(self.value)\n        dumper.represented_objects[self] = node\n        return node\n\n    def __mul__(self, other):\n        return self.value * other\n\n    def __add__(self, other):\n        return self.value + other\n\n    def __div__(self, other):\n        return self.value / other\n\n    def __sub__(self, other):\n        return self.value - other\n\n    def __eq__(self, other):\n        return self.value == other\n\n    def __ne__(self, other):\n        return self.value != other\n\n    def __gt__(self, other):\n        return self.value > other\n\n    def __ge__(self, other):\n        return self.value >= other\n\n    def __lt__(self, other):\n        return self.value < other\n\n    def __le__(self, other):\n        return self.value <= other\n\n    def __hash__(self):\n        return id(self)\n\n\nclass ComponentBlueprint(yamlize.Object):\n    \"\"\"\n    This class defines the inputs necessary to build ARMI component objects. It uses ``yamlize`` to enable serialization\n    to and from YAML.\n\n    .. 
impl:: Construct component from blueprint file.\n        :id: I_ARMI_BP_COMP\n        :implements: R_ARMI_BP_COMP\n\n        Defines a yaml construct that allows the user to specify attributes of a component from within their blueprints\n        file, including a name, flags, shape, material and/or isotopic vector, input temperature, corresponding\n        component dimensions, and ID for placement in a block lattice (see\n        :py:class:`~armi.reactor.blueprints.blockBlueprint.BlockBlueprint`). Component dimensions that can be defined\n        for a given component are dependent on the component's ``shape`` attribute, and the dimensions defining each\n        shape can be found in the :py:mod:`~armi.reactor.components` module.\n\n        Limited validation on the inputs is performed to ensure that the component shape corresponds to a valid shape\n        defined by the ARMI application.\n\n        Relies on the underlying infrastructure from the ``yamlize`` package for reading from text files, serialization,\n        and internal storage of the data.\n\n        Is implemented as part of a blueprints file by being imported and used as an attribute within the larger\n        :py:class:`~armi.reactor.blueprints.Blueprints` class. 
Can also be used within the\n        :py:class:`~armi.reactor.blueprints.blockBlueprint.BlockBlueprint` class to enable specification of components\n        directly within the \"blocks\" portion of the blueprint file.\n\n        Includes a ``construct`` method, which instantiates an instance of\n        :py:class:`~armi.reactor.components.component.Component` with the characteristics specified in the blueprints\n        (see :need:`I_ARMI_MAT_USER_INPUT1`).\n    \"\"\"\n\n    name = yamlize.Attribute(type=str)\n    flags = yamlize.Attribute(type=str, default=None)\n\n    @name.validator\n    def name(self, name):\n        \"\"\"Validate component names.\"\"\"\n        if name == \"cladding\":\n            # many users were mixing cladding and clad and it caused issues downstream where physics plugins checked for\n            # clad.\n            raise ValueError(f\"Cannot set ComponentBlueprint.name to {name}. Prefer 'clad'.\")\n\n    shape = yamlize.Attribute(type=str)\n\n    @shape.validator\n    def shape(self, shape):\n        normalizedShape = shape.strip().lower()\n        if normalizedShape not in components.ComponentType.TYPES and normalizedShape != COMPONENT_GROUP_SHAPE:\n            raise ValueError(f\"Cannot set ComponentBlueprint.shape to unknown shape: {shape}\")\n\n    material = yamlize.Attribute(type=str, default=None)\n    Tinput = yamlize.Attribute(type=float, default=None)\n    Thot = yamlize.Attribute(type=float, default=None)\n    isotopics = yamlize.Attribute(type=str, default=None)\n    latticeIDs = yamlize.Attribute(type=list, default=None)\n    origin = yamlize.Attribute(type=list, default=None)\n    orientation = yamlize.Attribute(type=str, default=None)\n    mergeWith = yamlize.Attribute(type=str, default=None)\n    area = yamlize.Attribute(type=float, default=None)\n\n    def construct(self, blueprint, matMods, inputHeightsConsideredHot):\n        \"\"\"Construct a component or group.\n\n        .. 
impl:: User-defined on material alterations are applied here.\n            :id: I_ARMI_MAT_USER_INPUT1\n            :implements: R_ARMI_MAT_USER_INPUT\n\n            Allows for user input to impact a component's materials by applying the \"material modifications\" section of\n            a blueprints file (see :need:`I_ARMI_MAT_USER_INPUT0`) to the material during construction. This takes place\n            during lower calls to ``_conformKwargs()`` and subsequently ``_constructMaterial()``, which operate using\n            the component blueprint and associated material modifications from the component's block.\n\n            Within ``_constructMaterial()``, the material class is resolved into a material object by calling\n            :py:func:`~armi.materials.resolveMaterialClassByName`. The ``applyInputParams()`` method of that material\n            class is then called, passing in the associated material modifications data, which the material class can\n            then use to modify the isotopics as necessary.\n\n        Parameters\n        ----------\n        blueprint : Blueprints\n            Blueprints object containing various detailed information, such as nuclides to model\n        matMods : dict\n            Material modifications to apply to the component.\n        inputHeightsConsideredHot : bool\n            See the case setting of the same name.\n        \"\"\"\n        runLog.debug(f\"Constructing component {self.name}\")\n        kwargs = self._conformKwargs(blueprint, matMods)\n        shape = self.shape.lower().strip()\n        if shape == COMPONENT_GROUP_SHAPE:\n            group = blueprint.componentGroups[self.name]\n            constructedObject = composites.Composite(self.name)\n            for groupedComponent in group:\n                componentDesign = blueprint.componentDesigns[groupedComponent.name]\n                component = componentDesign.construct(blueprint, {}, inputHeightsConsideredHot)\n                # override free 
component multiplicity if it's set based on the group definition\n                component.setDimension(\"mult\", groupedComponent.mult)\n                _setComponentFlags(component, self.flags, blueprint)\n                insertDepletableNuclideKeys(component, blueprint)\n                constructedObject.add(component)\n\n        else:\n            constructedObject = components.factory(shape, [], kwargs)\n            _setComponentFlags(constructedObject, self.flags, blueprint)\n            insertDepletableNuclideKeys(constructedObject, blueprint)\n            constructedObject.p.theoreticalDensityFrac = constructedObject.material.getTD()\n\n        self._setComponentCustomDensity(\n            constructedObject,\n            blueprint,\n            matMods,\n            inputHeightsConsideredHot,\n        )\n\n        if hasattr(constructedObject, \"material\") and \"Custom\" in str(constructedObject.material):\n            if len(constructedObject.material.massFrac) == 0:\n                msg = f\"Custom material does not have isotopics: {self}\"\n                runLog.error(msg, single=True)\n                raise IOError(msg)\n\n        return constructedObject\n\n    def _setComponentCustomDensity(self, comp, blueprint, matMods, inputHeightsConsideredHot):\n        \"\"\"Apply a custom density to a material with custom isotopics but not a 'custom material'.\"\"\"\n        if self.isotopics is None:\n            # No custom isotopics specified\n            return\n\n        densityFromCustomIsotopic = blueprint.customIsotopics[self.isotopics].density\n        if densityFromCustomIsotopic is None:\n            # Nothing to do\n            return\n\n        if densityFromCustomIsotopic <= 0:\n            runLog.error(\n                \"A zero or negative density was specified in a custom isotopics input. This is not permitted, if a 0 \"\n                f\"density material is needed, use 'Void'. 
The component is {comp} and the isotopics entry is \"\n                f\"{self.isotopics}.\"\n            )\n            raise ValueError(\"A zero or negative density was specified in the custom isotopics for a component\")\n        elif len(matMods):\n            runLog.warning(\n                f\"Custom isotopics and material modifications have both been defined for {self.material} for component\"\n                f\"{comp}. Please consider carefully if these are in conflict.\",\n                single=True,\n                label=f\"custom iso + mat mods {self.material} {comp}\",\n            )\n\n        mat = materials.resolveMaterialClassByName(self.material)()\n        if not isinstance(mat, materials.Custom):\n            # check for some problem cases\n            overSpecs = [k for k in matMods if k.endswith(\"_frac\")]\n            if len(overSpecs):\n                runLog.error(\n                    f\"Both {overSpecs} and a custom isotopic with density {blueprint.customIsotopics[self.isotopics]} \"\n                    f\"have been specified for material {self.material}. This is an overspecification.\",\n                    single=True,\n                )\n\n            if not mat.density(Tc=self.Tinput) > 0:\n                runLog.error(\n                    f\"A custom density has been assigned to material '{self.material}', which has no baseline density. \"\n                    \"Only materials with a starting density may be assigned a density. This comes up e.g. 
if isotopics \"\n                    \"are assigned to 'Void'.\"\n                )\n                raise ValueError(\"Cannot apply custom densities to materials without density.\")\n\n            # Apply a density scaling to account for the temperature change between Tinput and Thot\n            if isinstance(mat, materials.Fluid):\n                densityRatio = densityFromCustomIsotopic / mat.density(Tc=comp.inputTemperatureInC)\n            else:\n                # For solids we need to consider if the input heights are hot or cold, in order to get the density\n                # correct. There may be a better place in the initialization to determine if the block height will be\n                # interpreted as hot dimensions, which would allow us to not have to pass the case settings this far.\n                dLL = mat.linearExpansionFactor(Tc=comp.temperatureInC, T0=comp.inputTemperatureInC)\n                if inputHeightsConsideredHot:\n                    f = 1.0 / (1 + dLL) ** 2\n                else:\n                    f = 1.0 / (1 + dLL) ** 3\n\n                scaledDensity = comp.density() / f\n                densityRatio = densityFromCustomIsotopic / scaledDensity\n\n            comp.changeNDensByFactor(densityRatio)\n\n            runLog.important(\n                f\"A custom material density was specified in the custom isotopics for non-custom material {mat}. 
The \"\n                f\"component density has been altered to {comp.density()} at temperature {comp.temperatureInC} C\",\n                single=True,\n            )\n\n    def _conformKwargs(self, blueprint, matMods):\n        \"\"\"This method gets the relevant kwargs to construct the component.\"\"\"\n        kwargs = {\"mergeWith\": self.mergeWith or \"\", \"isotopics\": self.isotopics or \"\"}\n\n        for attr in self.attributes:  # yamlize magic\n            val = attr.get_value(self)\n\n            if attr.name == \"shape\" or val == attr.default:\n                continue\n            elif attr.name == \"material\":\n                # value is a material instance\n                value = self._constructMaterial(blueprint, matMods)\n            elif attr.name == \"latticeIDs\":\n                # Don't pass latticeIDs on to the component constructor.\n                # They're applied during block construction.\n                continue\n            elif attr.name == \"flags\":\n                # Don't pass these to the component constructor. These are used to\n                # override the flags derived from the type, if present.\n                continue\n            else:\n                value = attr.get_value(self)\n\n            # Keep digging until the actual value is found. 
This is a bit of a hack to get around an issue in\n            # yamlize/ComponentDimension where Dimensions can end up chained.\n            while isinstance(value, ComponentDimension):\n                value = value.value\n\n            kwargs[attr.name] = value\n\n        return kwargs\n\n    def _constructMaterial(self, blueprint, matMods):\n        nucsInProblem = blueprint.allNuclidesInProblem\n        # make material with defaults\n        mat = materials.resolveMaterialClassByName(self.material)()\n\n        if self.isotopics is not None:\n            # Apply custom isotopics before processing input mods so\n            # the input mods have the final word\n            blueprint.customIsotopics.apply(mat, self.isotopics)\n\n        # add mass fraction custom isotopics info, since some material modifications need to see them e.g. in the base\n        # Material.applyInputParams\n        matMods.update({\"customIsotopics\": {k: v.massFracs for k, v in blueprint.customIsotopics.items()}})\n        if len(matMods) > 1:\n            # don't apply if only customIsotopics is in there\n            try:\n                # update material with updated input params from blueprints file.\n                mat.applyInputParams(**matMods)\n            except TypeError as ee:\n                errorMessage = ee.args[0]\n                if \"got an unexpected keyword argument\" in errorMessage:\n                    # This component does not accept material modification inputs of the names passed in\n                    # Keep going since the modification could work for another component\n                    pass\n                else:\n                    raise ValueError(\n                        f\"Something went wrong in applying the material modifications {matMods} \"\n                        f\"to component {self.name}.\\nError message is: \\n{errorMessage}.\"\n                    )\n\n        expandElementals(mat, blueprint)\n\n        missing = 
set(mat.massFrac.keys()).difference(nucsInProblem)\n\n        if missing:\n            raise ValueError(\n                f\"The nuclides {missing} are present in material {mat} by compositions, but are not specified in the \"\n                \"`nuclide flags` section of the input file. They need to be added, or custom isotopics need to be \"\n                \"applied.\"\n            )\n\n        return mat\n\n\ndef expandElementals(mat, blueprint):\n    \"\"\"\n    Expand elements to isotopics during material construction.\n\n    Does so as required by modeling options or user input.\n\n    See Also\n    --------\n    armi.reactor.blueprints.Blueprints._resolveNuclides\n        Sets the metadata defining this behavior.\n    \"\"\"\n    elementExpansionPairs = []\n    for elementToExpand in blueprint.elementsToExpand:\n        if elementToExpand.symbol not in mat.massFrac:\n            continue\n        nucFlags = blueprint.nuclideFlags.get(elementToExpand.symbol)\n        nuclidesToBecome = (\n            [nuclideBases.byName[nn] for nn in nucFlags.expandTo] if (nucFlags and nucFlags.expandTo) else None\n        )\n        elementExpansionPairs.append((elementToExpand, nuclidesToBecome))\n\n    densityTools.expandElementalMassFracsToNuclides(mat.massFrac, elementExpansionPairs)\n\n\ndef insertDepletableNuclideKeys(c, blueprint):\n    \"\"\"\n    Auto update number density keys on all DEPLETABLE components.\n\n    .. impl:: Insert any depletable blueprint flags onto this component.\n        :id: I_ARMI_BP_NUC_FLAGS0\n        :implements: R_ARMI_BP_NUC_FLAGS\n\n        This is called during the component construction process for each component from within\n        :py:meth:`~armi.reactor.blueprints.componentBlueprint.ComponentBlueprint.construct`.\n\n        For a given initialized component, check its flags to determine if it has been marked as\n        depletable. 
If it is, use\n        :py:func:`~armi.nucDirectory.nuclideBases.initReachableActiveNuclidesThroughBurnChain` to\n        apply the user-specifications in the \"nuclide flags\" section of the blueprints to the\n        Component such that all active isotopes and derivatives of those isotopes in the burn chain\n        are initialized to have an entry in the component's ``nuclides`` array.\n\n        Note that certain case settings, including ``fpModel`` and ``fpModelLibrary``, may trigger\n        modifications to the active nuclides specified by the user in the \"nuclide flags\" section of\n        the blueprints.\n\n    Notes\n    -----\n    This should be moved to a neutronics/depletion plugin hook but requires some refactoring in how\n    active nuclides and reactors are initialized first.\n\n    See Also\n    --------\n    armi.physics.neutronics.isotopicDepletion.isotopicDepletionInterface.isDepletable :\n        contains design docs describing the ``DEPLETABLE`` flagging situation\n    \"\"\"\n    if c.hasFlags(Flags.DEPLETABLE):\n        # depletable components, whether auto-derived or explicitly flagged need expanded nucs\n        (\n            c.p.nuclides,\n            c.p.numberDensities,\n        ) = nuclideBases.initReachableActiveNuclidesThroughBurnChain(\n            c.p.nuclides,\n            c.p.numberDensities,\n            blueprint.activeNuclides,\n        )\n\n\nclass ComponentKeyedList(yamlize.KeyedList):\n    \"\"\"\n    An OrderedDict of ComponentBlueprints keyed on the name.\n\n    This is used within the ``components:`` main entry of the blueprints.\n\n    This is *not* (yet) used when components are defined within a block blueprint. 
That is handled in the blockBlueprint\n    construct method.\n    \"\"\"\n\n    item_type = ComponentBlueprint\n    key_attr = ComponentBlueprint.name\n\n\nclass GroupedComponent(yamlize.Object):\n    \"\"\"\n    A pointer to a component with a multiplicity to be used in a ComponentGroup.\n\n    Multiplicity can be a fraction (e.g. to set volume fractions)\n    \"\"\"\n\n    name = yamlize.Attribute(type=str)\n    mult = yamlize.Attribute(type=float)\n\n\nclass ComponentGroup(yamlize.KeyedList):\n    \"\"\"\n    A single component group containing multiple GroupedComponents.\n\n    Example\n    -------\n    triso:\n      kernel:\n        mult: 0.7\n      buffer:\n        mult: 0.3\n    \"\"\"\n\n    group_name = yamlize.Attribute(type=str)\n    key_attr = GroupedComponent.name\n    item_type = GroupedComponent\n\n\nclass ComponentGroups(yamlize.KeyedList):\n    \"\"\"\n    A list of component groups.\n\n    This is used in the top-level blueprints file.\n    \"\"\"\n\n    key_attr = ComponentGroup.group_name\n    item_type = ComponentGroup\n\n\n# This import-time magic requires all possible components be imported before this module imports. The intent was to make\n# registration basically automatic. 
This has proven to be quite problematic and will be replaced with an explicit\n# plugin-level component registration system.\nfor dimName in set([kw for cType in components.ComponentType.TYPES.values() for kw in cType.DIMENSION_NAMES]):\n    setattr(\n        ComponentBlueprint,\n        dimName,\n        yamlize.Attribute(name=dimName, type=ComponentDimension, default=None),\n    )\n\n\ndef _setComponentFlags(component, flags, blueprint):\n    \"\"\"Update component flags based on user input in blueprint.\"\"\"\n    # The component __init__ calls setType(), which gives us our initial guess at what the flags should be.\n    if flags is not None:\n        # override the flags from __init__ with the ones from the blueprint\n        component.p.flags = Flags.fromString(flags)\n    else:\n        # Potentially add the DEPLETABLE flag. Don't do this if we set flags explicitly.\n        # WARNING: If you add flags explicitly, it will turn off depletion so be sure to add depletable to your list of\n        # flags if you expect depletion\n        if any(nuc in blueprint.activeNuclides for nuc in component.getNuclides()):\n            component.p.flags |= Flags.DEPLETABLE\n"
  },
  {
    "path": "armi/reactor/blueprints/gridBlueprint.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nInput definitions for Grids.\n\nGrids are given names which can be referred to on other input structures (like core maps and pin\nmaps).\n\nThese are in turn interpreted into concrete things at lower levels. For example:\n\n* Core map lattices get turned into :py:mod:`armi.reactor.grids`, which get set to\n  ``core.spatialGrid``.\n* Block pin map lattices get applied to the components to provide some subassembly spatial details.\n\nLattice inputs here are floating in space. Specific dimensions and anchor points are handled by the\nlower-level objects definitions. 
This is intended to maximize lattice reusability.\n\nSee Also\n--------\narmi.utils.asciimaps\n    Description of the ascii maps and their formats.\n\nExamples\n--------\n::\n\n    grids:\n        control:\n            geom: hex\n            symmetry: full\n            lattice map: |\n               - - - - - - - - - 1 1 1 1 1 1 1 1 1 4\n                - - - - - - - - 1 1 1 1 1 1 1 1 1 1 1\n                 - - - - - - - 1 8 1 1 1 1 1 1 1 1 1 1\n                  - - - - - - 1 1 1 1 1 1 1 1 1 1 1 1 1\n                   - - - - - 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n                    - - - - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n                     - - - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n                      - - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n                       - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n                        7 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1\n                         1 1 1 1 1 1 1 1 2 1 1 1 1 1 1 1 1 1\n                          1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n                           1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n                            1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n                             1 1 1 1 1 1 1 1 1 1 1 1 1 1\n                              1 1 1 1 1 1 1 1 1 3 1 1 1\n                               1 1 1 1 1 1 1 1 1 1 1 1\n                                1 6 1 1 1 1 1 1 1 1 1\n                                 1 1 1 1 1 1 1 1 1 1\n        sfp:\n            geom: cartesian\n            lattice pitch:\n                x: 25.0\n                y: 25.0\n            lattice map: |\n                2 2 2 2 2\n                2 1 1 1 2\n                2 1 3 1 2\n                2 3 1 1 2\n                2 2 2 2 2\n\n        core:\n            geom: hex\n            symmetry: third periodic\n            origin:\n                x: 0.0\n                y: 10.1\n                z: 1.1\n            lattice map: |\n                -     SH   SH   SH\n                -  SH   SH   SH   SH\n                 SH   RR   RR   RR   SH\n        
           RR   RR   RR   RR   SH\n                 RR   RR   RR   RR   RR   SH\n                   RR   OC   OC   RR   RR   SH\n                     OC   OC   OC   RR   RR   SH\n                   OC   OC   OC   OC   RR   RR\n                     OC   MC   OC   OC   RR   SH\n                       MC   MC   PC   OC   RR   SH\n                     MC   MC   MC   OC   OC   RR\n                       MC   MC   MC   OC   RR   SH\n                         PC   MC   MC   OC   RR   SH\n                       MC   MC   MC   MC   OC   RR\n                         IC   MC   MC   OC   RR   SH\n                           IC   US   MC   OC   RR\n                         IC   IC   MC   OC   RR   SH\n                           IC   MC   MC   OC   RR\n                         IC   IC   MC   PC   RR   SH\n\n\"\"\"\n\nimport copy\nimport itertools\nfrom io import StringIO\nfrom typing import Tuple\n\nimport numpy as np\nimport yamlize\nfrom ruamel.yaml import scalarstring\n\nfrom armi import runLog\nfrom armi.reactor import blueprints, geometry, grids\nfrom armi.utils import asciimaps\nfrom armi.utils.customExceptions import InputError\nfrom armi.utils.mathematics import isMonotonic\n\n\nclass Triplet(yamlize.Object):\n    \"\"\"A x, y, z triplet for coordinates or lattice pitch.\"\"\"\n\n    x = yamlize.Attribute(type=float)\n    y = yamlize.Attribute(type=float, default=0.0)\n    z = yamlize.Attribute(type=float, default=0.0)\n\n    def __init__(self, x=0.0, y=0.0, z=0.0):\n        self.x = x\n        self.y = y\n        self.z = z\n\n\nclass Pitch(yamlize.Object):\n    \"\"\"A x, y, z triplet or triangular hex pitch for coordinates or lattice pitch for hexagonal grids.\"\"\"\n\n    hex = yamlize.Attribute(type=float, default=0.0)\n    x = yamlize.Attribute(type=float, default=0.0)\n    y = yamlize.Attribute(type=float, default=0.0)\n    z = yamlize.Attribute(type=float, default=0.0)\n\n    def __init__(self, hexPitch=0.0, x=0.0, y=0.0, z=0.0):\n        \"\"\"\n        
Parameters\n        ----------\n        hex : float, optional\n            Triangular/hex lattice pitch\n        x : float, optional\n            Cartesian grid: pitch in the x direction\n            Hexagonal grid: interpreted as hex lattice pitch\n        y : float, optional\n            Cartesian grid: pitch in the y direction\n        z : float, optional\n            Pitch in the z direction\n\n        Raises\n        ------\n        InputError\n            * If a `hexPitch` and `x` or `y` pitch are provided simultaneously.\n            * If no non-zero value is provided for any parameter.\n        \"\"\"\n        if hexPitch and (x or y):\n            raise InputError(\"Cannot mix `hex` with `x` and `y` attributes of `latticePitch`.\")\n\n        if not any([hexPitch, x, y, z]):\n            raise InputError(\"`lattice pitch` must have at least one non-zero attribute! Check the blueprints.\")\n\n        self.hex = hexPitch or x\n        self.x = x\n        self.y = y\n        self.z = z\n\n\nclass GridBlueprint(yamlize.Object):\n    \"\"\"\n    A grid input blueprint.\n\n    These directly build Grid objects and contain information about how to populate the Grid with\n    child ArmiObjects for the Reactor Model.\n\n    The grids get origins either from a parent block (for pin lattices) or from a System (for Cores,\n    SFPs, and other components).\n\n    .. 
impl:: Define a lattice map in reactor core.\n        :id: I_ARMI_BP_GRID\n        :implements: R_ARMI_BP_GRID\n\n        Defines a yaml construct that allows the user to specify a grid from within their blueprints\n        file, including a name, geometry, dimensions, symmetry, and a map with the relative\n        locations of components within that grid.\n\n        Relies on the underlying infrastructure from the ``yamlize`` package for reading from text\n        files, serialization, and internal storage of the data.\n\n        Is implemented as part of a blueprints file by being used in key-value pairs within the\n        :py:class:`~armi.reactor.blueprints.gridBlueprint.Grid` class, which is imported and used as\n        an attribute within the larger :py:class:`~armi.reactor.blueprints.Blueprints` class.\n\n        Includes a ``construct`` method, which instantiates an instance of one of the subclasses of\n        :py:class:`~armi.reactor.grids.structuredgrid.StructuredGrid`. This is typically called from\n        within :py:meth:`~armi.reactor.blueprints.blockBlueprint.BlockBlueprint.construct`, which\n        then also associates the individual components in the block with locations specified in the\n        grid.\n\n    Attributes\n    ----------\n    name : str\n        The grid name\n    geom : str\n        The geometry of the grid (e.g. 'cartesian')\n    latticeMap : str\n        An asciimap representation of the lattice contents\n    latticeDimensions : Pitch\n        An x/y/z Triplet or hex pitch with grid dimensions in cm. This is used to specify a\n        uniform grid, such as Cartesian or Hex. Mutually exclusive with gridBounds.\n    gridBounds : dict\n        A dictionary containing explicit grid boundaries. Specific keys used will depend on the type\n        of grid being defined. 
Mutually exclusive with latticeDimensions.\n    symmetry : str\n        A string defining the symmetry mode of the grid\n    gridContents : dict\n        A {(i,j): str} dictionary mapping spatialGrid indices in 2-D to string specifiers of what's\n        supposed to be in the grid.\n    orientationBOL : dict\n        A {(i,j): float} dictionary mapping spatialGrid indices in 2-D to the orientation of\n        what's supposed to be in the grid.\n    \"\"\"\n\n    name = yamlize.Attribute(key=\"name\", type=str)\n    geom = yamlize.Attribute(key=\"geom\", type=str, default=geometry.HEX)\n    latticeMap = yamlize.Attribute(key=\"lattice map\", type=str, default=None)\n    latticeDimensions = yamlize.Attribute(key=\"lattice pitch\", type=Pitch, default=None)\n    gridBounds = yamlize.Attribute(key=\"grid bounds\", type=dict, default=None)\n    symmetry = yamlize.Attribute(\n        key=\"symmetry\",\n        type=str,\n        default=str(geometry.SymmetryType(geometry.DomainType.THIRD_CORE, geometry.BoundaryType.PERIODIC)),\n    )\n    # gridContents is the final form of grid contents information; it is set regardless of how the\n    # input is read. When writing, we attempt to preserve the input mode and write ascii map if that\n    # was what was originally provided.\n    gridContents = yamlize.Attribute(key=\"grid contents\", type=dict, default=None)\n    # allowing us to add custom orientations to the objects on this gritd, at BOL\n    orientationBOL = yamlize.Attribute(key=\"orientationBOL\", type=dict, default=None)\n\n    @gridContents.validator\n    def gridContents(self, value):\n        if value is None:\n            return True\n        if not all(isinstance(key, tuple) for key in value.keys()):\n            raise InputError(\"Grid contents Keys need to be like [i, j]. 
Check the blueprints.\")\n\n        return True\n\n    @orientationBOL.validator\n    def orientationBOL(self, value):\n        if value is None:\n            return True\n        if not all(isinstance(key, tuple) for key in value.keys()):\n            raise InputError(\"Orientation BOL Keys need to be like [i, j]. Check the blueprints.\")\n\n        return True\n\n    def __init__(\n        self,\n        name=None,\n        geom=geometry.HEX,\n        latticeMap=None,\n        symmetry=str(geometry.SymmetryType(geometry.DomainType.THIRD_CORE, geometry.BoundaryType.PERIODIC)),\n        gridContents=None,\n        orientationBOL=None,\n        gridBounds=None,\n    ):\n        \"\"\"\n        A Grid blueprint.\n\n        Notes\n        -----\n        yamlize does not call an ``__init__`` method, instead it uses ``__new__`` and setattr this\n        is only needed for when you want to make this object from a non-YAML source.\n\n        Warning\n        -------\n        This is a Yamlize object, so ``__init__`` never really gets called. 
Only ``__new__`` does.\n        \"\"\"\n        self.name = name\n        self.geom = str(geom)\n        self.latticeMap = latticeMap\n        self._readFromLatticeMap = False\n        self.symmetry = str(symmetry)\n        self.gridContents = gridContents\n        self.orientationBOL = orientationBOL\n        self.gridBounds = gridBounds\n\n    @property\n    def readFromLatticeMap(self):\n        \"\"\"\n        This is implemented as a property, since as a Yamlize object, ``__init__`` is not always\n        called and we have to lazily evaluate its default value.\n        \"\"\"\n        return getattr(self, \"_readFromLatticeMap\", False)\n\n    @readFromLatticeMap.setter\n    def readFromLatticeMap(self, value):\n        self._readFromLatticeMap = value\n\n    def construct(self):\n        \"\"\"Build a Grid from a grid definition.\"\"\"\n        self._readGridContents()\n        grid = self._constructSpatialGrid()\n        return grid\n\n    def _constructSpatialGrid(self):\n        \"\"\"\n        Build spatial grid.\n\n        If you do not enter ``latticeDimensions``, a unit grid will be produced which must be adjusted to the proper\n        dimensions (often by inspection of children) at a later time.\n        \"\"\"\n        symmetry = geometry.SymmetryType.fromStr(self.symmetry) if self.symmetry else None\n        geom = self.geom\n        maxIndex = self._getMaxIndex()\n        runLog.extra(f\"Creating the spatial grid {self.name}\", single=True)\n        if geom in (geometry.RZT, geometry.RZ):\n            if self.gridBounds is None:\n                # This check is regrettably late. 
It would be nice if we could validate that bounds\n                # are provided if R-Theta mesh is being used.\n                raise InputError(\n                    f\"Grid bounds must be provided for `{self.name}` to specify a grid with r-theta components.\"\n                )\n            for key in (\"theta\", \"r\"):\n                if key not in self.gridBounds:\n                    raise InputError(f\"{key} grid bounds were not provided for `{self.name}`.\")\n\n            # convert to list, otherwise it is a CommentedSeq\n            theta = np.array(self.gridBounds[\"theta\"])\n            radii = np.array(self.gridBounds[\"r\"])\n            for lst, name in ((theta, \"theta\"), (radii, \"radii\")):\n                if not isMonotonic(lst, \"<\"):\n                    raise InputError(\n                        f\"Grid bounds for {self.name}:{name} is not sorted or contains duplicates. Check blueprints.\"\n                    )\n            spatialGrid = grids.ThetaRZGrid(bounds=(theta, radii, (0.0, 0.0)))\n        if geom in (geometry.HEX, geometry.HEX_CORNERS_UP):\n            if not self.latticeDimensions:\n                pitch = 1.0\n            else:\n                ld = self.latticeDimensions\n                if ld.hex and (ld.x or ld.y):\n                    raise InputError(\"Cannot mix `hex` with `x` and `y` attributes of `latticePitch`.\")\n\n                if not any([ld.hex, ld.x, ld.y, ld.z]):\n                    raise InputError(\"`lattice pitch` must have at least one non-zero attribute! 
Check the blueprints.\")\n\n                pitch = ld.hex or ld.x\n            # add 2 for potential dummy assems\n            spatialGrid = grids.HexGrid.fromPitch(\n                pitch,\n                numRings=maxIndex + 2,\n                cornersUp=geom == geometry.HEX_CORNERS_UP,\n            )\n        elif geom == geometry.CARTESIAN:\n            # if full core or not cut-off, bump the first assembly from the center of the mesh into\n            # the positive values.\n            xw, yw = (self.latticeDimensions.x, self.latticeDimensions.y) if self.latticeDimensions else (1.0, 1.0)\n\n            # Specifically in the case of grid blueprints, where we have grid contents available, we\n            # can also infer \"through center\" based on the contents. Note that the \"through center\"\n            # symmetry check cannot be performed when the grid contents has not been provided (i.e.,\n            # None or empty).\n            if self.gridContents and symmetry.domain == geometry.DomainType.FULL_CORE:\n                nx, ny = _getGridSize(self.gridContents.keys())\n                if nx == ny and nx % 2 == 1:\n                    symmetry.isThroughCenterAssembly = True\n\n            isOffset = symmetry is not None and not symmetry.isThroughCenterAssembly\n            spatialGrid = grids.CartesianGrid.fromRectangle(xw, yw, numRings=maxIndex + 1, isOffset=isOffset)\n\n        runLog.debug(\"Built grid: {}\".format(spatialGrid))\n        # set geometric metadata on spatialGrid. This information is needed in various parts of the\n        # code and is best encapsulated on the grid itself rather than on the container state.\n        spatialGrid._geomType: str = str(self.geom)\n        self.symmetry = str(symmetry)\n        spatialGrid._symmetry: str = self.symmetry\n        return spatialGrid\n\n    def _getMaxIndex(self):\n        \"\"\"\n        Find the max index in the grid contents.\n\n        Used to limit the size of the spatialGrid. 
Used to be called maxNumRings.\n        \"\"\"\n        if self.gridContents:\n            return max(itertools.chain(*zip(*self.gridContents.keys())))\n        else:\n            return 6\n\n    def expandToFull(self):\n        \"\"\"\n        Unfold the blueprints to represent full symmetry.\n\n        Notes\n        -----\n        This relatively rudimentary, and copies entries from the currently-represented domain to\n        their corresponding locations in full symmetry. This may not produce the desired behavior\n        for some scenarios, such as when expanding fuel shuffling paths or the like. Future work may\n        make this more sophisticated.\n        \"\"\"\n        if geometry.SymmetryType.fromAny(self.symmetry).domain == geometry.DomainType.FULL_CORE:\n            return\n\n        # fill the new grid contents\n        grid = self.construct()\n        self._expandToFullOrientationBOL(grid)\n\n        newContents = copy.copy(self.gridContents)\n        for idx, contents in self.gridContents.items():\n            equivs = grid.getSymmetricEquivalents(idx)\n            for idx2 in equivs:\n                newContents[idx2] = contents\n\n        self.gridContents = newContents\n\n        # set the grid symmetry\n        split = geometry.THROUGH_CENTER_ASSEMBLY in self.symmetry\n        self.symmetry = str(\n            geometry.SymmetryType(\n                geometry.DomainType.FULL_CORE,\n                geometry.BoundaryType.NO_SYMMETRY,\n                throughCenterAssembly=split,\n            )\n        )\n\n    def _expandToFullOrientationBOL(self, grid):\n        \"\"\"Set the orientationBOL parameter during expandToFulLCore().\n\n        Parameters\n        ----------\n        grid : Grid\n            Spatial grid for the current ARMI object.\n        \"\"\"\n        if self.orientationBOL is None:\n            return\n\n        newOrientations = copy.copy(self.orientationBOL)\n\n        for idx, contents in self.gridContents.items():\n         
   equivs = grid.getSymmetricEquivalents(idx)\n            angle = 360.0 / (len(equivs) + 1)\n            for count, idx2 in enumerate(equivs):\n                loc = grid.indicesToRingPos(*idx)\n                if loc in self.orientationBOL:\n                    loc2 = grid.indicesToRingPos(*idx2)\n                    newOrientation = self.orientationBOL[loc] + (count + 1) * angle\n                    newOrientations[loc2] = newOrientation % 360.0\n\n        self.orientationBOL = newOrientations\n\n    def _readGridContents(self):\n        \"\"\"\n        Read the specifiers as a function of grid position.\n\n        The contents can either be provided as:\n\n        * A dict mapping indices to specifiers (default output of this)\n        * An asciimap\n\n        The output will always be stored in ``self.gridContents``.\n        \"\"\"\n        if self.gridContents:\n            return\n        elif self.latticeMap:\n            self._readGridContentsLattice()\n\n        if self.gridContents is None:\n            # Make sure we have at least something; clients shouldn't have to worry about whether\n            # gridContents exist at all.\n            self.gridContents = dict()\n\n    def _readGridContentsLattice(self):\n        \"\"\"Read an ascii map of grid contents.\n\n        This update the gridContents attribute, which is a dict mapping grid i,j,k indices to textual specifiers\n        (e.g. 
``IC``)).\n        \"\"\"\n        self.readFromLatticeMap = True\n        symmetry = geometry.SymmetryType.fromStr(self.symmetry)\n        geom = geometry.GeomType.fromStr(self.geom)\n        latticeCls = asciimaps.asciiMapFromGeomAndDomain(self.geom, symmetry.domain)\n        asciimap = latticeCls()\n        asciimap.readAscii(self.latticeMap)\n        self.gridContents = dict()\n\n        iOffset = 0\n        jOffset = 0\n        if geom == geometry.GeomType.CARTESIAN and symmetry.domain == geometry.DomainType.FULL_CORE:\n            # asciimaps is not smart about where the center should be, so we need to offset\n            # apropriately to get (0,0) in the middle\n            nx, ny = _getGridSize(asciimap.keys())\n\n            # turns out this works great for even and odd cases. love it when integer math works in your favor\n            iOffset = int(-nx / 2)\n            jOffset = int(-ny / 2)\n\n        for (i, j), spec in asciimap.items():\n            if spec == \"-\":\n                # skip placeholders\n                continue\n            self.gridContents[i + iOffset, j + jOffset] = spec\n\n    def getLocators(self, spatialGrid: grids.Grid, latticeIDs: list):\n        \"\"\"\n        Return spatialLocators in grid corresponding to lattice IDs.\n\n        This requires a fully-populated ``gridContents`` attribute.\n        \"\"\"\n        if latticeIDs is None:\n            return []\n        if self.gridContents is None:\n            return []\n        # tried using yamlize to coerce ints to strings but failed after much struggle, so we just\n        # auto-convert here to deal with int-like specifications. 
(yamlize.StrList fails to coerce\n        # when ints are provided)\n        latticeIDs = [str(i) for i in latticeIDs]\n        locators = []\n        for (i, j), spec in self.gridContents.items():\n            locator = spatialGrid[i, j, 0]\n            if spec in latticeIDs:\n                locators.append(locator)\n\n        return locators\n\n    def getMultiLocator(self, spatialGrid, latticeIDs):\n        \"\"\"Create a MultiIndexLocation based on lattice IDs.\"\"\"\n        spatialLocator = grids.MultiIndexLocation(grid=spatialGrid)\n        spatialLocator.extend(self.getLocators(spatialGrid, latticeIDs))\n        return spatialLocator\n\n\nclass Grids(yamlize.KeyedList):\n    item_type = GridBlueprint\n    key_attr = GridBlueprint.name\n\n\ndef _getGridSize(idx) -> Tuple[int, int]:\n    \"\"\"\n    Return the number of spaces between the min and max of a collection of (int, int) tuples, inclusive.\n\n    This essentially returns the number of grid locations along the i, and j dimensions, given the (i,j) indices of each\n    occupied location. This is useful for determining certain grid offset behavior.\n    \"\"\"\n    nx = max(key[0] for key in idx) - min(key[0] for key in idx) + 1\n    ny = max(key[1] for key in idx) - min(key[1] for key in idx) + 1\n\n    return nx, ny\n\n\ndef _filterOutsideDomain(gridBp):\n    \"\"\"Remove grid contents that lie outside the represented domain.\n\n    This removes extra objects; ARMI allows the user input specifiers in regions outside of the\n    represented domain, which is fine as long as the contained specifier is consistent with the\n    corresponding region in the represented domain given the symmetry condition. 
For instance, if we\n    have a 1/3-core hex model, it is typically okay for an assembly to be specified outside of the\n    first 1/3rd of the core, as long as it is the same assembly as would be there when expanding the\n    first 1/3rd into a full-core model.\n\n    However, we do not really want these hanging around, since editing the represented 1/Nth of the\n    core will probably lead to consistency issues, so we remove them.\n    \"\"\"\n    grid = gridBp.construct()\n\n    contentsToRemove = {\n        idx\n        for idx, _contents in gridBp.gridContents.items()\n        if not grid.locatorInDomain(grid[idx + (0,)], symmetryOverlap=False)\n    }\n    for idx in contentsToRemove:\n        symmetrics = grid.getSymmetricEquivalents(idx)\n        for symmetric in symmetrics:\n            if symmetric in gridBp.gridContents:\n                if gridBp.gridContents[symmetric] != gridBp.gridContents[idx]:\n                    raise ValueError(\n                        \"The contents at `{}` (`{}`) in grid `{}` is not the \"\n                        \"same as it's symmetric equivalent at `{}` (`{}`). \"\n                        \"Check your grid blueprints for symmetry.\".format(\n                            idx,\n                            gridBp.gridContents[idx],\n                            gridBp.name,\n                            symmetric,\n                            gridBp.gridContents[symmetric],\n                        )\n                    )\n\n        del gridBp.gridContents[idx]\n\n\ndef saveToStream(stream, bluep, full=False, tryMap=False):\n    \"\"\"\n    Save the blueprints to the passed stream.\n\n    This can save either the entire blueprints, or just the `grids:` section of the blueprints, based on the passed\n    ``full`` argument. Saving just the grid blueprints can be useful when cobbling blueprints together with !include\n    flags.\n\n    .. 
impl:: Write a blueprint file from a blueprint object.\n        :id: I_ARMI_BP_TO_DB\n        :implements: R_ARMI_BP_TO_DB\n\n        First makes a copy of the blueprints that are passed in. Then modifies any grids specified in the blueprints\n        into a canonical lattice map style, if needed. Then uses the ``dump`` method that is inherent to all ``yamlize``\n        subclasses to write the blueprints to the given ``stream`` object.\n\n        If called with the ``full`` argument, the entire blueprints is dumped. If not, only the grids portion is dumped.\n\n    Parameters\n    ----------\n    stream :\n        file output stream of some kind\n    bluep : armi.reactor.blueprints.Blueprints, or Grids\n    full : bool\n        Is this a full output file, or just a partial/grids?\n    tryMap : bool\n        regardless of input form, attempt to output as a lattice map\n    \"\"\"\n    # To save, we want to try our best to output our grid blueprints in the lattice map style. However, we do not want\n    # to wreck the state that the current blueprints are in. So we make a copy and do some manipulations to try to\n    # canonicalize it and save that, leaving the original blueprints unmolested.\n    bp = copy.deepcopy(bluep)\n\n    if isinstance(bp, blueprints.Blueprints):\n        gridDesigns = bp.gridDesigns\n    elif isinstance(bp, blueprints.Grids):\n        gridDesigns = bp\n    else:\n        raise TypeError(f\"Expected Blueprints or Grids, got {type(bp)}\")\n\n    for gridDesignType, gridDesign in gridDesigns.items():\n        # The core equilibrium path should be put into the grid contents rather than a lattice map until we write a\n        # string-> tuple parser for reading it back in. 
Skip this type of grid.\n        if gridDesignType == \"coreEqPath\":\n            continue\n        _filterOutsideDomain(gridDesign)\n\n        if not gridDesign.gridContents:\n            # there is no grid, so there must be lattice, and that goes to output\n            continue\n\n        if gridDesign.readFromLatticeMap or tryMap:\n            symmetry = geometry.SymmetryType.fromStr(gridDesign.symmetry)\n\n            aMap = asciimaps.asciiMapFromGeomAndDomain(gridDesign.geom, symmetry.domain)()\n            try:\n                if gridDesign.latticeMap:\n                    # Try to use the lattice map first, it was the original source of truth.\n                    aMap.readAscii(gridDesign.latticeMap)\n                else:\n                    # If there is no original lattice map, use the current grid of data.\n                    aMap.asciiLabelByIndices = {(key[0], key[1]): val for key, val in gridDesign.gridContents.items()}\n                    aMap.gridContentsToAscii()\n            except Exception as e:\n                runLog.warning(\n                    \"The `lattice map` for the current assembly arrangement cannot be written. Defaulting to using the \"\n                    f\"`grid contents` dictionary instead. Exception: {e}\"\n                )\n                aMap = None\n\n            if aMap is not None:\n                # If there is an ascii map available then use it to fill out the contents of the lattice map section of\n                # the grid design. 
This also clears out the grid contents so there is not duplicate data.\n                gridDesign.gridContents = None\n                mapString = StringIO()\n                aMap.writeAscii(mapString)\n                gridDesign.latticeMap = scalarstring.LiteralScalarString(mapString.getvalue())\n            else:\n                gridDesign.latticeMap = None\n\n        else:\n            # Grid contents were supplied as a dictionary, so we shouldn't even have a latticeMap, unless it was set\n            # explicitly in code somewhere. Discard if there is one.\n            gridDesign.latticeMap = None\n\n    toSave = bp if full else gridDesigns\n\n    # NOTE: type(bp) here used because importing Blueprints causes a circular import\n    type(toSave).dump(toSave, stream)\n"
  },
  {
    "path": "armi/reactor/blueprints/isotopicOptions.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nDefines nuclide flags and custom isotopics via input.\n\nNuclide flags control meta-data about nuclides. Custom isotopics allow specification of arbitrary isotopic compositions.\n\"\"\"\n\nimport yamlize\n\nfrom armi import materials, runLog\nfrom armi.nucDirectory import elements, nucDir, nuclideBases\nfrom armi.physics.neutronics.fissionProductModel.fissionProductModelSettings import (\n    CONF_FISSION_PRODUCT_LIBRARY_NAME,\n    CONF_FP_MODEL,\n)\nfrom armi.physics.neutronics.settings import (\n    CONF_MCNP_LIB_BASE,\n    CONF_NEUTRONICS_KERNEL,\n    CONF_XS_KERNEL,\n)\nfrom armi.utils import densityTools, units\nfrom armi.utils.customExceptions import InputError\n\n\nclass NuclideFlag(yamlize.Object):\n    \"\"\"\n    Defines whether or not each nuclide is included in the burn chain and cross sections.\n\n    Also controls which nuclides get expanded from elementals to isotopics and which natural\n    isotopics to exclude (if any). Oftentimes, cross section library creators include some natural\n    isotopes but not all. For example, it is common to include O16 but not O17 or O18. Each code has\n    slightly different interpretations of this so we give the user full control here.\n\n    We also try to provide useful defaults.\n\n    There are lots of complications that can arise in these choices. 
It makes reasonable sense to\n    use elemental compositions for things that are typically used  without isotopic modifications\n    (Fe, O, Zr, Cr, Na). If we choose to expand some or all of these to isotopics at initialization\n    based on cross section library requirements, a single case will work fine with a given lattice\n    physics option. However, restarting from that case with different cross section needs is\n    challenging.\n\n    .. impl:: The blueprint object that represents a nuclide flag.\n        :id: I_ARMI_BP_NUC_FLAGS1\n        :implements: R_ARMI_BP_NUC_FLAGS\n\n        This class creates a yaml interface for the user to specify in their blueprints which\n        isotopes should be depleted. It is incorporated into the \"nuclide flags\" section of a\n        blueprints file by being included as key-value pairs within the\n        :py:class:`~armi.reactor.blueprints.isotopicOptions.NuclideFlags` class, which is in turn\n        included into the overall blueprints within :py:class:`~armi.reactor.blueprints.Blueprints`.\n\n        This class includes a boolean ``burn`` attribute which can be specified for any nuclide.\n        This attribute is examined by the\n        :py:meth:`~armi.reactor.blueprints.isotopicOptions.NuclideFlag.fileAsActiveOrInert` method\n        to sort the nuclides into sets of depletable or not, which is typically called during\n        construction of assemblies in :py:meth:`~armi.reactor.blueprints.Blueprints.constructAssem`.\n\n        Note that while the ``burn`` attribute can be set by the user in the blueprints, other\n        methods may also set it based on case settings (see, for instance,\n        :py:func:`~armi.reactor.blueprints.isotopicOptions.genDefaultNucFlags`,\n        :py:func:`~armi.reactor.blueprints.isotopicOptions.autoUpdateNuclideFlags`, and\n        :py:func:`~armi.reactor.blueprints.isotopicOptions.getAllNuclideBasesByLibrary`).\n\n    Attributes\n    ----------\n    nuclideName : str\n        
The name of the nuclide\n    burn : bool\n        True if this nuclide should be added to the burn chain. If True, all reachable nuclides via\n        transmutation and decay must be included as well.\n    xs : bool\n        True if this nuclide should be included in the cross section libraries. Effectively, if this\n        nuclide is in the problem at all, this should be true.\n    expandTo : list of str, optional\n        isotope nuclideNames to expand to. For example, if nuclideName is ``O`` then this could be\n        ``[\"O16\", \"O17\"]`` to expand it into those two isotopes (but not ``O18``). The nuclides will\n        be scaled up uniformly to account for any missing natural nuclides.\n    \"\"\"\n\n    nuclideName = yamlize.Attribute(type=str)\n\n    @nuclideName.validator\n    def nuclideName(self, value):\n        if value not in nuclideBases.byName and value not in elements.bySymbol:\n            allowedKeys = set(nuclideBases.byName.keys()).update(set(elements.bySymbol.keys()))\n            raise ValueError(f\"`{value}` is not a valid nuclide name, must be one of: {allowedKeys}\")\n\n    burn = yamlize.Attribute(type=bool)\n    xs = yamlize.Attribute(type=bool)\n    expandTo = yamlize.Attribute(type=yamlize.StrList, default=None)\n\n    def __init__(self, nuclideName, burn, xs, expandTo):\n        # note: yamlize does not call an __init__ method, instead it uses __new__ and setattr\n        self.nuclideName = nuclideName\n        self.burn = burn\n        self.xs = xs\n        self.expandTo = expandTo\n\n    def __repr__(self):\n        return f\"<NuclideFlag name:{self.nuclideName} burn:{self.burn} xs:{self.xs}>\"\n\n    def fileAsActiveOrInert(self, activeSet, inertSet):\n        \"\"\"\n        Given a nuclide or element name, file it as either active or inert.\n\n        If isotopic expansions are requested, include the isotopics rather than the NaturalNuclideBase, as the\n        NaturalNuclideBase will never occur in such a problem.\n        
\"\"\"\n        undefBurnChainActiveNuclides = set()\n        nb = nuclideBases.byName[self.nuclideName]\n        if self.expandTo:\n            nucBases = [nuclideBases.byName[nn] for nn in self.expandTo]\n            expanded = [nb.element]  # error to expand non-elements\n        else:\n            nucBases = [nb]\n            expanded = []\n\n        for nuc in nucBases:\n            if self.burn:\n                if not nuc.trans and not nuc.decays:\n                    # DUMPs and LFPs usually\n                    undefBurnChainActiveNuclides.add(nuc.name)\n                activeSet.add(nuc.name)\n            if self.xs:\n                inertSet.add(nuc.name)\n        return expanded, undefBurnChainActiveNuclides\n\n\nclass NuclideFlags(yamlize.KeyedList):\n    \"\"\"An OrderedDict of ``NuclideFlags``, keyed by their ``nuclideName``.\"\"\"\n\n    item_type = NuclideFlag\n    key_attr = NuclideFlag.nuclideName\n\n\nclass CustomIsotopic(yamlize.Map):\n    \"\"\"\n    User specified, custom isotopics input defined by a name (such as MOX), and key/pairs of nuclide\n    names and numeric values consistent with the ``input format``.\n\n    .. 
impl:: Certain material modifications will be applied using this code.\n        :id: I_ARMI_MAT_USER_INPUT2\n        :implements: R_ARMI_MAT_USER_INPUT\n\n        Defines a yaml construct that allows the user to define a custom isotopic vector from within\n        their blueprints file, including a name and key-value pairs corresponding to nuclide names\n        and their concentrations.\n\n        Relies on the underlying infrastructure from the ``yamlize`` package for reading from text\n        files, serialization, and internal storage of the data.\n\n        Is implemented as part of a blueprints file by being used in key-value pairs within the\n        :py:class:`~armi.reactor.blueprints.isotopicOptions.CustomIsotopics` class, which is\n        imported and used as an attribute within the larger\n        :py:class:`~armi.reactor.blueprints.Blueprints` class.\n\n        These isotopics are linked to a component during calls to\n        :py:meth:`~armi.reactor.blueprints.componentBlueprint.ComponentBlueprint.construct`, where\n        the name specified in the ``isotopics`` attribute of the component blueprint is searched\n        against the available ``CustomIsotopics`` defined in the \"custom isotopics\" section of the\n        blueprints. 
Once linked, the\n        :py:meth:`~armi.reactor.blueprints.isotopicOptions.CustomIsotopic.apply` method is called,\n        which adjusts the ``massFrac`` attribute of the component's material class.\n    \"\"\"\n\n    key_type = yamlize.Typed(str)\n    value_type = yamlize.Typed(float)\n    name = yamlize.Attribute(type=str)\n    inputFormat = yamlize.Attribute(key=\"input format\", type=str)\n\n    @inputFormat.validator\n    def inputFormat(self, value):\n        if value not in self._allowedFormats:\n            raise ValueError(f\"Cannot set `inputFormat` to `{value}`, must be one of: {self._allowedFormats}\")\n\n    _density = yamlize.Attribute(key=\"density\", type=float, default=None)\n\n    _allowedFormats = {\"number fractions\", \"number densities\", \"mass fractions\"}\n\n    def __new__(cls, *args):\n        self = yamlize.Map.__new__(cls, *args)\n\n        # the density as computed by source number densities\n        self._computedDensity = None\n        return self\n\n    def __init__(self, name, inputFormat, density):\n        # note: yamlize does not call an __init__ method, instead it uses __new__ and setattr\n        self._name = None\n        self.name = name\n        self._inputFormat = None\n        self.inputFormat = inputFormat\n        self.density = density\n        self.massFracs = {}\n\n    def __setitem__(self, key, value):\n        if key not in nuclideBases.byName and key not in elements.bySymbol:\n            allowedKeys = set(nuclideBases.byName.keys()).update(set(elements.bySymbol.keys()))\n            raise ValueError(f\"Key `{key}` is not valid, must be one of: {allowedKeys}\")\n\n        yamlize.Map.__setitem__(self, key, value)\n\n    @property\n    def density(self):\n        return self._computedDensity or self._density\n\n    @density.setter\n    def density(self, value):\n        if self._computedDensity is not None:\n            raise AttributeError(\"Density was computed from number densities, and should not be set 
directly.\")\n        self._density = value\n        if value is not None and value < 0:\n            raise ValueError(f\"Cannot set `density` to `{value}`, must greater than 0\")\n\n    @classmethod\n    def from_yaml(cls, loader, node, rtd):\n        \"\"\"\n        Override the ``Yamlizable.from_yaml`` to inject custom data validation logic, and complete initialization of the\n        object.\n        \"\"\"\n        self = yamlize.Map.from_yaml.__func__(cls, loader, node, rtd)\n\n        try:\n            self._initializeMassFracs()\n            self._expandElementMassFracs()\n        except Exception as ex:\n            # use a YamlizingError to get line/column of erroneous input\n            raise yamlize.YamlizingError(str(ex), node)\n\n        return self\n\n    @classmethod\n    def from_yaml_key_val(cls, loader, key_node, val_node, key_attr, rtd):\n        \"\"\"\n        Override the ``Yamlizable.from_yaml`` to inject custom data validation logic, and complete initialization of the\n        object.\n        \"\"\"\n        self = yamlize.Map.from_yaml_key_val.__func__(cls, loader, key_node, val_node, key_attr, rtd)\n\n        try:\n            self._initializeMassFracs()\n            self._expandElementMassFracs()\n        except Exception as ex:\n            # use a YamlizingError to get line/column of erroneous input\n            raise yamlize.YamlizingError(str(ex), val_node)\n\n        return self\n\n    def _initializeMassFracs(self):\n        self.massFracs = dict()  # defaults to 0.0, __init__ is not called\n\n        if any(v < 0.0 for v in self.values()):\n            raise ValueError(f\"Custom isotopic input for {self.name} is negative\")\n\n        valSum = sum(self.values())\n        if not abs(valSum - 1.0) < 1e-5 and \"fractions\" in self.inputFormat:\n            raise ValueError(f\"Fractional custom isotopic input values must sum to 1.0 in: {self.name}\")\n\n        if self.inputFormat == \"number fractions\":\n            sumNjAj = 
0.0\n\n            for nuc, nj in self.items():\n                if nj:\n                    sumNjAj += nj * nucDir.getAtomicWeight(nuc)\n\n            for nuc, value in self.items():\n                massFrac = value * nucDir.getAtomicWeight(nuc) / sumNjAj\n                self.massFracs[nuc] = massFrac\n\n        elif self.inputFormat == \"number densities\":\n            if self._density is not None:\n                raise InputError(\n                    f\"Custom isotopic `{self.name}` is over-specified. It was provided as number densities, and but \"\n                    f\"density ({self.density}) was also provided. Is the input format correct?\"\n                )\n\n            M = {\n                nuc: Ni / units.MOLES_PER_CC_TO_ATOMS_PER_BARN_CM * nucDir.getAtomicWeight(nuc)\n                for nuc, Ni in self.items()\n            }\n            densityTotal = sum(M.values())\n            if densityTotal < 0:\n                raise ValueError(\"Computed density is negative\")\n\n            for nuc, Mi in M.items():\n                self.massFracs[nuc] = Mi / densityTotal\n\n            self._computedDensity = densityTotal\n\n        elif self.inputFormat == \"mass fractions\":\n            self.massFracs = dict(self)  # as input\n\n        else:\n            raise ValueError(f\"Unrecognized custom isotopics input format {self.inputFormat}.\")\n\n    def _expandElementMassFracs(self):\n        \"\"\"\n        Expand the custom isotopics input entries that are elementals to isotopics.\n\n        This is necessary when the element name is not a elemental nuclide. Most everywhere else expects Nuclide objects\n        (or nuclide names). 
This input allows a user to enter \"U\" which would expand to the naturally occurring uranium\n        isotopics.\n\n        This is different than the isotopic expansion done for meeting user-specified modeling options (such as an\n        MC**2, or MCNP expecting elements or isotopes), because it translates the user input into something that can be\n        used later on.\n        \"\"\"\n        elementsToExpand = []\n        for nucName in self.massFracs:\n            if nucName not in nuclideBases.byName:\n                element = elements.bySymbol.get(nucName)\n                if element is not None:\n                    runLog.info(f\"Expanding custom isotopic `{self.name}` element `{nucName}` to natural isotopics\")\n                    # include all natural isotopes with None flag\n                    elementsToExpand.append((element, None))\n                else:\n                    raise InputError(f\"Unrecognized nuclide/isotope/element in input: {nucName}\")\n\n        densityTools.expandElementalMassFracsToNuclides(self.massFracs, elementsToExpand)\n\n    def apply(self, material):\n        \"\"\"\n        Apply specific isotopic compositions to a component.\n\n        Generically, materials have composition-dependent bulk properties such as mass density. Note that this operation\n        does not update these material properties. Use with care.\n\n        Parameters\n        ----------\n        material : armi.materials.material.Material\n            An ARMI Material instance.\n        \"\"\"\n        material.massFrac = dict(self.massFracs)\n        if self.density is not None:\n            if not isinstance(material, materials.Custom):\n                runLog.important(\n                    \"A custom isotopic with associated density has been specified for non-`Custom` material \"\n                    f\"{material}. 
The reference density of materials in the materials library will not be changed, but \"\n                    \"the associated components will use the density implied by the custom isotopics.\",\n                    single=True,\n                )\n                # specifically, non-Custom materials only use refDensity and dLL, mat.customDensity has no effect\n                return\n\n            material.customDensity = self.density\n\n\nclass CustomIsotopics(yamlize.KeyedList):\n    \"\"\"OrderedDict of CustomIsotopic objects, keyed by their name.\"\"\"\n\n    item_type = CustomIsotopic\n\n    key_attr = CustomIsotopic.name\n\n    # note: yamlize does not call an __init__ method, instead it uses __new__ and setattr\n\n    def apply(self, material, customIsotopicsName):\n        \"\"\"\n        Apply specific isotopic compositions to a component.\n\n        Generically, materials have composition-dependent bulk properties such as mass density. Note that this operation\n        does not update these material properties. Use with care.\n\n        Parameters\n        ----------\n        material : armi.materials.material.Material\n            Material instance to adjust.\n        customIsotopicName : str\n            String corresponding to the ``CustomIsoptopic.name``.\n        \"\"\"\n        if customIsotopicsName not in self:\n            raise KeyError(\n                \"The input custom isotopics do not include {}. The only present specifications are {}\".format(\n                    customIsotopicsName, self.keys()\n                )\n            )\n\n        custom = self[customIsotopicsName]\n        custom.apply(material)\n\n\ndef getDefaultNuclideFlags():\n    \"\"\"\n    Return a default set of nuclides to model and deplete.\n\n    Notes\n    -----\n    The nuclideFlags input on blueprints has confused new users and is infrequently changed. It will be moved to be a\n    user setting, but in any case a reasonable default should be provided. 
We will by default model medium-lived and\n    longer actinides between U234 and CM247.\n\n    We will include B10 and B11 without depletion, sodium, and structural elements.\n\n    We will include LFPs with depletion.\n    \"\"\"\n    nuclideFlags = {}\n    actinides = {\n        \"U\": [234, 235, 236, 238],\n        \"NP\": [237, 238],\n        \"PU\": [236] + list(range(238, 243)),\n        \"AM\": range(241, 244),\n        \"CM\": range(242, 248),\n    }\n\n    for el, masses in actinides.items():\n        for mass in masses:\n            nuclideFlags[f\"{el}{mass}\"] = {\"burn\": True, \"xs\": True, \"expandTo\": None}\n\n    for fp in [35, 38, 39, 40, 41]:\n        nuclideFlags[f\"LFP{fp}\"] = {\"burn\": True, \"xs\": True, \"expandTo\": None}\n\n    for dmp in [1, 2]:\n        nuclideFlags[f\"DUMP{dmp}\"] = {\"burn\": True, \"xs\": True, \"expandTo\": None}\n\n    for boron in [10, 11]:\n        nuclideFlags[f\"B{boron}\"] = {\"burn\": False, \"xs\": True, \"expandTo\": None}\n\n    for struct in [\"ZR\", \"C\", \"SI\", \"V\", \"CR\", \"MN\", \"FE\", \"NI\", \"MO\", \"W\", \"NA\", \"HE\", \"AL\", \"CO\", \"NB\"]:\n        nuclideFlags[struct] = {\"burn\": False, \"xs\": True, \"expandTo\": None}\n\n    return nuclideFlags\n\n\ndef eleExpandInfoBasedOnCodeENDF(cs):\n    \"\"\"\n    Intelligently choose elements to expand based on code and ENDF version.\n\n    If settings point to a particular code and library and we know that combo requires certain elementals to be\n    expanded, we flag them here to make the user input as simple as possible.\n\n    This determines both which elementals to keep and which specific expansion subsets to use.\n\n    Notes\n    -----\n    This logic is expected to be moved to respective plugins in time.\n\n    Returns\n    -------\n    elementalsToKeep : set\n        Set of NaturalNuclideBase instances to not expand into natural isotopics.\n    expansions : dict\n        Element to list of nuclides for expansion.\n        For 
example: {oxygen: [oxygen16]} indicates that all\n        oxygen should be expanded to O16, ignoring natural\n        O17 and O18. (variables are Natural/NuclideBases)\n    \"\"\"\n    elementalsToKeep = set()\n    oxygenElementals = [nuclideBases.byName[\"O\"]]\n    hydrogenElementals = [nuclideBases.byName[name] for name in [\"H\"]]\n    endf70Elementals = [nuclideBases.byName[name] for name in [\"C\", \"V\", \"ZN\"]]\n    endf71Elementals = [nuclideBases.byName[name] for name in [\"C\"]]\n    endf80Elementals = []\n    elementalsInMC2 = set()\n    expansionStrings = {}\n    mc2Expansions = {\n        \"HE\": [\"HE4\"],  # neglect HE3\n        \"O\": [\"O16\"],  # neglect O17 and O18\n        \"W\": [\"W182\", \"W183\", \"W184\", \"W186\"],  # neglect W180\n    }\n    mcnpExpansions = {\"O\": [\"O16\"]}\n\n    for element in elements.byName.values():\n        # any NaturalNuclideBase that's available in MC2 libs\n        nnb = nuclideBases.byName.get(element.symbol)\n        if nnb and nnb.getMcc2Id():\n            elementalsInMC2.add(nnb)\n\n    if \"MCNP\" in cs[CONF_NEUTRONICS_KERNEL]:\n        expansionStrings.update(mcnpExpansions)\n        if cs[CONF_MCNP_LIB_BASE] == \"ENDF/B-V.0\":\n            # ENDF/B V.0\n            elementalsToKeep.update(nuclideBases.instances)  # skip expansion\n        elif cs[CONF_MCNP_LIB_BASE] == \"ENDF/B-VII.0\":\n            # ENDF/B VII.0\n            elementalsToKeep.update(endf70Elementals)\n        elif cs[CONF_MCNP_LIB_BASE] == \"ENDF/B-VII.1\":\n            # ENDF/B VII.1\n            elementalsToKeep.update(endf71Elementals)\n        elif cs[CONF_MCNP_LIB_BASE] == \"ENDF/B-VIII.0\":\n            # ENDF/B VIII.0\n            elementalsToKeep.update(endf80Elementals)\n        else:\n            raise InputError(\n                \"Failed to determine nuclides for modeling. 
The `mcnpLibraryVersion` \"\n                f\"setting value ({cs[CONF_MCNP_LIB_BASE]}) is not supported.\"\n            )\n\n    elif cs[CONF_XS_KERNEL] == \"SERPENT\":\n        elementalsToKeep.update(endf70Elementals)\n        expansionStrings.update(mc2Expansions)\n\n    elif cs[CONF_XS_KERNEL] in [\"\", \"MC2v3\", \"MC2v3-PARTISN\"]:\n        elementalsToKeep.update(endf71Elementals)\n        expansionStrings.update(mc2Expansions)\n\n    elif cs[CONF_XS_KERNEL] == \"DRAGON\":\n        # Users need to use default nuclear lib name. This is documented.\n        dragLib = cs[\"dragonDataPath\"]\n        # only supports ENDF/B VII/VIII at the moment.\n        if \"7r0\" in dragLib:\n            elementalsToKeep.update(endf70Elementals)\n        elif \"7r1\" in dragLib:\n            elementalsToKeep.update(endf71Elementals)\n        elif \"8r0\" in dragLib:\n            elementalsToKeep.update(endf80Elementals)\n            elementalsToKeep.update(hydrogenElementals)\n            elementalsToKeep.update(oxygenElementals)\n        else:\n            raise ValueError(f\"Unrecognized DRAGLIB name: {dragLib} Use default file name.\")\n\n    elif cs[CONF_XS_KERNEL] == \"MC2v2\":\n        # strip out any NaturalNuclideBase with no getMcc2Id() (not on mcc-nuclides.yaml)\n        elementalsToKeep.update(elementalsInMC2)\n        expansionStrings.update(mc2Expansions)\n\n    # convert convenient string notation to actual NuclideBase objects\n    expansions = {}\n    for nnb, nbs in expansionStrings.items():\n        expansions[nuclideBases.byName[nnb]] = [nuclideBases.byName[nb] for nb in nbs]\n\n    return elementalsToKeep, expansions\n\n\ndef genDefaultNucFlags():\n    \"\"\"Perform all the yamlize-required type conversions.\"\"\"\n    flagsDict = getDefaultNuclideFlags()\n    flags = NuclideFlags()\n    for nucName, nucFlags in flagsDict.items():\n        flag = NuclideFlag(nucName, nucFlags[\"burn\"], nucFlags[\"xs\"], nucFlags[\"expandTo\"])\n        flags[nucName] = 
flag\n\n    return flags\n\n\ndef autoUpdateNuclideFlags(cs, nuclideFlags, inerts):\n    \"\"\"\n    This function is responsible for examining the fission product model treatment that is selected by the user and\n    adding a set of nuclides to the `nuclideFlags` list.\n\n    Notes\n    -----\n    The reason for adding this method is that when switching between fission product modeling treatments it can be\n    time-consuming to manually adjust the ``nuclideFlags`` inputs.\n\n    See Also\n    --------\n    genDefaultNucFlags\n    \"\"\"\n    nbs = getAllNuclideBasesByLibrary(cs)\n    if nbs:\n        runLog.info(\n            \"Adding explicit fission products to the nuclide flags based on the fission product model set to \"\n            f\"`{cs[CONF_FP_MODEL]}`.\"\n        )\n        for nb in nbs:\n            nuc = nb.name\n            if nuc in nuclideFlags or elements.byZ[nb.z] in nuclideFlags:\n                continue\n            nuclideFlags[nuc] = NuclideFlag(nuc, burn=False, xs=True, expandTo=[])\n            # inert since burn is False\n            inerts.add(nuc)\n\n\ndef getAllNuclideBasesByLibrary(cs):\n    \"\"\"\n    Return a list of nuclide bases available for cross section modeling\n    based on the ``CONF_FISSION_PRODUCT_LIBRARY_NAME`` setting.\n    \"\"\"\n    nbs = []\n    if cs[CONF_FP_MODEL] == \"explicitFissionProducts\":\n        if not cs[CONF_FISSION_PRODUCT_LIBRARY_NAME]:\n            pass\n        if cs[CONF_FISSION_PRODUCT_LIBRARY_NAME] == \"MC2-3\":\n            nbs = nuclideBases.byMcc3Id.values()\n        else:\n            raise ValueError(\n                \"An option to handle the `CONF_FISSION_PRODUCT_LIBRARY_NAME` set to \"\n                f\"`{cs[CONF_FISSION_PRODUCT_LIBRARY_NAME]}` has not been implemented.\"\n            )\n\n    return nbs\n"
  },
  {
    "path": "armi/reactor/blueprints/reactorBlueprint.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nDefinitions of top-level reactor arrangements like the Core (default), SFP, etc.\n\nSee documentation of blueprints in :ref:`bp-input-file` for more context. See example in\n:py:mod:`armi.reactor.blueprints.tests.test_reactorBlueprints`.\n\nThis was built to replace the old system that loaded the core geometry from the ``cs['geometry']`` setting. Until the\ngeom file-based input is completely removed, this system will attempt to migrate the core layout from geom files. When\ngeom files are used, explicitly specifying a ``core`` system will result in an error.\n\nSystem Blueprints are a big step in the right direction to generalize user input, but was still mostly adapted from the\nold Core layout input. As such, they still only really support Core-like systems. Future work should generalize the\nconcept of \"system\" to more varied scenarios.\n\nSee Also\n--------\narmi.reactor.blueprints.gridBlueprints : Method for storing system assembly layouts.\n\"\"\"\n\nimport yamlize\n\nfrom armi import context, getPluginManagerOrFail, runLog\nfrom armi.reactor import geometry, grids\nfrom armi.reactor.blueprints.gridBlueprint import Triplet\nfrom armi.utils import tabulate\n\n\nclass SystemBlueprint(yamlize.Object):\n    \"\"\"\n    The reactor-level structure input blueprint.\n\n    .. 
impl:: Build core and spent fuel pool from blueprints\n        :id: I_ARMI_BP_SYSTEMS\n        :implements: R_ARMI_BP_SYSTEMS, R_ARMI_BP_CORE\n\n        This class creates a yaml interface for the user to define systems with grids, such as cores or spent fuel\n        pools, each having their own name, type, grid, and position in space. It is incorporated into the \"systems\"\n        section of a blueprints file by being included as key-value pairs within the\n        :py:class:`~armi.reactor.blueprints.reactorBlueprint.Systems` class, which is in turn included into the overall\n        blueprints within :py:class:`~armi.reactor.blueprints.Blueprints`.\n\n        This class includes a :py:meth:`~armi.reactor.blueprints.reactorBlueprint.SystemBlueprint.construct` method,\n        which is typically called from within :py:func:`~armi.reactor.reactors.factory` during the initialization of the\n        reactor object to instantiate the core and/or spent fuel pool objects. During that process, a spatial grid is\n        constructed based on the grid blueprints specified in the \"grids\" section of the blueprints (see\n        :need:`I_ARMI_BP_GRID`) and the assemblies needed to fill the lattice are built from blueprints using\n        :py:meth:`~armi.reactor.blueprints.Blueprints.constructAssem`.\n\n    Notes\n    -----\n    We use string keys to link grids to objects that use them. This differs from how blocks / assembies are specified,\n    which use YAML anchors. 
YAML anchors have proven to be problematic and difficult to work with.\n    \"\"\"\n\n    name = yamlize.Attribute(key=\"name\", type=str)\n    typ = yamlize.Attribute(key=\"type\", type=str, default=\"core\")\n    gridName = yamlize.Attribute(key=\"grid name\", type=str)\n    origin = yamlize.Attribute(key=\"origin\", type=Triplet, default=None)\n\n    def __init__(self, name=None, gridName=None, origin=None):\n        \"\"\"\n        A Reactor-level structure like a core, or ex-core like SFP.\n\n        Notes\n        -----\n        yamlize does not call an __init__ method, instead it uses __new__ and setattr this is only needed for when you\n        want to make this object from a non-YAML source.\n        \"\"\"\n        self.name = name\n        self.gridName = gridName\n        self.origin = origin\n\n    @staticmethod\n    def _resolveSystemType(typ: str):\n        \"\"\"Loop over all plugins that could be attached and determine if any tell us how to build a specific systems\n        attribute.\n        \"\"\"\n        manager = getPluginManagerOrFail()\n\n        # Only need this to handle the case we don't find the system we expect\n        seen = set()\n        for options in manager.hook.defineSystemBuilders():\n            for key, builder in options.items():\n                # Take the first match we find. This would allow other plugins to define a new core builder before\n                # finding those defined by the ReactorPlugin\n                if key == typ:\n                    return builder\n                seen.add(key)\n\n        raise ValueError(\n            f\"Could not determine an appropriate class for handling a system of type `{typ}`. 
\"\n            f\"Supported types are {seen}.\"\n        )\n\n    def construct(self, cs, bp, reactor, loadComps=True):\n        \"\"\"Build a core or ex-core grid and fill it with children.\n\n        Parameters\n        ----------\n        cs : :py:class:`Settings <armi.settings.Settings>`\n            armi settings to apply\n        bp : :py:class:`Reactor <armi.reactor.blueprints.Blueprints>`\n            armi blueprints to apply\n        reactor : :py:class:`Reactor <armi.reactor.reactors.Reactor>`\n            reactor to fill\n        loadComps : bool, optional\n            whether to fill reactor with assemblies, as defined in blueprints, or not. Is False in\n            :py:class:`UniformMeshGeometryConverter <armi.reactor.converters.uniformMesh.UniformMeshGeometryConverter>`\n            within the initNewReactor() method.\n\n        Returns\n        -------\n        Composite\n            A Composite object with a grid, like a Spent Fuel Pool or other ex-core structure.\n\n        Raises\n        ------\n        ValueError\n            input error, no grid design provided\n        ValueError\n            objects were added to non-existent grid locations\n        \"\"\"\n        runLog.info(f\"Constructing the `{self.name}`\")\n\n        if not bp.gridDesigns:\n            raise ValueError(\"The input must define grids to construct a reactor, but does not. Update input.\")\n\n        gridDesign = bp.gridDesigns.get(self.gridName, None)\n        system = self._resolveSystemType(self.typ)(self.name)\n\n        # Some systems may not require a prescribed grid design. 
Only use one if provided\n        if gridDesign is not None:\n            spatialGrid = gridDesign.construct()\n            system.spatialGrid = spatialGrid\n            system.spatialGrid.armiObject = system\n\n        reactor.add(system)  # ensure the reactor is the parent\n        spatialLocator = grids.CoordinateLocation(self.origin.x, self.origin.y, self.origin.z, None)\n        system.spatialLocator = spatialLocator\n        if context.MPI_RANK != 0:\n            # Non-primary nodes get the reactor via DistributeState.\n            return None\n\n        system = self._constructComposites(cs, bp, loadComps, system, gridDesign)\n\n        return system\n\n    def _constructComposites(self, cs, bp, loadComps, system, gridDesign):\n        \"\"\"Fill a grid with composities, if there are any to fill.\n\n        Parameters\n        ----------\n        cs : Settings object.\n            armi settings to apply\n        bp : Blueprints object.\n            armi blueprints to apply\n        loadComps : bool\n            whether to fill reactor with composities, as defined in blueprints, or not\n        system : Composite\n            The composite we are building.\n        gridDesign : GridBlueprint\n            The definition of the grid on the object.\n\n        Returns\n        -------\n        Composite\n            A Composite object with a grid, like a Spent Fuel Pool or other ex-core structure.\n        \"\"\"\n        from armi.reactor.reactors import Core  # avoid circular import\n\n        if loadComps and gridDesign is not None:\n            self._loadComposites(cs, bp, system, gridDesign.gridContents, gridDesign.orientationBOL)\n\n            if isinstance(system, Core):\n                self._modifyGeometry(system, gridDesign)\n                summarizeMaterialData(system)\n                system.processLoading(cs)\n\n        return system\n\n    def _loadComposites(self, cs, bp, container, gridContents, orientationBOL):\n        from armi.reactor.cores 
import Core\n\n        runLog.header(f\"=========== Adding Composites to {container} ===========\")\n        badLocations = set()\n        for locationInfo, aTypeID in gridContents.items():\n            # handle the hex-grid special case, where the user enters (ring, pos)\n            i, j = locationInfo\n            if isinstance(container, Core) and container.geomType == geometry.GeomType.HEX:\n                loc = container.spatialGrid.indicesToRingPos(i, j)\n            else:\n                loc = locationInfo\n\n            # correctly rotate the Composite\n            if orientationBOL is None or loc not in orientationBOL:\n                orientation = 0.0\n            else:\n                orientation = orientationBOL[loc]\n\n            # create a new Composite to add to the grid\n            newAssembly = bp.constructAssem(cs, specifier=aTypeID, orientation=orientation)\n\n            # add the Composite to the grid\n            posi = container.spatialGrid[i, j, 0]\n            try:\n                container.add(newAssembly, posi)\n            except LookupError:\n                badLocations.add(posi)\n\n        if badLocations:\n            raise ValueError(f\"Attempted to add objects to non-existent locations on the grid: {badLocations}.\")\n\n        # init position history param on each assembly\n        for a in container:\n            loc = a.getLocation()\n            if loc in a.NOT_IN_CORE:\n                a.p.ringPosHist = [(loc, loc)]\n            else:\n                try:\n                    ring, pos, _ = grids.locatorLabelToIndices(a.getLocation())\n                    a.p.ringPosHist = [(ring, pos)]\n                except ValueError:\n                    # some ex-core structures may not have valid locator label indices\n                    a.p.ringPosHist = [(a.NOT_CREATED_YET, a.NOT_CREATED_YET)]\n\n    def _modifyGeometry(self, container, gridDesign):\n        \"\"\"Perform post-load geometry conversions like full core, edge 
assems.\"\"\"\n        # all cases should have no edge assemblies. They are added ephemerally when needed\n        from armi.reactor.converters import geometryConverters\n\n        runLog.header(\"=========== Applying Geometry Modifications ===========\")\n        if not container.isFullCore:\n            runLog.extra(\"Applying non-full core modifications\")\n            converter = geometryConverters.EdgeAssemblyChanger()\n            converter.scaleParamsRelatedToSymmetry(container)\n            converter.removeEdgeAssemblies(container)\n\n        # now update the spatial grid dimensions based on the populated children (unless specified on input)\n        if not gridDesign.latticeDimensions:\n            runLog.info(f\"Updating spatial grid pitch data for {container.geomType} geometry\")\n            if container.geomType == geometry.GeomType.HEX:\n                container.spatialGrid.changePitch(container[0][0].getPitch())\n            elif container.geomType == geometry.GeomType.CARTESIAN:\n                xw, yw = container[0][0].getPitch()\n                container.spatialGrid.changePitch(xw, yw)\n\n\nclass Systems(yamlize.KeyedList):\n    item_type = SystemBlueprint\n    key_attr = SystemBlueprint.name\n\n\ndef summarizeMaterialData(container):\n    \"\"\"\n    Create a summary of the material objects and source data for a reactor container.\n\n    Parameters\n    ----------\n    container : Core object\n        Any Core object with Blocks and Components defined.\n    \"\"\"\n    runLog.header(f\"=========== Summarizing Source of Material Data for {container} ===========\")\n    materialNames = set()\n    materialData = []\n    for c in container.iterComponents():\n        if c.material.name in materialNames:\n            continue\n        materialData.append((c.material.name, c.material.DATA_SOURCE))\n        materialNames.add(c.material.name)\n\n    materialData = sorted(materialData)\n    runLog.info(tabulate.tabulate(data=materialData, 
headers=[\"Material Name\", \"Source Location\"], tableFmt=\"armi\"))\n    return materialData\n"
  },
  {
    "path": "armi/reactor/blueprints/tests/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"
  },
  {
    "path": "armi/reactor/blueprints/tests/test_assemblyBlueprints.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for Assembly Blueprints.\"\"\"\n\nimport unittest\n\nfrom armi import settings\nfrom armi.reactor import blueprints\n\n\nclass TestMaterialModifications(unittest.TestCase):\n    twoBlockInput_correct = r\"\"\"\nnuclide flags:\n    U: {burn: false, xs: true}\n    ZR: {burn: false, xs: true}\nblocks:\n    fuel: &block_fuel\n        fuel1: &component_fuel_fuel1\n            shape: Hexagon\n            material: UZr\n            Tinput: 600.0\n            Thot: 600.0\n            ip: 0.0\n            mult: 1\n            op: 10.0\n        fuel2: &component_fuel_fuel2\n            shape: Hexagon\n            material: UZr\n            Tinput: 600.0\n            Thot: 600.0\n            ip: 0.0\n            mult: 1\n            op: 10.0\nassemblies:\n    fuel a: &assembly_a\n        specifier: IC\n        blocks: [*block_fuel, *block_fuel]\n        height: [1.0, 1.0]\n        axial mesh points: [1, 1]\n        xs types: [A, A]\n\"\"\"\n\n    twoBlockInput_wrongMeshPoints = r\"\"\"\nnuclide flags:\n    U: {burn: false, xs: true}\n    ZR: {burn: false, xs: true}\nblocks:\n    fuel: &block_fuel\n        fuel1: &component_fuel_fuel1\n            shape: Hexagon\n            material: UZr\n            Tinput: 600.0\n            Thot: 600.0\n            ip: 0.0\n            mult: 1\n            op: 10.0\n        fuel2: &component_fuel_fuel2\n            shape: 
Hexagon\n            material: UZr\n            Tinput: 600.0\n            Thot: 600.0\n            ip: 0.0\n            mult: 1\n            op: 10.0\nassemblies:\n    fuel a: &assembly_a\n        specifier: IC\n        blocks: [*block_fuel, *block_fuel]\n        height: [1.0, 1.0]\n        axial mesh points: [1]\n        xs types: [A, A]\n\"\"\"\n\n    twoBlockInput_wrongHeights = r\"\"\"\nnuclide flags:\n    U: {burn: false, xs: true}\n    ZR: {burn: false, xs: true}\nblocks:\n    fuel: &block_fuel\n        fuel1: &component_fuel_fuel1\n            shape: Hexagon\n            material: UZr\n            Tinput: 600.0\n            Thot: 600.0\n            ip: 0.0\n            mult: 1\n            op: 10.0\n        fuel2: &component_fuel_fuel2\n            shape: Hexagon\n            material: UZr\n            Tinput: 600.0\n            Thot: 600.0\n            ip: 0.0\n            mult: 1\n            op: 10.0\nassemblies:\n    fuel a: &assembly_a\n        specifier: IC\n        blocks: [*block_fuel, *block_fuel]\n        height: [1.0]\n        axial mesh points: [1, 1]\n        xs types: [A, A]\n\"\"\"\n\n    twoBlockInput_wrongXSTypes = r\"\"\"\nnuclide flags:\n    U: {burn: false, xs: true}\n    ZR: {burn: false, xs: true}\nblocks:\n    fuel: &block_fuel\n        fuel1: &component_fuel_fuel1\n            shape: Hexagon\n            material: UZr\n            Tinput: 600.0\n            Thot: 600.0\n            ip: 0.0\n            mult: 1\n            op: 10.0\n        fuel2: &component_fuel_fuel2\n            shape: Hexagon\n            material: UZr\n            Tinput: 600.0\n            Thot: 600.0\n            ip: 0.0\n            mult: 1\n            op: 10.0\nassemblies:\n    fuel a: &assembly_a\n        specifier: IC\n        blocks: [*block_fuel, *block_fuel]\n        height: [1.0, 1.0]\n        axial mesh points: [1, 1]\n        xs types: [A]\n\"\"\"\n\n    twoBlockInput_wrongMatMods = r\"\"\"\nnuclide flags:\n    U: {burn: false, xs: true}\n    ZR: 
{burn: false, xs: true}\nblocks:\n    fuel: &block_fuel\n        fuel1: &component_fuel_fuel1\n            shape: Hexagon\n            material: UZr\n            Tinput: 600.0\n            Thot: 600.0\n            ip: 0.0\n            mult: 1\n            op: 10.0\n        fuel2: &component_fuel_fuel2\n            shape: Hexagon\n            material: UZr\n            Tinput: 600.0\n            Thot: 600.0\n            ip: 0.0\n            mult: 1\n            op: 10.0\nassemblies:\n    fuel a: &assembly_a\n        specifier: IC\n        blocks: [*block_fuel, *block_fuel]\n        height: [1.0, 1.0]\n        axial mesh points: [1, 1]\n        xs types: [A, A]\n        material modifications:\n            U235_wt_frac: [0.5]\n\"\"\"\n\n    def loadCustomAssembly(self, assemblyInput):\n        yamlString = assemblyInput\n        design = blueprints.Blueprints.load(yamlString)\n        design._prepConstruction(settings.Settings())\n        return design.assemblies[\"fuel a\"]\n\n    def test_checkParamConsistency(self):\n        \"\"\"\n        Load assembly from a blueprint file.\n\n        .. 
test:: Create assembly from blueprint file.\n            :id: T_ARMI_BP_ASSEM\n            :tests: R_ARMI_BP_ASSEM\n        \"\"\"\n        # make sure a good example doesn't error\n        a = self.loadCustomAssembly(self.twoBlockInput_correct)\n        blockAxialMesh = a.getAxialMesh()\n        blockXSTypes = [a[0].p.xsType, a[1].p.xsType]\n        self.assertAlmostEqual(blockAxialMesh, [1.0, 2.0])\n        self.assertEqual(blockXSTypes, [\"A\", \"A\"])\n\n        with self.assertRaises(ValueError):\n            a = self.loadCustomAssembly(self.twoBlockInput_wrongMeshPoints)\n\n        with self.assertRaises(ValueError):\n            a = self.loadCustomAssembly(self.twoBlockInput_wrongHeights)\n\n        with self.assertRaises(ValueError):\n            a = self.loadCustomAssembly(self.twoBlockInput_wrongXSTypes)\n\n        with self.assertRaises(ValueError):\n            a = self.loadCustomAssembly(self.twoBlockInput_wrongMatMods)\n"
  },
  {
    "path": "armi/reactor/blueprints/tests/test_blockBlueprints.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for block blueprints.\"\"\"\n\nimport io\nimport unittest\n\nfrom armi import settings\nfrom armi.reactor import blueprints\nfrom armi.reactor.flags import Flags\nfrom armi.reactor.tests import test_blocks\n\nFULL_BP = \"\"\"\nblocks:\n    fuel: &block_fuel\n        grid name: fuelgrid\n        fuel:\n            shape: Circle\n            material: UZr\n            Tinput: 25.0\n            Thot: 600.0\n            id: 0.0\n            od: 0.7\n            latticeIDs: [1]\n        clad: # same args as test_blocks (except mult)\n            shape: Circle\n            material: HT9\n            Tinput: 25.0\n            Thot: 450.0\n            id: .77\n            od: .80\n            latticeIDs: [1,2]\n        coolant:\n            shape: DerivedShape\n            material: Sodium\n            Tinput: 450.0\n            Thot: 450.0\n        duct:\n            shape: Hexagon\n            material: HT9\n            Tinput: 25.0\n            Thot: 450.0\n            ip: 16.0\n            mult: 1.0\n            op: 16.6\n        intercoolant:\n            shape: Hexagon\n            material: Sodium\n            Tinput: 450.0\n            Thot: 450.0\n            ip: duct.op\n            mult: 1.0\n            op: 16.75\n    other fuel: &block_fuel_other\n        grid name: fuelgrid\n        flags: fuel test depletable\n        fuel:\n            shape: 
Circle\n            material: UZr\n            Tinput: 25.0\n            Thot: 600.0\n            id: 0.0\n            od: 0.67\n            latticeIDs: [1]\n        clad:\n            shape: Circle\n            material: HT9\n            Tinput: 25.0\n            Thot: 450.0\n            id: .77\n            od: .80\n            latticeIDs: [1,2]\n        coolant:\n            shape: DerivedShape\n            material: Sodium\n            Tinput: 450.0\n            Thot: 450.0\n        duct:\n            shape: Hexagon\n            material: HT9\n            Tinput: 25.0\n            Thot: 450.0\n            ip: 16.0\n            mult: 1.0\n            op: 16.6\n        intercoolant:\n            shape: Hexagon\n            material: Sodium\n            Tinput: 450.0\n            Thot: 450.0\n            ip: duct.op\n            mult: 1.0\n            op: 16.75\nassemblies:\n    fuel:\n        specifier: IC\n        blocks:  [*block_fuel, *block_fuel_other]\n        height: [25.0, 25.0]\n        axial mesh points:  [1, 1]\n        material modifications:\n            U235_wt_frac: [0.11, 0.11]\n            ZR_wt_frac:  [0.06, 0.06]\n        xs types: [A, A]\n    fuel other:\n        flags: fuel test\n        specifier: ID\n        blocks:  [*block_fuel, *block_fuel_other]\n        height: [25.0, 25.0]\n        axial mesh points:  [1, 1]\n        material modifications:\n            U235_wt_frac: [0.11, 0.11]\n            ZR_wt_frac:  [0.06, 0.06]\n        xs types: [A, A]\ngrids:\n    fuelgrid:\n       geom: hex_corners_up\n       symmetry: full\n       lattice map: |\n         - - -  1 1 1 1\n           - - 1 1 2 1 1\n            - 1 1 1 1 1 1\n             1 2 1 2 1 2 1\n              1 1 1 1 1 1\n               1 1 2 1 1\n                1 1 1 1\n\n\"\"\"\n\nFULL_BP_ERRANT_ID = (\n    FULL_BP.split(\"lattice map:\")[0]\n    + \"\"\"lattice map: |\n         - - -  1 1 1 1\n           - - 1 1 1 1 1\n            - 1 1 1 1 1 1\n             1 1 1 1 1 1 1\n          
    1 1 1 1 1 1\n               1 1 1 1 1\n                1 1 1 1\n\n\"\"\"\n)\n\nFULL_BP_NO_COMP = (\n    FULL_BP.split(\"lattice map:\")[0]\n    + \"\"\"lattice map: |\n         - - -  1 1 1 1\n           - - 1 1 1 1 1\n            - 1 1 1 1 1 1\n             1 3 1 1 1 3 1\n              1 1 1 1 1 1\n               1 1 1 1 1\n                1 1 1 1\n\n\"\"\"\n)\n\nFULL_BP_GRID = (\n    FULL_BP.split(\"lattice map:\")[0]\n    + \"\"\"grid contents:\n         ? - -3\n           - 3\n         : '1'\n         ? - -2\n           - 3\n         : '1'\n         ? - -1\n           - 3\n         : '1'\n         ? - 0\n           - 3\n         : '1'\n         ? - -3\n           - 2\n         : '1'\n         ? - -2\n           - 2\n         : '1'\n         ? - -1\n           - 2\n         : '2'\n         ? - 0\n           - 2\n         : '1'\n         ? - 1\n           - 2\n         : '1'\n         ? - -3\n           - 1\n         : '1'\n         ? - -2\n           - 1\n         : '1'\n         ? - -1\n           - 1\n         : '1'\n         ? - 0\n           - 1\n         : '1'\n         ? - 1\n           - 1\n         : '1'\n         ? - 2\n           - 1\n         : '1'\n         ? - -3\n           - 0\n         : '1'\n         ? - -2\n           - 0\n         : '3'\n         ? - -1\n           - 0\n         : '1'\n         ? - 0\n           - 0\n         : '2'\n         ? - 1\n           - 0\n         : '1'\n         ? - 2\n           - 0\n         : '3'\n         ? - 3\n           - 0\n         : '1'\n         ? - -2\n           - -1\n         : '1'\n         ? - -1\n           - -1\n         : '1'\n         ? - 0\n           - -1\n         : '1'\n         ? - 1\n           - -1\n         : '1'\n         ? - 2\n           - -1\n         : '1'\n         ? - 3\n           - -1\n         : '1'\n         ? - -1\n           - -2\n         : '1'\n         ? - 0\n           - -2\n         : '1'\n         ? - 1\n           - -2\n         : '2'\n         ? 
- 2\n           - -2\n         : '1'\n         ? - 3\n           - -2\n         : '1'\n         ? - 0\n           - -3\n         : '1'\n         ? - 1\n           - -3\n         : '1'\n         ? - 2\n           - -3\n         : '1'\n         ? - 3\n           - -3\n         : '1'\n\"\"\"\n)\n\n\nclass TestGriddedBlock(unittest.TestCase):\n    \"\"\"Tests for a block that has components in a lattice.\"\"\"\n\n    def setUp(self):\n        self.cs = settings.Settings()\n\n        with io.StringIO(FULL_BP) as stream:\n            self.blueprints = blueprints.Blueprints.load(stream)\n            self.blueprints._prepConstruction(self.cs)\n\n    def test_constructSpatialGrid(self):\n        \"\"\"Test intermediate grid construction function.\"\"\"\n        bDesign = self.blueprints.blockDesigns[\"fuel\"]\n        gridDesign = bDesign._getGridDesign(self.blueprints)\n        self.assertEqual(gridDesign.gridContents[0, 0], \"2\")\n\n    def test_getLocatorsAtLatticePositions(self):\n        \"\"\"Ensure extraction of specifiers results in locators.\"\"\"\n        bDesign = self.blueprints.blockDesigns[\"fuel\"]\n        gridDesign = bDesign._getGridDesign(self.blueprints)\n        grid = gridDesign.construct()\n        locators = gridDesign.getLocators(grid, [\"2\"])\n        self.assertEqual(len(locators), 5)\n        self.assertIs(grid[locators[0].getCompleteIndices()], locators[0])\n\n    def test_blockLattice(self):\n        \"\"\"Make sure constructing a block with grid specifiers works as a whole.\n\n        .. 
test:: Create block with blueprint file.\n            :id: T_ARMI_BP_BLOCK\n            :tests: R_ARMI_BP_BLOCK\n        \"\"\"\n        aDesign = self.blueprints.assemDesigns.bySpecifier[\"IC\"]\n        a = aDesign.construct(self.cs, self.blueprints)\n        fuelBlock = a.getFirstBlock(Flags.FUEL)\n        fuel = fuelBlock.getComponent(Flags.FUEL)\n        self.assertTrue(fuel.spatialLocator)\n        seen = False\n        for locator in fuel.spatialLocator:\n            if locator == (1, 0, 0):\n                seen = True\n        self.assertTrue(seen)\n\n    def test_componentsNotInLattice(self):\n        \"\"\"\n        Ensure that we catch cases when a component is expected to be in the grid,\n        but is not. In this case, latticeID \"2\" is not in the lattice.\n        \"\"\"\n        with self.assertRaises(ValueError) as ee:\n            with io.StringIO(FULL_BP_ERRANT_ID) as stream:\n                self.blueprints = blueprints.Blueprints.load(stream)\n                self.blueprints._prepConstruction(self.cs)\n\n            self.assertIn(\n                \"Check that the component's latticeIDs align with the block's grid.\",\n                ee.args[0],\n            )\n\n    def test_latticeNotInComponents(self):\n        \"\"\"\n        Ensure that we catch cases when a latticeID listed in the grid is not present\n        in any of the components on the block. 
In this case, latticeID \"2\" is not in the lattice.\n        \"\"\"\n        with self.assertRaises(ValueError) as ee:\n            with io.StringIO(FULL_BP_NO_COMP) as stream:\n                self.blueprints = blueprints.Blueprints.load(stream)\n                self.blueprints._prepConstruction(self.cs)\n\n            self.assertIn(\n                \"All IDs in the grid must appear in at least one component.\",\n                ee.args[0],\n            )\n\n    def test_nonLatticeComponentHasRightMult(self):\n        \"\"\"Make sure non-grid components in blocks with grids get the right multiplicity.\"\"\"\n        aDesign = self.blueprints.assemDesigns.bySpecifier[\"IC\"]\n        a = aDesign.construct(self.cs, self.blueprints)\n        fuelBlock = a.getFirstBlock(Flags.FUEL)\n        duct = fuelBlock.getComponent(Flags.DUCT)\n        self.assertEqual(duct.getDimension(\"mult\"), 1.0)\n\n    def test_explicitFlags(self):\n        \"\"\"\n        Test flags are created from blueprint file.\n\n        .. 
test:: Nuc flags can define depletable objects.\n            :id: T_ARMI_BP_NUC_FLAGS0\n            :tests: R_ARMI_BP_NUC_FLAGS\n        \"\"\"\n        a1 = self.blueprints.assemDesigns.bySpecifier[\"IC\"].construct(self.cs, self.blueprints)\n        b1 = a1[0]\n        b2 = a1[1]\n\n        a2 = self.blueprints.assemDesigns.bySpecifier[\"ID\"].construct(self.cs, self.blueprints)\n\n        self.assertTrue(b1.hasFlags(Flags.FUEL, exact=True))\n        self.assertTrue(b2.hasFlags(Flags.FUEL | Flags.TEST | Flags.DEPLETABLE, exact=True))\n\n        self.assertEqual(a1.p.flags, Flags.FUEL)\n        self.assertTrue(a1.hasFlags(Flags.FUEL, exact=True))\n        self.assertTrue(a2.hasFlags(Flags.FUEL | Flags.TEST, exact=True))\n\n    def test_densConsistentCompConstructor(self):\n        a1 = self.blueprints.assemDesigns.bySpecifier[\"IC\"].construct(self.cs, self.blueprints)\n        fuelBlock = a1[0]\n        clad = fuelBlock.getComponent(Flags.CLAD)\n\n        # now construct clad programmatically like in test_Blocks\n        programmaticBlock = test_blocks.buildSimpleFuelBlock()\n        programaticClad = programmaticBlock.getComponent(Flags.CLAD)\n        self.assertAlmostEqual(\n            clad.density(),\n            clad.material.density(Tc=clad.temperatureInC),\n        )\n\n        self.assertAlmostEqual(\n            clad.density(),\n            programaticClad.density(),\n        )\n"
  },
  {
    "path": "armi/reactor/blueprints/tests/test_blueprints.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests the blueprints (loading input) file.\"\"\"\n\nimport io\nimport os\nimport pathlib\nimport unittest\n\nimport yamlize\n\nfrom armi import settings\nfrom armi.nucDirectory.nuclideBases import NuclideBases\nfrom armi.physics.neutronics.settings import CONF_XS_KERNEL\nfrom armi.reactor import blueprints, parameters\nfrom armi.reactor.blueprints.componentBlueprint import ComponentBlueprint\nfrom armi.reactor.blueprints.gridBlueprint import saveToStream\nfrom armi.reactor.blueprints.isotopicOptions import CustomIsotopics, NuclideFlags\nfrom armi.reactor.flags import Flags\nfrom armi.settings.fwSettings.globalSettings import CONF_INPUT_HEIGHTS_HOT\nfrom armi.tests import TEST_ROOT\nfrom armi.utils import directoryChangers, textProcessors\n\n\nclass TestBlueprints(unittest.TestCase):\n    \"\"\"Test that the basic functionality of faithfully receiving user input to construct ARMI data\n    model objects works as expected.\n\n    Try to ensure you test for ideas and not exact matches here, to make the tests more robust.\n    \"\"\"\n\n    @classmethod\n    def setUpClass(cls):\n        cls.cs = settings.Settings()\n        cls.directoryChanger = directoryChangers.DirectoryChanger(TEST_ROOT)\n        cls.directoryChanger.open()\n\n        y = textProcessors.resolveMarkupInclusions(pathlib.Path(os.getcwd()) / \"refSmallReactor.yaml\")\n        cls.blueprints = 
blueprints.Blueprints.load(y)\n        cls.blueprints._prepConstruction(cls.cs)\n\n    @classmethod\n    def tearDownClass(cls):\n        cls.directoryChanger.close()\n\n    @staticmethod\n    def __stubify(latticeMap):\n        \"\"\"Little helper method to allow lattie maps to be compared free of whitespace.\"\"\"\n        return latticeMap.replace(\" \", \"\").replace(\"-\", \"\").replace(\"\\n\", \"\")\n\n    def test_roundTripCompleteBP(self):\n        \"\"\"Test the round-tip of reading and writing blueprint files.\n\n        .. test:: Validates the round trip of reading and writing blueprints.\n            :id: T_ARMI_BP_TO_DB1\n            :tests: R_ARMI_BP_TO_DB\n        \"\"\"\n        # the correct lattice map\n        latticeMap = \"\"\"-   -   SH\n  -   SH  SH\n-   SH  OC  SH\n  SH  OC  OC  SH\n    OC  IC  OC  SH\n  OC  IC  IC  OC  SH\n    IC  IC  IC  OC  SH\n      IC  IC  PC  OC  SH\n    IC  PC  IC  IC  OC  SH\n      LA  IC  IC  IC  OC\n        IC  IC  IC  IC  SH\n      IC  LB  IC  IC  OC\n        IC  IC  PC  IC  SH\n          LA  IC  IC  OC\n        IC  IC  IC  IC  SH\n          IC  IC  IC  OC\n        IC  IC  IC  PC  SH\"\"\"\n        latticeMap = self.__stubify(latticeMap)\n\n        # validate some core elements from the blueprints\n        self.assertEqual(self.blueprints.gridDesigns[\"core\"].symmetry, \"third periodic\")\n        map0 = self.__stubify(self.blueprints.gridDesigns[\"core\"].latticeMap)\n        self.assertEqual(map0, latticeMap)\n\n        # save the blueprint to a stream\n        stream = io.StringIO()\n        stream.seek(0)\n        self.blueprints.dump(self.blueprints)\n        saveToStream(stream, self.blueprints, True, True)\n        stream.seek(0)\n\n        with directoryChangers.TemporaryDirectoryChanger():\n            # save the stream to a file\n            filePath = \"test_roundTripCompleteBP.yaml\"\n            with open(filePath, \"w\") as fout:\n                fout.write(stream.read())\n\n            # load the 
blueprint from that file again\n            bp = blueprints.Blueprints.load(open(filePath, \"r\").read())\n\n            # re-validate some core elements from the blueprints\n            self.assertEqual(bp.gridDesigns[\"core\"].symmetry, \"third periodic\")\n            map1 = self.__stubify(bp.gridDesigns[\"core\"].latticeMap)\n            self.assertEqual(map1, latticeMap)\n\n    def test_nuclides(self):\n        \"\"\"Tests the available sets of nuclides work as expected.\"\"\"\n        actives = set(self.blueprints.activeNuclides)\n        inerts = set(self.blueprints.inertNuclides)\n        self.assertEqual(actives.union(inerts), set(self.blueprints.allNuclidesInProblem))\n        self.assertEqual(actives.intersection(inerts), set())\n\n    def test_getAssemblyTypeBySpecifier(self):\n        aDesign = self.blueprints.assemDesigns.bySpecifier[\"IC\"]\n        self.assertEqual(aDesign.name, \"igniter fuel\")\n        self.assertEqual(aDesign.specifier, \"IC\")\n\n    def test_specialIsotopicVectors(self):\n        mox = self.blueprints.customIsotopics[\"MOX\"]\n        allNucsInProblem = set(self.blueprints.allNuclidesInProblem)\n        for a in mox.keys():\n            self.assertIn(a, allNucsInProblem)\n        self.assertIn(\"U235\", mox)\n        self.assertAlmostEqual(mox[\"PU239\"], 0.00286038)\n\n    def test_componentDimensions(self):\n        \"\"\"Tests that the user can specify the dimensions of a component with arbitrary fidelity.\n\n        .. 
test:: A component can be correctly created from a blueprint file.\n            :id: T_ARMI_BP_COMP\n            :tests: R_ARMI_BP_COMP\n        \"\"\"\n        fuelAssem = self.blueprints.constructAssem(self.cs, name=\"igniter fuel\")\n        fuel = fuelAssem.getComponents(Flags.FUEL)[0]\n        self.assertAlmostEqual(fuel.getDimension(\"od\", cold=True), 0.86602)\n        self.assertAlmostEqual(fuel.getDimension(\"id\", cold=True), 0.0)\n        self.assertAlmostEqual(fuel.getDimension(\"od\"), 0.87763665, 4)\n        self.assertAlmostEqual(fuel.getDimension(\"id\"), 0.0)\n        self.assertAlmostEqual(fuel.getDimension(\"mult\"), 169)\n\n    def test_traceNuclides(self):\n        \"\"\"Ensure that armi.reactor.blueprints.componentBlueprint.insertDepletableNuclideKeys runs.\n\n        .. test:: Users marking components as depletable will affect number densities.\n            :id: T_ARMI_BP_NUC_FLAGS1\n            :tests: R_ARMI_BP_NUC_FLAGS\n        \"\"\"\n        fuel = (\n            self.blueprints.constructAssem(self.cs, \"igniter fuel\").getFirstBlock(Flags.FUEL).getComponent(Flags.FUEL)\n        )\n        self.assertIn(\"AM241\", fuel.getNuclides())\n        self.assertLess(fuel.getNumberDensity(\"AM241\"), 1e-5)\n\n\nclass TestBlueprintsSchema(unittest.TestCase):\n    \"\"\"Test the blueprint schema checks.\"\"\"\n\n    _yamlString = r\"\"\"blocks:\n    fuel: &block_fuel\n        fuel: &component_fuel_fuel\n            shape: Hexagon\n            material: UZr\n            Tinput: 25.0\n            Thot: 600.0\n            ip: 0.0\n            mult: 1.0\n            op: 10.0\n    fuel2: &block_fuel2\n        group1:\n            shape: Group\n        duct:\n            shape: Hexagon\n            material: UZr\n            Tinput: 25.0\n            Thot: 600.0\n            ip: 9.0\n            mult: 1.0\n            op: 10.0\n        matrix:\n            shape: DerivedShape\n            material: Graphite\n            Tinput: 25.0\n            Thot: 
600.0\n\ncomponents:\n    freefuel:\n        shape: Sphere\n        material: UZr\n        Tinput: 25.0\n        Thot: 600.0\n        id: 0.0\n        mult: 1.0\n        od: 4.0\n    freeclad:\n        shape: Sphere\n        material: HT9\n        Tinput: 25.0\n        Thot: 600.0\n        id: 4.0\n        mult: 1.0\n        od: 4.1\n\ncomponent groups:\n    group1:\n      freefuel:\n        mult: 1.0\n      freeclad:\n        mult: 1.0\n\nassemblies:\n    fuel a: &assembly_a\n        specifier: IC\n        blocks: [*block_fuel]\n        height: [1.0]\n        axial mesh points: [1]\n        xs types: [A]\n    fuel b:\n        <<: *assembly_a\n        hotChannelFactors: Reactor\n    fuel c: &assembly_c\n        specifier: OC\n        blocks: [*block_fuel2]\n        height: [1.0]\n        axial mesh points: [1]\n        xs types: [A]\ngrids:\n    pins:\n        geom: cartesian\n        lattice map: |\n            2 2 2 2 2\n            2 1 1 1 2\n            2 1 3 1 2\n            2 3 1 1 2\n            2 2 2 2 2\n\"\"\"\n\n    def test_noDuplicateKeysInYamlBlueprints(self):\n        \"\"\"\n        Prove that if you duplicate a section of a YAML blueprint file,\n        a hard error will be thrown.\n        \"\"\"\n        # loop through a few different sections, to test blueprints broadly\n        sections = [\"blocks:\", \"components:\", \"component groups:\"]\n        for sectionName in sections:\n            # modify blueprint YAML to duplicate this section\n            yamlString = str(self._yamlString)\n            i = yamlString.find(sectionName)\n            lenSection = yamlString[i:].find(\"\\n\\n\")\n            section = yamlString[i : i + lenSection]\n            yamlString = yamlString[:i] + section + yamlString[i : i + lenSection]\n\n            # validate that this is now an invalid YAML blueprint\n            with self.assertRaises(Exception):\n                _design = blueprints.Blueprints.load(yamlString)\n\n    def 
test_assemblyParameters(self):\n        cs = settings.Settings()\n        design = blueprints.Blueprints.load(self._yamlString)\n        fa = design.constructAssem(cs, name=\"fuel a\")\n        fb = design.constructAssem(cs, name=\"fuel b\")\n        for paramDef in fa.p.paramDefs.inCategory(parameters.Category.assignInBlueprints):\n            # Semantics of __iter__() and items() is different now in the parameter system. We use the parameter\n            # definitions (which have a global-ish sense of `assigned`ness), so we can't tell, per-object, whether\n            # they've been set.\n            self.assertEqual(paramDef.default, fa.p[paramDef.name])\n            self.assertIn(paramDef.name, fb.p)\n\n        self.assertEqual(fa.p.hotChannelFactors, \"Default\")\n        self.assertEqual(fb.p.hotChannelFactors, \"Reactor\")\n\n    def test_nuclidesMc2v2(self):\n        \"\"\"Tests that ZR is not expanded to its isotopics for this setting.\"\"\"\n        cs = settings.Settings()\n        newSettings = {CONF_XS_KERNEL: \"MC2v2\"}\n        cs = cs.modified(newSettings=newSettings)\n\n        design = blueprints.Blueprints.load(self._yamlString)\n        design._prepConstruction(cs)\n        self.assertTrue(set({\"U238\", \"U235\", \"ZR\"}).issubset(set(design.allNuclidesInProblem)))\n\n        assem = design.constructAssem(cs, name=\"fuel a\")\n        self.assertTrue(set(assem.getNuclides()).issubset(set(design.allNuclidesInProblem)))\n\n    def test_nuclidesMc2v3(self):\n        \"\"\"Tests that ZR is expanded to its isotopics for MC2v3.\"\"\"\n        cs = settings.Settings()\n        newSettings = {CONF_XS_KERNEL: \"MC2v3\"}\n        cs = cs.modified(newSettings=newSettings)\n\n        design = blueprints.Blueprints.load(self._yamlString)\n        design._prepConstruction(cs)\n\n        # 93 and 95 are not naturally occurring.\n        zrNucs = {\"ZR\" + str(A) for A in range(90, 97)} - {\"ZR93\", \"ZR95\"}\n        self.assertTrue(set({\"U238\", \"U235\"} | 
zrNucs).issubset(set(design.allNuclidesInProblem)))\n        self.assertTrue(zrNucs.issubset(set(design.inertNuclides)))\n\n        assem = design.constructAssem(cs, name=\"fuel a\")\n        # the assembly won't get non-naturally occurring nuclides\n        nb = NuclideBases()\n        unnaturalZr = (n.name for n in nb.elements.bySymbol[\"ZR\"].nuclides if n.abundance == 0.0)\n        designNucs = set(design.allNuclidesInProblem).difference(unnaturalZr)\n        self.assertTrue(set(assem.getNuclides()).issubset(designNucs))\n\n    def test_merge(self):\n        yamlString = r\"\"\"\nnuclide flags:\n    B10: {burn: true, xs: true}\n    B11: {burn: true, xs: true}\n    DUMP1: {burn: true, xs: true}\n    FE: {burn: true, xs: true}\n    NI: {burn: true, xs: true}\n    C: {burn: true, xs: true}\n    MO: {burn: true, xs: true}\n    SI: {burn: true, xs: true}\n    CR: {burn: true, xs: true}\n    MN:  {burn: true, xs: true}\n    NA:  {burn: true, xs: true}\n    V:  {burn: true, xs: true}\n    W:  {burn: true, xs: true}\nblocks:\n    nomerge block: &unmerged_block\n        A: &comp_a\n            shape: Circle\n            material: B4C\n            Tinput: 50.0\n            Thot: 500.0\n            id: 0.0\n            mult: 1\n            od: .5\n        Gap1: &comp_gap\n            shape: Circle\n            material: Void\n            Tinput: 50.0\n            Thot: 500.0\n            id: A.od\n            mult: 1\n            od: B.id\n        B: &gcomp_b\n            shape: Circle\n            material: HT9\n            Tinput: 20.0\n            Thot: 600.0\n            id: .5\n            mult: 1\n            od: .75\n        Gap2: &comp_gap2\n            shape: Circle\n            material: Void\n            Tinput: 50.0\n            Thot: 500.0\n            id: B.od\n            mult: 1\n            od: Clad.id\n        Clad: &comp_clad\n            shape: Circle\n            material: HT9\n            Tinput: 20.0\n            Thot: 700.0\n            id: .75\n   
         mult: 1\n            od: 1.0\n        coolant: &comp_coolant\n            shape: DerivedShape\n            material: Sodium\n            Tinput: 600.0\n            Thot: 600.0\n        duct: &comp_duct\n            shape: Hexagon\n            material: HT9\n            Tinput: 20.0\n            Thot: 500.0\n            ip: 1.2\n            mult: 1\n            op: 1.4\n        intercoolant: &comp_intercoolant\n            shape: Hexagon\n            material: Sodium\n            Tinput: 500.0\n            Thot: 500.0\n            ip: duct.op\n            mult: 1\n            op: 1.6\n    merge block: &merged_block\n        A:\n            <<: *comp_a\n            mergeWith: Clad\n        Gap1: *comp_gap\n        B:\n            <<: *gcomp_b\n            mergeWith: Clad\n        Gap2: *comp_gap2\n        Clad: *comp_clad\n        coolant: *comp_coolant\n        duct: *comp_duct\n        intercoolant: *comp_intercoolant\nassemblies:\n    a: &assembly_a\n        specifier: IC\n        blocks: [*merged_block, *unmerged_block]\n        height: [1.0, 1.0]\n        axial mesh points: [1, 1]\n        xs types: [A, A]\n\"\"\"\n        bp = blueprints.Blueprints.load(yamlString)\n        a = bp.constructAssem(settings.Settings(), name=\"a\")\n        mergedBlock, unmergedBlock = a\n        self.assertNotIn(\"A\", mergedBlock.getComponentNames())\n        self.assertNotIn(\"B\", mergedBlock.getComponentNames())\n\n        self.assertEqual(len(mergedBlock) + 4, len(unmergedBlock))\n        self.assertAlmostEqual(\n            sum(c.getArea() for c in mergedBlock),\n            sum(c.getArea() for c in unmergedBlock),\n        )\n\n        mergedNucs, unmergedNucs = (\n            mergedBlock.getNumberDensities(),\n            unmergedBlock.getNumberDensities(),\n        )\n        errorMessage = \"\"\n        for nucName in set(unmergedNucs) | set(mergedNucs):\n            n1, n2 = unmergedNucs[nucName], mergedNucs[nucName]\n            try:\n                
self.assertAlmostEqual(n1, n2)\n            except AssertionError:\n                errorMessage += \"\\nnuc {} not equal. unmerged: {} merged: {}\".format(nucName, n1, n2)\n        self.assertTrue(not errorMessage, errorMessage)\n        self.assertAlmostEqual(mergedBlock.getMass(), unmergedBlock.getMass())\n\n    def test_nuclideFlags(self):\n        with self.assertRaises(yamlize.YamlizingError):\n            NuclideFlags.load(\"{potato: {burn: true, xs: true}}\")\n\n        with self.assertRaises(yamlize.YamlizingError):\n            NuclideFlags.load(\"{U238: {burn: 12, xs: 0}}\")\n\n    def test_customIsotopics(self):\n        with self.assertRaises(yamlize.YamlizingError):\n            CustomIsotopics.load(\"MOX: {input format: applesauce}\")\n\n        with self.assertRaises(yamlize.YamlizingError):\n            CustomIsotopics.load(\"MOX: {input format: number densities, density: -0.1}\")\n\n        with self.assertRaises(yamlize.YamlizingError):\n            CustomIsotopics.load(\"MOX: {input format: number densities, density: 1.5, FAKENUC234: 0.000286}\")\n\n    def test_components(self):\n        bads = [\n            # bad shape\n            {\n                \"shape\": \"potato\",\n                \"name\": \"name\",\n                \"material\": \"HT9\",\n                \"Tinput\": 1.0,\n                \"Thot\": 1.0,\n            },\n            # bad merge\n            {\n                \"shape\": \"circle\",\n                \"name\": \"name\",\n                \"material\": \"HT9\",\n                \"Tinput\": 1.0,\n                \"Thot\": 1.0,\n                \"mergeWith\": 6,\n            },\n            # bad isotopics\n            {\n                \"shape\": \"circle\",\n                \"name\": \"name\",\n                \"material\": \"HT9\",\n                \"Tinput\": 1.0,\n                \"Thot\": 1.0,\n                \"isotopics\": 4,\n            },\n            # bad key\n            {\n                \"shape\": 
\"circle\",\n                \"name\": \"name\",\n                \"material\": \"HT9\",\n                \"Tinput\": 1.0,\n                \"Thot\": 1.0,\n                5: \"od\",\n            },\n            # bad linked dimension\n            {\n                \"shape\": \"circle\",\n                \"name\": \"name\",\n                \"material\": \"HT9\",\n                \"Tinput\": 1.0,\n                \"Thot\": 1.0,\n                \"mult\": \"potato,mult\",\n            },\n        ]\n        for bad in bads:\n            with self.assertRaises(yamlize.YamlizingError):\n                ComponentBlueprint.load(repr(bad))\n\n    def test_cladding_invalid(self):\n        \"\"\"Make sure cladding input components are flagged as invalid.\"\"\"\n        bad = {\n            \"name\": \"cladding\",\n            \"shape\": \"Circle\",\n            \"material\": \"HT9\",\n            \"Tinput\": 1.0,\n            \"Thot\": 1.0,\n        }\n        with self.assertRaises(yamlize.YamlizingError):\n            ComponentBlueprint.load(repr(bad))\n\n    def test_withoutBlocks(self):\n        # Some projects use a script to generate an input that has completely unique blocks,\n        # so the blocks: section is not needed\n        yamlWithoutBlocks = \"\"\"\nnuclide flags:\n    U238: {burn: true, xs: true}\n    U235: {burn: true, xs: true}\n    LFP35: {burn: true, xs: true}\n    U236: {burn: true, xs: true}\n    PU239: {burn: true, xs: true}\n    DUMP2: {burn: true, xs: true}\n    DUMP1: {burn: true, xs: true}\n    NP237: {burn: true, xs: true}\n    PU238: {burn: true, xs: true}\n    PU236: {burn: true, xs: true}\n    LFP39: {burn: true, xs: true}\n    PU238: {burn: true, xs: true}\n    LFP40: {burn: true, xs: true}\n    PU241: {burn: true, xs: true}\n    LFP38: {burn: true, xs: true}\n    U234: {burn: true, xs: true}\n    AM241: {burn: true, xs: true}\n    LFP41: {burn: true, xs: true}\n    PU242: {burn: true, xs: true}\n    AM243: {burn: true, xs: true}\n    
CM244: {burn: true, xs: true}\n    CM242: {burn: true, xs: true}\n    AM242: {burn: true, xs: true}\n    PU240: {burn: true, xs: true}\n    CM245: {burn: true, xs: true}\n    NP238: {burn: true, xs: true}\n    CM243: {burn: true, xs: true}\n    CM246: {burn: true, xs: true}\n    CM247: {burn: true, xs: true}\n    ZR: {burn: false, xs: true}\n\nassemblies:\n    fuel a: &assembly_a\n        specifier: FF\n        blocks:\n        - { name: fuel,\n            fuel: { shape: Hexagon, material: UZr, Tinput: 25.0, Thot: 600.0, ip: 0.0, mult: 1.0, op: 10.0} }\n        height: [1.0]\n        axial mesh points: [1]\n        xs types: [A]\n    fuel b:\n        <<: *assembly_a\n        specifier: IF\n        \"\"\"\n        cs = settings.Settings()\n        design = blueprints.Blueprints.load(yamlWithoutBlocks)\n        design.constructAssem(cs, name=\"fuel a\")\n        fa = design.constructAssem(cs, name=\"fuel a\")\n        fb = design.constructAssem(cs, name=\"fuel b\")\n        for a in (fa, fb):\n            self.assertEqual(1, len(a))\n            self.assertEqual(1, len(a[0]))\n\n    def test_topLevelComponentInput(self):\n        \"\"\"\n        Make sure components defined at the top level are loaded.\n\n        Components can be loaded either within the block blueprint\n        or on their own outside of blocks. 
This checks the latter\n        form.\n\n        We specified a 3D component in the test input (sphere)\n        so that it has a height and therefore a volume\n        without requiring a parent.\n        \"\"\"\n        cs = settings.Settings()\n        design = blueprints.Blueprints.load(self._yamlString)\n        # The following is needed to prep customisotopics\n        # which is required during construction of a component\n        design._resolveNuclides(cs)\n        componentDesign = design.componentDesigns[\"freefuel\"]\n        topComponent = componentDesign.construct(design, {}, cs[CONF_INPUT_HEIGHTS_HOT])\n        self.assertEqual(topComponent.getDimension(\"od\", cold=True), 4.0)\n        self.assertGreater(topComponent.getVolume(), 0.0)\n        self.assertGreater(topComponent.getMass(\"U235\"), 0.0)\n\n    def test_componentGroupInput(self):\n        \"\"\"Make sure component groups can be input in blueprints.\"\"\"\n        design = blueprints.Blueprints.load(self._yamlString)\n        componentGroup = design.componentGroups[\"group1\"]\n        self.assertEqual(componentGroup[\"freefuel\"].name, \"freefuel\")\n        self.assertEqual(componentGroup[\"freefuel\"].mult, 1.0)\n"
  },
  {
    "path": "armi/reactor/blueprints/tests/test_componentBlueprint.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Module for testing componentBlueprint.\"\"\"\n\nimport inspect\nimport unittest\n\nfrom armi import settings\nfrom armi.reactor import blueprints\nfrom armi.reactor.flags import Flags\n\n\nclass TestComponentBlueprint(unittest.TestCase):\n    componentString = r\"\"\"\nblocks:\n    block: &block\n        component:\n            flags: {flags}\n            shape: Hexagon\n            material: {material} # This is being used to format a string to allow for different materials to be added\n            {isotopics} # This is being used to format a string to allow for different isotopics to be added\n            Tinput: 25.0\n            Thot: 600.0\n            ip: 0.0\n            mult: 169.0\n            op: 0.86602\nassemblies:\n    assembly: &assembly_a\n        specifier: IC\n        blocks: [*block]\n        height: [1.0]\n        axial mesh points: [1]\n        xs types: [A]\n\"\"\"\n\n    def test_compInitIncompleteBurnChain(self):\n        nuclideFlagsFuelWithBurn = (\n            inspect.cleandoc(\n                r\"\"\"\n            nuclide flags:\n                U238: {burn: true, xs: true}\n                U235: {burn: true, xs: true}\n                ZR: {burn: false, xs: true}\n            \"\"\"\n            )\n            + \"\\n\"\n        )\n        bp = blueprints.Blueprints.load(\n            nuclideFlagsFuelWithBurn + 
self.componentString.format(material=\"UZr\", isotopics=\"\", flags=\"\")\n        )\n        cs = settings.Settings()\n        with self.assertRaises(ValueError):\n            bp.constructAssem(cs, \"assembly\")\n\n    def test_compInitControlCustomIso(self):\n        nuclideFlags = (\n            inspect.cleandoc(\n                \"\"\"\n            nuclide flags:\n                U234: {burn: true, xs: true}\n                U235: {burn: true, xs: true}\n                U238: {burn: true, xs: true}\n                B10: {burn: true, xs: true}\n                B11: {burn: true, xs: true}\n                C: {burn: true, xs: true}\n                DUMP1: {burn: true, xs: true}\n            custom isotopics:\n                B4C:\n                    input format: number densities\n                    B10: 1.0\n                    B11: 1.0\n                    C: 1.0\n            \"\"\"\n            )\n            + \"\\n\"\n        )\n        bp = blueprints.Blueprints.load(\n            nuclideFlags + self.componentString.format(material=\"Custom\", isotopics=\"isotopics: B4C\", flags=\"\")\n        )\n        cs = settings.Settings()\n        _ = bp.constructAssem(cs, \"assembly\")\n\n    def test_autoDepletable(self):\n        nuclideFlags = (\n            inspect.cleandoc(\n                \"\"\"\n            nuclide flags:\n                U234: {burn: true, xs: true}\n                U235: {burn: true, xs: true}\n                U238: {burn: true, xs: true}\n                B10: {burn: true, xs: true}\n                B11: {burn: true, xs: true}\n                C: {burn: true, xs: true}\n                DUMP1: {burn: true, xs: true}\n            custom isotopics:\n                B4C:\n                    input format: number densities\n                    B10: 1.0\n                    B11: 1.0\n                    C: 1.0\n            \"\"\"\n            )\n            + \"\\n\"\n        )\n        bp = blueprints.Blueprints.load(\n            nuclideFlags 
+ self.componentString.format(material=\"Custom\", isotopics=\"isotopics: B4C\", flags=\"\")\n        )\n        cs = settings.Settings()\n        a = bp.constructAssem(cs, \"assembly\")\n        expectedNuclides = [\"B10\", \"B11\", \"C\", \"DUMP1\"]\n        unexpectedNuclides = [\"U234\", \"U325\", \"U238\"]\n        for nuc in expectedNuclides:\n            self.assertIn(nuc, a[0][0].getNuclides())\n        for nuc in unexpectedNuclides:\n            self.assertNotIn(nuc, a[0][0].getNuclides())\n\n        c = a[0][0]\n\n        # Since we didn't supply flags, we should get the DEPLETABLE flag added\n        # automatically, since this one has depletable nuclides\n        self.assertEqual(c.p.flags, Flags.DEPLETABLE)\n        # More robust test, but worse unittest.py output when it fails\n        self.assertTrue(c.hasFlags(Flags.DEPLETABLE))\n\n        # repeat the process with some flags set explicitly\n        bp = blueprints.Blueprints.load(\n            nuclideFlags + self.componentString.format(material=\"Custom\", isotopics=\"isotopics: B4C\", flags=\"fuel test\")\n        )\n        cs = settings.Settings()\n        a = bp.constructAssem(cs, \"assembly\")\n        c = a[0][0]\n\n        # Since we supplied flags, we should NOT get the DEPLETABLE flag added\n        self.assertEqual(c.p.flags, Flags.FUEL | Flags.TEST)\n        # More robust test, but worse unittest.py output when it fails\n        self.assertTrue(c.hasFlags(Flags.FUEL | Flags.TEST))\n\n    def test_compInitAmericiumCustomIso(self):\n        nuclideFlags = (\n            inspect.cleandoc(\n                r\"\"\"\n            nuclide flags:\n                CM242: {burn: true, xs: true}\n                PU241: {burn: true, xs: true}\n                AM242G: {burn: true, xs: true}\n                AM242M: {burn: true, xs: true}\n                AM241: {burn: true, xs: true}\n                LFP41: {burn: true, xs: true}\n                PU240: {burn: true, xs: true}\n                AM243: 
{burn: true, xs: true}\n                NP238: {burn: true, xs: true}\n                PU242: {burn: true, xs: true}\n                CM243: {burn: true, xs: true}\n                PU238: {burn: true, xs: true}\n                DUMP2: {burn: true, xs: true}\n                DUMP1: {burn: true, xs: true}\n                U238: {burn: true, xs: true}\n                CM244: {burn: true, xs: true}\n                LFP40: {burn: true, xs: true}\n                U236: {burn: true, xs: true}\n                PU236: {burn: true, xs: true}\n                U234: {burn: true, xs: true}\n                CM245: {burn: true, xs: true}\n                PU239: {burn: true, xs: true}\n                NP237: {burn: true, xs: true}\n                U235: {burn: true, xs: true}\n                LFP39: {burn: true, xs: true}\n                LFP35: {burn: true, xs: true}\n                LFP38: {burn: true, xs: true}\n                CM246: {burn: true, xs: true}\n                CM247: {burn: true, xs: true}\n                B10: {burn: true, xs: true}\n                B11: {burn: true, xs: true}\n                W186: {burn: true, xs: true}\n                C: {burn: true, xs: true}\n                S: {burn: true, xs: true}\n                P: {burn: true, xs: true}\n            custom isotopics:\n                AM:\n                    input format: number densities\n                    AM241: 1.0\n            \"\"\"\n            )\n            + \"\\n\"\n        )\n        bp = blueprints.Blueprints.load(\n            nuclideFlags + self.componentString.format(material=\"Custom\", isotopics=\"isotopics: AM\", flags=\"\")\n        )\n        cs = settings.Settings()\n        a = bp.constructAssem(cs, \"assembly\")\n        expectedNuclides = [\n            \"AM241\",\n            \"U238\",\n            \"AM243\",\n            \"AM242M\",\n            \"NP237\",\n            \"NP238\",\n            \"U234\",\n            \"U235\",\n            \"LFP38\",\n            \"LFP39\",\n 
           \"PU239\",\n            \"PU238\",\n            \"LFP35\",\n            \"U236\",\n            \"CM247\",\n            \"CM246\",\n            \"CM245\",\n            \"CM244\",\n            \"PU240\",\n            \"PU241\",\n            \"PU242\",\n            \"PU236\",\n            \"CM243\",\n            \"CM242\",\n            \"DUMP2\",\n            \"LFP41\",\n            \"LFP40\",\n        ]\n        unexpectedNuclides = [\"B10\", \"B11\", \"W186\", \"C\", \"S\", \"P\"]\n        for nuc in expectedNuclides:\n            self.assertIn(nuc, a[0][0].getNuclides())\n        for nuc in unexpectedNuclides:\n            self.assertNotIn(nuc, a[0][0].getNuclides())\n\n    def test_compInitThoriumBurnCustomIso(self):\n        nuclideFlags = (\n            inspect.cleandoc(\n                r\"\"\"\n            nuclide flags:\n                TH232: {burn: true, xs: true}\n                PA233: {burn: true, xs: true}\n                PA231: {burn: true, xs: true}\n                U232: {burn: true, xs: true}\n                U233: {burn: true, xs: true}\n                CM242: {burn: true, xs: true}\n                PU241: {burn: true, xs: true}\n                AM242G: {burn: true, xs: true}\n                AM242M: {burn: true, xs: true}\n                AM241: {burn: true, xs: true}\n                LFP41: {burn: true, xs: true}\n                PU240: {burn: true, xs: true}\n                AM243: {burn: true, xs: true}\n                NP238: {burn: true, xs: true}\n                PU242: {burn: true, xs: true}\n                CM243: {burn: true, xs: true}\n                PU238: {burn: true, xs: true}\n                DUMP2: {burn: true, xs: true}\n                DUMP1: {burn: true, xs: true}\n                U238: {burn: true, xs: true}\n                CM244: {burn: true, xs: true}\n                LFP40: {burn: true, xs: true}\n                U236: {burn: true, xs: true}\n                PU236: {burn: true, xs: true}\n                U234: 
{burn: true, xs: true}\n                CM245: {burn: true, xs: true}\n                PU239: {burn: true, xs: true}\n                NP237: {burn: true, xs: true}\n                U235: {burn: true, xs: true}\n                LFP39: {burn: true, xs: true}\n                LFP35: {burn: true, xs: true}\n                LFP38: {burn: true, xs: true}\n                CM246: {burn: true, xs: true}\n                CM247: {burn: true, xs: true}\n            custom isotopics:\n                Thorium:\n                    input format: number densities\n                    TH232: 1.0\n            \"\"\"\n            )\n            + \"\\n\"\n        )\n        bp = blueprints.Blueprints.load(\n            nuclideFlags + self.componentString.format(material=\"Custom\", isotopics=\"isotopics: Thorium\", flags=\"\")\n        )\n        cs = settings.Settings()\n        a = bp.constructAssem(cs, \"assembly\")\n        expectedNuclides = [\"TH232\", \"PA233\", \"PA231\", \"DUMP2\", \"LFP35\"]\n        for nuc in expectedNuclides:\n            self.assertIn(nuc, a[0][0].getNuclides())\n\n    def test_compInitThoriumNoBurnCustomIso(self):\n        nuclideFlags = (\n            inspect.cleandoc(\n                r\"\"\"\n            nuclide flags:\n                TH232: {burn: false, xs: true}\n            custom isotopics:\n                Thorium:\n                    input format: number densities\n                    TH232: 1.0\n            \"\"\"\n            )\n            + \"\\n\"\n        )\n        bp = blueprints.Blueprints.load(\n            nuclideFlags + self.componentString.format(material=\"Custom\", isotopics=\"isotopics: Thorium\", flags=\"\")\n        )\n        cs = settings.Settings()\n        a = bp.constructAssem(cs, \"assembly\")\n        expectedNuclides = [\"TH232\"]\n        for nuc in expectedNuclides:\n            self.assertIn(nuc, a[0][0].getNuclides())\n"
  },
  {
    "path": "armi/reactor/blueprints/tests/test_customIsotopics.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Unit test custom isotopics.\"\"\"\n\nimport unittest\nfrom logging import DEBUG\n\nimport numpy as np\nimport yamlize\n\nfrom armi import runLog, settings\nfrom armi.materials import Fluid, Sodium\nfrom armi.physics.neutronics.settings import (\n    CONF_MCNP_LIB_BASE,\n    CONF_NEUTRONICS_KERNEL,\n    CONF_XS_KERNEL,\n)\nfrom armi.reactor import blueprints\nfrom armi.reactor.blueprints import isotopicOptions\nfrom armi.reactor.flags import Flags\nfrom armi.tests import mockRunLogs\nfrom armi.utils.customExceptions import InputError\nfrom armi.utils.directoryChangers import TemporaryDirectoryChanger\n\n\nclass TestCustomIsotopics(unittest.TestCase):\n    yamlPreamble = r\"\"\"\nnuclide flags:\n    U238: {burn: true, xs: true}\n    U235: {burn: true, xs: true}\n    U234: {burn: true, xs: true}\n    ZR: {burn: false, xs: true}\n    AL: {burn: false, xs: true}\n    FE: {burn: false, xs: true}\n    C: {burn: false, xs: true}\n    NA: {burn: false, xs: true}\n    DUMP2: {burn: true, xs: true}\n    DUMP1: {burn: true, xs: true}\n    LFP35: {burn: true, xs: true}\n    PU239: {burn: true, xs: true}\n    NP237: {burn: true, xs: true}\n    LFP38: {burn: true, xs: true}\n    LFP39: {burn: true, xs: true}\n    PU240: {burn: true, xs: true}\n    PU236: {burn: true, xs: true}\n    PU238: {burn: true, xs: true}\n    U236: {burn: true, xs: true}\n    LFP40: {burn: true, 
xs: true}\n    PU241: {burn: true, xs: true}\n    AM241: {burn: true, xs: true}\n    LFP41: {burn: true, xs: true}\n    PU242: {burn: true, xs: true}\n    AM243: {burn: true, xs: true}\n    CM244: {burn: true, xs: true}\n    CM242: {burn: true, xs: true}\n    AM242: {burn: true, xs: true}\n    CM245: {burn: true, xs: true}\n    NP238: {burn: true, xs: true}\n    CM243: {burn: true, xs: true}\n    CM246: {burn: true, xs: true}\n    CM247: {burn: true, xs: true}\n    NI: {burn: true, xs: true}\n    W: {burn: true, xs: true, expandTo: [\"W182\", \"W183\", \"W184\", \"W186\"]}\n    MN: {burn: true, xs: true}\n    CR: {burn: true, xs: true}\n    V: {burn: true, xs: true}\n    SI: {burn: true, xs: true}\n    MO: {burn: true, xs: true}\n\ncustom isotopics:\n    uranium isotopic mass fractions:\n        input format: mass fractions\n        U238: 0.992742\n        U235: 0.007204\n        U234: 0.000054\n        density: 19.1\n\n    uranium isotopic number fractions:\n        input format: number fractions\n        U238: 0.992650\n        U235: 0.007295\n        U234: 0.000055\n        density: 19.1\n\n    uranium isotopic number densities: &u_isotopics\n        input format: number densities\n        U234: 2.6539102e-06\n        U235: 3.5254048e-04\n        U238: 4.7967943e-02\n\n    bad uranium isotopic mass fractions:\n        input format: mass fractions\n        U238: 0.992742\n        U235: 0.007204\n        U234: 0.000054\n        density: 0\n\n    negative uranium isotopic mass fractions:\n        input format: mass fractions\n        U238: 0.992742\n        U235: 0.007204\n        U234: 0.000054\n        density: -1\n\n    linked uranium number densities: *u_isotopics\n\n    steel:\n        input format: mass fractions\n        FE: 0.7\n        C: 0.3\n        density: 7.0\n\n    sodium custom isotopics:\n        input format: mass fractions\n        NA: 1\n        density: 666\n\n\"\"\"\n\n    yamlGoodBlocks = r\"\"\"\nblocks:\n    uzr fuel: &block_0\n        
fuel: &basic_fuel\n            shape: Hexagon\n            material: UZr\n            Tinput: 25.0\n            Thot: 600.0\n            ip: 0.0\n            mult: 1.0\n            op: 10.0\n\n        clad:\n            shape: Circle\n            material: HT9\n            Tinput: 25.0\n            Thot: 600.0\n            id: 0.0\n            mult: 1.0\n            od: 10.0\n\n        sodium1:\n            shape: Circle\n            material: Sodium\n            Tinput: 100\n            Thot: 600\n            id: 0\n            mult: 1\n            od: 1\n\n        sodium2:\n            shape: Circle\n            material: Sodium\n            isotopics: sodium custom isotopics\n            Tinput: 100\n            Thot: 600\n            id: 0\n            mult: 1\n            od: 1\n\n    uranium fuel from isotopic mass fractions : &block_1\n        fuel:\n            <<: *basic_fuel\n            material: Custom\n            isotopics: uranium isotopic mass fractions\n\n    wrong material: &block_2\n        fuel:\n            <<: *basic_fuel\n            isotopics: uranium isotopic mass fractions\n\n    uranium fuel from number fractions: &block_3\n        fuel:\n            <<: *basic_fuel\n            material: Custom\n            isotopics: uranium isotopic number fractions\n\n    uranium fuel from number densities: &block_4\n        fuel:\n            <<: *basic_fuel\n            material: Custom\n            isotopics: uranium isotopic number densities\n\n    uranium fuel from nd link: &block_5\n        fuel:\n            <<: *basic_fuel\n            material: Custom\n            isotopics: linked uranium number densities\n\n    fuel with no modifications: &block_6  # after a custom density has been set\n        fuel:\n            <<: *basic_fuel\n\n    overspecified fuel: &block_7\n        fuel:\n            <<: *basic_fuel\n            material: UraniumOxide\n            isotopics: uranium isotopic number densities\n\n    density set via number density: 
&block_8\n        fuel:\n            <<: *basic_fuel\n            isotopics: uranium isotopic number densities\n\n    steel: &block_9\n        clad:\n            shape: Hexagon\n            material: Custom\n            isotopics: steel\n            Tinput: 100\n            Thot: 600.0\n            ip: 0.0\n            mult: 169.0\n            op: 0.86602\n\nassemblies:\n    fuel a: &assembly_a\n        specifier: IC\n        blocks: [*block_0, *block_1, *block_2, *block_3, *block_4, *block_5, *block_6, *block_7, *block_8, *block_9]\n        height: [10, 10, 10, 10, 10, 10, 10, 10, 10, 10]\n        axial mesh points: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n        xs types: [A, A, A, A, A, A, A, A, A, A]\n        material modifications:\n            TD_frac: [\"\", \"\", \"\", \"\", \"\", \"\", \"\", 0.1, \"\", \"\"]\n\n\"\"\"\n\n    yamlBadBlocks = r\"\"\"\nblocks:\n    uzr fuel: &block_0\n        fuel: &basic_fuel\n            shape: Hexagon\n            material: UZr\n            Tinput: 100\n            Thot: 600.0\n            ip: 0.0\n            mult: 1.0\n            op: 10.0\n\n        clad:\n            shape: Circle\n            material: HT9\n            Tinput: 100\n            Thot: 600.0\n            id: 0.0\n            mult: 1.0\n            od: 10.0\n\n    custom void: &block_1\n        fuel:\n            <<: *basic_fuel\n            material: Void\n            isotopics: uranium isotopic number densities\n\n    steel: &block_2\n        clad:\n            shape: Hexagon\n            material: Custom\n            isotopics: steel\n            Tinput: 100\n            Thot: 600.0\n            ip: 0.0\n            mult: 169.0\n            op: 0.86602\n\n    no density uo2: &block_3\n        fuel:\n            <<: *basic_fuel\n            material: UraniumOxide\n            isotopics: uranium isotopic number densities\n\n    no density uo2: &block_4\n        fuel:\n            <<: *basic_fuel\n            material: UraniumOxide\n            isotopics: bad 
uranium isotopic mass fractions\n\n    no density uo2: &block_5\n        fuel:\n            <<: *basic_fuel\n            material: UraniumOxide\n            isotopics: bad uranium isotopic mass fractions\n\n\nassemblies:\n    fuel a: &assembly_a\n        specifier: IC\n        blocks: [*block_0, *block_1, *block_2]\n        height: [10, 10, 10]\n        axial mesh points: [1, 1, 1]\n        xs types: [A, A, A]\n        material modifications:\n            TD_frac: [\"\", \"\", \"\"]\n\n    fuel b: &assembly_b\n        specifier: IC\n        blocks: [*block_0, *block_3, *block_2]\n        height: [10, 10, 10]\n        axial mesh points: [1, 1, 1]\n        xs types: [A, A, A]\n        material modifications:\n            TD_frac: [\"\", \"0.0\", \"\"]  # set density to 0 to cause error in custom density\n\n    fuel c: &assembly_c\n        specifier: IC\n        blocks: [*block_0, *block_4, *block_2]\n        height: [10, 10, 10]\n        axial mesh points: [1, 1, 1]\n        xs types: [A, A, A]\n\n    fuel d: &assembly_d\n        specifier: IC\n        blocks: [*block_0, *block_5, *block_2]\n        height: [10, 10, 10]\n        axial mesh points: [1, 1, 1]\n        xs types: [A, A, A]\n\n\"\"\"\n\n    # this yaml is supposed to successfully build\n    yamlString = yamlPreamble + yamlGoodBlocks\n\n    # This yaml is designed to raise an error when built\n    yamlStringWithError = yamlPreamble + yamlBadBlocks\n    \"\"\":meta hide-value:\"\"\"\n\n    @classmethod\n    def setUpClass(cls):\n        cs = settings.Settings()\n        cs = cs.modified(\n            newSettings={\n                CONF_XS_KERNEL: \"MC2v2\",\n                \"inputHeightsConsideredHot\": False,\n            }\n        )\n\n        cls.bp = blueprints.Blueprints.load(cls.yamlString)\n        cls.a = cls.bp.constructAssem(cs, name=\"fuel a\")\n        cls.numUZrNuclides = 29  # Number of nuclides defined `nuclide flags`\n        cls.numCustomNuclides = 28  # Number of nuclides defined in 
`nuclide flags` without Zr\n\n    def test_unmodified(self):\n        \"\"\"Ensure that unmodified components have the correct isotopics.\"\"\"\n        fuel = self.a[0].getComponent(Flags.FUEL)\n        self.assertEqual(self.numUZrNuclides, len(fuel.p.numberDensities))\n        # NOTE: This density does not come from the material but is based on number densities.\n        self.assertAlmostEqual(15.5, fuel.density(), 0)  # i.e. it is not 19.1\n\n    def test_massFractionsAreApplied(self):\n        \"\"\"Ensure that the custom isotopics can be specified via mass fractions.\n\n        .. test:: Test that custom isotopics can be specified via mass fractions.\n            :id: T_ARMI_MAT_USER_INPUT3\n            :tests: R_ARMI_MAT_USER_INPUT\n        \"\"\"\n        fuel1 = self.a[1].getComponent(Flags.FUEL)\n        fuel2 = self.a[2].getComponent(Flags.FUEL)\n        self.assertEqual(self.numCustomNuclides, len(fuel1.p.numberDensities))\n        self.assertAlmostEqual(19.1, fuel1.density())\n\n        # keys are same\n        keys1 = set([i for i, v in enumerate(fuel1.p.numberDensities) if v == 0.0])\n        keys2 = set([i for i, v in enumerate(fuel2.p.numberDensities) if v == 0.0])\n        self.assertEqual(keys1, keys2)\n\n    def test_densAppliedToNonCustomMats(self):\n        \"\"\"Ensure that a density can be set in custom isotopics for components using library materials.\"\"\"\n        # The template block\n        fuel0 = self.a[0].getComponent(Flags.FUEL)\n        # The block with custom density but not the 'Custom' material\n        fuel2 = self.a[2].getComponent(Flags.FUEL)\n        # A block like the template block, but made after the custom block\n        fuel6 = self.a[6].getComponent(Flags.FUEL)\n        # A block with custom density set via number density\n        fuel8 = self.a[8].getComponent(Flags.FUEL)\n\n        dLL = fuel2.material.linearExpansionFactor(Tc=600, T0=25)\n        # the exponent here is 3 because inputHeightsConsideredHot = False.\n  
      # if inputHeightsConsideredHot were True, then we would use a factor of 2 instead\n        f = 1 / ((1 + dLL) ** 3)\n\n        # Check that the density is set correctly on the custom density block,\n        # and that it is not the same as the original\n        self.assertAlmostEqual(19.1 * f, fuel2.density())\n        self.assertNotAlmostEqual(fuel0.density(), fuel2.density(), places=2)\n        # Check that the custom density block has the correct material\n        self.assertEqual(\"UZr\", fuel2.material.name)\n        # Check that the block with only number densities set has a new density\n        self.assertAlmostEqual(19.1 * f, fuel8.density())\n        # original material density should not be changed after setting a custom density component,\n        # so a new block without custom isotopics and density should have the same density as the original\n        self.assertAlmostEqual(fuel6.density(), fuel0.density())\n        self.assertEqual(fuel6.material.name, fuel0.material.name)\n        self.assertEqual(\"UZr\", fuel0.material.name)\n\n    def test_densAppliedToNonCustomMatsFluid(self):\n        \"\"\"\n        Ensure that a density can be set in custom isotopics for components using library materials, specifically in the\n        case of a fluid component. 
In this case, inputHeightsConsideredHot does not matter, and the material has a zero\n        dLL value.\n        \"\"\"\n        # The template block\n        sodium1 = self.a[0].getComponentByName(\"sodium1\")\n        sodium2 = self.a[0].getComponentByName(\"sodium2\")\n\n        self.assertEqual(sodium1.material.name, \"Sodium\")\n        self.assertEqual(sodium2.material.name, \"Sodium\")\n        self.assertTrue(isinstance(sodium1.material, Fluid))\n        self.assertTrue(isinstance(sodium2.material, Fluid))\n        self.assertEqual(sodium1.p.customIsotopicsName, \"\")\n        self.assertEqual(sodium2.p.customIsotopicsName, \"sodium custom isotopics\")\n\n        # show that, even though the two components have the same material class\n        # and the same temperatures, their densities are different\n        self.assertNotEqual(sodium1.density(), sodium2.density())\n\n        # show that sodium1 has a density from the material class, while sodium2\n        # has a density from the blueprint and adjusted from Tinput -> Thot\n        s = Sodium()\n        self.assertAlmostEqual(sodium1.density(), s.density(Tc=600))\n        self.assertAlmostEqual(sodium2.density(), s.density(Tc=600) * (666 / s.density(Tc=100)))\n\n    def test_customDensityLogsAndErrors(self):\n        \"\"\"Test that the right warning messages and errors are emitted when applying custom densities.\"\"\"\n        # Check for warnings when specifying both TD_frac and custom isotopics\n        with mockRunLogs.BufferLog() as mockLog:\n            # we should start with a clean slate\n            self.assertEqual(\"\", mockLog.getStdout())\n            runLog.LOG.startLog(\"test_customDensityLogsAndErrors\")\n            runLog.LOG.setVerbosity(DEBUG)\n\n            # rebuild the input to capture the logs\n            cs = settings.Settings()\n            cs = cs.modified(newSettings={CONF_XS_KERNEL: \"MC2v2\"})\n            bp = blueprints.Blueprints.load(self.yamlString)\n            
bp.constructAssem(cs, name=\"fuel a\")\n\n            # Check for log messages\n            streamVal = mockLog.getStdout()\n            self.assertIn(\"and a custom isotopic with density\", streamVal, msg=streamVal)\n            self.assertIn(\"Custom isotopics and material modifications have both\", streamVal, msg=streamVal)\n            self.assertIn(\"A custom material density was specified\", streamVal, msg=streamVal)\n            self.assertIn(\n                \"A custom isotopic with associated density has been specified for non-`Custom`\",\n                streamVal,\n                msg=streamVal,\n            )\n\n        # Check that assigning a custom density to the Void material fails\n        cs = settings.Settings()\n        cs = cs.modified(newSettings={CONF_XS_KERNEL: \"MC2v2\"})\n        bp = blueprints.Blueprints.load(self.yamlStringWithError)\n        # Ensure we have some Void\n        self.assertEqual(bp.blockDesigns[\"custom void\"][\"fuel\"].material, \"Void\")\n        # Can't have stuff in Void\n        with self.assertRaises(ValueError):\n            bp.constructAssem(cs, name=\"fuel a\")\n\n        # Try making a 0 density non-Void material by setting TD_frac to 0.0\n        with self.assertRaises(ValueError):\n            bp.constructAssem(cs, name=\"fuel b\")\n\n        # Try making a material with mass fractions with a density of 0\n        with self.assertRaises(ValueError):\n            bp.constructAssem(cs, name=\"fuel c\")\n\n        # Try making a material with mass fractions with a negative density\n        with self.assertRaises(ValueError):\n            bp.constructAssem(cs, name=\"fuel d\")\n\n    def test_numberFractions(self):\n        \"\"\"Ensure that the custom isotopics can be specified via number fractions.\n\n        .. 
test:: Test that custom isotopics can be specified via number fractions.\n            :id: T_ARMI_MAT_USER_INPUT4\n            :tests: R_ARMI_MAT_USER_INPUT\n        \"\"\"\n        # fuel blocks 2 and 4 should be the same, one is defined as mass fractions, and the other as number fractions\n        fuel2 = self.a[1].getComponent(Flags.FUEL)\n        fuel4 = self.a[3].getComponent(Flags.FUEL)\n        self.assertAlmostEqual(fuel2.density(), fuel4.density())\n\n        keys2 = set([i for i, v in enumerate(fuel2.p.numberDensities) if v == 0.0])\n        keys4 = set([i for i, v in enumerate(fuel4.p.numberDensities) if v == 0.0])\n        self.assertEqual(keys2, keys4)\n        np.testing.assert_almost_equal(fuel2.p.numberDensities, fuel4.p.numberDensities)\n\n    def test_numberDensities(self):\n        \"\"\"Ensure that the custom isotopics can be specified via number densities.\n\n        .. test:: Test that custom isotopics can be specified via number fractions.\n            :id: T_ARMI_MAT_USER_INPUT5\n            :tests: R_ARMI_MAT_USER_INPUT\n        \"\"\"\n        # fuel blocks 2 and 5 should be the same, one is defined as mass fractions, and the other as number densities\n        fuel2 = self.a[1].getComponent(Flags.FUEL)\n        fuel5 = self.a[4].getComponent(Flags.FUEL)\n        self.assertAlmostEqual(fuel2.density(), fuel5.density())\n\n        for i, nuc in enumerate(fuel2.p.nuclides):\n            self.assertIn(nuc, fuel5.p.nuclides)\n            j = np.where(fuel5.p.nuclides == nuc)[0][0]\n            self.assertAlmostEqual(fuel2.p.numberDensities[i], fuel5.p.numberDensities[j])\n\n    def test_numberDensitiesAnchor(self):\n        fuel4 = self.a[4].getComponent(Flags.FUEL)\n        fuel5 = self.a[5].getComponent(Flags.FUEL)\n        self.assertAlmostEqual(fuel4.density(), fuel5.density())\n        np.testing.assert_almost_equal(fuel4.p.numberDensities, fuel5.p.numberDensities)\n\n    def test_expandedNatural(self):\n        cs = settings.Settings()\n  
      cs = cs.modified(newSettings={CONF_XS_KERNEL: \"MC2v3\"})\n\n        bp = blueprints.Blueprints.load(self.yamlString)\n        a = bp.constructAssem(cs, name=\"fuel a\")\n        b = a[-1]\n        c = b.getComponent(Flags.CLAD)\n        self.assertIn(\"FE56\", c.getNumberDensities())  # natural isotopic\n        self.assertNotIn(\"FE51\", c.getNumberDensities())  # un-natural\n        self.assertNotIn(\"FE\", c.getNumberDensities())\n\n    def test_infDiluteAreOnlyNatural(self):\n        \"\"\"Make sure nuclides specified as In-Problem but not actually in any material are only natural isotopics.\"\"\"\n        self.assertIn(\"AL27\", self.bp.allNuclidesInProblem)\n        self.assertNotIn(\"AL26\", self.bp.allNuclidesInProblem)\n\n    def test_getDefaultNuclideFlags(self):\n        # This is a bit of a silly test. We are checking what is essentially a hard coded dictionary\n        nucDict = isotopicOptions.getDefaultNuclideFlags()\n        entry = {\"burn\": True, \"xs\": True, \"expandTo\": None}\n        self.assertEqual(nucDict[\"DUMP1\"], entry)\n        self.assertEqual(nucDict[\"CM244\"], entry)\n        self.assertEqual(nucDict[\"LFP38\"], entry)\n        entry = {\"burn\": False, \"xs\": True, \"expandTo\": None}\n        self.assertEqual(nucDict[\"B10\"], entry)\n        self.assertEqual(nucDict[\"NI\"], entry)\n\n\nclass TestCustomIsotopicsErrors(unittest.TestCase):\n    def test_densityMustBePositive(self):\n        with self.assertRaises(yamlize.YamlizingError):\n            _ = isotopicOptions.CustomIsotopic.load(\n                r\"\"\"\n            name: atom repellent\n            input format: mass fractions\n            U234: 2.6539102e-06\n            U235: 3.5254048e-04\n            U238: 4.7967943e-02\n            density: -0.0001\n            \"\"\"\n            )\n\n    def test_nonConformantElementName(self):\n        with self.assertRaises(yamlize.YamlizingError):\n            _ = isotopicOptions.CustomIsotopic.load(\n              
  r\"\"\"\n            name: non-upper case\n            input format: number densities\n            Au: 0.01\n            \"\"\"\n            )\n\n    def test_numberDensitiesCannotSpecifyDensity(self):\n        with self.assertRaises(yamlize.YamlizingError):\n            _ = isotopicOptions.CustomIsotopic.load(\n                r\"\"\"\n            name: over-specified isotopics\n            input format: number densities\n            AU: 0.01\n            density: 10.0\n            \"\"\"\n            )\n\n\nclass TestIsotopicsMissingData(unittest.TestCase):\n    \"\"\"Custom materials must define isotopics.\"\"\"\n\n    yamlBlocksBadIsotopics = r\"\"\"\nblocks:\n    steel: &block_0\n        clad:\n            shape: Hexagon\n            material: Custom\n            #isotopics: sodium custom isotopics\n            Tinput: 25.0\n            Thot: 600.0\n            ip: 0.0\n            mult: 169.0\n            op: 0.86602\n\nassemblies:\n    fuel a: &assembly_a\n        specifier: IC\n        blocks: [*block_0]\n        height: [10]\n        axial mesh points: [1]\n        xs types: [A]\n\"\"\"\n\n    def test_customComponentsWithoutComposition(self):\n        cs = settings.Settings()\n        bp = blueprints.Blueprints.load(self.yamlBlocksBadIsotopics)\n\n        with self.assertRaises(IOError):\n            _a = bp.constructAssem(cs, name=\"fuel a\")\n\n\nclass TestNuclideFlagsExpansion(unittest.TestCase):\n    yamlString = r\"\"\"\nnuclide flags:\n    U238: {burn: false, xs: true}\n    U235: {burn: false, xs: true}\n    ZR: {burn: false, xs: true}\n    AL: {burn: false, xs: true}\n    FE: {burn: false, xs: true, expandTo: [\"FE54\"]}\n    C: {burn: false, xs: true}\n    NI: {burn: true, xs: true}\n    MN: {burn: true, xs: true}\n    CR: {burn: true, xs: true}\n    V: {burn: true, xs: true}\n    SI: {burn: true, xs: true}\n    MO: {burn: true, xs: true}\n    W: {burn: true, xs: true}\n    ZN: {burn: true, xs: true}\n    O: {burn: true, xs: true}\nblocks:\n    
uzr fuel: &block_0\n        fuel:\n            shape: Hexagon\n            material: UZr\n            Tinput: 25.0\n            Thot: 600.0\n            mult: 1.0\n            op: 10.0\n        clad:\n            shape: Circle\n            material: HT9\n            Tinput: 25.0\n            Thot: 600.0\n            id: 0.0\n            mult: 1.0\n            od: 10.0\n        dummy:\n            shape: Circle\n            material: ZnO\n            Tinput: 25.0\n            Thot: 600.0\n            id: 0.0\n            mult: 1.0\n            od: 10.0\nassemblies:\n    fuel a:\n        specifier: IC\n        blocks: [*block_0]\n        height: [10]\n        axial mesh points: [1]\n        xs types: [A]\n    \"\"\"\n\n    def test_expandedNatural(self):\n        cs = settings.Settings()\n        cs = cs.modified(newSettings={CONF_XS_KERNEL: \"MC2v3\"})\n\n        bp = blueprints.Blueprints.load(self.yamlString)\n        a = bp.constructAssem(cs, name=\"fuel a\")\n        b = a[-1]\n        c = b.getComponent(Flags.CLAD)\n        nd = c.getNumberDensities()\n        self.assertIn(\"FE54\", nd)  # natural isotopic as requested\n        self.assertNotIn(\"FE56\", nd)  # natural isotopic not requested\n        self.assertNotIn(\"FE51\", nd)  # un-natural\n        self.assertNotIn(\"FE\", nd)\n\n    def test_eleExpandInfoBasedOnCodeENDF(self):\n        with TemporaryDirectoryChanger():\n            # Reference elements to expand by library\n            ref_E70_elem = [\"C\", \"V\", \"ZN\"]\n            ref_E71_elem = [\"C\"]\n            ref_E80_elem = []\n\n            # Load settings and set neutronics kernel to MCNP\n            cs = settings.Settings()\n            cs = cs.modified(newSettings={CONF_NEUTRONICS_KERNEL: \"MCNP\"})\n\n            # Set ENDF/B-VII.0 as MCNP cross section library base\n            cs = cs.modified(newSettings={CONF_MCNP_LIB_BASE: \"ENDF/B-VII.0\"})\n            eleToKeep, expansions = isotopicOptions.eleExpandInfoBasedOnCodeENDF(cs)\n     
       E70_elem = [x.label for x in eleToKeep]\n\n            # Set ENDF/B-VII.1 as MCNP cross section library base\n            cs = cs.modified(newSettings={CONF_MCNP_LIB_BASE: \"ENDF/B-VII.1\"})\n            eleToKeep, expansions = isotopicOptions.eleExpandInfoBasedOnCodeENDF(cs)\n            E71_elem = [x.label for x in eleToKeep]\n\n            # Set ENDF/B-VIII.0 as MCNP cross section library base\n            cs = cs.modified(newSettings={CONF_MCNP_LIB_BASE: \"ENDF/B-VIII.0\"})\n            eleToKeep, expansions = isotopicOptions.eleExpandInfoBasedOnCodeENDF(cs)\n            E80_elem = [x.label for x in eleToKeep]\n\n            # Assert equality of returned elements to reference elements\n            self.assertEqual(sorted(E70_elem), sorted(ref_E70_elem))\n            self.assertEqual(sorted(E71_elem), sorted(ref_E71_elem))\n            self.assertEqual(sorted(E80_elem), sorted(ref_E80_elem))\n\n            # Disallowed inputs\n            not_allowed = [\"ENDF/B-VIIII.0\", \"ENDF/B-VI.0\", \"JEFF-3.3\"]\n            # Assert raise InputError in case of invalid library setting\n            for x in not_allowed:\n                with self.assertRaises(InputError) as context:\n                    cs = cs.modified(newSettings={CONF_MCNP_LIB_BASE: x})\n                    _ = isotopicOptions.eleExpandInfoBasedOnCodeENDF(cs)\n\n                self.assertTrue(\"Failed to determine nuclides for modeling\" in str(context.exception))\n"
  },
  {
    "path": "armi/reactor/blueprints/tests/test_gridBlueprints.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for grid blueprints.\"\"\"\n\nimport io\nimport os\nimport unittest\n\nfrom armi import configure, isConfigured\n\nif not isConfigured():\n    configure()\n\nfrom armi.reactor.blueprints import Blueprints\nfrom armi.reactor.blueprints.gridBlueprint import Grids, Pitch, saveToStream\nfrom armi.utils.customExceptions import InputError\nfrom armi.utils.directoryChangers import TemporaryDirectoryChanger\n\nLATTICE_BLUEPRINT = \"\"\"\ncontrol:\n    geom: hex_corners_up\n    symmetry: full\n    lattice pitch: \n      hex: 1.2\n    lattice map: |\n       - - - - - - - - - 1 1 1 1 1 1 1 1 1 4\n        - - - - - - - - 1 1 1 1 1 1 1 1 1 1 1\n         - - - - - - - 1 8 1 1 1 1 1 1 1 1 1 1\n          - - - - - - 1 1 1 1 1 1 1 1 1 1 1 1 1\n           - - - - - 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n            - - - - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n             - - - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n              - - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n               - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n                7 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1\n                 1 1 1 1 1 1 1 1 2 1 1 1 1 1 1 1 1 1\n                  1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n                   1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n                    1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n                     1 1 1 1 1 1 1 1 1 1 1 1 1 1\n                      1 1 1 1 1 1 1 1 1 3 1 1 1\n               
        1 1 1 1 1 1 1 1 1 1 1 1\n                        1 6 1 1 1 1 1 1 1 1 1\n                         1 1 1 1 1 1 1 1 1 1\npins:\n  geom: hex\n  symmetry: full\n  lattice pitch: \n    hex: 1.3\n  lattice map: |\n    -   -   FP\n      -   FP  FP\n    -   CL  CL  CL\n      FP  FP  FP  FP\n    FP  FP  FP  FP  FP\n      CL  CL  CL  CL\n    FP  FP  FP  FP  FP\n      FP  FP  FP  FP\n    CL  CL  CL  CL  CL\n      FP  FP  FP  FP\n    FP  FP  FP  FP  FP\n      CL  CL  CL  CL\n    FP  FP  FP  FP  FP\n      FP  FP  FP  FP\n        CL  CL  CL\n          FP  FP\n            FP\n\nsfp:\n    geom: cartesian\n    symmetry: full\n    lattice map: |\n        2 2 2 2 2\n        2 1 1 1 2\n        2 1 3 1 2\n        2 3 1 1 2\n        2 2 2 2 2\n\nsfp quarter:\n    geom: cartesian\n    symmetry: quarter through center assembly\n    lattice map: |\n        2 2 2 2 2\n        2 1 1 1 2\n        2 1 3 1 2\n        2 3 1 1 2\n        2 2 2 2 2\n\nsfp quarter even:\n    geom: cartesian\n    symmetry: quarter core\n    lattice map: |\n        2 2 2 2 2\n        2 1 1 1 2\n        2 1 3 1 2\n        2 3 1 1 2\n        2 2 2 2 2\n\nsfp even:\n    geom: cartesian\n    symmetry: full\n    lattice map: |\n        1 2 2 2 2 2\n        1 2 1 1 1 2\n        1 2 1 4 1 2\n        1 2 2 1 1 2\n        1 2 2 2 2 2\n        1 1 1 1 1 1\n\"\"\"\n\nRZT_BLUEPRINT = \"\"\"\nrzt_core:\n    geom: thetarz\n    symmetry: eighth core periodic\n    grid bounds:\n        r:\n            - 0.0\n            - 14.2857142857\n            - 28.5714285714\n            - 42.8571428571\n            - 57.1428571429\n            - 71.4285714286\n            - 85.7142857143\n            - 100.001\n            - 115.001\n            - 130.001\n        theta:\n            - 0.0\n            - 0.11556368446681414\n            - 0.2311273689343264\n            - 0.34669105340061696\n            - 0.43870710999683127\n            - 0.5542707944631219\n            - 0.6698344789311578\n            - 0.7853981633974483\n    grid 
contents:\n        [0,0]: assembly1_1 fuel\n        [0,1]: assembly1_2 fuel\n        [0,2]: assembly1_3 fuel\n        [0,3]: assembly1_4 fuel\n        [0,4]: assembly1_5 fuel\n        [0,5]: assembly1_6 fuel\n        [0,6]: assembly1_7 fuel\n\n        [1,0]: assembly2_1 fuel\n        [1,1]: assembly2_2 fuel\n        [1,2]: assembly2_3 fuel\n        [1,3]: assembly2_4 fuel\n        [1,4]: assembly2_5 fuel\n        [1,5]: assembly2_6 fuel\n        [1,6]: assembly2_7 fuel\n\n        [2,0]: assembly3_1 fuel\n        [2,1]: assembly3_2 fuel\n        [2,2]: assembly3_3 fuel\n        [2,3]: assembly3_4 fuel\n        [2,4]: assembly3_5 fuel\n        [2,5]: assembly3_6 fuel\n        [2,6]: assembly3_7 fuel\n\n        [3,0]: assembly4_1 fuel\n        [3,1]: assembly4_2 fuel\n        [3,2]: assembly4_3 fuel\n        [3,3]: assembly4_4 fuel\n        [3,4]: assembly4_5 fuel\n        [3,5]: assembly4_6 fuel\n        [3,6]: assembly4_7 fuel\n\n        [4,0]: assembly5_1 fuel\n        [4,1]: assembly5_2 fuel\n        [4,2]: assembly5_3 fuel\n        [4,3]: assembly5_4 fuel\n        [4,4]: assembly5_5 fuel\n        [4,5]: assembly5_6 fuel\n        [4,6]: assembly5_7 fuel\n\n        [5,0]: assembly6_1 fuel\n        [5,1]: assembly6_2 fuel\n        [5,2]: assembly6_3 fuel\n        [5,3]: assembly6_4 fuel\n        [5,4]: assembly6_5 fuel\n        [5,5]: assembly6_6 fuel\n        [5,6]: assembly6_7 fuel\n\n        [6,0]: assembly7_1 fuel\n        [6,1]: assembly7_2 fuel\n        [6,2]: assembly7_3 fuel\n        [6,3]: assembly7_4 fuel\n        [6,4]: assembly7_5 fuel\n        [6,5]: assembly7_6 fuel\n        [6,6]: assembly7_7 fuel\n\n        [7,0]: assembly8_1 fuel\n        [7,1]: assembly8_2 fuel\n        [7,2]: assembly8_3 fuel\n        [7,3]: assembly8_4 fuel\n        [7,4]: assembly8_5 fuel\n        [7,5]: assembly8_6 fuel\n        [7,6]: assembly8_7 fuel\n\n        [8,0]: assembly9_1 fuel\n        [8,1]: assembly9_2 fuel\n        [8,2]: assembly9_3 fuel\n        [8,3]: 
assembly9_4 fuel\n        [8,4]: assembly9_5 fuel\n        [8,5]: assembly9_6 fuel\n        [8,6]: assembly9_7 fuel\n\"\"\"\n\nSMALL_HEX = \"\"\"core:\n  geom: hex\n  symmetry: third periodic\n  lattice map: |\n    F\n     F\n    F F\n     F\n    F F\npins:\n  geom: hex\n  symmetry: full\n  lattice map: |\n    -   -   FP\n      -   FP  FP\n    -   CL  CL  CL\n      FP  FP  FP  FP\n    FP  FP  FP  FP  FP\n      CL  CL  CL  CL\n    FP  FP  FP  FP  FP\n      FP  FP  FP  FP\n    CL  CL  CL  CL  CL\n      FP  FP  FP  FP\n    FP  FP  FP  FP  FP\n      CL  CL  CL  CL\n    FP  FP  FP  FP  FP\n      FP  FP  FP  FP\n        CL  CL  CL\n          FP  FP\n            FP\n\"\"\"\n\nTINY_GRID = \"\"\"core:\n    geom: hex\n    lattice map:\n    grid bounds:\n    symmetry: full\n    grid contents:\n       ? - 0\n         - 0\n       : IF\n\"\"\"\n\nBIG_FULL_HEX_CORE = \"\"\"core:\n  geom: hex\n  symmetry: full\n  lattice map: |\n    -   -   -   -   -   -   SS  SS\n      -   -   -   -   SS  SS  SS  SS  SS\n    -   -   -   -   SS  DD  DD  DD  DD  SS\n      -   -   -   SS  DD  DD  DD  DD  DD  SS\n    -   -   -   SS  DD  DD  DD  DD  DD  DD  SS\n      -   -   SS  DD  DD  DD  DD  DD  DD  DD  SS\n    -   -   SS  DD  DD  DD  DD  DD  DD  DD  DD  SS\n      -   -   SS  DD  DD  DD  RB  DD  DD  DD  SS\n    -   -   SS  DD  DD  RB  RB  RB  RB  DD  DD  SS\n      -   SS  DD  DD  RB  RB  FF  RB  RB  DD  DD  SS\n    -   SS  SS  DD  RB  FF  FF  FF  FF  RB  DD  DD  SS\n      -   SS  DD  RB  FF  FF  FF  FF  FF  RB  DD  RR\n    -   SS  DD  DD  FF  FF  PC  PC  PC  FF  DD  DD  SS\n      SS  SS  DD  RB  FF  II  PC  FF  FF  RB  DD  DD  SS\n    -   SS  DD  RB  FF  SS  II  II  PC  FF  RB  DD  RR\n      SS  DD  DD  FF  II  II  II  II  II  FF  DD  DD  SS\n    -   SS  DD  RB  II  II  II  II  II  II  RB  DD  SS\n      SS  DD  RB  FF  RC  II  SS  II  II  FF  RB  DD  SS\n    SS  DD  DD  FF  II  II  II  RC  PC  II  FF  DD  DD  SS\n      SS  DD  RB  II  PC  II  II  II  PC  II  RB  DD  SS\n    SS  DD  RB  FF  II  II  
II  II  II  II  FF  RB  DD  SS\n      SS  DD  FF  II  II  WW  II  II  II  II  FF  DD  SS\n    SS  DD  RB  FF  II  II  WW  XX  PC  II  FF  RB  DD  SS\n      SS  DD  FF  PC  II  BB  AA  YY  SS  DC  FF  DD  SS\n    SS  DD  RB  FF  II  RC  CC  ZZ  II  II  FF  RB  DD  SS\n      SS  DD  FF  II  II  II  II  II  II  II  FF  DD  SS\n    SS  DD  RB  FF  II  II  II  II  II  II  FF  RB  DD  SS\n      SS  DD  RB  II  II  II  II  RC  II  II  RB  DD  SS\n    SS  DD  DD  FF  PC  II  SS  II  II  PC  FF  DD  DD  SS\n      SS  DD  RB  II  II  II  II  II  II  II  RB  DD  SS\n    -   SS  DD  FF  II  PC  II  II  II  II  FF  DD  SS\n      SS  DD  RB  FF  II  II  PC  II  II  FF  RB  DD  SS\n    -   SS  DD  RB  FF  SS  II  II  PC  FF  RB  DD  SS\n      SS  SS  DD  RB  FF  II  II  II  FF  RB  DD  SS  SS\n    -   SS  DD  DD  FF  FF  II  II  FF  FF  DD  DD  SS\n      -   SS  DD  RB  FF  FF  FF  FF  FF  RB  DD  SS\n    -   SS  SS  DD  RB  FF  FF  FF  FF  RB  DD  SS  SS\n      -   SS  DD  DD  RB  RB  RB  RB  RB  DD  DD  SS\n        -   SS  DD  DD  RB  RB  RB  RB  DD  DD  SS\n          -   SS  DD  DD  DD  DD  DD  DD  DD  SS\n            SS  DD  DD  DD  DD  DD  DD  DD  DD  SS\n              SS  DD  DD  DD  DD  DD  DD  DD  SS\n                SS  DD  DD  DD  DD  DD  DD  SS\n                  SS  DD  DD  DD  DD  DD  SS\n                    SS  DD  DD  DD  DD  SS\n                      SS  SS  SS  SS  SS\n                        -   SS  SS  -\n\"\"\"\n\n\nclass TestGridBPRoundTrip(unittest.TestCase):\n    def setUp(self):\n        self.grids = Grids.load(SMALL_HEX)\n\n    def test_contents(self):\n        self.assertIn(\"core\", self.grids)\n\n    def test_roundTrip(self):\n        \"\"\"\n        Test saving blueprint data to a stream.\n\n        .. 
test:: Grid blueprints can be written to disk.\n            :id: T_ARMI_BP_TO_DB0\n            :tests: R_ARMI_BP_TO_DB\n        \"\"\"\n        stream = io.StringIO()\n        saveToStream(stream, self.grids, False, True)\n        stream.seek(0)\n        gridBp = Grids.load(stream)\n        self.assertIn(\"third\", gridBp[\"core\"].symmetry)\n\n    def test_tinyMap(self):\n        \"\"\"\n        Test that a lattice map can be defined, written, and read in from blueprint file.\n\n        .. test:: Define a lattice map in reactor core.\n            :id: T_ARMI_BP_GRID1\n            :tests: R_ARMI_BP_GRID\n        \"\"\"\n        grid = Grids.load(TINY_GRID)\n        stream = io.StringIO()\n        saveToStream(stream, grid, full=True, tryMap=True)\n        stream.seek(0)\n        text = stream.read()\n        self.assertIn(\"IF\", text)\n        stream.seek(0)\n        gridBp = Grids.load(stream)\n        self.assertIn(\"full\", gridBp[\"core\"].symmetry)\n        self.assertIn(\"IF\", gridBp[\"core\"].latticeMap)\n\n\nclass TestGridBPRoundTripFull(unittest.TestCase):\n    def test_fullMap(self):\n        \"\"\"\n        Test that a lattice map can be defined, written, and read in from blueprint file.\n\n        .. 
test:: Define a lattice map in reactor core.\n            :id: T_ARMI_BP_GRID2\n            :tests: R_ARMI_BP_GRID\n        \"\"\"\n        grid = Grids.load(BIG_FULL_HEX_CORE)\n        gridDesign = grid[\"core\"]\n        _ = gridDesign.construct()\n\n        # test before the round-trip\n        self.assertEqual(gridDesign.gridContents[0, 0], \"AA\")\n        self.assertEqual(gridDesign.gridContents[-2, 1], \"BB\")\n        self.assertEqual(gridDesign.gridContents[-1, 0], \"CC\")\n        self.assertEqual(gridDesign.gridContents[-1, 1], \"WW\")\n        self.assertEqual(gridDesign.gridContents[1, 0], \"XX\")\n        self.assertEqual(gridDesign.gridContents[2, -1], \"YY\")\n        self.assertEqual(gridDesign.gridContents[1, -1], \"ZZ\")\n        self.assertEqual(gridDesign.gridContents[-3, 1], \"RC\")\n        self.assertEqual(gridDesign.gridContents[3, -1], \"PC\")\n\n        # perform a roundtrip\n        stream = io.StringIO()\n        saveToStream(stream, grid, full=True, tryMap=True)\n        stream.seek(0)\n        gridBp = Grids.load(stream)\n        gridDesign = gridBp[\"core\"]\n        _ = gridDesign.construct()\n\n        # test again after the round-trip\n        self.assertEqual(gridDesign.gridContents[0, 0], \"AA\")\n        self.assertEqual(gridDesign.gridContents[-2, 1], \"BB\")\n        self.assertEqual(gridDesign.gridContents[-1, 0], \"CC\")\n        self.assertEqual(gridDesign.gridContents[-1, 1], \"WW\")\n        self.assertEqual(gridDesign.gridContents[1, 0], \"XX\")\n        self.assertEqual(gridDesign.gridContents[2, -1], \"YY\")\n        self.assertEqual(gridDesign.gridContents[1, -1], \"ZZ\")\n        self.assertEqual(gridDesign.gridContents[-3, 1], \"RC\")\n        self.assertEqual(gridDesign.gridContents[3, -1], \"PC\")\n\n\nclass TestGridBlueprintsSection(unittest.TestCase):\n    \"\"\"Tests for lattice blueprint section.\"\"\"\n\n    def setUp(self):\n        self.td = TemporaryDirectoryChanger()\n        self.td.__enter__()\n        
self.grids = Grids.load(LATTICE_BLUEPRINT.format(self._testMethodName))\n\n    def tearDown(self):\n        self.td.__exit__(None, None, None)\n\n    def test_simpleRead(self):\n        gridDesign = self.grids[\"control\"]\n        grid = gridDesign.construct()\n        self.assertAlmostEqual(grid.pitch, 1.2)\n        self.assertEqual(gridDesign.gridContents[-8, 0], \"6\")\n\n        gridDesign = self.grids[\"pins\"]\n        grid = gridDesign.construct()\n        self.assertAlmostEqual(grid.pitch, 1.3)\n        self.assertEqual(gridDesign.gridContents[-4, 0], \"FP\")\n        self.assertEqual(gridDesign.gridContents[-3, 3], \"CL\")\n\n        # Cartesian full, odd\n        gridDesign2 = self.grids[\"sfp\"]\n        _ = gridDesign2.construct()\n        self.assertEqual(gridDesign2.gridContents[1, 1], \"1\")\n        self.assertEqual(gridDesign2.gridContents[0, 0], \"3\")\n        self.assertEqual(gridDesign2.gridContents[-1, -1], \"3\")\n\n        # Cartesian quarter, odd\n        gridDesign3 = self.grids[\"sfp quarter\"]\n        grid = gridDesign3.construct()\n        self.assertEqual(gridDesign3.gridContents[0, 0], \"2\")\n        self.assertEqual(gridDesign3.gridContents[1, 1], \"3\")\n        self.assertEqual(gridDesign3.gridContents[2, 2], \"3\")\n        self.assertEqual(gridDesign3.gridContents[3, 3], \"1\")\n        self.assertTrue(grid.symmetry.isThroughCenterAssembly)\n\n        # cartesian quarter, even not through center\n        gridDesign3 = self.grids[\"sfp quarter even\"]\n        grid = gridDesign3.construct()\n        self.assertFalse(grid.symmetry.isThroughCenterAssembly)\n\n        # Cartesian full, even/odd hybrid\n        gridDesign4 = self.grids[\"sfp even\"]\n        grid = gridDesign4.construct()\n        self.assertEqual(gridDesign4.gridContents[0, 0], \"4\")\n        self.assertEqual(gridDesign4.gridContents[-1, -1], \"2\")\n        self.assertEqual(gridDesign4.gridContents[2, 2], \"2\")\n        
self.assertEqual(gridDesign4.gridContents[-3, -3], \"1\")\n        with self.assertRaises(KeyError):\n            self.assertEqual(gridDesign4.gridContents[-4, -3], \"1\")\n\n    def test_pitchBasics(self):\n        # use only hex input\n        p = Pitch(123, 0, 0, 0)\n        self.assertEqual(p.hex, 123)\n        self.assertEqual(p.x, 0)\n        self.assertEqual(p.y, 0)\n        self.assertEqual(p.z, 0)\n\n        # use only X, Y, Z inputs\n        p = Pitch(0, 1, 2, 3)\n        self.assertEqual(p.hex, 1)\n        self.assertEqual(p.x, 1)\n        self.assertEqual(p.y, 2)\n        self.assertEqual(p.z, 3)\n\n    def test_pitchEdgeCases(self):\n        with self.assertRaises(InputError):\n            # cannot mix hex with x,y,z pitch\n            Pitch(1, 2, 3, 4)\n\n        with self.assertRaises(InputError):\n            # SOMETHING needs to be non-zero\n            Pitch(0, 0, 0, 0)\n\n    def test_simpleReadLatticeMap(self):\n        \"\"\"Read lattice map and create a grid.\n\n        .. 
test:: Define a lattice map in reactor core.\n            :id: T_ARMI_BP_GRID0\n            :tests: R_ARMI_BP_GRID\n        \"\"\"\n        from armi.reactor.blueprints.tests.test_blockBlueprints import FULL_BP\n\n        # Cartesian full, even/odd hybrid\n        gridDesign4 = self.grids[\"sfp even\"]\n        _grid = gridDesign4.construct()\n\n        # test that we can correctly save this to a YAML\n        bp = Blueprints.load(FULL_BP)\n        filePath = \"TestGridBlueprintsSection__test_simpleReadLatticeMap.log\"\n        with open(filePath, \"w\") as stream:\n            saveToStream(stream, bp, True)\n\n        # test that the output looks valid, and includes a lattice map\n        with open(filePath, \"r\") as f:\n            outText = f.read()\n            self.assertIn(\"blocks:\", outText)\n            self.assertIn(\"shape: Circle\", outText)\n            self.assertIn(\"assemblies:\", outText)\n            self.assertIn(\"flags: fuel test\", outText)\n            self.assertIn(\"grid contents:\", outText)\n            self.assertIn(\"lattice map:\", outText)\n            before, after = outText.split(\"lattice map:\")\n            self.assertGreater(len(before), 100)\n            self.assertGreater(len(after), 20)\n            self.assertIn(\"1 2 1 2 1 2 1\", after, msg=\"lattice map not showing up\")\n            self.assertNotIn(\"- -3\", after, msg=\"grid contents are showing up when they shouldn't\")\n            self.assertNotIn(\"readFromLatticeMap\", outText)\n\n        self.assertTrue(os.path.exists(filePath))\n\n    def test_simpleReadNoLatticeMap(self):\n        from armi.reactor.blueprints.tests.test_blockBlueprints import FULL_BP_GRID\n\n        # Cartesian full, even/odd hybrid\n        gridDesign4 = self.grids[\"sfp even\"]\n        _grid = gridDesign4.construct()\n\n        # test that we can correctly save this to a YAML\n        bp = Blueprints.load(FULL_BP_GRID)\n        filePath = 
\"TestGridBlueprintsSection__test_simpleReadNoLatticeMap.log\"\n        with open(filePath, \"w\") as stream:\n            saveToStream(stream, bp, True)\n\n        # test that the output looks valid, and includes a lattice map\n        with open(filePath, \"r\") as f:\n            outText = f.read()\n            self.assertIn(\"blocks:\", outText)\n            self.assertIn(\"shape: Circle\", outText)\n            self.assertIn(\"assemblies:\", outText)\n            self.assertIn(\"flags: fuel test\", outText)\n            self.assertIn(\"grid contents:\", outText)\n            self.assertIn(\"lattice map:\", outText)\n            before, after = outText.split(\"grid contents:\")\n            self.assertGreater(len(before), 100)\n            self.assertGreater(len(after), 20)\n            self.assertIn(\"- -3\", after, msg=\"grid contents not showing up\")\n            self.assertNotIn(\"1 3 1 2 1 3 1\", after, msg=\"lattice map showing up when it shouldn't\")\n            self.assertNotIn(\"readFromLatticeMap\", outText)\n\n        self.assertTrue(os.path.exists(filePath))\n\n\nclass TestRZTGridBlueprint(unittest.TestCase):\n    \"\"\"Tests for R-Z-Theta grid inputs.\"\"\"\n\n    def setUp(self):\n        self.grids = Grids.load(RZT_BLUEPRINT)\n\n    def test_construct(self):\n        gridDesign = self.grids[\"rzt_core\"]\n        grid = gridDesign.construct()\n        self.assertEqual(gridDesign.gridContents[2, 2], \"assembly3_3 fuel\")\n        self.assertEqual(\n            grid.indicesOfBounds(57.1428571429, 71.4285714286, 0.5542707944631219, 0.6698344789311578),\n            (5, 4, 0),\n        )\n"
  },
  {
    "path": "armi/reactor/blueprints/tests/test_materialModifications.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for material modifications.\"\"\"\n\nimport unittest\n\nfrom numpy.testing import assert_allclose\n\nfrom armi import materials, settings\nfrom armi.reactor import blueprints\nfrom armi.reactor.blueprints.blockBlueprint import BlockBlueprint\n\n\nclass TestMaterialModifications(unittest.TestCase):\n    uZrInput = r\"\"\"\nnuclide flags:\n    U: {burn: false, xs: true}\n    ZR: {burn: false, xs: true}\nblocks:\n    fuel: &block_fuel\n        fuel1: &component_fuel_fuel1\n            shape: Hexagon\n            material: UZr\n            Tinput: 600.0\n            Thot: 600.0\n            ip: 0.0\n            mult: 1\n            op: 10.0\n        fuel2: &component_fuel_fuel2\n            shape: Hexagon\n            material: UZr\n            Tinput: 600.0\n            Thot: 600.0\n            ip: 0.0\n            mult: 1\n            op: 10.0\nassemblies:\n    fuel a: &assembly_a\n        specifier: IC\n        blocks: [*block_fuel]\n        height: [1.0]\n        axial mesh points: [1]\n        xs types: [A]\n\"\"\"\n\n    b4cInput = r\"\"\"\nnuclide flags:\n    B: {burn: false, xs: true}\n    C: {burn: false, xs: true}\nblocks:\n    poison: &block_poison\n        poison:\n            shape: Hexagon\n            material: B4C\n            Tinput: 600.0\n            Thot: 600.0\n            ip: 0.0\n            mult: 1\n            op: 
10.0\nassemblies:\n    assem a: &assembly_a\n        specifier: IC\n        blocks: [*block_poison]\n        height: [1.0]\n        axial mesh points: [1]\n        xs types: [A]\n\"\"\"\n\n    def loadUZrAssembly(self, materialModifications):\n        return self._loadAssembly(self.uZrInput, materialModifications, \"fuel a\")\n\n    @staticmethod\n    def _loadAssembly(bpBase: str, materialModifications: str, assem: str):\n        yamlString = bpBase + \"\\n\" + materialModifications\n        design = blueprints.Blueprints.load(yamlString)\n        design._prepConstruction(settings.Settings())\n        return design.assemblies[assem]\n\n    def loadB4CAssembly(self, materialModifications: str):\n        return self._loadAssembly(self.b4cInput, materialModifications, \"assem a\")\n\n    def test_noMaterialModifications(self):\n        a = self.loadUZrAssembly(\"\")\n        # mass fractions should be whatever UZr is\n        uzr = materials.UZr()\n        fuelComponent = a[0][0]\n        totalMass = fuelComponent.getMass()\n        for nucName in uzr.massFrac:\n            massFrac = fuelComponent.getMass(nucName) / totalMass\n            assert_allclose(uzr.massFrac[nucName], massFrac)\n\n    def test_u235_wt_frac_modification(self):\n        \"\"\"Test constructing a component where the blueprints specify a material\n        modification for one nuclide.\n\n        .. 
test:: A material modification can be applied to all the components in an assembly.\n            :id: T_ARMI_MAT_USER_INPUT0\n            :tests: R_ARMI_MAT_USER_INPUT\n        \"\"\"\n        a = self.loadUZrAssembly(\n            \"\"\"\n        material modifications:\n            U235_wt_frac: [0.20]\n        \"\"\"\n        )\n        fuelComponent = a[0][0]\n        u235 = fuelComponent.getMass(\"U235\")\n        u = fuelComponent.getMass(\"U\")\n        assert_allclose(0.20, u235 / u)\n\n        fuelComponent = a[0][1]\n        u235 = fuelComponent.getMass(\"U235\")\n        u = fuelComponent.getMass(\"U\")\n        assert_allclose(0.20, u235 / u)\n\n    def test_u235_wt_frac_byComponent_modification1(self):\n        \"\"\"Test constructing a component where the blueprints specify a material\n        modification for one nuclide, for just one component.\n\n        .. test:: A material modification can be applied to one component in an assembly.\n            :id: T_ARMI_MAT_USER_INPUT1\n            :tests: R_ARMI_MAT_USER_INPUT\n        \"\"\"\n        a = self.loadUZrAssembly(\n            \"\"\"\n        material modifications:\n            by component:\n                fuel1:\n                    U235_wt_frac: [0.20]\n            U235_wt_frac: [0.30]\n        \"\"\"\n        )\n        fuelComponent = a[0][0]\n        u235 = fuelComponent.getMass(\"U235\")\n        u = fuelComponent.getMass(\"U\")\n        assert_allclose(0.20, u235 / u)\n\n        fuelComponent = a[0][1]\n        u235 = fuelComponent.getMass(\"U235\")\n        u = fuelComponent.getMass(\"U\")\n        assert_allclose(0.30, u235 / u)\n\n    def test_u235_wt_frac_byComponent_modification2(self):\n        \"\"\"Test constructing a component where the blueprints specify a material\n        modification for one nuclide, for multiple components.\n\n        .. 
test:: A material modification can be applied to multiple components in an assembly.\n            :id: T_ARMI_MAT_USER_INPUT2\n            :tests: R_ARMI_MAT_USER_INPUT\n        \"\"\"\n        a = self.loadUZrAssembly(\n            \"\"\"\n        material modifications:\n            by component:\n                fuel1:\n                    U235_wt_frac: [0.20]\n                fuel2:\n                    U235_wt_frac: [0.50]\n            U235_wt_frac: [0.30]\n        \"\"\"\n        )\n        fuelComponent = a[0][0]\n        u235 = fuelComponent.getMass(\"U235\")\n        u = fuelComponent.getMass(\"U\")\n        assert_allclose(0.20, u235 / u)\n\n        fuelComponent = a[0][1]\n        u235 = fuelComponent.getMass(\"U235\")\n        u = fuelComponent.getMass(\"U\")\n        assert_allclose(0.50, u235 / u)\n\n    def test_materialModificationLength(self):\n        \"\"\"If the wrong number of material modifications are defined, there is an error.\"\"\"\n        with self.assertRaises(ValueError):\n            _a = self.loadUZrAssembly(\n                \"\"\"\n        material modifications:\n            by component:\n                fuel1:\n                    U235_wt_frac: [0.2]\n            U235_wt_frac: [0.11, 0.22, 0.33, 0.44]\n            \"\"\"\n            )\n\n    def test_invalidComponentModification(self):\n        with self.assertRaises(ValueError):\n            _a = self.loadUZrAssembly(\n                \"\"\"\n        material modifications:\n            by component:\n                invalid component:\n                    U235_wt_frac: [0.2]\n            \"\"\"\n            )\n\n    def test_zrWtFracModification(self):\n        a = self.loadUZrAssembly(\n            \"\"\"\n        material modifications:\n            ZR_wt_frac: [0.077]\n        \"\"\"\n        )\n        fuelComponent = a[0][0]\n        totalMass = fuelComponent.getMass()\n        zr = fuelComponent.getMass(\"ZR\")\n        assert_allclose(0.077, zr / totalMass)\n\n    def 
test_bothU235ZrWtFracModification(self):\n        a = self.loadUZrAssembly(\n            \"\"\"\n        material modifications:\n            ZR_wt_frac: [0.077]\n            U235_wt_frac: [0.20]\n        \"\"\"\n        )\n        fuelComponent = a[0][0]\n\n        # check u235 enrichment\n        u235 = fuelComponent.getMass(\"U235\")\n        u = fuelComponent.getMass(\"U\")\n        assert_allclose(0.20, u235 / u)\n\n        # check zr frac\n        totalMass = fuelComponent.getMass()\n        zr = fuelComponent.getMass(\"ZR\")\n        assert_allclose(0.077, zr / totalMass)\n\n    def test_checkByComponentMaterialInput(self):\n        a = self.loadUZrAssembly(\"\")\n        materialInput = {\"fake_material\": {\"ZR_wt_frac\": 0.5}}\n        with self.assertRaises(ValueError):\n            BlockBlueprint._checkByComponentMaterialInput(a, materialInput)\n\n    def test_filterMaterialInput(self):\n        a = self.loadUZrAssembly(\"\")\n        materialInput = {\n            \"byBlock\": {\"ZR_wt_frac\": 0.1, \"U235_wt_frac\": 0.1},\n            \"fuel1\": {\"U235_wt_frac\": 0.2},\n            \"fuel2\": {\"ZR_wt_frac\": 0.3, \"U235_wt_frac\": 0.3},\n        }\n        componentDesign = a[0][0]\n        filteredMaterialInput, _ = BlockBlueprint._filterMaterialInput(materialInput, componentDesign)\n\n        filteredMaterialInput_reference = {\"ZR_wt_frac\": 0.1, \"U235_wt_frac\": 0.2}\n\n        self.assertEqual(filteredMaterialInput, filteredMaterialInput_reference)\n\n    def test_invalidMatModName(self):\n        \"\"\"\n        This test shows that we can detect invalid material modification\n        names when they are specified on an assembly blueprint. 
We happen to know\n        that ZR_wt_frac is a valid modification for the UZr material class, so we\n        use that in the first call to prove that things initially work fine.\n        \"\"\"\n        a = self.loadUZrAssembly(\n            \"\"\"\n        material modifications:\n            ZR_wt_frac: [1]\n            by component:\n                fuel2:\n                    ZR_wt_frac: [0]\n        \"\"\"\n        )\n        # just to prove that the above works fine before we modify it\n        self.assertAlmostEqual(a[0][0].getMassFrac(\"ZR\"), 1)\n        self.assertAlmostEqual(a[0][1].getMassFrac(\"ZR\"), 0)\n\n        with self.assertRaises(ValueError):\n            a = self.loadUZrAssembly(\n                \"\"\"\n        material modifications:\n            this_is_a_fake_name: [1]\n            by component:\n                fuel2:\n                    ZR_wt_frac: [0]\n        \"\"\"\n            )\n\n        with self.assertRaises(ValueError):\n            a = self.loadUZrAssembly(\n                \"\"\"\n        material modifications:\n            ZR_wt_frac: [1]\n            by component:\n                fuel2:\n                    this_is_a_fake_name: [0]\n        \"\"\"\n            )\n\n    def test_invalidMatModType(self):\n        \"\"\"\n        This test shows that we can detect material modifications that are invalid\n        because of their values, not just their names.\n        We happen to know that ZR_wt_frac is a valid modification for UZr, so we\n        use that in the first call to prove that things initially work fine.\n        \"\"\"\n        a = self.loadUZrAssembly(\n            \"\"\"\n        material modifications:\n            ZR_wt_frac: [1]\n        \"\"\"\n        )\n        # just to prove that the above works fine before we modify it\n        self.assertAlmostEqual(a[0][0].getMassFrac(\"ZR\"), 1)\n\n        with self.assertRaises(ValueError) as ee:\n            a = self.loadUZrAssembly(\n                \"\"\"\n     
   material modifications:\n            ZR_wt_frac: [this_is_a_value_of_incompatible_type]\n        \"\"\"\n            )\n\n            self.assertIn(\n                \"Something went wrong in applying the material modifications\",\n                ee.args[0],\n            )\n\n    def test_matModsUpTheMRO(self):\n        \"\"\"\n        Make sure that valid/invalid material modifications are searched up\n        the MRO for a material class.\n        \"\"\"\n        _a = self.loadUZrAssembly(\n            \"\"\"\n        material modifications:\n            ZR_wt_frac: [1]\n            class1_wt_frac: [1]\n            class1_custom_isotopics: [dummy]\n            class2_custom_isotopics: [dummy]\n            by component:\n                fuel2:\n                    ZR_wt_frac: [0]\n                    class1_wt_frac: [1]\n                    class1_custom_isotopics: [dummy]\n                    class2_custom_isotopics: [dummy]\ncustom isotopics:\n    dummy:\n        input format: mass fractions\n        density: 1\n        U: 1\n\"\"\"\n        )\n\n        with self.assertRaises(ValueError):\n            _a = self.loadUZrAssembly(\n                \"\"\"\n        material modifications:\n            ZR_wt_frac: [1]\n            klass1_wt_frac: [1]\n            klass1_custom_isotopics: [dummy]\n            klass2_custom_isotopics: [dummy]\n            by component:\n                fuel2:\n                    ZR_wt_frac: [0]\n                    klass1_wt_frac: [1]\n                    klass1_custom_isotopics: [dummy]\n                    klass2_custom_isotopics: [dummy]\ncustom isotopics:\n    dummy:\n        input format: mass fractions\n        density: 1\n        U: 1\n\"\"\"\n            )\n\n    def test_theoreticalDensity(self):\n        \"\"\"Test the theoretical density can be loaded from material modifications.\"\"\"\n        mods = \"\"\"\n        material modifications:\n            TD_frac: [0.5]\n        \"\"\"\n        a = 
self.loadB4CAssembly(mods)\n        comp = a[0][0]\n        mat = comp.material\n        self.assertEqual(mat.getTD(), 0.5)\n        self.assertEqual(comp.p.theoreticalDensityFrac, 0.5)\n"
  },
  {
    "path": "armi/reactor/blueprints/tests/test_reactorBlueprints.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for reactor blueprints.\"\"\"\n\nimport logging\nimport os\nimport unittest\n\nfrom armi import runLog, settings\nfrom armi.reactor import blueprints, reactors\nfrom armi.reactor.blueprints import gridBlueprint, reactorBlueprint\nfrom armi.reactor.blueprints.tests import test_customIsotopics\nfrom armi.reactor.composites import Composite\nfrom armi.reactor.excoreStructure import ExcoreStructure\nfrom armi.reactor.reactors import Core, loadFromCs\nfrom armi.reactor.spentFuelPool import SpentFuelPool\nfrom armi.settings.caseSettings import Settings\nfrom armi.testing import TESTING_ROOT\nfrom armi.tests import mockRunLogs\n\nCORE_BLUEPRINT = \"\"\"\ncore:\n  grid name: core\n  origin:\n    x: 0.0\n    y: 10.1\n    z: 1.1\nsfp:\n    type: sfp\n    grid name: sfp\n    origin:\n        x: 0.0\n        y: 12.1\n        z: 1.1\nevst:\n    type: excore\n    grid name: evst\n    origin:\n        x: 0.0\n        y: 100.0\n        z: 0.0\n\"\"\"\n\nGRIDS = \"\"\"\ncore:\n    geom: hex\n    symmetry: third core periodic\n    grid contents:\n      [0, 0]: IC\n      [1, 1]: IC\n    orientationBOL:\n      [1, 1]: 60.0\n      [3, 2]: 120.0\nsfp:\n    lattice pitch:\n        x: 25.0\n        y: 25.0\n    geom: cartesian\n    symmetry: full\n    lattice map: |\n      IC IC\n      IC IC\n    orientationBOL:\n      [0, 0]: 60.0\n      [0, -1]: 120.0\nevst:\n    lattice 
pitch:\n        x: 32.0\n        y: 32.0\n    geom: cartesian\n    symmetry: full\n    lattice map: |\n      IC IC\n      IC IC\n\"\"\"\n\nSMALL_YAML = \"\"\"\nsystems:\n    core:\n        grid name: core\n        origin:\n            x: 0.0\n            y: 0.0\n            z: 0.0\n    sfp:\n        type: sfp\n        grid name: sfp\n        origin:\n            x: 1000.0\n            y: 1000.0\n            z: 1000.0\n    evst:\n        type: excore\n        grid name: evst\n        origin:\n            x: 2000.0\n            y: 2000.0\n            z: 2000.0\ngrids:\n    core:\n        geom: hex\n        symmetry: third core periodic\n        grid contents:\n            [0, 0]: IC\n            [1, 1]: IC\n    sfp:\n        lattice pitch:\n            x: 25.0\n            y: 25.0\n        geom: cartesian\n        symmetry: full\n        lattice map: |\n          IC IC\n          IC IC\n    evst:\n      lattice pitch:\n          x: 32.0\n          y: 32.0\n      geom: hex\n      symmetry: full\n\"\"\"\n\n\nclass TestReactorBlueprints(unittest.TestCase):\n    \"\"\"Tests for reactor blueprints.\"\"\"\n\n    def setUp(self):\n        # add testMethodName to avoid I/O collisions during parallel testing\n        self.systemDesigns = reactorBlueprint.Systems.load(CORE_BLUEPRINT)\n        self.gridDesigns = gridBlueprint.Grids.load(GRIDS)\n\n    def test_simpleRead(self):\n        self.assertAlmostEqual(self.systemDesigns[\"core\"].origin.y, 10.1)\n        self.assertAlmostEqual(self.systemDesigns[\"sfp\"].origin.y, 12.1)\n        self.assertAlmostEqual(self.systemDesigns[\"evst\"].origin.y, 100)\n\n    def _setupReactor(self):\n        fnames = [self._testMethodName + n for n in [\"geometry.yaml\", \"sfp-geom.yaml\"]]\n        for fn in fnames:\n            with open(fn, \"w\") as f:\n                f.write(SMALL_YAML)\n\n        cs = settings.Settings()\n        bp = blueprints.Blueprints.load(test_customIsotopics.TestCustomIsotopics.yamlString)\n        
bp.systemDesigns = self.systemDesigns\n        bp.gridDesigns = self.gridDesigns\n        reactor = reactors.Reactor(cs.caseTitle, bp)\n        core = bp.systemDesigns[\"core\"].construct(cs, bp, reactor)\n        sfp = bp.systemDesigns[\"sfp\"].construct(cs, bp, reactor)\n        evst = bp.systemDesigns[\"evst\"].construct(cs, bp, reactor)\n        for fn in fnames:\n            os.remove(fn)\n\n        return core, sfp, evst\n\n    def test_construct(self):\n        \"\"\"Actually construct some reactor systems.\n\n        .. test:: Create core and spent fuel pool with blueprint.\n            :id: T_ARMI_BP_SYSTEMS\n            :tests: R_ARMI_BP_SYSTEMS\n\n        .. test:: Create core object with blueprint.\n            :id: T_ARMI_BP_CORE\n            :tests: R_ARMI_BP_CORE\n        \"\"\"\n        core, sfp, evst = self._setupReactor()\n        self.assertEqual(len(core), 2)\n        self.assertEqual(len(sfp), 4)\n        self.assertEqual(len(evst), 4)\n\n        self.assertIsInstance(core, Core)\n        self.assertIsInstance(sfp, SpentFuelPool)\n        self.assertIsInstance(evst, ExcoreStructure)\n\n    def test_materialDataSummary(self):\n        \"\"\"Test that the material data summary for the core is valid as a printout to the stdout.\"\"\"\n        expectedMaterialData = [\n            (\"Custom\", \"ARMI\"),\n            (\"HT9\", \"ARMI\"),\n            (\"Sodium\", \"ARMI\"),\n            (\"UZr\", \"ARMI\"),\n        ]\n        core, _sfp, _evst = self._setupReactor()\n        materialData = reactorBlueprint.summarizeMaterialData(core)\n        for actual, expected in zip(materialData, expectedMaterialData):\n            self.assertEqual(actual, expected)\n\n    def test_excoreStructure(self):\n        _core, _sfp, evst = self._setupReactor()\n        self.assertIsInstance(evst, ExcoreStructure)\n        self.assertEqual(evst.parent.__class__.__name__, \"Reactor\")\n        self.assertEqual(evst.spatialGrid.__class__.__name__, 
\"CartesianGrid\")\n\n        # add one composite object and validate\n        comp1 = Composite(\"thing1\")\n        loc = evst.spatialGrid[(0, 0, 0)]\n\n        self.assertEqual(len(evst.getChildren()), 4)\n        evst.add(comp1, loc)\n        self.assertEqual(len(evst.getChildren()), 5)\n\n    def test_spentFuelPool(self):\n        _core, sfp, evst = self._setupReactor()\n        self.assertIsInstance(sfp, SpentFuelPool)\n        self.assertEqual(sfp.parent.__class__.__name__, \"Reactor\")\n        self.assertEqual(sfp.spatialGrid.__class__.__name__, \"CartesianGrid\")\n        self.assertEqual(sfp.numColumns, 2)\n\n        # add one assembly and validate\n        self.assertEqual(len(sfp.getChildren()), 4)\n        sfp.add(evst.getChildren()[0])\n        self.assertEqual(len(sfp.getChildren()), 5)\n\n    def test_orientationBOL(self):\n        core, sfp, _evst = self._setupReactor()\n\n        # test for hex core\n        a0 = core.getAssembly(locationString=\"001-001\")\n        self.assertAlmostEqual(a0.p.orientation[2], 60.0, delta=1e-9)\n        a1 = core.getAssembly(locationString=\"003-002\")\n        self.assertAlmostEqual(a1.p.orientation[2], 120.0, delta=1e-9)\n\n        # test cartesian, non-core\n        a0 = sfp.getAssembly(\"A0005\")\n        self.assertAlmostEqual(a0.p.orientation[2], 60.0, delta=1e-9)\n        a1 = sfp.getAssembly(\"A0003\")\n        self.assertAlmostEqual(a1.p.orientation[2], 120.0, delta=1e-9)\n\n    def test_fullCoreAreNotConverted(self):\n        \"\"\"Prove that geometries aren't being converted when reading in a full-core BP.\"\"\"\n        cs = Settings(os.path.join(TESTING_ROOT, \"reactors\", \"smallHexReactor\", \"smallHexReactor.yaml\"))\n        runLog.setVerbosity(logging.INFO)\n        with mockRunLogs.BufferLog() as log:\n            self.assertEqual(\"\", log.getStdout())\n            r = loadFromCs(cs)\n            # ensure that, for full core, only the correct parts of the geom modification are hit\n            
self.assertIn(\"Applying Geometry Modifications\", log.getStdout())\n            self.assertIn(\"Updating spatial grid\", log.getStdout())\n            self.assertNotIn(\"Applying non-full core\", log.getStdout())\n\n        a = r.core.getAssemblyWithStringLocation(\"003-012\")\n        self.assertIn(\"fuel assembly\", str(a).lower())\n\n        b = a[2]\n        self.assertIn(\"fuel\", str(b).lower())\n        self.assertEqual(b.p.molesHmBOL, b.getHMMoles())\n"
  },
  {
    "path": "armi/reactor/components/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nComponents package contains components and shapes.\n\nThese objects hold the dimensions, temperatures, composition, and shape of reactor primitives.\n\n.. _component-class-diagram:\n\n.. pyreverse:: armi.reactor.components -A -k --ignore=componentParameters.py\n    :align: center\n    :alt: Component class diagram\n    :width: 100%\n\n    Class inheritance diagram for :py:mod:`armi.reactor.components`.\n\"\"\"\n\n# ruff: noqa: F405, I001\nimport math\n\nimport numpy as np\n\nfrom armi import runLog\nfrom armi.reactor.components.component import *  # noqa: F403\nfrom armi.reactor.components.basicShapes import *  # noqa: F403\nfrom armi.reactor.components.complexShapes import *  # noqa: F403\nfrom armi.reactor.components.volumetricShapes import *  # noqa: F403\n\n\ndef factory(shape, bcomps, kwargs):\n    \"\"\"\n    Build a new component object.\n\n    Parameters\n    ----------\n    shape : str\n        lowercase string corresponding to the component type name\n    bcomps : list(Component)\n        list of \"sibling\" components. 
This list is used to find component links, which are of the form\n        ``<name>.<dimension``.\n    kwargs : dict\n        dictionary of inputs for the Component subclass's ``__init__`` method.\n    \"\"\"\n    try:\n        class_ = ComponentType.TYPES[shape]\n    except KeyError:\n        raise ValueError(\n            \"Unrecognized component shape: '{}'\\nValid component names are {}\".format(\n                shape, \", \".join(ComponentType.TYPES.keys())\n            )\n        )\n\n    _removeDimensionNameSpaces(kwargs)\n\n    try:\n        return class_(components=bcomps, **kwargs)\n    except TypeError:\n        # TypeError raised when kwarg is missing. We add extra information\n        # to the error to indicate which component needs updating.\n        runLog.error(f\"Potentially invalid kwargs {kwargs} for {class_} of shape {shape}. Check input.\")\n        raise\n\n\ndef _removeDimensionNameSpaces(attrs):\n    \"\"\"Some components use spacing in their dimension names, but can't internally.\"\"\"\n    for key in list(attrs.keys()):\n        if \" \" in key:\n            clean = key.replace(\" \", \"_\")\n            attrs[clean] = attrs.pop(key)\n\n\n# Below are a few component base classes\n\n\nclass NullComponent(Component):\n    \"\"\"Returns zero for all dimensions.\"\"\"\n\n    def __cmp__(self, other):\n        \"\"\"Be smaller than everything.\"\"\"\n        return -1\n\n    def __lt__(self, other):\n        return True\n\n    def __bool__(self):\n        \"\"\"Handles truth testing.\"\"\"\n        return False\n\n    __nonzero__ = __bool__  # Python2 compatibility\n\n    def getBoundingCircleOuterDiameter(self, Tc=None, cold=False):\n        return None\n\n    def getDimension(self, key, Tc=None, cold=False):\n        return 0.0\n\n\nclass UnshapedComponent(Component):\n    \"\"\"\n    A component with undefined dimensions.\n\n    Useful for situations where you just want to enter the area directly.\n\n    For instance, when you want to model 
neutronic behavior of an assembly based\n    on only knowing the area fractions of each material in the assembly.\n\n    See Also\n    --------\n    DerivedShape : Useful to just fill leftover space in a block with a material\n    \"\"\"\n\n    pDefs = componentParameters.getUnshapedParameterDefinitions()\n\n    def __init__(\n        self,\n        name,\n        material,\n        Tinput,\n        Thot,\n        area=np.nan,\n        modArea=None,\n        isotopics=None,\n        mergeWith=None,\n        components=None,\n    ):\n        Component.__init__(\n            self,\n            name,\n            material,\n            Tinput,\n            Thot,\n            area=area,\n            isotopics=isotopics,\n            mergeWith=mergeWith,\n            components=components,\n        )\n        self._linkAndStoreDimensions(components, modArea=modArea)\n\n    def getComponentArea(self, cold=False, Tc=None):\n        \"\"\"\n        Get the area of this component in cm^2.\n\n        Parameters\n        ----------\n        cold : bool, optional\n            If True, compute the area with as-input dimensions, instead of thermally-expanded.\n        Tc : float, optional\n            Temperature in C to compute the area at\n        \"\"\"\n        if cold and Tc is not None:\n            raise ValueError(f\"Cannot compute component area at {Tc} and cold dimensions simultaneously.\")\n        coldArea = self.p.area\n        if cold:\n            return coldArea\n        if Tc is None:\n            Tc = self.temperatureInC\n\n        return self.getThermalExpansionFactor(Tc) ** 2 * coldArea\n\n    def getBoundingCircleOuterDiameter(self, Tc=None, cold=False):\n        \"\"\"\n        Approximate it as circular and return the radius.\n\n        This is the smallest it can possibly be. 
Since this is used to determine\n        the outer component, it will never be allowed to be the outer one.\n\n        Parameters\n        ----------\n        Tc : float\n            Ignored for this component\n        cold : bool, optional\n            If True, compute the area with as-input dimensions, instead of thermally-expanded.\n\n        Notes\n        -----\n        Tc is not used in this method for this particular component.\n        \"\"\"\n        return 2 * math.sqrt(self.getComponentArea(cold=cold) / math.pi)\n\n    def getCircleInnerDiameter(self, Tc=None, cold=False):\n        \"\"\"\n        Component is unshaped; assume it is circular and there is no ID (return 0.0).\n\n        Parameters\n        ----------\n        Tc : float, optional\n            Ignored for this component\n        cold : bool, optional\n            Ignored for this component\n        \"\"\"\n        return 0.0\n\n    @staticmethod\n    def fromComponent(otherComponent):\n        \"\"\"\n        Build a new UnshapedComponent that has area equal to that of another component.\n\n        This can be used to \"freeze\" a DerivedShape, among other things.\n\n        Notes\n        -----\n        Components created in this manner will not thermally expand beyond the expanded\n        area of the original component, but will retain their hot temperature.\n        \"\"\"\n        newC = UnshapedComponent(\n            name=otherComponent.name,\n            material=otherComponent.material,\n            Tinput=otherComponent.temperatureInC,\n            Thot=otherComponent.temperatureInC,\n            area=otherComponent.getComponentArea(),\n        )\n\n        return newC\n\n\nclass UnshapedVolumetricComponent(UnshapedComponent):\n    \"\"\"\n    A component with undefined dimensions.\n\n    Useful for situations where you just want to enter the volume directly.\n    \"\"\"\n\n    is3D = True\n\n    def __init__(\n        self,\n        name,\n        material,\n        Tinput,\n     
   Thot,\n        area=np.nan,\n        op=None,\n        isotopics=None,\n        mergeWith=None,\n        components=None,\n        volume=np.nan,\n    ):\n        Component.__init__(\n            self,\n            name,\n            material,\n            Tinput,\n            Thot,\n            area=area,\n            isotopics=isotopics,\n            mergeWith=mergeWith,\n            components=components,\n        )\n        self._linkAndStoreDimensions(components, op=op, userDefinedVolume=volume)\n\n    def getComponentArea(self, cold=False, Tc=None):\n        return self.getVolume() / self.parent.getHeight()\n\n    def getComponentVolume(self):\n        \"\"\"Get the volume of the component in cm^3.\"\"\"\n        return self.getDimension(\"userDefinedVolume\")\n\n    def setVolume(self, val):\n        self.setDimension(\"userDefinedVolume\", val)\n        self.clearCache()\n\n\nclass ZeroMassComponent(UnshapedVolumetricComponent):\n    \"\"\"\n    A component that never has mass -- it always returns zero for getMass and\n    getNumberDensity.\n\n    Useful for situations where you want to give a block integrated flux, but ensure\n    mass is never added to it\n\n    See Also\n    --------\n    armi.reactor.batch.makeMgFluxBlock\n    \"\"\"\n\n    def getNumberDensity(self, *args, **kwargs):\n        \"\"\"Always return 0 because this component has not mass.\"\"\"\n        return 0.0\n\n    def setNumberDensity(self, *args, **kwargs):\n        \"\"\"Never add mass.\"\"\"\n        pass\n\n\nclass PositiveOrNegativeVolumeComponent(UnshapedVolumetricComponent):\n    \"\"\"\n    A component that may have negative mass for removing mass from batches.\n\n    See Also\n    --------\n    armi.reactor.batch.makeMassAdditionComponent\n    \"\"\"\n\n    def _checkNegativeVolume(self, volume):\n        \"\"\"Allow negative areas.\"\"\"\n        pass\n\n\nclass DerivedShape(UnshapedComponent):\n    \"\"\"\n    This a component that does have specific dimensions, but 
they're complicated.\n\n    Notes\n    -----\n    - This component type is \"derived\" through the addition or\n      subtraction of other shaped components (e.g. Coolant)\n    - Because its area and volume are defined by other components,\n      a DerivedShape's area and volume may change as the other\n      components thermally expand. However the DerivedShape cannot\n      drive thermal expansion itself, even if it is a solid component\n      with non-zero thermal expansion coefficient\n    \"\"\"\n\n    def getBoundingCircleOuterDiameter(self, Tc=None, cold=False):\n        \"\"\"\n        The bounding circle for a derived component.\n\n        Notes\n        -----\n        This is used to sort components relative to one another.\n\n        There can only be one derived component per block, this is generally the coolant\n        inside a duct. Under most circumstances, the volume (or area) of coolant will be\n        greater than any other (single) component (i.e. a single pin) within the assembly.\n        So, sorting based on the Dh of the DerivedShape will result in somewhat expected\n        results.\n        \"\"\"\n        if self.parent is None:\n            # since this is only used for comparison, and it must be smaller than at\n            # least one component, make it 0 instead of infinity.\n            return 0.0\n        else:\n            # area = pi r**2 = pi d**2 / 4  => d = sqrt(4*area/pi)\n            return math.sqrt(4.0 * self.getComponentArea() / math.pi)\n\n    def computeVolume(self):\n        \"\"\"Cannot compute volume until it is derived.\n\n        .. impl:: The volume of a DerivedShape depends on the solid shapes surrounding\n            them.\n            :id: I_ARMI_COMP_FLUID0\n            :implements: R_ARMI_COMP_FLUID\n\n            Computing the volume of a ``DerivedShape`` means looking at the solid\n            materials around it, and finding what shaped space is left over in between\n            them. 
This method calls the method ``_deriveVolumeAndArea``, which makes\n            use of the fact that the ARMI reactor data model is hierarchical. It starts\n            by finding the parent of this object, and then finding the volume of all\n            the other objects at this level. Whatever is left over, is the volume of\n            this object. Obviously, you can only have one ``DerivedShape`` child of any\n            parent for this logic to work.\n        \"\"\"\n        return self._deriveVolumeAndArea()\n\n    def getMaxVolume(self):\n        \"\"\"\n        The maximum volume of the parent Block.\n\n        Returns\n        -------\n        vol : float\n            volume in cm^3.\n        \"\"\"\n        return self.parent.getMaxArea() * self.parent.getHeight()\n\n    def _deriveVolumeAndArea(self):\n        \"\"\"\n        Derive the volume and area of a ``DerivedShape``.\n\n        Notes\n        -----\n        If a parent exists, this will iterate over it and then determine both the volume and area\n        based on its context within the scope of the parent object by considering the volumes and\n        areas of the surrounding components.\n\n        Since some components are volumetric shapes, this must consider the volume so that it wraps\n        around in all three dimensions.\n\n        But there are also situations where we need to handle zero-height blocks with purely 2D\n        components. 
Thus we track area and volume fractions here when possible.\n        \"\"\"\n        if self.parent is None:\n            raise ValueError(f\"Cannot compute volume/area of {self} without a parent object.\")\n\n        # Determine the volume/areas of the non-derived shape components within the parent.\n        siblingVolume = 0.0\n        siblingArea = 0.0\n        for sibling in self.parent:\n            if sibling is self:\n                continue\n            elif not self and isinstance(sibling, DerivedShape):\n                raise ValueError(f\"More than one ``DerivedShape`` component in {self.parent} is not allowed.\")\n\n            siblingVolume += sibling.getVolume()\n            try:\n                if siblingArea is not None:\n                    siblingArea += sibling.getArea()\n            except Exception:\n                siblingArea = None\n\n        remainingVolume = self.getMaxVolume() - siblingVolume\n        if siblingArea:\n            remainingArea = self.parent.getMaxArea() - siblingArea\n\n        # Check for negative\n        if remainingVolume < 0:\n            msg = (\n                f\"The component areas in {self.parent} exceed the maximum \"\n                \"allowable volume based on the geometry. Check that the \"\n                \"geometry is defined correctly.\\n\"\n                f\"Maximum allowable volume: {self.getMaxVolume()} \"\n                f\"cm^3\\nVolume of all non-derived shape components: {siblingVolume} cm^3\\n\"\n            )\n            runLog.error(msg)\n            raise ValueError(f\"Negative area/volume errors occurred for {self.parent}. 
Check log for errors.\")\n\n        height = self.parent.getHeight()\n        if not height:\n            # special handling for 0-height blocks\n            if not remainingArea:\n                raise ValueError(f\"Cannot derive area in 0-height block {self.parent}\")\n            self.p.area = remainingArea\n        else:\n            self.p.area = remainingVolume / height\n\n        return remainingVolume\n\n    def getVolume(self):\n        \"\"\"\n        Get volume of derived shape.\n\n        The DerivedShape must pay attention to all of the companion objects, because if\n        they change, this changes.  However it's inefficient to always recompute the\n        derived volume, so we have to rely on the parent to know if anything has changed.\n\n        Since each parent is only allowed one DerivedShape, we can reset the update flag\n        here.\n\n        Returns\n        -------\n        float\n            volume of component in cm^3.\n        \"\"\"\n        if self.parent.derivedMustUpdate:\n            # tell _updateVolume to update it during the below getVolume call\n            self.p.volume = None\n            self.parent.derivedMustUpdate = False\n        vol = UnshapedComponent.getVolume(self)\n        return vol\n\n    def getComponentArea(self, cold=False, Tc=None):\n        \"\"\"\n        Get the area of this component in cm^2.\n\n        Parameters\n        ----------\n        cold : bool, optional\n            If True, compute the area with as-input dimensions, instead of thermally-expanded.\n        Tc : float, optional\n            Temperature in C to compute the area at\n        \"\"\"\n        if cold and Tc is not None:\n            raise ValueError(f\"Cannot compute component area at {Tc} and cold dimensions simultaneously.\")\n\n        if cold:\n            # At cold temp, the DerivedShape has the area of the parent minus the other siblings\n            parentArea = self.parent.getMaxArea()\n            # NOTE: Here we assume 
there is one-and-only-one DerivedShape in each Component\n            siblings = sum([c.getArea(cold=True) for c in self.parent if not isinstance(c, DerivedShape)])\n            return parentArea - siblings\n\n        if Tc is not None:\n            # The DerivedShape has the area of the parent minus the other siblings\n            parentArea = self.parent.getMaxArea()\n            # NOTE: Here we assume there is one-and-only-one DerivedShape in each Component\n            siblings = sum([c.getArea(Tc=Tc) for c in self.parent if not isinstance(c, DerivedShape)])\n            return parentArea - siblings\n\n        if self.parent.derivedMustUpdate:\n            self.computeVolume()\n\n        return self.p.area\n"
  },
  {
    "path": "armi/reactor/components/basicShapes.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nComponents represented by basic shapes.\n\nMany reactor components can be described in 2D by circles, hexagons, rectangles, etc. These\nare defined in this subpackage.\n\"\"\"\n\nimport math\n\nfrom armi.reactor.components import ShapedComponent, componentParameters\n\n\nclass Circle(ShapedComponent):\n    \"\"\"A Circle.\n\n    .. impl:: Circle shaped Component\n        :id: I_ARMI_COMP_SHAPES0\n        :implements: R_ARMI_COMP_SHAPES\n\n        This class provides the implementation of a Circle Component. This includes\n        setting key parameters such as its material, temperature, and dimensions. 
It\n        also includes a method to retrieve the area of a Circle\n        Component via the ``getComponentArea`` method.\n    \"\"\"\n\n    is3D = False\n\n    THERMAL_EXPANSION_DIMS = {\"od\", \"id\"}\n\n    pDefs = componentParameters.getCircleParameterDefinitions()\n\n    def __init__(\n        self,\n        name,\n        material,\n        Tinput,\n        Thot,\n        od,\n        id=0.0,\n        mult=1.0,\n        modArea=None,\n        isotopics=None,\n        mergeWith=None,\n        components=None,\n    ):\n        ShapedComponent.__init__(\n            self,\n            name,\n            material,\n            Tinput,\n            Thot,\n            isotopics=isotopics,\n            mergeWith=mergeWith,\n            components=components,\n        )\n        self._linkAndStoreDimensions(components, od=od, id=id, mult=mult, modArea=modArea)\n\n    def getBoundingCircleOuterDiameter(self, Tc=None, cold=False):\n        return max(self.getDimension(\"id\", Tc, cold), self.getDimension(\"od\", Tc, cold))\n\n    def getCircleInnerDiameter(self, Tc=None, cold=False):\n        return min(self.getDimension(\"id\", Tc, cold), self.getDimension(\"od\", Tc, cold))\n\n    def getComponentArea(self, cold=False, Tc=None):\n        \"\"\"Computes the area for the circle component in cm^2.\"\"\"\n        idiam = self.getDimension(\"id\", cold=cold, Tc=Tc)\n        od = self.getDimension(\"od\", cold=cold, Tc=Tc)\n        mult = self.getDimension(\"mult\", cold=cold, Tc=Tc)\n        area = math.pi * (od**2 - idiam**2) / 4.0\n        area *= mult\n        return area\n\n    def isEncapsulatedBy(self, other):\n        \"\"\"Return True if this ring lies completely inside the argument component.\"\"\"\n        otherID, otherOD = other.getDimension(\"id\"), other.getDimension(\"od\")\n        myID, myOD = self.getDimension(\"id\"), self.getDimension(\"od\")\n        return otherID <= myID < otherOD and otherID < myOD <= otherOD\n\n\nclass 
Hexagon(ShapedComponent):\n    \"\"\"A Hexagon.\n\n    This hexagonal shape has a hexagonal hole cut out of the center of it. By default, that inner\n    hole has a diameter of zero, making this a solid object with no hole.\n\n    .. impl:: Hexagon shaped Component\n        :id: I_ARMI_COMP_SHAPES1\n        :implements: R_ARMI_COMP_SHAPES\n\n        This class provides the implementation of a hexagonal Component. This includes setting key\n        parameters such as its material, temperature, and dimensions. It also includes methods for\n        retrieving geometric dimension information unique to hexagons such as the ``getPitchData``\n        method.\n    \"\"\"\n\n    is3D = False\n\n    pDefs = componentParameters.getHexagonParameterDefinitions()\n\n    THERMAL_EXPANSION_DIMS = {\"ip\", \"op\"}\n\n    def __init__(\n        self,\n        name,\n        material,\n        Tinput,\n        Thot,\n        op,\n        ip=0.0,\n        mult=1.0,\n        modArea=None,\n        isotopics=None,\n        mergeWith=None,\n        components=None,\n    ):\n        ShapedComponent.__init__(\n            self,\n            name,\n            material,\n            Tinput,\n            Thot,\n            isotopics=isotopics,\n            mergeWith=mergeWith,\n            components=components,\n        )\n        self._linkAndStoreDimensions(components, op=op, ip=ip, mult=mult, modArea=modArea)\n\n    def getBoundingCircleOuterDiameter(self, Tc=None, cold=False):\n        sideLength = self.getDimension(\"op\", Tc, cold) / math.sqrt(3)\n        return 2.0 * sideLength\n\n    def getCircleInnerDiameter(self, Tc=None, cold=False):\n        sideLength = self.getDimension(\"ip\", Tc, cold) / math.sqrt(3)\n        return 2.0 * sideLength\n\n    def getComponentArea(self, cold=False, Tc=None):\n        \"\"\"Computes the area for the hexagon component in cm^2.\"\"\"\n        op = self.getDimension(\"op\", cold=cold, Tc=Tc)\n        ip = self.getDimension(\"ip\", cold=cold, 
Tc=Tc)\n        mult = self.getDimension(\"mult\")\n        area = math.sqrt(3.0) / 2.0 * (op**2 - ip**2)\n        area *= mult\n        return area\n\n    def getPitchData(self):\n        \"\"\"\n        Return the pitch data that should be used to determine block pitch.\n\n        Notes\n        -----\n        This pitch data should only be used if this is the pitch defining component in\n        a block. The block is responsible for determining which component in it is the\n        pitch defining component.\n        \"\"\"\n        return self.getDimension(\"op\")\n\n\nclass Rectangle(ShapedComponent):\n    \"\"\"A Rectangle.\n\n    .. impl:: Rectangle shaped Component\n        :id: I_ARMI_COMP_SHAPES2\n        :implements: R_ARMI_COMP_SHAPES\n\n        This class provides the implementation for a rectangular Component. This\n        includes setting key parameters such as its material, temperature, and\n        dimensions. It also includes methods for computing geometric\n        information related to rectangles, such as the\n        ``getBoundingCircleOuterDiameter`` and ``getPitchData`` methods.\n    \"\"\"\n\n    is3D = False\n\n    THERMAL_EXPANSION_DIMS = {\"lengthInner\", \"lengthOuter\", \"widthInner\", \"widthOuter\"}\n\n    pDefs = componentParameters.getRectangleParameterDefinitions()\n\n    def __init__(\n        self,\n        name,\n        material,\n        Tinput,\n        Thot,\n        lengthOuter=None,\n        lengthInner=0.0,\n        widthOuter=None,\n        widthInner=0.0,\n        mult=None,\n        modArea=None,\n        isotopics=None,\n        mergeWith=None,\n        components=None,\n    ):\n        ShapedComponent.__init__(\n            self,\n            name,\n            material,\n            Tinput,\n            Thot,\n            isotopics=isotopics,\n            mergeWith=mergeWith,\n            components=components,\n        )\n        self._linkAndStoreDimensions(\n            components,\n            
lengthOuter=lengthOuter,\n            lengthInner=lengthInner,\n            widthOuter=widthOuter,\n            widthInner=widthInner,\n            mult=mult,\n            modArea=modArea,\n        )\n\n    def getBoundingCircleOuterDiameter(self, Tc=None, cold=False):\n        lengthO = self.getDimension(\"lengthOuter\", Tc, cold=cold)\n        widthO = self.getDimension(\"widthOuter\", Tc, cold=cold)\n        return math.sqrt(widthO**2 + lengthO**2)\n\n    def getCircleInnerDiameter(self, Tc=None, cold=False):\n        lengthI = self.getDimension(\"lengthInner\", Tc, cold=cold)\n        widthI = self.getDimension(\"widthInner\", Tc, cold=cold)\n        return math.sqrt(widthI**2 + lengthI**2)\n\n    def getComponentArea(self, cold=False, Tc=None):\n        \"\"\"Computes the area of the rectangle in cm^2.\"\"\"\n        lengthO = self.getDimension(\"lengthOuter\", cold=cold, Tc=Tc)\n        widthO = self.getDimension(\"widthOuter\", cold=cold, Tc=Tc)\n        lengthI = self.getDimension(\"lengthInner\", cold=cold, Tc=Tc)\n        widthI = self.getDimension(\"widthInner\", cold=cold, Tc=Tc)\n        mult = self.getDimension(\"mult\")\n        area = mult * (lengthO * widthO - lengthI * widthI)\n        return area\n\n    def isLatticeComponent(self):\n        \"\"\"Return true if the component is a `lattice component` containing void material and zero area.\"\"\"\n        return self.containsVoidMaterial() and self.getArea() == 0.0\n\n    def getPitchData(self):\n        \"\"\"\n        Return the pitch data that should be used to determine block pitch.\n\n        Notes\n        -----\n        For rectangular components there are two pitches, one for each dimension.\n        This pitch data should only be used if this is the pitch defining component in\n        a block. 
The block is responsible for determining which component in it is the\n        pitch defining component.\n        \"\"\"\n        return (self.getDimension(\"lengthOuter\"), self.getDimension(\"widthOuter\"))\n\n\nclass SolidRectangle(Rectangle):\n    \"\"\"Solid rectangle component.\"\"\"\n\n    is3D = False\n\n    THERMAL_EXPANSION_DIMS = {\"lengthOuter\", \"widthOuter\"}\n\n    def __init__(\n        self,\n        name,\n        material,\n        Tinput,\n        Thot,\n        lengthOuter=None,\n        widthOuter=None,\n        mult=None,\n        modArea=None,\n        isotopics=None,\n        mergeWith=None,\n        components=None,\n    ):\n        ShapedComponent.__init__(\n            self,\n            name,\n            material,\n            Tinput,\n            Thot,\n            isotopics=isotopics,\n            mergeWith=mergeWith,\n            components=components,\n        )\n        self._linkAndStoreDimensions(\n            components,\n            lengthOuter=lengthOuter,\n            widthOuter=widthOuter,\n            mult=mult,\n            modArea=modArea,\n        )\n\n        # these need to be set so that we don't try to write NoDefaults to the database.\n        # Ultimately, it makes more sense to have the non-Solid Rectangle inherit from\n        # this (and probably be called a HollowRectangle or RectangularShell or\n        # whatever), since a solid rectangle is more generic of the two. 
Then the\n        # Parameter definitions for the hollow rectangle could inherit from the ones,\n        # adding the inner dimensions so that we wouldn't need to do this here.\n        self.p.lengthInner = 0\n        self.p.widthInner = 0\n\n    def getComponentArea(self, cold=False, Tc=None):\n        \"\"\"Computes the area of the solid rectangle in cm^2.\"\"\"\n        lengthO = self.getDimension(\"lengthOuter\", cold=cold, Tc=Tc)\n        widthO = self.getDimension(\"widthOuter\", cold=cold, Tc=Tc)\n        mult = self.getDimension(\"mult\")\n        area = mult * (lengthO * widthO)\n        return area\n\n\nclass Square(Rectangle):\n    \"\"\"Square component that can be solid or hollow.\n\n    .. impl:: Square shaped Component\n        :id: I_ARMI_COMP_SHAPES3\n        :implements: R_ARMI_COMP_SHAPES\n\n        This class provides the implementation for a square Component. This class\n        subclasses the ``Rectangle`` class because a square is a type of rectangle.\n        This includes setting key parameters such as its material, temperature, and\n        dimensions.\n    \"\"\"\n\n    is3D = False\n\n    def __init__(\n        self,\n        name,\n        material,\n        Tinput,\n        Thot,\n        widthOuter=None,\n        widthInner=0.0,\n        mult=None,\n        modArea=None,\n        isotopics=None,\n        mergeWith=None,\n        components=None,\n    ):\n        ShapedComponent.__init__(\n            self,\n            name,\n            material,\n            Tinput,\n            Thot,\n            isotopics=isotopics,\n            mergeWith=mergeWith,\n            components=components,\n        )\n        self._linkAndStoreDimensions(\n            components,\n            lengthOuter=widthOuter,\n            widthOuter=widthOuter,\n            widthInner=widthInner,\n            lengthInner=widthInner,\n            mult=mult,\n            modArea=modArea,\n        )\n\n    def getComponentArea(self, cold=False, Tc=None):\n        
\"\"\"Computes the area of the square in cm^2.\"\"\"\n        widthO = self.getDimension(\"widthOuter\", cold=cold, Tc=Tc)\n        widthI = self.getDimension(\"widthInner\", cold=cold, Tc=Tc)\n        mult = self.getDimension(\"mult\")\n        area = mult * (widthO * widthO - widthI * widthI)\n        return area\n\n    def getBoundingCircleOuterDiameter(self, Tc=None, cold=False):\n        widthO = self.getDimension(\"widthOuter\", Tc, cold=cold)\n        return math.sqrt(widthO**2 + widthO**2)\n\n    def getCircleInnerDiameter(self, Tc=None, cold=False):\n        widthI = self.getDimension(\"widthInner\", Tc, cold=cold)\n        return math.sqrt(widthI**2 + widthI**2)\n\n    def getPitchData(self):\n        \"\"\"\n        Return the pitch data that should be used to determine block pitch.\n\n        Notes\n        -----\n        For rectangular components there are two pitches, one for each dimension.\n        This pitch data should only be used if this is the pitch defining component in\n        a block. The block is responsible for determining which component in it is the\n        pitch defining component.\n        \"\"\"\n        # both dimensions are the same for a square.\n        return (self.getDimension(\"widthOuter\"), self.getDimension(\"widthOuter\"))\n\n\nclass Triangle(ShapedComponent):\n    \"\"\"\n    Triangle with defined base and height.\n\n    .. impl:: Triangle shaped Component\n        :id: I_ARMI_COMP_SHAPES4\n        :implements: R_ARMI_COMP_SHAPES\n\n        This class provides the implementation for defining a triangular Component. This\n        includes setting key parameters such as its material, temperature, and\n        dimensions. It also includes providing a method for retrieving the area of a\n        Triangle Component via the ``getComponentArea`` method.\n\n    Notes\n    -----\n    The exact angles of the triangle are undefined. 
The exact side lengths and angles\n    are not critical to calculation of component area, so area can still be calculated.\n    \"\"\"\n\n    is3D = False\n\n    THERMAL_EXPANSION_DIMS = {\"base\", \"height\"}\n\n    pDefs = componentParameters.getTriangleParameterDefinitions()\n\n    def __init__(\n        self,\n        name,\n        material,\n        Tinput,\n        Thot,\n        base=None,\n        height=None,\n        mult=None,\n        modArea=None,\n        isotopics=None,\n        mergeWith=None,\n        components=None,\n    ):\n        ShapedComponent.__init__(\n            self,\n            name,\n            material,\n            Tinput,\n            Thot,\n            isotopics=isotopics,\n            mergeWith=mergeWith,\n            components=components,\n        )\n        self._linkAndStoreDimensions(components, base=base, height=height, mult=mult, modArea=modArea)\n\n    def getComponentArea(self, cold=False, Tc=None):\n        \"\"\"Computes the area of the triangle in cm^2.\"\"\"\n        base = self.getDimension(\"base\", cold=cold, Tc=Tc)\n        height = self.getDimension(\"height\", cold=cold, Tc=Tc)\n        mult = self.getDimension(\"mult\")\n        area = mult * base * height / 2.0\n        return area\n"
  },
  {
    "path": "armi/reactor/components/complexShapes.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Components represented by complex shapes, and typically less widely used.\"\"\"\n\nimport math\n\nfrom armi.reactor.components import ShapedComponent, basicShapes, componentParameters\n\n\nclass HoledHexagon(basicShapes.Hexagon):\n    \"\"\"Hexagon with n uniform circular holes hollowed out of it.\n\n    .. impl:: Holed hexagon shaped Component\n        :id: I_ARMI_COMP_SHAPES5\n        :implements: R_ARMI_COMP_SHAPES\n\n        This class provides an implementation for a holed hexagonal Component. This includes setting\n        key parameters such as its material, temperature, and dimensions. 
It also provides the\n        capability to retrieve the diameter of the inner hole via the ``getCircleInnerDiameter``\n        method.\n    \"\"\"\n\n    THERMAL_EXPANSION_DIMS = {\"op\", \"holeOD\", \"holeRadFromCenter\"}\n\n    pDefs = componentParameters.getHoledHexagonParameterDefinitions()\n\n    def __init__(\n        self,\n        name,\n        material,\n        Tinput,\n        Thot,\n        op,\n        holeOD,\n        nHoles,\n        holeRadFromCenter=0.0,\n        mult=1.0,\n        modArea=None,\n        isotopics=None,\n        mergeWith=None,\n        components=None,\n    ):\n        ShapedComponent.__init__(\n            self,\n            name,\n            material,\n            Tinput,\n            Thot,\n            isotopics=isotopics,\n            mergeWith=mergeWith,\n            components=components,\n        )\n        self._linkAndStoreDimensions(\n            components,\n            op=op,\n            holeOD=holeOD,\n            nHoles=nHoles,\n            holeRadFromCenter=holeRadFromCenter,\n            mult=mult,\n            modArea=modArea,\n        )\n\n    def getComponentArea(self, cold=False, Tc=None):\n        \"\"\"Computes the area for the hexagon with n number of circular holes in cm^2.\"\"\"\n        op = self.getDimension(\"op\", cold=cold, Tc=Tc)\n        holeOD = self.getDimension(\"holeOD\", cold=cold, Tc=Tc)\n        nHoles = self.getDimension(\"nHoles\", cold=cold, Tc=Tc)\n        mult = self.getDimension(\"mult\")\n        hexArea = math.sqrt(3.0) / 2.0 * (op**2)\n        circularArea = nHoles * math.pi * ((holeOD / 2.0) ** 2)\n        area = mult * (hexArea - circularArea)\n        return area\n\n    def getCircleInnerDiameter(self, Tc=None, cold=False):\n        \"\"\"\n        For the special case of only one single hole, returns the diameter of that hole.\n\n        For any other case, returns 0.0 because an \"circle inner diameter\" becomes undefined.\n        \"\"\"\n        if 
self.getDimension(\"nHoles\") == 1:\n            return self.getDimension(\"holeOD\", Tc, cold)\n        else:\n            return 0.0\n\n\nclass HexHoledCircle(basicShapes.Circle):\n    \"\"\"Circle with a single uniform hexagonal hole hollowed out of it.\"\"\"\n\n    THERMAL_EXPANSION_DIMS = {\"od\", \"holeOP\"}\n\n    pDefs = componentParameters.getHexHoledCircleParameterDefinitions()\n\n    def __init__(\n        self,\n        name,\n        material,\n        Tinput,\n        Thot,\n        od,\n        holeOP,\n        mult=1.0,\n        modArea=None,\n        isotopics=None,\n        mergeWith=None,\n        components=None,\n    ):\n        ShapedComponent.__init__(\n            self,\n            name,\n            material,\n            Tinput,\n            Thot,\n            isotopics=isotopics,\n            mergeWith=mergeWith,\n            components=components,\n        )\n        self._linkAndStoreDimensions(components, od=od, holeOP=holeOP, mult=mult, modArea=modArea)\n\n    def getComponentArea(self, cold=False, Tc=None):\n        r\"\"\"Computes the area for the circle with one hexagonal hole.\"\"\"\n        od = self.getDimension(\"od\", cold=cold, Tc=Tc)\n        holeOP = self.getDimension(\"holeOP\", cold=cold, Tc=Tc)\n        mult = self.getDimension(\"mult\")\n        hexArea = math.sqrt(3.0) / 2.0 * (holeOP**2)\n        circularArea = math.pi * ((od / 2.0) ** 2)\n        area = mult * (circularArea - hexArea)\n        return area\n\n    def getCircleInnerDiameter(self, Tc=None, cold=False):\n        \"\"\"Returns the diameter of the hole equal to the hexagon outer pitch.\"\"\"\n        return self.getDimension(\"holeOP\", Tc, cold)\n\n\nclass FilletedHexagon(basicShapes.Hexagon):\n    \"\"\"\n    A hexagon with a hexagonal hole cut out of the center of it, where the corners of both the\n    outer and inner hexagons are rounded, with independent radii of curvature.\n\n    By default, the inner hole has a diameter of zero, making this a solid 
object with no hole.\n    \"\"\"\n\n    THERMAL_EXPANSION_DIMS = {\"iR\", \"oR\", \"ip\", \"op\"}\n\n    pDefs = componentParameters.getFilletedHexagonParameterDefinitions()\n\n    def __init__(\n        self,\n        name,\n        material,\n        Tinput,\n        Thot,\n        op,\n        ip=0.0,\n        iR=0.0,\n        oR=0.0,\n        mult=1.0,\n        modArea=None,\n        isotopics=None,\n        mergeWith=None,\n        components=None,\n    ):\n        ShapedComponent.__init__(\n            self,\n            name,\n            material,\n            Tinput,\n            Thot,\n            isotopics=isotopics,\n            mergeWith=mergeWith,\n            components=components,\n        )\n        self._linkAndStoreDimensions(components, op=op, ip=ip, iR=iR, oR=oR, mult=mult, modArea=modArea)\n\n    @staticmethod\n    def _area(D, r):\n        \"\"\"Helper function, to calculate the area of a hexagon with rounded corners.\"\"\"\n        if D <= 0.0:\n            return 0.0\n\n        area = 1.0 - (1.0 - (math.pi / (2.0 * math.sqrt(3)))) * (2 * r / D) ** 2\n        area *= (math.sqrt(3.0) / 2.0) * D**2\n        return area\n\n    def getComponentArea(self, cold=False, Tc=None):\n        \"\"\"Computes the area for the rounded hexagon component in cm^2.\"\"\"\n        op = self.getDimension(\"op\", cold=cold, Tc=Tc)\n        ip = self.getDimension(\"ip\", cold=cold, Tc=Tc)\n        oR = self.getDimension(\"oR\", cold=cold, Tc=Tc)\n        iR = self.getDimension(\"iR\", cold=cold, Tc=Tc)\n        mult = self.getDimension(\"mult\")\n\n        area = self._area(op, oR) - self._area(ip, iR)\n        area *= mult\n        return area\n\n\nclass HoledRectangle(basicShapes.Rectangle):\n    \"\"\"Rectangle with one circular hole in it.\"\"\"\n\n    THERMAL_EXPANSION_DIMS = {\"lengthOuter\", \"widthOuter\", \"holeOD\"}\n\n    pDefs = componentParameters.getHoledRectangleParameterDefinitions()\n\n    def __init__(\n        self,\n        name,\n        
material,\n        Tinput,\n        Thot,\n        holeOD,\n        lengthOuter=None,\n        widthOuter=None,\n        mult=1.0,\n        modArea=None,\n        isotopics=None,\n        mergeWith=None,\n        components=None,\n    ):\n        ShapedComponent.__init__(\n            self,\n            name,\n            material,\n            Tinput,\n            Thot,\n            isotopics=isotopics,\n            mergeWith=mergeWith,\n            components=components,\n        )\n        self._linkAndStoreDimensions(\n            components,\n            lengthOuter=lengthOuter,\n            widthOuter=widthOuter,\n            holeOD=holeOD,\n            mult=mult,\n            modArea=modArea,\n        )\n\n    def getComponentArea(self, cold=False, Tc=None):\n        \"\"\"Computes the area (in cm^2) for the the rectangle with one hole in it.\"\"\"\n        length = self.getDimension(\"lengthOuter\", cold=cold, Tc=Tc)\n        width = self.getDimension(\"widthOuter\", cold=cold, Tc=Tc)\n        rectangleArea = length * width\n        holeOD = self.getDimension(\"holeOD\", cold=cold, Tc=Tc)\n        circularArea = math.pi * ((holeOD / 2.0) ** 2)\n        mult = self.getDimension(\"mult\")\n        area = mult * (rectangleArea - circularArea)\n        return area\n\n    def getCircleInnerDiameter(self, Tc=None, cold=False):\n        \"\"\"Returns the ``holeOD``.\"\"\"\n        return self.getDimension(\"holeOD\", Tc, cold)\n\n\nclass HoledSquare(basicShapes.Square):\n    \"\"\"Square with one circular hole in it.\n\n    .. impl:: Holed square shaped Component\n        :id: I_ARMI_COMP_SHAPES6\n        :implements: R_ARMI_COMP_SHAPES\n\n        This class provides an implementation for a holed square Component. This includes setting\n        key parameters such as its material, temperature, and dimensions. 
It also includes methods\n        to retrieve geometric dimension information unique to holed squares via the\n        ``getComponentArea`` and ``getCircleInnerDiameter`` methods.\n    \"\"\"\n\n    THERMAL_EXPANSION_DIMS = {\"widthOuter\", \"holeOD\"}\n\n    pDefs = componentParameters.getHoledRectangleParameterDefinitions()\n\n    def __init__(\n        self,\n        name,\n        material,\n        Tinput,\n        Thot,\n        holeOD,\n        widthOuter=None,\n        mult=1.0,\n        modArea=None,\n        isotopics=None,\n        mergeWith=None,\n        components=None,\n    ):\n        ShapedComponent.__init__(\n            self,\n            name,\n            material,\n            Tinput,\n            Thot,\n            isotopics=isotopics,\n            mergeWith=mergeWith,\n            components=components,\n        )\n        self._linkAndStoreDimensions(components, widthOuter=widthOuter, holeOD=holeOD, mult=mult, modArea=modArea)\n\n    def getComponentArea(self, cold=False, Tc=None):\n        \"\"\"Computes the area (in cm^2) for the the square with one hole in it.\"\"\"\n        width = self.getDimension(\"widthOuter\", cold=cold, Tc=Tc)\n        rectangleArea = width**2\n        holeOD = self.getDimension(\"holeOD\", cold=cold, Tc=Tc)\n        circularArea = math.pi * ((holeOD / 2.0) ** 2)\n        mult = self.getDimension(\"mult\")\n        area = mult * (rectangleArea - circularArea)\n        return area\n\n    def getCircleInnerDiameter(self, Tc=None, cold=False):\n        \"\"\"Returns the ``holeOD``.\"\"\"\n        return self.getDimension(\"holeOD\", Tc, cold)\n\n\nclass Helix(ShapedComponent):\n    \"\"\"A spiral wire component used to model a pin wire-wrap.\n\n    .. impl:: Helix shaped Component\n        :id: I_ARMI_COMP_SHAPES7\n        :implements: R_ARMI_COMP_SHAPES\n\n        This class provides the implementation for a helical Component. 
This includes setting key\n        parameters such as its material, temperature, and dimensions. It also includes the\n        ``getComponentArea`` method to retrieve the area of a helix. Helixes can be used for wire\n        wrapping around fuel pins in fast reactor designs.\n\n    Notes\n    -----\n    http://mathworld.wolfram.com/Helix.html\n    In a single rotation with an axial climb of P, the length of the helix will be a factor of\n    2*pi*sqrt(r^2+c^2)/2*pi*c longer than vertical length L. P = 2*pi*c.\n\n    - od: outer diameter of the helix wire\n    - id: inner diameter of the helix wire (if non-zero, helix wire is annular.)\n    - axialPitch: vertical distance between wraps. Is also the axial distance required to complete a\n                  full 2*pi rotation.\n    - helixDiameter: The helix diameter is the distance from the center of the wire-wrap on one side\n                     to the center of the wire-wrap on the opposite side (can be visualized if the\n                     axial pitch is 0.0 - creates a circle).\n    \"\"\"\n\n    is3D = False\n\n    THERMAL_EXPANSION_DIMS = {\"od\", \"id\", \"axialPitch\", \"helixDiameter\"}\n\n    pDefs = componentParameters.getHelixParameterDefinitions()\n\n    def __init__(\n        self,\n        name,\n        material,\n        Tinput,\n        Thot,\n        od,\n        axialPitch,\n        helixDiameter,\n        mult=1.0,\n        id=0.0,\n        modArea=None,\n        isotopics=None,\n        mergeWith=None,\n        components=None,\n    ):\n        ShapedComponent.__init__(\n            self,\n            name,\n            material,\n            Tinput,\n            Thot,\n            isotopics=isotopics,\n            mergeWith=mergeWith,\n            components=components,\n        )\n        self._linkAndStoreDimensions(\n            components,\n            od=od,\n            axialPitch=axialPitch,\n            mult=mult,\n            helixDiameter=helixDiameter,\n            id=id,\n        
    modArea=modArea,\n        )\n\n    def getBoundingCircleOuterDiameter(self, Tc=None, cold=False):\n        \"\"\"The diameter of a circle which is encompassed by the exterior of the wire-wrap.\"\"\"\n        return self.getDimension(\"helixDiameter\", Tc, cold=cold) + self.getDimension(\"od\", Tc, cold)\n\n    def getCircleInnerDiameter(self, Tc=None, cold=False):\n        \"\"\"The diameter of a circle which is encompassed by the interior of the wire-wrap.\n\n        This should be equal to the outer diameter of the pin in which the wire is wrapped around.\n        \"\"\"\n        return self.getDimension(\"helixDiameter\", Tc, cold=cold) - self.getDimension(\"od\", Tc, cold)\n\n    def getComponentArea(self, cold=False, Tc=None):\n        \"\"\"Computes the area for the helix in cm^2.\"\"\"\n        ap = self.getDimension(\"axialPitch\", cold=cold, Tc=Tc)\n        hd = self.getDimension(\"helixDiameter\", cold=cold, Tc=Tc)\n        id = self.getDimension(\"id\", cold=cold, Tc=Tc)\n        od = self.getDimension(\"od\", cold=cold, Tc=Tc)\n        mult = self.getDimension(\"mult\")\n        c = ap / (2.0 * math.pi)\n        helixFactor = math.sqrt((hd / 2.0) ** 2 + c**2) / c\n        area = mult * math.pi * ((od / 2.0) ** 2 - (id / 2.0) ** 2) * helixFactor\n        return area\n"
  },
  {
    "path": "armi/reactor/components/component.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nComponents represent geometric objects within an assembly such as fuel, bond, coolant, ducts, wires, etc.\n\nThis module contains the abstract definition of a Component.\n\"\"\"\n\nimport copy\nimport re\nfrom typing import Union\n\nimport numpy as np\n\nfrom armi import materials, runLog\nfrom armi.bookkeeping import report\nfrom armi.materials import custom, material, void\nfrom armi.reactor import composites, flags, parameters\nfrom armi.reactor.components import componentParameters\nfrom armi.utils import densityTools\nfrom armi.utils.units import C_TO_K\n\nCOMPONENT_LINK_REGEX = re.compile(r\"^\\s*(.+?)\\s*\\.\\s*(.+?)\\s*$\")\n\n\n_NICE_DIM_NAMES = {\n    \"id\": \"Inner Diameter (cm)\",\n    \"od\": \"Outer Diameter (cm)\",\n    \"ip\": \"Inner Pitch (cm)\",\n    \"op\": \"Outer Pitch (cm)\",\n    \"mult\": \"Multiplicity\",\n    \"axialPitch\": \"Axial Pitch (cm)\",\n    \"helixDiameter\": \"Helix Diameter (cm)\",\n    \"length\": \"Length (cm)\",\n    \"height\": \"Height (cm)\",\n    \"width\": \"Width (cm)\",\n    \"areaMod\": \"Area Mod. 
Factor\",\n}\n\n\nclass _DimensionLink(tuple):\n    \"\"\"\n    A linked dimension, where one component uses a dimension from another.\n\n    Useful when the boundaries are physically shared and should move together.\n\n    The tuple contains (linkedComponent, linkedDimensionName).\n\n    In equating two components, we need the linked dimensions to resolve responsibly/precisely.\n    \"\"\"\n\n    def getLinkedComponent(self):\n        \"\"\"Return the linked component.\"\"\"\n        return self[0]\n\n    def resolveDimension(self, Tc=None, cold=False):\n        \"\"\"Return the current value of the linked dimension.\"\"\"\n        linkedComponent = self[0]\n        dimID = self[1]\n        return linkedComponent.getDimension(dimID, Tc=Tc, cold=cold)\n\n    def __eq__(self, other):\n        otherDimension = other.resolveDimension() if isinstance(other, _DimensionLink) else other\n        return self.resolveDimension() == otherDimension\n\n    def __ne__(self, other):\n        return not self.__eq__(other)\n\n    def __str__(self):\n        \"\"\"Return a string representation of a dimension link.\n\n        These look like ``otherComponentName.otherDimensionName``. For example, if a link were to a\n        ``fuel`` component's ``od`` param, the link would render as ``fuel.od``.\n        \"\"\"\n        return f\"{self[0].name}.{self[1]}\"\n\n\nclass ComponentType(composites.CompositeModelType):\n    \"\"\"\n    ComponentType is a metaclass for storing and initializing Component subclass types.\n\n    The construction of Component subclasses is being done through factories for ease of user input.\n    As a consequence, the ``__init__`` methods' arguments need to be known in order to conform them\n    to the correct format. 
Additionally, the constructor's arguments can be used to determine the\n    Component subclasses' dimensions.\n\n    Warning\n    -------\n    The import-time metaclass-based component subclass registration was a good idea, but in practice\n    has caused significant confusion and trouble. We will replace this soon with an explicit\n    plugin-based component subclass registration system.\n    \"\"\"\n\n    TYPES = dict()  #: :meta hide-value:\n\n    NON_DIMENSION_NAMES = (\n        \"Tinput\",\n        \"Thot\",\n        \"isotopics\",\n        \"mergeWith\",\n        \"material\",\n        \"name\",\n        \"components\",\n        \"area\",\n    )\n\n    def __new__(cls, name, bases, attrs):\n        newType = composites.CompositeModelType.__new__(cls, name, bases, attrs)\n        ComponentType.TYPES[name.lower()] = newType\n\n        # the co_varnames attribute contains arguments and then locals so we must\n        # restrict it to just the arguments.\n        signature = newType.__init__.__code__.co_varnames[1 : newType.__init__.__code__.co_argcount]\n\n        # INIT_SIGNATURE and DIMENSION_NAMES are in the same order as the method signature\n        newType.INIT_SIGNATURE = tuple(signature)\n        newType.DIMENSION_NAMES = tuple(k for k in newType.INIT_SIGNATURE if k not in ComponentType.NON_DIMENSION_NAMES)\n        return newType\n\n\nclass Component(composites.Composite, metaclass=ComponentType):\n    \"\"\"\n    A primitive object in a reactor that has definite area/volume, material and composition.\n\n    Could be fuel pins, cladding, duct, wire wrap, etc. One component object may represent\n    multiple physical components via the ``multiplicity`` mechanism.\n\n    .. impl:: Define a physical piece of a reactor.\n        :id: I_ARMI_COMP_DEF\n        :implements: R_ARMI_COMP_DEF\n\n        The primitive object in an ARMI reactor is a Component. A Component is comprised\n        of a shape and composition. 
This class serves as a base class which all\n        Component types within ARMI are built upon. All primitive shapes (such as a\n        square, circle, holed hexagon, helix etc.) are derived from this base class.\n\n        Fundamental capabilities of this class include the ability to store parameters\n        and attributes which describe the physical state of each Component within the\n        ARMI data model.\n\n    .. impl:: Order Components by their outermost diameter (using the < operator).\n        :id: I_ARMI_COMP_ORDER\n        :implements: R_ARMI_COMP_ORDER\n\n        Determining Component order by outermost diameters is implemented via\n        the ``__lt__()`` method, which is used to control ``sort()`` as the\n        standard approach in Python. However, ``__lt__()`` does not show up in the API.\n\n    Attributes\n    ----------\n    temperatureInC : float\n        Current temperature of component in celsius.\n    inputTemperatureInC : float\n        Reference temperature in C at which dimension definitions were input\n    temperatureInC : float\n        Temperature in C to which dimensions were thermally-expanded upon input.\n    material : str or material.Material\n        The material object that makes up this component and give it its thermo-mechanical properties.\n    \"\"\"\n\n    DIMENSION_NAMES = tuple()  # will be assigned by ComponentType\n    INIT_SIGNATURE = tuple()  # will be assigned by ComponentType\n\n    is3D = False  # flag to show that area is 2D by default\n\n    _COMP_REPORT_GROUPS = {\n        \"intercoolant\": report.INTERCOOLANT_DIMS,\n        \"bond\": report.BOND_DIMS,\n        \"duct\": report.DUCT_DIMS,\n        \"coolant\": report.COOLANT_DIMS,\n        \"clad\": report.CLAD_DIMS,\n        \"fuel\": report.FUEL_DIMS,\n        \"wire\": report.WIRE_DIMS,\n        \"liner\": report.LINER_DIMS,\n        \"gap\": report.GAP_DIMS,\n    }\n\n    _TOLERANCE = 1e-10\n\n    THERMAL_EXPANSION_DIMS = set()\n\n    pDefs = 
componentParameters.getComponentParameterDefinitions()\n\n    material: materials.Material\n\n    def __init__(\n        self,\n        name,\n        material,\n        Tinput,\n        Thot,\n        area=None,\n        isotopics=\"\",\n        mergeWith=\"\",\n        components=None,\n    ):\n        if components and name in components:\n            raise ValueError(f\"Non-unique component name {name} repeated in same block.\")\n\n        composites.Composite.__init__(self, str(name))\n        self.p.area = area\n        self.inputTemperatureInC = Tinput\n        self.temperatureInC = Thot\n        self.material = None\n        self.setProperties(material)\n        self.applyMaterialMassFracsToNumberDensities()  # not necessary when duplicating\n        self.setType(name)\n        self.p.mergeWith = mergeWith\n        self.p.customIsotopicsName = isotopics\n\n    @property\n    def temperatureInC(self):\n        \"\"\"Return the hot temperature in Celsius.\"\"\"\n        return self.p.temperatureInC\n\n    @temperatureInC.setter\n    def temperatureInC(self, value):\n        \"\"\"Set the hot temperature in Celsius.\"\"\"\n        self.p.temperatureInC = value\n\n    @property\n    def temperatureInK(self):\n        \"\"\"Current hot temperature in Kelvin.\"\"\"\n        return self.temperatureInC + C_TO_K\n\n    def __lt__(self, other):\n        \"\"\"\n        True if a circle encompassing this object has a smaller diameter than one encompassing\n        another component.\n\n        If the bounding circles for both components have identical size, then revert to checking the\n        inner diameter of each component for sorting.\n\n        This allows sorting because the Python sort functions only use this method.\n        \"\"\"\n        thisOD = self.getBoundingCircleOuterDiameter(cold=True)\n        thatOD = other.getBoundingCircleOuterDiameter(cold=True)\n        try:\n            if thisOD == thatOD:\n                thisID = 
self.getCircleInnerDiameter(cold=True)\n                thatID = other.getCircleInnerDiameter(cold=True)\n                return thisID < thatID\n            else:\n                return thisOD < thatOD\n        except (NotImplementedError, Exception) as e:\n            if isinstance(e, NotImplementedError):\n                raise NotImplementedError(f\"getCircleInnerDiameter not implemented for at least one of {self}, {other}\")\n            else:\n                raise ValueError(\n                    f\"Components 1 ({self} with OD {thisOD}) and 2 ({other} and OD {thatOD}) cannot be ordered because \"\n                    \"their bounding circle outer diameters are not comparable.\"\n                )\n\n    def __setstate__(self, state):\n        composites.Composite.__setstate__(self, state)\n        self.material.parent = self\n\n    def _linkAndStoreDimensions(self, components, **dims):\n        \"\"\"Link dimensions to another component.\"\"\"\n        for key, val in dims.items():\n            self.setDimension(key, val)\n\n        if components:\n            self.resolveLinkedDims(components)\n\n    def resolveLinkedDims(self, components):\n        \"\"\"Convert dimension link strings to actual links.\n\n        .. impl:: The volume of some defined shapes depend on the solid components surrounding them.\n            :id: I_ARMI_COMP_FLUID1\n            :implements: R_ARMI_COMP_FLUID\n\n            Some Components are fluids and are thus defined by the shapes surrounding\n            them. This method cycles through each dimension defining the border of this\n            Component and converts the name of that Component to a link to the object\n            itself. 
This series of links is then used downstream to resolve dimensional information.\n        \"\"\"\n        for dimName in self.DIMENSION_NAMES:\n            value = self.p[dimName]\n            if not isinstance(value, str):\n                continue\n\n            match = COMPONENT_LINK_REGEX.search(value)\n\n            if match:\n                try:\n                    name = match.group(1)\n                    comp = components[name]\n                    linkedKey = match.group(2)\n                    self.p[dimName] = _DimensionLink((comp, linkedKey))\n                except Exception:\n                    if value.count(\".\") > 1:\n                        raise ValueError(\n                            f\"Name of {self} has a period in it. \"\n                            f\"Components cannot not have periods in their names: `{value}`\"\n                        )\n                    else:\n                        raise KeyError(f\"Bad component link `{dimName}` defined as `{value}` in {self}\")\n\n    def setLink(self, key, otherComp, otherCompKey):\n        \"\"\"Set the dimension link.\"\"\"\n        self.p[key] = _DimensionLink((otherComp, otherCompKey))\n\n    def setProperties(self, properties):\n        \"\"\"Apply thermo-mechanical properties of a Material.\"\"\"\n        if isinstance(properties, str):\n            mat = materials.resolveMaterialClassByName(properties)()\n            # note that the material will not be expanded to natural isotopics\n            # here because the user-input blueprints information is not available\n        else:\n            mat = properties\n        self.material = mat\n        self.material.parent = self\n        self.clearLinkedCache()\n\n    def applyMaterialMassFracsToNumberDensities(self):\n        \"\"\"\n        Set the hot number densities for the component based on material mass fractions/density.\n\n        Notes\n        -----\n        - the density returned accounts for the expansion of the component\n   
       due to the difference in self.inputTemperatureInC and self.temperatureInC\n        - After the expansion, the density of the component should reflect the 3d\n          density of the material\n        \"\"\"\n        # note, that this is not the actual material density, but rather 2D expanded\n        # `density` is 3D density\n        # call getProperty to cache and improve speed\n        density = self.material.getProperty(\"pseudoDensity\", Tc=self.temperatureInC)\n        self.p.numberDensities = densityTools.getNDensFromMasses(density, self.material.massFrac)\n\n        # Sometimes material thermal expansion depends on its parent's composition (e.g. Pu frac) so\n        # setting number densities can sometimes change thermal expansion behavior. Call again so\n        # the material has access to its parent's comp when providing the reference initial density.\n        densityBasedOnParentComposition = self.material.getProperty(\"pseudoDensity\", Tc=self.temperatureInC)\n        self.p.nuclides, self.p.numberDensities = densityTools.getNDensFromMasses(\n            densityBasedOnParentComposition, self.material.massFrac\n        )\n\n        # material needs to be expanded from the material's cold temp to hot,\n        # not components cold temp, so we don't use mat.linearExpansionFactor or\n        # component.getThermalExpansionFactor.\n        # Materials don't typically define the temperature for which their references\n        # density is defined so linearExpansionPercent must be called\n        coldMatAxialExpansionFactor = 1.0 + self.material.linearExpansionPercent(Tc=self.temperatureInC) / 100\n        self.changeNDensByFactor(1.0 / coldMatAxialExpansionFactor)\n\n    def adjustDensityForHeightExpansion(self, newHot):\n        \"\"\"\n        Change the densities in cases where height of the block/component is changing with expansion.\n\n        Notes\n        -----\n        Call before setTemperature since we need old hot temp. 
This works well if there is only 1\n        solid component. If there are multiple components expanding at different rates during\n        thermal expansion this becomes more complicated, and axial expansion should be used.\n        Multiple expansion rates cannot trivially be accommodated.\n        \"\"\"\n        self.changeNDensByFactor(1.0 / self.getHeightFactor(newHot))\n\n    def getHeightFactor(self, newHot):\n        \"\"\"\n        Return the factor by which height would change if we did 3D expansion.\n\n        Notes\n        -----\n        Call before setTemperature since we need old hot temp.\n        \"\"\"\n        return self.getThermalExpansionFactor(Tc=newHot, T0=self.temperatureInC)\n\n    def getProperties(self):\n        \"\"\"Return the active Material object defining thermo-mechanical properties.\n\n        .. impl:: Material properties are retrievable.\n            :id: I_ARMI_COMP_MAT0\n            :implements: R_ARMI_COMP_MAT\n\n            This method returns the material object that is assigned to the Component.\n\n        .. 
impl:: Components have one-and-only-one material.\n            :id: I_ARMI_COMP_1MAT\n            :implements: R_ARMI_COMP_1MAT\n\n            This method returns the material object that is assigned to the Component.\n        \"\"\"\n        return self.material\n\n    @property\n    def liquidPorosity(self):\n        return self.parent.p.liquidPorosity\n\n    @liquidPorosity.setter\n    def liquidPorosity(self, porosity):\n        self.parent.p.liquidPorosity = porosity\n\n    @property\n    def gasPorosity(self):\n        return self.parent.p.gasPorosity\n\n    @gasPorosity.setter\n    def gasPorosity(self, porosity):\n        self.parent.p.gasPorosity = porosity\n\n    def __copy__(self):\n        \"\"\"Duplicate a component, used for breaking fuel into separate components.\"\"\"\n        linkedDims = self._getLinkedDimsAndValues()\n        newC = copy.deepcopy(self)\n        self._restoreLinkedDims(linkedDims)\n        newC._restoreLinkedDims(linkedDims)\n        return newC\n\n    def setLumpedFissionProducts(self, lfpCollection):\n        \"\"\"Sets lumped fission product collection on a lfp compatible material if possible.\"\"\"\n        try:\n            self.getProperties().setLumpedFissionProducts(lfpCollection)\n        except AttributeError:\n            # This material doesn't setLumpedFissionProducts because it's a regular\n            # material, not a lumpedFissionProductCompatable material\n            pass\n\n    def getArea(self, cold=False, Tc=None):\n        \"\"\"\n        Get the area of a Component in cm^2.\n\n        .. 
impl:: Get a dimension of a Component.\n            :id: I_ARMI_COMP_VOL0\n            :implements: R_ARMI_COMP_VOL\n\n            This method returns the area of a Component.\n\n        See Also\n        --------\n        block.getVolumeFractions: component coolant is typically the \"leftover\" and is calculated and set here\n        \"\"\"\n        area = self.getComponentArea(cold=cold, Tc=Tc)\n        if self.p.get(\"modArea\", None):\n            comp, arg = self.p.modArea\n            if arg == \"sub\":\n                area -= comp.getComponentArea(cold=cold, Tc=Tc)\n            elif arg == \"add\":\n                area += comp.getComponentArea(cold=cold, Tc=Tc)\n            else:\n                raise ValueError(f\"Option {arg} does not exist\")\n\n        self._checkNegativeArea(area, cold)\n        return area\n\n    def getVolume(self):\n        \"\"\"\n        Return the volume [cm^3] of the Component.\n\n        .. impl:: Get a dimension of a Component.\n            :id: I_ARMI_COMP_VOL1\n            :implements: R_ARMI_COMP_VOL\n\n            This method returns the volume of a Component.\n\n        Notes\n        -----\n        ``self.p.volume`` is not set until this method is called, so under most circumstances it is\n        probably not safe to access ``self.p.volume`` directly. 
This is because not all components\n        (e.g., ``DerivedShape``) can compute their volume during initialization.\n        \"\"\"\n        if self.p.volume is None:\n            self._updateVolume()\n            if self.p.volume is None:\n                raise ValueError(f\"{self} has undefined volume.\")\n        return self.p.volume\n\n    def clearCache(self):\n        \"\"\"\n        Invalidate the volume so that it will be recomputed from current dimensions upon next access.\n\n        The updated value will be based on its shape and current dimensions.\n        If there is a parent container and that container contains a DerivedShape, then that must be\n        updated as well since its volume may be changing.\n\n        See Also\n        --------\n        clearLinkedCache: Clears cache of components that depend on this component's dimensions.\n        \"\"\"\n        self.p.volume = None\n        if self.parent:\n            self.parent.derivedMustUpdate = True\n\n    def _updateVolume(self):\n        \"\"\"Recompute and store volume.\"\"\"\n        self.p.volume = self.computeVolume()\n\n    def computeVolume(self):\n        \"\"\"Compute volume.\"\"\"\n        if not self.is3D:\n            volume = self.getArea() * self.parent.getHeight()\n        else:\n            volume = self.getComponentVolume()\n\n        self._checkNegativeVolume(volume)\n        return volume\n\n    def _checkNegativeArea(self, area, cold):\n        \"\"\"\n        Check for negative area and warn/error when appropriate.\n\n        Negative component area is allowed for Void materials (such as gaps) which may be placed\n        between components that will overlap during thermal expansion (such as liners and cladding\n        and annular fuel).\n\n        Overlapping is allowed to maintain conservation of atoms while sticking close to the\n        as-built geometry. 
Modules that need true geometries will have to handle this themselves.\n        \"\"\"\n        if np.isnan(area):\n            return\n\n        if area < 0.0:\n            if (cold and not self.containsVoidMaterial()) or self.containsSolidMaterial():\n                negAreaFailure = (\n                    f\"Component {self} with {self.material} has cold negative area of {area} cm^2. \"\n                    \"This can be caused by component overlap with component dimension linking or by invalid inputs.\"\n                )\n                raise ArithmeticError(negAreaFailure)\n\n    def _checkNegativeVolume(self, volume):\n        \"\"\"Check for negative volume.\n\n        See Also\n        --------\n        self._checkNegativeArea\n        \"\"\"\n        if np.isnan(volume):\n            return\n\n        if volume < 0.0 and self.containsSolidMaterial():\n            negVolFailure = (\n                f\"Component {self} with {self.material} has cold negative volume of {volume} cm^3. 
\"\n                \"This can be caused by component overlap with component dimension linking or by invalid inputs.\"\n            )\n            raise ArithmeticError(negVolFailure)\n\n    def containsVoidMaterial(self):\n        \"\"\"Returns True if component material is void.\"\"\"\n        return isinstance(self.material, void.Void)\n\n    def containsSolidMaterial(self):\n        \"\"\"Returns True if the component material is a solid.\"\"\"\n        return not isinstance(self.material, material.Fluid)\n\n    def getComponentArea(self, cold=False, Tc=None):\n        \"\"\"\n        Get the area of this component in cm^2.\n\n        Parameters\n        ----------\n        cold : bool, optional\n            Compute the area with as-input dimensions instead of thermally-expanded\n        Tc : float, optional\n            Temperature to compute the area at\n        \"\"\"\n        raise NotImplementedError\n\n    def getComponentVolume(self):\n        return self.p.volume\n\n    def setVolume(self, val):\n        raise NotImplementedError\n\n    def setArea(self, val):\n        raise NotImplementedError\n\n    def setTemperature(self, temperatureInC):\n        r\"\"\"\n        Adjust temperature of this component.\n\n        This will cause thermal expansion or contraction of solid or liquid components and will\n        accordingly adjust number densities to conserve mass.\n\n        Liquids still have a number density adjustment, but some mass tends to expand in or out of\n        the bounding area.\n\n        Since some composites have multiple materials in them that thermally expand differently,\n        the axial dimension is generally left unchanged. Hence, this a 2-D thermal expansion.\n\n        Number density change is proportional to mass density change :math:`\\frac{d\\rho}{\\rho}`.\n        A multiplicative factor :math:`f_N` to apply to number densities when going from T to T'\n        is as follows:\n\n        .. 
math::\n\n            N^{\\prime} = N \\cdot f_N \\\\\n            \\frac{dN}{N} = f_N - 1\n\n        Since :math:`\\frac{dN}{N} \\sim\\frac{d\\rho}{\\rho}`, we have:\n\n        .. math::\n\n            f_N  = \\frac{d\\rho}{\\rho} + 1 = \\frac{\\rho^{\\prime}}{\\rho}\n\n        \"\"\"\n        prevTemp, self.temperatureInC = self.temperatureInC, float(temperatureInC)\n        f = self.material.getThermalExpansionDensityReduction(prevTemp, self.temperatureInC)\n        self.changeNDensByFactor(f)\n        self.clearLinkedCache()\n\n    def getNuclides(self):\n        \"\"\"\n        Return nuclides in this component.\n\n        This includes anything that has been specified in here, including trace nuclides.\n        \"\"\"\n        if self.p.nuclides is None:\n            return []\n        return [nucName.decode() for nucName in self.p.nuclides]\n\n    def getNumberDensity(self, nucName):\n        \"\"\"\n        Get the number density of nucName, return zero if it does not exist here.\n\n        Parameters\n        ----------\n        nucName : str\n            Nuclide name\n\n        Returns\n        -------\n        number density : float\n            number density in atoms/bn-cm.\n        \"\"\"\n        i = np.where(self.p.nuclides == nucName.encode())[0]\n        if i.size > 0:\n            return self.p.numberDensities[i[0]]\n        else:\n            return 0.0\n\n    def getNuclideNumberDensities(self, nucNames: list[str]) -> list[float]:\n        \"\"\"Return a list of number densities for the nuc names requested.\"\"\"\n        if isinstance(nucNames, (list, tuple, np.ndarray)):\n            byteNucs = np.asanyarray(nucNames, dtype=\"S6\")\n        else:\n            byteNucs = [nucName.encode() for nucName in nucNames]\n\n        if self.p.numberDensities is None:\n            return np.zeros(len(byteNucs), dtype=np.float64)\n\n        # trivial case where nucNames is the full set of nuclides in the same order\n        if np.array_equal(byteNucs, 
self.p.nuclides):\n            return np.array(self.p.numberDensities)\n\n        if len(byteNucs) < len(self.p.nuclides) / 10:\n            return self._getNumberDensitiesArray(byteNucs)\n\n        nDensDict = dict(zip(self.p.nuclides, self.p.numberDensities))\n        return [nDensDict.get(nuc, 0.0) for nuc in byteNucs]\n\n    def _getNumberDensitiesArray(self, byteNucs):\n        \"\"\"\n        Get number densities using direct array lookup.\n\n        When only a small subset of nuclide number densities are requested, it is\n        likely faster to lookup the index for each nuclide than to recreate the\n        entire dictionary for a lookup.\n\n        Parameters\n        ----------\n        byteNucs : np.ndarray, dtype=\"S6\"\n            List of nuclides for which to retrieve number densities, as encoded byte strings\n        \"\"\"\n        ndens = np.zeros(len(byteNucs), dtype=np.float64)\n        nuclides = self.p.nuclides\n        numberDensities = self.p.numberDensities\n\n        # if it's just a small subset of nuclides, use np.where for direct index lookup\n        for i, nuc in enumerate(byteNucs):\n            j = np.where(nuclides == nuc)[0]\n            if j.size > 0:\n                ndens[i] = numberDensities[j[0]]\n        return ndens\n\n    def _getNdensHelper(self):\n        nucs = self.getNuclides()\n        return dict(zip(nucs, self.p.numberDensities)) if len(nucs) > 0 else {}\n\n    def setName(self, name):\n        \"\"\"Components use name for type and name.\"\"\"\n        composites.Composite.setName(self, name)\n        self.setType(name)\n\n    def setNumberDensity(self, nucName, val):\n        \"\"\"\n        Set heterogeneous number density.\n\n        .. impl:: Setting nuclide fractions.\n            :id: I_ARMI_COMP_NUCLIDE_FRACS0\n            :implements: R_ARMI_COMP_NUCLIDE_FRACS\n\n            The method allows a user or plugin to set the number density of a Component. 
It also\n            indicates to other processes that may depend on a Component's status about this change\n            via the ``assigned`` attribute.\n\n        Parameters\n        ----------\n        nucName : str\n            nuclide to modify\n        val : float\n            Number density to set in atoms/bn-cm (heterogeneous)\n        \"\"\"\n        self.updateNumberDensities({nucName: val})\n\n    def setNumberDensities(self, numberDensities):\n        \"\"\"\n        Set one or more multiple number densities. Clears out any number density not listed.\n\n        .. impl:: Setting nuclide fractions.\n            :id: I_ARMI_COMP_NUCLIDE_FRACS1\n            :implements: R_ARMI_COMP_NUCLIDE_FRACS\n\n            The method allows a user or plugin to set the number densities of a Component. In\n            contrast to the ``setNumberDensity`` method, it sets all densities within a Component.\n\n        Parameters\n        ----------\n        numberDensities : dict\n            nucName: ndens pairs.\n\n        Notes\n        -----\n        We don't just call setNumberDensity for each nuclide because we don't want to call\n        ``getVolumeFractions`` for each nuclide (it's inefficient).\n        \"\"\"\n        self.updateNumberDensities(numberDensities, wipe=True)\n\n    def updateNumberDensities(self, numberDensities, wipe=False):\n        \"\"\"\n        Set one or more multiple number densities. Leaves unlisted number densities alone.\n\n        Parameters\n        ----------\n        numberDensities : dict\n            nucName: ndens pairs.\n        wipe : bool, optional\n            Controls whether the old number densities are wiped. Any nuclide densities not provided\n            in numberDensities will be effectively set to 0.0.\n\n        Notes\n        -----\n        Sometimes volume/dimensions change due to number density change when the material thermal\n        expansion depends on the component's composition (e.g. its plutonium fraction). 
In this\n        case, changing the density will implicitly change the area/volume. Since it is difficult to\n        predict the new dimensions, and perturbation/depletion calculations almost exclusively\n        assume constant volume, the densities sent are automatically adjusted to conserve mass with\n        the original dimensions. That is, the component's densities are not exactly as passed, but\n        whatever they would need to be to preserve volume integrated number densities (moles) from\n        the pre-perturbed component's volume/dimensions.\n\n        This has no effect if the material thermal expansion has no dependence on component\n        composition. If this is not desired, `self.p.numberDensities` and `self.p.nuclides` can be\n        set directly.\n        \"\"\"\n        # prepare to change the densities with knowledge that dims could change due to material\n        # thermal expansion dependence on composition\n        if self.p.numberDensities is not None and self.p.numberDensities.size > 0:\n            dLLprev = self.material.linearExpansionPercent(Tc=self.temperatureInC) / 100.0\n            materialExpansion = True\n        else:\n            dLLprev = 0.0\n            materialExpansion = False\n\n        try:\n            vol = self.getVolume()\n        except (AttributeError, TypeError):\n            # Either no parent to get height or parent's height is None. 
Which would be\n            # AttributeError and TypeError respectively, but other errors could be possible.\n            vol = None\n            area = self.getArea()\n\n        # change the densities\n        if wipe:\n            self.p.nuclides = np.asanyarray(list(numberDensities.keys()), dtype=\"S6\")\n            self.p.numberDensities = np.array(list(numberDensities.values()))\n        else:\n            newNucs = []\n            newNumDens = []\n            nucs = self.p.nuclides\n            ndens = self.p.numberDensities\n            for nucName, dens in numberDensities.items():\n                i = np.where(nucs == nucName.encode())[0]\n                if i.size > 0:\n                    ndens[i[0]] = dens\n                else:\n                    newNucs.append(nucName.encode())\n                    newNumDens.append(dens)\n            self.p.nuclides = np.append(nucs, newNucs)\n            self.p.numberDensities = np.append(ndens, newNumDens)\n\n        # check if thermal expansion changed\n        dLLnew = self.material.linearExpansionPercent(Tc=self.temperatureInC) / 100.0\n        if dLLprev != dLLnew and materialExpansion:\n            # the thermal expansion changed so the volume change is happening at same time as\n            # density change was requested. 
Attempt to make mass consistent with old dims (since the\n            # density change was for the old volume and otherwise mass wouldn't be conserved).\n\n            self.clearLinkedCache()  # enable recalculation of volume, otherwise it uses cached\n            if vol is not None:\n                factor = vol / self.getVolume()\n            else:\n                factor = area / self.getArea()\n            self.changeNDensByFactor(factor)\n\n        # since we are updating the object the param points to but not the param itself, we have to\n        # inform the param system to flag it as modified so it syncs during ``syncMpiState``.\n        self.p.assigned = parameters.SINCE_ANYTHING\n        self.p.paramDefs[\"numberDensities\"].assigned = parameters.SINCE_ANYTHING\n\n    def changeNDensByFactor(self, factor):\n        \"\"\"Change the number density of all nuclides within the object by a multiplicative factor.\"\"\"\n        if self.p.numberDensities is not None:\n            self.p.numberDensities *= factor\n        self._changeOtherDensParamsByFactor(factor)\n\n    def _changeOtherDensParamsByFactor(self, factor):\n        \"\"\"Change the number density of all nuclides within the object by a multiplicative factor.\"\"\"\n        if self.p.detailedNDens is not None:\n            self.p.detailedNDens *= factor\n        # Update pinNDens\n        if self.p.pinNDens is not None:\n            self.p.pinNDens *= factor\n\n    def getEnrichment(self):\n        \"\"\"Get the mass enrichment of this component, as defined by the material.\"\"\"\n        return self.getMassEnrichment()\n\n    def getMassEnrichment(self):\n        \"\"\"\n        Get the mass enrichment of this component, as defined by the material.\n\n        Notes\n        -----\n        Getting mass enrichment on any level higher than this is ambiguous because you may have\n        enriched boron in one pin and uranium in another and blending those doesn't make sense.\n        \"\"\"\n        if 
self.material.enrichedNuclide is None:\n            raise ValueError(f\"Cannot get enrichment of {self.material} because `enrichedNuclide` is not defined.\")\n        enrichedNuclide = self.nuclideBases.byName[self.material.enrichedNuclide]\n        baselineNucNames = [nb.name for nb in enrichedNuclide.element.nuclides]\n        massFracs = self.getMassFracs()\n        massFracEnrichedElement = sum(\n            massFrac for nucName, massFrac in massFracs.items() if nucName in baselineNucNames\n        )\n        try:\n            return massFracs.get(self.material.enrichedNuclide, 0.0) / massFracEnrichedElement\n        except ZeroDivisionError:\n            return 0.0\n\n    def getMass(self, nuclideNames: Union[None, str, list[str]] = None) -> float:\n        r\"\"\"\n        Determine the mass in grams of nuclide(s) and/or elements in this object.\n\n        .. math::\n\n            \\text{mass} = \\frac{\\sum_i (N_i \\cdot V \\cdot  A_i)}{N_A \\cdot 10^{-24}}\n\n        where\n            :math:`N_i` is number density of nuclide i in (1/bn-cm),\n\n            :math:`V` is the object volume in :math:`cm^3`\n\n            :math:`N_A` is Avogadro's number in 1/moles,\n\n            :math:`A_i` is the atomic weight of of nuclide i in grams/mole\n\n        Parameters\n        ----------\n        nuclideNames : str, optional\n            The nuclide/element specifier to get the mass of in the object.\n            If omitted, total mass is returned.\n\n        Returns\n        -------\n        mass : float\n            The mass in grams.\n        \"\"\"\n        volume = self.getVolume() / (self.parent.getSymmetryFactor() if self.parent else 1.0)\n        if nuclideNames is None:\n            nDens = self._getNdensHelper()\n        else:\n            nuclideNames = self._getNuclidesFromSpecifier(nuclideNames)\n            # densities comes from self.p.numberDensities\n            if len(nuclideNames) > 0:\n                densities = 
self.getNuclideNumberDensities(nuclideNames)\n                nDens = dict(zip(nuclideNames, densities))\n            else:\n                nDens = {}\n        return densityTools.calculateMassDensity(nDens) * volume\n\n    def setDimension(self, key, val, retainLink=False, cold=True):\n        \"\"\"\n        Set a single dimension on the component.\n\n        .. impl:: Set a Component dimension, considering thermal expansion.\n            :id: I_ARMI_COMP_EXPANSION1\n            :implements: R_ARMI_COMP_EXPANSION\n\n            Dimensions should be set considering the impact of thermal expansion. This\n            method allows for a user or plugin to set a dimension and indicate if the\n            dimension is for a cold configuration or not. If it is not for a cold\n            configuration, the thermal expansion factor is considered when setting the dimension.\n\n            If the ``retainLink`` argument is ``True``, any Components linked to this one will also\n            have its dimensions changed consistently. After a dimension is updated, the\n            ``clearLinkedCache`` method is called which sets the volume of this Component to\n            ``None``. 
This ensures that when the volume is next accessed it is recomputed using the\n            updated dimensions.\n\n        Parameters\n        ----------\n        key : str\n            The dimension key (op, ip, mult, etc.)\n        val : float\n            The value to set on the dimension\n        retainLink : bool, optional\n            If True, the val will be applied to the dimension of linked component which indirectly\n            changes this component's dimensions.\n        cold : bool, optional\n            If True sets the component cold dimension to the specified value.\n        \"\"\"\n        if not key:\n            return\n        if retainLink and self.dimensionIsLinked(key):\n            linkedComp, linkedDimName = self.p[key]\n            linkedComp.setDimension(linkedDimName, val, cold=cold)\n        else:\n            if not cold:\n                expansionFactor = self.getThermalExpansionFactor() if key in self.THERMAL_EXPANSION_DIMS else 1.0\n                val /= expansionFactor\n            self.p[key] = val\n\n        self.clearLinkedCache()\n\n    def getDimension(self, key, Tc=None, cold=False):\n        \"\"\"\n        Return a specific dimension at temperature as determined by key.\n\n        .. impl:: Retrieve a dimension at a specified temperature.\n            :id: I_ARMI_COMP_DIMS\n            :implements: R_ARMI_COMP_DIMS\n\n            Due to thermal expansion, Component dimensions depend on their temperature. This method\n            retrieves a dimension from the Component at a particular temperature, if provided. 
If\n            the Component is a LinkedComponent then the dimensions are resolved to ensure that any\n            thermal expansion that has occurred to the Components that the LinkedComponent depends\n            on is reflected in the returned dimension.\n\n        Parameters\n        ----------\n        key : str\n            The dimension key (op, ip, mult, etc.)\n        Tc : float\n            Temperature in C. If None, the current temperature of the component is used.\n        cold : bool, optional\n            If true, will return cold (input) value of the requested dimension\n        \"\"\"\n        dimension = self.p[key]\n\n        if isinstance(dimension, _DimensionLink):\n            return dimension.resolveDimension(Tc=Tc, cold=cold)\n\n        if not dimension or cold or key not in self.THERMAL_EXPANSION_DIMS:\n            return dimension\n\n        return self.getThermalExpansionFactor(Tc) * dimension\n\n    def getBoundingCircleOuterDiameter(self, Tc=None, cold=False):\n        \"\"\"Abstract bounding circle method that should be overwritten by each shape subclass.\"\"\"\n        raise NotImplementedError\n\n    def getCircleInnerDiameter(self, Tc=None, cold=False):\n        \"\"\"Abstract inner circle method that should be overwritten by each shape subclass.\n\n        Notes\n        -----\n        The inner circle is meaningful for annular shapes, i.e., circle with non-zero ID, hexagon\n        with non-zero IP, etc. 
For shapes with corners (e.g., hexagon, rectangle, etc) the inner\n        circle intersects the corners of the inner bound, opposed to intersecting the \"flats\".\n        \"\"\"\n        raise NotImplementedError\n\n    def dimensionIsLinked(self, key):\n        \"\"\"True if a the specified dimension is linked to another dimension.\"\"\"\n        return key in self.p and isinstance(self.p[key], _DimensionLink)\n\n    def getDimensionNamesLinkedTo(self, otherComponent):\n        \"\"\"Find dimension names linked to the other component in this component.\"\"\"\n        dimNames = []\n        for dimName in self.DIMENSION_NAMES:\n            isLinked = self.dimensionIsLinked(dimName)\n            if isLinked and self.p[dimName].getLinkedComponent() is otherComponent:\n                dimNames.append((dimName, self.p[dimName][1]))\n        return dimNames\n\n    def clearLinkedCache(self):\n        \"\"\"Clear this cache and any other dependent volumes.\"\"\"\n        self.clearCache()\n        if self.parent:\n            # changes in dimensions can affect cached variables such as pitch\n            self.parent.cached = {}\n            for c in self.getLinkedComponents():\n                # no clearCache since parent already updated derivedMustUpdate in self.clearCache()\n                c.p.volume = None\n\n    def getLinkedComponents(self):\n        \"\"\"Find other components that are linked to this component.\"\"\"\n        dependents = []\n        for child in self.parent:\n            for dimName in child.DIMENSION_NAMES:\n                isLinked = child.dimensionIsLinked(dimName)\n                if isLinked and child.p[dimName].getLinkedComponent() is self:\n                    dependents.append(child)\n        return dependents\n\n    def getThermalExpansionFactor(self, Tc=None, T0=None):\n        \"\"\"\n        Retrieves the material thermal expansion fraction.\n\n        .. 
impl:: Calculates radial thermal expansion factor.\n            :id: I_ARMI_COMP_EXPANSION0\n            :implements: R_ARMI_COMP_EXPANSION\n\n            This method enables the calculation of the thermal expansion factor for a given\n            material. If the material is solid, the difference between ``T0`` and ``Tc`` is used to\n            calculate the thermal expansion factor. If a solid material does not have a linear\n            expansion factor defined and the temperature difference is greater than a predetermined\n            tolerance, an error is raised. Thermal expansion of fluids or custom materials is\n            neglected, currently.\n\n        Parameters\n        ----------\n        Tc : float, optional\n            Adjusted temperature to get the thermal expansion factor at relative to the reference temperature\n\n        Returns\n        -------\n        Thermal expansion factor as a percentage (1.0 + dLL), where dLL is the linear expansion factor.\n        \"\"\"\n        if isinstance(self.material, (material.Fluid, custom.Custom)):\n            return 1.0  # No thermal expansion of fluids or custom materials\n\n        if T0 is None:\n            T0 = self.inputTemperatureInC\n        if Tc is None:\n            Tc = self.temperatureInC\n\n        dLL = self.material.linearExpansionFactor(Tc=Tc, T0=T0)\n        if not dLL and abs(Tc - T0) > self._TOLERANCE:\n            runLog.error(\n                f\"Linear expansion percent may not be implemented in the {self.material} material class.\\n\"\n                \"This method needs to be implemented on the material to allow thermal expansion.\"\n                f\".\\nReference temperature: {T0}, Adjusted temperature: {Tc}, Temperature difference: {(Tc - T0)}, \"\n                f\"Specified tolerance: {self._TOLERANCE}\",\n                single=True,\n            )\n            raise RuntimeError(\n                f\"Linear expansion percent may not be implemented in the {self.material} 
material class.\"\n            )\n        return 1.0 + dLL\n\n    def printContents(self, includeNuclides=True):\n        \"\"\"Print a listing of the dimensions and composition of this component.\"\"\"\n        runLog.important(self)\n        runLog.important(self.setDimensionReport())\n        if includeNuclides:\n            for nuc in self.getNuclides():\n                runLog.important(f\"{nuc:10s} {self.getNumberDensity(nuc):.7e}\")\n\n    def setDimensionReport(self):\n        \"\"\"Gives a report of the dimensions of this component.\"\"\"\n        reportGroup = None\n        for componentType, componentReport in self._COMP_REPORT_GROUPS.items():\n            if componentType in self.getName():\n                reportGroup = componentReport\n                break\n        if not reportGroup:\n            return f\"No report group designated for {self.getName()} component.\"\n        reportGroup.header = [\n            \"\",\n            f\"Tcold ({self.inputTemperatureInC})\",\n            f\"Thot ({self.temperatureInC})\",\n        ]\n\n        dimensions = {\n            k: self.p[k] for k in self.DIMENSION_NAMES if k not in (\"modArea\", \"area\") and self.p[k] is not None\n        }  # py3 cannot format None\n        # Set component name and material\n        report.setData(\"Name\", [self.getName(), \"\"], reportGroup)\n        report.setData(\"Material\", [self.getProperties().name, \"\"], reportGroup)\n\n        for dimName in dimensions:\n            niceName = _NICE_DIM_NAMES.get(dimName, dimName)\n            refVal = self.getDimension(dimName, cold=True)\n            hotVal = self.getDimension(dimName)\n            try:\n                report.setData(niceName, [refVal, hotVal], reportGroup)\n            except ValueError:\n                runLog.warning(f\"{self} has an invalid dimension for {dimName}. 
refVal: {refVal} hotVal: {hotVal}\")\n\n        # calculate thickness if applicable.\n        suffix = None\n        if \"id\" in dimensions:\n            suffix = \"d\"\n        elif \"ip\" in dimensions:\n            suffix = \"p\"\n\n        if suffix:\n            coldIn = self.getDimension(f\"i{suffix}\", cold=True)\n            hotIn = self.getDimension(f\"i{suffix}\")\n            coldOut = self.getDimension(f\"o{suffix}\", cold=True)\n            hotOut = self.getDimension(f\"o{suffix}\")\n\n        if suffix and coldIn > 0.0:\n            hotThick = (hotOut - hotIn) / 2.0\n            coldThick = (coldOut - coldIn) / 2.0\n            vals = (\n                \"Thickness (cm)\",\n                f\"{coldThick:.7f}\",\n                f\"{hotThick:.7f}\",\n            )\n            report.setData(vals[0], [vals[1], vals[2]], reportGroup)\n\n        return report.ALL[reportGroup]\n\n    def updateDims(self, key=\"\", val=None):\n        self.setDimension(key, val)\n\n    def mergeNuclidesInto(self, compToMergeWith):\n        \"\"\"\n        Set another component's number densities to reflect this one merged into it.\n\n        You must also modify the geometry of the other component and remove this component to\n        conserve atoms.\n        \"\"\"\n        # record pre-merged number densities and areas\n        aMe = self.getArea()\n        # if negative-area gap, treat is as 0.0 and return\n        if aMe <= 0.0:\n            return\n        aMerge = compToMergeWith.getArea()\n        meNDens = {nucName: aMe / aMerge * self.getNumberDensity(nucName) for nucName in self.getNuclides()}\n        mergeNDens = {nucName: compToMergeWith.getNumberDensity(nucName) for nucName in compToMergeWith.getNuclides()}\n        # set the new homogenized number densities from both. 
Allow overlapping nuclides.\n        for nucName in set(meNDens) | set(mergeNDens):\n            compToMergeWith.setNumberDensity(nucName, (meNDens.get(nucName, 0.0) + mergeNDens.get(nucName, 0.0)))\n\n    def iterComponents(self, typeSpec=None, exact=False):\n        if self.hasFlags(typeSpec, exact):\n            yield self\n\n    def backUp(self):\n        \"\"\"\n        Create and store a backup of the state.\n\n        This needed to be overridden due to linked components which actually have a parameter value\n        of another ARMI component.\n        \"\"\"\n        linkedDims = self._getLinkedDimsAndValues()\n        composites.Composite.backUp(self)\n        self._restoreLinkedDims(linkedDims)\n\n    def restoreBackup(self, paramsToApply):\n        \"\"\"\n        Restore the parameters from previously created backup.\n\n        This needed to be overridden due to linked components which actually have a parameter value\n        of another ARMI component.\n        \"\"\"\n        linkedDims = self._getLinkedDimsAndValues()\n        composites.Composite.restoreBackup(self, paramsToApply)\n        self._restoreLinkedDims(linkedDims)\n\n    def _getLinkedDimsAndValues(self):\n        linkedDims = []\n\n        for dimName in self.DIMENSION_NAMES:\n            # backUp and restore are called in tight loops, getting the value and checking here is\n            # faster than calling self.dimensionIsLinked because that requires and extra\n            # p.__getitem__\n            try:\n                val = self.p[dimName]\n            except Exception:\n                raise RuntimeError(\n                    f\"Could not find parameter {dimName} defined for {self}. 
Is the desired Component class?\"\n                )\n            if isinstance(val, _DimensionLink):\n                linkedDims.append((self.p.paramDefs[dimName].fieldName, val))\n                del self.p[dimName]\n\n        return linkedDims\n\n    def _restoreLinkedDims(self, linkedDims):\n        # force update without setting the \".assigned\" flag\n        for fieldName, value in linkedDims:\n            setattr(self.p, fieldName, value)\n\n    def adjustMassEnrichment(self, massFraction):\n        \"\"\"\n        Change the mass fraction of this component.\n\n        The nuclides to adjust are defined by the material. This changes whichever nuclides are to\n        be enriched vs. the baseline nuclides of that element while holding mass constant. For\n        example it might adjust boron or uranium enrichment.\n\n        Conceptually, you could hold number of atoms, volume, or mass constant during this\n        operation. Historically ARMI adjusted mass fractions which was meant to keep mass constant.\n\n        If you have 20 mass % Uranium and adjust the enrichment, you will still have 20% Uranium\n        mass. 
But, the actual mass actually might change a bit because the enriched nuclide weighs\n        less.\n\n        See Also\n        --------\n        Material.enrichedNuclide\n        \"\"\"\n        if self.material.enrichedNuclide is None:\n            raise ValueError(f\"Cannot adjust enrichment of {self.material} because `enrichedNuclide` is not defined.\")\n        enrichedNuclide = self.nuclideBases.byName[self.material.enrichedNuclide]\n        baselineNucNames = [nb.name for nb in enrichedNuclide.element.nuclides]\n        massFracsBefore = self.getMassFracs()\n        massFracEnrichedElement = sum(\n            massFrac for nucName, massFrac in massFracsBefore.items() if nucName in baselineNucNames\n        )\n\n        adjustedMassFracs = {self.material.enrichedNuclide: massFracEnrichedElement * massFraction}\n\n        baselineNucNames.remove(self.material.enrichedNuclide)\n        massFracTotalUnenriched = massFracEnrichedElement - massFracsBefore[self.material.enrichedNuclide]\n        for baseNucName in baselineNucNames:\n            # maintain relative mass fractions of baseline nuclides.\n            frac = massFracsBefore.get(baseNucName, 0.0) / massFracTotalUnenriched\n            if not frac:\n                continue\n            adjustedMassFracs[baseNucName] = massFracEnrichedElement * (1 - massFraction) * frac\n        self.setMassFracs(adjustedMassFracs)\n\n    def getMgFlux(self, adjoint=False, average=False, gamma=False):\n        \"\"\"\n        Return the multigroup neutron flux in [n/cm^2/s].\n\n        The first entry is the first energy group (fastest neutrons). Each additional group is the next energy group, as\n        set in the ISOTXS library.\n\n        Parameters\n        ----------\n        adjoint : bool, optional\n            Return adjoint flux instead of real\n        average : bool, optional\n            If True, will return average flux between latest and previous. 
Does not work for pin detailed.\n        gamma : bool, optional\n            Whether to return the neutron flux or the gamma flux.\n\n        Returns\n        -------\n        flux : np.ndarray\n            multigroup neutron flux in [n/cm^2/s]\n        \"\"\"\n        if average:\n            raise NotImplementedError(\"Component has no method for producing average MG flux -- tryusing blocks\")\n\n        volume = self.getVolume() / self.parent.getSymmetryFactor()\n        return self.getIntegratedMgFlux(adjoint=adjoint, gamma=gamma) / volume\n\n    def getIntegratedMgFlux(self, adjoint=False, gamma=False):\n        \"\"\"\n        Return the multigroup neutron tracklength in [n-cm/s].\n\n        The first entry is the first energy group (fastest neutrons). Each additional group is the\n        next energy group, as set in the ISOTXS library.\n\n        Parameters\n        ----------\n        adjoint : bool, optional\n            Return adjoint flux instead of real\n        gamma : bool, optional\n            Whether to return the neutron flux or the gamma flux.\n\n        Returns\n        -------\n        integratedFlux : multigroup neutron tracklength in [n-cm/s]\n        \"\"\"\n        if self.p.pinNum is None:\n            # no pin-level flux is available\n            if not self.parent:\n                return np.zeros(1)\n\n            volumeFraction = (self.getVolume() / self.parent.getSymmetryFactor()) / self.parent.getVolume()\n            return volumeFraction * self.parent.getIntegratedMgFlux(adjoint, gamma)\n\n        # pin-level flux is available. 
Note that it is NOT integrated on the param level.\n        if gamma:\n            if adjoint:\n                raise ValueError(\"Adjoint gamma flux is currently unsupported.\")\n            else:\n                pinFluxes = self.parent.p.pinMgFluxesGamma\n        else:\n            if adjoint:\n                pinFluxes = self.parent.p.pinMgFluxesAdj\n            else:\n                pinFluxes = self.parent.p.pinMgFluxes\n\n        return pinFluxes[self.p.pinNum - 1] * self.getVolume() / self.parent.getSymmetryFactor()\n\n    def getPinMgFluxes(self, adjoint: bool = False, gamma: bool = False) -> np.ndarray[tuple[int, int], float]:\n        \"\"\"Retrieves the pin multigroup fluxes for the component.\n\n        Parameters\n        ----------\n        adjoint : bool, optional\n            Return adjoint flux instead of real\n        gamma : bool, optional\n            Whether to return the neutron flux or the gamma flux.\n\n        Returns\n        -------\n        np.ndarray\n            A ``(N, nGroup)`` array of pin multigroup fluxes, where ``N`` is the equivalent to the\n            multiplicity of the component (``self.p.mult``) and ``nGroup`` is the number of energy\n            groups of the flux.\n\n        Raises\n        ------\n        ValueError\n            If the location(s) of the component are not aligned with pin indices from the block.\n            This would happen if this component is not actually a pin.\n        \"\"\"\n        # If we get a None, for a non-pin thing, the exception block at the bottom will catch\n        # that and inform the user. 
so we don't need to add extra guard rails here\n        indexMap = self.getPinIndices()\n\n        # Get the parameter name we are trying to retrieve\n        if gamma:\n            if adjoint:\n                raise ValueError(\"Adjoint gamma flux is currently unsupported.\")\n            else:\n                param = \"pinMgFluxesGamma\"\n        else:\n            if adjoint:\n                param = \"pinMgFluxesAdj\"\n            else:\n                param = \"pinMgFluxes\"\n\n        try:\n            return self.parent.p[param][indexMap]\n        except Exception as ee:\n            msg = f\"Failure getting {param} from {self} via parent {self.parent}\"\n            runLog.error(msg)\n            runLog.error(ee)\n            raise ValueError(msg) from ee\n\n    def getPinIndices(self) -> np.ndarray[tuple[int], np.uint16]:\n        \"\"\"Find the indices for the locations where this component can be found in the block.\n\n        Returns\n        -------\n        np.array[int]\n            The indices in various Block-level pin methods,\n            e.g., :meth:`armi.reactor.blocks.Block.getPinLocations`, that correspond to\n            this component.\n\n        Raises\n        ------\n        ValueError\n            If this does not have pin indices. 
This can be the case for components that live\n            on blocks without spatial grids, or if they do not share lattice sites, via\n            ``spatialLocator`` with other pins.\n\n        See Also\n        --------\n        :meth`:armi.reactor.blocks.HexBlock.assignPinIndices`\n        \"\"\"\n        ix = self.p.pinIndices\n        if isinstance(ix, np.ndarray):\n            return ix\n        # Find a sibling that has pin indices and has the same spatial locator as us\n        withPinIndices = (c for c in self.parent if c is not self and c.p.pinIndices is not None)\n        for sibling in withPinIndices:\n            if sibling.spatialLocator == self.spatialLocator:\n                return sibling.p.pinIndices\n        msg = f\"{self} on {self.parent} has no pin indices.\"\n        raise ValueError(msg)\n\n    def density(self) -> float:\n        \"\"\"Returns the mass density of the object in g/cc.\"\"\"\n        density = composites.Composite.density(self)\n\n        if not density and not isinstance(self.material, void.Void):\n            # possible that there are no nuclides in this component yet. In that case,\n            # defer to Material. Material.density is wrapped to warn if it's attached\n            # to a parent. Avoid that by calling the inner function directly\n            density = self.material.density.__wrapped__(self.material, Tc=self.temperatureInC)\n\n        return density\n\n    def getLumpedFissionProductCollection(self):\n        \"\"\"\n        Get collection of LFP objects. 
Will work for global or block-level LFP models.\n\n        Returns\n        -------\n        lfps : LumpedFissionProduct\n            lfpName keys , lfp object values\n\n        See Also\n        --------\n        armi.physics.neutronics.fissionProductModel.lumpedFissionProduct.LumpedFissionProduct\n        \"\"\"\n        if self.parent:\n            return self.parent.getLumpedFissionProductCollection()\n        else:\n            return composites.ArmiObject.getLumpedFissionProductCollection(self)\n\n    def getMicroSuffix(self):\n        return self.parent.getMicroSuffix()\n\n    def getPitchData(self):\n        \"\"\"\n        Return the pitch data that should be used to determine block pitch.\n\n        Notes\n        -----\n        This pitch data should only be used if this is the pitch defining component in a block. The\n        block is responsible for determining which component in it is the pitch defining component.\n        \"\"\"\n        raise NotImplementedError(\n            f\"Method not implemented on component {self}. \"\n            \"Please implement if this component type can be a pitch defining component.\"\n        )\n\n    def getFuelMass(self) -> float:\n        \"\"\"Return the mass in grams if this is a fueled component.\"\"\"\n        return self.getMass() if self.hasFlags(flags.Flags.FUEL) else 0.0\n\n    def finalizeLoadingFromDB(self):\n        \"\"\"Apply any final actions after creating the component from database.\n\n        This should **only** be called internally by the database loader. Otherwise some properties\n        could be doubly applied.\n\n        This exists because the theoretical density is initially defined as a material modification,\n        and then stored as a Material attribute. When reading from blueprints, the blueprint loader\n        sets the theoretical density parameter from the Material attribute. Component parameters are\n        also set when reading from the database. 
But, we need to set the Material attribute so\n        routines that fetch a material's density property account for the theoretical density.\n        \"\"\"\n        self.material.adjustTD(self.p.theoreticalDensityFrac)\n\n\nclass ShapedComponent(Component):\n    \"\"\"A component with well-defined dimensions.\"\"\"\n"
  },
  {
    "path": "armi/reactor/components/componentParameters.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Component parameter definitions.\"\"\"\n\nimport numpy as np\n\nfrom armi.reactor import parameters\nfrom armi.reactor.parameters import ParamLocation\nfrom armi.reactor.parameters.parameterDefinitions import isNumpyArray, isNumpyF32Array\nfrom armi.utils import units\n\n\ndef getComponentParameterDefinitions():\n    \"\"\"Return the base Component parameters.\"\"\"\n    pDefs = parameters.ParameterDefinitionCollection()\n    with pDefs.createBuilder(location=ParamLocation.AVERAGE, saveToDB=True) as pb:\n        pb.defParam(\"volume\", units=f\"{units.CM}^3\", description=\"Volume of this object.\")\n\n        pb.defParam(\n            \"area\",\n            units=f\"{units.CM}^2\",\n            description=\"Cross sectional area of this component.\",\n        )\n\n        pb.defParam(\n            \"mult\",\n            units=units.UNITLESS,\n            description=\"The multiplicity of this component, i.e. 
how many of them there are.\",\n            default=1,\n        )\n\n        pb.defParam(\n            \"mergeWith\",\n            units=units.UNITLESS,\n            description=\"Label of other component to merge with\",\n        )\n\n        pb.defParam(\n            \"type\",\n            units=units.UNITLESS,\n            description=\"The name of this object as input on the blueprints\",\n        )\n\n        pb.defParam(\n            \"temperatureInC\",\n            units=units.DEGC,\n            description=\"Component temperature in {}\".format(units.DEGC),\n        )\n\n        pb.defParam(\n            \"numberDensities\",\n            setter=isNumpyArray(\"numberDensities\"),\n            units=f\"#/(bn*{units.CM})\",\n            description=\"Number densities of each nuclide.\",\n        )\n\n        pb.defParam(\n            \"nuclides\",\n            setter=isNumpyArray(\"nuclides\"),\n            units=units.UNITLESS,\n            description=\"Nuclide names corresponding to numberDensities array.\",\n        )\n\n        pb.defParam(\n            \"detailedNDens\",\n            setter=isNumpyArray(\"detailedNDens\"),\n            units=f\"atoms/(bn*{units.CM})\",\n            description=(\n                \"High-fidelity number density vector with up to thousands of nuclides. \"\n                \"Used in high-fi depletion runs where low-fi depletion may also be occurring. 
\"\n                \"This param keeps the hi-fi and low-fi depletion values from interfering.\"\n            ),\n            saveToDB=True,\n            default=None,\n        )\n\n        pb.defParam(\n            \"pinNDens\",\n            setter=isNumpyF32Array(\"pinNDens\"),\n            units=f\"atoms/(bn*{units.CM})\",\n            description=\"Pin-wise number densities of each nuclide.\",\n            location=ParamLocation.AVERAGE,\n            saveToDB=True,\n            categories=[\"depletion\", parameters.Category.pinQuantities],\n            default=None,\n        )\n\n        pb.defParam(\n            \"percentBu\",\n            units=f\"{units.PERCENT_FIMA}\",\n            description=\"Burnup as a percentage of initial (heavy) metal atoms.\",\n            default=0.0,\n        )\n\n        pb.defParam(\n            \"pinPercentBu\",\n            setter=isNumpyArray(\"pinPercentBu\"),\n            units=units.PERCENT_FIMA,\n            description=\"Pin-wise burnup as a percentage of initial (heavy) metal atoms.\",\n            default=None,\n        )\n\n        pb.defParam(\n            \"buRate\",\n            units=f\"{units.PERCENT_FIMA}/{units.DAYS}\",\n            # This is very related to power, but normalized to %FIMA.\n            description=(\n                \"Current rate of burnup accumulation. 
Useful for estimating times when burnup limits may be exceeded.\"\n            ),\n        )\n\n        pb.defParam(\n            \"enrichmentBOL\",\n            units=units.UNITLESS,\n            description=\"Enrichment during fabrication (mass fraction)\",\n            default=0.0,\n        )\n\n        pb.defParam(\n            \"massHmBOL\",\n            units=units.GRAMS,\n            description=\"Mass of heavy metal at BOL\",\n            default=None,\n        )\n\n        pb.defParam(\n            \"customIsotopicsName\",\n            units=units.UNITLESS,\n            description=\"Label of isotopics applied to this component.\",\n        )\n\n        pb.defParam(\n            \"modArea\",\n            units=units.UNITLESS,\n            description=\"A (component, operation) tuple used to add/subtract area (in \"\n            \"cm^2) from another components area. See c.getArea()\",\n        )\n\n        pb.defParam(\n            \"zrFrac\",\n            units=units.UNITLESS,\n            description=\"Original Zr frac of this, used for material properties.\",\n        )\n\n        pb.defParam(\n            \"pinNum\",\n            units=units.UNITLESS,\n            description=\"Pin number of this component in some mesh. 
Starts at 1.\",\n            default=None,\n        )\n\n        def _assignTDFrac(self, val):\n            if val > 1 or val < 0:\n                raise ValueError(f\"Theoretical density fraction must be in range [0,1], got {val}\")\n            self._p_theoreticalDensityFrac = val\n\n        pb.defParam(\n            \"theoreticalDensityFrac\",\n            description=(\n                \"Fractional value between zero and one, inclusive, for the theoretical density \"\n                \"of the material stored on this component.\"\n            ),\n            units=units.UNITLESS,\n            default=1,\n            setter=_assignTDFrac,\n        )\n\n        pb.defParam(\n            \"molesHmBOL\",\n            units=units.MOLES,\n            default=0.0,\n            description=\"Total number of moles of heavy metal at BOL.\",\n        )\n\n        def _validatePinIndices(self, val):\n            if val is not None:\n                # holds [0, 65_535] so at most, 65_535 pins per block\n                self._p_pinIndices = np.array(val, dtype=np.uint16)\n            else:\n                self._p_pinIndices = None\n\n        pb.defParam(\n            \"pinIndices\",\n            default=None,\n            description=(\n                \"Indices within data arrays where values for this component are stored. \"\n                \"The array is zero indexed and structured such that the j-th pin on this \"\n                \"component can be found at ``Block.getPinLocations()[pinIndices[j]]``. 
\"\n            ),\n            units=units.UNITLESS,\n            setter=_validatePinIndices,\n        )\n\n    return pDefs\n\n\ndef getCircleParameterDefinitions():\n    \"\"\"Return parameters for Circle.\"\"\"\n    pDefs = parameters.ParameterDefinitionCollection()\n\n    with pDefs.createBuilder(location=ParamLocation.AVERAGE, saveToDB=True) as pb:\n        pb.defParam(\"od\", units=units.CM, description=\"Outer diameter\")\n\n        pb.defParam(\"id\", units=units.CM, description=\"Inner diameter\", default=0.0)\n\n        pb.defParam(\"op\", units=units.CM, description=\"Outer pitch\")\n\n    return pDefs\n\n\ndef getHexagonParameterDefinitions():\n    \"\"\"Return parameters for Hexagon.\"\"\"\n    pDefs = parameters.ParameterDefinitionCollection()\n    with pDefs.createBuilder(location=ParamLocation.AVERAGE, saveToDB=True) as pb:\n        pb.defParam(\"ip\", units=units.CM, description=\"Inner pitch\", default=0.0)\n\n        pb.defParam(\"op\", units=units.CM, description=\"Outer pitch\")\n\n    return pDefs\n\n\ndef getHoledHexagonParameterDefinitions():\n    \"\"\"Return parameters for HoledHexagon.\"\"\"\n    pDefs = parameters.ParameterDefinitionCollection()\n    with pDefs.createBuilder(location=ParamLocation.AVERAGE, saveToDB=True) as pb:\n        pb.defParam(\"holeOD\", units=units.CM, description=\"Diameter of interior hole(s)\")\n\n        pb.defParam(\"nHoles\", units=units.UNITLESS, description=\"Number of interior holes\")\n\n        pb.defParam(\n            \"holeRadFromCenter\",\n            units=units.CM,\n            description=\"Distance from the center of the hexagon to the center of the holes assuming the hole centers \"\n            \"all lie on a circle.\",\n            default=0.0,\n        )\n\n    return pDefs\n\n\ndef getHexHoledCircleParameterDefinitions():\n    \"\"\"Return parameters for HexHoledCircle.\"\"\"\n    pDefs = parameters.ParameterDefinitionCollection()\n    with 
pDefs.createBuilder(location=ParamLocation.AVERAGE, saveToDB=True) as pb:\n        pb.defParam(\"holeOP\", units=units.CM, description=\"Pitch of interior hole\")\n\n    return pDefs\n\n\ndef getFilletedHexagonParameterDefinitions():\n    \"\"\"Return parameters for FilletedHexagon.\"\"\"\n    pDefs = parameters.ParameterDefinitionCollection()\n    with pDefs.createBuilder(location=ParamLocation.AVERAGE, saveToDB=True) as pb:\n        pb.defParam(\"iR\", units=units.CM, description=\"Radius of curvature of the inner corners\")\n        pb.defParam(\"oR\", units=units.CM, description=\"Radius of curvature of the outer corners\")\n\n    return pDefs\n\n\ndef getHoledRectangleParameterDefinitions():\n    \"\"\"Return parameters for HoledRectangle.\"\"\"\n    pDefs = parameters.ParameterDefinitionCollection()\n    with pDefs.createBuilder(location=ParamLocation.AVERAGE, saveToDB=True) as pb:\n        pb.defParam(\"holeOD\", units=units.CM, description=\"Diameter of interior hole\")\n\n    return pDefs\n\n\ndef getHelixParameterDefinitions():\n    \"\"\"Return parameters for Helix.\"\"\"\n    pDefs = parameters.ParameterDefinitionCollection()\n    with pDefs.createBuilder(location=ParamLocation.AVERAGE, saveToDB=True) as pb:\n        pb.defParam(\"od\", units=units.CM, description=\"Outer diameter\")\n\n        pb.defParam(\"id\", units=units.CM, description=\"Inner diameter\", default=0.0)\n\n        pb.defParam(\"op\", units=units.CM, description=\"Outer pitch\")\n\n        pb.defParam(\n            \"axialPitch\",\n            units=units.CM,\n            description=\"Axial pitch of helix in helical shapes.\",\n        )\n\n        pb.defParam(\"helixDiameter\", units=units.CM, description=\"Diameter of helix\")\n\n    return pDefs\n\n\ndef getRectangleParameterDefinitions():\n    \"\"\"Return parameters for Rectangle.\"\"\"\n    pDefs = parameters.ParameterDefinitionCollection()\n    with pDefs.createBuilder(location=ParamLocation.AVERAGE, saveToDB=True) as pb:\n   
     pb.defParam(\"lengthInner\", units=units.CM, description=\"Inner length\")\n\n        pb.defParam(\"lengthOuter\", units=units.CM, description=\"Outer length\")\n\n        pb.defParam(\"widthInner\", units=units.CM, description=\"Inner width\")\n\n        pb.defParam(\"widthOuter\", units=units.CM, description=\"Outer width\")\n\n    return pDefs\n\n\ndef getCubeParameterDefinitions():\n    \"\"\"Return parameters for Cube.\"\"\"\n    pDefs = parameters.ParameterDefinitionCollection()\n    with pDefs.createBuilder(location=ParamLocation.AVERAGE, saveToDB=True) as pb:\n        pb.defParam(\n            \"lengthInner\",\n            units=units.CM,\n            default=0.0,\n            description=\"Inner length dimension (if the cube is hollow).\",\n        )\n\n        pb.defParam(\"lengthOuter\", units=units.CM, description=\"Outermost length dimension\")\n\n        pb.defParam(\n            \"widthInner\",\n            units=units.CM,\n            default=0.0,\n            description=\"Inner width dimension (if the cube is hollow).\",\n        )\n\n        pb.defParam(\"widthOuter\", units=units.CM, description=\"Outermost width dimension\")\n\n        pb.defParam(\n            \"heightInner\",\n            units=units.CM,\n            default=0.0,\n            description=\"Inner height dimension (if the cube is hollow).\",\n        )\n\n        pb.defParam(\"heightOuter\", units=units.CM, description=\"Outermost height dimension\")\n\n    return pDefs\n\n\ndef getTriangleParameterDefinitions():\n    \"\"\"Return parameters for Triangle.\"\"\"\n    pDefs = parameters.ParameterDefinitionCollection()\n    with pDefs.createBuilder(location=ParamLocation.AVERAGE, saveToDB=True) as pb:\n        pb.defParam(\"base\", units=units.CM, description=\"Length of the base of the triangle\")\n\n        pb.defParam(\"height\", units=units.CM, description=\"Height of the triangle\")\n\n    return pDefs\n\n\ndef getUnshapedParameterDefinitions():\n    \"\"\"Return 
parameters for UnshapedComponent.\"\"\"\n    pDefs = parameters.ParameterDefinitionCollection()\n    with pDefs.createBuilder(location=ParamLocation.AVERAGE, saveToDB=True) as pb:\n        pb.defParam(\"op\", units=units.CM, description=\"Outer pitch\")\n\n        pb.defParam(\n            \"userDefinedVolume\",\n            units=f\"{units.CM}^3\",\n            description=\"Volume of this object.\",\n        )\n\n    return pDefs\n\n\ndef getRadialSegmentParameterDefinitions():\n    \"\"\"Return parameters for RadialSegment.\"\"\"\n    pDefs = parameters.ParameterDefinitionCollection()\n    with pDefs.createBuilder(location=ParamLocation.AVERAGE, saveToDB=True) as pb:\n        pb.defParam(\n            \"inner_theta\",\n            units=units.RADIANS,\n            description=\"Starting axial position, in radians.\",\n        )\n\n        pb.defParam(\n            \"outer_theta\",\n            units=units.RADIANS,\n            description=\"Ending axial position, in radians.\",\n        )\n\n        pb.defParam(\n            \"inner_radius\",\n            units=units.CM,\n            description=\"Starting radial position; this can be zero.\",\n        )\n\n        pb.defParam(\"outer_radius\", units=units.CM, description=\"Ending radial position.\")\n\n        pb.defParam(\"height\", units=units.CM, description=\"Height of the 3D radial segment.\")\n\n        pb.defParam(\n            \"azimuthal_differential\",\n            units=units.RADIANS,\n            description=\"Perturbation in the azimuthal dimension (see inner_theta and outer_theta).\",\n        )\n\n        pb.defParam(\n            \"radius_differential\",\n            units=units.UNITLESS,\n            description=\"Perturbation in the radial dimension (see inner_radius and outer_radius).\",\n        )\n\n        pb.defParam(\n            \"inner_axial\",\n            units=units.UNITLESS,\n            description=\"Perturbation in the axial dimension (picture outer_axial = inner_axial + 
height).\",\n        )\n\n        pb.defParam(\n            \"outer_axial\",\n            units=units.UNITLESS,\n            description=\"Perturbation result in the axial dimension (picture outer_axial = inner_axial + height).\",\n        )\n\n    return pDefs\n"
  },
  {
    "path": "armi/reactor/components/tests/__init__.py",
    "content": "# Copyright 2025 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"
  },
  {
    "path": "armi/reactor/components/tests/test_basicShapes.py",
    "content": "# Copyright 2025 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Unit testing file for basic shapes.\"\"\"\n\nimport math\nimport unittest\n\nfrom armi.materials import resolveMaterialClassByName\nfrom armi.reactor.components.basicShapes import (\n    Circle,\n    Hexagon,\n    Rectangle,\n    SolidRectangle,\n    Square,\n    Triangle,\n)\n\n\nclass TestBasicShapes(unittest.TestCase):\n    \"\"\"Class for testing basic shapes.\"\"\"\n\n    @classmethod\n    def setUpClass(cls):\n        cls.material = resolveMaterialClassByName(\"HT9\")()\n\n    def test_circleArea(self):\n        od = 2.0\n        id = 1.5\n        comp = Circle(\"Test\", material=self.material, Tinput=20, Thot=300, od=od, id=id, mult=2)\n\n        self.assertAlmostEqual(comp.getComponentArea(cold=True), math.pi * (od**2 / 4 - id**2 / 4) * 2)\n        self.assertAlmostEqual(comp.getComponentArea(cold=True), comp.getComponentArea(Tc=20.0))\n\n        odHot = comp.getDimension(\"od\")\n        idHot = comp.getDimension(\"id\")\n        self.assertAlmostEqual(\n            comp.getComponentArea(cold=False),\n            math.pi * (odHot**2 / 4 - idHot**2 / 4) * 2,\n        )\n        self.assertAlmostEqual(comp.getComponentArea(cold=False), comp.getComponentArea(Tc=300))\n\n    def test_hexagonArea(self):\n        op = 2.0\n        ip = 1.5\n        comp = Hexagon(\"Test\", material=self.material, Tinput=20, Thot=300, op=op, ip=ip, mult=2)\n\n        
self.assertAlmostEqual(comp.getComponentArea(cold=True), math.sqrt(3.0) * (op**2 - ip**2))\n        self.assertAlmostEqual(comp.getComponentArea(cold=True), comp.getComponentArea(Tc=20.0))\n\n        opHot = comp.getDimension(\"op\")\n        ipHot = comp.getDimension(\"ip\")\n        self.assertAlmostEqual(\n            comp.getComponentArea(cold=False),\n            math.sqrt(3.0) * (opHot**2 - ipHot**2),\n        )\n        self.assertAlmostEqual(comp.getComponentArea(cold=False), comp.getComponentArea(Tc=300))\n\n    def test_rectangleArea(self):\n        lo = 2.0\n        li = 1.5\n        wo = 2.5\n        wi = 1.25\n        comp = Rectangle(\n            \"Test\",\n            material=self.material,\n            Tinput=20,\n            Thot=300,\n            lengthOuter=lo,\n            lengthInner=li,\n            widthOuter=wo,\n            widthInner=wi,\n            mult=2,\n        )\n\n        self.assertAlmostEqual(comp.getComponentArea(cold=True), 2 * (lo * wo - li * wi))\n        self.assertAlmostEqual(comp.getComponentArea(cold=True), comp.getComponentArea(Tc=20.0))\n\n        loHot = comp.getDimension(\"lengthOuter\")\n        liHot = comp.getDimension(\"lengthInner\")\n        woHot = comp.getDimension(\"widthOuter\")\n        wiHot = comp.getDimension(\"widthInner\")\n        self.assertAlmostEqual(comp.getComponentArea(cold=False), 2 * (loHot * woHot - liHot * wiHot))\n        self.assertAlmostEqual(comp.getComponentArea(cold=False), comp.getComponentArea(Tc=300))\n\n    def test_solidRectangleArea(self):\n        lo = 2.0\n        wo = 2.5\n        comp = SolidRectangle(\n            \"Test\",\n            material=self.material,\n            Tinput=20,\n            Thot=300,\n            lengthOuter=lo,\n            widthOuter=wo,\n            mult=2,\n        )\n\n        self.assertAlmostEqual(comp.getComponentArea(cold=True), 2 * lo * wo)\n        self.assertAlmostEqual(comp.getComponentArea(cold=True), comp.getComponentArea(Tc=20.0))\n\n 
       loHot = comp.getDimension(\"lengthOuter\")\n        woHot = comp.getDimension(\"widthOuter\")\n        self.assertAlmostEqual(comp.getComponentArea(cold=False), 2 * loHot * woHot)\n        self.assertAlmostEqual(comp.getComponentArea(cold=False), comp.getComponentArea(Tc=300))\n\n    def test_squareArea(self):\n        wo = 2.5\n        wi = 1.25\n        comp = Square(\n            \"Test\",\n            material=self.material,\n            Tinput=20,\n            Thot=300,\n            widthOuter=wo,\n            widthInner=wi,\n            mult=2,\n        )\n\n        self.assertAlmostEqual(comp.getComponentArea(cold=True), 2 * (wo**2 - wi**2))\n        self.assertAlmostEqual(comp.getComponentArea(cold=True), comp.getComponentArea(Tc=20.0))\n\n        woHot = comp.getDimension(\"widthOuter\")\n        wiHot = comp.getDimension(\"widthInner\")\n        self.assertAlmostEqual(comp.getComponentArea(cold=False), 2 * (woHot**2 - wiHot**2))\n        self.assertAlmostEqual(comp.getComponentArea(cold=False), comp.getComponentArea(Tc=300))\n\n    def test_triangleArea(self):\n        base = 2.5\n        height = 1.25\n        comp = Triangle(\n            \"Test\",\n            material=self.material,\n            Tinput=20,\n            Thot=300,\n            base=base,\n            height=height,\n            mult=2,\n        )\n\n        self.assertAlmostEqual(comp.getComponentArea(cold=True), base * height)\n        self.assertAlmostEqual(comp.getComponentArea(cold=True), comp.getComponentArea(Tc=20.0))\n\n        baseHot = comp.getDimension(\"base\")\n        heightHot = comp.getDimension(\"height\")\n        self.assertAlmostEqual(comp.getComponentArea(cold=False), baseHot * heightHot)\n        self.assertAlmostEqual(comp.getComponentArea(cold=False), comp.getComponentArea(Tc=300))\n"
  },
  {
    "path": "armi/reactor/components/tests/test_complexShapes.py",
    "content": "# Copyright 2025 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Unit testing file for basic shapes.\"\"\"\n\nimport math\nimport unittest\n\nfrom armi.materials import resolveMaterialClassByName\nfrom armi.reactor.components.complexShapes import (\n    HexHoledCircle,\n    HoledHexagon,\n    HoledRectangle,\n    HoledSquare,\n)\n\n\nclass TestComplexShapes(unittest.TestCase):\n    \"\"\"Class for testing complex shapes.\"\"\"\n\n    @classmethod\n    def setUpClass(cls):\n        cls.material = resolveMaterialClassByName(\"HT9\")()\n\n    @staticmethod\n    def circArea(d):\n        return math.pi * (d / 2) ** 2\n\n    @staticmethod\n    def hexArea(op):\n        return math.sqrt(3.0) / 2.0 * op**2\n\n    @staticmethod\n    def rectArea(l, w):\n        return l * w\n\n    def test_holedHexagon(self):\n        op = 2.0\n        holeOD = 0.5\n        nHoles = 2\n        comp = HoledHexagon(\n            \"TestHoledHexagon\",\n            material=self.material,\n            Tinput=20,\n            Thot=300,\n            op=op,\n            holeOD=holeOD,\n            nHoles=nHoles,\n            mult=2,\n        )\n\n        self.assertAlmostEqual(\n            comp.getComponentArea(cold=True),\n            (self.hexArea(op) - nHoles * self.circArea(holeOD)) * 2,\n        )\n        self.assertAlmostEqual(comp.getComponentArea(cold=True), comp.getComponentArea(Tc=20.0))\n\n        opHot = comp.getDimension(\"op\")\n        
holeODHot = comp.getDimension(\"holeOD\")\n        self.assertAlmostEqual(\n            comp.getComponentArea(cold=False),\n            (self.hexArea(opHot) - nHoles * self.circArea(holeODHot)) * 2,\n        )\n        self.assertAlmostEqual(comp.getComponentArea(cold=False), comp.getComponentArea(Tc=300))\n\n        # Test that holeRadFromCenter does not change the area.\n        comp2 = HoledHexagon(\n            \"TestHoledHexagonHoleRadFromCenter\",\n            material=self.material,\n            Tinput=20,\n            Thot=300,\n            op=op,\n            holeOD=holeOD,\n            nHoles=nHoles,\n            holeRadFromCenter=(op + holeOD) / 2,\n            mult=2,\n        )\n        self.assertAlmostEqual(comp.getComponentArea(cold=True), comp2.getComponentArea(cold=True))\n        self.assertAlmostEqual(comp.getComponentArea(cold=False), comp2.getComponentArea(cold=False))\n\n        compHoleRadFromCenter = HoledHexagon(\n            \"TestHoledHexagon33\",\n            material=self.material,\n            Tinput=20,\n            Thot=300,\n            op=op,\n            holeOD=holeOD,\n            nHoles=nHoles,\n            holeRadFromCenter=0.5,\n            mult=2,\n        )\n        self.assertEqual(compHoleRadFromCenter.getDimension(\"holeRadFromCenter\", cold=True, Tc=500), 0.5)\n        self.assertGreater(compHoleRadFromCenter.getDimension(\"holeRadFromCenter\", cold=False, Tc=500), 0.5)\n\n    def test_holedRectangle(self):\n        lo = 2.0\n        wo = 3.0\n        holeOD = 0.5\n        comp = HoledRectangle(\n            \"Test\",\n            material=self.material,\n            Tinput=20,\n            Thot=300,\n            lengthOuter=lo,\n            widthOuter=wo,\n            holeOD=holeOD,\n            mult=2,\n        )\n\n        self.assertAlmostEqual(\n            comp.getComponentArea(cold=True),\n            (self.rectArea(lo, wo) - self.circArea(holeOD)) * 2,\n        )\n        
self.assertAlmostEqual(comp.getComponentArea(cold=True), comp.getComponentArea(Tc=20.0))\n\n        loHot = comp.getDimension(\"lengthOuter\")\n        woHot = comp.getDimension(\"widthOuter\")\n        holeODHot = comp.getDimension(\"holeOD\")\n        self.assertAlmostEqual(\n            comp.getComponentArea(cold=False),\n            (self.rectArea(loHot, woHot) - self.circArea(holeODHot)) * 2,\n        )\n        self.assertAlmostEqual(comp.getComponentArea(cold=False), comp.getComponentArea(Tc=300))\n\n    def test_holedSquare(self):\n        wo = 3.0\n        holeOD = 0.5\n        comp = HoledSquare(\n            \"Test\",\n            material=self.material,\n            Tinput=20,\n            Thot=300,\n            widthOuter=wo,\n            holeOD=holeOD,\n            mult=2,\n        )\n\n        self.assertAlmostEqual(\n            comp.getComponentArea(cold=True),\n            (self.rectArea(wo, wo) - self.circArea(holeOD)) * 2,\n        )\n        self.assertAlmostEqual(comp.getComponentArea(cold=True), comp.getComponentArea(Tc=20.0))\n\n        woHot = comp.getDimension(\"widthOuter\")\n        holeODHot = comp.getDimension(\"holeOD\")\n        self.assertAlmostEqual(\n            comp.getComponentArea(cold=False),\n            (self.rectArea(woHot, woHot) - self.circArea(holeODHot)) * 2,\n        )\n        self.assertAlmostEqual(comp.getComponentArea(cold=False), comp.getComponentArea(Tc=300))\n\n    def test_hexHoledCircle(self):\n        od = 3.0\n        holeOP = 0.5\n        comp = HexHoledCircle(\n            \"Test\",\n            material=self.material,\n            Tinput=20,\n            Thot=300,\n            od=od,\n            holeOP=holeOP,\n            mult=2,\n        )\n\n        self.assertAlmostEqual(\n            comp.getComponentArea(cold=True),\n            (self.circArea(od) - self.hexArea(holeOP)) * 2,\n        )\n        self.assertAlmostEqual(comp.getComponentArea(cold=True), comp.getComponentArea(Tc=20.0))\n\n        
odHot = comp.getDimension(\"od\")\n        holeOPHot = comp.getDimension(\"holeOP\")\n        self.assertAlmostEqual(\n            comp.getComponentArea(cold=False),\n            (self.circArea(odHot) - self.hexArea(holeOPHot)) * 2,\n        )\n        self.assertAlmostEqual(comp.getComponentArea(cold=False), comp.getComponentArea(Tc=300))\n"
  },
  {
    "path": "armi/reactor/components/volumetricShapes.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Three-dimensional shapes.\"\"\"\n\nimport math\n\nfrom armi.reactor.components import ShapedComponent, componentParameters\n\n\nclass Sphere(ShapedComponent):\n    \"\"\"A spherical component.\"\"\"\n\n    is3D = True\n\n    THERMAL_EXPANSION_DIMS = {}\n\n    # Just usurp the Circle parameters. This may lead to issues at some point in things like the DB\n    # interface, but for now, they are the same params, so why not?\n    pDefs = componentParameters.getCircleParameterDefinitions()\n\n    def __init__(\n        self,\n        name,\n        material,\n        Tinput,\n        Thot,\n        od=None,\n        id=None,\n        mult=None,\n        modArea=None,\n        isotopics=None,\n        mergeWith=None,\n        components=None,\n    ):\n        ShapedComponent.__init__(\n            self,\n            name,\n            material,\n            Tinput,\n            Thot,\n            isotopics=isotopics,\n            mergeWith=mergeWith,\n            components=components,\n        )\n        self._linkAndStoreDimensions(components, od=od, id=id, mult=mult, modArea=modArea)\n\n    def getBoundingCircleOuterDiameter(self, Tc=None, cold=False):\n        \"\"\"Abstract bounding circle method that should be overwritten by each shape subclass.\"\"\"\n        return self.getDimension(\"od\")\n\n    def getComponentArea(self, cold=False, Tc=None):\n        
\"\"\"Compute an average area over the height.\"\"\"\n        from armi.reactor.blocks import Block  # avoid circular import\n\n        if Tc is not None:\n            raise NotImplementedError(f\"Cannot calculate area at specified temperature: {Tc}\")\n        block = self.getAncestor(lambda c: isinstance(c, Block))\n        return self.getComponentVolume(cold) / block.getHeight()\n\n    def getComponentVolume(self, cold=False):\n        \"\"\"Computes the volume of the sphere in cm^3.\"\"\"\n        od = self.getDimension(\"od\", cold=cold)\n        iD = self.getDimension(\"id\", cold=cold)\n        mult = self.getDimension(\"mult\")\n        vol = mult * 4.0 / 3.0 * math.pi * ((od / 2.0) ** 3 - (iD / 2.0) ** 3)\n        return vol\n\n\nclass Cube(ShapedComponent):\n    \"\"\"More correctly, a rectangular cuboid.\n\n    Optionally, there may be a centric cuboid volume cut out of center of this shape.\n    \"\"\"\n\n    is3D = True\n\n    THERMAL_EXPANSION_DIMS = {}\n\n    pDefs = componentParameters.getCubeParameterDefinitions()\n\n    def __init__(\n        self,\n        name,\n        material,\n        Tinput,\n        Thot,\n        lengthOuter=None,\n        lengthInner=None,\n        widthOuter=None,\n        widthInner=None,\n        heightOuter=None,\n        heightInner=None,\n        mult=None,\n        modArea=None,\n        isotopics=None,\n        mergeWith=None,\n        components=None,\n    ):\n        ShapedComponent.__init__(\n            self,\n            name,\n            material,\n            Tinput,\n            Thot,\n            isotopics=isotopics,\n            mergeWith=mergeWith,\n            components=components,\n        )\n        self._linkAndStoreDimensions(\n            components,\n            lengthOuter=lengthOuter,\n            lengthInner=lengthInner,\n            widthOuter=widthOuter,\n            widthInner=widthInner,\n            heightOuter=heightOuter,\n            heightInner=heightInner,\n            
mult=mult,\n            modArea=modArea,\n        )\n\n    def getComponentArea(self, cold=False, Tc=None):\n        raise NotImplementedError(\"Cannot compute area of a cube component.\")\n\n    def getComponentVolume(self):\n        \"\"\"Computes the volume of the cube in cm^3.\"\"\"\n        lengthO = self.getDimension(\"lengthOuter\")\n        widthO = self.getDimension(\"widthOuter\")\n        heightO = self.getDimension(\"heightOuter\")\n        lengthI = self.getDimension(\"lengthInner\")\n        widthI = self.getDimension(\"widthInner\")\n        heightI = self.getDimension(\"heightInner\")\n        mult = self.getDimension(\"mult\")\n        vol = mult * (lengthO * widthO * heightO - lengthI * widthI * heightI)\n        return vol\n\n\nclass RadialSegment(ShapedComponent):\n    r\"\"\"A RadialSegement represents a volume element with thicknesses in the\n    azimuthal, radial and axial directions.\n\n    This a 3D projection of a 2D shape that is an angular slice of a ring or circle.\n\n    The 2D shape is like the one below, with an inner and outer position for the\n    theta and the radius:\n\n    Image::\n\n        Y\n        ^                      -\n        |                 -\n        |            -XXXX\\\n        |       -  \\XXXXXXX\\\n        |  theta   |XXXXXXX|\n        |-----------------------> radius, X\n        |\n        |\n    \"\"\"\n\n    is3D = True\n\n    THERMAL_EXPANSION_DIMS = {}\n\n    pDefs = componentParameters.getRadialSegmentParameterDefinitions()\n\n    def __init__(\n        self,\n        name,\n        material,\n        Tinput,\n        Thot,\n        inner_radius=None,\n        outer_radius=None,\n        height=None,\n        mult=None,\n        inner_theta=0,\n        outer_theta=math.pi * 2,\n        isotopics=None,\n        mergeWith=None,\n        components=None,\n    ):\n        ShapedComponent.__init__(\n            self,\n            name,\n            material,\n            Tinput,\n            Thot,\n           
 isotopics=isotopics,\n            mergeWith=mergeWith,\n            components=components,\n        )\n        self._linkAndStoreDimensions(\n            components,\n            inner_radius=inner_radius,\n            outer_radius=outer_radius,\n            height=height,\n            mult=mult,\n            inner_theta=inner_theta,\n            outer_theta=outer_theta,\n        )\n\n    def getComponentArea(self, refVolume=None, refHeight=None, cold=False, Tc=None):\n        if Tc is not None:\n            raise NotImplementedError(f\"Cannot calculate area at specified temperature: {Tc}\")\n        if refHeight:\n            return (\n                (self.getDimension(\"height\", cold=cold) / refHeight)\n                * self.getDimension(\"mult\")\n                * (\n                    math.pi\n                    * (\n                        self.getDimension(\"outer_radius\", cold=cold) ** 2\n                        - self.getDimension(\"inner_radius\", cold=cold) ** 2\n                    )\n                    * (\n                        (self.getDimension(\"outer_theta\", cold=cold) - self.getDimension(\"inner_theta\", cold=cold))\n                        / (math.pi * 2.0)\n                    )\n                )\n            )\n        if refVolume:\n            return (self.getComponentVolume() / refVolume) / self.getDimension(\"height\")\n        else:\n            return self.getComponentVolume() / self.getDimension(\"height\")\n\n    def getComponentVolume(self):\n        mult = self.getDimension(\"mult\")\n        outerRad = self.getDimension(\"outer_radius\")\n        innerRad = self.getDimension(\"inner_radius\")\n        outerTheta = self.getDimension(\"outer_theta\")\n        innerTheta = self.getDimension(\"inner_theta\")\n        height = self.getDimension(\"height\")\n        radialArea = math.pi * (outerRad**2 - innerRad**2)\n        aziFraction = (outerTheta - innerTheta) / (math.pi * 2.0)\n        vol = mult * radialArea * 
aziFraction * height\n        return vol\n\n    def getBoundingCircleOuterDiameter(self, Tc=None, cold=False):\n        return 2.0 * self.getDimension(\"outer_radius\", Tc, cold)\n\n    def getCircleInnerDiameter(self, Tc=None, cold=False):\n        return 2.0 * self.getDimension(\"inner_radius\", Tc, cold)\n\n\nclass DifferentialRadialSegment(RadialSegment):\n    \"\"\"\n    This component class represents a volume element with thicknesses in the\n    azimuthal, radial and axial directions. Furthermore it has dependent\n    dimensions: (outer theta, outer radius, outer axial) that can be updated\n    depending on the 'differential' in the corresponding directions.\n\n    This component class is super useful for defining ThRZ reactors and\n    perturbing its dimensions using the optimization modules\n\n    See Also\n    --------\n    geometry purturbation:\n    armi.physics.optimize.OptimizationInterface.modifyCase (ThRZReflectorThickness,ThRZActiveHeight,ThRZActiveRadius)\n\n    mesh updating:\n    armi.reactor.reactors.Reactor.importGeom\n    \"\"\"\n\n    is3D = True\n\n    THERMAL_EXPANSION_DIMS = {}\n\n    def __init__(\n        self,\n        name,\n        material,\n        Tinput,\n        Thot,\n        inner_radius=None,\n        radius_differential=None,\n        inner_axial=None,\n        height=None,\n        inner_theta=0,\n        azimuthal_differential=2 * math.pi,\n        mult=1,\n        isotopics=None,\n        mergeWith=None,\n        components=None,\n    ):\n        ShapedComponent.__init__(\n            self,\n            name,\n            material,\n            Tinput,\n            Thot,\n            isotopics=isotopics,\n            mergeWith=mergeWith,\n            components=components,\n        )\n        self._linkAndStoreDimensions(\n            components,\n            inner_radius=inner_radius,\n            radius_differential=radius_differential,\n            inner_axial=inner_axial,\n            height=height,\n            
inner_theta=inner_theta,\n            azimuthal_differential=azimuthal_differential,\n            mult=mult,\n        )\n        self.updateDims()\n\n    def updateDims(self, key=\"\", val=None):\n        \"\"\"\n        Update the dimensions of differential radial segment component.\n\n        Notes\n        -----\n        Can be used to update any dimension on the component, but outer_radius, outer_axial, and outer_theta are\n        always updated.\n\n        See Also\n        --------\n        armi.reactor.blocks.Block.updateComponentDims\n        \"\"\"\n        self.setDimension(key, val)\n        self.setDimension(\n            \"outer_radius\",\n            self.getDimension(\"inner_radius\") + self.getDimension(\"radius_differential\"),\n        )\n        self.setDimension(\n            \"outer_axial\",\n            self.getDimension(\"inner_axial\") + self.getDimension(\"height\"),\n        )\n        self.setDimension(\n            \"outer_theta\",\n            self.getDimension(\"inner_theta\") + self.getDimension(\"azimuthal_differential\"),\n        )\n\n    def getComponentArea(self, refVolume=None, refHeight=None, cold=False, Tc=None):\n        if Tc is not None:\n            raise NotImplementedError(f\"Cannot calculate area at specified temperature: {Tc}\")\n        self.updateDims()\n        return RadialSegment.getComponentArea(self, refVolume=None, refHeight=None, cold=False)\n\n    def getComponentVolume(self):\n        self.updateDims()\n        return RadialSegment.getComponentVolume(self)\n"
  },
  {
    "path": "armi/reactor/composites.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis module contains the basic composite pattern underlying the reactor package.\n\nThis follows the principles of the `Composite Design Pattern\n<https://en.wikipedia.org/wiki/Composite_pattern>`_ to allow the construction of a part/whole\nhierarchy representing a physical nuclear reactor. The composite objects act somewhat like lists:\nthey can be indexed, iterated over, appended, extended, inserted, etc. 
Each member of the hierarchy\nknows its children and its parent, so full access to the hierarchy is available from everywhere.\nThis design was chosen because of the close analogy of the model to the physical nature of nuclear\nreactors.\n\nWarning\n-------\nBecause each member of the hierarchy is linked to the entire tree, it is often unsafe to save\nreferences to individual members; it can cause large and unexpected memory inefficiencies.\n\nSee Also\n--------\n:doc:`/developer/index`.\n\"\"\"\n\nimport collections\nimport itertools\nimport operator\nimport timeit\nfrom typing import (\n    TYPE_CHECKING,\n    Callable,\n    Dict,\n    Iterator,\n    List,\n    Optional,\n    Tuple,\n    Type,\n    Union,\n)\n\nimport numpy as np\n\nfrom armi import context, runLog, utils\nfrom armi.nucDirectory import nucDir, nuclideBases\nfrom armi.physics.neutronics.fissionProductModel import fissionProductModel\nfrom armi.reactor import grids, parameters\nfrom armi.reactor.flags import Flags, TypeSpec\nfrom armi.reactor.parameters import resolveCollections\nfrom armi.utils import densityTools, tabulate, units\nfrom armi.utils.densityTools import calculateNumberDensity\nfrom armi.utils.flags import auto\n\nif TYPE_CHECKING:\n    from armi.reactor.components.component import Component\n\n\nclass FlagSerializer(parameters.Serializer):\n    \"\"\"\n    Serializer implementation for Flags.\n\n    This operates by converting each set of Flags (too large to fit in a uint64) into a\n    sequence of enough uint8 elements to represent all flags. These constitute a\n    dimension of a 2-D numpy array containing all Flags for all objects provided to the\n    ``pack()`` function.\n    \"\"\"\n\n    version = \"1\"\n\n    @staticmethod\n    def pack(data):\n        \"\"\"\n        Flags are represented as a 2D numpy array of uint8 (single-byte, unsigned\n        integers), where each row contains the bytes representing a single Flags\n        instance. 
We also store the list of field names so that we can verify that the\n        reader and the writer can agree on the meaning of each bit.\n\n        Under the hood, this calls the private implementation providing the\n        :py:class:`armi.reactor.flags.Flags` class as the target output class.\n        \"\"\"\n        return FlagSerializer._packImpl(data, Flags)\n\n    @staticmethod\n    def _packImpl(data, flagCls: Type[utils.Flag]):\n        \"\"\"\n        Implement the pack operation given a target output Flag class.\n\n        This is kept separate from the public interface to permit testing of the\n        functionality without having to do unholy things to ARMI's actual set of\n        ``reactor.flags.Flags``.\n        \"\"\"\n        npa = np.array([b for f in data for b in f.to_bytes()], dtype=np.uint8).reshape((len(data), flagCls.width()))\n\n        return npa, {\"flag_order\": flagCls.sortedFields()}\n\n    @staticmethod\n    def _remapBits(inp: int, mapping: Dict[int, int]):\n        \"\"\"\n        Given an input bitfield, map each bit to the appropriate new bit position based\n        on the passed mapping.\n\n        Parameters\n        ----------\n        inp : int\n            input bitfield\n        mapping : dict\n            dictionary mapping from old bit position -> new bit position\n        \"\"\"\n        f = 0\n        for bit in itertools.count():\n            if (1 << bit) > inp:\n                break\n            if (1 << bit) & inp:\n                f = f | (1 << mapping[bit])\n\n        return f\n\n    @classmethod\n    def unpack(cls, data, version, attrs):\n        \"\"\"\n        Reverse the pack operation.\n\n        This will allow for some degree of conversion from old flags to a new set of\n        flags, as long as all of the source flags still exist in the current set of\n        flags.\n\n        Under the hood, this calls the private implementation providing the\n        :py:class:`armi.reactor.flags.Flags` class as the 
target output class.\n        \"\"\"\n        return cls._unpackImpl(data, version, attrs, Flags)\n\n    @classmethod\n    def _unpackImpl(cls, data, version, attrs, flagCls: Type[utils.Flag]):\n        \"\"\"\n        Implement the unpack operation given a target output Flag class.\n\n        This is kept separate from the public interface to permit testing of the\n        functionality without having to do unholy things to ARMI's actual set of\n        ``reactor.flags.Flags``.\n\n        If the set of flags for the currently-configured App match the input set of\n        flags, they are read in directly, which is good and cheap. However, if the set\n        of flags differ from the input and the current App, we will try to convert them\n        (as long as all of the input flags exist in the current App). Conversion is done\n        by forming a map from all input bit positions to the current-App bit positions\n        of the same meaning. E.g., if FUEL flag used to be the 3rd bit position, but now\n        it is the 6th bit position, the map will contain ``map[3] = 6``. Then for each\n        bitfield that is read in, each bit position is queried and if present, mapped to\n        the proper corresponding new bit position. The result of this mapping is used to\n        construct the Flags object.\n        \"\"\"\n        flagOrderPassed = attrs[\"flag_order\"]\n        flagOrderNow = flagCls.sortedFields()\n\n        if version != cls.version:\n            raise ValueError(\n                f\"The FlagSerializer version used to pack the data ({version}) does not match \"\n                f\"the current version ({cls.version})! 
This database either needs to be migrated, \"\n                \"or on-the-fly inter-version conversion needs to be implemented.\"\n            )\n\n        flagSetIn = set(flagOrderPassed)\n        flagSetNow = set(flagOrderNow)\n\n        # Make sure that all of the old flags still exist\n        if not flagSetIn.issubset(flagSetNow):\n            missingFlags = flagSetIn - flagSetNow\n            runLog.warning(\n                \"The set of flags in the database includes unknown flags. For convenience, we will \"\n                f\"add these to the system: {missingFlags}\"\n            )\n            flagCls.extend({k: auto() for k in missingFlags})\n\n        flagOrderNow = flagCls.sortedFields()\n\n        if all(i == j for i, j in zip(flagOrderPassed, flagOrderNow)):\n            out = [flagCls.from_bytes(row.tobytes()) for row in data]\n        else:\n            newFlags = {i: flagOrderNow.index(oldFlag) for (i, oldFlag) in enumerate(flagOrderPassed)}\n            out = [flagCls(cls._remapBits(int.from_bytes(row.tobytes(), byteorder=\"little\"), newFlags)) for row in data]\n\n        return out\n\n\ndef _defineBaseParameters():\n    \"\"\"\n    Return parameter definitions that all ArmiObjects must have to function properly.\n\n    For now, this pretty much just includes ``flags``, since these are used throughout\n    the composite model to filter which objects are considered when traversing the\n    reactor model.\n\n    Note also that the base ParameterCollection class also has a ``serialNum``\n    parameter. These are defined in different locations, since serialNum is a guaranteed\n    feature of a ParameterCollection (for serialization to the database and history\n    tracking), while the ``flags`` parameter is more a feature of the composite model.\n\n    .. important::\n        Notice that the ``flags`` parameter is not written to the database. This is for\n        a couple of reasons:\n        * Flags are derived from an ArmiObject's name. 
Since the name is stored on\n        the DB, it is possible to recover the flags from that.\n        * Storing flags to the DB may be complicated, since it is easier to imagine a\n        number of flags that is greater than the width of natively-supported integer\n        types, requiring some extra tricks to store the flags in an HDF5 file.\n        * Allowing flags to be modified by plugins further complicates things, in that\n        it is important to ensure that the meaning of all bits in the flag value are\n        consistent between a database state and the current ARMI environment. This may\n        require encoding these meanings in to the database as some form of metadata.\n    \"\"\"\n    pDefs = parameters.ParameterDefinitionCollection()\n\n    pDefs.add(\n        parameters.Parameter(\n            \"flags\",\n            units=units.UNITLESS,\n            description=\"The type specification of this object\",\n            location=parameters.ParamLocation.AVERAGE,\n            saveToDB=True,\n            default=Flags(0),\n            setter=parameters.NoDefault,\n            categories=set(),\n            serializer=FlagSerializer,\n        )\n    )\n\n    return pDefs\n\n\nclass CompositeModelType(resolveCollections.ResolveParametersMeta):\n    \"\"\"\n    Metaclass for tracking subclasses of ArmiObject subclasses.\n\n    It is often useful to have an easily-accessible collection of all classes that participate in\n    the ARMI composite reactor model. 
This metaclass maintains a collection of all defined\n    subclasses, called TYPES.\n    \"\"\"\n\n    TYPES: Dict[str, Type] = dict()\n    \"\"\"\n    Dictionary mapping class name to class object for all subclasses.\n\n    :meta hide-value:\n    \"\"\"\n\n    def __new__(cls, name, bases, attrs):\n        newType = resolveCollections.ResolveParametersMeta.__new__(cls, name, bases, attrs)\n\n        CompositeModelType.TYPES[name] = newType\n\n        return newType\n\n\nclass ArmiObject(metaclass=CompositeModelType):\n    \"\"\"\n    The abstract base class for all composites and leaves.\n\n    This:\n\n    * declares the interface for objects in the composition\n    * implements default behavior for the interface common to all classes\n    * Declares an interface for accessing and managing child objects\n    * Defines an interface for accessing parents.\n\n    Called \"component\" in gang of four, this is an ArmiObject here because the word component was\n    already taken in ARMI.\n\n    The :py:class:`armi.reactor.parameters.ResolveParametersMeta` metaclass is used to automatically\n    create ``ParameterCollection`` subclasses for storing parameters associated with any particular\n    subclass of ArmiObject. Defining a ``pDefs`` class attribute in the definition of a subclass of\n    ArmiObject will lead to the creation of a new subclass of\n    py:class:`armi.reactor.parameters.ParameterCollection`, which will contain the definitions from\n    that class's ``pDefs`` as well as the definitions for all of its parents. A new\n    ``paramCollectionType`` class attribute will be added to the ArmiObject subclass to reflect\n    which type of parameter collection should be used.\n\n    Warning\n    -------\n    This class has far too many public methods. 
We are in the midst of a composite tree cleanup that\n    will likely break these out onto a number of separate functional classes grouping things like\n    composition, location, shape/dimensions, and various physics queries. Methods are being\n    collected here from the various specialized subclasses (Block, Assembly) in preparation for this\n    next step. As a result, the public API on this method should be considered unstable.\n\n    .. impl:: Parameters are accessible throughout the armi tree.\n        :id: I_ARMI_PARAM1\n        :implements: R_ARMI_PARAM\n\n        An ARMI reactor model is composed of collections of ARMIObject objects. These\n        objects are combined in a hierarchical manner. Each level of the composite tree\n        is able to be assigned parameters which define it, such as temperature, flux,\n        or keff values. This class defines an attribute of type ``ParameterCollection``,\n        which contains all the functionality of an ARMI ``Parameter`` object. 
Because\n        the entire model is composed of ARMIObjects at the most basic level, each level\n        of the Composite tree contains this parameter attribute and can thus be queried.\n\n    Attributes\n    ----------\n    name : str\n        Object name\n    parent : ArmiObject\n        The object's parent in a hierarchical tree\n    cached : dict\n        Some cached values for performance\n    p : ParameterCollection\n        The state variables\n    spatialGrid : grids.Grid\n        The spatial grid that this object contains\n    spatialLocator : grids.LocationBase\n        The location of this object in its parent grid, or global space\n\n    See Also\n    --------\n    armi.reactor.parameters\n    \"\"\"\n\n    paramCollectionType: Optional[Type[parameters.ParameterCollection]] = None\n    pDefs = _defineBaseParameters()\n\n    def __init__(self, name):\n        self.name = name\n        self.parent = None\n        self.cached = {}\n        self._backupCache = None\n        self.p = self.paramCollectionType()\n        # NOTE: LFPs are not serialized to the database, which could matter when loading an old DB.\n        self._lumpedFissionProducts = None\n        self.spatialGrid = None\n        self.spatialLocator = grids.CoordinateLocation(0.0, 0.0, 0.0, None)\n\n    def __lt__(self, other):\n        \"\"\"\n        Implement the less-than operator.\n\n        Implementing this on the ArmiObject allows most objects, under most circumstances to be\n        sorted. This is useful from the context of the Database classes, so that they can produce a\n        stable layout of the serialized composite structure.\n\n        By default, this sorts using the spatial locator in K, J, I order, which should give a\n        relatively intuitive order. 
It also makes sure that the objects being sorted live in the\n        same grid.\n        \"\"\"\n        if self.spatialLocator is None or other.spatialLocator is None:\n            runLog.error(f\"could not compare {self} and {other}\")\n            raise ValueError(\"One or more of the compared objects have no spatialLocator\")\n\n        if self.spatialLocator.grid is not other.spatialLocator.grid:\n            runLog.error(f\"could not compare {self} and {other}\")\n            raise ValueError(\n                \"Composite grids must be the same to compare:\\n\"\n                f\"This grid: {self.spatialGrid}\\n\"\n                f\"Other grid: {other.spatialGrid}\"\n            )\n        try:\n            t1 = tuple(reversed(self.spatialLocator.getCompleteIndices()))\n            t2 = tuple(reversed(other.spatialLocator.getCompleteIndices()))\n            return t1 < t2\n        except ValueError:\n            runLog.error(f\"failed to compare {self.spatialLocator} and {other.spatialLocator}\")\n            raise\n\n    def __getstate__(self):\n        \"\"\"\n        Python method for reducing data before pickling.\n\n        This removes links to parent objects, which allows one to, for example, pickle\n        an assembly without pickling the entire reactor. Likewise, one could\n        MPI_COMM.bcast an assembly without broadcasting the entire reactor.\n\n        Notes\n        -----\n        Special treatment of ``parent`` is not enough, since the spatialGrid also\n        contains a reference back to the armiObject. 
Consequently, the ``spatialGrid``\n        needs to be reassigned in ``__setstate__``.\n        \"\"\"\n        state = self.__dict__.copy()\n        state[\"parent\"] = None\n\n        if \"r\" in state:\n            raise RuntimeError(\"An ArmiObject should never contain the entire Reactor.\")\n\n        return state\n\n    def __setstate__(self, state):\n        \"\"\"\n        Sets the state of this ArmiObject.\n\n        Notes\n        -----\n        This ArmiObject may have lost a reference to its parent. If the parent was also\n        pickled (serialized), then the parent should update the ``.parent`` attribute\n        during its own ``__setstate__``. That means within the context of\n        ``__setstate__`` one should not rely upon ``self.parent``.\n        \"\"\"\n        self.__dict__.update(state)\n\n        if self.spatialGrid is not None:\n            self.spatialGrid.armiObject = self\n            # Spatial locators also get disassociated with their grids when detached;\n            # make sure they get hooked back up\n            for c in self:\n                c.spatialLocator.associate(self.spatialGrid)\n\n        # now \"reattach\" children\n        for c in self:\n            c.parent = self\n\n    def __repr__(self):\n        return f\"<{self.__class__.__name__}: {self.name}>\"\n\n    def __format__(self, spec):\n        return format(str(self), spec)\n\n    def __bool__(self):\n        \"\"\"\n        Flag that says this is non-zero in a boolean context.\n\n        Notes\n        -----\n        The default behavior for ``not [obj]`` that has a  ``__len__`` defined is to see\n        if the length is zero. However, for these composites, we'd like Assemblies, etc.\n        to be considered non-zero even if they don't have any blocks. This is important\n        for parent resolution, etc. 
If one of these objects exists, it is non-zero,\n        regardless of its contents.\n        \"\"\"\n        return True\n\n    def __add__(self, other):\n        \"\"\"Return a list of all children in this and another object.\"\"\"\n        return self.getChildren() + other.getChildren()\n\n    @property\n    def nuclideBases(self):\n        from armi.reactor.reactors import Reactor\n\n        r = self.getAncestor(lambda c: isinstance(c, Reactor))\n        if r:\n            return r.nuclideBases\n        else:\n            return nuclideBases.nuclideBases\n\n    def duplicate(self):\n        \"\"\"\n        Make a clean copy of this object.\n\n        Warning\n        -------\n        Be careful with inter-object dependencies. If one object contains a reference to another\n        object which contains links to the entire hierarchical tree, memory can fill up rather\n        rapidly. Weak references are designed to help with this problem.\n        \"\"\"\n        raise NotImplementedError\n\n    def clearCache(self):\n        \"\"\"Clear the cache so all new values are recomputed.\"\"\"\n        self.cached = {}\n        for child in self:\n            child.clearCache()\n\n    def _getCached(self, name):\n        \"\"\"\n        Obtain a value from the cache.\n\n        Cached values can be used to temporarily store frequently read but long-to-compute values.\n        The practice is generally discouraged because it's challenging to make sure to properly\n        invalidate the cache when the state changes.\n        \"\"\"\n        return self.cached.get(name, None)\n\n    def _setCache(self, name, val):\n        \"\"\"\n        Set a value in the cache.\n\n        See Also\n        --------\n        _getCached : returns a previously-cached value\n        \"\"\"\n        self.cached[name] = val\n\n    def copyParamsFrom(self, other):\n        \"\"\"\n        Overwrite this object's params with other object's.\n\n        Parameters\n        ----------\n        
other : ArmiObject\n            The object to copy params from\n        \"\"\"\n        self.p = other.p.__class__()\n        for p, val in other.p.items():\n            self.p[p] = val\n\n    def updateParamsFrom(self, new):\n        \"\"\"\n        Update this object's params with a new object's.\n\n        Parameters\n        ----------\n        new : ArmiObject\n            The object to copy params from\n        \"\"\"\n        for paramName, val in new.p.items():\n            self.p[paramName] = val\n\n    def iterChildren(\n        self,\n        deep=False,\n        generationNum=1,\n        predicate: Optional[Callable[[\"ArmiObject\"], bool]] = None,\n    ) -> Iterator[\"ArmiObject\"]:\n        \"\"\"Iterate over children of this object.\"\"\"\n        raise NotImplementedError()\n\n    def getChildren(self, deep=False, generationNum=1, includeMaterials=False) -> list[\"ArmiObject\"]:\n        \"\"\"Return the children of this object.\"\"\"\n        raise NotImplementedError()\n\n    def iterChildrenWithFlags(self, typeSpec: TypeSpec, exactMatch=False) -> Iterator[\"ArmiObject\"]:\n        \"\"\"Produce an iterator of children that have given flags.\"\"\"\n        return self.iterChildren(predicate=lambda o: o.hasFlags(typeSpec, exactMatch))\n\n    def getChildrenWithFlags(self, typeSpec: TypeSpec, exactMatch=False) -> list[\"ArmiObject\"]:\n        \"\"\"Get all children that have given flags.\"\"\"\n        return list(self.iterChildrenWithFlags(typeSpec, exactMatch))\n\n    def iterChildrenOfType(self, typeName: str) -> Iterator[\"ArmiObject\"]:\n        \"\"\"Iterate over children that have a specific input type name.\"\"\"\n        return self.iterChildren(predicate=lambda o: o.getType() == typeName)\n\n    def getChildrenOfType(self, typeName: str) -> list[\"ArmiObject\"]:\n        \"\"\"Produce a list of children that have a specific input type name.\"\"\"\n        return list(self.iterChildrenOfType(typeName))\n\n    def getComponents(self, 
typeSpec: TypeSpec = None, exact=False):\n        \"\"\"\n        Return all armi.reactor.component.Component within this Composite.\n\n        Parameters\n        ----------\n        typeSpec : TypeSpec\n            Component flags. Will restrict Components to specific ones matching the\n            flags specified.\n\n        exact : bool, optional\n            Only match exact component labels (names). If True, 'coolant' will not match\n            'interCoolant'.  This has no impact if compLabel is None.\n\n        Returns\n        -------\n        list of Component\n            items matching compLabel and exact criteria\n        \"\"\"\n        raise NotImplementedError()\n\n    def iterComponents(self, typeSpec: TypeSpec = None, exact=False):\n        \"\"\"Yield components one by one in a generator.\"\"\"\n        raise NotImplementedError()\n\n    def doChildrenHaveFlags(self, typeSpec: TypeSpec, deep=False):\n        \"\"\"\n        Generator that yields True if the next child has given flags.\n\n        Parameters\n        ----------\n        typeSpec : TypeSpec\n            Requested type of the child\n        \"\"\"\n        for c in self.getChildren(deep):\n            if c.hasFlags(typeSpec, exact=False):\n                yield True\n            else:\n                yield False\n\n    def containsAtLeastOneChildWithFlags(self, typeSpec: TypeSpec):\n        \"\"\"\n        Return True if any of the children are of a given type.\n\n        Parameters\n        ----------\n        typeSpec : TypeSpec\n            Requested type of the children\n\n        See Also\n        --------\n        self.doChildrenHaveFlags\n        self.containsOnlyChildrenWithFlags\n        \"\"\"\n        return any(self.doChildrenHaveFlags(typeSpec))\n\n    def containsOnlyChildrenWithFlags(self, typeSpec: TypeSpec):\n        \"\"\"\n        Return True if all of the children are of a given type.\n\n        Parameters\n        ----------\n        typeSpec : TypeSpec\n        
    Requested type of the children\n\n        See Also\n        --------\n        self.doChildrenHaveFlags\n        self.containsAtLeastOneChildWithFlags\n        \"\"\"\n        return all(self.doChildrenHaveFlags(typeSpec))\n\n    def copyParamsToChildren(self, paramNames):\n        \"\"\"\n        Copy param values in paramNames to all children.\n\n        Parameters\n        ----------\n        paramNames : list\n            List of param names to copy to children\n\n        \"\"\"\n        for paramName in paramNames:\n            myVal = self.p[paramName]\n            for c in self:\n                c.p[paramName] = myVal\n\n    @classmethod\n    def getParameterCollection(cls):\n        \"\"\"\n        Return a new instance of the specific ParameterCollection type associated with this object.\n\n        This has the same effect as ``obj.paramCollectionType()``. Getting a new\n        instance through a class method like this is useful in situations where the\n        ``paramCollectionType`` is not a top-level object and therefore cannot be\n        trivially pickled. Since we know that by the time we want to make any instances\n        of/unpickle a given ``ArmiObject``, such a class attribute will have been\n        created and associated. So, we use this top-level method to dig\n        dynamically down to the underlying parameter collection type.\n\n        .. 
impl:: Composites (and all ARMI objects) have parameter collections.\n            :id: I_ARMI_CMP_PARAMS\n            :implements: R_ARMI_CMP_PARAMS\n\n            This class method allows a user to obtain the\n            ``paramCollection`` object, which is the object containing the interface for\n            all parameters of an ARMI object.\n\n        See Also\n        --------\n        :py:meth:`armi.reactor.parameters.parameterCollections.ParameterCollection.__reduce__`\n        \"\"\"\n        return cls.paramCollectionType()\n\n    def getParamNames(self):\n        \"\"\"\n        Get a list of parameters keys that are available on this object.\n\n        Will not have any corner, edge, or timenode dependence.\n        \"\"\"\n        return sorted(k for k in self.p.keys() if not isinstance(k, tuple))\n\n    def nameContains(self, s):\n        \"\"\"\n        True if s is in this object's name (eg. nameContains('fuel')==True for 'testfuel'.\n\n        Notes\n        -----\n        Case insensitive (all gets converted to lower)\n        \"\"\"\n        name = self.name.lower()\n        if isinstance(s, list):\n            return any(n.lower() in name for n in s)\n        else:\n            return s.lower() in name\n\n    def getName(self):\n        \"\"\"Get composite name.\"\"\"\n        return self.name\n\n    def setName(self, name):\n        self.name = name\n\n    def hasFlags(self, typeID: TypeSpec, exact=False):\n        \"\"\"\n        Determine if this object is of a certain type.\n\n        .. impl:: Composites have queryable flags.\n            :id: I_ARMI_CMP_FLAG0\n            :implements: R_ARMI_CMP_FLAG\n\n            This method queries the flags (i.e. the ``typeID``) of the Composite for a\n            given type, returning a boolean representing whether or not the candidate\n            flag is present in this ArmiObject. Candidate flags cannot be passed as a\n            ``string`` type and must be of a type ``Flag``. 
If no flags exist in the\n            object then ``False`` is returned.\n\n            If a list of flags is provided, then all input flags will be\n            checked against the flags of the object. If exact is ``False``, then the\n            object must have at least one of candidates exactly. If it is ``True`` then\n            the object flags and candidates must match exactly.\n\n        Parameters\n        ----------\n        typeID : TypeSpec\n            Flags to test the object against, to see if it contains them. If a list is\n            provided, each element is treated as a \"candidate\" set of flags. Return True\n            if any of candidates match. When exact is True, the object must match one of\n            the candidates exactly. If exact is False, the object must have at least the\n            flags contained in a candidate for that candidate to be a match; extra flags\n            on the object are permitted. None matches all objects if exact is False, or\n            no objects if exact is True.\n\n        exact : bool, optional\n            Require the type of the object to fully match the provided typeID(s)\n\n        Returns\n        -------\n        hasFlags : bool\n            True if this object is in the typeID list.\n\n        Notes\n        -----\n        Type comparisons use bitwise comparisons using valid flags.\n\n        If you have an 'inner control' assembly, then this will evaluate True for the\n        INNER | CONTROL flag combination. If you just want all FUEL, simply use FUEL\n        with no additional qualifiers. 
For more complex comparisons, use bitwise\n        operations.\n\n        Always returns true if typeID is none and exact is False, allowing for default\n        parameters to be passed in when the method does not care about the object type.\n        If the typeID is none and exact is True, this will always return False.\n\n        Examples\n        --------\n        If you have an object with the ``INNER``, ``DRIVER``, and ``FUEL`` flags, then\n\n        >>> obj.getType()\n        [some integer]\n\n        >>> obj.hasFlags(Flags.FUEL)\n        True\n\n        >>> obj.hasFlags(Flags.INNER | Flags.DRIVER | Flags.FUEL)\n        True\n\n        >>> obj.hasFlags(Flags.OUTER | Flags.DRIVER | Flags.FUEL)\n        False\n\n        >>> obj.hasFlags(Flags.INNER | Flags.FUEL)\n        True\n\n        >>> obj.hasFlags(Flags.INNER | Flags.FUEL, exact=True)\n        False\n\n        >>> obj.hasFlags([Flags.INNER | Flags.DRIVER | Flags.FUEL, Flags.OUTER | Flags.DRIVER | Flags.FUEL], exact=True)\n        False\n\n        \"\"\"\n        if not typeID:\n            return not exact\n        if isinstance(typeID, str):\n            raise TypeError(\"Must pass Flags, or an iterable of Flags; Strings are no longer supported\")\n\n        elif not isinstance(typeID, Flags):\n            # list behavior gives a spec1 OR spec2 OR ... behavior.\n            return any(self.hasFlags(typeIDi, exact=exact) for typeIDi in typeID)\n\n        if not self.p.flags:\n            # default still set, or null flag. 
Done down here so we get proper error
When this work is complete, it will be strongly\n        discouraged, or even disallowed to change the type of an object after it has\n        been created, and ``setType()`` may be removed entirely.\n        \"\"\"\n        self.p.flags = flags or Flags.fromStringIgnoreErrors(typ)\n        self.p.type = typ\n\n    def getVolume(self):\n        return sum(child.getVolume() for child in self)\n\n    def getArea(self, cold=False):\n        return sum(child.getArea(cold) for child in self)\n\n    def _updateVolume(self):\n        \"\"\"Recompute and store volume.\"\"\"\n        children = self.getChildren()\n        # Derived shapes must come last so we temporarily change the order if we\n        # have one.\n        from armi.reactor.components import DerivedShape\n\n        for child in children[:]:\n            if isinstance(child, DerivedShape):\n                children.remove(child)\n                children.append(child)\n        for child in children:\n            child._updateVolume()\n\n    def getVolumeFractions(self):\n        \"\"\"\n        Return volume fractions of each child.\n\n        Sets volume or area of missing piece (like coolant) if it exists.  Caching would\n        be nice here.\n\n        Returns\n        -------\n        fracs : list\n            list of (component, volFrac) tuples\n\n        See Also\n        --------\n        test_block.Block_TestCase.test_consistentAreaWithOverlappingComponents\n\n        Notes\n        -----\n        void areas can be negative in gaps between fuel/clad/liner(s), but these\n        negative areas are intended to account for overlapping positive areas to insure\n        the total area of components inside the clad is accurate. 
See\n        test_block.Block_TestCase.test_consistentAreaWithOverlappingComponents\n        \"\"\"\n        children = self.getChildren()\n        numerator = [c.getVolume() for c in children]\n        denom = sum(numerator)\n        if denom == 0.0:\n            numerator = [c.getArea() for c in children]\n            denom = sum(numerator)\n\n        fracs = [(ci, nu / denom) for ci, nu in zip(children, numerator)]\n        return fracs\n\n    def getVolumeFraction(self):\n        \"\"\"Return the volume fraction that this object takes up in its parent.\"\"\"\n        if self.parent is not None:\n            for child, frac in self.parent.getVolumeFractions():\n                if child is self:\n                    return frac\n\n        raise ValueError(f\"No parent is defined for {self}. Cannot compute its volume fraction.\")\n\n    def getMaxArea(self):\n        \"\"\"\n        The maximum area of this object if it were totally full.\n\n        See Also\n        --------\n        armi.reactor.blocks.HexBlock.getMaxArea\n        \"\"\"\n        raise NotImplementedError()\n\n    def getMass(self, nuclideNames: Union[None, str, list[str]] = None) -> float:\n        \"\"\"\n        Determine the mass in grams of nuclide(s) and/or elements in this object.\n\n        .. 
impl:: Return mass of composite.\n            :id: I_ARMI_CMP_GET_MASS\n            :implements: R_ARMI_CMP_GET_MASS\n\n            This method allows for the querying of the mass of a Composite.\n            If the ``nuclideNames`` argument is included, it will filter for the mass\n            of those nuclide names and provide the sum of the mass of those nuclides.\n\n        Parameters\n        ----------\n        nuclideNames\n            The nuclide/element specifier to get the mass of in the object.\n            If omitted, total mass is returned.\n\n        Returns\n        -------\n        mass : float\n            The mass in grams.\n        \"\"\"\n        return sum(c.getMass(nuclideNames=nuclideNames) for c in self)\n\n    def getMassFrac(self, nucName):\n        \"\"\"\n        Get the mass fraction of a nuclide.\n\n        Notes\n        -----\n        If you need multiple mass fractions, use ``getMassFracs``.\n\n        \"\"\"\n        nuclideNames = self._getNuclidesFromSpecifier(nucName)\n        massFracs = self.getMassFracs()\n        return sum(massFracs.get(nucName, 0.0) for nucName in nuclideNames)\n\n    def getMicroSuffix(self):\n        raise NotImplementedError(\n            f\"Cannot get the suffix on {type(self)} objects. Only certain subclasses\"\n            \" of composite such as Blocks or Components have the concept of micro suffixes.\"\n        )\n\n    def _getNuclidesFromSpecifier(self, nucSpec: Union[None, str, list[str]]):\n        \"\"\"\n        Convert a nuclide specification to a list of valid nuclide/element keys.\n\n        nucSpec : nuclide specifier\n            Can be a string name of a nuclide or element, or a list of such strings.\n\n        This might get Zr isotopes when ZR is passed in if they exist, or it will get elemental ZR if that exists. 
When\n        expanding elements, all known nuclides are returned, not just the natural ones.\n        \"\"\"\n        allNuclidesHere = self.getNuclides()\n        if nucSpec is None:\n            return allNuclidesHere\n        elif isinstance(nucSpec, (str)):\n            nuclideNames = [nucSpec]\n        elif isinstance(nucSpec, list):\n            nuclideNames = nucSpec\n        else:\n            raise TypeError(f\"nucSpec={nucSpec} is an invalid specifier. It is a {type(nucSpec)}\")\n\n        # expand elementals if appropriate.\n        convertedNucNames = []\n        for nucName in nuclideNames:\n            if nucName in allNuclidesHere:\n                convertedNucNames.append(nucName)\n                continue\n            try:\n                # Need all nuclide bases, not just natural isotopics because, e.g. PU\n                # has no natural isotopics!\n                nucs = [\n                    nb.name\n                    for nb in self.nuclideBases.elements.bySymbol[nucName].nuclides\n                    if not isinstance(nb, nuclideBases.NaturalNuclideBase)\n                ]\n                convertedNucNames.extend(nucs)\n            except KeyError:\n                convertedNucNames.append(nucName)\n\n        return sorted(set(convertedNucNames))\n\n    def getMassFracs(self):\n        \"\"\"\n        Get mass fractions of all nuclides in object.\n\n        Ni [1/cm3] * Ai [g/mole]  ~ mass\n        \"\"\"\n        numDensities = self.getNumberDensities()\n        return densityTools.getMassFractions(numDensities)\n\n    def setMassFrac(self, nucName, val):\n        \"\"\"\n        Adjust the composition of this object so the mass fraction of nucName is val.\n\n        See Also\n        --------\n        setMassFracs : efficiently set multiple mass fractions at the same time.\n        \"\"\"\n        self.setMassFracs({nucName: val})\n\n    def setMassFracs(self, massFracs):\n        r\"\"\"\n        Apply one or more adjusted mass 
fractions.\n\n        This will adjust the total mass of the object, as the mass of everything\n        designated will change, while anything else will not.\n\n        .. math::\n\n            m_i = \\frac{M_i}{\\sum_j(M_j)}\n\n            (M_{j \\ne i} + M_i) m_i = M_i\n\n            \\frac{m_i M_{j \\ne i}}{1-m_i} = M_i\n\n            \\frac{m_i M_{j \\ne i}}{V(1-m_i)} = M_i/V = m_i \\rho\n\n            N_i = \\frac{m_i \\rho N_A}{A_i}\n\n            N_i = \\frac{m_i M_{j \\ne i} N_A}{V (1-m_i) {A_i}}\n\n            \\frac{M_{j \\ne i}}{V} = m_{j \\ne i} \\rho\n\n            m_{j \\ne i} = 1 - m_i\n\n        Notes\n        -----\n        You can't just change one mass fraction though, you have scale all others to\n        fill the remaining frac.\n\n        Parameters\n        ----------\n        massFracs: dict\n            nucName : new mass fraction pairs.\n\n        \"\"\"\n        rho = self.density()\n        if not rho:\n            raise ValueError(f\"Cannot set mass fractions on {self} because the mass density is zero.\")\n        oldMassFracs = self.getMassFracs()\n        totalFracSet = 0.0\n        for nucName, massFrac in massFracs.items():\n            self.setNumberDensity(\n                nucName,\n                (massFrac * rho * units.MOLES_PER_CC_TO_ATOMS_PER_BARN_CM / nucDir.getAtomicWeight(nucName)),\n            )\n            if nucName in oldMassFracs:\n                del oldMassFracs[nucName]\n            totalFracSet += massFrac\n\n        totalOther = sum(oldMassFracs.values())\n        if totalOther:\n            # we normalize the remaining mass fractions so their concentrations relative\n            # to each other stay constant.\n            normalizedOtherMassFracs = {nucNameOther: val / totalOther for nucNameOther, val in oldMassFracs.items()}\n            for nucNameOther, massFracOther in normalizedOtherMassFracs.items():\n                self.setNumberDensity(\n                    nucNameOther,\n                    (\n      
                  (1.0 - totalFracSet)\n                        * massFracOther\n                        * rho\n                        * units.MOLES_PER_CC_TO_ATOMS_PER_BARN_CM\n                        / nucDir.getAtomicWeight(nucNameOther)\n                    ),\n                )\n\n    def adjustMassFrac(\n        self,\n        nuclideToAdjust=None,\n        elementToAdjust=None,\n        nuclideToHoldConstant=None,\n        elementToHoldConstant=None,\n        val=0.0,\n    ):\n        r\"\"\"\n        Set the initial Zr mass fraction while maintaining Uranium enrichment, but general purpose.\n\n        Parameters\n        ----------\n        nuclideToAdjust : str, optional\n            The nuclide name to adjust\n        elementToAdjust : str, optional\n            The element to adjust. All isotopes in this element will adjust\n        nuclideToHoldconstant : str, optional\n            A nuclide to hold constant\n        elementToHoldConstant : str\n            Same\n        val : float\n            The value to set the adjust mass fraction to be.\n\n        Notes\n        -----\n        If you use this for two elements one after the other, you will probably get\n        something wrong. For instance, if you have U-10Zr and add Pu at 10% mass\n        fraction, the Zr fraction will drop below 10% of the total. The U-Zr fractions\n        will remain constant though. So this is mostly useful if you have U-10Zr and\n        want to change it to U-5Zr.\n\n        Theory:\n\n        Mass fraction of each nuclide to be adjusted = Ai where A1+A2+A...+AI = A\n        Mass fraction of nuclides to be held constant = Ci where sum = C\n        Mass fraction of other nuclides is Oi, sum = O\n        new value for A is v\n\n        A+C+O = 1.0\n        A'=v. 
If A>0, then A'=A*f1=v where f1 = v/A\n        If A=0, then Ai' = v/len(A), distributing the value evenly among isotopes\n\n        Now, to adjust the other nuclides, we know\n        A'+C+O' = 1.0 , or v+C+O' = 1.0\n        So, O'= 1.0-v-C\n        We can scale each Oi evenly by multiplying by the factor f2\n        Oi' = Oi * (1-C-v)/O = Oi * f2  where f2= (1-C-v)\n\n        See Also\n        --------\n        setMassFrac\n        getMassFrac\n        \"\"\"\n        self.clearCache()  # don't keep densities around or anything.\n        if val > 1.0 or val < 0:\n            raise ValueError(f\"Invalid mass fraction {val} for {nuclideToAdjust}/{elementToAdjust} in {self.getName()}\")\n        if not nuclideToAdjust and not elementToAdjust:\n            raise TypeError(\"Must provide a nuclide or element to adjust to adjustMassFrac\")\n\n        # sum of other nuclide mass fractions before change is Y\n        # need Yx+newZr = 1.0 where x is a scaling factor\n        # so x=(1-newZr)/Y\n\n        # determine nuclides to hold constant\n        nuclides = set(self.getNuclides())\n        if nuclideToHoldConstant or elementToHoldConstant:\n            # note that if these arguments are false, you'll get ALL nuclides in the\n            # material use material.getNuclides to get only non-zero ones.  use\n            # nucDir.getNuclides to get all. Intersect with current nuclides to\n            # eliminate double counting of element/isotopes\n            constantNuclides = set(\n                nucDir.getNuclideNames(nucName=nuclideToHoldConstant, elementSymbol=elementToHoldConstant)\n            ).intersection(nuclides)\n            constantSum = sum(self.getMassFrac(nucName) for nucName in constantNuclides)\n        else:\n            constantNuclides = []\n            constantSum = 0.0\n\n        # determine which nuclides we're adjusting.\n        # Rather than calling this material's getNuclides method, we call the\n        # nucDirectory to do this. 
this way, even zeroed-out nuclides will get in the mix\n        adjustNuclides = set(\n            nucDir.getNuclideNames(nucName=nuclideToAdjust, elementSymbol=elementToAdjust)\n        ).intersection(nuclides)\n        # get original mass frac A of those to be adjusted.\n        A = sum(self.getMassFrac(ni) for ni in adjustNuclides)\n\n        factor1 = val / A if A else None\n\n        # set the ones we're adjusting to their given value.\n        numNucs = len(adjustNuclides)\n        newA = 0.0\n        newMassFracs = {}\n        for nuc in adjustNuclides:\n            if factor1 is None:\n                # this is for when adjust nuclides have zero mass fractions. Like Zr.\n                # In this case, if there are multiple nuclides, we will distribute them\n                # evenly because we have no other indication of how to adjust them.\n                newMassFrac = val / numNucs\n            else:\n                # this is for when the nuclides we're adjusting already exist\n                # with non-zero mass fractions could be Pu vector.\n                newMassFrac = self.getMassFrac(nuc) * factor1\n            newA += newMassFrac\n            newMassFracs[nuc] = newMassFrac\n            if nuc == \"ZR\":\n                # custom parameter only set here to determine how to behave for UZr\n                # density, linear expansion. Can't let it roam with each mass frac\n                # 'cause then the density roams too and there are \"oscillations\"\n                self.zrFrac = newMassFrac\n\n        # error checking.\n        if abs(newA - val) > 1e-10:\n            runLog.error(f\"Adjust Mass fraction did not adjust {adjustNuclides} from {A} to {val}. 
It got to {newA}\")\n            raise RuntimeError(\"Failed to adjust mass fraction.\")\n\n        # determine the mass fraction of the nuclides that will be adjusted to\n        # accommodate the requested change\n        othersSum = 1.0 - A - constantSum\n        if not othersSum:\n            # no others to be modified.\n            factor2 = 1.0\n        else:\n            # use newA rather than val\n            factor2 = (1.0 - newA - constantSum) / othersSum\n\n        # change all the other nuclides using f2 factor\n        for nuc in self.getNuclides():\n            if nuc not in adjustNuclides and nuc not in constantNuclides:\n                newMassFracs[nuc] = self.getMassFrac(nuc) * factor2\n\n        self.setMassFracs(newMassFracs)\n\n    def adjustMassEnrichment(self, massFraction):\n        \"\"\"\n        Adjust the enrichment of this object.\n\n        If it's Uranium, enrichment means U-235 fraction.\n        If it's Boron, enrichment means B-10 fraction, etc.\n\n        Parameters\n        ----------\n        newEnrich : float\n            The new enrichment as a fraction.\n        \"\"\"\n        raise NotImplementedError\n\n    def getNumberDensity(self, nucName):\n        \"\"\"\n        Return the number density of a nuclide in atoms/barn-cm.\n\n        .. impl:: Get number density for a specific nuclide\n            :id: I_ARMI_CMP_NUC0\n            :implements: R_ARMI_CMP_NUC\n\n            This method queries the number density\n            of a specific nuclide within the Composite. It invokes the\n            ``getNuclideNumberDensities`` method for just the requested nuclide.\n\n        Notes\n        -----\n        This can get called very frequently and has to do volume computations so should\n        use some kind of caching that is invalidated by any temperature, composition,\n        etc. changes. 
Even with caching the volume calls are still somewhat expensive so\n        prefer the methods in see also.\n\n        See Also\n        --------\n        ArmiObject.getNuclideNumberDensities: More efficient for >1 specific nuc density is needed.\n        ArmiObject.getNumberDensities: More efficient for when all nucs in object is needed.\n        \"\"\"\n        return self.getNuclideNumberDensities([nucName])[0]\n\n    def getNuclideNumberDensities(self, nucNames):\n        \"\"\"Return a list of number densities in atoms/barn-cm for the nuc names requested.\n\n        .. impl:: Get number densities for specific nuclides.\n            :id: I_ARMI_CMP_NUC1\n            :implements: R_ARMI_CMP_NUC\n\n            This method provides the capability to query the volume weighted number\n            densities for a list of nuclides within a given Composite. It provides the\n            result in units of atoms/barn-cm. The volume weighting is accomplished by\n            multiplying the number densities within each child Composite by the volume\n            of the child Composite and dividing by the total volume of the Composite.\n        \"\"\"\n        volumes = np.array([c.getVolume() / (c.parent.getSymmetryFactor() if c.parent else 1.0) for c in self])  # c x 1\n        totalVol = volumes.sum()\n        if totalVol == 0.0:\n            # there are no children so no volume or number density\n            return [0.0] * len(nucNames)\n\n        nucDensForEachComp = np.array([c.getNuclideNumberDensities(nucNames) for c in self])  # c x n\n        return volumes.dot(nucDensForEachComp) / totalVol\n\n    def _getNdensHelper(self):\n        \"\"\"\n        Return a number densities dict with unexpanded lfps.\n\n        Notes\n        -----\n        This is implemented more simply on the component level.\n        \"\"\"\n        nucNames = self.getNuclides()\n        return dict(zip(nucNames, self.getNuclideNumberDensities(nucNames)))\n\n    def getNumberDensities(self, 
expandFissionProducts=False):\n        \"\"\"\n        Retrieve the number densities in atoms/barn-cm of all nuclides (or those requested) in the object.\n\n        .. impl:: Number density of composite is retrievable.\n            :id: I_ARMI_CMP_GET_NDENS\n            :implements: R_ARMI_CMP_GET_NDENS\n\n            This method provides a way for retrieving the number densities\n            of all nuclides within the Composite. It does this by leveraging the\n            ``_getNdensHelper`` method, which invokes the ``getNuclideNumberDensities``\n            method. This method considers the nuclides within each child Composite of\n            this composite (if they exist). If the ``expandFissionProducts`` flag is\n            ``True``, then the lumped fission products are expanded to include their\n            constituent elements via the ``_expandLFPs`` method.\n\n        Parameters\n        ----------\n        expandFissionProducts : bool (optional)\n            expand the fission product number densities\n\n        Returns\n        -------\n        numberDensities : dict\n            nucName keys, number density values (atoms/bn-cm)\n        \"\"\"\n        numberDensities = self._getNdensHelper()\n        if expandFissionProducts:\n            return self._expandLFPs(numberDensities)\n        return numberDensities\n\n    def _expandLFPs(self, numberDensities):\n        \"\"\"\n        Expand the LFPs on the numberDensities dictionary using this composite's\n        lumpedFissionProductCollection.\n        \"\"\"\n        lfpCollection = self.getLumpedFissionProductCollection()\n        if lfpCollection:  # may not have lfps in non-fuel\n            lfpDensities = lfpCollection.getNumberDensities(self)\n            numberDensities = {\n                nucName: numberDensities.get(nucName, 0.0) + lfpDensities.get(nucName, 0.0)\n                for nucName in set(numberDensities) | set(lfpDensities)\n            }\n            # remove LFPs from the result\n  
          for lfpName in lfpCollection:\n                numberDensities.pop(lfpName, None)\n        else:\n            lfpMass = sum(\n                dens\n                for name, dens in numberDensities.items()\n                if isinstance(self.nuclideBases.byName[name], nuclideBases.LumpNuclideBase)\n            )\n            if lfpMass:\n                raise RuntimeError(\n                    f\"Composite {self} is attempting to expand lumped fission products, but does not have \"\n                    \"an lfpCollection.\"\n                )\n        return numberDensities\n\n    def getChildrenWithNuclides(self, nucNames):\n        \"\"\"Return children that contain any nuclides in nucNames.\"\"\"\n        nucNames = set(nucNames)  # only convert to set once\n        return [child for child in self if nucNames.intersection(child.getNuclides())]\n\n    def getAncestor(self, fn):\n        \"\"\"\n        Return the first ancestor that satisfies the supplied predicate.\n\n        Parameters\n        ----------\n        fn : Function-like object\n            The predicate used to test the validity of an ancestor. Should return true\n            if the ancestor satisfies the caller's requirements\n        \"\"\"\n        if fn(self):\n            return self\n        if self.parent is None:\n            return None\n        else:\n            return self.parent.getAncestor(fn)\n\n    def getAncestorAndDistance(self, fn, _distance=0) -> Optional[Tuple[\"ArmiObject\", int]]:\n        \"\"\"\n        Return the first ancestor that satisfies the supplied predicate, along with how\n        many levels above self the ancestor lies.\n\n        Parameters\n        ----------\n        fn : Function-like object\n            The predicate used to test the validity of an ancestor. 
Should return true\n            if the ancestor satisfies the caller's requirements\n        \"\"\"\n        if fn(self):\n            return self, _distance\n        if self.parent is None:\n            return None\n        else:\n            return self.parent.getAncestorAndDistance(fn, _distance + 1)\n\n    def getAncestorWithFlags(self, typeSpec: TypeSpec, exactMatch=False):\n        \"\"\"\n        Return the first ancestor that matches the passed flags.\n\n        Parameters\n        ----------\n        typeSpec : TypeSpec\n            A collection of flags to match on candidate parents\n\n        exactMatch : bool\n            Whether the flags match should be exact\n\n        Returns\n        -------\n        armi.composites.ArmiObject\n            the first ancestor up the chain of parents that matches the passed flags\n\n        See Also\n        --------\n        ArmiObject.hasFlags()\n        \"\"\"\n        if self.hasFlags(typeSpec, exact=exactMatch):\n            return self\n\n        if self.parent is None:\n            return None\n        else:\n            return self.parent.getAncestorWithFlags(typeSpec, exactMatch=exactMatch)\n\n    def getTotalNDens(self):\n        \"\"\"\n        Return the total number density of all atoms in this object.\n\n        Returns\n        -------\n        nTot : float\n            Total ndens of all nuclides in atoms/bn-cm. Not homogenized.\n        \"\"\"\n        nFPsPerLFP = fissionProductModel.NUM_FISSION_PRODUCTS_PER_LFP  # LFPs count as two! Big deal in non BOL cases.\n        return sum(dens * (nFPsPerLFP if \"LFP\" in name else 1.0) for name, dens in self.getNumberDensities().items())\n\n    def setNumberDensity(self, nucName, val):\n        \"\"\"\n        Set the number density of this nuclide to this value.\n\n        This distributes atom density evenly across all children that contain nucName.\n        If the nuclide doesn't exist in any of the children, then that's actually an\n        error. 
This would only happen if some unnatural nuclide like Pu239 built up in\n        fresh UZr. That should be anticipated and dealt with elsewhere.\n        \"\"\"\n        activeChildren = self.getChildrenWithNuclides({nucName})\n        if not activeChildren:\n            activeVolumeFrac = 1.0\n            if val:\n                raise ValueError(\n                    f\"The nuclide {nucName} does not exist in any children of {self}; \"\n                    f\"cannot set its number density to {val}. The nuclides here are: {self.getNuclides()}\"\n                )\n        else:\n            activeVolumeFrac = sum(vf for ci, vf in self.getVolumeFractions() if ci in activeChildren)\n        dehomogenizedNdens = val / activeVolumeFrac  # scale up to dehomogenize on children.\n        for child in activeChildren:\n            child.setNumberDensity(nucName, dehomogenizedNdens)\n\n    def setNumberDensities(self, numberDensities):\n        \"\"\"\n        Set one or more multiple number densities. Reset any non-listed nuclides to 0.0.\n\n        Parameters\n        ----------\n        numberDensities : dict\n            nucName: ndens pairs.\n\n        Notes\n        -----\n        We'd like to not have to call setNumberDensity for each nuclide because we don't\n        want to call ``getVolumeFractions`` for each nuclide (it's inefficient).\n        \"\"\"\n        numberDensities.update({nuc: 0.0 for nuc in self.getNuclides() if nuc not in numberDensities})\n        self.updateNumberDensities(numberDensities)\n\n    def updateNumberDensities(self, numberDensities):\n        \"\"\"\n        Set one or more multiple number densities. 
Leaves unlisted number densities alone.\n\n        This changes a nuclide number density only on children that already have that\n        nuclide, thereby allowing, for example, actinides to stay in the fuel component\n        when setting block-level values.\n\n        The complication is that various number densities are distributed among various\n        components. This sets the number density for each nuclide evenly across all\n        components that contain it.\n\n        Parameters\n        ----------\n        numberDensities : dict\n            nucName: ndens pairs.\n\n        \"\"\"\n        children, volFracs = zip(*self.getVolumeFractions())\n        childNucs = tuple(set(child.getNuclides()) for child in children)\n\n        allDehomogenizedNDens = collections.defaultdict(dict)\n\n        # compute potentially-different homogenization factors for each child.  evenly\n        # distribute entire number density over the subset of active children.\n        for nuc, dens in numberDensities.items():\n            # get \"active\" indices, i.e., indices of children containing nuc\n            # NOTE: this is one of the rare instances in which (imo), using explicit\n            # indexing clarifies subsequent code since it's not necessary to zip +\n            # filter + extract individual components (just extract by filtered index).\n            indiciesToSet = tuple(i for i, nucsInChild in enumerate(childNucs) if nuc in nucsInChild)\n\n            if not indiciesToSet:\n                if dens == 0:\n                    # density is zero, skip\n                    continue\n\n                # This nuc doesn't exist in any children but is to be set.\n                # Evenly distribute it everywhere.\n                childrenToSet = children\n                dehomogenizedNDens = dens / sum(volFracs)\n\n            else:\n                childrenToSet = tuple(children[i] for i in indiciesToSet)\n                dehomogenizedNDens = dens / sum(volFracs[i] for 
i in indiciesToSet)\n\n            for child in childrenToSet:\n                allDehomogenizedNDens[child][nuc] = dehomogenizedNDens\n\n        # apply the child-dependent ndens vectors to the children\n        for child, ndens in allDehomogenizedNDens.items():\n            child.updateNumberDensities(ndens)\n\n    def changeNDensByFactor(self, factor):\n        \"\"\"Change the number density of all nuclides within the object by a multiplicative factor.\"\"\"\n        densitiesScaled = {nuc: val * factor for nuc, val in self.getNumberDensities().items()}\n        self.setNumberDensities(densitiesScaled)\n        # Update detailedNDens\n        if self.p.detailedNDens is not None:\n            self.p.detailedNDens *= factor\n        # Update pinNDens\n        if self.p.pinNDens is not None:\n            self.p.pinNDens *= factor\n\n    def clearNumberDensities(self):\n        \"\"\"\n        Reset all the number densities to nearly zero.\n\n        Set to almost zero, so components remember which nuclides are where.\n        \"\"\"\n        ndens = {nuc: units.TRACE_NUMBER_DENSITY for nuc in self.getNuclides()}\n        self.setNumberDensities(ndens)\n\n    def density(self):\n        \"\"\"Returns the mass density of the object in g/cc.\"\"\"\n        density = 0.0\n        for nuc in self.getNuclides():\n            density += (\n                self.getNumberDensity(nuc) * nucDir.getAtomicWeight(nuc) / units.MOLES_PER_CC_TO_ATOMS_PER_BARN_CM\n            )\n\n        return density\n\n    def getNumberOfAtoms(self, nucName):\n        \"\"\"Return the number of atoms of nucName in this object.\"\"\"\n        numDens = self.getNumberDensity(nucName)  # atoms/bn-cm\n        return numDens * self.getVolume() / units.CM2_PER_BARN\n\n    def getLumpedFissionProductCollection(self):\n        \"\"\"\n        Get collection of LFP objects. 
Will work for global or block-level LFP models.\n\n        Returns\n        -------\n        lfps : LumpedFissionProduct\n            lfpName keys , lfp object values\n\n        See Also\n        --------\n        armi.physics.neutronics.fissionProductModel.lumpedFissionProduct.LumpedFissionProduct : LFP object\n        \"\"\"\n        return self._lumpedFissionProducts\n\n    def setLumpedFissionProducts(self, lfpCollection):\n        self._lumpedFissionProducts = lfpCollection\n\n    def setChildrenLumpedFissionProducts(self, lfpCollection):\n        for c in self:\n            c.setLumpedFissionProducts(lfpCollection)\n\n    def getFissileMassEnrich(self):\n        \"\"\"Returns the fissile mass enrichment.\"\"\"\n        hm = self.getHMMass()\n        if hm > 0:\n            return self.getFissileMass() / hm\n        else:\n            return 0.0\n\n    def getUraniumNumEnrich(self):\n        \"\"\"Returns fissile uranium number fraction.\"\"\"\n        uraniumNucs = self._getNuclidesFromSpecifier(\"U\")\n        totalU = sum(self.getNuclideNumberDensities(uraniumNucs))\n        if totalU < 1e-10:\n            return 0.0\n        fissileU = sum(self.getNuclideNumberDensities([\"U233\", \"U235\"]))\n\n        return fissileU / totalU\n\n    def calcTotalParam(\n        self,\n        param,\n        objs=None,\n        volumeIntegrated=False,\n        addSymmetricPositions=False,\n        typeSpec: TypeSpec = None,\n        generationNum=1,\n        calcBasedOnFullObj=False,\n    ):\n        \"\"\"\n        Sums up a parameter throughout the object's children or list of objects.\n\n        Parameters\n        ----------\n        param : str\n            Name of the block parameter to sum\n\n        objs : iterable, optional\n            A list of objects to sum over. 
If none, all children in object will be used\n\n        volumeIntegrated : bool, optional\n            Integrate over volume\n\n        addSymmetricPositions : bool, optional\n            If True, will multiply by the symmetry factor of the core (3 for 1/3 models,\n            1 for full core models)\n\n        typeSpec : TypeSpec\n            object types to restrict to\n\n        generationNum : int, optional\n            Which generation to consider. 1 means direct children, 2 means children of\n            children. Default: Just return direct children.\n\n        calcBasedOnFullObj : bool, optional\n            Some assemblies or blocks, such as the center assembly in a third core\n            model, are not modeled as full assemblies or blocks. In the third core model\n            objects at these positions are modeled as having 1/3 the volume and thus 1/3\n            the power. Setting this argument to True will apply the full value of the\n            parameter as if it was a full block or assembly.\n        \"\"\"\n        tot = 0.0\n        if objs is None:\n            objs = self.getChildren(generationNum=generationNum)\n\n        if addSymmetricPositions:\n            if calcBasedOnFullObj:\n                raise ValueError(\n                    \"AddSymmetricPositions is Incompatible with calcBasedOnFullObj. 
Will result in double counting.\"\n                )\n            try:\n                coreMult = self.powerMultiplier\n            except AttributeError:\n                coreMult = self.parent.powerMultiplier\n            if not coreMult:\n                raise ValueError(f\"powerMultiplier is equal to {coreMult}\")\n        else:\n            coreMult = 1.0\n\n        for a in objs:\n            if not a.hasFlags(typeSpec):\n                continue\n\n            mult = a.getVolume() if volumeIntegrated else 1.0\n            if calcBasedOnFullObj:\n                mult *= a.getSymmetryFactor()\n\n            tot += a.p[param] * mult\n\n        return tot * coreMult\n\n    def calcAvgParam(\n        self,\n        param,\n        typeSpec: TypeSpec = None,\n        weightingParam=None,\n        volumeAveraged=True,\n        absolute=True,\n        generationNum=1,\n    ):\n        r\"\"\"\n        Calculate the child-wide average of a parameter.\n\n        Parameters\n        ----------\n        param : str\n            The ARMI block parameter that you want the average from\n\n        typeSpec : TypeSpec\n            The child types that should be included in the calculation. Restrict average\n            to a certain child type with this parameter.\n\n        weightingParam : None or str, optional\n             An optional block param that the average will be weighted against\n\n        volumeAveraged : bool, optional\n            volume (or height, or area) average this param\n\n        absolute : bool, optional\n            Returns the average of the absolute value of param\n\n        generationNum : int, optional\n            Which generation to average over (1 for children, 2 for grandchildren)\n\n\n        The weighted sum is:\n\n        .. 
math::\n\n            \\left<\\text{x}\\right> = \\frac{\\sum_{i} x_i w_i}{\\sum_i w_i}\n\n        where :math:`i` is each child, :math:`x_i` is the param value of the i-th child,\n        and :math:`w_i` is the weighting param value of the i-th child.\n\n        Warning\n        -------\n        If a param is unset/zero on any of the children, this will be included in the\n        average and may significantly perturb results.\n\n        Returns\n        -------\n        float\n            The average parameter value.\n        \"\"\"\n        total = 0.0\n        weightSum = 0.0\n        for child in self.getChildren(generationNum=generationNum):\n            if child.hasFlags(typeSpec):\n                if weightingParam:\n                    weight = child.p[weightingParam]\n                    if weight < 0:\n                        # Just for conservatism, do not allow negative weights.\n                        raise ValueError(f\"Weighting value ({weightingParam},{weight}) cannot be negative.\")\n                else:\n                    weight = 1.0\n\n                if volumeAveraged:\n                    weight *= child.getVolume()\n\n                weightSum += weight\n                if absolute:\n                    total += abs(child.p[param]) * weight\n                else:\n                    total += child.p[param] * weight\n        if not weightSum:\n            raise ValueError(\n                f\"Cannot calculate {weightingParam}-weighted average of {param} in {self}. \"\n                f\"Weights sum to zero. 
typeSpec is {typeSpec}\"\n            )\n        return total / weightSum\n\n    def getMaxParam(\n        self,\n        param,\n        typeSpec: TypeSpec = None,\n        absolute=True,\n        generationNum=1,\n        returnObj=False,\n    ):\n        \"\"\"\n        Find the maximum value for the parameter in this container.\n\n        Parameters\n        ----------\n        param : str\n            block parameter that will be sought.\n\n        typeSpec : TypeSpec\n            restricts the search to cover a variety of block types.\n\n        absolute : bool\n            looks for the largest magnitude value, regardless of sign, default: true\n\n        returnObj : bool, optional\n            If true, returns the child object as well as the value.\n\n        Returns\n        -------\n        maxVal : float\n            The maximum value of the parameter asked for\n        obj : child object\n            The object that has the max (only returned if ``returnObj==True``)\n        \"\"\"\n        compartor = lambda x, y: x > y\n        return self._minMaxHelper(\n            param,\n            typeSpec,\n            absolute,\n            generationNum,\n            returnObj,\n            -float(\"inf\"),\n            compartor,\n        )\n\n    def getMinParam(\n        self,\n        param,\n        typeSpec: TypeSpec = None,\n        absolute=True,\n        generationNum=1,\n        returnObj=False,\n    ):\n        \"\"\"\n        Find the minimum value for the parameter in this container.\n\n        See Also\n        --------\n        getMaxParam : details\n        \"\"\"\n        compartor = lambda x, y: x < y\n        return self._minMaxHelper(param, typeSpec, absolute, generationNum, returnObj, float(\"inf\"), compartor)\n\n    def _minMaxHelper(\n        self,\n        param,\n        typeSpec: TypeSpec,\n        absolute,\n        generationNum,\n        returnObj,\n        startingNum,\n        compartor,\n    ):\n        \"\"\"Helper for 
getMinParam and getMaxParam.\"\"\"\n        maxP = (startingNum, None)\n        realVal = 0.0\n        objs = self.getChildren(generationNum=generationNum)\n        for b in objs:\n            if b.hasFlags(typeSpec):\n                try:\n                    val = b.p[param]\n                except parameters.UnknownParameterError:\n                    # No worries; not all Composite types are guaranteed to have the\n                    # relevant parameter. It might be a good idea to more strongly\n                    # type-check this, perhaps by passing the paramDef,\n                    # rather than its name?\n                    continue\n                if val is None:\n                    # Neither bigger or smaller than anything (also illegal in Python3)\n                    continue\n                if absolute:\n                    absVal = abs(val)\n                else:\n                    absVal = val\n                if compartor(absVal, maxP[0]):\n                    maxP = (absVal, b)\n                    realVal = val\n        if returnObj:\n            return realVal, maxP[1]\n        else:\n            return realVal\n\n    def getChildParamValues(self, param):\n        \"\"\"Get the child parameter values in a numpy array.\"\"\"\n        return np.array([child.p[param] for child in self])\n\n    def isFuel(self):\n        \"\"\"True if this is a fuel block.\"\"\"\n        return self.hasFlags(Flags.FUEL)\n\n    def containsHeavyMetal(self):\n        \"\"\"True if this has HM.\"\"\"\n        return any(nucDir.isHeavyMetal(nucName) for nucName in self.getNuclides())\n\n    def getNuclides(self):\n        \"\"\"\n        Determine which nuclides are present in this armi object.\n\n        Returns\n        -------\n        list\n            List of nuclide names that exist in this\n        \"\"\"\n        nucs = set()\n        for child in self:\n            nucs.update(child.getNuclides())\n        return nucs\n\n    def getFissileMass(self):\n 
       \"\"\"Returns fissile mass in grams.\"\"\"\n        return self.getMass(nuclideBases.NuclideBase.fissile)\n\n    def getHMMass(self):\n        \"\"\"Returns heavy metal mass in grams.\"\"\"\n        nucs = []\n        for nucName in self.getNuclides():\n            if nucDir.isHeavyMetal(nucName):\n                nucs.append(nucName)\n\n        mass = self.getMass(nucs)\n        return mass\n\n    def getHMMoles(self):\n        \"\"\"\n        Get the number of moles of heavy metal in this object.\n\n        Notes\n        -----\n        If an object is on a symmetry line, the volume reported by getVolume\n        is reduced to reflect that the block is not wholly within the reactor. This\n        reduction in volume reduces the reported HM moles.\n        \"\"\"\n        return self.getHMDens() / units.MOLES_PER_CC_TO_ATOMS_PER_BARN_CM * self.getVolume()\n\n    def getHMDens(self):\n        \"\"\"\n        Compute the total heavy metal density of this object.\n\n        Returns\n        -------\n        hmDens : float\n            The total heavy metal number (atom) density in atoms/bn-cm.\n        \"\"\"\n        hmNuclides = [nuclide for nuclide in self.getNuclides() if nucDir.isHeavyMetal(nuclide)]\n        hmDens = np.sum(self.getNuclideNumberDensities(hmNuclides))\n        return hmDens\n\n    def getFPMass(self):\n        \"\"\"Returns mass of fission products in this block in grams.\"\"\"\n        nucs = []\n        for nucName in self.getNuclides():\n            if \"LFP\" in nucName:\n                nucs.append(nucName)\n        mass = self.getMass(nucs)\n        return mass\n\n    def getFuelMass(self):\n        \"\"\"Returns mass of fuel in grams.\"\"\"\n        return sum((c.getFuelMass() for c in self))\n\n    def constituentReport(self):\n        \"\"\"A print out of some pertinent constituent information.\"\"\"\n        from armi.utils import iterables\n\n        elementz = self.nuclideBases.elements\n\n        rows = [[\"Constituent\", 
\"HMFrac\", \"FuelFrac\"]]\n        columns = [-1, self.getHMMass(), self.getFuelMass()]\n\n        for base_ele in [\"U\", \"PU\"]:\n            total = sum([self.getMass(nuclide.name) for nuclide in elementz.bySymbol[base_ele]])\n            rows.append([base_ele, total, total])\n\n        fp_total = self.getFPMass()\n        rows.append([\"FP\", fp_total, fp_total])\n\n        ma_nuclides = iterables.flatten(\n            [ele.nuclides for ele in [elementz.byZ[key] for key in elementz.byZ.keys() if key > 94]]\n        )\n        ma_total = sum([self.getMass(nuclide.name) for nuclide in ma_nuclides])\n        rows.append([\"MA\", ma_total, ma_total])\n\n        for i, row in enumerate(rows):\n            for j, entry in enumerate(row):\n                try:\n                    percent = entry / columns[j] * 100.0\n                    rows[i][j] = percent or \"-\"\n                except ZeroDivisionError:\n                    rows[i][j] = \"NaN\"\n                except TypeError:\n                    pass  # trying to divide the string name\n\n        return \"\\n\".join([\"{:<14}{:<10}{:<10}\".format(*row) for row in rows])\n\n    def getAtomicWeight(self):\n        r\"\"\"\n        Calculate the atomic weight of this object in g/mole of atoms.\n\n        .. warning:: This is not the molecular weight, which is grams per mole of\n            molecules (grams/gram-molecule). That requires knowledge of the chemical\n            formula. Don't be surprised when you run this on UO2 and find it to be 90;\n            there are a lot of Oxygen atoms in UO2.\n\n        .. 
math::\n\n            A =  \\frac{\\sum_i N_i A_i }{\\sum_i N_i}\n        \"\"\"\n        numerator = 0.0\n        denominator = 0.0\n        numDensities = self.getNumberDensities()\n\n        for nucName, nDen in numDensities.items():\n            atomicWeight = self.nuclideBases.byName[nucName].weight\n            numerator += atomicWeight * nDen\n            denominator += nDen\n\n        return numerator / denominator\n\n    def getMasses(self):\n        \"\"\"\n        Return a dictionary of masses indexed by their nuclide names.\n\n        Notes\n        -----\n        Implemented to get number densities and then convert to mass\n        because getMass is too slow on a large tree.\n        \"\"\"\n        numDensities = self.getNumberDensities()\n        vol = self.getVolume()\n        return {nucName: densityTools.getMassInGrams(nucName, vol, ndens) for nucName, ndens in numDensities.items()}\n\n    def getIntegratedMgFlux(self, adjoint=False, gamma=False):\n        raise NotImplementedError\n\n    def getMgFlux(self, adjoint=False, average=False, gamma=False):\n        \"\"\"\n        Return the multigroup neutron flux in [n/cm^2/s].\n\n        The first entry is the first energy group (fastest neutrons). Each additional group is the next energy group, as\n        set in the ISOTXS library.\n\n        On blocks, it is stored integrated over volume on <block>.p.mgFlux\n\n        Parameters\n        ----------\n        adjoint : bool, optional\n            Return adjoint flux instead of real\n        average : bool, optional\n            If true, will return average flux between latest and previous. 
Doesn't work\n            for pin detailed yet\n        gamma : bool, optional\n            Whether to return the neutron flux or the gamma flux.\n\n        Returns\n        -------\n        flux : np.ndarray\n            multigroup neutron flux in [n/cm^2/s]\n        \"\"\"\n        if average:\n            raise NotImplementedError(\n                f\"{self.__class__} class has no method for producing average MG flux -- tryusing blocks\"\n            )\n\n        volume = self.getVolume()\n        return self.getIntegratedMgFlux(adjoint=adjoint, gamma=gamma) / volume\n\n    def removeMass(self, nucName, mass):\n        self.addMass(nucName, -mass)\n\n    def addMass(self, nucName, mass):\n        \"\"\"Add mass to a particular nuclide.\n\n        Parameters\n        ----------\n        nucName : str\n            nuclide name e.g. 'U235'\n\n        mass : float\n            mass in grams of nuclide to be added to this armi Object\n        \"\"\"\n        volume = self.getVolume()\n        addedNumberDensity = densityTools.calculateNumberDensity(nucName, mass, volume)\n        self.setNumberDensity(nucName, self.getNumberDensity(nucName) + addedNumberDensity)\n\n    def addMasses(self, masses):\n        \"\"\"\n        Adds a vector of masses.\n\n        Parameters\n        ----------\n        masses : dict\n            a dictionary of masses (g) indexed by nucNames (string)\n        \"\"\"\n        for nucName, mass in masses.items():\n            if mass:\n                self.addMass(nucName, mass)\n\n    def setMass(self, nucName, mass):\n        \"\"\"\n        Set the mass in an object by adjusting the ndens of the nuclides.\n\n        Parameters\n        ----------\n        nucName : str\n            Nuclide name to set mass of\n        mass : float\n            Mass in grams to set.\n\n        \"\"\"\n        d = calculateNumberDensity(nucName, mass, self.getVolume())\n        self.setNumberDensity(nucName, d)\n\n    def setMasses(self, masses):\n        
\"\"\"\n        Set a vector of masses.\n\n        Parameters\n        ----------\n        masses : dict\n            a dictionary of masses (g) indexed by nucNames (string)\n        \"\"\"\n        self.clearNumberDensities()\n        for nucName, mass in masses.items():\n            self.setMass(nucName, mass)\n\n    def getSymmetryFactor(self):\n        \"\"\"\n        Return a scaling factor due to symmetry on the area of the object or its children.\n\n        See Also\n        --------\n        armi.reactor.blocks.HexBlock.getSymmetryFactor : concrete implementation\n        \"\"\"\n        return 1.0\n\n    def getBoundingIndices(self):\n        \"\"\"\n        Find the 3-D index bounds (min, max) of all children in the spatial grid of this object.\n\n        Returns\n        -------\n        bounds : tuple\n            ((minI, maxI), (minJ, maxJ), (minK, maxK))\n        \"\"\"\n        minI = minJ = minK = float(\"inf\")\n        maxI = maxJ = maxK = -float(\"inf\")\n        for obj in self:\n            i, j, k = obj.spatialLocator.getCompleteIndices()\n            if i >= maxI:\n                maxI = i\n            if i <= minI:\n                minI = i\n\n            if j >= maxJ:\n                maxJ = j\n            if j <= minJ:\n                minJ = j\n\n            if k >= maxK:\n                maxK = k\n            if k <= minK:\n                minK = k\n\n        return ((minI, maxI), (minJ, maxJ), (minK, maxK))\n\n    def getComponentNames(self):\n        r\"\"\"\n        Get all unique component names of this Composite.\n\n        Returns\n        -------\n        set or str\n            A set of all unique component names found in this Composite.\n        \"\"\"\n        return set(c.getName() for c in self.iterComponents())\n\n    def getComponentsOfShape(self, shapeClass):\n        \"\"\"\n        Return list of components in this block of a particular shape.\n\n        Parameters\n        ----------\n        shapeClass : Component\n    
        The class of component, e.g. Circle, Helix, Hexagon, etc.\n\n        Returns\n        -------\n        param : list\n            List of components in this block that are of the given shape.\n        \"\"\"\n        return [c for c in self.iterComponents() if isinstance(c, shapeClass)]\n\n    def getComponentsOfMaterial(self, material=None, materialName=None):\n        \"\"\"\n        Return list of components in this block that are made of a particular material.\n\n        Only one of the selectors may be used\n\n        Parameters\n        ----------\n        material : armi.materials.material.Material, optional\n            The material to match\n        materialName : str, optional\n            The material name to match.\n\n        Returns\n        -------\n        componentsWithThisMat : list\n\n        \"\"\"\n        if materialName is None:\n            materialName = material.getName()\n        else:\n            assert material is None, \"Cannot call with more than one selector. 
Choose one or the other.\"\n\n        componentsWithThisMat = []\n        for c in self.iterComponents():\n            if c.getProperties().getName() == materialName:\n                componentsWithThisMat.append(c)\n        return componentsWithThisMat\n\n    def hasComponents(self, typeSpec: Union[TypeSpec, List[TypeSpec]], exact=False):\n        \"\"\"\n        Return true if components matching all TypeSpec exist in this object.\n\n        Parameters\n        ----------\n        typeSpec : Flags or iterable of Flags\n            Component flags to check for\n        \"\"\"\n        # Wrap the typeSpec in a tuple if we got a scalar\n        try:\n            typeSpec = iter(typeSpec)\n        except TypeError:\n            typeSpec = (typeSpec,)\n\n        return all(self.getComponents(t, exact) for t in typeSpec)\n\n    def getComponentByName(self, name: str) -> \"Component\":\n        \"\"\"\n        Gets a particular component from this object, based on its name.\n\n        Parameters\n        ----------\n        name\n            The blueprint name of the component to return\n\n        Returns\n        -------\n        Component, c, whose c.name matches name.\n        \"\"\"\n        components = [c for c in self.iterComponents() if c.name == name]\n        nComp = len(components)\n        if nComp == 0:\n            return None\n        elif nComp > 1:\n            raise ValueError(f\"More than one component named '{name}' in {self}\")\n        else:\n            return components[0]\n\n    def getComponent(self, typeSpec: TypeSpec, exact: bool = False, quiet: bool = True) -> Optional[\"Component\"]:\n        \"\"\"\n        Get a particular component from this object.\n\n        Be careful with multiple similar names in one object.\n\n        Parameters\n        ----------\n        typeSpec : flags.Flags or list of Flags\n            The type specification of the component to return\n        exact : boolean, optional\n            Demand that the component 
flags be exactly equal to the typespec. Default: False\n        quiet : boolean, optional\n            Log if the component is not found. Default: True\n\n        Returns\n        -------\n        Component : The component that matches the criteria or None\n\n        Raises\n        ------\n        ValueError: more than one Component matches the typeSpec\n        \"\"\"\n        results = self.getComponents(typeSpec, exact=exact)\n        if len(results) == 1:\n            return results[0]\n        elif not results:\n            if not quiet:\n                runLog.debug(\n                    f\"No component matched {typeSpec} in {self}. Returning None\",\n                    single=True,\n                    label=f\"None component returned instead of {typeSpec}\",\n                )\n            return None\n        else:\n            raise ValueError(f\"Multiple components match in {self} match typeSpec {typeSpec}: {results}\")\n\n    def getNumComponents(self, typeSpec: TypeSpec, exact=False):\n        \"\"\"\n        Get the number of components that have these flags, taking into account multiplicity. Useful\n        for getting nPins even when there are pin detailed cases.\n\n        Parameters\n        ----------\n        typeSpec : Flags\n            Expected flags of the component to get. e.g. 
Flags.FUEL\n\n        Returns\n        -------\n        total : int\n            the number of components of this type in this object, including multiplicity.\n        \"\"\"\n        total = 0\n        for c in self.iterComponents(typeSpec, exact):\n            total += int(c.getDimension(\"mult\"))\n        return total\n\n    def setComponentDimensionsReport(self):\n        \"\"\"Makes a summary of the dimensions of the components in this object.\"\"\"\n        reportGroups = []\n        for c in self.iterComponents():\n            reportGroups.append(c.setDimensionReport())\n\n        return reportGroups\n\n    def expandAllElementalsToIsotopics(self):\n        reactorNucs = self.getNuclides()\n        for elemental in self.nuclideBases.where(\n            lambda nb: isinstance(nb, nuclideBases.NaturalNuclideBase) and nb.name in reactorNucs\n        ):\n            self.expandElementalToIsotopics(elemental)\n\n    def expandElementalToIsotopics(self, elementalNuclide):\n        \"\"\"\n        Expands the density of a specific elemental nuclides to its natural isotopics.\n\n        Parameters\n        ----------\n        elementalNuclide : :class:`armi.nucDirectory.nuclideBases.NaturalNuclide` natural nuclide to\n            replace.\n        \"\"\"\n        natName = elementalNuclide.name\n        for component in self.iterComponents():\n            elementalDensity = component.getNumberDensity(natName)\n            if elementalDensity == 0.0:\n                continue\n\n            keepIndex = np.where(component.p.nuclides != natName.encode())[0]\n            newNuclides = [nuc.decode() for nuc in component.p.nuclides[keepIndex]]\n            newNDens = component.p.numberDensities[keepIndex]\n            component.updateNumberDensities(dict(zip(newNuclides, newNDens)), wipe=True)\n\n            # add in isotopics\n            for natNuc in elementalNuclide.getNaturalIsotopics():\n                component.setNumberDensity(natNuc.name, elementalDensity * 
natNuc.abundance)\n\n    def getAverageTempInC(self, typeSpec: TypeSpec = None, exact=False):\n        \"\"\"Return the average temperature of the ArmiObject in C by averaging all components.\"\"\"\n        tempNumerator = 0.0\n        totalVol = 0.0\n        for component in self.iterComponents(typeSpec, exact):\n            vol = component.getVolume()\n            tempNumerator += component.temperatureInC * vol\n            totalVol += vol\n\n        return tempNumerator / totalVol\n\n    def resolveLinkedDims(self, components):\n        \"\"\"Resolve link strings to links on all child components.\"\"\"\n        for component in self.iterComponents():\n            component.resolveLinkedDims(components)\n\n    def getDominantMaterial(self, typeSpec: TypeSpec = None, exact=False):\n        \"\"\"\n        Return the first sample of the most dominant material (by volume) in this object.\n\n        Parameters\n        ----------\n        typeSpec : Flags or iterable of Flags, optional\n            The types of components to consider (e.g. ``[Flags.FUEL, Flags.CONTROL]``)\n        exact : bool, optional\n            Whether or not the TypeSpec is exact\n\n        Returns\n        -------\n        mat : armi.materials.material.Material\n             the first instance of the most dominant material (by volume) in this object.\n\n        See Also\n        --------\n        getComponentsOfMaterial\n            Gets components that are made of a particular material\n        gatherMaterialsByVolume\n            Classifies all materials by volume\n        \"\"\"\n        return getDominantMaterial([self], typeSpec, exact)\n\n\nclass Composite(ArmiObject):\n    \"\"\"\n    An ArmiObject that has children.\n\n    This is a fundamental ARMI state object that generally represents some piece of the\n    nuclear reactor that is made up of other smaller pieces. 
This object can cache\n    information about its children to help performance.\n\n    **Details about spatial representation**\n\n    Spatial representation of a ``Composite`` is handled through a combination of the\n    ``spatialLocator`` and ``spatialGrid`` parameters. The ``spatialLocator`` is a numpy\n    triple representing either:\n\n    1. Indices in the parent's ``spatialGrid`` (for lattices, etc.), used when the dtype is int.\n\n    2. Coordinates in the parent's universe in cm, used when the dtype is float.\n\n    The top parent of any composite must have a coordinate-based ``spatialLocator``. For\n    example, a Reactor an a Pump should both have coordinates based on how far apart\n    they are.\n\n    The traversal of indices and grids is recursive. The Reactor/Core/Assembly/Block\n    model is handled by putting a 2-D grid (either Theta-R, Hex, or Cartesian) on the\n    Core and individual 1-D Z-meshes on the assemblies. Then, Assemblies have 2-D\n    spatialLocators (i,j,0) and Blocks have 1-D spatiaLocators (0,0,k). These get added\n    to form the global indices. This way, if an assembly is moved, all the blocks\n    immediately and naturally move with it. Individual children may have\n    coordinate-based spatialLocators mixed with siblings in a grid. This allows mixing\n    grid-representation with explicit representation, often useful in advanced\n    assemblies and thermal reactors.\n\n    The traversal of indices and grids is recursive. The\n    Reactor/Core/Assembly/Block model is handled by putting a 2-D grid (either\n    Theta-R, Hex, or Cartesian) on the Core and individual 1-D Z-meshes on the\n    assemblies. Then, Assemblies have 2-D spatialLocators (i,j,0) and Blocks\n    have 1-D spatiaLocators (0,0,k). These get added to form the global indices.\n    This way, if an assembly is moved, all the blocks immediately and naturally\n    move with it. 
Individual children may have coordinate-based spatialLocators\n    mixed with siblings in a grid. This allows mixing grid-representation with\n    explicit representation, often useful in advanced assemblies and thermal\n    reactors.\n\n    .. impl:: Composites are a physical part of the reactor in a hierarchical data model.\n        :id: I_ARMI_CMP0\n        :implements: R_ARMI_CMP\n\n        An ARMI reactor model is composed of collections of ARMIObject objects. This\n        class is a child-class of the ARMIObject class and provides a structure\n        allowing a reactor model to be composed of Composites.\n\n        This class provides various methods to query and modify the hierarchical ARMI\n        reactor model, including but not limited to, iterating, sorting, and adding or\n        removing child Composites.\n\n    \"\"\"\n\n    _children: list[\"Composite\"]\n\n    def __init__(self, name):\n        ArmiObject.__init__(self, name)\n        self.childrenByLocator = {}\n        self._children = []\n\n    def __getitem__(self, index):\n        return self._children[index]\n\n    def __setitem__(self, index, obj):\n        raise NotImplementedError(\"Unsafe to insert elements directly\")\n\n    def __iter__(self):\n        return iter(self._children)\n\n    def __len__(self):\n        return len(self._children)\n\n    def __contains__(self, item):\n        \"\"\"\n        Membership check.\n\n        This does not use quality checks for membership checking because equality\n        operations can be fairly heavy. 
Rather, this only checks direct identity\n        matches.\n        \"\"\"\n        return id(item) in set(id(c) for c in self._children)\n\n    def sort(self):\n        \"\"\"Sort the children of this object.\"\"\"\n        # sort the top-level children of this Composite\n        self._children.sort()\n\n        # recursively sort the children below it.\n        for c in self._children:\n            if issubclass(c.__class__, Composite):\n                c.sort()\n\n    def index(self, obj):\n        \"\"\"Obtain the list index of a particular child.\"\"\"\n        return self._children.index(obj)\n\n    def append(self, obj):\n        \"\"\"Append a child to this object.\"\"\"\n        self._children.append(obj)\n\n    def extend(self, seq):\n        \"\"\"Add a list of children to this object.\"\"\"\n        for item in seq:\n            self.add(item)\n\n    def add(self, obj):\n        \"\"\"Add one new child.\"\"\"\n        if obj in self:\n            raise RuntimeError(f\"Cannot add {obj} because it has already been added to {self}.\")\n        obj.parent = self\n        self._children.append(obj)\n\n    def remove(self, obj):\n        \"\"\"Remove a particular child.\"\"\"\n        obj.parent = None\n        obj.spatialLocator = obj.spatialLocator.detachedCopy()\n        self._children.remove(obj)\n\n    def moveTo(self, locator):\n        \"\"\"Move to specific location in parent. 
Often in a grid.\"\"\"\n        if locator.grid.armiObject is not self.parent:\n            raise ValueError(\n                f\"Cannot move {self} to a location in  {locator.grid.armiObject}\"\n                \", which is not its parent ({self.parent}).\"\n            )\n        self.spatialLocator = locator\n\n    def insert(self, index, obj):\n        \"\"\"Insert an object into the list of children at a particular index.\"\"\"\n        if obj in self._children:\n            raise RuntimeError(f\"Cannot insert {obj} because it has already been added to {self}.\")\n        obj.parent = self\n        self._children.insert(index, obj)\n\n    def removeAll(self):\n        \"\"\"Remove all children.\"\"\"\n        for c in self.getChildren()[:]:\n            self.remove(c)\n\n    def setChildren(self, items):\n        \"\"\"Clear this container and fills it with new children.\"\"\"\n        self.removeAll()\n        for c in items:\n            self.add(c)\n\n    def iterChildren(\n        self,\n        deep=False,\n        generationNum=1,\n        predicate: Optional[Callable[[\"Composite\"], bool]] = None,\n    ) -> Iterator[\"Composite\"]:\n        \"\"\"Iterate over children objects of this composite.\n\n        Parameters\n        ----------\n        deep : bool, optional\n            If true, traverse the entire composite tree. Otherwise, go as far as ``generationNum``.\n        generationNum: int, optional\n            Produce composites at this depth. A depth of ``1`` includes children of ``self``, ``2``\n            is children of children, and so on.\n        predicate: f(Composite) -> bool, optional\n            Function to check on a composite before producing it. 
All items in the iteration\n            will pass this check.\n\n        Returns\n        -------\n        iterator of Composite\n\n        See Also\n        --------\n        :meth:`getChildren` produces a list for situations where you need to perform\n        multiple iterations or do list operations (append, indexing, sorting, containment, etc.)\n\n        Composites are naturally iterable. The following are identical::\n\n            >>> for child in c.getChildren():\n            ...     pass\n            >>> for child in c.iterChildren():\n            ...     pass\n            >>> for child in c:\n            ...     pass\n\n        If you do not need any depth-traversal, natural iteration should be sufficient.\n\n        The :func:`filter` command may be sufficient if you do not wish to pass a predicate. The following\n        are identical::\n            >>> checker = lambda c: len(c.name) % 3\n            >>> for child in c.getChildren(predicate=checker):\n            ...     pass\n            >>> for child in c.iterChildren(predicate=checker):\n            ...     pass\n            >>> for child in filter(checker, c):\n            ...     
pass\n\n        If you're going to be doing traversal beyond the first generation, this method will help you.\n\n        \"\"\"\n        if deep and generationNum > 1:\n            raise RuntimeError(\"Cannot get children with a generation number set and the deep flag set\")\n        if predicate is None:\n            checker = lambda _: True\n        else:\n            checker = predicate\n        yield from self._iterChildren(deep, generationNum, checker)\n\n    def _iterChildren(\n        self, deep: bool, generationNum: int, checker: Callable[[\"Composite\"], bool]\n    ) -> Iterator[\"Composite\"]:\n        if deep or generationNum == 1:\n            yield from filter(checker, self)\n        if deep or generationNum > 1:\n            for c in self:\n                yield from c._iterChildren(deep, generationNum - 1, checker)\n\n    def iterChildrenWithMaterials(self, *args, **kwargs) -> Iterator:\n        \"\"\"Produce an iterator that also includes any materials found on descendants.\n\n        Arguments are forwarded to :meth:`iterChildren` and control the depth of traversal\n        and filtering of objects.\n\n        This is useful for sending state across MPI tasks where you need a more full\n        representation of the composite tree. Which includes the materials attached\n        to components.\n        \"\"\"\n        children = self.iterChildren(*args, **kwargs)\n        # Each entry is either (c, ) or (c, c.material) if the child has a material attribute\n        stitched = map(\n            lambda c: ((c,) if getattr(c, \"material\", None) is None else (c, c.material)),\n            children,\n        )\n        # Iterator that iterates over each \"sub\" iterator. 
If we have ((c0, ), (c1, m1)), this produces a single\n        # iterator of (c0, c1, m1)\n        return itertools.chain.from_iterable(stitched)\n\n    def getChildren(\n        self,\n        deep=False,\n        generationNum=1,\n        includeMaterials=False,\n        predicate: Optional[Callable[[\"Composite\"], bool]] = None,\n    ) -> list[\"Composite\"]:\n        \"\"\"\n        Return the children objects of this composite.\n\n        .. impl:: Composites have children in the hierarchical data model.\n            :id: I_ARMI_CMP1\n            :implements: R_ARMI_CMP\n\n            This method retrieves all children within a given Composite object. Children of any\n            generation can be retrieved. This is achieved by visiting all children and calling this\n            method recursively for each generation requested.\n\n            If the method is called with ``includeMaterials``, it will additionally include\n            information about the material for each child. If a function is supplied as the\n            ``predicate`` argument, then this method will be used to evaluate all children as a\n            filter to include or not. For example, if the caller of this method only desires\n            children with a certain flag, or children which only contain a certain material, then\n            the ``predicate`` function can be used to perform this filtering.\n\n        Parameters\n        ----------\n        deep : boolean, optional\n            Return all children of all levels.\n\n        generationNum : int, optional\n            Which generation to return. 
1 means direct children, 2 means children of children.\n            Setting this parameter will only return children of this generation, not their parents.\n            Default: Just return direct children.\n\n        includeMaterials : bool, optional\n            Include the material properties\n\n        predicate : callable, optional\n            An optional unary predicate to use for filtering results. This can be used to request\n            children of specific types, or with desired attributes. Not all ArmiObjects have the\n            same methods and members, so care should be taken to make sure that the predicate\n            executes gracefully in all cases (e.g., use ``getattr(obj, \"attribute\", None)`` to\n            access instance attributes). Failure to meet the predicate only affects the object in\n            question; children will still be considered.\n\n        See Also\n        --------\n        :meth:`iterChildren` if you do not need to produce a full list, e.g., just iterating\n        over objects.\n\n        Examples\n        --------\n        >>> obj.getChildren()\n        [child1, child2, child3]\n\n        >>> obj.getChildren(generationNum=2)\n        [grandchild1, grandchild2, grandchild3]\n\n        >>> obj.getChildren(deep=True)\n        [child1, child2, child3, grandchild1, grandchild2, grandchild3]\n\n        # Assuming that grandchild1 and grandchild3 are Component objects\n        >>> obj.getChildren(deep=True, predicate=lambda o: isinstance(o, Component))\n        [grandchild1, grandchild3]\n\n        \"\"\"\n        if not includeMaterials:\n            items = self.iterChildren(deep=deep, generationNum=generationNum, predicate=predicate)\n        else:\n            items = self.iterChildrenWithMaterials(deep=deep, generationNum=generationNum, predicate=predicate)\n        return list(items)\n\n    def getComponents(self, typeSpec: TypeSpec = None, exact=False):\n        \"\"\"\n        Return a list of Component objects 
within this Composite.\n\n        Parameters\n        ----------\n        typeSpec : TypeSpec\n            Component flags. Will restrict Components to specific ones matching the flags specified.\n        exact : bool, optional\n            Only match exact component labels (names). If True, 'coolant' will not match 'interCoolant'. This has no\n            impact if typeSpec is None.\n\n        Returns\n        -------\n        list of Component\n            items matching typeSpec and exact criteria\n        \"\"\"\n        return list(self.iterComponents(typeSpec, exact))\n\n    def getFirstComponent(self, typeSpec: TypeSpec = None, exact=False):\n        \"\"\"\n        Returns a single Component object within this Composite.\n\n        Parameters\n        ----------\n        typeSpec : TypeSpec\n            Component flags. Will restrict Components to specific ones matching the flags specified.\n        exact : bool, optional\n            Only match exact component labels (names). If True, 'coolant' will not match 'interCoolant'. This has no\n            impact if typeSpec is None.\n\n        Returns\n        -------\n        Component\n            The first item matching typeSpec and exact criteria\n        \"\"\"\n        try:\n            return next(self.iterComponents(typeSpec, exact))\n        except StopIteration:\n            raise ValueError(f\"No component matches {typeSpec} {exact}\")\n\n    def iterComponents(self, typeSpec: TypeSpec = None, exact: bool = False) -> Iterator[\"Component\"]:\n        \"\"\"\n        Return an iterator of armi.reactor.component.Component objects within this Composite.\n\n        Parameters\n        ----------\n        typeSpec : TypeSpec\n            Component flags. Will restrict Components to specific ones matching the flags specified.\n\n        exact : bool, optional\n            Only match exact component labels (names). If True, 'coolant' will not match\n            'interCoolant'. 
This has no impact if typeSpec is None.\n\n        Returns\n        -------\n        iterator of Component\n            items matching typeSpec and exact criteria\n        \"\"\"\n        return (c for child in self for c in child.iterComponents(typeSpec, exact))\n\n    def syncMpiState(self):\n        \"\"\"\n        Synchronize all parameters of this object and all children to all worker nodes over the\n        network using MPI.\n\n        In parallelized runs, if each process has its own copy of the entire reactor hierarchy, this\n        method synchronizes the state of all parameters on all objects.\n\n        .. impl:: Composites can be synchronized across MPI threads.\n            :id: I_ARMI_CMP_MPI\n            :implements: R_ARMI_CMP_MPI\n\n            Parameters need to be handled properly during parallel code execution. This method\n            synchronizes all parameters of the composite object across all processes by cycling\n            through all the children of the Composite and ensuring that their parameters are\n            properly synchronized. 
If it fails to synchronize, an error message is displayed which\n            alerts the user to which Composite has inconsistent data across the processes.\n\n        Returns\n        -------\n        int\n            number of parameters synchronized over all components\n        \"\"\"\n        if context.MPI_SIZE == 1:\n            return 0\n\n        startTime = timeit.default_timer()\n        # sync parameters...\n        genItems = itertools.chain(\n            [self],\n            self.iterChildrenWithMaterials(deep=True),\n        )\n        allComps = [c for c in genItems if hasattr(c, \"p\")]\n        sendBuf = [c.p.getSyncData() for c in allComps]\n        runLog.debug(f\"syncMpiState has {len(allComps)} comps\")\n\n        try:\n            context.MPI_COMM.barrier()  # sync up\n            allGatherTime = -timeit.default_timer()\n            allSyncData = context.MPI_COMM.allgather(sendBuf)\n            allGatherTime += timeit.default_timer()\n        except:\n            msg = [\"Failure while trying to allgather.\"]\n            for ci, compData in enumerate(sendBuf):\n                if compData is not None:\n                    msg += [f\"sendBuf[{ci}]: {compData}\"]\n            runLog.error(\"\\n\".join(msg))\n            raise\n\n        # key is (comp, paramName) value is conflicting nodes\n        errors = collections.defaultdict(list)\n        syncCount = 0\n        compsPerNode = {len(nodeSyncData) for nodeSyncData in allSyncData}\n\n        if len(compsPerNode) != 1:\n            raise ValueError(f\"The workers have different reactor sizes! 
comp lengths: {compsPerNode}\")\n\n        for ci, comp in enumerate(allComps):\n            if not hasattr(comp, \"_syncParameters\"):\n                # materials don't have Parameters to sync\n                continue\n            data = (nodeSyncData[ci] for nodeSyncData in allSyncData)\n            syncCount += comp._syncParameters(data, errors)\n\n        if errors:\n            errorData = sorted(\n                (str(comp), comp.__class__.__name__, str(comp.parent), paramName, nodes)\n                for (comp, paramName), nodes in errors.items()\n            )\n            message = \"Synchronization failed due to overlapping data. Only the first duplicates are listed\\n{}\".format(\n                tabulate.tabulate(\n                    errorData,\n                    headers=[\n                        \"Composite\",\n                        \"Composite Type\",\n                        \"Composite Parent\",\n                        \"ParameterName\",\n                        \"NodeRanks\",\n                    ],\n                )\n            )\n            raise ValueError(message)\n\n        self._markSynchronized()\n        runLog.extra(\n            f\"Synchronized reactor over MPI in {timeit.default_timer() - startTime:.4f} seconds\"\n            f\", {allGatherTime:.4f} seconds in MPI allgather. 
count:{syncCount}\"\n        )\n\n        return syncCount\n\n    def _syncParameters(self, allSyncData, errors):\n        \"\"\"Ensure no overlap with syncedKeys, use errors to report overlapping data.\"\"\"\n        syncedKeys = set()\n        for nodeRank, nodeSyncData in enumerate(allSyncData):\n            if nodeSyncData is None:\n                continue\n\n            for key, val in nodeSyncData.items():\n                if key in syncedKeys:\n                    # Edge Case: a Composite object is flagged as out of sync, and this parameter\n                    # was also globally modified and readjusted to the original value.\n                    curVal = self.p[key]\n                    if isinstance(val, np.ndarray) or isinstance(curVal, np.ndarray):\n                        if (val != curVal).any():\n                            errors[self, key].append(nodeRank)\n                    elif curVal != val:\n                        errors[self, key].append(nodeRank)\n                        runLog.error(f\"in {self}, {key} differ ({curVal} != {val})\")\n                    continue\n                syncedKeys.add(key)\n                self.p[key] = val\n\n        self.clearCache()\n        return len(syncedKeys)\n\n    def _markSynchronized(self):\n        \"\"\"\n        Mark the composite and child parameters as synchronized across MPI.\n\n        We clear SINCE_LAST_DISTRIBUTE_STATE so that anything after this point will set the\n        SINCE_LAST_DISTRIBUTE_STATE flag, indicating it has been modified\n        SINCE_LAST_DISTRIBUTE_STATE.\n        \"\"\"\n        paramDefs = set()\n        items = itertools.chain(\n            [self],\n            self.iterChildrenWithMaterials(deep=True),\n        )\n        for child in items:\n            # Materials don't have a \"p\" / Parameter attribute to sync\n            if hasattr(child, \"p\"):\n                # below reads as: assigned & everything_but(SINCE_LAST_DISTRIBUTE_STATE)\n                
child.p.assigned &= ~parameters.SINCE_LAST_DISTRIBUTE_STATE\n                paramDefs.add(child.p.paramDefs)\n\n        for paramDef in paramDefs:\n            paramDef.resetAssignmentFlag(parameters.SINCE_LAST_DISTRIBUTE_STATE)\n\n    def retainState(self, paramsToApply=None):\n        \"\"\"\n        Restores a state before and after some operation.\n\n        Parameters\n        ----------\n        paramsToApply : iterable\n            Parameters that should be applied to the state after exiting the state retainer. All\n            others will be reverted to their values upon entering.\n\n        Notes\n        -----\n        This should be used in a `with` statement.\n        \"\"\"\n        return StateRetainer(self, paramsToApply)\n\n    def backUp(self):\n        \"\"\"\n        Create and store a backup of the state.\n\n        This needed to be overridden due to linked components which actually have a parameter value\n        of another ARMI component.\n        \"\"\"\n        self._backupCache = (self.cached, self._backupCache)\n        self.cached = {}  # don't .clear(), using reference above!\n        self.p.backUp()\n        if self.spatialGrid:\n            self.spatialGrid.backUp()\n\n    def restoreBackup(self, paramsToApply):\n        \"\"\"\n        Restore the parameters from previously created backup.\n\n        Parameters\n        ----------\n        paramsToApply : list of ParameterDefinitions\n            restores the state of all parameters not in `paramsToApply`\n        \"\"\"\n        self.p.restoreBackup(paramsToApply)\n        self.cached, self._backupCache = self._backupCache\n        if self.spatialGrid:\n            self.spatialGrid.restoreBackup()\n\n    def getLumpedFissionProductsIfNecessary(self, nuclides=None):\n        \"\"\"Return Lumped Fission Product objects that belong to this object or any of its children.\"\"\"\n        if self.requiresLumpedFissionProducts(nuclides=nuclides):\n            lfps = 
self.getLumpedFissionProductCollection()\n            if lfps is None:\n                for c in self:\n                    return c.getLumpedFissionProductsIfNecessary(nuclides=nuclides)\n            else:\n                return lfps\n        # There are no lumped fission products in the batch so if you use a\n        # dictionary no one will know the difference\n        return {}\n\n    def getLumpedFissionProductCollection(self):\n        \"\"\"\n        Get collection of LFP objects. Will work for global or block-level LFP models.\n\n        Returns\n        -------\n        lfps : object\n            lfpName keys, lfp object values\n\n        See Also\n        --------\n        armi.physics.neutronics.fissionProductModel.lumpedFissionProduct.LumpedFissionProduct\n        \"\"\"\n        lfps = ArmiObject.getLumpedFissionProductCollection(self)\n        if lfps is None:\n            for c in self:\n                lfps = c.getLumpedFissionProductCollection()\n                if lfps is not None:\n                    break\n\n        return lfps\n\n    def requiresLumpedFissionProducts(self, nuclides=None):\n        \"\"\"True if any of the nuclides in this object are Lumped nuclides.\"\"\"\n        if nuclides is None:\n            nuclides = self.getNuclides()\n\n        # ruff: noqa: SIM110\n        for nucName in nuclides:\n            if isinstance(self.nuclideBases.byName[nucName], nuclideBases.LumpNuclideBase):\n                return True\n\n        return False\n\n    def getIntegratedMgFlux(self, adjoint=False, gamma=False):\n        \"\"\"\n        Returns the multigroup neutron tracklength in [n-cm/s].\n\n        The first entry is the first energy group (fastest neutrons). 
Each additional group is the\n        next energy group, as set in the ISOTXS library.\n\n        Parameters\n        ----------\n        adjoint : bool, optional\n            Return adjoint flux instead of real\n        gamma : bool, optional\n            Whether to return the neutron flux or the gamma flux.\n\n        Returns\n        -------\n        integratedFlux : np.ndarray\n            multigroup neutron tracklength in [n-cm/s]\n        \"\"\"\n        integratedMgFlux = np.zeros(1)\n        for c in self:\n            mgFlux = c.getIntegratedMgFlux(adjoint=adjoint, gamma=gamma)\n            if mgFlux is not None:\n                integratedMgFlux = integratedMgFlux + mgFlux\n\n        return integratedMgFlux\n\n    def _getReactionRates(self, nucName, nDensity=None):\n        \"\"\"\n        Wrapper around logic to get reaction rates for a certain nuclide, to handle any errors.\n\n        Parameters\n        ----------\n        nucName : str\n            nuclide name -- e.g. 'U235'\n        nDensity : float\n            number density\n\n        Returns\n        -------\n        rxnRates : dict\n            dictionary of reaction rates (rxn/s) for nG, nF, n2n, nA and nP\n\n        Notes\n        -----\n        If you set nDensity to 1/CM2_PER_BARN this makes 1 group cross section generation easier.\n\n        This method is not designed to work on ``Assembly``, ``Core``, or anything higher on the\n        hierarchy than ``Block``.\n        \"\"\"\n        from armi.reactor.blocks import Block\n        from armi.reactor.reactors import Core\n\n        if nDensity is None:\n            nDensity = self.getNumberDensity(nucName)\n\n        try:\n            return self._getReactionRateDict(\n                nucName,\n                self.getAncestor(lambda c: isinstance(c, Core)).lib,\n                self.getAncestor(lambda x: isinstance(x, Block)).getMicroSuffix(),\n                self.getIntegratedMgFlux(),\n                nDensity,\n            )\n       
 except AttributeError:\n            runLog.warning(\n                f\"Object {self} does not belong to a core and so has no reaction rates.\",\n                single=True,\n            )\n            return {\"nG\": 0, \"nF\": 0, \"n2n\": 0, \"nA\": 0, \"nP\": 0}\n        except KeyError:\n            runLog.warning(\n                f\"Attempting to get a reaction rate on an isotope not in the lib {nucName}.\",\n                single=True,\n            )\n            return {\"nG\": 0, \"nF\": 0, \"n2n\": 0, \"nA\": 0, \"nP\": 0}\n\n    def _getReactionRateDict(self, nucName, lib, xsSuffix, mgFlux, nDens):\n        \"\"\"\n        Helper to get the reaction rates of a certain nuclide on one ArmiObject.\n\n        Parameters\n        ----------\n        nucName : str\n            nuclide name -- e.g. 'U235', 'PU239', etc. Not to be confused with the nuclide _label_, see\n            the nucDirectory module for a description of the difference.\n        lib : isotxs\n            cross section library\n        xsSuffix : str\n            cross section suffix, consisting of the type followed by the burnup group, e.g. 
'AB' for the\n            second burnup group of type A\n        mgFlux : np.ndarray\n            integrated mgFlux (n-cm/s)\n        nDens : float\n            number density (atom/bn-cm)\n\n        Returns\n        -------\n        rxnRates - dict\n            dictionary of reaction rates (rxn/s) for nG, nF, n2n, nA and nP\n\n        Notes\n        -----\n        Assume there is no n3n cross section in ISOTXS\n        \"\"\"\n        nucLabel = self.nuclideBases.byName[nucName].label\n        key = f\"{nucLabel}{xsSuffix}\"\n        libNuc = lib[key]\n        rxnRates = {\"n3n\": 0}\n        for rxName, mgXSs in [\n            (\"nG\", libNuc.micros.nGamma),\n            (\"nF\", libNuc.micros.fission),\n            (\"n2n\", libNuc.micros.n2n),\n            (\"nA\", libNuc.micros.nalph),\n            (\"nP\", libNuc.micros.np),\n        ]:\n            rxnRates[rxName] = nDens * sum(mgXSs * mgFlux)\n\n        return rxnRates\n\n    def getReactionRates(self, nucName, nDensity=None):\n        \"\"\"\n        Get the reaction rates of a certain nuclide on this ArmiObject.\n\n        Parameters\n        ----------\n        nucName : str\n            nuclide name -- e.g. 
'U235'\n        nDensity : float\n            number Density\n\n        Returns\n        -------\n        rxnRates : dict\n            reaction rates (1/s) for nG, nF, n2n, nA and nP\n\n        Notes\n        -----\n        This is volume integrated NOT (1/cm3-s).\n\n        If you set nDensity to 1 this makes 1-group cross section generation easier.\n        \"\"\"\n        from armi.reactor.components import Component\n\n        # find child objects\n        objects = self.getChildren(deep=True, predicate=lambda x: isinstance(x, Component))\n        if not len(objects):\n            objects = [self]\n\n        # The reaction rates for this object is the sum of its children\n        rxnRates = {\"nG\": 0, \"nF\": 0, \"n2n\": 0, \"nA\": 0, \"nP\": 0, \"n3n\": 0}\n        for armiObject in objects:\n            for rxName, val in armiObject._getReactionRates(nucName, nDensity).items():\n                rxnRates[rxName] += val\n\n        return rxnRates\n\n    def printContents(self, includeNuclides=True):\n        \"\"\"Display information about all the comprising children in this object.\"\"\"\n        runLog.important(self)\n        for c in self:\n            c.printContents(includeNuclides=includeNuclides)\n\n    def _genChildByLocationLookupTable(self):\n        \"\"\"Update the childByLocation lookup table.\"\"\"\n        runLog.extra(\"Generating location-to-child lookup table.\")\n        self.childrenByLocator = {}\n        for child in self:\n            self.childrenByLocator[child.spatialLocator] = child\n\n    def getBoundingCircleOuterDiameter(self, Tc=None, cold=False):\n        \"\"\"\n        Get sum circle bound.\n\n        Used to roughly approximate relative size vs. 
other objects\n        \"\"\"\n        getter = operator.methodcaller(\"getBoundingCircleOuterDiameter\", Tc, cold)\n        return sum(map(getter, self))\n\n    def getPuMoles(self):\n        \"\"\"Returns total number of moles of Pu isotopes.\"\"\"\n        nucNames = [nuc.name for nuc in self.nuclideBases.elements.byZ[94].nuclides]\n        puN = np.sum(self.getNuclideNumberDensities(nucNames))\n\n        return puN / units.MOLES_PER_CC_TO_ATOMS_PER_BARN_CM * self.getVolume()\n\n\nclass StateRetainer:\n    \"\"\"\n    Retains state during some operations.\n\n    This can be used to temporarily cache state, perform an operation, extract some info, and then\n    revert back to the original state.\n\n    * A state retainer is faster than restoring state from a database as it reduces the number of IO\n      reads; however, it does use more memory.\n\n    * This can be used on any object within the composite pattern via with\n      ``[rabc].retainState([list], [of], [parameters], [to], [retain]):``.\n      Use on an object up in the hierarchy applies to all objects below as well.\n\n    * This is intended to work across MPI, so that if you were to broadcast the reactor the state\n      would be correct; however the exact implication on ``parameters`` may be unclear.\n\n    \"\"\"\n\n    def __init__(self, composite: Composite, paramsToApply=None):\n        \"\"\"\n        Create an instance of a StateRetainer.\n\n        Parameters\n        ----------\n        composite: Composite\n            composite object to retain state (recursively)\n\n        paramsToApply: iterable of parameters.Parameter\n            Iterable of parameters.Parameter to retain updated values after `__exit__`. All other\n            parameters are reverted to the original state, i.e. 
retained at the original value.\n        \"\"\"\n        self.composite = composite\n        self.paramsToApply = set(paramsToApply or [])\n\n    def __enter__(self):\n        self._enterExitHelper(lambda obj: obj.backUp())\n        return self\n\n    def __exit__(self, *args):\n        self._enterExitHelper(lambda obj: obj.restoreBackup(self.paramsToApply))\n\n    def _enterExitHelper(self, func):\n        \"\"\"Helper method for ``__enter__`` and ``__exit__``. ``func`` is a lambda to either\n        ``backUp()`` or ``restoreBackup()``.\n        \"\"\"\n        paramDefs = set()\n        items = itertools.chain(\n            (self.composite,),\n            self.composite.iterChildrenWithMaterials(deep=True),\n        )\n        for child in items:\n            if hasattr(child, \"p\"):\n                # materials don't have Parameters\n                paramDefs.update(child.p.paramDefs)\n            func(child)\n        for paramDef in paramDefs:\n            func(paramDef)\n\n\ndef gatherMaterialsByVolume(objects: List[ArmiObject], typeSpec: TypeSpec = None, exact=False):\n    \"\"\"\n    Compute the total volume of each material in a set of objects and give samples.\n\n    Parameters\n    ----------\n    objects : list of ArmiObject\n        Objects to look within. This argument allows clients to search through some subset of the\n        tree (e.g. when you're looking for all CLADDING components within FUEL blocks)\n\n    typeSpec : TypeSpec\n        Flags for the components to look at\n\n    exact : bool\n        Whether or not the TypeSpec is exact\n\n    Notes\n    -----\n    This helper method is outside the main ArmiObject tree for the special clients that need to\n    filter both by container type (e.g. Block type) with one set of flags, and Components with\n    another set of flags.\n\n    .. 
warning:: This is a **composition** related helper method that will likely be filed into\n        classes/modules that deal specifically with the composition of things in the data model.\n        Thus clients that use it from here should expect to need updates soon.\n    \"\"\"\n    volumes = {}\n    samples = {}\n    for obj in objects:\n        for c in obj.iterComponents(typeSpec, exact):\n            vol = c.getVolume()\n            matName = c.material.getName()\n            volumes[matName] = volumes.get(matName, 0.0) + vol\n            if matName not in samples:\n                samples[matName] = c.material\n\n    return volumes, samples\n\n\ndef getDominantMaterial(objects: List[ArmiObject], typeSpec: TypeSpec = None, exact=False):\n    \"\"\"\n    Return the first sample of the most dominant material (by volume) in a set of objects.\n\n    Warning\n    -------\n    This is a **composition** related helper method that will likely be filed into classes/modules\n    that deal specifically with the composition of things in the data model. Thus clients that use\n    it from here should expect to need updates soon.\n    \"\"\"\n    volumes, samples = gatherMaterialsByVolume(objects, typeSpec, exact)\n\n    if volumes:\n        # find matName with max volume\n        maxMatName = list(sorted(volumes.items(), key=lambda item: item[1])).pop()[0]\n        # return this material. Note that if this material has properties like Zr-frac, enrichment,\n        # etc. then this will just return one in the batch, not an average.\n        return samples[maxMatName]\n\n    return None\n"
  },
  {
    "path": "armi/reactor/converters/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nContains code that can convert reactor models from one geometry to another.\n\nConversions between geometries are often needed in advance of a certain type of physics\ncalculation that cannot be done on the full 3-D detailed geometry. For example, sometimes\nan analyst wants to convert a reactor from 3-D to R-Z in advance of a very fast running\nneutronics solution.\n\nConverting from one geometry to another while properly conserving mass or some other\nparameter manually is tedious and error prone. So it's well-suited for automation with\nARMI.\n\nThis subpackage contains code that does a certain subset of conversions along those lines.\n\n.. warning::\n    Geometry conversions are relatively design-specific, so the converters in this\n    subpackage are relatively limited in scope as to what they can convert, largely\n    targeting hexagonal pin-type assemblies. If your geometry is different from this, this\n    code is best considered as examples and starting points, as you will likely need to\n    write your own converters in your own plugin. Of course, if your converter is\n    sufficiently generic, we welcome it here.\n\n    In other words, some of these converters may at some point migrate to a more\n    design-specific plugin.\n\n\nSee Also\n--------\narmi.cases.inputModifiers\n    Modify input files and re-write them.\n\"\"\"\n"
  },
  {
    "path": "armi/reactor/converters/axialExpansionChanger/__init__.py",
    "content": "# Copyright 2024 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Enable component-wise axial expansion for assemblies and/or a reactor.\"\"\"\n\n# ruff: noqa: F401\nfrom armi.reactor.converters.axialExpansionChanger.assemblyAxialLinkage import (\n    AssemblyAxialLinkage,\n)\nfrom armi.reactor.converters.axialExpansionChanger.axialExpansionChanger import (\n    AxialExpansionChanger,\n    makeAssemsAbleToSnapToUniformMesh,\n)\nfrom armi.reactor.converters.axialExpansionChanger.expansionData import (\n    ExpansionData,\n    getSolidComponents,\n    iterSolidComponents,\n)\n"
  },
  {
    "path": "armi/reactor/converters/axialExpansionChanger/assemblyAxialLinkage.py",
    "content": "# Copyright 2024 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport dataclasses\nimport functools\nimport itertools\nimport typing\nfrom textwrap import dedent\n\nfrom armi import runLog\nfrom armi.reactor.blocks import Block\nfrom armi.reactor.components import Component, UnshapedComponent\nfrom armi.reactor.converters.axialExpansionChanger.expansionData import (\n    iterSolidComponents,\n)\nfrom armi.reactor.grids import MultiIndexLocation\n\nif typing.TYPE_CHECKING:\n    from armi.reactor.assemblies import Assembly\n\n\ndef areAxiallyLinked(componentA: Component, componentB: Component) -> bool:\n    \"\"\"Determine axial component linkage for two components.\n\n    Parameters\n    ----------\n    componentA : :py:class:`Component <armi.reactor.components.component.Component>`\n        component of interest\n    componentB : :py:class:`Component <armi.reactor.components.component.Component>`\n        component to compare and see if is linked to componentA\n\n    Notes\n    -----\n    If componentA and componentB are both solids and the same type, geometric overlap can be checked via\n    getCircleInnerDiameter and getBoundingCircleOuterDiameter. Four different cases are accounted for.\n    If they do not meet these initial criteria, linkage is assumed to be False.\n    Case #1: Unshaped Components. There is no way to determine overlap so they're assumed to be not linked.\n    Case #2: Blocks with specified grids. 
If componentA and componentB have identical grid indices (cannot be a partial\n    case, ALL of the indices must be contained by one or the other), then overlap can be checked.\n    Case #3: If Component position is not specified via a grid, the multiplicity is checked. If consistent, they are\n    assumed to be in the same positions and their overlap is checked.\n    Case #4: Components are either not both solids, are not the same type, or Cases 1-3 are not True.\n\n    Returns\n    -------\n    linked : bool\n        status of whether componentA and componentB are axially linked to one another\n    \"\"\"\n    ## Case 4\n    linked = False\n\n    if isinstance(componentA, type(componentB)) and (\n        componentA.containsSolidMaterial() and componentB.containsSolidMaterial()\n    ):\n        if isinstance(componentA, UnshapedComponent):\n            ## Case 1\n            runLog.warning(\n                f\"Components {componentA} and {componentB} are UnshapedComponents \"\n                \"and do not have 'getCircleInnerDiameter' or getBoundingCircleOuterDiameter methods; \"\n                \"nor is it physical to do so. 
Instead of crashing and raising an error, \"\n                \"they are going to be assumed to not be linked.\",\n                single=True,\n            )\n        elif isinstance(componentA.spatialLocator, MultiIndexLocation) and isinstance(\n            componentB.spatialLocator, MultiIndexLocation\n        ):\n            ## Case 2\n            fromA = set(tuple(index) for index in componentA.spatialLocator.indices)\n            fromB = set(tuple(index) for index in componentB.spatialLocator.indices)\n            if fromA == fromB:\n                linked = _checkOverlap(componentA, componentB)\n        elif componentA.getDimension(\"mult\") == componentB.getDimension(\"mult\"):\n            ## Case 3\n            linked = _checkOverlap(componentA, componentB)\n\n    return linked\n\n\ndef _checkOverlap(componentA: Component, componentB: Component) -> bool:\n    \"\"\"Check two components for geometric overlap by seeing if one can fit within the other.\n\n    Notes\n    -----\n    When component dimensions are retrieved, cold=True to ensure that dimensions are evaluated\n    at cold/input temperatures. At temperature, solid-solid interfaces in ARMI may produce\n    slight overlaps due to thermal expansion. 
Handling these potential overlaps is out of scope.\n    \"\"\"\n    idA = componentA.getCircleInnerDiameter(cold=True)\n    odA = componentA.getBoundingCircleOuterDiameter(cold=True)\n    idB = componentB.getCircleInnerDiameter(cold=True)\n    odB = componentB.getBoundingCircleOuterDiameter(cold=True)\n    biggerID = max(idA, idB)\n    smallerOD = min(odA, odB)\n    return biggerID < smallerOD\n\n\n# Make a generic type so we can \"template\" the axial link class based on what could be above/below a thing\nComp = typing.TypeVar(\"Comp\", Block, Component)\n\n\n@dataclasses.dataclass\nclass AxialLink(typing.Generic[Comp]):\n    \"\"\"Small class for named references to objects above and below a specific object.\n\n    Axial expansion in ARMI works by identifying what objects occupy the same axial space.\n    For components in blocks, identify which above and below axially align. This is used\n    to determine what, if any, mass needs to be re-assigned across blocks during expansion.\n    For blocks, the linking determines what blocks need to move as a result of a specific block's\n    axial expansion.\n\n    Attributes\n    ----------\n    lower : Composite or None\n        Object below, if any.\n    upper : Composite or None\n        Object above, if any.\n\n    Notes\n    -----\n    This class is \"templated\" by the type of composite that could be assigned and fetched. 
A\n    block-to-block linkage could be type-hinted via ``AxialLink[Block]`` or ``AxialLink[Component]``\n    for component-to-component link.\n\n    See Also\n    --------\n    * :attr:`AxialAssemblyLinkage.linkedBlocks`\n    * :attr:`AxialAssemblyLinkage.linkedComponents`\n    \"\"\"\n\n    lower: typing.Optional[Comp] = dataclasses.field(default=None)\n    upper: typing.Optional[Comp] = dataclasses.field(default=None)\n\n\nclass AssemblyAxialLinkage:\n    \"\"\"Determines and stores the block- and component-wise axial linkage for an assembly.\n\n    Parameters\n    ----------\n    assem : armi.reactor.assemblies.Assembly\n        Assembly to be linked\n\n    Attributes\n    ----------\n    a : :py:class:`Assembly <armi.reactor.assemblies.Assembly>`\n        reference to original assembly; is directly modified/changed during expansion.\n    linkedBlocks : dict\n        Keys are blocks in the assembly. Their values are :class:`AxialLink` with\n        ``upper`` and ``lower`` attributes for the blocks potentially above and\n        below this block.\n    linkedComponents : dict\n        Keys are solid components in the assembly. Their values are :class:`AxialLink` with\n        ``upper`` and ``lower`` attributes for the solid components potentially above and\n        below this block.\n    \"\"\"\n\n    linkedBlocks: dict[Block, AxialLink[Block]]\n    linkedComponents: dict[Component, AxialLink[Component]]\n\n    def __init__(self, assem: \"Assembly\"):\n        self.a = assem\n        self.linkedBlocks = self.getLinkedBlocks(assem)\n        self.linkedComponents = {}\n        self._determineAxialLinkage()\n\n    @classmethod\n    def getLinkedBlocks(\n        cls,\n        blocks: typing.Sequence[Block],\n    ) -> dict[Block, AxialLink[Block]]:\n        \"\"\"Produce a mapping showing how blocks are linked.\n\n        Parameters\n        ----------\n        blocks : sequence of armi.reactor.blocks.Block\n            Ordered sequence of blocks from bottom to top. 
Could just as easily be an\n            :class:`armi.reactor.assemblies.Assembly`.\n\n        Returns\n        -------\n        dict[Block, AxialLink[Block]]\n            Dictionary where keys are individual blocks and their corresponding values point\n            to blocks above and below.\n        \"\"\"\n        nBlocks = len(blocks)\n        if nBlocks:\n            return cls._getLinkedBlocks(blocks, nBlocks)\n        raise ValueError(\"No blocks passed. Cannot determine links\")\n\n    @staticmethod\n    def _getLinkedBlocks(blocks: typing.Sequence[Block], nBlocks: int) -> dict[Block, AxialLink[Block]]:\n        # Use islice to avoid making intermediate lists of subsequences of blocks\n        lower = itertools.chain((None,), itertools.islice(blocks, 0, nBlocks - 1))\n        upper = itertools.chain(itertools.islice(blocks, 1, None), (None,))\n        links = {}\n        for low, mid, high in zip(lower, blocks, upper):\n            links[mid] = AxialLink(lower=low, upper=high)\n        return links\n\n    def _determineAxialLinkage(self):\n        \"\"\"Gets the block and component based linkage.\"\"\"\n        for b in self.a:\n            for c in iterSolidComponents(b):\n                self._getLinkedComponents(b, c)\n\n    def _findComponentLinkedTo(self, c: Component, otherBlock: typing.Optional[Block]) -> typing.Optional[Component]:\n        if otherBlock is None:\n            return None\n        candidate = None\n        # Iterate over all solid components in the other block that are linked to this one\n        areLinked = functools.partial(self.areAxiallyLinked, c)\n        for otherComp in filter(areLinked, iterSolidComponents(otherBlock)):\n            if candidate is None:\n                candidate = otherComp\n            else:\n                errMsg = f\"\"\"\n                    Multiple component axial linkages have been found for the following component!\n                        Component {c}\n                          -> Block 
{c.parent}\n                          -> Assembly {c.parent.parent}\n                    This is indicative of an error in the blueprints! Candidate components in {otherBlock}:\n                        {candidate}\n                        {otherComp}\n                \"\"\"\n                runLog.error(msg=dedent(errMsg))\n                raise RuntimeError(dedent(errMsg))\n        return candidate\n\n    def _getLinkedComponents(self, b: Block, c: Component):\n        \"\"\"Retrieve the axial linkage for component c.\n\n        Parameters\n        ----------\n        b : :py:class:`Block <armi.reactor.blocks.Block>`\n            key to access blocks containing linked components\n        c : :py:class:`Component <armi.reactor.components.component.Component>`\n            component to determine axial linkage for\n\n        Raises\n        ------\n        RuntimeError\n            multiple candidate components are found to be axially linked to a component\n        \"\"\"\n        linkedBlocks = self.linkedBlocks[b]\n        lowerC = self._findComponentLinkedTo(c, linkedBlocks.lower)\n        upperC = self._findComponentLinkedTo(c, linkedBlocks.upper)\n        lstLinkedC = AxialLink(lowerC, upperC)\n        self.linkedComponents[c] = lstLinkedC\n\n        if self.linkedBlocks[b].lower is None and lstLinkedC.lower is None:\n            runLog.debug(\n                f\"Assembly {self.a}, Block {b}, Component {c} has nothing linked below it!\",\n                single=True,\n            )\n        if self.linkedBlocks[b].upper is None and lstLinkedC.upper is None:\n            runLog.debug(\n                f\"Assembly {self.a}, Block {b}, Component {c} has nothing linked above it!\",\n                single=True,\n            )\n\n    @staticmethod\n    def areAxiallyLinked(componentA: Component, componentB: Component) -> bool:\n        \"\"\"Check if two components are axially linked.\n\n        Parameters\n        ----------\n        componentA : 
:py:class:`Component <armi.reactor.components.component.Component>`\n            component of interest\n        componentB : :py:class:`Component <armi.reactor.components.component.Component>`\n            component to compare and see if is linked to componentA\n\n        Returns\n        -------\n        bool\n            Status of linkage check\n\n        See Also\n        --------\n        :func:`areAxiallyLinked` for more details, including the criteria for considering components linked.\n        This method is provided to allow subclasses the ability to override the linkage check.\n        \"\"\"\n        return areAxiallyLinked(componentA, componentB)\n"
  },
  {
    "path": "armi/reactor/converters/axialExpansionChanger/axialExpansionChanger.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Enable component-wise axial expansion for assemblies and/or a reactor.\"\"\"\n\nimport typing\nfrom textwrap import dedent\n\nfrom numpy import array\n\nfrom armi import runLog\nfrom armi.materials.material import Fluid\nfrom armi.reactor.assemblies import Assembly\nfrom armi.reactor.converters.axialExpansionChanger.assemblyAxialLinkage import (\n    AssemblyAxialLinkage,\n)\nfrom armi.reactor.converters.axialExpansionChanger.expansionData import (\n    ExpansionData,\n    iterSolidComponents,\n)\nfrom armi.reactor.converters.axialExpansionChanger.redistributeMass import RedistributeMass\nfrom armi.reactor.flags import Flags\nfrom armi.utils.customExceptions import InputError\n\nif typing.TYPE_CHECKING:\n    from armi.reactor.blocks import Block\n    from armi.reactor.components.component import Component\n\n\ndef getDefaultReferenceAssem(assems):\n    \"\"\"Return a default reference assembly.\"\"\"\n    # if assemblies are defined in blueprints, handle meshing\n    # assume finest mesh is reference\n    assemsByNumBlocks = sorted(\n        assems,\n        key=lambda a: len(a),\n        reverse=True,\n    )\n    return assemsByNumBlocks[0] if assemsByNumBlocks else None\n\n\ndef makeAssemsAbleToSnapToUniformMesh(assems, nonUniformAssemFlags, referenceAssembly=None):\n    \"\"\"Make this set of assemblies aware of the reference mesh so they can stay uniform 
as they axially expand.\"\"\"\n    if not referenceAssembly:\n        referenceAssembly = getDefaultReferenceAssem(assems)\n    # make the snap lists so assems know how to expand\n    nonUniformAssems = [Flags.fromStringIgnoreErrors(t) for t in nonUniformAssemFlags]\n    for a in assems:\n        if any(a.hasFlags(f) for f in nonUniformAssems):\n            continue\n        a.makeAxialSnapList(referenceAssembly)\n\n\nclass AxialExpansionChanger:\n    \"\"\"\n    Axially expand or contract assemblies or an entire core.\n\n    Attributes\n    ----------\n    linked: :py:class:`AssemblyAxialLinkage`\n        establishes object containing axial linkage information\n    expansionData: :py:class:`ExpansionData <armi.reactor.converters.axialExpansionChanger.expansionData.ExpansionData>`\n        establishes object to store and access relevant expansion data\n\n    Notes\n    -----\n    - Is designed to work with general, vertically oriented, pin-type assembly designs. It is not set up to account\n      for any other assembly type.\n    - Useful for fuel performance, thermal expansion, reactivity coefficients, etc.\n    - The axial expansion changer does not consider the expansion or contraction of fluids and therefore their\n      conservation is not guaranteed. The conservation of fluid mass is expected only if each component type on a\n      block has 1) uniform expansion rates and 2) axially isothermal fluid temperatures.\n    \"\"\"\n\n    linked: typing.Optional[AssemblyAxialLinkage]\n    expansionData: typing.Optional[ExpansionData]\n    topMostBlock: typing.Optional[\"Block\"]\n\n    # 3cm is a presumptive lower threshold for DIF3D\n    DIF3D_MIN_BLOCK_HEIGHT: float = 3.0\n    # when checking the difference between the component and block heights, 1e-12 cm is used as a threshold to account\n    # for meaningful differences. 
This threshold filters out negligible differences arising from numerical precision\n    # that otherwise have a negligible impact on the assembly post-axial expansion. Anything larger than this value is\n    # presumed to be worthy of a warning that may warrant further investigation.\n    COMP_BLOCK_HEIGHT_DIFF_THRESHOLD: float = 1e-12\n\n    # Establish the class used to redistribute mass between components.\n    MASS_REDISTRIBUTOR = RedistributeMass\n\n    def __init__(self, detailedAxialExpansion: bool = False):\n        \"\"\"\n        Build an axial expansion converter.\n\n        Parameters\n        ----------\n        detailedAxialExpansion : bool, optional\n            A boolean to indicate whether or not detailedAxialExpansion is to be utilized.\n        \"\"\"\n        self._detailedAxialExpansion = detailedAxialExpansion\n        self.linked = None\n        self.expansionData = None\n        self.topMostBlock = None\n\n    @classmethod\n    def expandColdDimsToHot(\n        cls,\n        assems: list,\n        isDetailedAxialExpansion: bool,\n        referenceAssembly=None,\n    ):\n        \"\"\"Expand BOL assemblies, resolve disjoint axial mesh (if needed), and update block BOL heights.\n\n        .. impl:: Perform expansion during core construction based on block heights at a specified temperature.\n            :id: I_ARMI_INP_COLD_HEIGHT\n            :implements: R_ARMI_INP_COLD_HEIGHT\n\n            This method is designed to be used during core construction to axially thermally expand the\n            assemblies to their \"hot\" temperatures (as determined by ``Thot`` values in blueprints).\n            First, the Assembly is prepared for axial expansion via ``setAssembly``. In\n            ``applyColdHeightMassIncrease``, the number densities on each Component is adjusted to\n            reflect that Assembly inputs are at cold (i.e., ``Tinput``) temperatures. 
To expand to\n            the requested hot temperatures, thermal expansion factors are then computed in\n            ``computeThermalExpansionFactors``. Finally, the Assembly is axially thermally expanded in\n            ``axiallyExpandAssembly``.\n\n            If the setting ``detailedAxialExpansion`` is ``False``, then each Assembly gets its Block mesh\n            set to match that of the \"reference\" Assembly (see ``getDefaultReferenceAssem`` and ``setBlockMesh``).\n\n            Once the Assemblies are axially expanded, the Block BOL heights are updated. To account for the change in\n            Block volume from axial expansion, ``completeInitialLoading`` is called to update any volume-dependent\n            Block information.\n\n        Parameters\n        ----------\n        assems: list[:py:class:`Assembly <armi.reactor.assemblies.Assembly>`]\n            list of assemblies to be thermally expanded\n        isDetailedAxialExpansion: bool\n            If False, assemblies will be forced to conform to the reference mesh after expansion\n        referenceAssembly: :py:class:`Assembly <armi.reactor.assemblies.Assembly>`, optional\n            Assembly whose mesh other meshes will conform to if isDetailedAxialExpansion is False.\n            If not provided, will assume the finest mesh assembly which is typically fuel.\n\n        Notes\n        -----\n        Calling this method will result in an increase in mass via applyColdHeightMassIncrease!\n\n        See Also\n        --------\n        :py:meth:`applyColdHeightMassIncrease`\n        \"\"\"\n        assems = list(assems)\n        if not referenceAssembly:\n            referenceAssembly = getDefaultReferenceAssem(assems)\n        axialExpChanger = cls(isDetailedAxialExpansion)\n        for a in assems:\n            axialExpChanger.setAssembly(a, expandFromTinputToThot=True)\n            axialExpChanger.applyColdHeightMassIncrease()\n            
axialExpChanger.expansionData.computeThermalExpansionFactors()\n            axialExpChanger.axiallyExpandAssembly(recalculateBurnup=False)\n        if not isDetailedAxialExpansion:\n            for a in assems:\n                a.setBlockMesh(referenceAssembly.getAxialMesh())\n        # update block BOL heights to reflect hot heights\n        for a in assems:\n            for b in a:\n                b.p.heightBOL = b.getHeight()\n                b.completeInitialLoading()\n                axialExpChanger.recalculateBurnup(b)\n\n    def performPrescribedAxialExpansion(self, a: Assembly, components: list, percents: list, setFuel=True):\n        \"\"\"Perform axial expansion/contraction of an assembly given prescribed expansion percentages.\n\n        .. impl:: Perform expansion/contraction, given a list of components and expansion coefficients.\n            :id: I_ARMI_AXIAL_EXP_PRESC\n            :implements: R_ARMI_AXIAL_EXP_PRESC\n\n            This method performs component-wise axial expansion for an Assembly given expansion coefficients\n            and a corresponding list of Components. In ``setAssembly``, the Assembly is prepared\n            for axial expansion by determining Component-wise axial linkage and checking to see if a dummy Block\n            is in place (necessary for ensuring conservation properties). The provided expansion factors are\n            then assigned to their corresponding Components in ``setExpansionFactors``. 
Finally, the axial\n            expansion is performed in ``axiallyExpandAssembly``\n\n        Parameters\n        ----------\n        a : :py:class:`Assembly <armi.reactor.assemblies.Assembly>`\n            ARMI assembly to be changed\n        components : list[:py:class:`Component <armi.reactor.components.component.Component>`]\n            list of Components to be expanded\n        percents : list[float]\n            list of expansion percentages for each component listed in components\n        setFuel : boolean, optional\n            Boolean to determine whether or not fuel blocks should have their target components set\n            This is useful when target components within a fuel block need to be determined on-the-fly.\n\n        Notes\n        -----\n        - percents may be positive (expansion) or negative (contraction)\n        \"\"\"\n        self.setAssembly(a, setFuel)\n        self.expansionData.setExpansionFactors(components, percents)\n        self.axiallyExpandAssembly()\n\n    def performThermalAxialExpansion(\n        self,\n        a: Assembly,\n        tempGrid: list,\n        tempField: list,\n        setFuel: bool = True,\n        expandFromTinputToThot: bool = False,\n    ):\n        \"\"\"Perform thermal expansion/contraction for an assembly given an axial temperature grid and\n        field.\n\n        .. impl:: Perform thermal expansion/contraction, given an axial temperature distribution\n            over an assembly.\n            :id: I_ARMI_AXIAL_EXP_THERM\n            :implements: R_ARMI_AXIAL_EXP_THERM\n\n            This method performs component-wise thermal expansion for an assembly given a discrete\n            temperature distribution over the axial length of the Assembly. In ``setAssembly``, the\n            Assembly is prepared for axial expansion by determining Component-wise axial linkage and\n            checking to see if a dummy Block is in place (necessary for ensuring conservation\n            properties). 
The discrete temperature distribution is then leveraged to update Component\n            temperatures and compute thermal expansion factors (via\n            ``updateComponentTempsBy1DTempField`` and ``computeThermalExpansionFactors``,\n            respectively). Finally, the axial expansion is performed in ``axiallyExpandAssembly``.\n\n        Parameters\n        ----------\n        a : :py:class:`Assembly <armi.reactor.assemblies.Assembly>`\n            ARMI assembly to be changed\n        tempGrid : float, list\n            Axial temperature grid (in cm) (i.e., physical locations where temp is stored)\n        tempField : float, list\n            Temperature values (in C) along grid\n        setFuel : boolean, optional\n            Boolean to determine whether or not fuel blocks should have their target components set\n            This is useful when target components within a fuel block need to be determined on-the-fly.\n        expandFromTinputToThot: bool\n            determines if thermal expansion factors should be calculated from c.inputTemperatureInC\n            to c.temperatureInC (True) or some other reference temperature and c.temperatureInC (False)\n        \"\"\"\n        self.setAssembly(a, setFuel, expandFromTinputToThot)\n        self.expansionData.updateComponentTempsBy1DTempField(tempGrid, tempField)\n        self.expansionData.computeThermalExpansionFactors()\n        self.axiallyExpandAssembly()\n\n    def reset(self):\n        self.linked = None\n        self.expansionData = None\n\n    def setAssembly(self, a: Assembly, setFuel=True, expandFromTinputToThot=False):\n        \"\"\"Set the armi assembly to be changed and init expansion data class for assembly.\n\n        Parameters\n        ----------\n         a : :py:class:`Assembly <armi.reactor.assemblies.Assembly>`\n            ARMI assembly to be changed\n        setFuel : boolean, optional\n            Boolean to determine whether or not fuel blocks should have their target components 
set\n            This is useful when target components within a fuel block need to be determined on-the-fly.\n        expandFromTinputToThot: bool\n            determines if thermal expansion factors should be calculated from c.inputTemperatureInC\n            to c.temperatureInC (True) or some other reference temperature and c.temperatureInC (False)\n\n        Notes\n        -----\n        When considering thermal expansion, if there is an axial temperature distribution on the\n        assembly, the axial expansion methodology will NOT perfectly preserve mass. The magnitude of\n        the gradient of the temperature distribution is the primary factor in determining the\n        cumulative loss of mass conservation.\n        \"\"\"\n        self.linked = AssemblyAxialLinkage(a)\n        self.expansionData = ExpansionData(a, setFuel=setFuel, expandFromTinputToThot=expandFromTinputToThot)\n        self._checkAssemblyConstructionIsValid()\n\n    def _checkAssemblyConstructionIsValid(self):\n        self._isTopDummyBlockPresent()\n        self._checkForBlocksWithoutSolids()\n\n    def _checkForBlocksWithoutSolids(self):\n        \"\"\"\n        Makes sure that there aren't any blocks (other than the top-most dummy block)\n        that consist entirely of fluid components. The expansion changer doesn't know\n        what to do with such assemblies.\n        \"\"\"\n        # skip top most dummy block since that is, by design, all fluid\n        for b in self.linked.a[:-1]:\n            if all(isinstance(c.material, Fluid) for c in b.iterComponents()):\n                raise InputError(\n                    f\"Assembly {self.linked.a} is constructed improperly for use with the axial expansion changer \"\n                    f\"as block, {b}, consists of exclusively fluid component(s). 
If this is not a mistake, consider \"\n                    \"using the 'assemFlagsToSkipAxialExpansion' case setting to bypass performing axial expansion \"\n                    \"on this assembly.\"\n                )\n\n    def applyColdHeightMassIncrease(self):\n        \"\"\"\n        Increase component mass because they are declared at cold dims.\n\n        Notes\n        -----\n        A cold 1 cm tall component will have more mass than a component with the\n        same mass/length as a component with a hot height of 1 cm. This should be\n        called when the setting `inputHeightsConsideredHot` is used. This adjusts\n        the expansion factor applied during applyMaterialMassFracsToNumberDensities.\n        \"\"\"\n        for c in self.linked.a.iterComponents():\n            axialExpansionFactor = 1.0 + c.material.linearExpansionFactor(c.temperatureInC, c.inputTemperatureInC)\n            c.changeNDensByFactor(axialExpansionFactor)\n\n    def _isTopDummyBlockPresent(self):\n        \"\"\"Determines if top most block of assembly is a dummy block.\n\n        Notes\n        -----\n        - If true, then axial expansion will be physical for all blocks.\n        - If false, the top most block in the assembly is artificially chopped to preserve the assembly height. A\n        runLog.Warning is also issued.\n        \"\"\"\n        self.topMostBlock = self.linked.a[-1]\n        if not self.topMostBlock.hasFlags(Flags.DUMMY):\n            runLog.warning(\n                f\"No dummy block present at the top of {self.linked.a}! 
Top most block will be artificially chopped to \"\n                \"preserve assembly height\"\n            )\n            if self._detailedAxialExpansion:\n                msg = \"Cannot run detailedAxialExpansion without a dummy block at the top of the assembly!\"\n                runLog.error(msg)\n                raise RuntimeError(msg)\n\n    def axiallyExpandAssembly(self, recalculateBurnup: bool = True):\n        \"\"\"Utilizes assembly linkage to do axial expansion.\n\n        .. impl:: Preserve the total height of an ARMI assembly, during expansion.\n            :id: I_ARMI_ASSEM_HEIGHT_PRES\n            :implements: R_ARMI_ASSEM_HEIGHT_PRES\n\n            The total height of an Assembly is preserved by not changing the ``ztop`` position of the top-most Block in\n            an Assembly. The ``zbottom`` of the top-most Block is adjusted to match the Block immediately below it. The\n            ``height`` of the top-most Block is then updated to reflect any expansion/contraction.\n\n        Parameters\n        ----------\n        recalculateBurnup\n            Optional parameter to skip the recalculate burnup step.\n        \"\"\"\n        mesh = [0.0]\n        runLog.debug(\n            \"Printing component expansion information (growth percentage and 'target component') for each block in \"\n            f\"assembly {self.linked.a}.\"\n        )\n        # expand all of the components\n        for b in self.linked.a:\n            for c in iterSolidComponents(b):\n                growFrac = self.expansionData.getExpansionFactor(c)\n                # component ndens and component heights are scaled to their respective growth factor\n                c.changeNDensByFactor(1.0 / growFrac)\n                c.zbottom = b.p.zbottom\n                c.height = growFrac * b.getHeight()\n                c.ztop = c.zbottom + c.height\n\n        # align blocks on target components\n        for ib, b in enumerate(self.linked.a):\n            if b is not 
self.topMostBlock:\n                targetComp = self.expansionData.getTargetComponent(b)\n                # redefine block bounds based on target component\n                b.p.zbottom = targetComp.zbottom\n                b.p.ztop = targetComp.ztop\n                b.p.height = b.p.ztop - b.p.zbottom\n                b.clearCache()\n                b.p.z = b.p.zbottom + b.getHeight() / 2.0\n                cLinkedAbove = self.linked.linkedComponents[targetComp].upper\n                if cLinkedAbove is not None:\n                    if self.expansionData.isTargetComponent(cLinkedAbove):\n                        # the linked component in the block above is the target component for that block. e.g., fuel to\n                        # fuel. Shift this linked target component up (expansion) or down (contraction) without changing\n                        # its height. In this case, component mass is conserved for both target components.\n                        cLinkedAbove.zbottom = targetComp.ztop\n                        cLinkedAbove.ztop = cLinkedAbove.height + cLinkedAbove.zbottom\n                    else:\n                        # the current target component type continues in the block above, but the target component in\n                        # the block above is different. e.g., the transition from stationary duct to control material in\n                        # a typical pin-based reactor control assembly design. Shift the target component in the block\n                        # above up (expansion) or down (contraction) without changing its height. 
In this case,\n                        # component mass is conserved for both target components.\n                        for c in iterSolidComponents(self.linked.linkedBlocks[b].upper):\n                            c.zbottom = targetComp.ztop\n                            c.ztop = c.height + c.zbottom\n\n                else:\n                    bAbove = self.linked.linkedBlocks[b].upper\n                    if bAbove is self.topMostBlock:\n                        if not bAbove.hasFlags(Flags.DUMMY):\n                            for c in iterSolidComponents(bAbove):\n                                c.zbottom = b.p.ztop\n                                c.ztop = c.zbottom + c.height\n                    else:\n                        targetCompAbove = self.expansionData.getTargetComponent(bAbove)\n                        # shift the bounds of the target component in the block above to align with the bounds of the\n                        # current block.\n                        targetCompAbove.zbottom = b.p.ztop\n                        targetCompAbove.ztop = targetCompAbove.zbottom + targetCompAbove.height\n\n                # deal with non-target components\n                for c in filter(lambda c: c is not targetComp, iterSolidComponents(b)):\n                    if self.linked.linkedComponents[c].lower is None:\n                        # this component is not axially linked to anything below and needs to shift with its\n                        # respective parent block.\n                        c.zbottom = b.p.zbottom\n                        c.ztop = c.zbottom + c.height\n\n                    cAbove = self.linked.linkedComponents[c].upper\n                    if cAbove is not None:\n                        # align components\n                        cAbove.zbottom = c.ztop\n                        cAbove.ztop = cAbove.zbottom + cAbove.height\n\n                        # redistribute mass\n                        deltaZTop = b.p.ztop - c.ztop\n                
        self._checkComponentHeight(c)\n                        if deltaZTop > 0.0:\n                            self.MASS_REDISTRIBUTOR(\n                                fromComp=cAbove, toComp=c, assemName=repr(self.linked.a), deltaZTop=deltaZTop\n                            )\n                        elif deltaZTop < 0.0:\n                            self.MASS_REDISTRIBUTOR(\n                                fromComp=c, toComp=cAbove, assemName=repr(self.linked.a), deltaZTop=deltaZTop\n                            )\n\n                        # realign components based on deltaZTop\n                        self._shiftLinkedCompsForDelta(c, cAbove, deltaZTop)\n            else:\n                b.p.zbottom = self.linked.linkedBlocks[b].lower.p.ztop\n                b.p.height = b.p.ztop - b.p.zbottom\n                b.p.z = b.p.zbottom + b.getHeight() / 2.0\n                b.clearCache()\n                # If the self.topMostBlock is a dummy block, the following is meaningless as there are no solid\n                # components. 
However, if it is not a dummy block, we need to adjust the solid components within it in\n                # order to keep their elevation information consistent with the block.\n                for c in iterSolidComponents(b):\n                    c.zbottom = b.p.zbottom\n                    c.ztop = b.p.ztop\n                    c.height = c.ztop - c.zbottom\n\n            self._checkBlockHeight(b)\n            self._recomputeBlockMassParams(b)\n            # redo mesh -- functionality based on assembly.calculateZCoords()\n            mesh.append(b.p.ztop)\n            b.spatialLocator = self.linked.a.spatialGrid[0, 0, ib]\n\n        bounds = list(self.linked.a.spatialGrid._bounds)\n        bounds[2] = array(mesh)\n        self.linked.a.spatialGrid._bounds = tuple(bounds)\n        if recalculateBurnup:\n            for b in self.linked.a.iterBlocks(Flags.FUEL):\n                self.recalculateBurnup(b)\n\n    def _recomputeBlockMassParams(self, b: \"Block\"):\n        \"\"\"\n        After component initial mass parameters have been adjusted for expansion,\n        recompute block parameters that are derived from children.\n        \"\"\"\n        paramsToMove = (\n            \"massHmBOL\",\n            \"molesHmBOL\",\n        )\n        for paramName in paramsToMove:\n            b.p[paramName] = (\n                sum(c.p[paramName] for c in b.iterComponents() if c.p[paramName] is not None) / b.getSymmetryFactor()\n            )\n\n    def recalculateBurnup(self, b: \"Block\"):\n        \"\"\"Post axial-expansion, heavy metal may have moved between blocks; recalculate burnup.\n\n        Notes\n        -----\n        Since burnup can be calculated differently, this is meant to be populated in a downstream application subclass.\n        \"\"\"\n        pass\n\n    def _shiftLinkedCompsForDelta(self, c: \"Component\", cAbove: \"Component\", deltaZTop: float):\n        # shift the height and ztop of c downwards (-deltaZTop) or upwards (+deltaZTop)\n        
c.height += deltaZTop\n        c.ztop += deltaZTop\n        # the height of cAbove grows and zbottom moves downwards (-deltaZTop) or shrinks and moves upward (+deltaZTop)\n        cAbove.height -= deltaZTop\n        cAbove.zbottom += deltaZTop\n\n    def manageCoreMesh(self, r):\n        \"\"\"Manage core mesh post assembly-level expansion.\n\n        Parameters\n        ----------\n        r : :py:class:`Reactor <armi.reactor.reactors.Reactor>`\n            ARMI reactor to have mesh modified\n\n        Notes\n        -----\n        - if no detailedAxialExpansion, then do \"cheap\" approach to uniformMesh converter.\n        - update average core mesh values with call to r.core.updateAxialMesh()\n        - oldMesh will be None during initial core construction at processLoading as it has not yet\n          been set.\n        \"\"\"\n        if not self._detailedAxialExpansion:\n            # loop through again now that the reference is adjusted and adjust the non-fuel assemblies.\n            for a in r.core.getAssemblies():\n                a.setBlockMesh(r.core.refAssem.getAxialMesh(), conserveMassFlag=\"auto\")\n\n        oldMesh = r.core.p.axialMesh\n        r.core.updateAxialMesh()\n        if oldMesh:\n            runLog.extra(\"Updated r.core.p.axialMesh (old, new)\")\n            for old, new in zip(oldMesh, r.core.p.axialMesh):\n                runLog.extra(f\"{old:.6e}\\t{new:.6e}\")\n\n    def _checkComponentHeight(self, c):\n        if c.zbottom > c.ztop:\n            msg = f\"\"\"\n            {c} has a negative height. 
This is unphysical.\n                Assembly: {self.linked.a}\n                    Block: {c.parent}\n                Component: {c}\n\n                Component Height = {c.ztop} - {c.zbottom} = {c.height}.\n            \"\"\"\n            raise ArithmeticError(dedent(msg))\n\n    def _checkBlockHeight(self, b):\n        \"\"\"Do some basic block height validation.\"\"\"\n        if b.getHeight() < self.DIF3D_MIN_BLOCK_HEIGHT:\n            runLog.debug(f\"Block {b.name} ({str(b.p.flags)}) has a height less than 3.0 cm. ({b.getHeight():.12e})\")\n\n        if b.getHeight() < 0.0:\n            raise ArithmeticError(f\"Block {b.name} ({str(b.p.flags)}) has a negative height. ({b.getHeight():.12e})\")\n\n        for c in iterSolidComponents(b):\n            if c.height - b.getHeight() > self.COMP_BLOCK_HEIGHT_DIFF_THRESHOLD:\n                diff = c.height - b.getHeight()\n                expectedChange = \"increase\" if diff < 0.0 else \"decrease\"\n                if c.hasFlags(Flags.FUEL) or c.hasFlags(Flags.CONTROL):\n                    msg = f\"\"\"\n                    The height of {c} has gone out of sync with its parent block!\n                        Assembly: {self.linked.a}\n                            Block: {b}\n                        Component: {c}\n\n                            Block Height = {b.getHeight()}\n                        Component Height = {c.height}\n\n                    The difference in height is {diff} cm. This difference will result in an artificial {expectedChange}\n                    in the mass of {c}. 
This is indicative that there are multiple axial component terminations in {b}.\n                    Per the ARMI User Manual, to preserve mass there can only be one axial component termination\n                    per block.\n                    \"\"\"\n                    runLog.warning(dedent(msg), label=\"Component height different.\")\n\n        if self.linked.linkedBlocks[b].lower:\n            lowerBlock = self.linked.linkedBlocks[b].lower\n            if lowerBlock.p.ztop != b.p.zbottom:\n                runLog.warning(\n                    \"Block heights have gone out of sync!\\n\"\n                    f\"\\t{lowerBlock.getType()}: {lowerBlock.p.ztop}\\n\"\n                    f\"\\t{b.getType()}: {b.p.zbottom}\",\n                    single=True,\n                )\n"
  },
  {
    "path": "armi/reactor/converters/axialExpansionChanger/expansionData.py",
    "content": "# Copyright 2024 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Data container for axial expansion.\"\"\"\n\nfrom statistics import mean\nfrom textwrap import dedent\nfrom typing import TYPE_CHECKING, Iterable, Optional, Union\n\nfrom armi.materials import material\nfrom armi.reactor.flags import Flags\n\nTARGET_FLAGS_IN_PREFERRED_ORDER = [\n    Flags.FUEL,\n    Flags.CONTROL,\n    Flags.POISON,\n    Flags.SHIELD,\n    Flags.SLUG,\n]\n\nif TYPE_CHECKING:\n    from armi.reactor.assemblies import Assembly\n    from armi.reactor.blocks import Block\n    from armi.reactor.components import Component\n\n\ndef iterSolidComponents(b: \"Block\") -> Iterable[\"Component\"]:\n    \"\"\"Iterate over all solid components in the block.\"\"\"\n    return filter(lambda c: not isinstance(c.material, material.Fluid), b)\n\n\ndef getSolidComponents(b: \"Block\") -> list[\"Component\"]:\n    \"\"\"\n    Return list of components in the block that have solid material.\n\n    Notes\n    -----\n    Axial expansion only needs to be applied to solid materials. 
We should not update\n    number densities on fluid materials to account for changes in block height.\n\n    See Also\n    --------\n    :func:`iterSolidComponents` produces an iterable rather than a list and may be better\n    suited if you simply want to iterate over solids in a block.\n    \"\"\"\n    return list(iterSolidComponents(b))\n\n\nclass ExpansionData:\n    r\"\"\"Data container for axial expansion.\n\n    The primary responsibility of this class is to determine the axial expansion factors\n    for each solid component in the assembly. Expansion factors can be computed from the component\n    temperatures in :meth:`computeThermalExpansionFactors` or provided directly to the class\n    via :meth:`setExpansionFactors`.\n\n    This class relies on the concept of a \"target\" expansion component for each block. While\n    components will expand at different rates, the final height of the block must be determined.\n    The target component, determined by :meth:`determineTargetComponents`, will drive the total\n    height of the block post-expansion.\n\n    Parameters\n    ----------\n    a: :py:class:`Assembly <armi.reactor.assemblies.Assembly>`\n        Assembly to assign component-wise expansion data to\n    setFuel: bool\n        used to determine if fuel component should be set as\n        axial expansion target component during initialization.\n        see self._isFuelLocked\n    expandFromTinputToThot: bool\n        Determines if thermal expansion factors should be calculated from\n            - ``c.inputTemperatureInC`` to ``c.temperatureInC`` when ``True``, or\n            - some other reference temperature and ``c.temperatureInC`` when ``False``\n    \"\"\"\n\n    _expansionFactors: dict[\"Component\", float]\n    componentReferenceTemperature: dict[\"Component\", float]\n\n    def __init__(self, a: \"Assembly\", setFuel: bool, expandFromTinputToThot: bool):\n        self._a = a\n        self.componentReferenceTemperature = {}\n        
self._expansionFactors = {}\n        self._componentDeterminesBlockHeight = {}\n        self._setAllTargetComponents(setFuel)\n        self.expandFromTinputToThot = expandFromTinputToThot\n\n    def setExpansionFactors(self, components: list[\"Component\"], expFrac: list[float]):\n        \"\"\"Sets user defined expansion fractions.\n\n        Parameters\n        ----------\n        components : List[:py:class:`Component <armi.reactor.components.component.Component>`]\n            list of Components to have their heights changed\n        expFrac : List[float]\n            list of L1/L0 height changes that are to be applied to components\n\n        Raises\n        ------\n        RuntimeError\n            If components and expFrac are different lengths\n        \"\"\"\n        if len(components) != len(expFrac):\n            raise RuntimeError(\n                \"Number of components and expansion fractions must be the same!\\n\"\n                f\"     len(components) = {len(components)}\\n\"\n                f\"        len(expFrac) = {len(expFrac)}\"\n            )\n        for exp in expFrac:\n            if exp <= 0.0:\n                raise RuntimeError(\n                    f\"Expansion factor {exp}, L1/L0, is not physical. 
Expansion fractions should be greater than 0.0.\"\n                )\n        for c, p in zip(components, expFrac):\n            self._expansionFactors[c] = p\n\n    def updateComponentTempsBy1DTempField(self, tempGrid, tempField):\n        \"\"\"Assign a block-average axial temperature to components.\n\n        Parameters\n        ----------\n        tempGrid : numpy array\n            1D axial temperature grid (i.e., physical locations where temp is stored)\n        tempField : numpy array\n            temperature values along grid\n\n        Notes\n        -----\n        - given a 1D axial temperature grid and distribution, searches for temperatures that fall\n          within the bounds of a block, and averages them\n        - this average temperature is then passed to self.updateComponentTemp()\n\n        Raises\n        ------\n        ValueError\n            if no temperature points found within a block\n        RuntimeError\n            if tempGrid and tempField are different lengths\n        \"\"\"\n        if len(tempGrid) != len(tempField):\n            raise RuntimeError(\"tempGrid and tempField must have the same length.\")\n\n        self.componentReferenceTemperature = {}  # reset, just to be safe\n        for b in self._a:\n            tmpMapping = []\n            for idz, z in enumerate(tempGrid):\n                if b.p.zbottom <= z <= b.p.ztop:\n                    tmpMapping.append(tempField[idz])\n                if z > b.p.ztop:\n                    break\n\n            if len(tmpMapping) == 0:\n                raise ValueError(\n                    f\"{b} has no temperature points within it!\\n\"\n                    \"Likely need to increase the refinement of the temperature grid.\"\n                )\n\n            blockAveTemp = mean(tmpMapping)\n            for c in b:\n                self.updateComponentTemp(c, blockAveTemp)\n\n    def updateComponentTemp(self, c: \"Component\", temp: float):\n        \"\"\"Update component temperatures 
with a provided temperature.\n\n        Parameters\n        ----------\n        c : :py:class:`Component <armi.reactor.components.component.Component>`\n            component to which the temperature, temp, is to be applied\n        temp : float\n            new component temperature in C\n\n        Notes\n        -----\n        - \"reference\" height and temperature are the current states; i.e. before\n           1) the new temperature, temp, is applied to the component, and\n           2) the component is axially expanded\n        \"\"\"\n        self.componentReferenceTemperature[c] = c.temperatureInC\n        c.setTemperature(temp)\n\n    def computeThermalExpansionFactors(self):\n        \"\"\"Computes expansion factors for all components via thermal expansion.\"\"\"\n        for b in self._a:\n            self._setComponentThermalExpansionFactors(b)\n\n    def _setComponentThermalExpansionFactors(self, b: \"Block\"):\n        \"\"\"For each component in the block, set the thermal expansion factors.\"\"\"\n        for c in iterSolidComponents(b):\n            self._perComponentThermalExpansionFactors(c)\n\n    def _perComponentThermalExpansionFactors(self, c: \"Component\"):\n        \"\"\"Set the thermal expansion factors for a single component.\"\"\"\n        if self.expandFromTinputToThot:\n            # get thermal expansion factor between c.inputTemperatureInC & c.temperatureInC\n            self._expansionFactors[c] = c.getThermalExpansionFactor()\n        elif c in self.componentReferenceTemperature:\n            growFrac = c.getThermalExpansionFactor(T0=self.componentReferenceTemperature[c])\n            self._expansionFactors[c] = growFrac\n        else:\n            # We want expansion factors relative to componentReferenceTemperature not\n            # Tinput. 
But for this component there isn't a componentReferenceTemperature, so\n            # we'll assume that the expansion factor is 1.0.\n            self._expansionFactors[c] = 1.0\n\n    def getExpansionFactor(self, c: \"Component\"):\n        \"\"\"Retrieves expansion factor for c.\n\n        Parameters\n        ----------\n        c : :py:class:`Component <armi.reactor.components.component.Component>`\n            Component to retrieve expansion factor for\n        \"\"\"\n        value = self._expansionFactors.get(c, 1.0)\n        return value\n\n    def _setAllTargetComponents(self, setFuel: bool):\n        \"\"\"Sets axial expansion target component on each block in the expanded assembly.\n\n        Parameters\n        ----------\n        setFuel\n            boolean to determine if fuel block should have its target component set. Useful for when\n            target components should be determined on the fly.\n        \"\"\"\n        for b in self._a:\n            self.setTargetComponent(b, setFuel)\n\n    def setTargetComponent(self, b: \"Block\", setFuel: bool):\n        \"\"\"Set the axial expansion target component on a specific Block.\n\n        Parameters\n        ----------\n        b\n            ARMI Block which is to have its axial expansion target component set.\n        setFuel\n            boolean to determine if fuel block should have its target component set. 
Useful for when\n            target components should be determined on the fly.\n        \"\"\"\n        if b.p.axialExpTargetComponent:\n            target = b.getComponentByName(b.p.axialExpTargetComponent)\n            self._setExpansionTarget(b, target)\n        elif b.hasFlags(Flags.PLENUM) or b.hasFlags(Flags.ACLP):\n            self.determineTargetComponent(b, Flags.CLAD)\n        elif b.hasFlags(Flags.DUMMY):\n            # Dummy blocks are intended to contain only fluid and do not need a target component\n            pass\n        elif setFuel and b.hasFlags(Flags.FUEL):\n            self._isFuelLocked(b)\n        else:\n            self.determineTargetComponent(b)\n\n    def determineTargetComponent(self, b: \"Block\", flagOfInterest: Optional[Flags] = None) -> \"Component\":\n        \"\"\"Determines the component who's expansion will determine block height.\n\n        This information is also stored on the block at ``Block.p.axialExpTargetComponent`` for faster\n        retrieval later.\n\n        Parameters\n        ----------\n        b : :py:class:`Block <armi.reactor.blocks.Block>`\n            block to specify target component for\n        flagOfInterest : :py:class:`Flags <armi.reactor.flags.Flags>`\n            the flag of interest to identify the target component\n\n        Returns\n        -------\n        Component\n            Component identified as target component, if found.\n\n        Notes\n        -----\n        - if flagOfInterest is None, finds the component within b that contains flags that\n          are defined in a preferred order of flags, or barring that, in b.p.flags\n        - if flagOfInterest is not None, finds the component that contains the flagOfInterest.\n\n        Raises\n        ------\n        RuntimeError\n            no target component found\n        RuntimeError\n            multiple target components found\n        \"\"\"\n        if flagOfInterest is None:\n            # Follow expansion of most neutronically 
important component, fuel then control/poison\n            for targetFlag in TARGET_FLAGS_IN_PREFERRED_ORDER:\n                candidates = b.getChildrenWithFlags(targetFlag)\n                if candidates:\n                    break\n            # some blocks/components are not included in the above list but should still be found\n            if not candidates:\n                candidates = [c for c in b.getChildren() if c.p.flags in b.p.flags]\n        else:\n            candidates = b.getChildrenWithFlags(flagOfInterest)\n        if len(candidates) == 0:\n            # if only 1 solid, be smart enought to snag it\n            solidMaterials = getSolidComponents(b)\n            if len(solidMaterials) == 1:\n                candidates = solidMaterials\n        if len(candidates) == 0:\n            raise RuntimeError(f\"No target component found!\\n   Block {b}\")\n        if len(candidates) > 1:\n            msg = f\"\"\"\n                Cannot have more than one component within a block that has the target flag!\n                Block {b}\n                    flagOfInterest {flagOfInterest}\n                    Components {candidates}\n            \"\"\"\n            raise RuntimeError(dedent(msg))\n        target = candidates[0]\n        self._setExpansionTarget(b, target)\n        return target\n\n    def _setExpansionTarget(self, b: \"Block\", target: \"Component\"):\n        self._componentDeterminesBlockHeight[target] = True\n        b.p.axialExpTargetComponent = target.name\n\n    def _isFuelLocked(self, b: \"Block\"):\n        \"\"\"Physical/realistic implementation reserved for ARMI plugin.\n\n        Parameters\n        ----------\n        b : :py:class:`Block <armi.reactor.blocks.Block>`\n            block to specify target component for\n\n        Raises\n        ------\n        RuntimeError\n            multiple fuel components found within b\n\n        Notes\n        -----\n        - This serves as an example to check for fuel/clad 
locking/interaction found in SFRs.\n        - A more realistic/physical implementation is reserved for ARMI plugin(s).\n        \"\"\"\n        c = b.getComponent(Flags.FUEL)\n        if c is None:\n            raise RuntimeError(f\"No fuel component within {b}!\")\n        self._setExpansionTarget(b, c)\n\n    def isTargetComponent(self, c: Union[\"Component\", None]) -> bool:\n        \"\"\"Returns bool if c is a target component.\n\n        Parameters\n        ----------\n        c : :py:class:`Component <armi.reactor.components.component.Component>`\n            Component to check target component status\n        \"\"\"\n        return bool(c in self._componentDeterminesBlockHeight)\n\n    def getTargetComponent(self, b: \"Block\"):\n        \"\"\"Returns the target component for a block.\n\n        Parameters\n        ----------\n        b\n            the block to query for the target component\n        \"\"\"\n        c = filter(self.isTargetComponent, iterSolidComponents(b))\n        try:\n            return next(c)\n        except StopIteration:\n            raise RuntimeError(f\"No target component found for {b} in {b.parent}!\")\n"
  },
  {
    "path": "armi/reactor/converters/axialExpansionChanger/redistributeMass.py",
    "content": "# Copyright 2025 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport re\nimport typing\nfrom math import isclose\nfrom textwrap import dedent\n\nfrom scipy.optimize import brentq\n\nfrom armi import runLog\nfrom armi.reactor.flags import Flags\nfrom armi.utils import densityTools\n\nif typing.TYPE_CHECKING:\n    from armi.reactor.components.component import Component\n\n\nclass RedistributeMass:\n    \"\"\"Given ``deltaZTop``, add mass from ``fromComp`` and give it to ``toComp``.\n\n    Parameters\n    ----------\n    fromComp\n        Component which is going to give mass to toComp\n    toComp\n        Component that is recieving mass from fromComp\n    deltaZTop\n        The length, in cm, of fromComp being given to toComp\n    initOnly\n        Optional parameter to only initialize the class and not perform the redistribution. 
If True, the redistribution\n        can be executed by calling :py:meth:`performRedistribution`.\n    \"\"\"\n\n    def __init__(\n        self, fromComp: \"Component\", toComp: \"Component\", deltaZTop: float, assemName: str, initOnly: bool = False\n    ):\n        self.fromComp = fromComp\n        self.toComp = toComp\n        self.assemblyName: str = assemName\n        self.deltaZTop = deltaZTop\n        self.massFrom: float = 0.0\n        self.massTo: float = 0.0\n        if not initOnly:\n            self.performRedistribution()\n\n    def performRedistribution(self):\n        \"\"\"Perform the mass redistribution between two compatible components.\"\"\"\n        if self.compatabilityCheck():\n            self.setNewToCompNDens()\n            self.setNewToCompTemperature()\n            if self.fromComp.p.molesHmBOL is not None and self.toComp.p.molesHmBOL is not None:\n                self._adjustMassParams()\n\n    @property\n    def fromCompVolume(self):\n        return self.fromComp.getArea() * abs(self.deltaZTop)\n\n    @property\n    def toCompVolume(self):\n        return self.toComp.getArea() * self.toComp.height\n\n    @property\n    def newVolume(self):\n        \"\"\"Compute and return the new post-redistribution volume of toComp.\"\"\"\n        return self.toCompVolume + self.fromCompVolume\n\n    def compatabilityCheck(self) -> bool:\n        \"\"\"Ensure fromComp and toComp are the same material.\n\n        Notes\n        -----\n        If the linked components are not the same material, we cannot transfer mass between materials because then the\n        resulting material has unknown properties.\n\n        Returns\n        -------\n        False if incompatible; true otherwise.\n        \"\"\"\n        if type(self.fromComp.material) is not type(self.toComp.material):\n            msg = f\"\"\"\n            Cannot redistribute mass between components that are different materials!\n                Trying to redistribute mass between the following 
components in {self.assemblyName}:\n                    from --> {self.fromComp.parent} : {self.fromComp} : {type(self.fromComp.material)}\n                      to --> {self.toComp.parent} : {self.toComp} : {type(self.toComp.material)}\n\n                Instead, mass will be removed from ({self.fromComp} | {type(self.fromComp.material)}) and\n                ({self.toComp} | {type(self.toComp.material)} will be artificially expanded. The consequence is that\n                mass conservation is no longer guaranteed for the {self.toComp.getType()} component type on this\n                assembly!\n            \"\"\"\n            runLog.warning(dedent(msg), label=\"Cannot redistribute mass between different materials.\", single=True)\n            return False\n        return True\n\n    def setNewToCompNDens(self):\n        \"\"\"Calculate the post-redistribution number densities for toComp and determine how much mass is in play for\n        fromComp and toComp.\n\n        Notes\n        -----\n        Only the mass of ``toComp`` is changed in this method. The mass of ``fromComp`` is changed separately by\n        changing the height of ``fromComp`` -- the number densities of ``fromComp`` are not modified. When\n        redistributing mass, if ``fromComp`` and ``toComp`` are different temperatures, the temperature of\n        ``toComp`` will change. 
See :py:meth:`setNewToCompTemperature`.\n        \"\"\"\n        # calculate the mass of each nuclide and then the ndens for the new mass\n        newNDens: dict[str, float] = {}\n        nucs = self._getAllNucs(self.toComp.getNuclides(), self.fromComp.getNuclides())\n        for nuc in nucs:\n            massByNucFrom = densityTools.getMassInGrams(nuc, self.fromCompVolume, self.fromComp.getNumberDensity(nuc))\n            massByNucTo = densityTools.getMassInGrams(nuc, self.toCompVolume, self.toComp.getNumberDensity(nuc))\n            newNDens[nuc] = densityTools.calculateNumberDensity(nuc, massByNucFrom + massByNucTo, self.newVolume)\n            self.massFrom += massByNucFrom\n            self.massTo += massByNucTo\n\n        # Set newNDens on toComp\n        self.toComp.setNumberDensities(newNDens)\n\n    def setNewToCompTemperature(self):\n        r\"\"\"Calculate and set the post-redistribution temperature of toComp.\n\n        Notes\n        -----\n        Calculating this new temperature is non trivial due to thermal expansion. The following defines what the area\n        of ``toComp`` is post-redistribution,\n\n        .. math::\n\n            A_1(\\hat{T}) \\left( H_1 + \\delta \\right) &= A_1(T_1) H_1 + A_2(T_2)\\delta,\\\\\n            A_1(\\hat{T}) &= \\frac{A_1(T_1) H_1 + A_2(T_2)\\delta}{H_1 + \\delta}.\n\n        Where, :math:`A_1, T_1, H_1`, are the area, temperature, and height of ``toComp``, :math:`A_2, T_2`, are the\n        area and temparature of ``fromComp``, :math:`\\delta` is the parameter ``deltaZTop``, and :math:`\\hat{T}` is\n        the new temperature of ``toComp`` post-redistribution. 
:func:`scipy.optimize.brentq` is used to\n        find the root of the above equation, indicating the value for :math:`\\hat{T}`\n        that finds the desired area, post-redistribution of mass.\n        \"\"\"\n        if isclose(self.fromComp.temperatureInC, self.toComp.temperatureInC, rel_tol=1e-09):\n            # per isclose documentation, rel_tol of 1e-09 is roughly equivaluent to ensuring the temps are\n            # the same to roughly 9 digits.\n            newToCompTemp = self.toComp.temperatureInC\n        else:\n            targetArea = self.newVolume / (self.toComp.height + abs(self.deltaZTop))\n            try:\n                newToCompTemp = brentq(\n                    f=lambda T: self.toComp.getArea(Tc=T) - targetArea,\n                    a=self.fromComp.temperatureInC,\n                    b=self.toComp.temperatureInC,\n                )\n            except ValueError:\n                totalMass = self.massFrom + self.massTo\n                newToCompTemp = (\n                    self.massFrom / totalMass * self.fromComp.temperatureInC\n                    + self.massTo / totalMass * self.toComp.temperatureInC\n                )\n                if (self.toComp.hasFlags(Flags.FUEL) or self.toComp.hasFlags(Flags.CONTROL)) or (\n                    self.fromComp.hasFlags(Flags.FUEL) or self.fromComp.hasFlags(Flags.CONTROL)\n                ):\n                    msg = f\"\"\"\n                    Temperature search algorithm in axial expansion has failed in {self.assemblyName}\n                    Trying to search for new temp between\n                        from --> {self.fromComp.parent} : {self.fromComp} : {type(self.fromComp.material)} at {self.fromComp.temperatureInC} C\n                        to --> {self.toComp.parent} : {self.toComp} : {type(self.toComp.material)} at {self.toComp.temperatureInC} C\n\n                    f({self.fromComp.temperatureInC}) = {self.toComp.getArea(Tc=self.fromComp.temperatureInC) - targetArea}\n             
       f({self.toComp.temperatureInC}) = {self.toComp.getArea(Tc=self.toComp.temperatureInC) - targetArea}\n\n                    Instead, a mass weighted average temperature of {newToCompTemp} will be used. The consequence is that\n                    mass conservation is no longer guaranteed for this component type on this assembly!\n                    \"\"\"  # noqa: E501\n                    runLog.warning(dedent(msg), label=\"Temp Search Failure\")\n            except Exception as ee:\n                raise ee\n\n        # Do not use component.setTemperature as this mucks with the number densities we just calculated.\n        self.toComp.temperatureInC = newToCompTemp\n        self.toComp.clearCache()\n\n    @staticmethod\n    def _sortKey(item):\n        \"\"\"Break isotope string down by element, atomic weight, and metastable state for sorting. Raises a RuntimeError\n        if the string does not match the expected pattern.\n        \"\"\"\n        pattern = re.compile(\n            r\"\"\"\n            ([a-zA-Z]{1,2}) # Element\n            (\\d{1,3})?      # atomic weight (optional, e.g., \"C\")\n            ([a-zA-Z])?     # metastable state (optional, e.g., Am242M or Am242)\n            \"\"\",\n            re.VERBOSE,\n        )\n        match = re.search(pattern, item)\n        if match:\n            # Convert numeric parts to int for correct numerical sorting\n            element = match.group(1)\n            atomicWeight = int(match.group(2)) if match.group(2) else 0\n            metastable = 1 if match.group(3) else 0\n            return (atomicWeight, element, metastable)\n        raise RuntimeError(f\"Unknown isotope! - {item}\")\n\n    def _getAllNucs(self, nucsA: list[str], nucsB: list[str]) -> list[str]:\n        \"\"\"Return a list that contains all of the nuclides in nucsA and nucsB.\n\n        Notes\n        -----\n        The returned list is sorted by :py:meth:`sortKey`. 
Isotopes are sorted based on 1) atomic weight, 2) element,\n        and 3) metastable state.\n        \"\"\"\n        nucsToAdd = set(nucsA).union(set(nucsB))\n        return sorted(nucsToAdd, key=self._sortKey)\n\n    def _adjustMassParams(self):\n        \"\"\"Adjust massHmBOL and molesHmBOL on fromComp and toComp.\"\"\"\n        paramsToMove = (\n            \"massHmBOL\",\n            \"molesHmBOL\",\n        )\n        removalFrac = abs(self.deltaZTop) / self.fromComp.height\n        for paramName in paramsToMove:\n            if self.fromComp.p[paramName] is not None:\n                amountMoved = removalFrac * self.fromComp.p[paramName]\n                self.toComp.p[paramName] = self.toComp.p[paramName] + amountMoved\n                self.fromComp.p[paramName] = self.fromComp.p[paramName] - amountMoved\n"
  },
  {
    "path": "armi/reactor/converters/blockConverters.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Convert block geometry from one to another, etc.\"\"\"\n\nimport copy\nimport math\n\nfrom armi import runLog\nfrom armi.reactor import blocks, components, grids\nfrom armi.reactor.flags import Flags\nfrom armi.utils.plotting import plotConvertedBlock\n\nSIN60 = math.sin(math.radians(60.0))\n\n\nclass BlockConverter:\n    \"\"\"Converts a block.\"\"\"\n\n    def __init__(self, sourceBlock):\n        \"\"\"\n        Parameters\n        ----------\n        sourceBlock : :py:class:`armi.reactor.blocks.Block`\n            An ARMI Block object to convert.\n        quite : boolean, optional\n            If True, less information is output in the runLog.\n        \"\"\"\n        self._sourceBlock = sourceBlock\n        self.convertedBlock = None  # the new block that is created.\n\n    def dissolveComponentIntoComponent(self, soluteName, solventName, minID=0.0):\n        \"\"\"\n        Make a new block that homogenized one component into another while conserving number of atoms.\n\n        Parameters\n        ----------\n        soluteName : str\n            The name of the solute component in _sourceBlock\n        solventName : str\n            The name of the solvent component in _sourceBlock\n        minID : float\n            The minimum hot temperature diameter allowed for the solvent.\n            This is useful for forcing components to not overlap.\n\n    
    Warning\n        -------\n        Nuclides merged into another component will be the temperature of the new component as temperature\n        is stored on the component level. If the solute and solvent are the same temperature this is not an issue.\n        Converted blocks that have dissolved components should avoid having their temperatures changed.\n        This is because the component being merged into retains its old thermal expansion properties and\n        may not be consistent with how the components would behave independently. For this reason it is\n        recommended that these blocks be made right before the physics calculation of interest and be immediately\n        discarded. Attaching them to the reactor is not recommended.\n        \"\"\"\n        runLog.extra(\n            \"Homogenizing the {} component into the {} component in block {}\".format(\n                soluteName, solventName, self._sourceBlock.getType()\n            ),\n            single=True,\n        )\n        # break up dimension links since we will be messing with this block's components\n        newBlock = copy.deepcopy(self._sourceBlock)\n        # cannot pass components directly since the new block will have new components\n        solute = newBlock.getComponentByName(soluteName)\n        solvent = newBlock.getComponentByName(solventName)\n        self._checkInputs(soluteName, solventName, solute, solvent)\n\n        soluteLinks = solute.getLinkedComponents()\n        # the area about to be added by the dimension change can be different than the simple area of the\n        # merged component due to void gaps between components\n        oldArea = solvent.getArea()\n        runLog.debug(\"removing {}\".format(solute))\n        # skip recomputation of area fractions because the blocks still have 0 height at this stage and derived\n        # shape volume computations will fail\n        soluteArea = solute.getArea()\n        solute.mergeNuclidesInto(solvent)\n        
newBlock.remove(solute, recomputeAreaFractions=False)\n        self._sourceBlock = newBlock\n\n        # adjust new shape area.\n        if solvent.__class__ is components.DerivedShape:\n            pass  # If it's coolant, the auto-fill area system gets it. coolant has no links\n        else:\n            soluteID, soluteOD = (\n                solute.getDimension(\"id\", cold=False),\n                solute.getDimension(\"od\", cold=False),\n            )\n            if soluteArea >= 0.0:\n                if solvent.getDimension(\"id\", cold=False) > soluteID:\n                    runLog.debug(f\"Decreasing ID of {solvent} to accommodate {solute}.\")\n                    solvent.setDimension(\"id\", soluteID, cold=False)\n                if solvent.getDimension(\"od\", cold=False) < soluteOD:\n                    runLog.debug(f\"Increasing OD of {solvent} to accommodate {solute}.\")\n                    solvent.setDimension(\"od\", soluteOD, cold=False)\n                if solvent.getDimension(\"id\", cold=False) < minID:\n                    runLog.debug(f\"Updating the ID of {solvent} to the specified min ID: {minID}.\")\n                    solvent.setDimension(\"id\", minID, cold=False)\n            else:\n                # can only merge a negative-area component if one of the dimensions is linked\n                matchedDimension = False\n                if solvent.getDimension(\"id\", cold=False) == soluteOD:\n                    runLog.debug(f\"Increasing ID of {solvent} to accommodate {solute}.\")\n                    solvent.setDimension(\"id\", soluteID, cold=False)\n                    matchedDimension = True\n                if solvent.getDimension(\"od\", cold=False) == soluteID:\n                    runLog.debug(f\"Decreasing OD of {solvent} to accommodate {solute}.\")\n                    solvent.setDimension(\"od\", soluteOD, cold=False)\n                    matchedDimension = True\n                if not matchedDimension:\n                    
errorMsg = (\n                        f\"Cannot merge negative-area component {solute} into {solvent} without the two being linked.\"\n                    )\n                    runLog.error(errorMsg)\n                    raise ValueError(errorMsg)\n\n            if soluteLinks:\n                self.restablishLinks(solute, solvent, soluteLinks)\n            self._verifyExpansion(solute, solvent)\n\n        solvent.changeNDensByFactor(oldArea / solvent.getArea())\n\n    def _checkInputs(self, soluteName, solventName, solute, solvent):\n        if solute is None or solvent is None:\n            raise ValueError(\n                \"Block {} must have a {} component and a {} component to homogenize.\".format(\n                    self._sourceBlock, soluteName, solventName\n                )\n            )\n        if not (\n            isinstance(solvent, components.DerivedShape)\n            or all(isinstance(c, components.Circle) for c in (solute, solvent))\n        ):\n            raise ValueError(\n                \"Components are not of compatible shape to be merged solute: {}, solvent: {}\".format(solute, solvent)\n            )\n        if solute.getArea() < 0:\n            # allow negative-area gap\n            if not solute.hasFlags(Flags.GAP):\n                raise ValueError(\n                    \"Cannot merge solute with negative area into a solvent. {} area: {}\".format(\n                        solute, solute.getArea()\n                    )\n                )\n        if solvent.getArea() <= 0:\n            raise ValueError(\n                \"Cannot merge into a solvent with negative or 0 area. 
{} area: {}\".format(solvent, solvent.getArea())\n            )\n\n    def restablishLinks(self, solute, solvent, soluteLinks):\n        runLog.extra(\n            \"Solute is linked to component(s) {} and these links will be reestablished.\".format(soluteLinks),\n            single=True,\n        )\n        for linkedC in soluteLinks:\n            if linkedC in solvent.getLinkedComponents():\n                if not linkedC.containsVoidMaterial():\n                    raise ValueError(\n                        \"Non-Void component {} was linked to solute and solvent {} in converted block {}. \"\n                        \"Please dissolve this separately.\".format(linkedC, solvent, self._sourceBlock)\n                    )\n                runLog.extra(\n                    \"Removing void component {} in converted block {}.\".format(linkedC, self._sourceBlock.getType()),\n                    single=True,\n                )\n                self._sourceBlock.remove(linkedC)\n            else:\n                dims = linkedC.getDimensionNamesLinkedTo(solute)\n                runLog.extra(\n                    \"Linking component {} in converted block {} to solvent {}.\".format(\n                        linkedC, self._sourceBlock.getType(), solvent\n                    ),\n                    single=True,\n                )\n                for dimToChange, dimOfOther in dims:\n                    linkedC.setLink(dimToChange, solvent, dimOfOther)\n\n    def _verifyExpansion(self, solute, solvent):\n        validComponents = (c for c in self._sourceBlock if not isinstance(c, components.DerivedShape))\n        for c in sorted(validComponents):\n            if not isinstance(c, components.Circle) or c is solvent or c.containsVoidMaterial():\n                continue\n            if c.isEncapsulatedBy(solvent):\n                raise ValueError(\n                    \"There is a non void component {} in the location where component {} was expanded \"\n                    
\"to absorb component solute {}. solvent dims {}, {} comp dims {} {}.\".format(\n                        c, solvent, solute, solvent.p.id, solvent.p.od, c.p.id, c.p.od\n                    )\n                )\n            if c.getArea() < 0.0:\n                runLog.warning(\n                    \"Component {} still has negative area after {} was dissolved into {}\".format(c, solute, solvent),\n                    single=True,\n                )\n\n    def convert(self):\n        raise NotImplementedError\n\n\nclass ComponentMerger(BlockConverter):\n    \"\"\"For a provided block, merged the solute component into the solvent component.\n\n    .. impl:: Homogenize one component into another.\n        :id: I_ARMI_BLOCKCONV0\n        :implements: R_ARMI_BLOCKCONV\n\n        This subclass of ``BlockConverter`` is meant as a one-time-use tool, to convert\n        a ``Block`` into one ``Component``. A ``Block`` is a ``Composite`` that may\n        probably has multiple ``Components`` somewhere in it. This means averaging the\n        material properties in the original ``Block``, and ensuring that the final\n        ``Component`` has the same shape and volume as the original ``Block``. 
This\n        subclass essentially just uses the base class method\n        ``dissolveComponentIntoComponent()`` given prescribed solute and solvent\n        materials, to define the merger.\n\n    Notes\n    -----\n    It is the job of the developer to determine if merging a Block into one Component\n    will yield valid or sane results.\n    \"\"\"\n\n    def __init__(self, sourceBlock, soluteName, solventName):\n        \"\"\"\n        Parameters\n        ----------\n        sourceBlock : :py:class:`armi.reactor.blocks.Block`\n            An ARMI Block object to convert.\n        soluteName : str\n            The name of the solute component in _sourceBlock\n        solventName : str\n            The name of the solvent component in _sourceBlock\n        quite : boolean, optional\n            If True, less information is output in the runLog.\n        \"\"\"\n        BlockConverter.__init__(self, sourceBlock)\n        self.soluteName = soluteName\n        self.solventName = solventName\n\n    def convert(self):\n        \"\"\"Return a block with the solute merged into the solvent.\"\"\"\n        self.dissolveComponentIntoComponent(self.soluteName, self.solventName)\n        return self._sourceBlock\n\n\nclass MultipleComponentMerger(BlockConverter):\n    \"\"\"\n    Dissolves multiple components and checks validity at end.\n\n    Doesn't run _verifyExpansion until the end so that the order the components are dissolved in\n    does not cause a failure. For example if two liners are dissolved into the clad and the farthest\n    liner was dissolved first, this would normally cause a ValueError in _verifyExpansion since the\n    clad would be completely expanded over a non void component.\n\n    .. impl:: Homogenize multiple components into one.\n        :id: I_ARMI_BLOCKCONV1\n        :implements: R_ARMI_BLOCKCONV\n\n        This subclass of ``BlockConverter`` is meant as a one-time-use tool, to convert\n        a multiple ``Components`` into one. 
This means averaging the material\n        properties in the original ``Components``, and ensuring that the final\n        ``Component`` has the same shape and volume as all of the originals. This\n        subclass essentially just uses the base class method\n        ``dissolveComponentIntoComponent()`` given prescribed solute and solvent\n        materials, to define the merger. Though care is taken here to ensure the merger\n        isn't verified until it is completely finished.\n    \"\"\"\n\n    def __init__(self, sourceBlock, soluteNames, solventName, specifiedMinID=0.0):\n        \"\"\"Standard constructor method.\n\n        Parameters\n        ----------\n        sourceBlock : :py:class:`armi.reactor.blocks.Block`\n            An ARMI Block object to convert.\n        soluteNames : list\n            List of str names of the solute components in _sourceBlock\n        solventName : str\n            The name of the solvent component in _sourceBlock\n        minID : float\n            The minimum hot temperature diameter allowed for the solvent.\n            This is useful for forcing components to not overlap.\n        quite : boolean, optional\n            If True, less information is output in the runLog.\n        \"\"\"\n        BlockConverter.__init__(self, sourceBlock)\n        self.soluteNames = soluteNames\n        self.solventName = solventName\n        self.specifiedMinID = specifiedMinID\n\n    def _verifyExpansion(self, solute, solvent):\n        \"\"\"Wait until all components are dissolved to check this.\"\"\"\n        pass\n\n    def convert(self):\n        \"\"\"Return a block with the solute merged into the solvent.\"\"\"\n        for soluteName in self.soluteNames:\n            self.dissolveComponentIntoComponent(soluteName, self.solventName, minID=self.specifiedMinID)\n        solvent = self._sourceBlock.getComponentByName(self.solventName)\n        if solvent.__class__ is not components.DerivedShape:\n            
BlockConverter._verifyExpansion(self, self.soluteNames, solvent)\n        return self._sourceBlock\n\n\nclass MixedPinComponentMerger(MultipleComponentMerger):\n    def __init__(self, sourceBlock, soluteNames, solventName, pin, specifiedMinID=0.0):\n        \"\"\"\n        This BlockConverter handles mixed blocks with multiple pin types.\n        A pin is a list of circular components that share a common spatial locator and thus\n        make up a \"pin\", which is a physical construct but not a formal ARMI construct.\n\n        This class can merge multiple components at a time within a single pin. To perform\n        conversions on multiple pins within a mixed block, a new instance of this class\n        must be constructed for each pin, and then the :py:meth:`convert` method must be called in a\n        waterfall fashion -- that is, the block returned from :py:meth:`convert` should be passed\n        into the constructor of the next instance to perform a chain of component merges.\n\n        .. 
impl:: Homogenize multiple components into one in a single pin within a mixed pin assembly.\n            :id: I_ARMI_BLOCKCONV2\n            :implements: R_ARMI_BLOCKCONV\n\n        Parameters\n        ----------\n        sourceBlock : :py:class:`armi.reactor.blocks.Block`\n            An ARMI Block object to convert.\n        soluteNames : list\n            List of str names of the solute components in _sourceBlock\n        solventName : str\n            The name of the solvent component in _sourceBlock\n        pin : List[Component]\n            List of the components that make up the pin being converted.\n        minID : float\n            The minimum hot temperature diameter allowed for the solvent.\n            This is useful for forcing components to not overlap.\n        quite : boolean, optional\n            If True, less information is output in the runLog.\n        \"\"\"\n        super().__init__(sourceBlock, soluteNames, solventName, specifiedMinID=specifiedMinID)\n        self.pin = pin\n\n    def convert(self):\n        \"\"\"\n        Return a block with the solute merged into the solvent.\n\n        Run _verifyPinExpansion so that verification is limited to a single pin.\n        \"\"\"\n        for soluteName in self.soluteNames:\n            self.dissolveComponentIntoComponent(soluteName, self.solventName, minID=self.specifiedMinID)\n        solvent = self._sourceBlock.getComponentByName(self.solventName)\n        if solvent.__class__ is not components.DerivedShape:\n            self._verifyPinExpansion(self.soluteNames, solvent)\n        return self._sourceBlock\n\n    def _verifyPinExpansion(self, solute, solvent):\n        \"\"\"Verify the conversion of a single pin construct.\"\"\"\n        validComponents = (c for c in self.pin if not isinstance(c, components.DerivedShape))\n        for c in sorted(validComponents):\n            if c not in self._sourceBlock:\n                # c was merged\n                continue\n            if not 
isinstance(c, components.Circle) or c is solvent or c.containsVoidMaterial():\n                continue\n            if c.isEncapsulatedBy(solvent):\n                raise ValueError(\n                    \"There is a non void component {} in the location where component {} was expanded \"\n                    \"to absorb component solute {}. solvent dims {}, {} comp dims {} {}.\".format(\n                        c, solvent, solute, solvent.p.id, solvent.p.od, c.p.id, c.p.od\n                    )\n                )\n            if c.getArea() < 0.0:\n                runLog.warning(\n                    \"Component {} still has negative area after {} was dissolved into {}\".format(c, solute, solvent),\n                    single=True,\n                )\n\n\nclass BlockAvgToCylConverter(BlockConverter):\n    \"\"\"\n    Convert a block and driver block into a block made of a concentric circles using\n    block (homogenized) composition.\n\n    Notes\n    -----\n    This converter is intended for use in building 1-dimensional models of a set of block.\n    numInternalRings controls the number of rings to use for the source block, while the\n    numExternalRings controls the number of rings for the driver fuel block.  The number\n    of blocks to in each ring grows by 6 for each ring in hex geometry and 8 for each ring\n    in Cartesian.\n\n    This converter is opinionated in that it uses a spatial grid to determine how many\n    blocks to add based on the type of the ``sourceBlock``. For example, if the ``sourceBlock``\n    is a HexBlock then a HexGrid will be used. 
If the ``sourceBlock`` is a CartesianBlock\n    then a CartesianGrid without an offset will be used.\n\n    See Also\n    --------\n    HexComponentsToCylConverter: This converter is more useful if the pin lattice is in a\n    hex lattice.\n    \"\"\"\n\n    def __init__(\n        self,\n        sourceBlock,\n        driverFuelBlock=None,\n        numInternalRings=1,\n        numExternalRings=None,\n    ):\n        BlockConverter.__init__(self, sourceBlock)\n        self._driverFuelBlock = driverFuelBlock\n        self._numExternalRings = numExternalRings\n        self.convertedBlock = blocks.ThRZBlock(name=sourceBlock.name + \"-cyl\", height=sourceBlock.getHeight())\n        self.convertedBlock.setLumpedFissionProducts(sourceBlock.getLumpedFissionProductCollection())\n        self._numInternalRings = numInternalRings\n\n    def convert(self):\n        \"\"\"Return a block converted into cylindrical geometry, possibly with other block types surrounding it.\"\"\"\n        self._addBlockRings(self._sourceBlock, self._sourceBlock.getType(), self._numInternalRings, 1)\n        self._addDriverFuelRings()\n        return self.convertedBlock\n\n    def _addBlockRings(self, blockToAdd, blockName, numRingsToAdd, firstRing, mainComponent=None):\n        \"\"\"Add a homogeneous block ring to the converted block.\"\"\"\n        runLog.info(\"Converting representative block {} to its equivalent cylindrical model\".format(self._sourceBlock))\n\n        innerDiam = self.convertedBlock[-1].getDimension(\"od\") if len(self.convertedBlock) else 0.0\n\n        if mainComponent is not None:\n            newCompProps = mainComponent.material\n            tempInput = tempHot = mainComponent.temperatureInC\n        else:  # no component specified so just use block vals\n            newCompProps = \"Custom\"  # this component shouldn't change temperature anyway\n            tempInput = tempHot = blockToAdd.getAverageTempInC()\n\n        if isinstance(blockToAdd, blocks.HexBlock):\n        
    grid = grids.HexGrid.fromPitch(1.0)\n        elif isinstance(blockToAdd, blocks.CartesianBlock):\n            grid = grids.CartesianGrid.fromRectangle(1.0, 1.0)\n        else:\n            raise ValueError(f\"The `sourceBlock` of type {type(blockToAdd)} is not supported in {self}.\")\n\n        for ringNum in range(firstRing, firstRing + numRingsToAdd):\n            numFuelBlocksInRing = grid.getPositionsInRing(ringNum)\n            assert numFuelBlocksInRing is not None\n            fuelBlockTotalArea = numFuelBlocksInRing * blockToAdd.getArea()\n            driverOuterDiam = getOuterDiamFromIDAndArea(innerDiam, fuelBlockTotalArea)\n            driverRing = components.Circle(\n                blockName,\n                newCompProps,\n                tempInput,\n                tempHot,\n                od=driverOuterDiam,\n                id=innerDiam,\n                mult=1,\n            )\n            driverRing.setNumberDensities(blockToAdd.getNumberDensities())\n            # no flag set here since its block level, and its a block, not component...\n            self.convertedBlock.add(driverRing)\n            innerDiam = driverOuterDiam\n\n    def _addDriverFuelRings(self):\n        \"\"\"\n        Add driver fuel blocks as the outer-most surrounding ring.\n\n        Notes\n        -----\n        This is intended to be used to drive non-fuel compositions, DU, etc.\n        \"\"\"\n        if self._driverFuelBlock is None:\n            return\n        if not self._driverFuelBlock.isFuel():\n            raise ValueError(\"Driver block {} must be fuel\".format(self._driverFuelBlock))\n        if self._numExternalRings < 0:\n            raise ValueError(\n                \"Number of fuel rings is set to {}, but must be a positive integer.\".format(self._numExternalRings)\n            )\n\n        blockName = self._driverFuelBlock.getType() + \" driver\"\n        fuel = self._driverFuelBlock.getChildrenWithFlags(Flags.FUEL)[0]  # used for mat properties and 
temperature\n\n        self._addBlockRings(\n            self._driverFuelBlock,\n            blockName,\n            self._numExternalRings,\n            self._numInternalRings + 1,\n            mainComponent=fuel,\n        )\n\n    def plotConvertedBlock(self, fName=None):\n        \"\"\"A pass-through to preserve the API. Render an image of the converted block.\"\"\"\n        return plotConvertedBlock(self._sourceBlock, self.convertedBlock, fName)\n\n\nclass HexComponentsToCylConverter(BlockAvgToCylConverter):\n    \"\"\"\n    Converts a hexagon full of pins into a circle full of concentric circles.\n\n    Notes\n    -----\n    This is intended to capture heterogeneous effects while generating cross sections in\n    MCC3. The resulting 1D cylindrical block will not be used in subsequent core calculations.\n\n    Repeated pins/coolant rings will be built, followed by the non-pins like\n    duct/intercoolant pinComponentsRing1 | coolant | pinComponentsRing2 | coolant | ... |\n    nonpins ...\n\n    The ``ductHeterogeneous`` option allows the user to treat everything inside the duct\n    as a single homogenized composition. This could significantly reduce the memory and runtime\n    required for the lattice physics solver, and also provide an alternative approximation for\n    the spatial self-shielding effect on microscopic cross sections.\n\n    This converter expects the ``sourceBlock`` and ``driverFuelBlock`` to defined and for\n    the ``sourceBlock`` to have a spatial grid defined. 
Additionally, both the ``sourceBlock``\n    and ``driverFuelBlock`` must be instances of HexBlocks.\n    \"\"\"\n\n    PIN_COMPONENT_FLAGS = (\n        Flags.FUEL,\n        Flags.ANNULAR | Flags.VOID,\n        Flags.GAP,\n        Flags.BOND,\n        Flags.LINER,\n        Flags.CLAD,\n        Flags.WIRE,\n        Flags.CONTROL,\n        Flags.REFLECTOR,\n        Flags.SHIELD,\n        Flags.SLUG,\n        Flags.PIN,\n        Flags.POISON,\n    )\n\n    def __init__(\n        self,\n        sourceBlock,\n        driverFuelBlock=None,\n        numExternalRings=None,\n        mergeIntoClad=None,\n        mergeIntoFuel=None,\n        ductHeterogeneous=False,\n    ):\n        BlockAvgToCylConverter.__init__(\n            self,\n            sourceBlock,\n            driverFuelBlock=driverFuelBlock,\n            numExternalRings=numExternalRings,\n        )\n        if not isinstance(sourceBlock, blocks.HexBlock):\n            raise TypeError(\n                \"Block {} is not hexagonal and cannot be converted to an equivalent cylinder\".format(sourceBlock)\n            )\n\n        if sourceBlock.spatialGrid is None:\n            raise ValueError(\n                f\"{sourceBlock} has no spatial grid attribute, therefore \"\n                f\"the block conversion with {self.__class__.__name__} cannot proceed.\"\n            )\n\n        if driverFuelBlock is not None:\n            if not isinstance(driverFuelBlock, blocks.HexBlock):\n                raise TypeError(\n                    \"Block {} is not hexagonal and cannot be converted to an equivalent cylinder\".format(\n                        driverFuelBlock\n                    )\n                )\n        self.pinPitch = sourceBlock.getPinPitch()\n        self.mergeIntoClad = mergeIntoClad or []\n        self.mergeIntoFuel = mergeIntoFuel or []\n        self.ductHeterogeneous = ductHeterogeneous\n        self.interRingComponent = sourceBlock.getComponent(Flags.COOLANT, exact=True)\n        if not 
self.interRingComponent:\n            raise ValueError(f\"Block {sourceBlock} cannot be converted to rings without a `coolant` component\")\n        self._remainingCoolantFillArea = self.interRingComponent.getArea()\n\n    def convert(self):\n        \"\"\"Perform the conversion.\n\n        .. impl:: Convert hex blocks to cylindrical blocks.\n            :id:  I_ARMI_BLOCKCONV_HEX_TO_CYL\n            :implements: R_ARMI_BLOCKCONV_HEX_TO_CYL\n\n            This method converts a ``HexBlock`` to a cylindrical ``Block``. Obviously,\n            this is not a physically meaningful transition; it is a helpful\n            approximation tool for analysts. This is a subclass of\n            ``BlockAvgToCylConverter`` which is a subclass of ``BlockConverter``. This\n            converter expects the ``sourceBlock`` and ``driverFuelBlock`` to defined\n            and for the ``sourceBlock`` to have a spatial grid defined. Additionally,\n            both the ``sourceBlock`` and ``driverFuelBlock`` must be instances of\n            ``HexBlocks``.\n        \"\"\"\n        runLog.info(\"Converting representative block {} to its equivalent cylindrical model\".format(self._sourceBlock))\n        self._dissolveComponents()\n        numRings = self._sourceBlock.spatialGrid.getMinimumRings(self._sourceBlock.getNumPins())\n        pinComponents, nonPins = self._classifyComponents()\n        if self.ductHeterogeneous:\n            self._buildInsideDuct()\n        else:\n            self._buildFirstRing(pinComponents)\n            for ring in range(2, numRings + 1):\n                self._buildNthRing(pinComponents, ring)\n        self._buildNonPinRings(nonPins)\n        self._addDriverFuelRings()\n\n        for comp in self.convertedBlock.getComponents():\n            assert comp.getArea() >= 0.0, (\n                f\"{comp} in {self.convertedBlock} has a negative area of {comp.getArea()}. 
\"\n                \"Negative areas are not supported.\"\n            )\n\n        return self.convertedBlock\n\n    def _dissolveComponents(self):\n        # always merge wire into coolant.\n        self.dissolveComponentIntoComponent(\"wire\", \"coolant\")\n        # update coolant area to fill in wire area that was left behind.\n        self.interRingComponent = self._sourceBlock.getComponent(Flags.COOLANT, exact=True)\n        self._remainingCoolantFillArea = self.interRingComponent.getArea()\n\n        # do user-input merges into cladding\n        for componentName in self.mergeIntoClad:\n            self.dissolveComponentIntoComponent(componentName, \"clad\")\n\n        # do user-input merges into fuel\n        for componentName in self.mergeIntoFuel:\n            self.dissolveComponentIntoComponent(componentName, \"fuel\")\n\n    def _classifyComponents(self):\n        \"\"\"\n        Figure out which components are in each pin ring and which are not.\n\n        Notes\n        -----\n        Assumption is that anything with multiplicity equal to numPins is a pin (clad, wire, bond, etc.)\n        Non-pins will include things like coolant, duct, interduct, etc.\n\n        This skips components that have a negative area, which can exist if a user implements a linked\n        component containing void or non-solid materials (e.g., gaps)\n        \"\"\"\n        pinComponents, nonPins = [], []\n\n        for c in self._sourceBlock:\n            # If the area of the component is negative than this component should be skipped\n            # altogether. 
If not skipped, the conversion process still works, but this would\n            # result in one or more rings having an outer diameter that is smaller than the\n            # inner diameter.\n            if c.getArea() < 0.0:\n                continue\n\n            if any(c.hasFlags(f) for f in self.PIN_COMPONENT_FLAGS):\n                pinComponents.append(c)\n            elif c.name != \"coolant\":  #  coolant is addressed in self.interRingComponent\n                nonPins.append(c)\n\n        return list(sorted(pinComponents)), nonPins\n\n    def _buildInsideDuct(self):\n        \"\"\"Build a homogenized material of the components inside the duct.\"\"\"\n        blockType = self._sourceBlock.getType()\n        blockName = f\"Homogenized {blockType}\"\n        newBlock, mixtureFlags = stripComponents(self._sourceBlock, Flags.DUCT)\n        outerDiam = getOuterDiamFromIDAndArea(0.0, newBlock.getArea())\n        circle = components.Circle(\n            blockName,\n            \"_Mixture\",\n            newBlock.getAverageTempInC(),\n            newBlock.getAverageTempInC(),\n            id=0.0,\n            od=outerDiam,\n            mult=1,\n        )\n        circle.setNumberDensities(newBlock.getNumberDensities())\n        circle.p.flags = mixtureFlags\n        self.convertedBlock.add(circle)\n\n    def _buildFirstRing(self, pinComponents):\n        \"\"\"Add first ring of components to new block.\"\"\"\n        for oldC in pinComponents:\n            c = copy.deepcopy(oldC)\n            c.setName(c.name + \" 1\")\n            c.setDimension(\"mult\", 1.0)  # first ring will have dims of 1 pin\n            c.p.flags = oldC.p.flags\n            self.convertedBlock.add(c)\n\n    def _buildNthRing(self, pinComponents, ringNum):\n        \"\"\"\n        Build nth ring of pins and add them to block.\n\n        Each n-th ring is preceded with a circle of coolant between the previous ring and this one.\n        Since we blended the wire and coolant, this 
area is supposed to include the wire area.\n\n        This will be a fuel (or control) meat surrounded on both sides by clad, bond, liner, etc. layers.\n        \"\"\"\n        numPinsInRing = self._sourceBlock.spatialGrid.getPositionsInRing(ringNum)\n        pinRadii = [c.getDimension(\"od\") / 2.0 for c in pinComponents]\n        bigRingRadii = radiiFromRingOfRods(self.pinPitch * (ringNum - 1), numPinsInRing, pinRadii)\n        nameSuffix = \" {}\".format(ringNum)\n\n        coolantOD = bigRingRadii[0] * 2.0\n        self._addCoolantRing(coolantOD, nameSuffix)\n        innerDiameter = coolantOD\n\n        compsToTransformIntoRings = pinComponents[::-1] + pinComponents[1:]\n        for i, (bcs, bigRingRadius) in enumerate(zip(compsToTransformIntoRings, bigRingRadii[1:])):\n            outerDiameter = bigRingRadius * 2.0\n            name = bcs.name + nameSuffix + str(i)\n            bigComponent = self._addSolidMaterialRing(bcs, innerDiameter, outerDiameter, name)\n            self.convertedBlock.add(bigComponent)\n            innerDiameter = outerDiameter\n\n    def _buildNonPinRings(self, nonPins):\n        \"\"\"\n        Throw each non-pin component on as an individual outer circle.\n\n        Also needs to add final coolant layer between the outer pins and the non-pins.\n        Will crash if there are things that are not circles or hexes.\n        \"\"\"\n        if not self.ductHeterogeneous:\n            # fill in the last ring of coolant using the rest\n            coolInnerDiam = self.convertedBlock[-1].getDimension(\"od\")\n            coolantOD = getOuterDiamFromIDAndArea(coolInnerDiam, self._remainingCoolantFillArea)\n            self._addCoolantRing(coolantOD, \" outer\")\n            innerDiameter = coolantOD\n        else:\n            innerDiameter = self.convertedBlock[-1].getDimension(\"od\")\n\n        for i, hexagon in enumerate(sorted(nonPins)):\n            outerDiam = getOuterDiamFromIDAndArea(innerDiameter, hexagon.getArea())  # conserve 
area of hex.\n            name = hexagon.name + \" {}\".format(i)\n            circularHexagon = self._addSolidMaterialRing(hexagon, innerDiameter, outerDiam, name)\n            self.convertedBlock.add(circularHexagon)\n            innerDiameter = outerDiam\n\n    @staticmethod\n    def _addSolidMaterialRing(baseComponent, innerDiameter, outDiameter, name):\n        circle = components.Circle(\n            name,\n            baseComponent.material,\n            baseComponent.temperatureInC,\n            baseComponent.temperatureInC,\n            id=innerDiameter,\n            od=outDiameter,\n            mult=1,\n        )\n        circle.setNumberDensities(baseComponent.getNumberDensities())\n        circle.p.flags = baseComponent.p.flags\n        return circle\n\n    def _addCoolantRing(self, coolantOD, nameSuffix):\n        innerDiam = self.convertedBlock[-1].getDimension(\"od\")\n        irc = self.interRingComponent\n        interRing = components.Circle(\n            irc.name + nameSuffix,\n            irc.material,\n            irc.temperatureInC,\n            irc.temperatureInC,\n            od=coolantOD,\n            id=innerDiam,\n            mult=1,\n        )\n        interRing.setNumberDensities(irc.getNumberDensities())\n        interRing.p.flags = irc.p.flags\n        self.convertedBlock.add(interRing)\n        self._remainingCoolantFillArea -= interRing.getArea()\n\n\ndef getOuterDiamFromIDAndArea(ID, area):\n    \"\"\"Return the outer diameter of an annulus with given inner diameter (ID) and area.\"\"\"\n    return math.sqrt(ID**2.0 + 4.0 * area / math.pi)  # from A = pi *(d ** 2)/4.0\n\n\ndef radiiFromHexPitches(pitches):\n    \"\"\"Return list of radii for equivalent-area circles from list of hexagon flat-to-flat pitches.\"\"\"\n    return [x * math.sqrt(SIN60 / math.pi) for x in pitches]\n\n\ndef radiiFromHexSides(sideLengths):\n    \"\"\"Return list of radii for equivalent-area circles from list of hexagon side lengths.\"\"\"\n    
return [x * math.sqrt(3.0 * SIN60 / math.pi) for x in sideLengths]\n\n\ndef radiiFromRingOfRods(distToRodCenter, numRods, rodRadii, layout=\"hexagon\"):\n    r\"\"\"\n    Return list of radii from ring of rods.\n\n    Parameters\n    ----------\n    distToRodCenter : float\n        Distance from center of assembly to center of pin.\n    numRods : int\n        Number of rods in the ring of rods\n    rodRadii : list\n        Radii from smallest to largest. Outer radius becomes inner radius of next component.\n\n    Returns\n    -------\n    radiiList : list\n        List of radii from inner to outer. Components are added on both sides.\n\n    Notes\n    -----\n    There are two assumptions when making circles:\n\n    #. The rings are concentric about the ``radToRodCenter``.\n    #. The ring area of the fuel rods are distributed to the inside and outside\n       rings with the same thickness. ``thicknessOnEachSide`` (:math:`t`) is calculated\n       as follows:\n\n        .. math::\n            :nowrap:\n\n            \\begin{aligned}\n            r_1 &\\equiv \\text{inner rad that thickness is added to on inside} \\\\\n            r_2 &\\equiv \\text{outer rad that thickness is added to on outside} \\\\\n            \\texttt{radToRodCenter} &= \\frac{r_1 + r_2}{2} \\text{(due to being concentric)} \\\\\n            \\text{Total Area} &= \\text{Area of annulus 1} + \\text{Area of annulus 2} \\\\\n            \\text{Area of annulus 1} &= \\pi r_1^2 -  \\pi (r_1 - t)^2 \\\\\n            \\text{Area of annulus 2} &= \\pi (r_2 + t)^2 -  \\pi r_2^2 \\\\\n            t &= \\frac{\\text{Total Area}}{4\\pi\\times\\texttt{radToRodCenter}}\n            \\end{aligned}\n    \"\"\"\n    if layout == \"polygon\":\n        alpha = 2.0 * math.pi / float(numRods)\n        radToRodCenter = distToRodCenter * math.sqrt(math.sin(alpha) / alpha)\n    elif layout == \"hexagon\":\n        if numRods % 6:\n            raise ValueError(\"numRods ({}) must be a multiple of 
6.\".format(numRods))\n        sideLengthOfBigHex = distToRodCenter  # for equilateral triangle\n        radToRodCenter = radiiFromHexSides([sideLengthOfBigHex])[0]\n    else:\n        raise ValueError(\"Invalid layout {}\".format(layout))\n\n    radiiFromRodCenter = []\n    rLast = bigRLast = 0.0\n    for rodRadius in rodRadii:\n        area = math.pi * (rodRadius**2.0 - rLast**2.0) * float(numRods)\n        thicknessOnEachSide = area / (4 * math.pi * radToRodCenter)\n        distFromCenterComp = bigRLast + thicknessOnEachSide\n        radiiFromRodCenter.append(radToRodCenter + distFromCenterComp)\n        radiiFromRodCenter.append(radToRodCenter - distFromCenterComp)  # build thickness on both sides\n        rLast, bigRLast = rodRadius, distFromCenterComp\n\n    return sorted(radiiFromRodCenter)\n\n\ndef stripComponents(block, compFlags):\n    \"\"\"\n    Remove all components from a block outside of the first component that matches compFlags.\n\n    Parameters\n    ----------\n    block : armi.reactor.blocks.Block\n        Source block from which to produce a modified copy\n    compFlags : armi.reactor.flags.Flags\n        Component flags to indicate which components to strip from the\n        block. All components outside of the first one that matches\n        compFlags are stripped.\n\n    Returns\n    -------\n    newBlock : armi.reactor.blocks.Block\n        Copy of source block with specified components stripped off.\n    mixtureFlags : TypeSpec\n        Combination of all component flags within newBlock.\n\n    Notes\n    -----\n    This is often used for creating a partially heterogeneous representation\n    of a block. 
For example, one might want to treat everything inside of a\n    specific component (such as the duct) as homogenized, while keeping a\n    heterogeneous representation of the remaining components.\n    \"\"\"\n    newBlock = copy.deepcopy(block)\n    avgBlockTemp = block.getAverageTempInC()\n    mixtureFlags = newBlock.getComponent(Flags.COOLANT).p.flags\n    innerMostComp = next(i for i, c in enumerate(sorted(newBlock.getComponents())) if c.hasFlags(compFlags))\n    outsideComp = True\n    indexedComponents = [(i, c) for i, c in enumerate(sorted(newBlock.getComponents()))]\n    for i, c in sorted(indexedComponents, reverse=True):\n        if outsideComp:\n            if i == innerMostComp:\n                compIP = c.getDimension(\"ip\")\n                outsideComp = False\n            newBlock.remove(c, recomputeAreaFractions=False)\n        else:\n            mixtureFlags = mixtureFlags | c.p.flags\n\n    # add pitch defining component with no area\n    newBlock.add(\n        components.Hexagon(\n            \"pitchComponent\",\n            \"Void\",\n            avgBlockTemp,\n            avgBlockTemp,\n            ip=compIP,\n            op=compIP,\n        )\n    )\n    return newBlock, mixtureFlags\n"
  },
  {
    "path": "armi/reactor/converters/geometryConverters.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nChange a reactor from one geometry to another.\n\nExamples may include going from Hex to R-Z or from Third-core to full core.  This module contains\n**converters** (which create new reactor objects with different geometry), and **changers** (which\nmodify a given reactor in place) in this module.\n\nGenerally, mass is conserved in geometry conversions.\n\nWarning\n-------\nThese are mostly designed for hex geometry.\n\"\"\"\n\nimport collections\nimport copy\nimport math\nimport operator\nfrom typing import TYPE_CHECKING, Union\n\nimport numpy as np\n\nfrom armi import materials, runLog\nfrom armi.physics.neutronics.fissionProductModel import lumpedFissionProduct\nfrom armi.reactor import (\n    assemblies,\n    blocks,\n    components,\n    geometry,\n    grids,\n    parameters,\n    reactors,\n)\nfrom armi.reactor.converters import blockConverters, meshConverters\nfrom armi.reactor.flags import Flags\nfrom armi.reactor.parameters import (\n    NEVER,\n    SINCE_LAST_GEOMETRY_TRANSFORMATION,\n    Category,\n    ParamLocation,\n)\nfrom armi.utils import hexagon, plotting, units\n\nif TYPE_CHECKING:\n    from armi.reactor import Core\n    from armi.reactor.assemblies import Assembly\n    from armi.reactor.blocks import Block\n\nBLOCK_AXIAL_MESH_SPACING = 20  # Block axial mesh spacing set for nodal diffusion calculation (cm)\nSTR_SPACE = \" \"\n\n\nclass 
GeometryChanger:\n    \"\"\"Geometry changer class that updates the geometry (number of assems or blocks per assem) of a given reactor.\"\"\"\n\n    def __init__(self, cs=None):\n        self._newAssembliesAdded = []\n        self._sourceReactor = None\n        self._cs = cs\n\n    def __repr__(self):\n        return \"<{}>\".format(self.__class__.__name__)\n\n    def convert(self, r):\n        \"\"\"\n        Run the conversion.\n\n        Parameters\n        ----------\n        r : Reactor object\n            The reactor to convert.\n        \"\"\"\n        raise NotImplementedError\n\n    def reset(self):\n        \"\"\"\n        When called, the reactor core model is reset to its original configuration, or\n        parameter data from the converted reactor core model is transformed back to the original\n        reactor state, thus cleaning up the converted reactor core model.\n\n        Notes\n        -----\n        This should be implemented on each of the geometry converters.\n        \"\"\"\n        runLog.info(f\"Resetting the state of the converted reactor core model in {self}\")\n        self._newAssembliesAdded = []\n\n\nclass GeometryConverter(GeometryChanger):\n    \"\"\"\n    Base class for GeometryConverter which makes a new converted reactor.\n\n    Examples\n    --------\n    To convert a hex case to a R-Z case, do this:\n\n    >>> from armi.reactorConverters import HexToRZConverter\n    >>> HexToRZConverter(useMostCommonXsId=False, expandReactor=False)\n    >>> geomConv.convert(r)\n    >>> newR = geomConv.convReactor\n    >>> dif3d = dif3dInterface.Dif3dInterface(\"dif3dRZ\", newR)\n    >>> dif3d.o = self.o\n    >>> dif3d.writeInput(\"rzGeom_actual.inp\")\n    \"\"\"\n\n    def __init__(self, cs=None):\n        GeometryChanger.__init__(self, cs=cs)\n        self.convReactor = None\n\n\nclass FuelAssemNumModifier(GeometryChanger):\n    \"\"\"\n    Modify the number of fuel assemblies in the reactor.\n\n    Notes\n    -----\n    - The number of fuel 
assemblies should ALWAYS be set for the third-core regardless of the\n      reactor geometry model.\n    - The modification is only valid for third-core and full-core geometry models.\n    \"\"\"\n\n    def __init__(self, cs):\n        GeometryChanger.__init__(self, cs)\n        self.numFuelAssems = None  # in full core.\n        self.fuelType = \"feed fuel\"\n        self.overwriteList = [Flags.REFLECTOR, Flags.SHIELD]\n        self.ringsToAdd = []\n        self.modifyReactorPower = False\n\n    def convert(self, r):\n        \"\"\"\n        Set the number of fuel assemblies in the reactor.\n\n        Notes\n        -----\n        - While adding fuel, does not modify existing fuel/control positions, but does overwrite\n          assemblies in the overwriteList (e.g. reflectors, shields)\n        - Once specified amount of fuel is in place, removes all assemblies past the outer fuel boundary\n        - To re-add reflector/shield assemblies around the new core, use the ringsToAdd attribute\n        \"\"\"\n        self._sourceReactor = r\n\n        if self._sourceReactor.core.powerMultiplier != 1 and self._sourceReactor.core.powerMultiplier != 3:\n            raise ValueError(\n                \"Invalid reactor geometry {} in {}. 
Reactor must be full or third core to modify the \"\n                \"number of assemblies.\".format(r.core.powerMultiplier, self)\n            )\n\n        # Set the number of fueled and non-fueled positions within the core (Full core or third-core)\n        coreGeom = \"full-core\" if self._sourceReactor.core.powerMultiplier == 1 else \"third-core\"\n        runLog.info(\"Modifying {} geometry to have {} fuel assemblies.\".format(coreGeom, self.numFuelAssems))\n        nonFuelAssems = (\n            sum(not assem.hasFlags(Flags.FUEL) for assem in self._sourceReactor.core)\n            * self._sourceReactor.core.powerMultiplier\n        )\n        self.numFuelAssems *= self._sourceReactor.core.powerMultiplier\n        totalCoreAssems = nonFuelAssems + self.numFuelAssems\n\n        # Adjust the total power of the reactor by keeping power per assembly constant\n        if self.modifyReactorPower:\n            self._sourceReactor.core.p.power *= float(self.numFuelAssems) / (\n                len(self._sourceReactor.core.getAssemblies(Flags.FUEL)) * self._sourceReactor.core.powerMultiplier\n            )\n\n        # Get the sorted assembly locations in the core (Full core or third core)\n        assemOrderList = r.core.spatialGrid.generateSortedHexLocationList(totalCoreAssems)\n        if self._sourceReactor.core.powerMultiplier == 3:\n            assemOrderList = [loc for loc in assemOrderList if r.core.spatialGrid.isInFirstThird(loc)]\n\n        # Add fuel assemblies to the core\n        addingFuelIsComplete = False\n        numFuelAssemsAdded = 0\n        for loc in assemOrderList:\n            assem = self._sourceReactor.core.childrenByLocator.get(loc)\n            if numFuelAssemsAdded < self.numFuelAssems:\n                if assem is None:\n                    raise KeyError(\"Cannot find expected fuel assem in {}\".format(loc))\n                # Add new fuel assembly to the core\n                if assem.hasFlags(self.overwriteList):\n                    
fuelAssem = self._sourceReactor.core.createAssemblyOfType(assemType=self.fuelType, cs=self._cs)\n                    # Remove existing assembly in the core location before adding new assembly\n                    if assem.hasFlags(self.overwriteList):\n                        self._sourceReactor.core.removeAssembly(assem, discharge=False)\n                    self._sourceReactor.core.add(fuelAssem, loc)\n                    numFuelAssemsAdded += self._sourceReactor.core.powerMultiplier\n                else:\n                    # Keep the existing assembly in the core\n                    if assem.hasFlags(Flags.FUEL):\n                        # Count the assembly in the location if it is fuel\n                        numFuelAssemsAdded += self._sourceReactor.core.powerMultiplier\n                    else:\n                        pass\n            # Flag the completion of adding fuel assemblies (see note 1)\n            elif numFuelAssemsAdded == self.numFuelAssems:\n                addingFuelIsComplete = True\n\n            # Remove the remaining assemblies in the assembly list once all the fuel has been added\n            if addingFuelIsComplete and assem is not None:\n                self._sourceReactor.core.removeAssembly(assem, discharge=False)\n\n        # Remove all other assemblies from the core\n        for assem in self._sourceReactor.core.getAssemblies():\n            if assem.spatialLocator not in assemOrderList:  # check if assembly is on the list\n                r.core.removeAssembly(assem, discharge=False)  # get rid of the old assembly\n\n        # Add the remaining rings of assemblies to the core\n        for assemType in self.ringsToAdd:\n            self.addRing(assemType=assemType)\n\n        # Complete the reactor loading\n        self._sourceReactor.core.processLoading(self._cs)\n        self._sourceReactor.core.numRings = self._sourceReactor.core.getNumRings()\n        self._sourceReactor.core.regenAssemblyLists()\n        
self._sourceReactor.core.circularRingList = None  # need to reset this (possibly other stuff too)\n\n    def addRing(self, assemType=\"big shield\"):\n        \"\"\"\n        Add a ring of fuel assemblies around the outside of an existing core.\n\n        Works by first finding the assembly furthest from the center, then filling in\n        all assemblies that are within one pitch further with the specified assembly type\n\n        Parameters\n        ----------\n        assemType : str\n            Assembly type that will be added to the outside of the core\n        \"\"\"\n        r = self._sourceReactor\n        # first look through the core and finds the one farthest from the center\n        maxDist = 0.0\n        for assem in r.core.getAssemblies():\n            dist = np.linalg.norm(assem.spatialLocator.getGlobalCoordinates())  # get distance from origin\n            dist = round(dist, 6)  # round dist to 6 places to avoid differences due to floating point math\n            maxDist = max(maxDist, dist)\n\n        # add one hex pitch to the maximum distance to get the bounding distance for the new ring\n        hexPitch = r.core.spatialGrid.pitch\n        newRingDist = maxDist + hexPitch\n\n        maxArea = math.pi * (newRingDist + hexPitch) ** 2.0  # area that is guaranteed to bound the new core\n        maxAssemsFull = maxArea / hexagon.area(hexPitch)  # divide by hex area to get number of hexes in a full core\n\n        # generate ordered list of assembly locations\n        assemOrderList = r.core.spatialGrid.generateSortedHexLocationList(maxAssemsFull)\n        if r.core.powerMultiplier == 3:\n            assemOrderList = [loc for loc in assemOrderList if self._sourceReactor.core.spatialGrid.isInFirstThird(loc)]\n        elif r.core.powerMultiplier != 1:\n            raise RuntimeError(\"{} only works on full or 1/3 symmetry.\".format(self))\n        # add new assemblies to core within one ring\n        for locator in assemOrderList:\n            assem = 
r.core.childrenByLocator.get(locator)  # check on assemblies, moving radially outward\n            dist = np.linalg.norm(locator.getGlobalCoordinates())\n            dist = round(dist, 6)\n            if dist <= newRingDist:  # check distance\n                if assem is None:  # no assembly in that position, add assembly\n                    newAssem = r.core.createAssemblyOfType(assemType=assemType, cs=self._cs)\n                    r.core.add(newAssem, locator)  # put new assembly in reactor!\n                else:  # all other types of assemblies (fuel, control, etc) leave as is\n                    pass\n            else:\n                pass\n\n    def reset(self):\n        \"\"\"Resetting the reactor core model state after adding fuel assemblies is not currently supported.\"\"\"\n        raise NotImplementedError\n\n\nclass HexToRZThetaConverter(GeometryConverter):\n    \"\"\"\n    Convert hex-based cases to an equivalent R-Z-Theta full core geometry.\n\n    Parameters\n    ----------\n    converterSettings: dict\n        Settings that specify how the mesh of the RZTheta reactor should be generated. 
Controls the\n        number of theta regions, how to group regions, etc.\n\n        uniformThetaMesh\n            bool flag that determines if the theta mesh should be uniform or not\n\n        thetaBins\n            Number of theta bins to create\n\n        radialConversionType\n           * ``Ring Compositions`` -- to convert by composition\n\n        axialConversionType\n            * ``Axial Coordinates`` --  use\n              :py:class:`armi.reactor.converters.meshConverters._RZThetaReactorMeshConverterByAxialCoordinates`\n            * ``Axial Bins`` -- use\n              :py:class:`armi.reactor.converters.meshConverters._RZThetaReactorMeshConverterByAxialBins`\n\n        homogenizeAxiallyByFlags\n            Boolean that if set to True will ignore the `axialConversionType` input and determine a\n            mesh based on the material boundaries for each RZ region axially.\n\n    expandReactor : bool\n        If True, the HEX-Z reactor will be expanded to full core geometry prior to converting to the\n        RZT reactor. 
Either way the converted RZTheta core will be full core.\n    strictHomogenization : bool\n        If True, the converter will restrict HEX-Z blocks with dissimilar XS types from being\n        homogenized into an RZT block.\n    \"\"\"\n\n    _GEOMETRY_TYPE = geometry.GeomType.RZT\n    _SYMMETRY_TYPE = geometry.SymmetryType(\n        domainType=geometry.DomainType.FULL_CORE,\n        boundaryType=geometry.BoundaryType.NO_SYMMETRY,\n    )\n    _BLOCK_MIXTURE_TYPE_MAP = {\n        \"mixture control\": [\"control\"],\n        \"mixture fuel\": [\"fuel\"],\n        \"mixture radial shield\": [\"radial shield\"],\n        \"mixture axial shield\": [\"shield\"],\n        \"mixture structure\": [\n            \"grid plate\",\n            \"reflector\",\n            \"inlet nozzle\",\n            \"handling socket\",\n        ],\n        \"mixture duct\": [\"duct\"],\n        \"mixture plenum\": [\"plenum\"],\n    }\n\n    _BLOCK_MIXTURE_TYPE_EXCLUSIONS = [\"control\", \"fuel\", \"radial shield\"]\n    _MESH_BY_RING_COMP = \"Ring Compositions\"\n    _MESH_BY_AXIAL_COORDS = \"Axial Coordinates\"\n    _MESH_BY_AXIAL_BINS = \"Axial Bins\"\n\n    def __init__(self, cs, converterSettings, expandReactor=False, strictHomogenization=False):\n        GeometryConverter.__init__(self, cs)\n        self.converterSettings = converterSettings\n        self.meshConverter = None\n        self._expandSourceReactor = expandReactor\n        self._strictHomogenization = strictHomogenization\n        self._radialMeshConversionType = None\n        self._axialMeshConversionType = None\n        self._previousRadialZoneAssemTypes = None\n        self._currentRadialZoneType = None\n        self._assemsInRadialZone = collections.defaultdict(list)\n        self._newBlockNum = 0\n        self.blockMap = collections.defaultdict(list)\n        self.blockVolFracs = collections.defaultdict(dict)\n        self._homogenizeAxiallyByFlags = False\n\n    def _generateConvertedReactorMesh(self):\n        
\"\"\"Convert the source reactor using the converterSettings.\"\"\"\n        runLog.info(\"Generating mesh coordinates for the reactor conversion\")\n        self._radialMeshConversionType = self.converterSettings[\"radialConversionType\"]\n        self._axialMeshConversionType = self.converterSettings[\"axialConversionType\"]\n        self._homogenizeAxiallyByFlags = self.converterSettings.get(\"homogenizeAxiallyByFlags\", False)\n        converter = None\n        if self._radialMeshConversionType == self._MESH_BY_RING_COMP:\n            if self._homogenizeAxiallyByFlags:\n                converter = meshConverters.RZThetaReactorMeshConverterByRingCompositionAxialFlags(\n                    self.converterSettings\n                )\n            elif self._axialMeshConversionType == self._MESH_BY_AXIAL_COORDS:\n                converter = meshConverters.RZThetaReactorMeshConverterByRingCompositionAxialCoordinates(\n                    self.converterSettings\n                )\n            elif self._axialMeshConversionType == self._MESH_BY_AXIAL_BINS:\n                converter = meshConverters.RZThetaReactorMeshConverterByRingCompositionAxialBins(self.converterSettings)\n        if converter is None:\n            raise ValueError(\n                \"No mesh converter exists for `radialConversionType` and `axialConversionType` settings \"\n                \"of {} and {}\".format(self._radialMeshConversionType, self._axialMeshConversionType)\n            )\n        self.meshConverter = converter\n        return self.meshConverter.generateMesh(self._sourceReactor)\n\n    def convert(self, r):\n        \"\"\"\n        Run the conversion to 3 dimensional R-Z-Theta.\n\n        .. 
impl:: Tool to convert a hex core to an RZTheta core.\n            :id: I_ARMI_CONV_3DHEX_TO_2DRZ\n            :implements: R_ARMI_CONV_3DHEX_TO_2DRZ\n\n            This method converts the hex-z mesh to r-theta-z mesh.\n            It first verifies that the geometry type of the input reactor ``r``\n            has the expected HEX geometry. Upon conversion, it determines the inner\n            and outer diameters of each ring in the r-theta-z mesh and calls\n            ``_createRadialThetaZone`` to create a radial theta zone with a homogenized mixture.\n            The axial dimension of the r-theta-z mesh is then updated by ``updateAxialMesh``.\n\n        Parameters\n        ----------\n        r : Reactor object\n            The reactor to convert.\n\n        Notes\n        -----\n        The linked requirement technically points to a child class of this class, HexToRZConverter.\n        However, this is the method where the conversion actually happens and thus the\n        implementation tag is noted here.\n\n        As a part of the RZT mesh converters it is possible to obtain a radial mesh that has\n        repeated ring numbers.  For instance, if there are fuel assemblies and control assemblies\n        within the same radial hex ring then it's possible that a radial mesh output from the\n        byRingComposition mesh converter method will look something like:\n\n        self.meshConverter.radialMesh = [2, 3, 4, 4, 5, 5, 6, 6, 6, 7, 8, 8, 9, 10]\n\n        In this instance the hex ring will remain the same for multiple iterations over radial\n        direction when homogenizing the hex core into the RZT geometry. In this case, the converter\n        needs to keep track of the compositions within this ring so that it can separate this\n        repeated ring into multiple RZT rings. 
Each of the RZT rings should have a single\n        composition (fuel1, fuel2, control, etc.)\n\n        See Also\n        --------\n        armi.reactor.converters.meshConverters\n        \"\"\"\n        runLog.info(f\"Converting {r.core} using {self}\")\n\n        if r.core.geomType != geometry.GeomType.HEX:\n            raise ValueError(\"Cannot use {} to convert {} reactor\".format(self, str(r.core.geomType).upper()))\n\n        self._sourceReactor = r\n        self._setupSourceReactorForConversion()\n        rztSpatialGrid = self._generateConvertedReactorMesh()\n        runLog.info(rztSpatialGrid)\n        self._setupConvertedReactor(rztSpatialGrid)\n        self.convReactor.core.lib = self._sourceReactor.core.lib\n\n        innerDiameter = 0.0\n        lowerRing = 1\n        radialMeshCm = [0.0]\n        for radialIndex, upperRing in enumerate(self.meshConverter.radialMesh):\n            lowerTheta = 0.0\n            # see notes\n            self._previousRadialZoneAssemTypes = self._previousRadialZoneAssemTypes if lowerRing == upperRing else []\n            if lowerRing == upperRing:\n                lowerRing = upperRing - 1\n\n            self._setNextAssemblyTypeInRadialZone(lowerRing, upperRing)\n            self._setAssemsInRadialZone(radialIndex, lowerRing, upperRing)\n            for thetaIndex, upperTheta in enumerate(self.meshConverter.thetaMesh):\n                zoneAssems = self._getAssemsInRadialThetaZone(lowerRing, upperRing, lowerTheta, upperTheta)\n                self._writeRadialThetaZoneHeader(\n                    radialIndex,\n                    lowerRing,\n                    upperRing,\n                    thetaIndex,\n                    lowerTheta,\n                    upperTheta,\n                )\n                outerDiameter = self._createRadialThetaZone(\n                    innerDiameter,\n                    thetaIndex,\n                    radialIndex,\n                    lowerTheta,\n                    upperTheta,\n      
              zoneAssems,\n                )\n                lowerTheta = upperTheta\n            innerDiameter = outerDiameter\n            lowerRing = upperRing\n            radialMeshCm.append(outerDiameter / 2.0)\n\n        # replace temporary index-based ring indices with actual radial distances\n        self.convReactor.core.spatialGrid._bounds = (\n            self.convReactor.core.spatialGrid._bounds[0],\n            np.array(radialMeshCm),\n            self.convReactor.core.spatialGrid._bounds[2],\n        )\n\n        self.convReactor.core.updateAxialMesh()\n        self.convReactor.core.summarizeReactorStats()\n\n        # Track the new assemblies that were created when the converted reactor was\n        # initialized so that the global assembly counter can be reset later.\n        self._newAssembliesAdded = self.convReactor.core.getAssemblies()\n\n    def _setNextAssemblyTypeInRadialZone(self, lowerRing, upperRing):\n        \"\"\"\n        Change the currently-active assembly type to the next active one based on a specific order.\n\n        If this is called with the same (lowerRing, upperRing) twice, the next assembly type\n        will be applied. This is useful, for instance, in putting control zones amidst fuel.\n        \"\"\"\n        sortedAssemTypes = self._getSortedAssemblyTypesInRadialZone(lowerRing, upperRing)\n        for aType in sortedAssemTypes:\n            if aType not in self._previousRadialZoneAssemTypes:\n                self._previousRadialZoneAssemTypes.append(aType)\n                self._currentRadialZoneType = aType\n                break\n\n    def _getSortedAssemblyTypesInRadialZone(self, lowerRing, upperRing):\n        \"\"\"\n        Retrieve assembly types in a radial zone between (lowerRing, upperRing), sort from highest\n        occurrence to lowest.\n\n        Notes\n        -----\n        - Assembly types are based on the assembly names and not the direct composition within each\n          assembly. 
For instance, if two assemblies are named `fuel 1` and `fuel 2` but they have\n          the same composition at some reactor state then they will still be separated as two\n          different assembly types.\n        \"\"\"\n        aCountByTypes = collections.Counter()\n        for a in self._getAssembliesInCurrentRadialZone(lowerRing, upperRing):\n            aCountByTypes[a.getType().lower()] += 1\n\n        # sort on tuple (int, str) to force consistent ordering of result when counts are tied\n        sortedAssemTypes = sorted(aCountByTypes, key=lambda aType: (aCountByTypes[aType], aType), reverse=True)\n        return sortedAssemTypes\n\n    def _getAssembliesInCurrentRadialZone(self, lowerRing, upperRing):\n        ringAssems = []\n        for ring in range(lowerRing, upperRing):\n            ringAssems.extend(self._sourceReactor.core.getAssembliesInSquareOrHexRing(ring))\n        return ringAssems\n\n    def _setupSourceReactorForConversion(self):\n        self._sourceReactor.core.summarizeReactorStats()\n        if self._expandSourceReactor:\n            self._expandSourceReactorGeometry()\n\n    def _setupConvertedReactor(self, grid):\n        self.convReactor = reactors.Reactor(\"ConvertedReactor\", self._sourceReactor.blueprints)\n        core = reactors.Core(\"Core\")\n        if self._cs is not None:\n            core.setOptionsFromCs(self._cs)\n        self.convReactor.add(core)\n\n        grid.symmetry = self._SYMMETRY_TYPE\n        grid.geomType = self._GEOMETRY_TYPE\n        grid.armiObject = self.convReactor.core\n        self.convReactor.core.spatialGrid = grid\n        self.convReactor.core.p.power = self._sourceReactor.core.p.power\n        self.convReactor.core.name += \" - {0}\".format(self._GEOMETRY_TYPE)\n\n    def _setAssemsInRadialZone(self, radialIndex, lowerRing, upperRing):\n        \"\"\"\n        Retrieve a list of assemblies in the reactor between (lowerRing, upperRing).\n\n        Notes\n        -----\n        
self._assemsInRadialZone keeps track of the unique assemblies that are in each radial ring.\n        This ensures that no assemblies are duplicated when using self._getAssemsInRadialThetaZone()\n        \"\"\"\n        lowerTheta = 0.0\n        for _thetaIndex, upperTheta in enumerate(self.meshConverter.thetaMesh):\n            assemsInRadialThetaZone = self._getAssemsInRadialThetaZone(lowerRing, upperRing, lowerTheta, upperTheta)\n            newAssemsInRadialZone = set(assemsInRadialThetaZone)\n            oldAssemsInRadialZone = set(self._assemsInRadialZone[radialIndex])\n            self._assemsInRadialZone[radialIndex].extend(\n                sorted(list(newAssemsInRadialZone.union(oldAssemsInRadialZone)))\n            )\n            lowerTheta = upperTheta\n\n        if not self._assemsInRadialZone[radialIndex]:\n            raise ValueError(\n                \"No assemblies in radial zone {} between rings {} and {}\".format(\n                    self._assemsInRadialZone[radialIndex], lowerRing, upperRing\n                )\n            )\n\n    @staticmethod\n    def _getAssembliesInSector(core, theta1, theta2):\n        \"\"\"\n        Locate assemblies in an angular sector.\n\n        Parameters\n        ----------\n        theta1, theta2 : float\n            The angles (in degrees) in which assemblies shall be drawn.\n\n        Returns\n        -------\n        aList : list\n            List of assemblies in this sector\n        \"\"\"\n        aList = []\n\n        converter = EdgeAssemblyChanger()\n        converter.addEdgeAssemblies(core)\n        for a in core:\n            x, y, _ = a.spatialLocator.getLocalCoordinates()\n            theta = math.atan2(y, x)\n            if theta < 0.0:\n                theta = math.tau + theta\n\n            theta = math.degrees(theta)\n\n            phi = theta\n            if theta1 <= phi <= theta2 or abs(theta1 - phi) < 0.001 or abs(theta2 - phi) < 0.001:\n                aList.append(a)\n        
converter.removeEdgeAssemblies(core.r.core)\n\n        if not aList:\n            raise ValueError(\"There are no assemblies in {} between angles of {} and {}\".format(core, theta1, theta2))\n\n        return aList\n\n    def _getAssemsInRadialThetaZone(self, lowerRing, upperRing, lowerTheta, upperTheta):\n        \"\"\"Retrieve list of assemblies in the reactor between (lowerRing, upperRing) and\n        (lowerTheta, upperTheta).\n        \"\"\"\n        thetaAssems = self._getAssembliesInSector(\n            self._sourceReactor.core, math.degrees(lowerTheta), math.degrees(upperTheta)\n        )\n        ringAssems = self._getAssembliesInCurrentRadialZone(lowerRing, upperRing)\n        if self._radialMeshConversionType == self._MESH_BY_RING_COMP:\n            ringAssems = self._selectAssemsBasedOnType(ringAssems)\n\n        ringAssems = set(ringAssems)\n        thetaAssems = set(thetaAssems)\n        assemsInRadialThetaZone = sorted(ringAssems.intersection(thetaAssems))\n\n        if not assemsInRadialThetaZone:\n            raise ValueError(\n                \"No assemblies in radial-theta zone between rings {} and {} and theta bounds of {} and {}\".format(\n                    lowerRing, upperRing, lowerTheta, upperTheta\n                )\n            )\n\n        return assemsInRadialThetaZone\n\n    def _selectAssemsBasedOnType(self, assems):\n        \"\"\"Retrieve a list of assemblies of a given type within a subset of an assembly list.\n\n        Parameters\n        ----------\n        assems: list\n            Subset of assemblies in the reactor.\n        \"\"\"\n        selectedAssems = []\n        for a in assems:\n            if a.getType().lower() == self._currentRadialZoneType:\n                selectedAssems.append(a)\n\n        return selectedAssems\n\n    def _createRadialThetaZone(self, innerDiameter, thetaIndex, radialIndex, lowerTheta, upperTheta, zoneAssems):\n        \"\"\"\n        Add a new stack of circles to the TRZ reactor by 
homogenizing assems.\n\n        Parameters\n        ----------\n        innerDiameter : float\n            The current innerDiameter of the radial-theta zone\n\n        thetaIndex : float\n            The theta index of the radial-theta zone\n\n        radialIndex : float\n            The radial index of the radial-theta zone\n\n        lowerTheta : float\n            The lower theta bound for the radial-theta zone\n\n        upperTheta : float\n            The upper theta bound for the radial-theta zone\n\n        Returns\n        -------\n        outerDiameter : float\n            The outer diameter (in cm) of the radial zone just added\n        \"\"\"\n        newAssembly = assemblies.ThRZAssembly(\"mixtureAssem\")\n        newAssembly.spatialLocator = self.convReactor.core.spatialGrid[thetaIndex, radialIndex, 0]\n        newAssembly.p.AziMesh = 2\n        newAssembly.spatialGrid = grids.AxialGrid.fromNCells(len(self.meshConverter.axialMesh), armiObject=newAssembly)\n\n        lfp = lumpedFissionProduct.lumpedFissionProductFactory(self._cs)\n\n        lowerAxialZ = 0.0\n        for axialIndex, upperAxialZ in enumerate(self.meshConverter.axialMesh):\n            # Setup the new block data\n            newBlockName = \"B{:04d}{}\".format(int(newAssembly.getNum()), chr(axialIndex + 65))\n            newBlock = blocks.ThRZBlock(newBlockName)\n\n            # Compute the homogenized block data\n            (\n                newBlockAtoms,\n                newBlockType,\n                newBlockTemp,\n                newBlockVol,\n            ) = self.createHomogenizedRZTBlock(newBlock, lowerAxialZ, upperAxialZ, zoneAssems)\n            # Compute radial zone outer diameter\n            axialSegmentHeight = upperAxialZ - lowerAxialZ\n            radialZoneVolume = self._calcRadialRingVolume(lowerAxialZ, upperAxialZ, radialIndex)\n            radialRingArea = radialZoneVolume / axialSegmentHeight * self._sourceReactor.core.powerMultiplier\n            outerDiameter = 
blockConverters.getOuterDiamFromIDAndArea(innerDiameter, radialRingArea)\n\n            # Set new homogenized block parameters\n            material = materials.material.Material()\n            material.name = \"mixture\"\n            material.refDens = 1.0  # generic density. Will cancel out.\n            dims = {\n                \"inner_radius\": innerDiameter / 2.0,\n                \"radius_differential\": (outerDiameter - innerDiameter) / 2.0,\n                \"inner_axial\": lowerAxialZ,\n                \"height\": axialSegmentHeight,\n                \"inner_theta\": lowerTheta,\n                \"azimuthal_differential\": (upperTheta - lowerTheta),\n                \"mult\": 1.0,\n                \"Tinput\": newBlockTemp,\n                \"Thot\": newBlockTemp,\n            }\n            for nuc in self._sourceReactor.blueprints.allNuclidesInProblem:\n                material.setMassFrac(nuc, 0.0)\n\n            newComponent = components.DifferentialRadialSegment(\"mixture\", material, **dims)\n            newBlock.p.axMesh = int(axialSegmentHeight / BLOCK_AXIAL_MESH_SPACING) + 1\n            newBlock.p.zbottom = lowerAxialZ\n            newBlock.p.ztop = upperAxialZ\n\n            newBlock.setLumpedFissionProducts(lfp)\n\n            # Assign the new block cross section type and burn up group\n            newBlock.setType(newBlockType)\n            newXsType, newEnvGroup = self._createBlendedXSID(newBlock)\n            newBlock.p.xsType = newXsType\n            newBlock.p.envGroup = newEnvGroup\n\n            # Update the block dimensions and set the block densities\n            newComponent.updateDims()  # ugh.\n            newBlock.p.height = axialSegmentHeight\n            newBlock.clearCache()\n            newBlock.add(newComponent)\n            for nuc, atoms in newBlockAtoms.items():\n                newBlock.setNumberDensity(nuc, atoms / newBlockVol)\n\n            self._writeRadialThetaZoneInfo(axialIndex + 1, axialSegmentHeight, newBlock)\n   
         self._checkVolumeConservation(newBlock)\n\n            newAssembly.add(newBlock)\n            lowerAxialZ = upperAxialZ\n\n        newAssembly.calculateZCoords()  # builds mesh\n        self.convReactor.core.add(newAssembly)\n\n        return outerDiameter\n\n    def _calcRadialRingVolume(self, lowerZ, upperZ, radialIndex):\n        \"\"\"Compute the total volume of a list of assemblies within a ring between two axial heights.\"\"\"\n        ringVolume = 0.0\n        for assem in self._assemsInRadialZone[radialIndex]:\n            for b, heightHere in assem.getBlocksBetweenElevations(lowerZ, upperZ):\n                ringVolume += b.getVolume() * heightHere / b.getHeight()\n\n        if not ringVolume:\n            raise ValueError(\"Ring volume of ring {} is 0.0\".format(radialIndex + 1))\n\n        return ringVolume\n\n    def _checkVolumeConservation(self, newBlock):\n        \"\"\"Write the volume fractions of each hex block within the homogenized RZT block.\"\"\"\n        newBlockVolumeFraction = 0.0\n        for hexBlock in self.blockMap[newBlock]:\n            newBlockVolumeFraction += self.blockVolFracs[newBlock][hexBlock]\n\n        if abs(newBlockVolumeFraction - 1.0) > 0.00001:\n            raise ValueError(\n                \"The volume fraction of block {} is {} and not 1.0. 
An error occurred when \"\n                \"converting the reactor geometry.\".format(newBlock, newBlockVolumeFraction)\n            )\n\n    def createHomogenizedRZTBlock(self, homBlock, lowerAxialZ, upperAxialZ, radialThetaZoneAssems):\n        \"\"\"\n        Create the homogenized RZT block by computing the average atoms in the zone.\n\n        Additional calculations are performed to determine the homogenized block type, the block\n        average temperature, and the volume fraction of each hex block that is in the new\n        homogenized block.\n        \"\"\"\n        homBlockXsTypes = set()\n        numHexBlockByType = collections.Counter()\n        homBlockAtoms = collections.defaultdict(int)\n        homBlockVolume = 0.0\n        homBlockTemperature = 0.0\n        for assem in radialThetaZoneAssems:\n            blocksHere = assem.getBlocksBetweenElevations(lowerAxialZ, upperAxialZ)\n            for b, heightHere in blocksHere:\n                homBlockXsTypes.add(b.p.xsType)\n                numHexBlockByType[b.getType().lower()] += 1\n                blockVolumeHere = b.getVolume() * heightHere / b.getHeight()\n                if blockVolumeHere == 0.0:\n                    raise ValueError(\"Geometry conversion failed. Block {} has zero volume\".format(b))\n                homBlockVolume += blockVolumeHere\n                homBlockTemperature += b.getAverageTempInC() * blockVolumeHere\n\n                numDensities = b.getNumberDensities()\n\n                for nucName, nDen in numDensities.items():\n                    homBlockAtoms[nucName] += nDen * blockVolumeHere\n                self.blockMap[homBlock].append(b)\n                self.blockVolFracs[homBlock][b] = blockVolumeHere\n        # Notify if blocks with different xs types are being homogenized. 
May be undesired behavior.\n        if len(homBlockXsTypes) > 1:\n            msg = (\n                \"Blocks {} with dissimilar XS IDs are being homogenized in {} between axial heights\"\n                \" {} cm and {} cm. \".format(\n                    self.blockMap[homBlock],\n                    self.convReactor.core,\n                    lowerAxialZ,\n                    upperAxialZ,\n                )\n            )\n            if self._strictHomogenization:\n                raise ValueError(msg + \"Modify mesh converter settings before proceeding.\")\n            else:\n                runLog.extra(msg)\n\n        homBlockType = self._getHomogenizedBlockType(numHexBlockByType)\n        homBlockTemperature = homBlockTemperature / homBlockVolume\n        for b in self.blockMap[homBlock]:\n            self.blockVolFracs[homBlock][b] = self.blockVolFracs[homBlock][b] / homBlockVolume\n\n        return homBlockAtoms, homBlockType, homBlockTemperature, homBlockVolume\n\n    def _getHomogenizedBlockType(self, numHexBlockByType):\n        \"\"\"\n        Generate the homogenized block mixture type based on the frequency of hex block types that\n        were merged together.\n\n        Notes\n        -----\n        self._BLOCK_MIXTURE_TYPE_EXCLUSIONS:\n            The normal function of this method is to assign the mixture name based on the number of\n            occurrences of the block type. This list stops that and assigns the mixture based on the\n            first occurrence. (i.e. 
if the mixture has a set of blocks but it comes across one with\n            the name of 'control' the process will stop and the new mixture type will be set to\n            'mixture control'.\n\n        self._BLOCK_MIXTURE_TYPE_MAP:\n            A dictionary that provides the name of blocks that are condensed together\n        \"\"\"\n        assignedMixtureBlockType = None\n\n        # Find the most common block type out of the types in the block mixture type exclusions list\n        excludedBlockTypesInBlock = set(\n            [x for x in self._BLOCK_MIXTURE_TYPE_EXCLUSIONS for y in numHexBlockByType if x in y]\n        )\n        if excludedBlockTypesInBlock:\n            for blockType in self._BLOCK_MIXTURE_TYPE_EXCLUSIONS:\n                if blockType in excludedBlockTypesInBlock:\n                    assignedMixtureBlockType = \"mixture \" + blockType\n                    return assignedMixtureBlockType\n\n        # Assign block type by most common hex block type\n        mostCommonHexBlockType = sorted(numHexBlockByType.most_common(1))[0][0]  # sort needed for tie break\n\n        for mixtureType in sorted(self._BLOCK_MIXTURE_TYPE_MAP):\n            validBlockTypesInMixture = self._BLOCK_MIXTURE_TYPE_MAP[mixtureType]\n            for validBlockType in validBlockTypesInMixture:\n                if validBlockType in mostCommonHexBlockType:\n                    assignedMixtureBlockType = mixtureType\n                    return assignedMixtureBlockType\n\n        assignedMixtureBlockType = \"mixture structure\"\n        runLog.debug(\n            f\"The mixture type for this homogenized block {mostCommonHexBlockType} \"\n            f\"was not determined and is defaulting to {assignedMixtureBlockType}\"\n        )\n\n        return assignedMixtureBlockType\n\n    def _createBlendedXSID(self, newBlock):\n        \"\"\"Generate the blended XS id using the most common XS id in the hexIdList.\"\"\"\n        ids = [hexBlock.getMicroSuffix() for hexBlock in 
self.blockMap[newBlock]]\n        xsTypeList, envGroupList = zip(*ids)\n\n        xsType, _count = collections.Counter(xsTypeList).most_common(1)[0]\n        envGroup, _count = collections.Counter(envGroupList).most_common(1)[0]\n\n        return xsType, envGroup\n\n    def _writeRadialThetaZoneHeader(self, radIdx, lowerRing, upperRing, thIdx, lowerTheta, upperTheta):\n        radialAssemType = \"({})\".format(self._currentRadialZoneType) if self._currentRadialZoneType is not None else \"\"\n        runLog.info(\"Creating: Radial Zone {}, Theta Zone {} {}\".format(radIdx + 1, thIdx + 1, radialAssemType))\n        runLog.extra(\n            \"{} Hex Rings: [{}, {}), Theta Revolutions: [{:.2f}, {:.2f})\".format(\n                9 * STR_SPACE,\n                lowerRing,\n                upperRing,\n                lowerTheta * units.RAD_TO_REV,\n                upperTheta * units.RAD_TO_REV,\n            )\n        )\n        runLog.debug(\n            \"{} Axial Zone - Axial Height (cm) Block Number Block Type             XS ID : \"\n            \"Original Hex Block XS ID(s)\".format(9 * STR_SPACE)\n        )\n        runLog.debug(\n            \"{} ---------- - ----------------- ------------ ---------------------- ----- : \"\n            \"---------------------------\".format(9 * STR_SPACE)\n        )\n\n    def _writeRadialThetaZoneInfo(self, axIdx, axialSegmentHeight, blockObj):\n        \"\"\"\n        Create a summary of the mapping between the converted reactor block ids to the hex\n        reactor block ids.\n        \"\"\"\n        self._newBlockNum += 1\n        hexBlockXsIds = []\n        for hexBlock in self.blockMap[blockObj]:\n            hexBlockXsIds.append(hexBlock.getMicroSuffix())\n\n        runLog.debug(\n            \"{} {:<10} - {:<17.3f} {:<12} {:<22} {:<5} : {}\".format(\n                9 * STR_SPACE,\n                axIdx,\n                axialSegmentHeight,\n                self._newBlockNum,\n                blockObj.getType(),\n        
        blockObj.getMicroSuffix(),\n                hexBlockXsIds,\n            )\n        )\n\n    def _expandSourceReactorGeometry(self):\n        \"\"\"Expansion of the reactor geometry to build the R-Z-Theta core model.\"\"\"\n        runLog.info(\"Expanding source reactor core to a full core model\")\n        reactorExpander = ThirdCoreHexToFullCoreChanger(self._cs)\n        reactorExpander.convert(self._sourceReactor)\n        self._sourceReactor.core.summarizeReactorStats()\n\n    def plotConvertedReactor(self, fNameBase=None):\n        \"\"\"\n        Generate plots for the converted RZT reactor. A pass-through to preserve the API.\n\n        Parameters\n        ----------\n        fNameBase : str, optional\n            A name that will form the basis of the N plots that are generated by this method. Will get split on\n            extension and have numbers added. Should be like ``coreMap.png``.\n        \"\"\"\n        return plotting.plotConvertedRZTReactor(self.convReactor, fNameBase)\n\n    def reset(self):\n        \"\"\"Clear out attribute data, including holding the state of the converted reactor core model.\"\"\"\n        self.meshConverter = None\n        self._radialMeshConversionType = None\n        self._axialMeshConversionType = None\n        self._previousRadialZoneAssemTypes = None\n        self._currentRadialZoneType = None\n        self._assemsInRadialZone = collections.defaultdict(list)\n        self._newBlockNum = 0\n        self.blockMap = collections.defaultdict(list)\n        self.blockVolFracs = collections.defaultdict(dict)\n        self.convReactor = None\n        super().reset()\n\n\nclass HexToRZConverter(HexToRZThetaConverter):\n    \"\"\"\n    Create a new reactor with R-Z coordinates from the Hexagonal-Z reactor.\n\n    This is a subclass of the HexToRZThetaConverter. 
See the HexToRZThetaConverter for\n    explanation and setup of the converterSettings.\n    \"\"\"\n\n    _GEOMETRY_TYPE = geometry.GeomType.RZ\n\n\nclass ThirdCoreHexToFullCoreChanger(GeometryChanger):\n    \"\"\"\n    Change third-core models to full core in place.\n\n    Does not generate a new reactor object.\n\n    Examples\n    --------\n    >>> converter = ThirdCoreHexToFullCoreChanger()\n    >>> converter.convert(myReactor)\n    \"\"\"\n\n    EXPECTED_INPUT_SYMMETRY = geometry.SymmetryType(geometry.DomainType.THIRD_CORE, geometry.BoundaryType.PERIODIC)\n\n    def __init__(self, cs=None):\n        GeometryChanger.__init__(self, cs)\n        self.listOfAssemblyVolIntegratedParamsToScale = []\n        self.listOfBlockVolIntegratedParamsToScale = []\n        self.grid = None\n\n    @staticmethod\n    def _scaleVolIntegratedParams(obj, paramList, direction):\n        if direction == \"up\":\n            op = operator.mul\n        elif direction == \"down\":\n            op = operator.truediv\n\n        for param in paramList:\n            if obj.p[param] is None:\n                continue\n            if type(obj.p[param]) is list:\n                # some params like volume-integrated mg flux are lists\n                obj.p[param] = [op(val, 3) for val in obj.p[param]]\n            else:\n                obj.p[param] = op(obj.p[param], 3)\n\n    def convert(self, r: reactors.Reactor):\n        \"\"\"\n        Run the conversion.\n\n        .. impl:: Convert a one-third-core geometry to a full-core geometry.\n            :id: I_ARMI_THIRD_TO_FULL_CORE0\n            :implements: R_ARMI_THIRD_TO_FULL_CORE\n\n            This method first checks if the input reactor is already full core. If full-core\n            symmetry is detected, the input reactor is returned. 
If not, it then verifies that the\n            input reactor has the expected one-third core symmetry and HEX geometry.\n\n            Upon conversion, it loops over the assembly vector of the source one-third core model,\n            copies and rotates each source assembly to create new assemblies, and adds them on the\n            full-core grid. For the center assembly, it modifies its parameters.\n\n            Finally, it sets the domain type to full core.\n\n        Parameters\n        ----------\n        sourceReactor : Reactor object\n            The reactor to convert.\n        \"\"\"\n        self._sourceReactor = r\n\n        if self._sourceReactor.core.isFullCore:\n            # already full core from geometry file. No need to copy symmetry over.\n            runLog.important(\"Detected that full core reactor already exists. Cannot expand.\")\n            return self._sourceReactor\n        elif not (\n            self._sourceReactor.core.symmetry == self.EXPECTED_INPUT_SYMMETRY\n            and self._sourceReactor.core.geomType == geometry.GeomType.HEX\n        ):\n            raise ValueError(\n                \"ThirdCoreHexToFullCoreChanger requires the input to have third core hex geometry. 
\"\n                \"Geometry received was {} {} {}\".format(\n                    self._sourceReactor.core.symmetry.domain,\n                    self._sourceReactor.core.symmetry.boundary,\n                    self._sourceReactor.core.geomType,\n                )\n            )\n\n        edgeChanger = EdgeAssemblyChanger()\n        edgeChanger.removeEdgeAssemblies(self._sourceReactor.core)\n        runLog.info(\"Expanding to full core geometry\")\n\n        # store a copy of the 1/3 geometry grid, so that we can use it to find symmetric\n        # locations, while the core has a full-core grid so that it does not yell at us\n        # for adding stuff outside of the first 1/3\n        self.grid = copy.deepcopy(self._sourceReactor.core.spatialGrid)\n\n        # Set the core grid's symmetry early, since the core uses it for error checks\n        self._sourceReactor.core.symmetry = geometry.SymmetryType(\n            geometry.DomainType.FULL_CORE, geometry.BoundaryType.NO_SYMMETRY\n        )\n\n        for a in self._sourceReactor.core.getAssemblies():\n            # make extras and add them too. 
since the input is assumed to be 1/3 core.\n            otherLocs = self.grid.getSymmetricEquivalents(a.spatialLocator.indices)\n            thisZone = (\n                self._sourceReactor.core.zones.findZoneItIsIn(a) if len(self._sourceReactor.core.zones) > 0 else None\n            )\n            angle = 2 * math.pi / (len(otherLocs) + 1)\n            count = 1\n            for i, j in otherLocs:\n                newAssem = copy.deepcopy(a)\n                newAssem.makeUnique()\n                newAssem.rotate(count * angle)\n                count += 1\n                self._sourceReactor.core.add(newAssem, self._sourceReactor.core.spatialGrid[i, j, 0])\n                if thisZone:\n                    thisZone.addLoc(newAssem.getLocation())\n                self._newAssembliesAdded.append(newAssem)\n                self._updateThirdToFullCoreLocHist(newAssem, count - 2)\n\n            if a.getLocation() == \"001-001\":\n                runLog.extra(f\"Modifying parameters in central assembly {a} to convert from 1/3 to full core\")\n\n                if not self.listOfBlockVolIntegratedParamsToScale:\n                    # populate the list with all parameters that are VOLUME_INTEGRATED\n                    (\n                        self.listOfBlockVolIntegratedParamsToScale,\n                        _,\n                    ) = _generateListOfParamsToScale(self._sourceReactor.core.getFirstBlock(), paramsToScaleSubset=[])\n                if not self.listOfAssemblyVolIntegratedParamsToScale:\n                    (self.listOfAssemblyVolIntegratedParamsToScale, _) = _generateListOfParamsToScale(\n                        self._sourceReactor.core.getFirstAssembly(), paramsToScaleSubset=[]\n                    )\n                self._scaleVolIntegratedParams(a, self.listOfAssemblyVolIntegratedParamsToScale, \"up\")\n                for b in a:\n                    self._scaleVolIntegratedParams(b, self.listOfBlockVolIntegratedParamsToScale, \"up\")\n\n        # 
set domain after expanding, because it isn't actually full core until it's\n        # full core; setting the domain causes the core to clear its caches.\n        self._sourceReactor.core.symmetry = geometry.SymmetryType(\n            geometry.DomainType.FULL_CORE, geometry.BoundaryType.NO_SYMMETRY\n        )\n\n    def restorePreviousGeometry(self, r=None):\n        \"\"\"Undo the changes made by convert by going back to 1/3 core.\n\n        .. impl:: Restore a one-third-core geometry to a full-core geometry.\n            :id: I_ARMI_THIRD_TO_FULL_CORE1\n            :implements: R_ARMI_THIRD_TO_FULL_CORE\n\n            This method is a reverse process of the method ``convert``. It converts the full-core\n            reactor model back to the original one-third core reactor model by removing the added\n            assemblies and changing the parameters of the center assembly from full core to one\n            third core.\n        \"\"\"\n        r = r or self._sourceReactor\n\n        # remove the assemblies that were added when the conversion happened.\n        if bool(self._newAssembliesAdded):\n            for a in self._newAssembliesAdded:\n                r.core.removeAssembly(a, discharge=False)\n\n            r.core.symmetry = geometry.SymmetryType.fromAny(self.EXPECTED_INPUT_SYMMETRY)\n\n            # change the central assembly params back to 1/3\n            a = r.core.getAssemblyWithStringLocation(\"001-001\")\n            runLog.extra(f\"Modifying parameters in central assembly {a} to revert from full to 1/3 core\")\n            self._scaleVolIntegratedParams(a, self.listOfAssemblyVolIntegratedParamsToScale, \"down\")\n            for b in a:\n                self._scaleVolIntegratedParams(b, self.listOfBlockVolIntegratedParamsToScale, \"down\")\n        self.reset()\n\n    def _updateThirdToFullCoreLocHist(self, newAssembly, otherLocIndex):\n        \"\"\"\n        Update the assembly location history parameter to ensure created assemblies have\n        
the correct movement histories for their corresponding full core location.\n        \"\"\"\n        newLocHist = []\n        for r, p in newAssembly.p.ringPosHist:\n            if r not in assemblies.Assembly.NOT_IN_CORE:\n                # ring/pos may come in as strings and need to be cast as ints.\n                i, j = self.grid.getIndicesFromRingAndPos(int(r), int(p))\n                otherLocs = self.grid.getSymmetricEquivalents([i, j, 0])\n                otherLoc = otherLocs[otherLocIndex]\n                r, p = self.grid.indicesToRingPos(*otherLoc)\n            newLocHist.append((r, p))\n        newAssembly.p.ringPosHist = newLocHist\n\n\nclass EdgeAssemblyChanger(GeometryChanger):\n    \"\"\"\n    Add/remove \"edge assemblies\" for Finite difference or MCNP cases.\n\n    Examples\n    --------\n        edgeChanger = EdgeAssemblyChanger()\n        edgeChanger.removeEdgeAssemblies(reactor.core)\n    \"\"\"\n\n    def addEdgeAssemblies(self, core):\n        \"\"\"\n        Add the assemblies on the 120 degree symmetric line to 1/3 symmetric cases.\n\n        Needs to be called before a finite difference (DIF3D, DIFNT) or MCNP calculation.\n\n        .. impl:: Add assemblies along the 120-degree line to a reactor.\n            :id: I_ARMI_ADD_EDGE_ASSEMS0\n            :implements: R_ARMI_ADD_EDGE_ASSEMS\n\n            Edge assemblies on the 120-degree symmetric line of a one-third core reactor model are\n            added because they are needed for DIF3D-finite difference or MCNP models. 
This is done\n            by copying the assemblies from the lower boundary and placing them in their reflective\n            positions on the upper boundary of the symmetry line.\n\n        Parameters\n        ----------\n        reactor : Reactor\n            Reactor to modify\n\n        See Also\n        --------\n        removeEdgeAssemblies : removes the edge assemblies\n        \"\"\"\n        if core.isFullCore:\n            return\n\n        if self._newAssembliesAdded:\n            runLog.important(\"Skipping addition of edge assemblies because they are already there\")\n            return\n\n        assembliesOnLowerBoundary = core.getAssembliesOnSymmetryLine(grids.BOUNDARY_0_DEGREES)\n        assembliesOnUpperBoundary = []\n        for a in assembliesOnLowerBoundary:\n            a.clearCache()  # symmetry factors of these assemblies will change since they are now half assems.\n            a2 = copy.deepcopy(a)\n            a2.makeUnique()\n            assembliesOnUpperBoundary.append(a2)\n\n        if not assembliesOnUpperBoundary:\n            runLog.extra(\"No edge assemblies to add\")\n\n        # Move the assemblies into their reflective position on symmetry line 3\n        for a in assembliesOnUpperBoundary:\n            # loc will now be either an empty set [], or two different locations\n            # in our case, we only want the first of the two locations\n            locs = core.spatialGrid.getSymmetricEquivalents(a.spatialLocator)\n            if locs:\n                i, j = locs[0]\n                spatialLocator = core.spatialGrid[i, j, 0]\n                if core.childrenByLocator.get(spatialLocator):\n                    runLog.warning(\"Edge assembly already exists in {0}. 
Not adding.\".format(locs[0]))\n                    continue\n                # add the copied assembly to the reactor list\n                runLog.debug(\"Adding edge assembly {0} to {1} to the reactor\".format(a, spatialLocator))\n                core.add(a, spatialLocator)\n                self._newAssembliesAdded.append(a)\n\n        parameters.ALL_DEFINITIONS.resetAssignmentFlag(SINCE_LAST_GEOMETRY_TRANSFORMATION)\n\n    def removeEdgeAssemblies(self, core):\n        \"\"\"\n        Remove the edge assemblies in preparation for the nodal diffusion approximation.\n\n        This makes use of the assemblies knowledge of if it is in a region that it needs to be\n        removed.\n\n        .. impl:: Remove assemblies along the 120-degree line from a reactor.\n            :id: I_ARMI_ADD_EDGE_ASSEMS1\n            :implements: R_ARMI_ADD_EDGE_ASSEMS\n\n            This method is the reverse process of the method ``addEdgeAssemblies``. It is needed for\n            the DIF3D-Nodal calculation. 
It removes the assemblies on the 120-degree symmetry line.\n\n        See Also\n        --------\n        addEdgeAssemblies : adds the edge assemblies\n        \"\"\"\n        if core.isFullCore:\n            return\n\n        assembliesOnLowerBoundary = core.getAssembliesOnSymmetryLine(grids.BOUNDARY_0_DEGREES)\n        # Don't use newAssembliesAdded b/c this may be BOL cleaning of a fresh case that has edge\n        # assems.\n        edgeAssemblies = core.getAssembliesOnSymmetryLine(grids.BOUNDARY_120_DEGREES)\n\n        for a in edgeAssemblies:\n            runLog.debug(\n                \"Removing edge assembly {} from {} from the reactor without discharging\".format(\n                    a, a.spatialLocator.getRingPos()\n                )\n            )\n            core.removeAssembly(a, discharge=False)\n\n        if edgeAssemblies:\n            for a in assembliesOnLowerBoundary:\n                a.clearCache()  # clear cached area since symmetry factor will change\n            # Reset the SINCE_LAST_GEOMETRY_TRANSFORMATION flag, so that subsequent geometry\n            # conversions don't erroneously think they've been changed inside this geometry\n            # conversion\n            pDefs = parameters.ALL_DEFINITIONS.unchanged_since(NEVER)\n            pDefs.setAssignmentFlag(SINCE_LAST_GEOMETRY_TRANSFORMATION)\n        else:\n            runLog.debug(\"No edge assemblies to remove.\")\n\n        self.reset()\n\n    @staticmethod\n    def scaleParamsRelatedToSymmetry(core: reactors.Core, paramsToScaleSubset=None):\n        \"\"\"\n        Scale volume-dependent params like power to account for cut-off edges.\n\n        These params are at half their full hex value. Scale them right before deleting their\n        symmetric identicals. 
The two operations (scaling them and then removing others) is\n        identical to combining two half-assemblies into a full one.\n\n        See Also\n        --------\n        armi.reactor.converters.geometryConverter.EdgeAssemblyChanger.removeEdgeAssemblies\n        armi.reactor.blocks.HexBlock.getSymmetryFactor\n        \"\"\"\n        runLog.extra(\"Scaling edge-assembly parameters to account for full hexes instead of two halves\")\n        completeListOfBlockParamsToScale = _generateListOfParamsToScale(core.getFirstBlock(), paramsToScaleSubset)\n        symmetricAssems = (\n            core.getAssembliesOnSymmetryLine(grids.BOUNDARY_0_DEGREES),\n            core.getAssembliesOnSymmetryLine(grids.BOUNDARY_120_DEGREES),\n        )\n        if not all(symmetricAssems):\n            runLog.extra(\"No edge-assemblies found to scale parameters for.\")\n\n        for a, aSymmetric in zip(*symmetricAssems):\n            for b, bSymmetric in zip(a, aSymmetric):\n                _scaleParamsInBlock(b, bSymmetric, completeListOfBlockParamsToScale)\n\n\ndef _generateListOfParamsToScale(obj: Union[\"Core\", \"Assembly\", \"Block\"], paramsToScaleSubset):\n    fluxParamsToScale = (\n        obj.p.paramDefs.inCategory(Category.fluxQuantities).inCategory(Category.multiGroupQuantities).names\n    )\n    listOfVolumeIntegratedParamsToScale = obj.p.paramDefs.atLocation(ParamLocation.VOLUME_INTEGRATED).since(\n        SINCE_LAST_GEOMETRY_TRANSFORMATION\n    )\n    listOfVolumeIntegratedParamsToScale = listOfVolumeIntegratedParamsToScale.names\n    if paramsToScaleSubset:\n        listOfVolumeIntegratedParamsToScale = [\n            pn for pn in paramsToScaleSubset if pn in listOfVolumeIntegratedParamsToScale\n        ]\n    return (listOfVolumeIntegratedParamsToScale, fluxParamsToScale)\n\n\ndef _scaleParamsInBlock(b, bSymmetric, completeListOfParamsToScale):\n    \"\"\"Scale volume-integrated params to include their identical symmetric assemblies.\"\"\"\n    
listOfVolumeIntegratedParamsToScale, fluxParamsToScale = completeListOfParamsToScale\n    for paramName in [pn for pn in listOfVolumeIntegratedParamsToScale if np.any(b.p[pn])]:\n        runLog.debug(\n            \"Scaling {} in symmetric identical assemblies\".format(paramName),\n            single=True,\n        )\n        if paramName in fluxParamsToScale:\n            _scaleFluxValues(b, bSymmetric, paramName)  # updated volume weighted fluxes\n        else:\n            b.p[paramName] = b.p[paramName] + bSymmetric.p[paramName]\n\n\ndef _scaleFluxValues(b, bSymmetric, paramName):\n    totalVol = b.getVolume() + bSymmetric.getVolume()\n\n    b.p[paramName] = [f + fSymmetric for f, fSymmetric in zip(b.p[paramName], bSymmetric.p[paramName])]\n\n    newTotalFlux = sum(b.p[paramName]) / totalVol\n\n    if paramName == \"mgFlux\":\n        b.p.flux = newTotalFlux\n    elif paramName == \"adjMgFlux\":\n        b.p.fluxAdj = newTotalFlux\n    elif paramName == \"mgFluxGamma\":\n        b.p.fluxGamma = newTotalFlux\n"
  },
  {
    "path": "armi/reactor/converters/meshConverters.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Mesh specifiers update the mesh structure of a reactor by increasing or decreasing the number of mesh coordinates.\"\"\"\n\nimport collections\nimport itertools\nimport math\n\nimport numpy as np\n\nfrom armi import runLog\nfrom armi.reactor import grids\nfrom armi.utils import units\n\n\nclass MeshConverter:\n    \"\"\"\n    Base class for the reactor mesh conversions.\n\n    Parameters\n    ----------\n    converterSettings : dict\n        A set of str, value settings used in mesh conversion. 
Required\n        settings are implementation specific.\n    \"\"\"\n\n    def __init__(self, converterSettings: dict):\n        self._converterSettings = converterSettings\n\n    def generateMesh(self, r=None):\n        raise NotImplementedError\n\n    def writeMeshData(self):\n        raise NotImplementedError\n\n\nclass RZThetaReactorMeshConverter(MeshConverter):\n    \"\"\"\n    Handles mesh conversions for r-z-theta reactor geometries.\n\n    Attributes\n    ----------\n    converterSettings: dict\n        This is a dictionary of settings that are used for the RZThetaReactorMeshConverter.\n        Required converter settings: ``uniformThetaMesh``,``thetaBins``\n\n    See Also\n    --------\n    RZThetaReactorMeshConverterByRingCompositionAxialBins\n    RZThetaReactorMeshConverterByRingCompositionAxialCoordinates\n    \"\"\"\n\n    def __init__(self, converterSettings):\n        MeshConverter.__init__(self, converterSettings)\n        self._useUniformThetaMesh = None\n        self._numThetaMeshBins = None\n        self._axialSegsPerBin = None\n        self._ringsPerBin = None\n        self._numRingsInCore = None\n        self._assemsInCore = None\n        self._coreAxialMeshCoords = None\n        self.radialMesh = None\n        self.axialMesh = None\n        self.thetaMesh = None\n        self.numRingBins = None\n        self.numAxialMeshBins = None\n        self.numThetaMeshBins = None\n\n    def generateMesh(self, r=None):\n        core = r.core\n        converterSettings = self._converterSettings\n        self._useUniformThetaMesh = converterSettings[\"uniformThetaMesh\"]\n        self._numThetaMeshBins = converterSettings[\"thetaBins\"]\n        self._converterSettings = converterSettings\n        self._numRingsInCore = core.getNumHexRings()\n        self._assemsInCore = core.getAssemblies()\n        self._coreAxialMeshCoords = core.findAllAxialMeshPoints(applySubMesh=False)\n        self.setAxialMesh(core)\n        self._checkAxialMeshList()\n        
self.setThetaMesh()\n        self._checkThetaMeshList()\n        self.setRingsToConvert(core)\n        self._checkRingList(core)\n        self.numRingBins = len(self.radialMesh)\n        self.numAxialMeshBins = len(self.axialMesh)\n        self.numThetaMeshBins = len(self.thetaMesh)\n        self.writeMeshData()\n\n        # Build mesh reactor mesh\n        # thetaMesh doesn't include the zero point so add it back in.\n        # axial mesh is handled on assemblies so make this 2-D.\n\n        mesh = grids.ThetaRZGrid(bounds=([0.0] + self.thetaMesh, self.radialMesh, (0.0, 0.0)))\n        return mesh\n\n    def writeMeshData(self):\n        \"\"\"\n        Write a summary table of the radial, axial, and theta bins that will be used for geometry conversion.\n\n        Notes\n        -----\n        This should be on the ``ThetaRZGrid`` object.\n        \"\"\"\n        binCombinations = self.numRingBins * self.numAxialMeshBins * self.numThetaMeshBins\n        runLog.info(\"Total mesh bins (r, z, theta): {0}\".format(binCombinations))\n        runLog.info(\n            \"  Radial bins: {}\\n  Axial bins:  {}\\n  Theta bins:  {}\".format(\n                self.numRingBins, self.numAxialMeshBins, self.numThetaMeshBins\n            )\n        )\n        self._writeMeshLogData()\n\n    def _writeMeshLogData(self):\n        self._logMeshData(self.radialMesh, \"Radial ring indices:\", \"int\")\n        self._logMeshData(self.axialMesh, \"Axial mesh coordinates:\", \"float\")\n        self._logMeshData(self.thetaMesh, \"Theta mesh coordinates:\", \"float\")\n\n    def _logMeshData(self, listType, listName, listDataType):\n        if listDataType == \"float\":\n            listType = [\"{:<8.3f}\".format(floatValue) for floatValue in listType]\n        runLog.extra(\"{0} {1}\".format(listName, listType))\n\n    def setRingsToConvert(self, core):\n        raise NotImplementedError\n\n    def setAxialMesh(self, core):\n        raise NotImplementedError\n\n    def 
setThetaMesh(self):\n        \"\"\"Generate a uniform theta mesh in radians.\"\"\"\n        if self._useUniformThetaMesh is None:\n            raise ValueError(\"useUniformThetaMesh setting was not specified in the converterSettings\")\n        if self._numThetaMeshBins is None:\n            raise ValueError(\"numThetaMeshBins were not specified in the converterSettings\")\n        if self._useUniformThetaMesh:\n            self._generateUniformThetaMesh()\n        else:\n            self._generateNonUniformThetaMesh()\n\n    def _generateUniformThetaMesh(self):\n        \"\"\"Create a uniform theta mesh over 2*pi using the user specified number of theta bins.\"\"\"\n        self.thetaMesh = list(np.linspace(0, 2 * math.pi, self._numThetaMeshBins + 1)[1:])\n\n    def _generateNonUniformThetaMesh(self):\n        raise NotImplementedError(\"Non-uniform theta mesh not implemented. Use uniform theta mesh.\")\n\n    def _checkRingList(self, core):\n        \"\"\"Check for any errors in the radial rings.\"\"\"\n        minRingNum = 1\n        self.radialMesh = sorted(self.radialMesh)\n        rings = checkLastValueInList(self.radialMesh, \"rings\", self._numRingsInCore + 1, adjustLastValue=True)\n        maxAssemsInOuterRing = core.getMaxAssembliesInHexRing(self._numRingsInCore)\n        assemsInOuterRing = len(core.getAssembliesInSquareOrHexRing(self._numRingsInCore))\n        if (maxAssemsInOuterRing - assemsInOuterRing) > 0 and len(self.thetaMesh) > 1:\n            self._combineLastTwoRadialBins()\n        checkListBounds(rings, \"rings\", minRingNum, self._numRingsInCore + 1)\n\n    def _combineLastTwoRadialBins(self):\n        if (self.radialMesh[-1] - self.radialMesh[-2]) == 1:\n            runLog.extra(\n                \"Outermost ring of the core {} is not fully filled and will be homogenized with the \"\n                \"previous ring {}\".format(self.radialMesh[-1], self.radialMesh[-2])\n            )\n            self.radialMesh.pop(-1)\n            
self.radialMesh.pop(-2)\n            self.radialMesh.append(self.radialMesh[-1])\n\n    def _checkAxialMeshList(self):\n        \"\"\"Check for errors in the axial mesh coordinates.\"\"\"\n        minAxialCoordInReactor = self._coreAxialMeshCoords[0]\n        maxAxialCoordInReactor = self._coreAxialMeshCoords[-1]\n        self.axialMesh = sorted(set(self.axialMesh))\n        checkListBounds(self.axialMesh, \"axialMesh\", minAxialCoordInReactor, maxAxialCoordInReactor)\n        self.axialMesh = checkLastValueInList(self.axialMesh, \"axialMesh\", maxAxialCoordInReactor, adjustLastValue=True)\n\n    def _checkThetaMeshList(self):\n        \"\"\"Check for errors in the theta mesh coordinates.\"\"\"\n        self.thetaMesh = sorted(set(self.thetaMesh))\n        checkListBounds(self.thetaMesh, \"thetaMesh\", 0.0, 2 * math.pi)\n        self.thetaMesh = checkLastValueInList(self.thetaMesh, \"thetaMesh\", 2 * math.pi)\n\n\nclass _RZThetaReactorMeshConverterByAxialCoordinates(RZThetaReactorMeshConverter):\n    \"\"\"Generate an axial mesh based on user provided axial mesh coordinates.\"\"\"\n\n    def setAxialMesh(self, core):\n        \"\"\"Set up the reactor's new radial rings based on a user-specified axial coordinate list (axial mesh).\"\"\"\n        self.axialMesh = self._converterSettings[\"axialMesh\"]\n\n\nclass _RZThetaReactorMeshConverterByAxialBins(RZThetaReactorMeshConverter):\n    \"\"\"\n    Generate an axial mesh based on user provided axial bins.\n\n    Notes\n    -----\n    The new mesh structure is formed by merging multiple \"bins\" together (i.e. 
numPerBin\n    = 2 and the original mesh is [1, 2, 3, 4, 5, 6, 7, 8], the new mesh structure will\n    be [2, 4, 6, 8]).\n    \"\"\"\n\n    def setAxialMesh(self, core):\n        \"\"\"\n        Set up axial mesh coordinates using user-specified number of axial segments per bins.\n\n        Notes\n        -----\n        Example:\n            Original core axial mesh list - [25.0, 50.0, 75.0, 100.0, 175.0] cm\n            axialSegsPerBin = 2\n            Merged core axial mesh list - [50.0, 100.0, 175.0] cm\n        \"\"\"\n        self._axialSegsPerBin = self._converterSettings[\"axialSegsPerBin\"]\n        self._mergeAxialMeshByAxialSegsPerBin()\n\n    def _mergeAxialMeshByAxialSegsPerBin(self):\n        axialStartNum = 0\n        totalAxialSegsInCore = len(self._coreAxialMeshCoords) - 1\n        axialMeshIndices = generateBins(totalAxialSegsInCore, self._axialSegsPerBin, axialStartNum)\n        self.axialMesh = [0] * len(axialMeshIndices)\n        for axialMeshIndex, locIndex in enumerate(axialMeshIndices):\n            self.axialMesh[axialMeshIndex] = self._coreAxialMeshCoords[locIndex]\n\n\nclass _RZThetaReactorMeshConverterByAxialFlags(RZThetaReactorMeshConverter):\n    \"\"\"Generate an axial mesh based on examining the block flags axially across the core.\"\"\"\n\n    def setAxialMesh(self, core):\n        \"\"\"\n        Generate an axial mesh based on examining the block flags axially across the core.\n\n        Notes\n        -----\n        This approach is useful as it will create the largest material regions possible to minimize number of axially\n        regions within the converted reactor core. 
This class not only looks at the block flags axially, but will add\n        new mesh points for regions where the blocks of the same flag differ by XSID.\n        \"\"\"\n        axialMeshCoordinates = collections.defaultdict(set)\n        for a in core.getAssemblies():\n            blockFlags = set([(b.p.flags, b.getMicroSuffix()) for b in a])\n            for flags, xsID in blockFlags:\n                meshes = []\n                for b in a.iterBlocks(flags):\n                    # Skip this block if it has a different XS ID than the\n                    # current target.\n                    if b.getMicroSuffix() != xsID:\n                        continue\n\n                    # Neglect any zero mesh points as zero points are implicit\n                    if b.p.zbottom != 0.0:\n                        meshes.append(round(b.p.zbottom, units.FLOAT_DIMENSION_DECIMALS))\n                    if b.p.ztop != 0.0:\n                        meshes.append(round(b.p.ztop, units.FLOAT_DIMENSION_DECIMALS))\n                axialMeshCoordinates[a].add(min(meshes))\n                axialMeshCoordinates[a].add(max(meshes))\n        self.axialMesh = sorted(set(itertools.chain(*axialMeshCoordinates.values())))\n\n\nclass _RZThetaReactorMeshConverterByRingComposition(RZThetaReactorMeshConverter):\n    \"\"\"Generate a new mesh based on the radial compositions in the core.\"\"\"\n\n    def __init__(self, cs):\n        RZThetaReactorMeshConverter.__init__(self, cs)\n        self._ringCompositions = None\n\n    def setRingsToConvert(self, core):\n        \"\"\"Set up the reactor's new radial rings based on the ring compositions (assembly types).\"\"\"\n        self.radialMesh, self._ringCompositions = self._getCompositionTypesPerRing(core)\n\n    def _getCompositionTypesPerRing(self, core):\n        \"\"\"Set composition of each ring in the reactor by the assembly type.\"\"\"\n        ringIndices = []\n        ringCompositions = []\n        numRings = [r for r in range(1, 
self._numRingsInCore + 1)]\n        for _i, ring in enumerate(numRings):\n            # Note that this needs to be in a HEX ring - Circular ring mode\n            # is not supported.\n            assemsInRing = core.getAssembliesInSquareOrHexRing(ring)\n            compsInRing = []\n            for a in assemsInRing:\n                assemType = a.getType().lower()\n                if assemType not in compsInRing:\n                    compsInRing.append(assemType)\n            for c in compsInRing:\n                ringIndices.append(ring + 1)\n                ringCompositions.append(c)\n        return ringIndices, ringCompositions\n\n    def _checkRingList(self, core):\n        \"\"\"Check for initialization errors in the radial ring list provided by the user.\"\"\"\n        minRingNum = 1\n        self.radialMesh = sorted(self.radialMesh)\n        rings = checkLastValueInList(self.radialMesh, \"rings\", self._numRingsInCore + 1, adjustLastValue=True)\n        checkListBounds(rings, \"rings\", minRingNum, self._numRingsInCore + 1)\n\n    def _writeMeshLogData(self):\n        radialIndices = [i + 1 for i in range(len(self.radialMesh))]\n        self._logMeshData(radialIndices, \"Radial ring indices:\", \"int\")\n        self._logMeshData(self._ringCompositions, \"Radial ring compositions:\", \"str\")\n        self._logMeshData(self.axialMesh, \"Axial mesh coordinates:\", \"float\")\n        self._logMeshData(self.thetaMesh, \"Theta mesh coordinates:\", \"float\")\n\n\nclass RZThetaReactorMeshConverterByRingCompositionAxialBins(\n    _RZThetaReactorMeshConverterByRingComposition,\n    _RZThetaReactorMeshConverterByAxialBins,\n):\n    \"\"\"\n    Generate a new mesh based on the radial compositions and axial bins in the core.\n\n    See Also\n    --------\n    _RZThetaReactorMeshConverterByRingComposition\n    _RZThetaReactorMeshConverterByAxialBins\n    \"\"\"\n\n    pass\n\n\nclass RZThetaReactorMeshConverterByRingCompositionAxialCoordinates(\n    
_RZThetaReactorMeshConverterByRingComposition,\n    _RZThetaReactorMeshConverterByAxialCoordinates,\n):\n    \"\"\"\n    Generate a new mesh based on the radial compositions and axial coordinates in the core.\n\n    See Also\n    --------\n    _RZThetaReactorMeshConverterByRingComposition\n    _RZThetaReactorMeshConverterByAxialCoordinates\n    \"\"\"\n\n    pass\n\n\nclass RZThetaReactorMeshConverterByRingCompositionAxialFlags(\n    _RZThetaReactorMeshConverterByRingComposition,\n    _RZThetaReactorMeshConverterByAxialFlags,\n):\n    \"\"\"\n    Generate a new mesh based on the radial compositions and axial material\n    (based on block flags) regions in the core.\n\n    See Also\n    --------\n    _RZThetaReactorMeshConverterByRingComposition\n    _RZThetaReactorMeshConverterByAxialFlags\n    \"\"\"\n\n    pass\n\n\ndef checkLastValueInList(inputList, listName, expectedValue, eps=0.001, adjustLastValue=False):\n    \"\"\"Check that the last value in the list is equal to the expected value within +/- eps.\"\"\"\n    msg = \"The last value in {} is {} and should be {}\".format(listName, inputList[-1], expectedValue)\n    if not np.isclose(inputList[-1], expectedValue, eps):\n        if adjustLastValue:\n            del inputList[-1]\n            inputList.append(expectedValue)\n            runLog.extra(msg)\n            runLog.extra(\"Updating {} in {} to {}\".format(inputList[-1], listName, expectedValue))\n        else:\n            raise ValueError(msg)\n    return inputList\n\n\ndef checkListBounds(inputList, listName, minVal, maxVal, eps=0.001):\n    \"\"\"Ensure that each value in a list does not exceed the allowable bounds.\"\"\"\n    for value in inputList:\n        minDiff = value - minVal\n        maxDiff = value - maxVal\n        if minDiff < -eps or maxDiff > eps:\n            raise ValueError(\n                \"Invalid values {} out of expected bounds {} to {}\".format(listName, minVal - eps, maxVal + eps)\n            )\n\n\ndef 
generateBins(totalNumDataPoints, numPerBin, minNum):\n    \"\"\"Fill in a list based on the total number of data points and the number of data points per bin.\"\"\"\n    listToFill = []\n    if numPerBin >= totalNumDataPoints:\n        listToFill.append(totalNumDataPoints)\n    else:\n        currentNum = 0\n        while currentNum < totalNumDataPoints:\n            currentNum += numPerBin\n            if currentNum > totalNumDataPoints:\n                currentNum = totalNumDataPoints\n            if currentNum > minNum:\n                listToFill.append(currentNum)\n    return listToFill\n"
  },
  {
    "path": "armi/reactor/converters/parameterSweeps/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Parameter Sweeps package.\"\"\"\n"
  },
  {
    "path": "armi/reactor/converters/parameterSweeps/generalParameterSweepConverters.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Module for general core parameter sweeps.\"\"\"\n\nfrom armi.physics.neutronics.settings import (\n    CONF_EPS_EIG,\n    CONF_EPS_FSAVG,\n    CONF_EPS_FSPOINT,\n)\nfrom armi.reactor.converters.geometryConverters import GeometryConverter\n\n\nclass ParameterSweepConverter(GeometryConverter):\n    \"\"\"Abstract parameter sweep converter object.\"\"\"\n\n    PRIORITY = None\n\n    def __init__(self, cs, parameter):\n        GeometryConverter.__init__(self, cs)\n        self._parameter = parameter\n\n    def convert(self, r=None):\n        self._sourceReactor = r\n\n\nclass SettingsModifier(ParameterSweepConverter):\n    \"\"\"Modifies basic setting parameters.\"\"\"\n\n    def __init__(self, cs, settingToModify, parameter):\n        ParameterSweepConverter.__init__(self, cs, parameter)\n        self.modifier = settingToModify\n\n    def convert(self, r=None):\n        ParameterSweepConverter.convert(self, r)\n        sType = self._cs.getSetting(self.modifier).underlyingType\n        if sType is not type(None):\n            # NOTE: this won't work with \"new-style\" settings related to the plugin system.\n            # Using the type of the setting._default may be more appropriate if there are issues.\n            self._cs = self._cs.modified(newSettings={self.modifier: sType(self._parameter)})\n\n\nclass 
NeutronicConvergenceModifier(ParameterSweepConverter):\n    \"\"\"Adjusts the neutronics convergence parameters.\"\"\"\n\n    def convert(self, r=None):\n        ParameterSweepConverter.convert(self, r)\n        fs = 1.0e-12 + self._parameter * 1.0e-3\n\n        newSettings = {}\n        newSettings[CONF_EPS_FSAVG] = fs\n        newSettings[CONF_EPS_FSPOINT] = fs\n        newSettings[CONF_EPS_EIG] = 1.0e-14 + self._parameter * 1.0e-4\n\n        self._cs = self._cs.modified(newSettings=newSettings)\n"
  },
  {
    "path": "armi/reactor/converters/parameterSweeps/tests/__init__.py",
    "content": "# Copyright 2026 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"
  },
  {
    "path": "armi/reactor/converters/parameterSweeps/tests/test_paramSweepConverters.py",
    "content": "# Copyright 2021 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Module to test parameter sweep converters.\"\"\"\n\nimport os\nimport unittest\n\nfrom armi.physics.neutronics.settings import CONF_EPS_FSPOINT\nfrom armi.reactor.converters.parameterSweeps.generalParameterSweepConverters import (\n    NeutronicConvergenceModifier,\n    ParameterSweepConverter,\n    SettingsModifier,\n)\nfrom armi.testing import loadTestReactor\nfrom armi.tests import TEST_ROOT\n\nTHIS_DIR = os.path.dirname(__file__)\n\n\nclass TestParamSweepConverters(unittest.TestCase):\n    def setUp(self):\n        self.o, self.r = loadTestReactor(TEST_ROOT, inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\")\n        self.cs = self.o.cs\n\n    def test_paramSweepConverter(self):\n        \"\"\"Basic test of the param sweep converter.\"\"\"\n        con = ParameterSweepConverter(self.cs, \"FakeParam\")\n        self.assertEqual(con._parameter, \"FakeParam\")\n\n        con.convert(self.r)\n        self.assertEqual(con._sourceReactor, self.r)\n\n    def test_neutronicConvergenceModifier(self):\n        \"\"\"Super basic test of the Neutronic Convergence Modifier.\"\"\"\n        custom = NeutronicConvergenceModifier(self.cs, 1000)\n        self.assertEqual(custom._parameter, 1000)\n\n        custom.convert(self.r)\n        self.assertAlmostEqual(custom._cs[CONF_EPS_FSPOINT], 1, delta=1e-3)\n\n    def test_settingsModifier(self):\n        
\"\"\"Super basic test of the Settings Modifier.\"\"\"\n        con = SettingsModifier(self.cs, \"comment\", \"FakeParam\")\n        self.assertEqual(con._parameter, \"FakeParam\")\n\n        con.convert(self.r)\n        self.assertEqual(con._sourceReactor, self.r)\n\n        # NOTE: Settings objects are not modified, but we point to new objects\n        self.assertIn(\"Simple test input\", self.cs[\"comment\"])\n        self.assertEqual(con._cs[\"comment\"], \"FakeParam\")\n"
  },
  {
    "path": "armi/reactor/converters/pinTypeBlockConverters.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nUtilities that perturb specific types of block objects.\n\nThis code is relatively design-specific and will only work given\ncertain object designs. At the moment it only works on Block objects.\n\nNotes\n-----\nThese were once Block method and were moved here as part of an ongoing\neffort to remove design-specific assumptions from the reactor model.\n\nThese operations are shared by code that modifies objects in place during runtime\nand also for inputModifiers that change inputs for parameter sweeping.\n\n\"\"\"\n\nimport math\n\nfrom armi import runLog\nfrom armi.reactor.flags import Flags\n\n\ndef adjustSmearDensity(obj, value, bolBlock=None):\n    r\"\"\"\n    Modifies the *cold* smear density of a fuel pin by adding or removing fuel dimension.\n\n    Adjusts fuel dimension while keeping cladding ID constant\n\n    sd = fuel_r**2/clad_ir**2  =(fuel_od/2)**2 / (clad_id/2)**2 = fuel_od**2 / clad_id**2\n    new fuel_od = sqrt(sd*clad_id**2)\n\n    useful for optimization cases\n\n    Parameters\n    ----------\n    value : float\n        new smear density as a fraction.  This fraction must\n        evaluate between 0.0 and 1.0\n\n    bolBlock : Block, optional\n        See completeInitialLoading. Required for ECPT cases\n\n    \"\"\"\n    if value <= 0.0 or value > 1.0:\n        raise ValueError(\"Cannot modify smear density of {0} to {1}. 
Must be a positive fraction\".format(obj, value))\n    fuel = obj.getComponent(Flags.FUEL)\n    if not fuel:\n        runLog.warning(\n            \"Cannot modify smear density of {0} because it is not fuel\".format(obj),\n            single=True,\n            label=\"adjust smear density\",\n        )\n        return\n\n    clad = obj.getComponent(Flags.CLAD)\n    cladID = clad.getDimension(\"id\", cold=True)\n    fuelID = fuel.getDimension(\"id\", cold=True)\n\n    if fuelID > 0.0:  # Annular fuel (Adjust fuel ID to get new smear density)\n        fuelOD = fuel.getDimension(\"od\", cold=True)\n        newID = fuelOD * math.sqrt(1.0 - value)\n        fuel.setDimension(\"id\", newID)\n    else:  # Slug fuel (Adjust fuel OD to get new smear density)\n        newOD = math.sqrt(value * cladID**2)\n        fuel.setDimension(\"od\", newOD)\n\n    # update things like hm at BOC and smear density parameters.\n    obj.completeInitialLoading(bolBlock=bolBlock)\n\n\ndef adjustCladThicknessByOD(obj, value):\n    \"\"\"Modifies the cladding thickness by adjusting the cladding outer diameter.\"\"\"\n    clad = _getCladdingComponentToModify(obj, value)\n    if clad is None:\n        return\n    innerDiam = clad.getDimension(\"id\", cold=True)\n    clad.setDimension(\"od\", innerDiam + 2.0 * value)\n\n\ndef adjustCladThicknessByID(obj, value):\n    \"\"\"\n    Modifies the cladding thickness by adjusting the cladding inner diameter.\n\n    Notes\n    -----\n    This WILL adjust the fuel smear density\n    \"\"\"\n    clad = _getCladdingComponentToModify(obj, value)\n    if clad is None:\n        return\n    od = clad.getDimension(\"od\", cold=True)\n    clad.setDimension(\"id\", od - 2.0 * value)\n\n\ndef _getCladdingComponentToModify(obj, value):\n    clad = obj.getComponent(Flags.CLAD)\n    if not clad:\n        runLog.warning(\"{} does not have a cladding component to modify.\".format(obj))\n    if value < 0.0:\n        raise ValueError(\"Cannot modify {} on {} due to a 
negative modifier {}\".format(clad, obj, value))\n    return clad\n"
  },
  {
    "path": "armi/reactor/converters/tests/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"
  },
  {
    "path": "armi/reactor/converters/tests/test_assemblyAxialLinkage.py",
    "content": "# Copyright 2025 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\nfrom typing import TYPE_CHECKING, Callable, Type\nfrom unittest import TestCase\n\nfrom armi.reactor.assemblies import HexAssembly, grids\nfrom armi.reactor.blocks import HexBlock\nfrom armi.reactor.blueprints import Blueprints\nfrom armi.reactor.components import UnshapedComponent\nfrom armi.reactor.components.basicShapes import Circle, Hexagon, Rectangle\nfrom armi.reactor.components.complexShapes import Helix\nfrom armi.reactor.converters.axialExpansionChanger.assemblyAxialLinkage import (\n    AssemblyAxialLinkage,\n    AxialLink,\n    _checkOverlap,\n)\nfrom armi.reactor.converters.tests.test_axialExpansionChanger import (\n    AxialExpansionTestBase,\n    _buildDummySodium,\n    buildTestAssemblyWithFakeMaterial,\n)\nfrom armi.reactor.flags import Flags\nfrom armi.settings.caseSettings import Settings\n\nif TYPE_CHECKING:\n    from armi.reactor.components import Component\n\nTWOPIN_BLOCK = \"\"\"\n    fuel twoPin: &block_fuel_twoPin\n        grid name: twoPin\n        fuel 1: &component_fueltwoPin\n            shape: Circle\n            material: UZr\n            Tinput: 25.0\n            Thot: 600.0\n            id: 0.0\n            od: 0.8\n            latticeIDs: [1]\n        fuel 2:\n            <<: *component_fueltwoPin\n            latticeIDs: [2]\n        coolant:\n            shape: DerivedShape\n            material: Sodium\n            
Tinput: 450.0\n            Thot: 450.0\n        duct:\n            shape: Hexagon\n            material: HT9\n            Tinput: 25.0\n            Thot: 450.0\n            ip: 16.0\n            mult: 1.0\n            op: 16.6\n\"\"\"\n\nONEPIN_BLOCK = \"\"\"\n    fuel onePin: &block_fuel_onePin\n        grid name: onePin\n        fuel 1:\n            <<: *component_fueltwoPin\n        coolant:\n            shape: DerivedShape\n            material: Sodium\n            Tinput: 450.0\n            Thot: 450.0\n        duct:\n            shape: Hexagon\n            material: HT9\n            Tinput: 25.0\n            Thot: 450.0\n            ip: 16.0\n            mult: 1.0\n            op: 16.6\n\"\"\"\n\nCORRECT_ASSEMBLY = \"\"\"\n    fuel pass:\n        specifier: LA\n        blocks: [*block_fuel_twoPin, *block_fuel_twoPin]\n        height: [25.0, 25.0]\n        axial mesh points: [1, 1]\n        xs types: [A, A]\n\"\"\"\n\nWRONG_ASSEMBLY = \"\"\"\n    fuel fail:\n        specifier: LA\n        blocks: [*block_fuel_twoPin, *block_fuel_onePin]\n        height: [25.0, 25.0]\n        axial mesh points: [1, 1]\n        xs types: [A, A]\n\"\"\"\n\nTWOPIN_GRID = \"\"\"\n    twoPin:\n       geom: hex_corners_up\n       symmetry: full\n       lattice map: |\n         - - -  1 1 1 1\n           - - 1 1 2 1 1\n            - 1 1 1 1 1 1\n             1 2 1 2 1 2 1\n              1 1 1 1 1 1\n               1 1 2 1 1\n                1 1 1 1\n\"\"\"\n\nONEPIN_GRID = \"\"\"\n    onePin:\n       geom: hex_corners_up\n       symmetry: full\n       lattice map: |\n         - - -  1 1 1 1\n           - - 1 1 1 1 1\n            - 1 1 1 1 1 1\n             1 1 1 1 1 1 1\n              1 1 1 1 1 1\n               1 1 1 1 1\n                1 1 1 1\n\"\"\"\n\n\ndef createMultipinBlueprints(blockDef: list[str], assemDef: list[str], gridDef: list[str]) -> str:\n    multiPinDef = \"blocks:\"\n    for block in blockDef:\n        multiPinDef += block\n    multiPinDef += \"\\nassemblies:\"\n  
  for assem in assemDef:\n        multiPinDef += assem\n    multiPinDef += \"\\ngrids:\"\n    for grid in gridDef:\n        multiPinDef += grid\n\n    return multiPinDef\n\n\nclass TestAxialLinkHelper(TestCase):\n    \"\"\"Tests for the AxialLink dataclass / namedtuple like class.\"\"\"\n\n    @classmethod\n    def setUpClass(cls):\n        cls.LOWER_BLOCK = _buildDummySodium(98, 10)\n\n    def test_override(self):\n        \"\"\"Test lower attribute can be set after construction.\"\"\"\n        empty = AxialLink()\n        self.assertIsNone(empty.lower)\n        empty.lower = self.LOWER_BLOCK\n        self.assertIs(empty.lower, self.LOWER_BLOCK)\n\n    def test_construct(self):\n        \"\"\"Test lower attributes can be set at construction.\"\"\"\n        link = AxialLink(self.LOWER_BLOCK)\n        self.assertIs(link.lower, self.LOWER_BLOCK)\n\n\nclass TestAreAxiallyLinked(AxialExpansionTestBase):\n    \"\"\"Provide test coverage for the different cases in assemblyAxialLinkage.areAxiallyLinked.\"\"\"\n\n    def test_mismatchComponentType(self):\n        \"\"\"Case 4; component type mismatch.\"\"\"\n        compDims = (\"test\", \"FakeMat\", 25.0, 25.0)  # name, material, Tinput, Thot\n        comp1 = Circle(*compDims, od=1.0, id=0.0)\n        comp2 = Hexagon(*compDims, op=1.0, ip=0.0)\n        self.assertFalse(AssemblyAxialLinkage.areAxiallyLinked(comp1, comp2))\n\n    def test_unshapedComponents(self):\n        \"\"\"Case 1; unshaped components.\"\"\"\n        compDims = {\"Tinput\": 25.0, \"Thot\": 25.0}\n        comp1 = UnshapedComponent(\"unshaped_1\", \"FakeMat\", **compDims)\n        comp2 = UnshapedComponent(\"unshaped_2\", \"FakeMat\", **compDims)\n        self.assertFalse(AssemblyAxialLinkage.areAxiallyLinked(comp1, comp2))\n\n    def test_componentMult(self):\n        \"\"\"Case 3; multiplicity based linking.\"\"\"\n        compDims = (\"test\", \"FakeMat\", 25.0, 25.0)\n        comp1 = Circle(*compDims, od=1.0, id=0.0)\n        comp2 = 
Circle(*compDims, od=1.0, id=0.0)\n        # mult are same, comp1 and comp2 are linked\n        self.assertTrue(AssemblyAxialLinkage.areAxiallyLinked(comp1, comp2))\n        # mult is different, now they are not linked\n        comp2.p.mult = 2\n        self.assertFalse(AssemblyAxialLinkage.areAxiallyLinked(comp1, comp2))\n\n    def test_multiIndexLocation(self):\n        \"\"\"Case 2; block-grid based linking.\"\"\"\n        cs = Settings()\n        multiPinBPs = createMultipinBlueprints([TWOPIN_BLOCK], [CORRECT_ASSEMBLY], [TWOPIN_GRID])\n        with io.StringIO(multiPinBPs) as stream:\n            bps = Blueprints.load(stream)\n            bps._prepConstruction(cs)\n            lowerB: HexBlock = bps.assemblies[\"fuel pass\"][0]\n            upperB: HexBlock = bps.assemblies[\"fuel pass\"][1]\n            lowerFuel1, lowerFuel2 = lowerB.getComponents(Flags.FUEL)\n            upperFuel1, _upperFuel2 = upperB.getComponents(Flags.FUEL)\n            # same grid locs, are linked\n            self.assertTrue(AssemblyAxialLinkage.areAxiallyLinked(lowerFuel1, upperFuel1))\n            # different grid locs, are not linked\n            self.assertFalse(AssemblyAxialLinkage.areAxiallyLinked(lowerFuel2, upperFuel1))\n\n    def test_multiIndexLocation_Fail(self):\n        \"\"\"Case 2; block-grid based linking.\"\"\"\n        cs = Settings()\n        multiPinBPs = createMultipinBlueprints(\n            [TWOPIN_BLOCK, ONEPIN_BLOCK], [WRONG_ASSEMBLY], [TWOPIN_GRID, ONEPIN_GRID]\n        )\n        with io.StringIO(multiPinBPs) as stream:\n            bps = Blueprints.load(stream)\n            bps._prepConstruction(cs)\n            lowerB: HexBlock = bps.assemblies[\"fuel fail\"][0]\n            upperB: HexBlock = bps.assemblies[\"fuel fail\"][1]\n            lowerFuel1, lowerFuel2 = lowerB.getComponents(Flags.FUEL)\n            upperFuel1 = upperB.getComponent(Flags.FUEL)\n            # different/not exact match grid locs, are not linked\n            
self.assertFalse(AssemblyAxialLinkage.areAxiallyLinked(lowerFuel1, upperFuel1))\n            # different/not exact match grid locs, are not linked\n            self.assertFalse(AssemblyAxialLinkage.areAxiallyLinked(lowerFuel2, upperFuel1))\n\n\nclass TestCheckOverlap(AxialExpansionTestBase):\n    \"\"\"Test axial linkage between components via the AssemblyAxialLinkage._checkOverlap.\"\"\"\n\n    def setUp(self):\n        \"\"\"Contains common dimensions for all component class types.\"\"\"\n        super().setUp()\n        self.common = (\"test\", \"FakeMat\", 25.0, 25.0)  # name, material, Tinput, Thot\n\n    def runTest(\n        self,\n        componentsToTest: dict[Type[\"Component\"], dict[str, float]],\n        assertion: Callable,\n    ):\n        \"\"\"Runs various linkage tests.\n\n        Parameters\n        ----------\n        componentsToTest\n            dictionary keys indicate the component type for ``typeA`` and ``typeB`` checks. the values indicate the\n            neccessary geometry specifications of the ``typeA`` and ``typeB`` components.\n        assertion\n            unittest.TestCase assertion\n\n        Notes\n        -----\n        - components \"typeA\" and \"typeB\" are assumed to be candidates for axial linking\n        - two assertions: 1) comparing \"typeB\" component to \"typeA\"; 2) comparing \"typeA\" component to \"typeB\"\n        - the different assertions are particularly useful for comparing two annuli\n        \"\"\"\n        for method, dims in componentsToTest.items():\n            typeA = method(*self.common, **dims[0])\n            typeB = method(*self.common, **dims[1])\n            msg = f\"{self._testMethodName} failed for component type {str(method)}!\"\n            assertion(_checkOverlap(typeA, typeB), msg=msg)\n            assertion(_checkOverlap(typeB, typeA), msg=msg)\n\n    def test_overlappingSolidPins(self):\n        componentTypesToTest = {\n            Circle: [{\"od\": 0.5, \"id\": 0.0}, {\"od\": 1.0, 
\"id\": 0.0}],\n            Hexagon: [{\"op\": 0.5, \"ip\": 0.0}, {\"op\": 1.0, \"ip\": 0.0}],\n            Rectangle: [\n                {\n                    \"lengthOuter\": 0.5,\n                    \"lengthInner\": 0.0,\n                    \"widthOuter\": 0.5,\n                    \"widthInner\": 0.0,\n                },\n                {\n                    \"lengthOuter\": 1.0,\n                    \"lengthInner\": 0.0,\n                    \"widthOuter\": 1.0,\n                    \"widthInner\": 0.0,\n                },\n            ],\n            Helix: [\n                {\"od\": 0.5, \"axialPitch\": 1.0, \"helixDiameter\": 1.0},\n                {\"od\": 1.0, \"axialPitch\": 1.0, \"helixDiameter\": 1.0},\n            ],\n        }\n        self.runTest(componentTypesToTest, self.assertTrue)\n\n    def test_solidPinNotOverlappingAnnulus(self):\n        componentTypesToTest = {\n            Circle: [{\"od\": 0.5, \"id\": 0.0}, {\"od\": 1.0, \"id\": 0.6}],\n        }\n        self.runTest(componentTypesToTest, self.assertFalse)\n\n    def test_solidPinOverlappingWithAnnulus(self):\n        componentTypesToTest = {\n            Circle: [{\"od\": 0.7, \"id\": 0.0}, {\"od\": 1.0, \"id\": 0.6}],\n        }\n        self.runTest(componentTypesToTest, self.assertTrue)\n\n    def test_annularPinNotOverlappingWithAnnulus(self):\n        componentTypesToTest = {\n            Circle: [{\"od\": 0.6, \"id\": 0.3}, {\"od\": 1.0, \"id\": 0.6}],\n        }\n        self.runTest(componentTypesToTest, self.assertFalse)\n\n    def test_annularPinOverlappingWithAnnuls(self):\n        componentTypesToTest = {\n            Circle: [{\"od\": 0.7, \"id\": 0.3}, {\"od\": 1.0, \"id\": 0.6}],\n        }\n        self.runTest(componentTypesToTest, self.assertTrue)\n\n    def test_thinPinOverlapThickAnnulus(self):\n        \"\"\"Thin annular Pin overlapping with this annulus.\"\"\"\n        componentTypesToTest = {Circle: [{\"od\": 0.7, \"id\": 0.3}, {\"od\": 0.6, \"id\": 
0.5}]}\n        self.runTest(componentTypesToTest, self.assertTrue)\n\n    def test_AnnularHexOverlappingThickAnnularHex(self):\n        componentTypesToTest = {Hexagon: [{\"op\": 1.0, \"ip\": 0.8}, {\"op\": 1.2, \"ip\": 0.8}]}\n        self.runTest(componentTypesToTest, self.assertTrue)\n\n\nclass TestMultipleComponentLinkage(AxialExpansionTestBase):\n    \"\"\"Ensure that multiple component axial linkage can be caught.\"\"\"\n\n    def test_getLinkedComponents(self):\n        \"\"\"Test for multiple component axial linkage.\"\"\"\n        linked = AssemblyAxialLinkage(buildTestAssemblyWithFakeMaterial(\"FakeMat\"))\n        b = linked.a.getFirstBlockByType(\"fuel\")\n        fuelComp = b.getComponent(Flags.FUEL)\n        cladComp = b.getComponent(Flags.CLAD)\n        fuelComp.setDimension(\"od\", 0.5 * (cladComp.getDimension(\"id\") + cladComp.getDimension(\"od\")))\n        with self.assertRaisesRegex(\n            RuntimeError,\n            expected_regex=\"Multiple component axial linkages have been found for \",\n        ):\n            linked._getLinkedComponents(b, fuelComp)\n\n\nclass TestBlockLink(TestCase):\n    \"\"\"Test the ability to link blocks in an assembly.\"\"\"\n\n    def test_singleBlock(self):\n        \"\"\"Test an edge case where a single block exists.\"\"\"\n        b = _buildDummySodium(300, 50)\n        links = AssemblyAxialLinkage.getLinkedBlocks([b])\n        self.assertEqual(len(links), 1)\n        self.assertIn(b, links)\n        linked = links.pop(b)\n        self.assertIsNone(linked.lower)\n\n    def test_multiBlock(self):\n        \"\"\"Test links with multiple blocks.\"\"\"\n        N_BLOCKS = 5\n        blocks = [_buildDummySodium(300, 50) for _ in range(N_BLOCKS)]\n        links = AssemblyAxialLinkage.getLinkedBlocks(blocks)\n        first = blocks[0]\n        lowLink = links[first]\n        self.assertIsNone(lowLink.lower)\n        for ix in range(1, N_BLOCKS - 1):\n            current = blocks[ix]\n            below = 
blocks[ix - 1]\n            link = links[current]\n            self.assertIs(link.lower, below)\n        top = blocks[-1]\n        lastLink = links[top]\n        self.assertIs(lastLink.lower, blocks[-2])\n\n    def test_emptyBlocks(self):\n        \"\"\"Test even smaller edge case when no blocks are passed.\"\"\"\n        with self.assertRaisesRegex(ValueError, \"No blocks passed. Cannot determine links\"):\n            AssemblyAxialLinkage.getLinkedBlocks([])\n\n    def test_onAssembly(self):\n        \"\"\"Test assembly behavior is the same as sequence of blocks.\"\"\"\n        assembly = HexAssembly(\"test\")\n        N_BLOCKS = 5\n        assembly.spatialGrid = grids.AxialGrid.fromNCells(numCells=N_BLOCKS)\n        assembly.spatialGrid.armiObject = assembly\n\n        blocks = []\n        for _ in range(N_BLOCKS):\n            b = _buildDummySodium(300, 10)\n            assembly.add(b)\n            blocks.append(b)\n\n        fromBlocks = AssemblyAxialLinkage.getLinkedBlocks(blocks)\n        fromAssem = AssemblyAxialLinkage.getLinkedBlocks(assembly)\n\n        self.assertSetEqual(set(fromBlocks), set(fromAssem))\n\n        for b, bLink in fromBlocks.items():\n            aLink = fromAssem[b]\n            self.assertIs(aLink.lower, bLink.lower)\n"
  },
  {
    "path": "armi/reactor/converters/tests/test_axialExpansionChanger.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Test axialExpansionChanger.\"\"\"\n\nimport collections\nimport copy\nimport os\nimport unittest\nfrom statistics import mean\nfrom typing import Callable\n\nfrom numpy import array, linspace, zeros\n\nfrom armi import materials\nfrom armi.materials import _MATERIAL_NAMESPACE_ORDER, custom\nfrom armi.reactor.assemblies import HexAssembly, grids\nfrom armi.reactor.blocks import HexBlock\nfrom armi.reactor.components import Component, DerivedShape\nfrom armi.reactor.components.basicShapes import Circle, Hexagon\nfrom armi.reactor.converters.axialExpansionChanger import (\n    AssemblyAxialLinkage,\n    AxialExpansionChanger,\n    ExpansionData,\n    getSolidComponents,\n    iterSolidComponents,\n)\nfrom armi.reactor.flags import Flags\nfrom armi.testing import loadTestReactor\nfrom armi.tests import TEST_ROOT\nfrom armi.utils import units\nfrom armi.utils.customExceptions import InputError\n\n\nclass AxialExpansionTestBase(unittest.TestCase):\n    \"\"\"Common methods and variables for unit tests.\"\"\"\n\n    Steel_Component_Lst = [\n        Flags.DUCT,\n        Flags.GRID_PLATE,\n        Flags.HANDLING_SOCKET,\n        Flags.INLET_NOZZLE,\n        Flags.CLAD,\n        Flags.WIRE,\n        Flags.ACLP,\n        Flags.GUIDE_TUBE,\n    ]\n\n    @classmethod\n    def setUpClass(cls):\n        cls.origNameSpace = _MATERIAL_NAMESPACE_ORDER\n        # set namespace 
order for materials so that fake HT9 material can be found\n        materials.setMaterialNamespaceOrder(\n            [\n                \"armi.reactor.converters.tests.test_axialExpansionChanger\",\n                \"armi.materials\",\n            ]\n        )\n\n    def setUp(self):\n        self.obj = AxialExpansionChanger()\n        self.componentMass = collections.defaultdict(list)\n        self.componentDensity = collections.defaultdict(list)\n        self.totalAssemblySteelMass = []\n        self.blockZtop = collections.defaultdict(list)\n\n    @classmethod\n    def tearDownClass(cls):\n        # reset global namespace\n        materials.setMaterialNamespaceOrder(cls.origNameSpace)\n\n    def _getConservationMetrics(self, a):\n        \"\"\"Retrieves and stores various conservation metrics.\n\n        - useful for verification and unittesting\n        - Finds and stores:\n            1. mass and density of target components\n            2. mass of assembly steel\n            3. block heights\n        \"\"\"\n        totalSteelMass = 0.0\n        for b in a:\n            # store block ztop\n            self.blockZtop[b].append(b.p.ztop)\n            for c in iterSolidComponents(b):\n                # store mass and density of component\n                self.componentMass[c].append(c.getMass())\n                self.componentDensity[c].append(c.material.getProperty(\"density\", c.temperatureInK))\n                # store steel mass for assembly\n                if c.p.flags in self.Steel_Component_Lst:\n                    totalSteelMass += c.getMass()\n\n        self.totalAssemblySteelMass.append(totalSteelMass)\n\n\nclass Temperature:\n    \"\"\"Create and store temperature grid/field.\"\"\"\n\n    def __init__(\n        self,\n        L,\n        coldTemp=100.0,\n        hotInletTemp=360.0,\n        numTempGridPts=25,\n        tempSteps=100,\n        uniform=False,\n    ):\n        \"\"\"\n        Parameters\n        ----------\n        L : float\n          
  length of self.tempGrid. Should be the height of the corresponding assembly.\n        coldTemp : float\n            component as-built temperature\n        hotInletTemp : float\n            temperature closest to bottom of assembly. Interpreted as\n            inlet temp at nominal operations.\n        numTempGridPts : integer\n            the number of temperature measurement locations along the\n            z-axis of the assembly\n        tempSteps : integer\n            the number of temperatures to create (analogous to time steps)\n        \"\"\"\n        self.tempSteps = tempSteps\n        self.tempGrid = linspace(0.0, L, num=numTempGridPts)\n        self.tempField = zeros((tempSteps, numTempGridPts))\n        self._generateTempField(coldTemp, hotInletTemp, uniform)\n\n    def _generateTempField(self, coldTemp, hotInletTemp, uniform):\n        \"\"\"Generate temperature field and grid.\n\n        - all temperatures are in C\n        - temperature field : temperature readings (e.g., from T/H calculation)\n        - temperature grid : physical locations in which temperature is measured\n        \"\"\"\n        # Generate temp field\n        self.tempField[0, :] = coldTemp\n        if not uniform:\n            for i in range(1, self.tempSteps):\n                self.tempField[i, :] = (\n                    coldTemp\n                    + (i + 1) / (self.tempSteps / 3) * self.tempGrid\n                    + (hotInletTemp - coldTemp) * (i + 1) / self.tempSteps\n                )\n        else:\n            tmp = linspace(coldTemp, hotInletTemp, self.tempSteps)\n            for i in range(1, self.tempSteps):\n                self.tempField[i, :] = tmp[i]\n\n\nclass TestAxialExpansionHeight(AxialExpansionTestBase):\n    \"\"\"Verify that test assembly is expanded correctly.\"\"\"\n\n    def setUp(self):\n        super().setUp()\n        self.a = buildTestAssemblyWithFakeMaterial(name=\"FakeMat\")\n\n        self.temp = Temperature(self.a.getTotalHeight(), 
numTempGridPts=11, tempSteps=10)\n\n        # get the right/expected answer\n        self._generateComponentWiseExpectedHeight()\n\n        # do the axial expansion\n        for idt in range(self.temp.tempSteps):\n            self.obj.performThermalAxialExpansion(self.a, self.temp.tempGrid, self.temp.tempField[idt, :], setFuel=True)\n            self._getConservationMetrics(self.a)\n\n    def test_AssemblyAxialExpansionHeight(self):\n        \"\"\"Test the axial expansion gives correct heights for component-based expansion.\"\"\"\n        for idt in range(self.temp.tempSteps):\n            for ib, b in enumerate(self.a):\n                self.assertAlmostEqual(\n                    self.trueZtop[ib, idt],\n                    self.blockZtop[b][idt],\n                    places=7,\n                    msg=f\"Block height is not correct. {b}; Temp Step = {idt}\",\n                )\n\n    def _generateComponentWiseExpectedHeight(self):\n        \"\"\"Calculate the expected height, external of AssemblyAxialExpansion.\"\"\"\n        assem = buildTestAssemblyWithFakeMaterial(name=\"FakeMat\")\n        aveBlockTemp = zeros((len(assem), self.temp.tempSteps))\n        self.trueZtop = zeros((len(assem), self.temp.tempSteps))\n        self.trueHeight = zeros((len(assem), self.temp.tempSteps))\n        self.trueZtop[-1, :] = assem[-1].p.ztop\n\n        for idt in range(self.temp.tempSteps):\n            # get average block temp\n            for ib in range(len(assem)):\n                aveBlockTemp[ib, idt] = self._getAveTemp(ib, idt, assem)\n            # get block ztops\n            for ib, b in enumerate(assem[:-1]):\n                if ib > 0:\n                    b.p.zbottom = assem[ib - 1].p.ztop\n                if idt > 0:\n                    dll = (0.02 * aveBlockTemp[ib, idt] - 0.02 * aveBlockTemp[ib, idt - 1]) / (\n                        100.0 + 0.02 * aveBlockTemp[ib, idt - 1]\n                    )\n                    thermExpansionFactor = 1.0 + dll\n         
           b.p.ztop = thermExpansionFactor * b.p.height + b.p.zbottom\n                self.trueZtop[ib, idt] = b.p.ztop\n            # get block heights\n            for ib, b in enumerate(assem):\n                b.p.height = b.p.ztop - b.p.zbottom\n                self.trueHeight[ib, idt] = b.p.height\n\n    def _getAveTemp(self, ib, idt, assem):\n        tmpMapping = []\n        for idz, z in enumerate(self.temp.tempGrid):\n            if assem[ib].p.zbottom <= z <= assem[ib].p.ztop:\n                tmpMapping.append(self.temp.tempField[idt][idz])\n            if z > assem[ib].p.ztop:\n                break\n\n        return mean(tmpMapping)\n\n\nclass TestConservation(AxialExpansionTestBase):\n    \"\"\"Verify that conservation is maintained in assembly-level axial expansion.\"\"\"\n\n    def setUp(self):\n        super().setUp()\n        self.a = buildTestAssemblyWithFakeMaterial(name=\"FakeMat\")\n\n    def expandAssemForMassConservationTest(self):\n        \"\"\"Do the thermal expansion and store conservation metrics of interest.\"\"\"\n        # create a semi-realistic/physical variable temperature grid over the assembly\n        temp = Temperature(self.a.getTotalHeight(), numTempGridPts=11, tempSteps=10)\n        for idt in range(temp.tempSteps):\n            self.obj.performThermalAxialExpansion(\n                self.a,\n                temp.tempGrid,\n                temp.tempField[idt, :],\n            )\n            self._getConservationMetrics(self.a)\n\n    def test_thermExpansContractConserv_simple(self):\n        \"\"\"Thermally expand and then contract to ensure original state is recovered.\n\n        .. 
test:: Thermally expand and then contract to ensure original assembly is recovered.\n            :id: T_ARMI_AXIAL_EXP_THERM0\n            :tests: R_ARMI_AXIAL_EXP_THERM\n\n        Notes\n        -----\n        Temperature field is always isothermal and initially at 25 C.\n        \"\"\"\n        isothermalTempList = [100.0, 350.0, 250.0, 100.0]\n        a = buildTestAssemblyWithFakeMaterial(name=\"HT9\")\n        origMesh = a.getAxialMesh()[:-1]\n        origMasses, origNDens = self._getComponentMassAndNDens(a)\n        origDetailedNDens = self._setComponentDetailedNDens(a, origNDens)\n        axialExpChngr = AxialExpansionChanger(detailedAxialExpansion=True)\n\n        tempGrid = linspace(0.0, a.getHeight())\n        for temp in isothermalTempList:\n            # compute expected change in number densities\n            c = a[0][0]\n            radialGrowthFrac = c.material.getThermalExpansionDensityReduction(\n                prevTempInC=c.temperatureInC, newTempInC=temp\n            )\n            axialGrowthFrac = c.getThermalExpansionFactor(T0=c.temperatureInC, Tc=temp)\n            totGrowthFrac = axialGrowthFrac / radialGrowthFrac\n            # Set new isothermal temp and expand\n            tempField = array([temp] * len(tempGrid))\n            oldMasses, oldNDens = self._getComponentMassAndNDens(a)\n            oldDetailedNDens = self._getComponentDetailedNDens(a)\n            axialExpChngr.performThermalAxialExpansion(a, tempGrid, tempField)\n            newMasses, newNDens = self._getComponentMassAndNDens(a)\n            newDetailedNDens = self._getComponentDetailedNDens(a)\n            self._checkMass(oldMasses, newMasses)\n            self._checkNDens(oldNDens, newNDens, totGrowthFrac)\n            self._checkDetailedNDens(oldDetailedNDens, newDetailedNDens, totGrowthFrac)\n\n        # make sure that the assembly returned to the original state\n        for orig, new in zip(origMesh, a.getAxialMesh()):\n            self.assertAlmostEqual(orig, new, 
places=12)\n        self._checkMass(origMasses, newMasses)\n        self._checkNDens(origNDens, newNDens, 1.0)\n        self._checkDetailedNDens(origDetailedNDens, newDetailedNDens, 1.0)\n\n    def test_thermExpansContractionConserv_complex(self):\n        \"\"\"Thermally expand and then contract to ensure original state is recovered.\n\n        Notes\n        -----\n        Assemblies with liners are not supported and not considered for conservation testing.\n        \"\"\"\n        _oCold, rCold = loadTestReactor(\n            os.path.join(TEST_ROOT, \"detailedAxialExpansion\"),\n            customSettings={\"inputHeightsConsideredHot\": False},\n        )\n        assems = list(rCold.blueprints.assemblies.values())\n        for a in assems:\n            if a.hasFlags([Flags.MIDDLE, Flags.ANNULAR]):\n                # assemblies with the above flags have liners and conservation of them is not currently supported\n                continue\n            self.complexConservationTest(a)\n\n    def complexConservationTest(self, a: HexAssembly):\n        # get total assembly fluid mass pre-expansion\n        preExpAssemFluidMass = self._getTotalAssemblyFluidMass(a)\n\n        origMesh = a.getAxialMesh()[:-1]\n        origMasses, origNDens = self._getComponentMassAndNDens(a)\n        axialExpChngr = AxialExpansionChanger(detailedAxialExpansion=True)\n        axialExpChngr.setAssembly(a)\n        tempAdjust = [50.0, 50.0, -50.0, -50.0]\n        for temp in tempAdjust:\n            # adjust component temperatures by temp\n            for b in a:\n                if \"control\" in str(b):\n                    # skirting around a problem with test B4C temperature inputs\n                    continue\n                for c in iterSolidComponents(b):\n                    axialExpChngr.expansionData.updateComponentTemp(c, c.temperatureInC + temp)\n            # get U235/B10 and FE56 mass pre-expansion\n            prevFE56Mass = a.getMass(\"FE56\")\n            if 
a.hasFlags([Flags.FUEL, Flags.CONTROL]):\n                prevMass = a.getMass(\"U235\" if a.hasFlags(Flags.FUEL) else \"B10\")\n            # compute thermal expansion coeffs and expand\n            axialExpChngr.expansionData.computeThermalExpansionFactors()\n            axialExpChngr.axiallyExpandAssembly()\n            # ensure that total U235/B10 and FE56 mass is conserved post-expansion\n            newFE56Mass = a.getMass(\"FE56\")\n            self.assertAlmostEqual(newFE56Mass / prevFE56Mass, 1.0, places=14, msg=f\"{a}\")\n            if a.hasFlags([Flags.FUEL, Flags.CONTROL]):\n                newMass = a.getMass(\"U235\" if a.hasFlags(Flags.FUEL) else \"B10\")\n                self.assertAlmostEqual(newMass / prevMass, 1.0, places=14, msg=f\"{a}\")\n\n        newMasses, newNDens = self._getComponentMassAndNDens(a)\n        # make sure that the assembly returned to the original state\n        for orig, new in zip(origMesh, a.getAxialMesh()):\n            self.assertAlmostEqual(orig, new, places=12, msg=f\"{a}\")\n        self._checkMass(origMasses, newMasses)\n        self._checkNDens(origNDens, newNDens, 1.0)\n\n        # get total assembly fluid mass post-expansion\n        postExpAssemFluidMass = self._getTotalAssemblyFluidMass(a)\n        # verify that the total assembly fluid mass is preserved through expansion\n        self.assertAlmostEqual(preExpAssemFluidMass, postExpAssemFluidMass, places=11)\n\n    def test_expansionContractionConservation(self):\n        \"\"\"Expand all components and then contract back to original state.\n\n        .. 
test:: Expand all components and then contract back to original state.\n            :id: T_ARMI_AXIAL_EXP_PRESC0\n            :tests: R_ARMI_AXIAL_EXP_PRESC\n\n        Notes\n        -----\n        - uniform expansion over all components within the assembly\n        - 10 total expansion steps: 5 at +1.01 L1/L0, and 5 at -(1.01^-1) L1/L0\n        \"\"\"\n        a = buildTestAssemblyWithFakeMaterial(name=\"FakeMat\")\n        axExpChngr = AxialExpansionChanger()\n        origMesh = a.getAxialMesh()\n        origMasses, origNDens = self._getComponentMassAndNDens(a)\n        componentLst = [c for b in a for c in iterSolidComponents(b)]\n        expansionGrowthFrac = 1.01\n        contractionGrowthFrac = 1.0 / expansionGrowthFrac\n        for i in range(0, 10):\n            if i < 5:\n                growthFrac = expansionGrowthFrac\n                fracLst = growthFrac + zeros(len(componentLst))\n            else:\n                growthFrac = contractionGrowthFrac\n                fracLst = growthFrac + zeros(len(componentLst))\n            oldMasses, oldNDens = self._getComponentMassAndNDens(a)\n            # do the expansion\n            axExpChngr.performPrescribedAxialExpansion(a, componentLst, fracLst, setFuel=True)\n            newMasses, newNDens = self._getComponentMassAndNDens(a)\n            self._checkMass(oldMasses, newMasses)\n            self._checkNDens(oldNDens, newNDens, growthFrac)\n\n        # make sure that the assembly returned to the original state\n        for orig, new in zip(origMesh, a.getAxialMesh()):\n            self.assertAlmostEqual(orig, new, places=13)\n        self._checkMass(origMasses, newMasses)\n        self._checkNDens(origNDens, newNDens, 1.0)\n\n    def _checkMass(self, prevMass, newMass):\n        for prev, new in zip(prevMass.values(), newMass.values()):\n            self.assertAlmostEqual(prev, new, places=11)\n\n    def _checkNDens(self, prevNDen, newNDens, ratio):\n        for prevComp, newComp in zip(prevNDen.values(), 
newNDens.values()):\n            self.assertEqual(len(prevComp), len(newComp))\n            for nuc in prevComp.keys():\n                # some ndens values are 0.0, only check non-zero values\n                if prevComp[nuc]:\n                    self.assertAlmostEqual(prevComp[nuc] / newComp[nuc], ratio)\n\n    def _checkDetailedNDens(self, prevDetailedNDen, newDetailedNDens, ratio):\n        \"\"\"Check whether the detailedNDens of two input dictionaries containing the detailedNDens\n        arrays for all components of an assembly are conserved.\n        \"\"\"\n        for prevComp, newComp in zip(prevDetailedNDen.values(), newDetailedNDens.values()):\n            for prev, new in zip(prevComp, newComp):\n                if prev:\n                    self.assertAlmostEqual(prev / new, ratio, msg=f\"{prev} / {new}\")\n\n    @staticmethod\n    def _getComponentMassAndNDens(a):\n        masses = {}\n        nDens = {}\n        for b in a:\n            for c in iterSolidComponents(b):\n                masses[c] = c.getMass()\n                nDens[c] = c.getNumberDensities()\n        return masses, nDens\n\n    @staticmethod\n    def _setComponentDetailedNDens(a, nDens):\n        \"\"\"Returns a dictionary that contains detailedNDens for all components in an assembly object\n        input which are set to the corresponding component number densities from a number density\n        dictionary input.\n        \"\"\"\n        detailedNDens = {}\n        for b in a:\n            for c in getSolidComponents(b):\n                c.p.detailedNDens = copy.deepcopy([val for val in nDens[c].values()])\n                detailedNDens[c] = c.p.detailedNDens\n        return detailedNDens\n\n    @staticmethod\n    def _getComponentDetailedNDens(a):\n        \"\"\"Returns a dictionary containing all solid components and their corresponding\n        detailedNDens from an assembly object input.\n        \"\"\"\n        detailedNDens = {}\n        for b in a:\n            for c in 
getSolidComponents(b):\n                detailedNDens[c] = copy.deepcopy(c.p.detailedNDens)\n        return detailedNDens\n\n    def test_targetComponentMassConservation(self):\n        \"\"\"Tests mass conservation for target components.\"\"\"\n        self.expandAssemForMassConservationTest()\n        for cName, masses in self.componentMass.items():\n            for i in range(1, len(masses)):\n                self.assertAlmostEqual(masses[i], masses[i - 1], msg=f\"{cName} mass not right\")\n\n        for cName, density in self.componentDensity.items():\n            for i in range(1, len(density)):\n                self.assertLess(density[i], density[i - 1], msg=f\"{cName} density not right.\")\n\n        for i in range(1, len(self.totalAssemblySteelMass)):\n            self.assertAlmostEqual(\n                self.totalAssemblySteelMass[i],\n                self.totalAssemblySteelMass[i - 1],\n                msg=\"Total assembly steel mass is not conserved.\",\n            )\n\n    def test_noMovementACLP(self):\n        \"\"\"Ensures the above core load pad (ACLP) does not move during fuel-only expansion.\n\n        .. test:: Ensure the ACLP does not move during fuel-only expansion.\n            :id: T_ARMI_AXIAL_EXP_PRESC1\n            :tests: R_ARMI_AXIAL_EXP_PRESC\n\n        .. 
test:: Ensure the component volumes are correctly updated during prescribed expansion.\n            :id: T_ARMI_AXIAL_EXP_PRESC2\n            :tests: R_ARMI_AXIAL_EXP_PRESC\n        \"\"\"\n        # build test assembly with ACLP\n        assembly = HexAssembly(\"testAssemblyType\")\n        assembly.spatialGrid = grids.AxialGrid.fromNCells(numCells=1)\n        assembly.spatialGrid.armiObject = assembly\n        assembly.add(_buildTestBlock(\"shield\", \"FakeMat\", 100.0, 10.0))\n        assembly.add(_buildTestBlock(\"fuel\", \"FakeMat\", 100.0, 10.0))\n        assembly.add(_buildTestBlock(\"fuel\", \"FakeMat\", 100.0, 10.0))\n        assembly.add(_buildTestBlock(\"plenum\", \"FakeMat\", 100.0, 10.0, True))\n        assembly.add(_buildTestBlock(\"aclp\", \"FakeMat\", 100.0, 10.0, True))  # \"aclp plenum\" also works\n        assembly.add(_buildTestBlock(\"plenum\", \"FakeMat\", 100.0, 10.0, True))\n        assembly.add(_buildDummySodium(100.0, 10.0))\n        assembly.calculateZCoords()\n        assembly.reestablishBlockOrder()\n\n        # get zCoords for aclp\n        aclp = assembly.getChildrenWithFlags(Flags.ACLP)[0]\n        aclpZTop = aclp.p.ztop\n        aclpZBottom = aclp.p.zbottom\n\n        # expand fuel\n        # get fuel components\n        cList = [c for b in assembly for c in b if c.hasFlags(Flags.FUEL)]\n        # 1.01 L1/L0 growth of fuel components\n        pList = zeros(len(cList)) + 1.01\n        chngr = AxialExpansionChanger()\n        chngr.performPrescribedAxialExpansion(assembly, cList, pList, setFuel=True)\n\n        # do assertion\n        self.assertEqual(\n            aclpZBottom,\n            aclp.p.zbottom,\n            msg=\"ACLP zbottom has changed. It should not with fuel component only expansion!\",\n        )\n        self.assertEqual(\n            aclpZTop,\n            aclp.p.ztop,\n            msg=\"ACLP ztop has changed. 
It should not with fuel component only expansion!\",\n        )\n\n        # verify that the component volumes are correctly updated\n        for b in assembly:\n            for c in b:\n                self.assertAlmostEqual(\n                    c.getArea() * b.getHeight(),\n                    c.getVolume(),\n                    places=12,\n                )\n\n    @staticmethod\n    def _getTotalAssemblyFluidMass(assembly) -> float:\n        totalAssemblyFluidMass = 0.0\n        for b in assembly:\n            for c in b:\n                if isinstance(c.material, materials.material.Fluid):\n                    totalAssemblyFluidMass += c.getMass()\n        return totalAssemblyFluidMass\n\n    def test_reset(self):\n        self.obj.setAssembly(self.a)\n        self.obj.reset()\n        self.assertIsNone(self.obj.linked)\n        self.assertIsNone(self.obj.expansionData)\n\n    def test_computeThermalExpansionFactors(self):\n        \"\"\"Ensure expansion factors are as expected.\"\"\"\n        self.obj.setAssembly(self.a)\n        stdThermExpFactor = {}\n        newTemp = 500.0\n        # apply new temp to the pin and clad components of each block\n        for b in self.a:\n            for c in b.iterComponents([Flags.FUEL, Flags.CLAD]):\n                stdThermExpFactor[c] = c.getThermalExpansionFactor()\n                self.obj.expansionData.updateComponentTemp(c, newTemp)\n\n        self.obj.expansionData.computeThermalExpansionFactors()\n\n        # skip dummy block, it's just coolant and doesn't expand.\n        for b in self.a[:-1]:\n            for c in b:\n                if c.hasFlags([Flags.FUEL, Flags.CLAD]):\n                    self.assertNotEqual(\n                        stdThermExpFactor[c],\n                        self.obj.expansionData.getExpansionFactor(c),\n                        msg=f\"Block {b}, Component {c}, thermExpCoeff not right.\\n\",\n                    )\n                else:\n                    self.assertEqual(\n          
              self.obj.expansionData.getExpansionFactor(c),\n                        1.0,\n                        msg=f\"Block {b}, Component {c}, thermExpCoeff not right.\\n\",\n                    )\n\n\nclass TestManageCoreMesh(unittest.TestCase):\n    \"\"\"Verify that manage core mesh unifies the mesh for detailedAxialExpansion: False.\"\"\"\n\n    def setUp(self):\n        self.axialExpChngr = AxialExpansionChanger()\n        _o, self.r = loadTestReactor(os.path.join(TEST_ROOT, \"detailedAxialExpansion\"))\n\n        self.oldAxialMesh = self.r.core.p.axialMesh\n        self.componentLst = []\n        for b in self.r.core.refAssem:\n            if b.hasFlags([Flags.FUEL, Flags.PLENUM]):\n                self.componentLst.extend(getSolidComponents(b))\n        # expand refAssem by 1.01 L1/L0\n        expansionGrowthFracs = 1.01 + zeros(len(self.componentLst))\n        (\n            self.origDetailedNDens,\n            self.origVolumes,\n        ) = self._getComponentDetailedNDensAndVol(self.componentLst)\n        self.axialExpChngr.performPrescribedAxialExpansion(\n            self.r.core.refAssem, self.componentLst, expansionGrowthFracs, setFuel=True\n        )\n\n    def test_manageCoreMesh(self):\n        self.axialExpChngr.manageCoreMesh(self.r)\n        newAxialMesh = self.r.core.p.axialMesh\n        # all solid components in fuel + plenum block expand so the first three points are not expected to change\n        for old, new in zip(self.oldAxialMesh[3:-1], newAxialMesh[3:-1]):\n            self.assertLess(old, new)\n\n    def test_componentConservation(self):\n        self.axialExpChngr.manageCoreMesh(self.r)\n        newDetailedNDens, newVolumes = self._getComponentDetailedNDensAndVol(self.componentLst)\n        for c in newVolumes.keys():\n            self._checkMass(\n                self.origDetailedNDens[c],\n                self.origVolumes[c],\n                newDetailedNDens[c],\n                newVolumes[c],\n                c,\n            
)\n\n    def _getComponentDetailedNDensAndVol(self, componentLst):\n        \"\"\"Returns a tuple containing dictionaries of detailedNDens and volumes of all components\n        from a component list input.\n        \"\"\"\n        detailedNDens = {}\n        volumes = {}\n        for c in componentLst:\n            c.p.detailedNDens = [val for val in c.getNumberDensities().values()]\n            detailedNDens[c] = copy.deepcopy(c.p.detailedNDens)\n            volumes[c] = c.getVolume()\n        return (detailedNDens, volumes)\n\n    def _checkMass(self, origDetailedNDens, origVolume, newDetailedNDens, newVolume, c):\n        for prevMass, newMass in zip(origDetailedNDens * origVolume, newDetailedNDens * newVolume):\n            if c.parent.hasFlags(Flags.FUEL):\n                self.assertAlmostEqual(prevMass, newMass, delta=1e-12, msg=f\"{c}, {c.parent}\")\n            else:\n                # should not conserve mass here as it is structural material above active fuel\n                self.assertAlmostEqual(newMass / prevMass, 1.00, msg=f\"{c}, {c.parent}\")\n\n\nclass TestExceptions(AxialExpansionTestBase):\n    \"\"\"Verify exceptions are caught.\"\"\"\n\n    def setUp(self):\n        super().setUp()\n        self.a = buildTestAssemblyWithFakeMaterial(name=\"FakeMatException\")\n        self.obj.setAssembly(self.a)\n\n    def test_isTopDummyBlockPresent(self):\n        # build test assembly without dummy\n        assembly = HexAssembly(\"testAssemblyType\")\n        assembly.spatialGrid = grids.AxialGrid.fromNCells(numCells=1)\n        assembly.spatialGrid.armiObject = assembly\n        assembly.add(_buildTestBlock(\"shield\", \"FakeMat\", 100.0, 10.0))\n        assembly.calculateZCoords()\n        assembly.reestablishBlockOrder()\n        # create instance of expansion changer\n        obj = AxialExpansionChanger(detailedAxialExpansion=True)\n        with self.assertRaisesRegex(\n            RuntimeError,\n            \"Cannot run detailedAxialExpansion 
without a dummy block at the top of the assembly!\",\n        ):\n            obj.setAssembly(assembly)\n\n    def test_setExpansionFactors(self):\n        cList = self.a.getFirstBlock().getChildren()\n        with self.assertRaisesRegex(\n            RuntimeError,\n            \"Number of components and expansion fractions must be the same!\",\n        ):\n            self.obj.expansionData.setExpansionFactors(cList, range(len(cList) + 1))\n\n        with self.assertRaisesRegex(\n            RuntimeError,\n            \"L1/L0, is not physical. Expansion fractions should be greater than 0.0.\",\n        ):\n            self.obj.expansionData.setExpansionFactors(cList, zeros(len(cList)))\n\n        with self.assertRaisesRegex(\n            RuntimeError,\n            \"L1/L0, is not physical. Expansion fractions should be greater than 0.0.\",\n        ):\n            self.obj.expansionData.setExpansionFactors(cList, zeros(len(cList)) - 10.0)\n\n    def test_updateCompTempsBy1DTempFieldValError(self):\n        tempGrid = [5.0, 15.0, 35.0]\n        tempField = linspace(100.0, 310.0, 3)\n        with self.assertRaisesRegex(ValueError, \"has no temperature points within it!\"):\n            self.obj.expansionData.updateComponentTempsBy1DTempField(tempGrid, tempField)\n\n    def test_updateCompTempsBy1DTempFieldError(self):\n        tempGrid = [5.0, 15.0, 35.0]\n        tempField = linspace(100.0, 310.0, 10)\n        with self.assertRaisesRegex(RuntimeError, \"tempGrid and tempField must have the same length.\"):\n            self.obj.expansionData.updateComponentTempsBy1DTempField(tempGrid, tempField)\n\n    def test_AssemblyAxialExpansionException(self):\n        \"\"\"Test that negative height exception is caught.\"\"\"\n        # manually set axial exp target component for code coverage\n        self.a[0].p.axialExpTargetComponent = self.a[0][0].name\n        temp = Temperature(self.a.getTotalHeight(), numTempGridPts=11, tempSteps=10)\n        with 
self.assertRaisesRegex(ArithmeticError, \"has a negative height\"):\n            for idt in range(temp.tempSteps):\n                self.obj.expansionData.updateComponentTempsBy1DTempField(temp.tempGrid, 2 * temp.tempField[idt, :])\n                self.obj.expansionData.computeThermalExpansionFactors()\n                self.obj.axiallyExpandAssembly()\n\n    def test_isFuelLocked(self):\n        \"\"\"Ensures that the RuntimeError statement in ExpansionData::_isFuelLocked is raised appropriately.\n\n        Notes\n        -----\n        This is implemented by creating a fuel block that contains no fuel component and passing it to\n        ExpansionData._isFuelLocked.\n        \"\"\"\n        expdata = ExpansionData(HexAssembly(\"testAssemblyType\"), setFuel=True, expandFromTinputToThot=False)\n        bNoFuel = HexBlock(\"fuel\", height=10.0)\n        shieldDims = {\n            \"Tinput\": 100.0,\n            \"Thot\": 100.0,\n            \"od\": 0.76,\n            \"id\": 0.00,\n            \"mult\": 127.0,\n        }\n        shield = Circle(\"shield\", \"FakeMat\", **shieldDims)\n        bNoFuel.add(shield)\n        with self.assertRaisesRegex(RuntimeError, f\"No fuel component within {bNoFuel}!\"):\n            expdata._isFuelLocked(bNoFuel)\n\n\nclass TestDetermineTargetComponent(AxialExpansionTestBase):\n    \"\"\"Verify determineTargetComponent method is properly updating _componentDeterminesBlockHeight.\"\"\"\n\n    def setUp(self):\n        super().setUp()\n        self.expData = ExpansionData([], setFuel=True, expandFromTinputToThot=True)\n        coolDims = {\"Tinput\": 100.0, \"Thot\": 100.0}\n        self.coolant = DerivedShape(\"coolant\", \"Sodium\", **coolDims)\n\n    def test_getTargetComponent(self):\n        b = HexBlock(\"fuel\", height=10.0)\n        fuelDims = {\"Tinput\": 100.0, \"Thot\": 100.0, \"od\": 0.76, \"id\": 0.00, \"mult\": 127.0}\n        cladDims = {\"Tinput\": 100.0, \"Thot\": 100.0, \"od\": 0.80, \"id\": 0.77, \"mult\": 
127.0}\n        fuel = Circle(\"fuel\", \"FakeMat\", **fuelDims)\n        clad = Circle(\"clad\", \"FakeMat\", **cladDims)\n        b.add(fuel)\n        b.add(clad)\n        b.add(self.coolant)\n        self.expData.setTargetComponent(b, True)\n        self.assertEqual(fuel, self.expData.getTargetComponent(b))\n\n    def test_getTargetComponent_NoneFound(self):\n        b = HexBlock(\"fuel\", height=10.0)\n        with self.assertRaisesRegex(RuntimeError, f\"No target component found for {b} in\"):\n            self.expData.getTargetComponent(b)\n\n    def test_determineTargetComponent(self):\n        \"\"\"Provides coverage for searching TARGET_FLAGS_IN_PREFERRED_ORDER.\"\"\"\n        b = HexBlock(\"fuel\", height=10.0)\n        fuelDims = {\"Tinput\": 100.0, \"Thot\": 100.0, \"od\": 0.76, \"id\": 0.00, \"mult\": 127.0}\n        cladDims = {\"Tinput\": 100.0, \"Thot\": 100.0, \"od\": 0.80, \"id\": 0.77, \"mult\": 127.0}\n        fuel = Circle(\"fuel\", \"FakeMat\", **fuelDims)\n        clad = Circle(\"clad\", \"FakeMat\", **cladDims)\n        b.add(fuel)\n        b.add(clad)\n        b.add(self.coolant)\n        self._checkTarget(b, fuel)\n\n    def _checkTarget(self, b: HexBlock, expected: Component):\n        \"\"\"Call determineTargetMethod and compare what we get with expected.\"\"\"\n        # Value unset initially\n        self.assertFalse(b.p.axialExpTargetComponent)\n        target = self.expData.determineTargetComponent(b)\n        self.assertIs(target, expected)\n        self.assertTrue(\n            self.expData.isTargetComponent(target),\n            msg=f\"determineTargetComponent failed to recognize intended component: {expected}\",\n        )\n        self.assertEqual(\n            b.p.axialExpTargetComponent,\n            expected.name,\n            msg=f\"determineTargetComponent failed to recognize intended component: {expected}\",\n        )\n\n    def test_determineTargetCompBlockWithMultiFlags(self):\n        \"\"\"Provides coverage for 
searching TARGET_FLAGS_IN_PREFERRED_ORDER with multiple flags.\"\"\"\n        # build a block that has two flags as well as a component matching each\n        b = HexBlock(\"fuel poison\", height=10.0)\n        fuelDims = {\"Tinput\": 100.0, \"Thot\": 100.0, \"od\": 0.9, \"id\": 0.5, \"mult\": 200.0}\n        poisonDims = {\"Tinput\": 100.0, \"Thot\": 100.0, \"od\": 0.5, \"id\": 0.0, \"mult\": 10.0}\n        fuel = Circle(\"fuel\", \"FakeMat\", **fuelDims)\n        poison = Circle(\"poison\", \"FakeMat\", **poisonDims)\n        b.add(fuel)\n        b.add(poison)\n        b.add(self.coolant)\n        self._checkTarget(b, fuel)\n\n    def test_specifyTargetComp_NotFound(self):\n        \"\"\"Ensure RuntimeError gets raised when no target component is found.\"\"\"\n        b = HexBlock(\"fuel\", height=10.0)\n        b.add(self.coolant)\n        b.setType(\"fuel\")\n        with self.assertRaisesRegex(RuntimeError, \"No target component found!\"):\n            self.expData.determineTargetComponent(b)\n        with self.assertRaisesRegex(RuntimeError, \"No target component found!\"):\n            self.expData.determineTargetComponent(b, Flags.FUEL)\n\n    def test_specifyTargetComp_singleSolid(self):\n        \"\"\"Ensures that specifyTargetComponent is smart enough to set the only solid as the target component.\"\"\"\n        b = HexBlock(\"plenum\", height=10.0)\n        ductDims = {\"Tinput\": 100.0, \"Thot\": 100.0, \"op\": 17, \"ip\": 0.0, \"mult\": 1.0}\n        duct = Hexagon(\"duct\", \"FakeMat\", **ductDims)\n        b.add(duct)\n        b.add(self.coolant)\n        b.getVolumeFractions()\n        b.setType(\"plenum\")\n        self._checkTarget(b, duct)\n\n    def test_specifyTargetComp_MultiFound(self):\n        \"\"\"Ensure RuntimeError is hit when multiple target components are found.\n\n        Notes\n        -----\n        This can occur if a block has a mixture of fuel types. 
E.g., different fuel materials,\n        or different fuel geometries.\n        \"\"\"\n        b = HexBlock(\"fuel\", height=10.0)\n        fuelAnnularDims = {\n            \"Tinput\": 100.0,\n            \"Thot\": 100.0,\n            \"od\": 0.9,\n            \"id\": 0.5,\n            \"mult\": 100.0,\n        }\n        fuelDims = {\"Tinput\": 100.0, \"Thot\": 100.0, \"od\": 1.0, \"id\": 0.0, \"mult\": 10.0}\n        fuel = Circle(\"fuel\", \"FakeMat\", **fuelDims)\n        fuelAnnular = Circle(\"fuel annular\", \"FakeMat\", **fuelAnnularDims)\n        b.add(fuel)\n        b.add(fuelAnnular)\n        b.add(self.coolant)\n        b.setType(\"FuelBlock\")\n        with self.assertRaisesRegex(\n            RuntimeError,\n            \"Cannot have more than one component within a block that has the target flag!\",\n        ):\n            self.expData.determineTargetComponent(b, flagOfInterest=Flags.FUEL)\n\n    def test_manuallySetTargetComponent(self):\n        \"\"\"\n        Ensures that target components can be manually set (is done in practice via blueprints).\n\n        .. 
test:: Allow user-specified target axial expansion components on a given block.\n            :id: T_ARMI_MANUAL_TARG_COMP\n            :tests: R_ARMI_MANUAL_TARG_COMP\n        \"\"\"\n        b = HexBlock(\"dummy\", height=10.0)\n        ductDims = {\"Tinput\": 100.0, \"Thot\": 100.0, \"op\": 17, \"ip\": 0.0, \"mult\": 1.0}\n        duct = Hexagon(\"duct\", \"FakeMat\", **ductDims)\n        b.add(duct)\n        b.add(self.coolant)\n        b.getVolumeFractions()\n        b.setType(\"duct\")\n\n        # manually set target component\n        b.setAxialExpTargetComp(duct)\n        self.assertEqual(\n            b.p.axialExpTargetComponent,\n            duct.name,\n        )\n\n        # check that target component is stored on expansionData object correctly\n        self.expData._componentDeterminesBlockHeight[b.getComponentByName(b.p.axialExpTargetComponent)] = True\n        self.assertTrue(self.expData.isTargetComponent(duct))\n\n\nclass TestGetSolidComponents(unittest.TestCase):\n    \"\"\"Verify that getSolidComponents returns just solid components.\"\"\"\n\n    def test_getSolidComponents(self):\n        \"\"\"Show that getSolidComponents produces a list of solids, and is consistent with iterSolidComponents.\"\"\"\n        a = buildTestAssemblyWithFakeMaterial(name=\"HT9\")\n        for b in a:\n            solids = getSolidComponents(b)\n            ids = set(map(id, solids))\n            for c in iterSolidComponents(b):\n                self.assertNotEqual(c.material.name, \"Sodium\")\n                self.assertIn(id(c), ids, msg=f\"Found non-solid {c}\")\n                ids.remove(id(c))\n            self.assertFalse(\n                ids,\n                msg=\"Inconsistency between getSolidComponents and iterSolidComponents\",\n            )\n\n    def test_checkForBlocksWithoutSolids(self):\n        a = buildTestAssemblyWithFakeMaterial(name=\"Sodium\")\n        changer = AxialExpansionChanger()\n        changer.linked = AssemblyAxialLinkage(a)\n        
with self.assertRaisesRegex(\n            InputError,\n            expected_regex=\"is constructed improperly for use with the axial expansion changer\",\n        ):\n            changer._checkForBlocksWithoutSolids()\n\n\nclass TestInputHeightsConsideredHot(unittest.TestCase):\n    \"\"\"Verify thermal expansion for process loading of core.\"\"\"\n\n    def setUp(self):\n        \"\"\"This test uses a different armiRun.yaml than the default.\"\"\"\n        o, r = loadTestReactor(\n            os.path.join(TEST_ROOT, \"detailedAxialExpansion\"),\n            customSettings={\"inputHeightsConsideredHot\": True},\n        )\n\n        self.stdAssems = list(r.core)\n\n        oCold, rCold = loadTestReactor(\n            os.path.join(TEST_ROOT, \"detailedAxialExpansion\"),\n            customSettings={\"inputHeightsConsideredHot\": False},\n        )\n\n        self.testAssems = list(rCold.core)\n\n    def test_coldAssemblyExpansion(self):\n        \"\"\"Block heights are cold and should be expanded.\n\n        .. test:: Preserve the total height of a compatible ARMI assembly.\n            :id: T_ARMI_ASSEM_HEIGHT_PRES\n            :tests: R_ARMI_ASSEM_HEIGHT_PRES\n\n        .. test:: Axial expansion can be prescribed in blueprints for core construction.\n            :id: T_ARMI_INP_COLD_HEIGHT\n            :tests: R_ARMI_INP_COLD_HEIGHT\n\n        Notes\n        -----\n        For R_ARMI_INP_COLD_HEIGHT, the action of axial expansion occurs in setUp() during core\n        construction, specifically in\n        :py:meth:`constructAssem <armi.reactor.blueprints.Blueprints.constructAssem>`\n\n        Two assertions here:\n            1. total assembly height should be preserved (through use of top dummy block)\n            2. in armi.tests.detailedAxialExpansion.refSmallReactorBase.yaml, Thot > Tinput\n               resulting in a non-zero DeltaT. 
Each block in the expanded case should therefore be a\n               different height than that of the standard case.\n        \"\"\"\n        for aStd, aExp in zip(self.stdAssems, self.testAssems):\n            self.assertAlmostEqual(\n                aStd.getTotalHeight(),\n                aExp.getTotalHeight(),\n                msg=\"Std Assem {0} ({1}) and Exp Assem {2} ({3}) are not the same height!\".format(\n                    aStd, aStd.getTotalHeight(), aExp, aExp.getTotalHeight()\n                ),\n            )\n            for bStd, bExp in zip(aStd, aExp):\n                if any(isinstance(c.material, custom.Custom) for c in bStd):\n                    checkColdBlockHeight(bStd, bExp, self.assertAlmostEqual, \"the same\")\n                else:\n                    checkColdBlockHeight(bStd, bExp, self.assertNotEqual, \"different\")\n                    if bStd.hasFlags(Flags.FUEL):\n                        self.checkColdHeightBlockMass(bStd, bExp, \"U235\")\n                    elif bStd.hasFlags(Flags.CONTROL):\n                        self.checkColdHeightBlockMass(bStd, bExp, \"B10\")\n                    for cExp in iterSolidComponents(bExp):\n                        if cExp.zbottom == bExp.p.zbottom and cExp.ztop == bExp.p.ztop:\n                            matDens = cExp.material.density(Tc=cExp.temperatureInC)\n                            compDens = cExp.density()\n                            msg = (\n                                f\"{cExp} {cExp.material} in {bExp} in {aExp} was not at correct density. 
\\n\"\n                                + f\"expansion = {bExp.p.height / bStd.p.height} \\n\"\n                                + f\"density = {matDens}, component density = {compDens} \\n\"\n                            )\n                            self.assertAlmostEqual(\n                                matDens,\n                                compDens,\n                                places=12,\n                                msg=msg,\n                            )\n\n    def checkColdHeightBlockMass(self, bStd: HexBlock, bExp: HexBlock, nuclide: str):\n        \"\"\"Checks that nuclide masses for blocks with input cold heights and\n        \"inputHeightsConsideredHot\": True are underpredicted.\n\n        Notes\n        -----\n        If blueprints have cold blocks heights with \"inputHeightsConsideredHot\": True in the inputs,\n        then the nuclide densities are thermally expanded but the block height is not. This\n        ultimately results in nuclide masses being underpredicted relative to the case where both\n        nuclide densities and block heights are thermally expanded.\n        \"\"\"\n        self.assertGreater(bExp.getMass(nuclide), bStd.getMass(nuclide))\n\n\ndef checkColdBlockHeight(bStd: HexBlock, bExp: HexBlock, assertType: Callable, strForAssertion: str):\n    assertType(\n        bStd.getHeight(),\n        bExp.getHeight(),\n        msg=\"Assembly: {0} -- Std Block {1} ({2}) and Exp Block {3} ({4}) should have {5:s} heights!\".format(\n            bStd.parent,\n            bStd,\n            bStd.getHeight(),\n            bExp,\n            bExp.getHeight(),\n            strForAssertion,\n        ),\n    )\n\n\ndef buildTestAssemblyWithFakeMaterial(name: str, hot: bool = False):\n    \"\"\"Create test assembly consisting of list of fake material.\n\n    Parameters\n    ----------\n    name : string\n        determines which fake material to use\n    \"\"\"\n    if not hot:\n        hotTemp = 100.0\n        height = 10.0\n    else:\n    
    hotTemp = 200.0\n        height = 10.0 + 0.02 * (200.0 - 100.0)\n\n    assembly = HexAssembly(\"testAssemblyType\")\n    assembly.spatialGrid = grids.AxialGrid.fromNCells(numCells=1)\n    assembly.spatialGrid.armiObject = assembly\n    assembly.add(_buildTestBlock(\"shield\", name, hotTemp, height))\n    assembly.add(_buildTestBlock(\"fuel\", name, hotTemp, height))\n    assembly.add(_buildTestBlock(\"fuel\", name, hotTemp, height))\n    assembly.add(_buildTestBlock(\"plenum\", name, hotTemp, height, True))\n    assembly.add(_buildDummySodium(hotTemp, height))\n    assembly.calculateZCoords()\n    assembly.reestablishBlockOrder()\n    return assembly\n\n\ndef _buildTestBlock(blockType: str, name: str, hotTemp: float, height: float, plenum: bool = False) -> HexBlock:\n    \"\"\"Return a simple pin type block filled with coolant and surrounded by duct.\n\n    Parameters\n    ----------\n    blockType\n        determines which type of block you're building\n    name\n        determines which material to use\n    hotTemp\n        the hot temperature of the block. This is synonomous with Thot in blueprints.\n    height\n        the height of the block\n    plenum\n        boolean to indicate if this is a plenum. 
if true, the pin is replaced by an air-filled gap.\n\n    Returns\n    -------\n    HexBlock for testing.\n    \"\"\"\n    b = HexBlock(blockType, height=height)\n\n    fuelDims = {\"Tinput\": 100.0, \"Thot\": hotTemp, \"od\": 0.76, \"id\": 0.00, \"mult\": 127.0}\n    ductDims = {\"Tinput\": 100.0, \"Thot\": hotTemp, \"op\": 16, \"ip\": 15.3, \"mult\": 1.0}\n    mainType = Circle(blockType, name, **fuelDims)\n    bond = Circle(\"bond\", \"Sodium\", Tinput=100.0, Thot=hotTemp, od=0.78, id=0.76, mult=127.0)\n    clad = Circle(\"clad\", name, Tinput=100.0, Thot=hotTemp, od=0.80, id=0.78, mult=127.0)\n    duct = Hexagon(\"duct\", name, **ductDims)\n\n    coolant = DerivedShape(\"coolant\", \"Sodium\", Tinput=100.0, Thot=hotTemp)\n    intercoolant = Hexagon(\n        \"intercoolant\",\n        \"Sodium\",\n        Tinput=100.0,\n        Thot=hotTemp,\n        op=17.0,\n        ip=ductDims[\"op\"],\n        mult=1.0,\n    )\n\n    if plenum:\n        b.add(Circle(\"gap\", \"Air\", **fuelDims))\n    else:\n        b.add(mainType)\n    b.add(bond)\n    b.add(clad)\n    b.add(duct)\n    b.add(coolant)\n    b.add(intercoolant)\n    b.setType(blockType)\n\n    b.getVolumeFractions()\n    b.completeInitialLoading()\n\n    return b\n\n\ndef _buildDummySodium(hotTemp: float, height: float):\n    \"\"\"Build a dummy sodium block.\"\"\"\n    b = HexBlock(\"dummy\", height=height)\n\n    dummy = Hexagon(\"dummy coolant\", \"Sodium\", Tinput=100.0, Thot=hotTemp, op=17, ip=0.0, mult=1.0)\n\n    b.add(dummy)\n    b.getVolumeFractions()\n    b.setType(\"dummy\")\n\n    return b\n\n\nclass FakeMat(materials.ht9.HT9):\n    \"\"\"Fake material used to verify armi.reactor.converters.axialExpansionChanger.\n\n    Notes\n    -----\n    - specifically used in TestAxialExpansionHeight to verify axialExpansionChanger produces\n      expected heights from hand calculation\n    - also used to verify mass and height conservation resulting from even amounts of expansion and\n      contraction. 
See TestConservation.\n    \"\"\"\n\n    name = \"FakeMat\"\n\n    def linearExpansionPercent(self, Tk=None, Tc=None):\n        \"\"\"A fake linear expansion percent.\"\"\"\n        Tc = units.getTc(Tc, Tk)\n        return 0.02 * Tc\n\n\nclass FakeMatException(materials.ht9.HT9):\n    \"\"\"Fake material used to verify TestExceptions.\n\n    Notes\n    -----\n    - the only difference between this and `class Fake(HT9)` above is that the thermal expansion\n      factor is higher to ensure that a negative block height is caught in\n      TestExceptions:test_AssemblyAxialExpansionException.\n    \"\"\"\n\n    name = \"FakeMatException\"\n\n    def linearExpansionPercent(self, Tk=None, Tc=None):\n        \"\"\"A fake linear expansion percent.\"\"\"\n        Tc = units.getTc(Tc, Tk)\n        return 0.08 * Tc\n"
  },
  {
    "path": "armi/reactor/converters/tests/test_axialExpansionChanger_MultiPin.py",
    "content": "# Copyright 2025 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport collections\nimport copy\nimport io\nfrom dataclasses import dataclass\nfrom typing import TYPE_CHECKING, Optional\nfrom unittest.mock import MagicMock\n\nfrom numpy import array, array_equal, full\n\nfrom armi.materials.material import Fluid\nfrom armi.reactor.blueprints import Blueprints\nfrom armi.reactor.components.component import Component\nfrom armi.reactor.converters.axialExpansionChanger.axialExpansionChanger import AxialExpansionChanger\nfrom armi.reactor.converters.axialExpansionChanger.expansionData import iterSolidComponents\nfrom armi.reactor.converters.axialExpansionChanger.redistributeMass import RedistributeMass\nfrom armi.reactor.converters.tests.test_axialExpansionChanger import AxialExpansionTestBase\nfrom armi.reactor.flags import Flags, TypeSpec\nfrom armi.settings.caseSettings import Settings\nfrom armi.testing.singleMixedAssembly import BLOCK_DEFINITIONS_2PIN, GRID_DEFINITION, buildMixedPinAssembly\n\nif TYPE_CHECKING:\n    from armi.reactor.assemblies import HexAssembly\n    from armi.reactor.blocks import HexBlock\n\nFINE_ASSEMBLY_DEF = \"\"\"\nassemblies:\n    multi pin fuel:\n        specifier: LA\n        blocks: [\n            *block_grid_plate, *block_fuel_multiPin_axial_shield,\n            *block_fuel_multiPin, *block_fuel_multiPin, *block_fuel_multiPin,\n            *block_fuel_multiPin, *block_fuel_multiPin, 
*block_fuel_multiPin,\n            *block_fuel_multiPin, *block_fuel_multiPin, *block_mixed_multiPin,\n            *block_mixed_multiPin, *block_aclp_multiPin, *block_plenum_multiPin,\n            *block_duct, *block_dummy\n        ]\n        height: [\n            1.0, 1.0,\n            0.5, 0.5, 0.5,\n            0.5, 0.5, 0.5,\n            0.5, 0.5, 1.0,\n            1.0, 1.0, 1.0,\n            1.0, 1.0\n        ]\n        axial mesh points: [\n            1, 1,\n            1, 1, 1,\n            1, 1, 1,\n            1, 1, 1,\n            1, 1, 1,\n            1, 1\n        ]\n        xs types: [\n            A, A,\n            B, B, B,\n            B, B, B,\n            B, B, C,\n            C, D, D,\n            A, A\n        ]\n\"\"\"  # noqa: E501\n\n\n@dataclass\nclass StoreMassAndTemp:\n    cType: str\n    mass: float\n    HMmass: float\n    HMmassBOL: float\n    HMmolesBOL: float\n    temp: float\n\n\nclass TestMultiPinConservationBase(AxialExpansionTestBase):\n    @classmethod\n    def setUpClass(cls):\n        super().setUpClass()\n        cls.aRef = buildMixedPinAssembly()\n        cls.places = 12\n\n    def setUp(self):\n        self.a = copy.deepcopy(self.aRef)\n        self.axialExpChngr = AxialExpansionChanger()\n        self.axialExpChngr.setAssembly(self.a)\n        self.initConservationValues()\n\n    def initConservationValues(self):\n        # get original masses for conservation checks\n        self.origTotalCMassByFlag = self.getTotalCompMassByFlag(self.a)\n        self.initialTotalHMMolesBOL = self.initialTotalHMMassBOL = 0.0\n        for _, b in self._iterFuelBlocks():\n            for c in b.iterChildrenWithFlags(Flags.FUEL):\n                self.initialTotalHMMolesBOL += c.p.molesHmBOL\n                self.initialTotalHMMassBOL += c.p.massHmBOL\n\n    def getTotalCompMassByFlag(self, a: \"HexAssembly\") -> dict[TypeSpec, float]:\n        \"\"\"Get the total mass of all components in the assembly, except Bond components.\n\n        
Notes\n        -----\n        The axial expansion changer does not consider the expansion or contraction of fluids and therefore their\n        conservation is not guarunteed. The conservation of fluid mass is expected only if each component type on a\n        block has 1) uniform expansion rates and 2) axially isothermal fluid temperatures. For multipin assemblies,\n        the former is generally not met for Bond components; however since there is only one coolant and intercoolant\n        component in general, the conservation of mass for these components expected if axially isothermal fluid\n        temperatures are present.\n        \"\"\"\n        totalCMassByFlags: dict[Flags, float] = collections.defaultdict(float)\n        for b in a:\n            for c in iterSolidComponents(b):\n                totalCMassByFlags[c.p.flags] += c.getMass()\n            for c in filter(self._isFluidButNotBond, b):\n                totalCMassByFlags[c.p.flags] += c.getMass()\n\n        return totalCMassByFlags\n\n    @staticmethod\n    def _isFluidButNotBond(c):\n        \"\"\"Determine if a component is a fluid, but not Bond.\"\"\"\n        return isinstance(c, Component) and isinstance(c.material, Fluid) and not c.hasFlags(Flags.BOND)\n\n    def _iterTestFuelCompsOnBlock(self, b: \"HexBlock\"):\n        \"\"\"Iterate over components in b that exactly contain Flags.FUEL, Flags.TEST, and Flags.DEPLETABLE.\"\"\"\n        yield from b.iterChildrenWithFlags(Flags.FUEL | Flags.TEST | Flags.DEPLETABLE, exactMatch=True)\n\n    def _iterFuelBlocks(self):\n        \"\"\"Iterate over blocks in self.a that have Flags.FUEL. 
Enumerator index starts at 1 to support scaling\n        block-wise values.\n        \"\"\"\n        yield from enumerate(filter(lambda b: b.hasFlags(Flags.FUEL), self.a), start=1)\n\n    def checkConservation(self):\n        \"\"\"Conservation of axial expansion is measured by ensuring the following is the same post expansion: 1) total\n        assembly mass per component flag, 2) total assembly height, and 3) total moles heavy metal at BOL.\n        \"\"\"\n        newTotalCMassByFlag = self.getTotalCompMassByFlag(self.a)\n        for origMass, (cFlag, newMass) in zip(self.origTotalCMassByFlag.values(), newTotalCMassByFlag.items()):\n            self.assertAlmostEqual(origMass, newMass, places=self.places, msg=f\"{cFlag} are not the same!\")\n\n        self.assertAlmostEqual(self.aRef.getTotalHeight(), self.a.getTotalHeight(), places=self.places)\n\n        totalHMMolesBOL = totalHMMassBOL = 0\n        for _, b in self._iterFuelBlocks():\n            for c in b.iterChildrenWithFlags(Flags.FUEL):\n                totalHMMolesBOL += c.p.molesHmBOL\n                totalHMMassBOL += c.p.massHmBOL\n        self.assertAlmostEqual(totalHMMolesBOL, self.initialTotalHMMolesBOL, places=self.places)\n        self.assertAlmostEqual(totalHMMassBOL, self.initialTotalHMMassBOL, places=self.places)\n\n\nclass TestRedistributeMass(TestMultiPinConservationBase):\n    b0: \"HexBlock\"\n    b1: \"HexBlock\"\n    c0: Component\n    origC0Temp: float\n    c1: Component\n    origC1Temp: float\n\n    def setUp(self):\n        super().setUp()\n        self.b0 = self.a.getFirstBlock(Flags.FUEL)\n        self.b1 = self.axialExpChngr.linked.linkedBlocks[self.b0].upper\n        self.c0 = next(filter(lambda c: c.getType() == \"fuel test\", self.b0))\n        self.c1 = self.axialExpChngr.linked.linkedComponents[self.c0].upper\n\n    def test_getAllNucs(self):\n        nucsA = [\"Zr90\", \"Zr91\", \"Zr92\", \"U235\", \"U238\"]\n        nucsB = [\"Zr90\", \"Zr91\", \"Zr92\", \"U233\", \"U238\", 
\"I131\", \"XE131\", \"NP237\", \"AM242\", \"AM242M\"]\n        nucsC = RedistributeMass(MagicMock(), MagicMock(), MagicMock(), MagicMock(), initOnly=True)._getAllNucs(\n            nucsA, nucsB\n        )\n        # ensure nucsA and nucsB haven't changed\n        self.assertTrue(\n            array_equal(\n                array(nucsA),\n                array([\"Zr90\", \"Zr91\", \"Zr92\", \"U235\", \"U238\"]),\n            )\n        )\n        self.assertTrue(\n            array_equal(\n                array(nucsB),\n                array([\"Zr90\", \"Zr91\", \"Zr92\", \"U233\", \"U238\", \"I131\", \"XE131\", \"NP237\", \"AM242\", \"AM242M\"]),\n            )\n        )\n        # ensure nucsC is correct\n        self.assertTrue(\n            array_equal(\n                array(nucsC),\n                array([\"Zr90\", \"Zr91\", \"Zr92\", \"I131\", \"XE131\", \"U233\", \"U235\", \"NP237\", \"U238\", \"AM242\", \"AM242M\"]),\n            )\n        )\n\n    def test_adjustMassParams(self):\n        self._initializeTest(1.05, fromComp=self.c0)\n\n        # component-level params\n        initialFromMassBOL = self.c0.p.massHmBOL\n        initialFromMolesBOL = self.c0.p.molesHmBOL\n        initialToMassBOL = self.c1.p.massHmBOL\n        initialToMolesBOL = self.c1.p.molesHmBOL\n\n        dist = RedistributeMass(\n            fromComp=self.c0, toComp=self.c1, assemName=repr(self.a), deltaZTop=self.deltaZTop, initOnly=True\n        )\n        dist._adjustMassParams()\n        self.assertLess(self.c0.p.massHmBOL, initialFromMassBOL)\n        self.assertLess(self.c0.p.molesHmBOL, initialFromMolesBOL)\n        self.assertGreater(self.c1.p.massHmBOL, initialToMassBOL)\n        self.assertGreater(self.c1.p.molesHmBOL, initialToMolesBOL)\n        self.assertAlmostEqual(self.c0.p.massHmBOL + self.c1.p.massHmBOL, initialFromMassBOL + initialToMassBOL)\n        self.assertAlmostEqual(self.c0.p.molesHmBOL + self.c1.p.molesHmBOL, initialFromMolesBOL + initialToMolesBOL)\n\n       
 # block-level params\n        initialFromMassBOL = self.b0.p.massHmBOL\n        initialFromMolesBOL = self.b0.p.molesHmBOL\n        initialToMassBOL = self.b1.p.massHmBOL\n        initialToMolesBOL = self.b1.p.molesHmBOL\n        self.axialExpChngr._recomputeBlockMassParams(self.b0)\n        self.axialExpChngr._recomputeBlockMassParams(self.b1)\n\n        self.assertLess(self.b0.p.massHmBOL, initialFromMassBOL)\n        self.assertLess(self.b0.p.molesHmBOL, initialFromMolesBOL)\n        self.assertGreater(self.b1.p.massHmBOL, initialToMassBOL)\n        self.assertGreater(self.b1.p.molesHmBOL, initialToMolesBOL)\n        self.assertAlmostEqual(self.b0.p.massHmBOL + self.b1.p.massHmBOL, initialFromMassBOL + initialToMassBOL)\n        self.assertAlmostEqual(self.b0.p.molesHmBOL + self.b1.p.molesHmBOL, initialFromMolesBOL + initialToMolesBOL)\n\n    def test_shiftLinkedCompsForDelta(self):\n        \"\"\"Ensure that given a deltaZTop, component elevations are adjusted appropriately.\"\"\"\n        self._initializeTest(growFrac=1.0, fromComp=self.c0)  # setting fromComp is meaningless here\n        # set what they should be after adjusting\n        delta = 0.1\n        refC0Height = self.c0.height + delta\n        refC0Ztop = self.c0.ztop + delta\n        refC1Height = self.c1.height - delta\n        refC1Zbottom = self.c1.zbottom + delta\n        self.axialExpChngr._shiftLinkedCompsForDelta(self.c0, self.c1, delta)\n        self.assertAlmostEqual(refC0Height, self.c0.height, places=self.places)\n        self.assertAlmostEqual(refC1Height, self.c1.height, places=self.places)\n        self.assertAlmostEqual(refC0Ztop, self.c0.ztop, places=self.places)\n        self.assertAlmostEqual(refC1Zbottom, self.c1.zbottom, places=self.places)\n\n    def test_redistributeMassNonTargetExpNoTherm(self):\n        \"\"\"With no temperature changes anywere, grow c0 by 10% and show that 10% of the c0 mass is moved to c1.\n\n        Notes\n        -----\n        - C0 grows resulting in 
c0 giving 10% of its mass to c1. c1 height does not change so its mass gains 10%.\n        - Additional assertions on temperature exist to ensure that the component temperatures are managed correctly\n          during the transfer of mass. For this test, since this is not thermal expansion, we show that the component\n          temperatures do not change.\n        \"\"\"\n        growFrac = 1.10\n        self._initializeTest(growFrac, fromComp=self.c0)\n        self._redistributeMassWithTempAssert(fromComp=self.c0, toComp=self.c1, thermalExp=False)\n\n    def test_addMassToCompNonTargetCompNoTherm(self):\n        \"\"\"With no temperature changes anywere, shrink c0 by 10% and show that 10% of the c1 mass is moved to c0.\n\n        Notes\n        -----\n        - C0 shrinks resulting in c1 giving 10% of its mass to c0. c1 height does not change so it's mass loses 10%.\n        - Additional assertions on temperature exist to ensure that the component temperatures are managed correctly\n          during the transfer of mass. For this test, since this is not thermal expansion, we show that the component\n          temperatures do not change.\n        \"\"\"\n        growFrac = 0.9\n        self._initializeTest(growFrac, fromComp=self.c1)\n        self._redistributeMassWithTempAssert(fromComp=self.c1, toComp=self.c0, thermalExp=False)\n\n    def test_addMassToCompNonTargetComprYesTherm(self):\n        \"\"\"Decrease c0 by 100 deg C and and show that c1 mass is moved to c0.\n\n        Notes\n        -----\n        - C0 shrinks resulting in c1 giving X% of its mass to c0. c1 height does not change so its mass loses X%.\n        - Additional assertions on temperature exist to ensure that the component temperatures are managed correctly\n          during the transfer of mass. For this test, we show that the temperature of c0 increases and the temperature\n          of c1 does not change. 
The increase in temperature for c0 is due to the contribution from the hotter c1\n          component.\n        \"\"\"\n        newTemp = self.c0.temperatureInC - 100.0\n        # updateComponentTemp updates ndens for update in AREA only\n        self.axialExpChngr.expansionData.updateComponentTemp(self.c0, newTemp)\n        self.axialExpChngr.expansionData.computeThermalExpansionFactors()\n        growFrac = self.axialExpChngr.expansionData.getExpansionFactor(self.c0)\n\n        self._initializeTest(growFrac, fromComp=self.c1)\n        self._redistributeMassWithTempAssert(fromComp=self.c1, toComp=self.c0, thermalExp=True)\n\n    def test_addMassToCompNonTargetExpanYesTherm(self):\n        \"\"\"Increase c0 by 100 deg C and and show that c0 mass is moved to c1.\n\n        Notes\n        -----\n        - C0 expands resulting in c0 giving X% of its mass to c1. c0 height does not change so its mass loses X%.\n        - Additional assertions on temperature exist to ensure that the component temperatures are managed correctly\n          during the transfer of mass. For this test, we show that the temperature of c1 increases and the temperature\n          of c0 does not change. 
The increase in temperature is due to the contribution from the hotter c0 component.\n        \"\"\"\n        newTemp = self.c0.temperatureInC + 100.0\n        # updateComponentTemp updates ndens for update in AREA only\n        self.axialExpChngr.expansionData.updateComponentTemp(self.c0, newTemp)\n        self.axialExpChngr.expansionData.computeThermalExpansionFactors()\n        growFrac = self.axialExpChngr.expansionData.getExpansionFactor(self.c0)\n\n        self._initializeTest(growFrac, fromComp=self.c0)\n        self._redistributeMassWithTempAssert(fromComp=self.c0, toComp=self.c1, thermalExp=True)\n\n    def _updateToCompElevations(self, toComp: Component):\n        \"\"\"Shift ``toComp`` based on expansion or contraction of ``fromComp``, as indicated by ``self.deltaZTop``.\n\n        Notes\n        -----\n        If deltaZTop is negative, this indicates that ``fromComp`` has expanded and ``toComp`` needs to be shifted\n        upwards. If deltaZtop is positive, this indicates that ``fromComp`` has contracted and ``toComp`` need to be\n        shifted downwards.\n        \"\"\"\n        if self.deltaZTop < 0.0:\n            toComp.zbottom -= self.deltaZTop\n            toComp.height -= self.deltaZTop\n            toComp.ztop = toComp.zbottom + toComp.height\n        else:\n            toComp.ztop += self.deltaZTop\n            toComp.height += self.deltaZTop\n        # adjust b1 elevations based on c1\n        toComp.parent.ztop = toComp.ztop\n        toComp.parent.zbottom = toComp.zbottom\n        toComp.parent.p.height = toComp.height\n        toComp.parent.clearCache()\n\n    def _updateFromCompElevations(self, fromComp: Component):\n        if self.deltaZTop < 0.0:\n            # adjust b1 elevations based on c1\n            fromComp.ztop += self.deltaZTop\n            fromComp.height += self.deltaZTop\n        else:\n            fromComp.zbottom += self.deltaZTop\n            fromComp.height -= self.deltaZTop\n        # adjust b0 elevations based on 
c0\n        fromComp.parent.ztop = fromComp.ztop\n        fromComp.parent.zbottom = fromComp.zbottom\n        fromComp.parent.p.height = fromComp.parent.ztop - fromComp.parent.zbottom\n        # clear the cache to update volume calculations\n        fromComp.parent.clearCache()\n\n    def _initializeTest(self, growFrac: float, fromComp: Component):\n        \"\"\"Initialize the tests.\n\n        Notes\n        -----\n        1) Store reference mass and temperature information.\n        1) Set elevations of components and blocks post-expansion.\n        3) Store the amount of mass expeceted to be redistributed between components.\n        \"\"\"\n        # set the original mass and temperature of the components post expansion and pre redistribution\n\n        self.originalC0 = StoreMassAndTemp(\n            self.c0.parent.name,\n            self.c0.getMass(),\n            self.c0.getHMMass(),\n            self.c0.p.massHmBOL,\n            self.c0.p.molesHmBOL,\n            self.c0.temperatureInC,\n        )\n        self.originalC1 = StoreMassAndTemp(\n            self.c1.parent.name,\n            self.c1.getMass(),\n            self.c1.getHMMass(),\n            self.c1.p.massHmBOL,\n            self.c1.p.molesHmBOL,\n            self.c1.temperatureInC,\n        )\n\n        # adjust c0 elevations per growFrac\n        self.c0.zbottom = self.b0.p.zbottom\n        self.c0.height = self.b0.getHeight() * growFrac\n        self.c0.ztop = self.c0.zbottom + self.c0.height\n        # update the ndens of c0 for the change in height\n        self.c0.changeNDensByFactor(1.0 / growFrac)\n\n        # calculate deltaZTop to inform how much mass will be redistributed\n        self.deltaZTop = self.b0.p.ztop - self.c0.ztop\n\n        # initialize component elevations for self.b1\n        for c in self.b1:\n            c.zbottom = self.b1.p.zbottom\n            c.height = self.b1.getHeight()\n            c.ztop = c.zbottom + c.height\n        self.b1.clearCache()\n\n        if 
fromComp is self.c0:\n            fromHeight = self.c0.height\n            self.redistributedMass = self.originalC0.mass * abs(self.deltaZTop) / fromHeight\n            self.redistributedBOLMass = self.originalC0.HMmassBOL * abs(self.deltaZTop) / fromHeight\n            self.redistributedBOLMoles = self.originalC0.HMmolesBOL * abs(self.deltaZTop) / fromHeight\n        else:\n            fromHeight = self.c1.height\n            self.redistributedMass = self.originalC1.mass * abs(self.deltaZTop) / fromHeight\n            self.redistributedBOLMass = self.originalC1.HMmassBOL * abs(self.deltaZTop) / fromHeight\n            self.redistributedBOLMoles = self.originalC1.HMmolesBOL * abs(self.deltaZTop) / fromHeight\n\n    def _getReferenceData(self, fromComp: Component, toComp: Optional[Component]):\n        \"\"\"Pull the reference data needed for ``fromComp`` and ``toComp``.\"\"\"\n        fromCompRefData = self.originalC0 if fromComp.parent.name == self.originalC0.cType else self.originalC1\n        if toComp is None:\n            toCompRefData = None\n        else:\n            toCompRefData = self.originalC0 if toComp.parent.name == self.originalC0.cType else self.originalC1\n        return fromCompRefData, toCompRefData\n\n    def _redistributeMassWithTempAssert(self, fromComp: Component, toComp: Component, thermalExp: bool):\n        \"\"\"Perform the mass redistribution from ``fromComp`` to ``toComp``.\n\n        Notes\n        -----\n        Two assertions are done: 1) the correct amount of mass is moved to ``toComp``. 
2) the resulting temperatures\n        for ``fromComp`` and ``toComp`` are correct.\n        \"\"\"\n        # move mass from ``fromComp`` to ``toComp``\n        RedistributeMass(fromComp=fromComp, toComp=toComp, assemName=repr(self.a), deltaZTop=self.deltaZTop)\n\n        fromCompRefData, toCompRefData = self._getReferenceData(fromComp, toComp)\n        self._updateToCompElevations(toComp=toComp)\n        self._updateFromCompElevations(fromComp=fromComp)\n\n        # ensure the toComp mass increases by amountBeingRedistributed\n        self.assertAlmostEqual(\n            toComp.getMass(),\n            toCompRefData.mass + self.redistributedMass,\n            places=self.places,\n        )\n        HMfrac = toCompRefData.HMmass / toCompRefData.mass\n        self.assertAlmostEqual(\n            toComp.getHMMass(),\n            toCompRefData.HMmass + self.redistributedMass * HMfrac,\n            places=self.places,\n        )\n        self.assertAlmostEqual(\n            toComp.p.massHmBOL,\n            toCompRefData.HMmassBOL + self.redistributedBOLMass,\n            places=self.places,\n        )\n        self.assertAlmostEqual(\n            toComp.p.molesHmBOL,\n            toCompRefData.HMmolesBOL + self.redistributedBOLMoles,\n            places=self.places,\n        )\n\n        # fromComp temperature should not change because we've only removed mass\n        self.assertEqual(fromComp.temperatureInC, fromCompRefData.temp)\n        # we expect the new temperature to be greater because we added mass from a\n        # material with a higher temperature\n        if thermalExp:\n            self.assertGreater(toComp.temperatureInC, toCompRefData.temp)\n        else:\n            self.assertEqual(toComp.temperatureInC, toCompRefData.temp)\n\n        # ensure the fromComp mass decreases by redisributedMass\n        self.assertAlmostEqual(fromComp.getMass(), fromCompRefData.mass - self.redistributedMass, places=self.places)\n        HMfrac = fromCompRefData.HMmass / 
fromCompRefData.mass\n        self.assertAlmostEqual(\n            fromComp.getHMMass(),\n            fromCompRefData.HMmass - self.redistributedMass * HMfrac,\n            places=self.places,\n        )\n        self.assertAlmostEqual(\n            fromComp.p.massHmBOL,\n            fromCompRefData.HMmassBOL - self.redistributedBOLMass,\n            places=self.places,\n        )\n        self.assertAlmostEqual(\n            fromComp.p.molesHmBOL,\n            fromCompRefData.HMmolesBOL - self.redistributedBOLMoles,\n            places=self.places,\n        )\n\n\nclass TestMultiPinConservation(TestMultiPinConservationBase):\n    def setUp(self):\n        super().setUp()\n\n    def test_expandThermalBothFuel(self):\n        \"\"\"Perform thermal expansion on both fuel and test fuel components.\n\n        Notes\n        -----\n        - Each block is scaled by an increasing temperature to simulate a variable axial temperature distribution.\n        - The test fuel and fuel components are scaled by different temperatures to simulate each pin design\n          existing at different temperatures.\n        - The 150 deg C and 50 deg C based temperature changes are arbitrarily chosen.\n        \"\"\"\n        for i, b in self._iterFuelBlocks():\n            for c in b.iterChildrenWithFlags(Flags.FUEL):\n                if c.hasFlags(Flags.TEST):\n                    newTemp = c.temperatureInC + 150.0 * i\n                else:\n                    newTemp = c.temperatureInC + 50.0 * i\n                self.axialExpChngr.expansionData.updateComponentTemp(c, newTemp)\n        self.axialExpChngr.expansionData.computeThermalExpansionFactors()\n        self.axialExpChngr.axiallyExpandAssembly()\n        self.checkConservation()\n\n    def test_roundTripThermalBothFuel(self):\n        \"\"\"Perform thermal expansion on both fuel and test fuel components and ensure that mass and total assembly\n        height is recovered.\n\n        Notes\n        -----\n        - Each block 
is scaled by an increasing temperature to simulate a variable axial temperature distribution.\n        - The test fuel and fuel components are scaled by different temperatures to simulate each pin design\n          existing at different temperatures.\n        - The 75 deg C and 50 deg C based temperature changes are arbitrarily chosen.\n        \"\"\"\n        tempAdjust = [50, -50]\n        for temp in tempAdjust:\n            for i, b in self._iterFuelBlocks():\n                for c in b.iterChildrenWithFlags(Flags.FUEL):\n                    if c.hasFlags(Flags.TEST):\n                        testTemp = temp + 25 if temp > 0 else temp - 25\n                        newTemp = c.temperatureInC + testTemp * i\n                    else:\n                        newTemp = c.temperatureInC + temp * i\n                    self.axialExpChngr.expansionData.updateComponentTemp(c, newTemp)\n            self.axialExpChngr.expansionData.computeThermalExpansionFactors()\n            self.axialExpChngr.axiallyExpandAssembly()\n        self.checkConservation()\n\n    def test_expandThermal(self):\n        \"\"\"Perform thermal expansion on the test fuel component.\n\n        Notes\n        -----\n        - Each block is scaled by an increasing temperature to simulate a variable axial temperature distribution.\n        - The 100 deg C based temperature changes is arbitrarily chosen.\n        - An extra assertion in done in this test to ensure that isotopes uniquely found in each test are not dropped\n          when moving mass between blocks. 
See the tables below for additional information on what is expected.\n\n          ==========  ==================\n          Component    Isotopes Present\n          ==========  ==================\n          0            XE131\n          1            I131\n          2            NP237\n          3            CM242\n          ==========  ==================\n\n          then after the axial expansion routine, we show that the following exists,\n\n          ==========  ==================\n          Component    Isotopes Present\n          ==========  ==================\n          0            XE131\n          1            I131, XE131\n          2            NP237, I131\n          3            CM242, NP237\n          ==========  ==================\n        \"\"\"\n        nucs = [\"XE131\", \"I131\", \"NP237\", \"CM242\"]\n        for i, c in enumerate(self.a.iterComponents([Flags.FUEL, Flags.TEST, Flags.DEPLETABLE], exact=True)):\n            self.assertEqual(c.getNumberDensity(nucs[i]), 0.0)\n            c.setNumberDensity(nucs[i], 1e-3)\n\n        # recalcualte the initial mass with the new isotope additions\n        self.origTotalCMassByFlag = self.getTotalCompMassByFlag(self.a)\n\n        for i, b in self._iterFuelBlocks():\n            for c in self._iterTestFuelCompsOnBlock(b):\n                newTemp = c.temperatureInC + 100.0 * i\n                self.axialExpChngr.expansionData.updateComponentTemp(c, newTemp)\n        self.axialExpChngr.expansionData.computeThermalExpansionFactors()\n        self.axialExpChngr.axiallyExpandAssembly()\n        self.checkConservation()\n\n        expectedNucsPresent = [[\"XE131\"], [\"XE131\", \"I131\"], [\"I131\", \"NP237\"], [\"NP237\", \"CM242\"]]\n        for i, c in enumerate(self.a.iterComponents([Flags.FUEL, Flags.TEST, Flags.DEPLETABLE], exact=True)):\n            for nuc in expectedNucsPresent[i]:\n                self.assertNotEqual(c.getNumberDensity(nuc), 0.0, msg=f\"{nuc} not present in {c}!\")\n\n    def 
test_contractThermal(self):\n        \"\"\"Perform thermal contraction on the test fuel component.\n\n        Notes\n        -----\n        - Each block is scaled by a decreasing temperature to simulate a variable axial temperature distribution.\n        - The -100 deg C based temperature changes is arbitrarily chosen.\n        \"\"\"\n        for i, b in self._iterFuelBlocks():\n            for c in self._iterTestFuelCompsOnBlock(b):\n                newTemp = c.temperatureInC - 100.0 * i\n                self.axialExpChngr.expansionData.updateComponentTemp(c, newTemp)\n        self.axialExpChngr.expansionData.computeThermalExpansionFactors()\n        self.axialExpChngr.axiallyExpandAssembly()\n        self.checkConservation()\n\n    def test_expandPrescribed(self):\n        \"\"\"Perform prescribed expansion on the test fuel component.\n\n        Notes\n        -----\n        - The factor of 1.2 for component expansion is arbitrarily chosen. Note, if too large of a value is chosen,\n          the upper block heights will go negative and the axial expansion changer will hit a RuntimeError.\n        \"\"\"\n        cList = []\n        for _i, b in self._iterFuelBlocks():\n            for c in self._iterTestFuelCompsOnBlock(b):\n                cList.append(c)\n        pList = full(len(cList), 1.2)\n        self.axialExpChngr.expansionData.setExpansionFactors(cList, pList)\n        self.axialExpChngr.axiallyExpandAssembly()\n        self.checkConservation()\n\n    def test_contractPrescribed(self):\n        \"\"\"Perform prescribed contraction on the test fuel component.\n\n        Notes\n        -----\n        - The factor of 0.9 for component contraction is arbitrarily chosen.\n        \"\"\"\n        cList = []\n        for _i, b in self._iterFuelBlocks():\n            for c in self._iterTestFuelCompsOnBlock(b):\n                cList.append(c)\n        pList = full(len(cList), 0.9)\n        self.axialExpChngr.expansionData.setExpansionFactors(cList, pList)\n     
   self.axialExpChngr.axiallyExpandAssembly()\n        self.checkConservation()\n\n    def test_expandAndContractPrescribed(self):\n        \"\"\"Perform prescribed expansion and contraction on the test fuel component.\n\n        Notes\n        -----\n        - Each block is scaled by a different value to simulate a variable axial expansion profile (e.g., burnup driven\n          axial expansion commonly found in sodium fast reactors).\n        - The factor of +/- 0.01 for component expansion/contraction is arbitrarily chosen. Note, if too large of a\n          value is chosen, the upper block heights will go negative and the axial expansion changer will hit a\n          RuntimeError.\n        \"\"\"\n        cList = []\n        pList = []\n        for i, b in self._iterFuelBlocks():\n            for c in b.iterChildrenWithFlags(Flags.FUEL):\n                if c.hasFlags(Flags.TEST):\n                    pList.append(1.0 + 0.01 * i)\n                else:\n                    pList.append(1.0 - 0.01 * i)\n                cList.append(c)\n        self.axialExpChngr.expansionData.setExpansionFactors(cList, pList)\n        self.axialExpChngr.axiallyExpandAssembly()\n        self.checkConservation()\n\n\nclass TestExceptionForMultiPin(TestMultiPinConservationBase):\n    def setUp(self):\n        cs = Settings()\n        with io.StringIO(BLOCK_DEFINITIONS_2PIN + FINE_ASSEMBLY_DEF + GRID_DEFINITION) as stream:\n            blueprints = Blueprints.load(stream)\n            blueprints._prepConstruction(cs)\n        self.a = list(blueprints.assemblies.values())[0]\n        self.axialExpChngr = AxialExpansionChanger()\n        self.axialExpChngr.setAssembly(self.a)\n\n    def test_failExpansionNegativeCompHeight(self):\n        \"\"\"Show that the negative component height check can be caught.\"\"\"\n        cList = []\n        for _i, b in self._iterFuelBlocks():\n            for c in b.iterChildrenWithFlags(Flags.FUEL | Flags.DEPLETABLE, exactMatch=True):\n                
cList.append(c)\n        pList = full(len(cList), 1.3)\n        self.axialExpChngr.expansionData.setExpansionFactors(cList, pList)\n        with self.assertRaisesRegex(ArithmeticError, expected_regex=\"has a negative height\"):\n            self.axialExpChngr.axiallyExpandAssembly()\n"
  },
  {
    "path": "armi/reactor/converters/tests/test_blockConverter.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Test block conversions.\"\"\"\n\nimport math\nimport os\nimport unittest\n\nimport numpy as np\n\nfrom armi.physics.neutronics.isotopicDepletion.isotopicDepletionInterface import (\n    isDepletable,\n)\nfrom armi.reactor import blocks, components, grids\nfrom armi.reactor.converters import blockConverters\nfrom armi.reactor.flags import Flags\nfrom armi.reactor.tests.test_blocks import buildLinkedFuelBlock, loadTestBlock\nfrom armi.testing import TEST_ROOT, loadTestReactor\nfrom armi.testing.singleMixedAssembly import buildMixedThreePinAssembly\nfrom armi.utils import hexagon\nfrom armi.utils.directoryChangers import TemporaryDirectoryChanger\n\n\ndef buildSimpleFuelBlockNegativeArea():\n    \"\"\"\n    Return a simple block containing fuel, clad, duct, and coolant.\n\n    The block has a negative-area gap between fuel and cladding for testing.\n    \"\"\"\n    b = blocks.HexBlock(\"fuel\", height=10.0)\n\n    fuelDims = {\"Tinput\": 25, \"Thot\": 600, \"od\": 0.76, \"id\": 0.00, \"mult\": 127.0}\n    cladDims = {\"Tinput\": 25, \"Thot\": 600, \"od\": 0.80, \"id\": 0.76, \"mult\": 127.0}\n    ductDims = {\"Tinput\": 25, \"Thot\": 600, \"op\": 16, \"ip\": 15.3, \"mult\": 1.0}\n    intercoolantDims = {\n        \"Tinput\": 400,\n        \"Thot\": 400,\n        \"op\": 17.0,\n        \"ip\": ductDims[\"op\"],\n        \"mult\": 1.0,\n    }\n    coolDims = 
{\"Tinput\": 25.0, \"Thot\": 400}\n\n    fuel = components.Circle(\"fuel\", \"UZr\", **fuelDims)\n    clad = components.Circle(\"clad\", \"HT9\", **cladDims)\n    gapDims = {\n        \"Tinput\": 25,\n        \"Thot\": 600,\n        \"od\": \"clad.id\",\n        \"id\": \"fuel.od\",\n        \"mult\": 127.0,\n    }\n    gapDims[\"components\"] = {\"fuel\": fuel, \"clad\": clad}\n    gap = components.Circle(\"gap\", \"Void\", **gapDims)\n    duct = components.Hexagon(\"duct\", \"HT9\", **ductDims)\n    coolant = components.DerivedShape(\"coolant\", \"Sodium\", **coolDims)\n    intercoolant = components.Hexagon(\"intercoolant\", \"Sodium\", **intercoolantDims)\n\n    b.add(fuel)\n    b.add(gap)\n    b.add(clad)\n    b.add(duct)\n    b.add(coolant)\n    b.add(intercoolant)\n\n    b.getVolumeFractions()\n\n    return b\n\n\ndef buildSimpleFuelBlockNegativeAreaBond():\n    \"\"\"\n    Return a simple block containing fuel, clad, duct, and coolant.\n\n    The block has a negative-area bond between fuel and cladding for testing.\n    \"\"\"\n    b = blocks.HexBlock(\"fuel\", height=10.0)\n\n    fuelDims = {\"Tinput\": 25, \"Thot\": 600, \"od\": 0.76, \"id\": 0.00, \"mult\": 127.0}\n    cladDims = {\"Tinput\": 25, \"Thot\": 600, \"od\": 0.80, \"id\": 0.76, \"mult\": 127.0}\n    ductDims = {\"Tinput\": 25, \"Thot\": 600, \"op\": 16, \"ip\": 15.3, \"mult\": 1.0}\n    intercoolantDims = {\n        \"Tinput\": 400,\n        \"Thot\": 400,\n        \"op\": 17.0,\n        \"ip\": ductDims[\"op\"],\n        \"mult\": 1.0,\n    }\n    coolDims = {\"Tinput\": 25.0, \"Thot\": 400}\n\n    fuel = components.Circle(\"fuel\", \"UZr\", **fuelDims)\n    clad = components.Circle(\"clad\", \"HT9\", **cladDims)\n    bondDims = {\n        \"Tinput\": 25,\n        \"Thot\": 600,\n        \"od\": \"clad.id\",\n        \"id\": \"fuel.od\",\n        \"mult\": 127.0,\n    }\n    bondDims[\"components\"] = {\"fuel\": fuel, \"clad\": clad}\n    bond = components.Circle(\"bond\", \"Sodium\", 
**bondDims)\n    duct = components.Hexagon(\"duct\", \"HT9\", **ductDims)\n    coolant = components.DerivedShape(\"coolant\", \"Sodium\", **coolDims)\n    intercoolant = components.Hexagon(\"intercoolant\", \"Sodium\", **intercoolantDims)\n\n    b.add(fuel)\n    b.add(bond)\n    b.add(clad)\n    b.add(duct)\n    b.add(coolant)\n    b.add(intercoolant)\n\n    b.getVolumeFractions()\n\n    return b\n\n\nclass TestBlockConverter(unittest.TestCase):\n    def setUp(self):\n        self.td = TemporaryDirectoryChanger()\n        self.td.__enter__()\n\n    def tearDown(self):\n        self.td.__exit__(None, None, None)\n\n    def test_dissolveWireIntoCoolant(self):\n        \"\"\"\n        Test dissolving wire into coolant.\n\n        .. test:: Homogenize one component into another.\n            :id: T_ARMI_BLOCKCONV0\n            :tests: R_ARMI_BLOCKCONV\n        \"\"\"\n        self._test_dissolve(loadTestBlock(), \"wire\", \"coolant\")\n        hotBlock = loadTestBlock(cold=False)\n        self._test_dissolve(hotBlock, \"wire\", \"coolant\")\n        hotBlock = self._perturbTemps(hotBlock, \"wire\", 127, 700)\n        self._test_dissolve(hotBlock, \"wire\", \"coolant\")\n\n    def test_dissolveLinerIntoClad(self):\n        \"\"\"\n        Test dissolving liner into clad.\n\n        .. test:: Homogenize one component into another.\n            :id: T_ARMI_BLOCKCONV1\n            :tests: R_ARMI_BLOCKCONV\n        \"\"\"\n        self._test_dissolve(loadTestBlock(), \"outer liner\", \"clad\")\n        hotBlock = loadTestBlock(cold=False)\n        self._test_dissolve(hotBlock, \"outer liner\", \"clad\")\n        hotBlock = self._perturbTemps(hotBlock, \"outer liner\", 127, 700)\n        self._test_dissolve(hotBlock, \"outer liner\", \"clad\")\n\n    def test_dissolveBondIntoClad(self):\n        \"\"\"\n        Test dissolving linked bond into coolant.\n\n        .. 
test:: Homogenize a linked component into another.\n            :id: T_ARMI_BLOCKCONV2\n            :tests: R_ARMI_BLOCKCONV\n        \"\"\"\n        self._test_dissolve(buildLinkedFuelBlock(), \"bond\", \"clad\")\n\n    def _perturbTemps(self, block, cName, tCold, tHot):\n        \"\"\"Give the component different ref and hot temperatures than in test_Blocks.\"\"\"\n        c = block.getComponent(Flags.fromString(cName))\n        c.refTemp, c.refHot = tCold, tHot\n        c.setTemperature(tHot)\n        return block\n\n    def _test_dissolve(self, block, soluteName, solventName):\n        converter = blockConverters.ComponentMerger(block, soluteName, solventName)\n        convertedBlock = converter.convert()\n        self.assertNotIn(soluteName, convertedBlock.getComponentNames())\n        self._checkAreaAndComposition(block, convertedBlock)\n\n    def test_dissolveMultiple(self):\n        \"\"\"Test dissolving multiple components into another.\"\"\"\n        self._test_dissolve_multi(loadTestBlock(), [\"wire\", \"clad\"], \"coolant\")\n        self._test_dissolve_multi(loadTestBlock(), [\"inner liner\", \"outer liner\"], \"clad\")\n\n    def test_dissolveMixedAssembly(self):\n        \"\"\"Test dissolving multiple components into another in a mixed assembly.\"\"\"\n        mixedAssem = buildMixedThreePinAssembly()\n        b = mixedAssem.getBlocks(Flags.FUEL)[1]\n        annularPin = b.getComponents([Flags.ANNULAR, Flags.LINER, Flags.GAP])\n        testPin = []\n        hostPin = []\n        for c in b:\n            if c in annularPin:\n                continue\n            if c.hasFlags([Flags.COOLANT, Flags.INTERCOOLANT, Flags.DUCT]):\n                continue\n            if c.hasFlags(Flags.TEST):\n                testPin.append(c)\n            hostPin.append(c)\n        convertedBlock1 = self._test_dissolve_mixedAssembly(b, [\"wire\", \"clad\"], \"coolant\", hostPin)\n        convertedBlock2 = self._test_dissolve_mixedAssembly(convertedBlock1, [\"clad 
test\"], \"coolant\", testPin)\n        convertedBlock3 = self._test_dissolve_mixedAssembly(\n            convertedBlock2, [\"annular void\"], \"annular fuel test\", testPin\n        )\n        convertedBlock4 = self._test_dissolve_mixedAssembly(\n            convertedBlock3, [\"gap2\", \"liner\", \"gap1\"], \"annular clad test\", testPin\n        )\n        self._checkAreaAndComposition(b, convertedBlock4)\n\n    def test_dissolveZeroArea(self):\n        \"\"\"Test dissolving a zero-area component into another.\"\"\"\n        self._test_dissolve(loadTestBlock(), \"gap2\", \"outer liner\")\n\n    def test_dissolveIntoZeroArea(self):\n        \"\"\"Test dissolving a component into a zero-area solvent (raises ValueError).\"\"\"\n        with self.assertRaises(ValueError):\n            self._test_dissolve(loadTestBlock(), \"outer liner\", \"gap2\")\n\n    def test_dissolveNegativeArea(self):\n        \"\"\"Test dissolving a zero-area gap component into another.\"\"\"\n        self._test_dissolve(buildSimpleFuelBlockNegativeArea(), \"gap\", \"clad\")\n\n    def test_dissolveNegativeAreaBond(self):\n        \"\"\"Test dissolving a zero-area non-gap component into another.\"\"\"\n        with self.assertRaises(ValueError):\n            self._test_dissolve(buildSimpleFuelBlockNegativeAreaBond(), \"bond\", \"clad\")\n\n    def test_dissolveIntoNegativeArea(self):\n        \"\"\"Test dissolving a zero-area component into another.\"\"\"\n        with self.assertRaises(ValueError):\n            self._test_dissolve(buildSimpleFuelBlockNegativeArea(), \"clad\", \"gap\")\n\n    def _test_dissolve_multi(self, block, soluteNames, solventName):\n        converter = blockConverters.MultipleComponentMerger(block, soluteNames, solventName)\n        convertedBlock = converter.convert()\n        for soluteName in soluteNames:\n            self.assertNotIn(soluteName, convertedBlock.getComponentNames())\n        self._checkAreaAndComposition(block, convertedBlock)\n\n    def 
_test_dissolve_mixedAssembly(self, block, soluteNames, solventName, pin):\n        converter = blockConverters.MixedPinComponentMerger(block, soluteNames, solventName, pin)\n        convertedBlock = converter.convert()\n        for soluteName in soluteNames:\n            self.assertNotIn(soluteName, convertedBlock.getComponentNames())\n        self._checkAreaAndComposition(block, convertedBlock)\n        return convertedBlock\n\n    def test_build_NthRing(self):\n        \"\"\"Test building of one ring.\"\"\"\n        RING = 6\n        block = loadTestBlock(cold=False)\n        block.spatialGrid = grids.HexGrid.fromPitch(1.0)\n\n        numPinsInRing = 30\n        converter = blockConverters.HexComponentsToCylConverter(block)\n        fuel, clad = _buildJoyoFuel()\n        pinComponents = [fuel, clad]\n        converter._buildFirstRing(pinComponents)\n        converter.pinPitch = 0.76\n        converter._buildNthRing(pinComponents, RING)\n        components = converter.convertedBlock\n        self.assertEqual(components[3].name.split()[0], components[-1].name.split()[0])\n        self.assertAlmostEqual(clad.getNumberDensity(\"FE56\"), components[1].getNumberDensity(\"FE56\"))\n        self.assertAlmostEqual(\n            components[3].getArea() + components[-1].getArea(),\n            clad.getArea() * numPinsInRing / clad.getDimension(\"mult\"),\n        )\n\n    def test_buildInsideDuct(self):\n        \"\"\"Test building inside the duct.\"\"\"\n        block = loadTestBlock(cold=False)\n        block.spatialGrid = grids.HexGrid.fromPitch(1.0)\n        converter = blockConverters.HexComponentsToCylConverter(block)\n        converter._buildInsideDuct()\n        insideBlock = converter.convertedBlock\n        ductIP = block.getComponent(Flags.DUCT).getDimension(\"ip\")\n        bondMass = block.getComponent(Flags.BOND).getMass(\"NA\")\n        coolantMass = block.getComponent(Flags.COOLANT).getMass(\"NA\")\n        
self.assertAlmostEqual(insideBlock.getMass(\"U235\"), block.getMass(\"U235\"))\n        self.assertAlmostEqual(insideBlock.getMass(\"NA\"), bondMass + coolantMass)\n        self.assertAlmostEqual(insideBlock.getArea(), ductIP**2 * math.sqrt(3) / 2)\n\n    def test_convert(self):\n        \"\"\"Test conversion with no fuel driver.\n\n        .. test:: Convert hex blocks to cylindrical blocks.\n            :id:  T_ARMI_BLOCKCONV_HEX_TO_CYL1\n            :tests: R_ARMI_BLOCKCONV_HEX_TO_CYL\n        \"\"\"\n        block = loadTestReactor(TEST_ROOT)[1].core.getAssemblies(Flags.FUEL)[2].getFirstBlock(Flags.FUEL)\n        block.spatialGrid = grids.HexGrid.fromPitch(1.0)\n\n        converter = blockConverters.HexComponentsToCylConverter(block)\n        converter.convert()\n\n        for compType in [Flags.FUEL, Flags.CLAD, Flags.DUCT]:\n            self.assertAlmostEqual(\n                block.getComponent(compType).getArea(),\n                sum([component.getArea() for component in converter.convertedBlock if component.hasFlags(compType)]),\n            )\n            for c in converter.convertedBlock.getComponents(compType):\n                self.assertEqual(block.getComponent(compType).temperatureInC, c.temperatureInC)\n\n        self.assertEqual(block.getHeight(), converter.convertedBlock.getHeight())\n        self._checkAreaAndComposition(block, converter.convertedBlock)\n        self._checkCiclesAreInContact(converter.convertedBlock)\n\n    def test_convertHexWithFuelDriver(self):\n        \"\"\"Test conversion with fuel driver.\n\n        .. 
test:: Convert hex blocks to cylindrical blocks.\n            :id:  T_ARMI_BLOCKCONV_HEX_TO_CYL0\n            :tests: R_ARMI_BLOCKCONV_HEX_TO_CYL\n        \"\"\"\n        driverBlock = loadTestReactor(TEST_ROOT)[1].core.getAssemblies(Flags.FUEL)[2].getFirstBlock(Flags.FUEL)\n\n        block = loadTestReactor(TEST_ROOT)[1].core.getFirstBlock(Flags.CONTROL)\n        control = block.getComponent(Flags.CONTROL)\n\n        # add depletable flag to see if it is carried\n        control.p.flags |= Flags.DEPLETABLE\n\n        driverBlock.spatialGrid = None\n        block.spatialGrid = grids.HexGrid.fromPitch(1.0)\n\n        convertedWithoutDriver = self._testConvertWithDriverRings(\n            block,\n            driverBlock,\n            blockConverters.HexComponentsToCylConverter,\n            hexagon.numPositionsInRing,\n        )\n\n        self.assertEqual(5, len([c for c in convertedWithoutDriver if isDepletable(c)]))\n        self.assertEqual(5, len([c for c in convertedWithoutDriver if c.hasFlags(Flags.CONTROL)]))\n        self.assertEqual(9, len([c for c in convertedWithoutDriver if c.hasFlags(Flags.CLAD)]))\n\n        # This should fail because a spatial grid is required on the block.\n        driverBlock.spatialGrid = None\n        block.spatialGrid = None\n        with self.assertRaises(ValueError):\n            self._testConvertWithDriverRings(\n                block,\n                driverBlock,\n                blockConverters.HexComponentsToCylConverter,\n                hexagon.numPositionsInRing,\n            )\n\n        # The ``BlockAvgToCylConverter`` should work without any spatial grid defined because it assumes the grid based\n        # on the block type.\n        driverBlock.spatialGrid = None\n        block.spatialGrid = None\n\n        convertedWithoutDriver = self._testConvertWithDriverRings(\n            block,\n            driverBlock,\n            blockConverters.BlockAvgToCylConverter,\n            hexagon.numPositionsInRing,\n        )\n  
      # block went to 1 component\n        self.assertEqual(1, len([c for c in convertedWithoutDriver]))\n\n    def test_convertHexWithFuelDrOnNegCompAreaBlock(self):\n        \"\"\"\n        Tests the conversion of a control block with linked components, where a component contains a\n        negative area due to thermal expansion.\n        \"\"\"\n        driverBlock = loadTestReactor(TEST_ROOT)[1].core.getAssemblies(Flags.FUEL)[2].getFirstBlock(Flags.FUEL)\n\n        block = buildControlBlockWithLinkedNegativeAreaComponent()\n        areas = [c.getArea() for c in block]\n\n        # Check that a negative area component exists.\n        self.assertLess(min(areas), 0.0)\n\n        driverBlock.spatialGrid = None\n        block.spatialGrid = grids.HexGrid.fromPitch(1.0)\n\n        converter = blockConverters.HexComponentsToCylConverter(block, driverFuelBlock=driverBlock, numExternalRings=2)\n        convertedBlock = converter.convert()\n        # The area is increased because the negative area components are\n        # removed.\n        self.assertGreater(convertedBlock.getArea(), block.getArea())\n\n    def test_convertCartesianLatticeWithFuelDriver(self):\n        \"\"\"Test conversion with fuel driver.\"\"\"\n        r = loadTestReactor(TEST_ROOT, inputFileName=\"zpprTest.yaml\")[1]\n        driverBlock = r.core.getAssemblies(Flags.FUEL)[2].getFirstBlock(Flags.FUEL)\n        block = r.core.getAssemblies(Flags.FUEL)[2].getFirstBlock(Flags.BLANKET)\n\n        driverBlock.spatialGrid = grids.CartesianGrid.fromRectangle(1.0, 1.0)\n        block.spatialGrid = grids.CartesianGrid.fromRectangle(1.0, 1.0)\n\n        converter = blockConverters.BlockAvgToCylConverter\n        self._testConvertWithDriverRings(block, driverBlock, converter, lambda n: (n - 1) * 8)\n\n    def _testConvertWithDriverRings(self, block, driverBlock, converterToTest, getNumInRing):\n        area = block.getArea()\n        numExternalFuelRings = [1, 2, 3, 4]\n        numBlocks = 1\n        for 
externalRings in numExternalFuelRings:\n            numBlocks += getNumInRing(externalRings + 1)\n            converter = converterToTest(block, driverFuelBlock=driverBlock, numExternalRings=externalRings)\n            convertedBlock = converter.convert()\n            self.assertAlmostEqual(area * numBlocks, convertedBlock.getArea())\n            self._checkCiclesAreInContact(convertedBlock)\n            plotFile = \"convertedBlock_{0}.svg\".format(externalRings)\n            converter.plotConvertedBlock(fName=plotFile)\n            os.remove(plotFile)\n\n            for c in list(reversed(convertedBlock))[:externalRings]:\n                self.assertTrue(c.isFuel(), \"c was {}\".format(c.name))\n                # remove external driver rings in preparation to check composition\n                convertedBlock.remove(c)\n            convBlockWithoutDriver = convertedBlock\n            self._checkAreaAndComposition(block, convBlockWithoutDriver)\n\n        return convBlockWithoutDriver\n\n    def _checkAreaAndComposition(self, block, convertedBlock):\n        self.assertAlmostEqual(block.getArea(), convertedBlock.getArea())\n        unmergedNucs = block.getNumberDensities()\n        convDens = convertedBlock.getNumberDensities()\n        errorMessage = \"\"\n        nucs = set(unmergedNucs) | set(convDens)\n        for nucName in nucs:\n            n1, n2 = unmergedNucs[nucName], convDens[nucName]\n            try:\n                self.assertAlmostEqual(n1, n2)\n            except AssertionError:\n                errorMessage += \"\\nnuc {} not equal. 
unmerged: {} merged: {}\".format(nucName, n1, n2)\n        self.assertTrue(not errorMessage, errorMessage)\n        bMass = block.getMass()\n        self.assertAlmostEqual(bMass, convertedBlock.getMass())\n        self.assertGreater(bMass, 0.0)  # verify it isn't empty\n\n    def _checkCiclesAreInContact(self, convertedCircleBlock):\n        numComponents = len(convertedCircleBlock)\n        self.assertGreater(numComponents, 1)\n        self.assertTrue(all(isinstance(c, components.Circle) for c in convertedCircleBlock))\n\n        lastCompOD = None\n        lastComp = None\n        for c in sorted(convertedCircleBlock):\n            thisID = c.getDimension(\"id\")\n            thisOD = c.getDimension(\"od\")\n            if lastCompOD is None:\n                self.assertTrue(\n                    thisID == 0,\n                    \"The inner component {} should have an ID of zero\".format(c),\n                )\n            else:\n                self.assertTrue(\n                    thisID == lastCompOD,\n                    \"The component {} with id {} was not in contact with the \"\n                    \"previous component ({}) that had od {}\".format(c, thisID, lastComp, lastCompOD),\n                )\n            lastCompOD = thisOD\n            lastComp = c\n\n\nclass TestToCircles(unittest.TestCase):\n    def test_fromHex(self):\n        actualRadii = blockConverters.radiiFromHexPitches([7.47, 7.85, 8.15])\n        expected = [3.92203, 4.12154, 4.27906]\n        self.assertTrue(np.allclose(expected, actualRadii, rtol=1e-5))\n\n    def test_fromRingOfRods(self):\n        # JOYO-LMFR-RESR-001, rev 1, Table A.2, 5th layer (ring 6)\n        actualRadii = blockConverters.radiiFromRingOfRods(0.76 * 5, 6 * 5, [0.28, 0.315])\n        expected = [3.24034, 3.28553, 3.62584, 3.67104]\n        self.assertTrue(np.allclose(expected, actualRadii, rtol=1e-5))\n\n\ndef _buildJoyoFuel():\n    \"\"\"Build some JOYO components.\"\"\"\n    fuel = components.Circle(\n        
name=\"fuel\",\n        material=\"UO2\",\n        Tinput=20.0,\n        Thot=20.0,\n        od=0.28 * 2,\n        id=0.0,\n        mult=91,\n    )\n    clad = components.Circle(\n        name=\"clad\",\n        material=\"HT9\",\n        Tinput=20.0,\n        Thot=20.0,\n        od=0.315 * 2,\n        id=0.28 * 2,\n        mult=91,\n    )\n    return fuel, clad\n\n\ndef buildControlBlockWithLinkedNegativeAreaComponent():\n    \"\"\"\n    Return a block that contains a bond component that resolves to a negative area once the fuel and\n    clad thermal expansion have occurred.\n    \"\"\"\n    b = blocks.HexBlock(\"control\", height=10.0)\n\n    controlDims = {\"Tinput\": 25.0, \"Thot\": 600, \"od\": 0.77, \"id\": 0.00, \"mult\": 127.0}\n    bondDims = {\n        \"Tinput\": 600,\n        \"Thot\": 600,\n        \"od\": \"clad.id\",\n        \"id\": \"control.od\",\n        \"mult\": 127.0,\n    }\n    cladDims = {\"Tinput\": 25.0, \"Thot\": 450, \"od\": 0.80, \"id\": 0.77, \"mult\": 127.0}\n    wireDims = {\n        \"Tinput\": 25.0,\n        \"Thot\": 450,\n        \"od\": 0.1,\n        \"id\": 0.0,\n        \"mult\": 127.0,\n        \"axialPitch\": 30.0,\n        \"helixDiameter\": 0.9,\n    }\n    ductDims = {\"Tinput\": 25.0, \"Thot\": 400, \"op\": 16, \"ip\": 15.3, \"mult\": 1.0}\n    intercoolantDims = {\n        \"Tinput\": 400,\n        \"Thot\": 400,\n        \"op\": 17.0,\n        \"ip\": ductDims[\"op\"],\n        \"mult\": 1.0,\n    }\n    coolDims = {\"Tinput\": 25.0, \"Thot\": 400}\n\n    control = components.Circle(\"control\", \"UZr\", **controlDims)\n    clad = components.Circle(\"clad\", \"HT9\", **cladDims)\n    # This sets up the linking of the bond to the fuel and the clad components.\n    bond = components.Circle(\"bond\", \"Sodium\", components={\"control\": control, \"clad\": clad}, **bondDims)\n    wire = components.Helix(\"wire\", \"HT9\", **wireDims)\n    duct = components.Hexagon(\"duct\", \"HT9\", **ductDims)\n    coolant = 
components.DerivedShape(\"coolant\", \"Sodium\", **coolDims)\n    intercoolant = components.Hexagon(\"intercoolant\", \"Sodium\", **intercoolantDims)\n\n    b.add(control)\n    b.add(bond)\n    b.add(clad)\n    b.add(wire)\n    b.add(duct)\n    b.add(coolant)\n    b.add(intercoolant)\n\n    return b\n"
  },
  {
    "path": "armi/reactor/converters/tests/test_geometryConverters.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Module to test geometry converters.\"\"\"\n\nimport math\nimport os\nimport unittest\n\nfrom numpy.testing import assert_allclose\n\nfrom armi import runLog\nfrom armi.reactor import blocks, geometry, grids\nfrom armi.reactor.converters import geometryConverters, uniformMesh\nfrom armi.reactor.flags import Flags\nfrom armi.testing import TESTING_ROOT, loadTestReactor, reduceTestReactorRings\nfrom armi.tests import TEST_ROOT, mockRunLogs\nfrom armi.utils import directoryChangers, plotting\nfrom armi.utils.directoryChangers import TemporaryDirectoryChanger\n\nTHIS_DIR = os.path.dirname(__file__)\n\n\nclass TestGeometryConverters(unittest.TestCase):\n    def setUp(self):\n        self.o, self.r = loadTestReactor(TEST_ROOT)\n        self.cs = self.o.cs\n\n    def test_addRing(self):\n        \"\"\"Tests that ``addRing`` adds the correct number of fuel assemblies to the test reactor.\"\"\"\n        converter = geometryConverters.FuelAssemNumModifier(self.cs)\n        converter.numFuelAssems = 7\n        converter.ringsToAdd = 1 * [\"radial shield\"]\n        converter.convert(self.r)\n\n        numAssems = len(self.r.core)\n        self.assertEqual(numAssems, 13)  # should end up with 6 reflector assemblies per 1/3rd Core\n        locator = self.r.core.spatialGrid.getLocatorFromRingAndPos(4, 1)\n        shieldtype = 
self.r.core.childrenByLocator[locator].getType()\n        self.assertEqual(shieldtype, \"radial shield\")  # check that the right thing was added\n\n        # one more test with an uneven number of rings\n        converter.numFuelAssems = 8\n        converter.convert(self.r)\n        numAssems = len(self.r.core)\n        self.assertEqual(numAssems, 19)  # should wind up with 11 reflector assemblies per 1/3rd core\n\n    def test_setNumberOfFuelAssems(self):\n        \"\"\"Tests that ``setNumberOfFuelAssems`` properly changes the number of fuel assemblies.\"\"\"\n        # tests ability to add fuel assemblies\n        converter = geometryConverters.FuelAssemNumModifier(self.cs)\n        converter.numFuelAssems = 60\n        converter.convert(self.r)\n        numFuelAssems = 0\n        for assem in self.r.core:\n            if assem.hasFlags(Flags.FUEL):\n                numFuelAssems += 1\n        self.assertEqual(numFuelAssems, 60)\n\n        # checks that existing fuel assemblies are preserved\n        locator = self.r.core.spatialGrid.getLocatorFromRingAndPos(1, 1)\n        fueltype = self.r.core.childrenByLocator[locator].getType()\n        self.assertEqual(fueltype, \"igniter fuel\")\n\n        # checks that existing control rods are preserved\n        locator = self.r.core.spatialGrid.getLocatorFromRingAndPos(5, 1)\n        controltype = self.r.core.childrenByLocator[locator].getType()\n        self.assertEqual(controltype, \"primary control\")\n\n        # checks that existing reflectors are overwritten with feed fuel\n        locator = self.r.core.spatialGrid.getLocatorFromRingAndPos(9, 5)\n        oldshieldtype = self.r.core.childrenByLocator[locator].getType()\n        self.assertEqual(oldshieldtype, \"feed fuel\")\n\n        # checks that outer assemblies are removed\n        locator = self.r.core.spatialGrid.getLocatorFromRingAndPos(9, 1)\n        with self.assertRaises(KeyError):\n            _ = self.r.core.childrenByLocator[locator]\n\n        # tests 
ability to remove fuel assemblies\n        converter.numFuelAssems = 20\n        converter.convert(self.r)\n        numFuelAssems = 0\n        for assem in self.r.core:\n            if assem.hasFlags(Flags.FUEL):\n                numFuelAssems += 1\n        self.assertEqual(numFuelAssems, 20)\n\n    def test_getAssembliesInSector(self):\n        allAssems = self.r.core.getAssemblies()\n        fullSector = geometryConverters.HexToRZConverter._getAssembliesInSector(self.r.core, 0, 360)\n        self.assertGreaterEqual(len(fullSector), len(allAssems))  # could be > due to edge assems\n        third = geometryConverters.HexToRZConverter._getAssembliesInSector(self.r.core, 0, 30)\n        # could solve this analytically based on test core size\n        self.assertAlmostEqual(25, len(third))\n        oneLine = geometryConverters.HexToRZConverter._getAssembliesInSector(self.r.core, 0, 0.001)\n        self.assertAlmostEqual(5, len(oneLine))  # same here\n\n\nclass TestHexToRZConverter(unittest.TestCase):\n    def setUp(self):\n        self.o, self.r = loadTestReactor(TEST_ROOT)\n        reduceTestReactorRings(self.r, self.o.cs, 2)\n        self.cs = self.o.cs\n\n        runLog.setVerbosity(\"extra\")\n        self._expandReactor = False\n        self._massScaleFactor = 1.0\n        if not self._expandReactor:\n            self._massScaleFactor = 3.0\n\n    def tearDown(self):\n        del self.o\n        del self.cs\n        del self.r\n\n    def test_convert(self):\n        \"\"\"Test HexToRZConverter.convert().\n\n        Notes\n        -----\n        Ensure the converted reactor has 1) nuclides and nuclide masses that match the\n        original reactor, 2) for a given (r,z,theta) location the expected block type exists,\n        3) the converted reactor has the right (r,z,theta) coordinates, and 4) the converted\n        reactor blocks all have a single (homogenized) component.\n\n        .. 
test:: Convert a 3D hex reactor core to an RZ-Theta core.\n            :id: T_ARMI_CONV_3DHEX_TO_2DRZ\n            :tests: R_ARMI_CONV_3DHEX_TO_2DRZ\n        \"\"\"\n        # make the reactor smaller, because of a test parallelization edge case\n        for ring in [9, 8, 7, 6, 5, 4, 3]:\n            self.r.core.removeAssembliesInRing(ring, self.o.cs)\n\n        converterSettings = {\n            \"radialConversionType\": \"Ring Compositions\",\n            \"axialConversionType\": \"Axial Coordinates\",\n            \"uniformThetaMesh\": True,\n            \"thetaBins\": 1,\n            \"axialMesh\": [25, 50, 75, 100, 150, 175],\n            \"thetaMesh\": [2 * math.pi],\n        }\n\n        expectedMassDict, expectedNuclideList = self._getExpectedData()\n        geomConv = geometryConverters.HexToRZConverter(self.cs, converterSettings, expandReactor=self._expandReactor)\n        geomConv.convert(self.r)\n        newR = geomConv.convReactor\n\n        self._checkBlockComponents(newR)\n        self._checkNuclidesMatch(expectedNuclideList, newR)\n        self._checkNuclideMasses(expectedMassDict, newR)\n        self._checkBlockAtMeshPoint(geomConv)\n        self._checkReactorMeshCoordinates(geomConv)\n        _figs = geomConv.plotConvertedReactor()\n        with directoryChangers.TemporaryDirectoryChanger():\n            geomConv.plotConvertedReactor(\"fname\")\n\n        # bonus test: reset() works after converter has filled in values\n        geomConv.reset()\n        self.assertIsNone(geomConv.convReactor)\n        self.assertIsNone(geomConv._radialMeshConversionType)\n        self.assertIsNone(geomConv._axialMeshConversionType)\n        self.assertIsNone(geomConv._currentRadialZoneType)\n        self.assertEqual(geomConv._newBlockNum, 0)\n\n    def _checkBlockAtMeshPoint(self, geomConv):\n        b = plotting._getBlockAtMeshPoint(geomConv.convReactor, 0.0, 2.0 * math.pi, 0.0, 12.0, 50.0, 75.0)\n        self.assertTrue(b.hasFlags(Flags.FUEL))\n\n    def 
_checkReactorMeshCoordinates(self, geomConv):\n        thetaMesh, radialMesh, axialMesh = plotting._getReactorMeshCoordinates(geomConv.convReactor)\n        expectedThetaMesh = [math.pi * 2.0]\n        expectedAxialMesh = [25.0, 50.0, 75.0, 100.0, 150.0, 175.0]\n        expectedRadialMesh = [\n            8.794379,\n            23.26774,\n        ]\n        assert_allclose(expectedThetaMesh, thetaMesh)\n        assert_allclose(expectedRadialMesh, radialMesh)\n        assert_allclose(expectedAxialMesh, axialMesh)\n\n    def _getExpectedData(self):\n        \"\"\"Retrieve the mass of all nuclides in the reactor prior to converting.\"\"\"\n        expectedMassDict = {}\n        expectedNuclideList = self.r.blueprints.allNuclidesInProblem\n        for nuclide in sorted(expectedNuclideList):\n            expectedMassDict[nuclide] = self.r.core.getMass(nuclide)\n        return expectedMassDict, expectedNuclideList\n\n    def _checkBlockComponents(self, newR):\n        for b in newR.core.iterBlocks():\n            if len(b) != 1:\n                raise ValueError(\"Block {} has {} components and should only have 1\".format(b, len(b)))\n\n    def _checkNuclidesMatch(self, expectedNuclideList, newR):\n        \"\"\"Check that the nuclide lists match before and after conversion.\"\"\"\n        actualNuclideList = newR.blueprints.allNuclidesInProblem\n        if set(expectedNuclideList) != set(actualNuclideList):\n            diffList = sorted(set(expectedNuclideList).difference(actualNuclideList))\n            diffList += sorted(set(actualNuclideList).difference(expectedNuclideList))\n            runLog.warning(diffList)\n            raise ValueError(\n                \"{0} nuclides do not match between the original and converted reactor\".format(len(diffList))\n            )\n\n    def _checkNuclideMasses(self, expectedMassDict, newR):\n        \"\"\"Check that all nuclide masses in the new reactor are equivalent to before the conversion.\"\"\"\n        massMismatchCount = 
0\n        for nuclide in expectedMassDict.keys():\n            expectedMass = expectedMassDict[nuclide]\n            actualMass = newR.core.getMass(nuclide) / self._massScaleFactor\n            if round(abs(expectedMass - actualMass), 7) != 0.0:\n                print(\"{:6s} {:10.2f} {:10.2f}\".format(nuclide, expectedMass, actualMass))\n                massMismatchCount += 1\n\n        # Raise error if there are any inconsistent masses\n        if massMismatchCount > 0:\n            raise ValueError(\n                \"{0} nuclides have masses that are not consistent after the conversion\".format(massMismatchCount)\n            )\n\n    def test_createHomogenizedRZTBlock(self):\n        newBlock = blocks.ThRZBlock(\"testBlock\", self.cs)\n        a = self.r.core[0]\n        converterSettings = {}\n        geomConv = geometryConverters.HexToRZConverter(self.cs, converterSettings, expandReactor=self._expandReactor)\n        volumeExpected = a.getVolume()\n        (\n            _atoms,\n            _newBlockType,\n            _newBlockTemp,\n            newBlockVol,\n        ) = geomConv.createHomogenizedRZTBlock(newBlock, 0, a.getHeight(), [a])\n\n        # The volume of the radialZone and the radialThetaZone should be equal for RZ geometry\n        self.assertAlmostEqual(volumeExpected, newBlockVol)\n\n\nclass TestEdgeAssemblyChanger(unittest.TestCase):\n    def setUp(self):\n        \"\"\"Use the related setup in the testFuelHandlers module.\"\"\"\n        self.o, self.r = loadTestReactor(TEST_ROOT)\n        reduceTestReactorRings(self.r, self.o.cs, 3)\n\n    def tearDown(self):\n        del self.o\n        del self.r\n\n    def test_edgeAssemblies(self):\n        \"\"\"Sanity check on adding edge assemblies.\n\n        .. 
test:: Test adding/removing assemblies from a reactor.\n            :id: T_ARMI_ADD_EDGE_ASSEMS\n            :tests: R_ARMI_ADD_EDGE_ASSEMS\n        \"\"\"\n\n        def getAssemByRingPos(ringPos: tuple):\n            for a in self.r.core:\n                if a.spatialLocator.getRingPos() == ringPos:\n                    return a\n            return None\n\n        numAssemsOrig = len(self.r.core)\n        # assert that there is no assembly in the (3, 4) (ring, position).\n        self.assertIsNone(getAssemByRingPos((3, 4)))\n        # add the assembly\n        converter = geometryConverters.EdgeAssemblyChanger()\n        converter.addEdgeAssemblies(self.r.core)\n        numAssemsWithEdgeAssem = len(self.r.core)\n        # assert that there is an assembly in the (3, 4) (ring, position).\n        self.assertIsNotNone(getAssemByRingPos((3, 4)))\n        self.assertTrue(numAssemsWithEdgeAssem > numAssemsOrig)\n\n        # try to add the assembly again (you can't)\n        with mockRunLogs.BufferLog() as mock:\n            converter.addEdgeAssemblies(self.r.core)\n            self.assertIn(\"Skipping addition of edge assemblies\", mock.getStdout())\n            self.assertTrue(numAssemsWithEdgeAssem, len(self.r.core))\n\n        # must be added after geom transform\n        for b in self.o.r.core.iterBlocks():\n            b.p.power = 1.0\n        converter.scaleParamsRelatedToSymmetry(self.r.core)\n        a = self.r.core.getAssembliesOnSymmetryLine(grids.BOUNDARY_0_DEGREES)[0]\n        self.assertTrue(all(b.p.power == 2.0 for b in a), \"Powers were not scaled\")\n\n        # remove the assembly that was added\n        converter.removeEdgeAssemblies(self.r.core)\n        self.assertIsNone(getAssemByRingPos((3, 4)))\n        self.assertEqual(numAssemsOrig, len(self.r.core))\n\n\nclass TestThirdCoreHexToFullCoreChanger(unittest.TestCase):\n    def setUp(self):\n        self.td = TemporaryDirectoryChanger()\n        self.td.__enter__()\n        self.o, self.r = 
loadTestReactor(\n            inputFilePath=TESTING_ROOT, inputFileName=\"reactors/thirdSmallHexReactor/thirdSmallHexReactor.yaml\"\n        )\n\n        # initialize the block powers to a uniform power profile, accounting for the loaded reactor being 1/3 core\n        numBlocksInFullCore = 0\n        for a in self.r.core:\n            if a.getLocation() == \"001-001\":\n                for b in a:\n                    numBlocksInFullCore += 1\n            else:\n                for b in a:\n                    # account for the 1/3 symmetry\n                    numBlocksInFullCore += 3\n        for a in self.r.core:\n            if a.getLocation() == \"001-001\":\n                for b in a:\n                    b.p[\"power\"] = self.o.cs[\"power\"] / numBlocksInFullCore / 3\n            else:\n                for b in a:\n                    b.p[\"power\"] = self.o.cs[\"power\"] / numBlocksInFullCore\n\n    def tearDown(self):\n        del self.o\n        del self.r\n        self.td.__exit__(None, None, None)\n\n    def test_growToFullCoreFromThirdCore(self):\n        \"\"\"Test that a hex core can be converted from a third core to a full core geometry.\n\n        .. 
test:: Convert a third-core to a full-core geometry and then restore it.\n            :id: T_ARMI_THIRD_TO_FULL_CORE0\n            :tests: R_ARMI_THIRD_TO_FULL_CORE\n        \"\"\"\n\n        def getLTAAssems():\n            aList = []\n            for a in self.r.core:\n                if a.getType() == \"lta fuel\":\n                    aList.append(a)\n            return aList\n\n        # Check the initialization of the third core model\n        self.assertFalse(self.r.core.isFullCore)\n        self.assertEqual(\n            self.r.core.symmetry,\n            geometry.SymmetryType(geometry.DomainType.THIRD_CORE, geometry.BoundaryType.PERIODIC),\n        )\n        initialNumBlocks = len(self.r.core.getBlocks())\n        assems = getLTAAssems()\n        expectedLoc = [(3, 2)]\n        # set ringPosHist to be propagated to full core\n        assem = self.r.core.getAssemblyWithStringLocation(\"003-002\")\n        assem.p.ringPosHist = [(3, 2), (3, 12), (2, 2), (3, 2)]\n\n        for i, a in enumerate(assems):\n            self.assertEqual(a.spatialLocator.getRingPos(), expectedLoc[i])\n        self.assertAlmostEqual(self.r.core.getTotalBlockParam(\"power\"), self.o.cs[\"power\"] / 3, places=5)\n        self.assertGreater(\n            self.r.core.getTotalBlockParam(\"power\", calcBasedOnFullObj=True),\n            self.o.cs[\"power\"] / 3,\n        )\n\n        # Perform reactor conversion\n        changer = geometryConverters.ThirdCoreHexToFullCoreChanger(self.o.cs)\n        changer.convert(self.r)\n\n        # Check the full core conversion is successful\n        self.assertTrue(self.r.core.isFullCore)\n        self.assertGreater(len(self.r.core.getBlocks()), initialNumBlocks)\n        self.assertEqual(self.r.core.symmetry.domain, geometry.DomainType.FULL_CORE)\n        assems = getLTAAssems()\n        expectedLoc = [(3, 2), (3, 6), (3, 10)]\n        expectedRingPosHists = [\n            [(3, 2), (3, 12), (2, 2), (3, 2)],\n            [(3, 6), (3, 4), (2, 4), 
(3, 6)],\n            [(3, 10), (3, 8), (2, 6), (3, 10)],\n        ]\n        for i, a in enumerate(assems):\n            self.assertEqual(a.spatialLocator.getRingPos(), expectedLoc[i])\n            self.assertListEqual(a.p.ringPosHist, expectedRingPosHists[i])\n\n        # ensure that block power is handled correctly\n        self.assertAlmostEqual(self.r.core.getTotalBlockParam(\"power\"), self.o.cs[\"power\"], places=5)\n        self.assertAlmostEqual(\n            self.r.core.getTotalBlockParam(\"power\", calcBasedOnFullObj=True),\n            self.o.cs[\"power\"],\n            places=5,\n        )\n\n        # Check that the geometry can be restored to a third core\n        changer.restorePreviousGeometry(self.r)\n        self.assertEqual(initialNumBlocks, len(self.r.core.getBlocks()))\n        self.assertEqual(\n            self.r.core.symmetry,\n            geometry.SymmetryType(geometry.DomainType.THIRD_CORE, geometry.BoundaryType.PERIODIC),\n        )\n        self.assertFalse(self.r.core.isFullCore)\n        self.assertAlmostEqual(self.r.core.getTotalBlockParam(\"power\"), self.o.cs[\"power\"] / 3, places=5)\n        assems = getLTAAssems()\n        expectedLoc = [(3, 2)]\n        for i, a in enumerate(assems):\n            self.assertEqual(a.spatialLocator.getRingPos(), expectedLoc[i])\n\n    def test_initNewFullReactor(self):\n        \"\"\"Test that initNewReactor will growToFullCore if necessary.\"\"\"\n        # Perform reactor conversion\n        changer = geometryConverters.ThirdCoreHexToFullCoreChanger(self.o.cs)\n        changer.convert(self.r)\n\n        converter = uniformMesh.NeutronicsUniformMeshConverter(self.o.cs)\n        newR = converter.initNewReactor(self.r, self.o.cs)\n\n        # Check the full core conversion is successful\n        self.assertTrue(self.r.core.isFullCore)\n        self.assertTrue(newR.core.isFullCore)\n        self.assertEqual(newR.core.symmetry.domain, geometry.DomainType.FULL_CORE)\n\n    def 
test_skipGrowToFullCoreWhenAlreadyFullCore(self):\n        \"\"\"Test that hex core is not modified when third core to full core changer is called on an\n        already full core geometry.\n\n        .. test: Convert a one-third core to full core and restore back to one-third core.\n            :id: T_ARMI_THIRD_TO_FULL_CORE2\n            :tests: R_ARMI_THIRD_TO_FULL_CORE\n        \"\"\"\n        # Check the initialization of the third core model and convert to a full core\n        self.assertFalse(self.r.core.isFullCore)\n        self.assertEqual(\n            self.r.core.symmetry,\n            geometry.SymmetryType(geometry.DomainType.THIRD_CORE, geometry.BoundaryType.PERIODIC),\n        )\n        numBlocksThirdCore = len(self.r.core.getBlocks())\n        # convert the third core to full core\n        changer = geometryConverters.ThirdCoreHexToFullCoreChanger(self.o.cs)\n        with mockRunLogs.BufferLog() as mock:\n            changer.convert(self.r)\n            self.assertIn(\"Expanding to full core geometry\", mock.getStdout())\n        numBlocksFullCore = len(self.r.core.getBlocks())\n        self.assertEqual(self.r.core.symmetry.domain, geometry.DomainType.FULL_CORE)\n        # try to convert to full core again (it shouldn't do anything)\n        with mockRunLogs.BufferLog() as mock:\n            changer.convert(self.r)\n            self.assertIn(\n                \"Detected that full core reactor already exists. 
Cannot expand.\",\n                mock.getStdout(),\n            )\n        self.assertEqual(self.r.core.symmetry.domain, geometry.DomainType.FULL_CORE)\n        self.assertEqual(numBlocksFullCore, len(self.r.core.getBlocks()))\n        # restore back to 1/3 core\n        with mockRunLogs.BufferLog() as mock:\n            changer.restorePreviousGeometry(self.r)\n            self.assertIn(\"revert from full to 1/3 core\", mock.getStdout())\n        self.assertEqual(numBlocksThirdCore, len(self.r.core.getBlocks()))\n        self.assertEqual(\n            self.r.core.symmetry,\n            geometry.SymmetryType(geometry.DomainType.THIRD_CORE, geometry.BoundaryType.PERIODIC),\n        )\n"
  },
  {
    "path": "armi/reactor/converters/tests/test_meshConverters.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Unit tests of RZ Mesh Converter.\"\"\"\n\nimport math\nimport unittest\n\nfrom armi.reactor.converters import geometryConverters, meshConverters\nfrom armi.testing import TESTING_ROOT, loadTestReactor\n\n\nclass TestRZReactorMeshConverter(unittest.TestCase):\n    \"\"\"Loads a hex reactor and converts its mesh to RZTheta coordinates.\"\"\"\n\n    def setUp(self):\n        self.o, self.r = loadTestReactor(\n            inputFilePath=TESTING_ROOT, inputFileName=\"reactors/thirdSmallHexReactor/thirdSmallHexReactor.yaml\"\n        )\n        self._converterSettings = {\n            \"uniformThetaMesh\": True,\n            \"thetaBins\": 1,\n            \"thetaMesh\": [2 * math.pi],\n            \"axialMesh\": [25.0, 50.0, 174.0],\n            \"axialSegsPerBin\": 1,\n        }\n\n    def test_meshByRingCompAxialBinsSmallCore(self):\n        expectedRadialMesh = [2, 3, 4, 4]\n        expectedAxialMesh = [15.0, 35.32, 226.46]\n        expectedThetaMesh = [2 * math.pi]\n\n        meshConvert = meshConverters.RZThetaReactorMeshConverterByRingCompositionAxialBins(self._converterSettings)\n        meshConvert.generateMesh(self.r)\n\n        self.assertListEqual(meshConvert.radialMesh, expectedRadialMesh)\n        self.assertListEqual(meshConvert.axialMesh, expectedAxialMesh)\n        self.assertListEqual(meshConvert.thetaMesh, expectedThetaMesh)\n\n    def 
test_meshByRingCompoAxialCoordsSmallCore(self):\n        expectedRadialMesh = [2, 3, 4, 4]\n        expectedAxialMesh = [25.0, 50.0, 226.46]\n        expectedThetaMesh = [2 * math.pi]\n\n        meshConvert = meshConverters.RZThetaReactorMeshConverterByRingCompositionAxialCoordinates(\n            self._converterSettings\n        )\n        meshConvert.generateMesh(self.r)\n\n        self.assertListEqual(meshConvert.radialMesh, expectedRadialMesh)\n        self.assertListEqual(meshConvert.axialMesh, expectedAxialMesh)\n        self.assertListEqual(meshConvert.thetaMesh, expectedThetaMesh)\n\n    def test_meshByRingCompAxialFlagsSmallCore(self):\n        expectedRadialMesh = [2, 3, 4, 4]\n        expectedAxialMesh = [15.0, 35.32, 226.46]\n        expectedThetaMesh = [2 * math.pi]\n\n        meshConvert = meshConverters.RZThetaReactorMeshConverterByRingCompositionAxialFlags(self._converterSettings)\n        meshConvert.generateMesh(self.r)\n\n        self.assertListEqual(meshConvert.radialMesh, expectedRadialMesh)\n        self.assertListEqual(meshConvert.axialMesh, expectedAxialMesh)\n        self.assertListEqual(meshConvert.thetaMesh, expectedThetaMesh)\n\n    def _growReactor(self):\n        modifier = geometryConverters.FuelAssemNumModifier(self.o.cs)\n        modifier.numFuelAssems = 1\n        modifier.ringsToAdd = 3 * [\"inner fuel\"] + [\"middle core fuel\"]\n        modifier.convert(self.r)\n        self._converterSettingsLargerCore = {\n            \"uniformThetaMesh\": True,\n            \"thetaBins\": 1,\n            \"thetaMesh\": [2 * math.pi],\n            \"axialMesh\": [25.0, 30.0, 60.0, 90.0, 105.2151, 152.0, 174.0],\n            \"axialSegsPerBin\": 2,\n        }\n\n    def test_meshByRingCompAxialBinsLargeCore(self):\n        self._growReactor()\n        expectedRadialMesh = [2, 3, 4, 5, 6]\n        expectedAxialMesh = [35.32, 226.46]\n        expectedThetaMesh = [2 * math.pi]\n\n        meshConvert = 
meshConverters.RZThetaReactorMeshConverterByRingCompositionAxialBins(\n            self._converterSettingsLargerCore\n        )\n        meshConvert.generateMesh(self.r)\n\n        self.assertListEqual(meshConvert.radialMesh, expectedRadialMesh)\n        self.assertListEqual(meshConvert.axialMesh, expectedAxialMesh)\n        self.assertListEqual(meshConvert.thetaMesh, expectedThetaMesh)\n\n    def test_meshByRingCompAxialCoordsLargeCore(self):\n        self._growReactor()\n        expectedRadialMesh = [2, 3, 4, 5, 6]\n        expectedAxialMesh = [25.0, 30.0, 60.0, 90.0, 105.2151, 152.0, 226.46]\n        expectedThetaMesh = [2 * math.pi]\n\n        meshConvert = meshConverters.RZThetaReactorMeshConverterByRingCompositionAxialCoordinates(\n            self._converterSettingsLargerCore\n        )\n        meshConvert.generateMesh(self.r)\n\n        self.assertListEqual(meshConvert.radialMesh, expectedRadialMesh)\n        self.assertListEqual(meshConvert.axialMesh, expectedAxialMesh)\n        self.assertListEqual(meshConvert.thetaMesh, expectedThetaMesh)\n\n    def test_meshByRingCompAxialFlagsLargeCore(self):\n        self._growReactor()\n        expectedRadialMesh = [2, 3, 4, 5, 6]\n        expectedAxialMesh = [15.0, 35.32, 226.46]\n        expectedThetaMesh = [2 * math.pi]\n\n        meshConvert = meshConverters.RZThetaReactorMeshConverterByRingCompositionAxialFlags(\n            self._converterSettingsLargerCore\n        )\n        meshConvert.generateMesh(self.r)\n\n        self.assertListEqual(meshConvert.radialMesh, expectedRadialMesh)\n        self.assertListEqual(meshConvert.axialMesh, expectedAxialMesh)\n        self.assertListEqual(meshConvert.thetaMesh, expectedThetaMesh)\n"
  },
  {
    "path": "armi/reactor/converters/tests/test_pinTypeBlockConverters.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Unit tests for pin type block converters.\"\"\"\n\nimport copy\nimport unittest\n\nfrom armi.reactor.converters.pinTypeBlockConverters import (\n    adjustCladThicknessByID,\n    adjustCladThicknessByOD,\n    adjustSmearDensity,\n)\nfrom armi.reactor.flags import Flags\nfrom armi.reactor.tests.test_blocks import buildSimpleFuelBlock, loadTestBlock\n\n\nclass TestPinTypeConverters(unittest.TestCase):\n    def setUp(self):\n        self.block = loadTestBlock()\n\n    def test_adjustCladThicknessByOD(self):\n        thickness = 0.05\n        clad = self.block.getComponent(Flags.CLAD)\n        ref = clad.getDimension(\"id\", cold=True) + 2.0 * thickness\n        adjustCladThicknessByOD(self.block, thickness)\n        cur = clad.getDimension(\"od\", cold=True)\n        curThickness = (clad.getDimension(\"od\", cold=True) - clad.getDimension(\"id\", cold=True)) / 2.0\n        self.assertAlmostEqual(cur, ref)\n        self.assertAlmostEqual(curThickness, thickness)\n\n    def test_adjustCladThicknessByID(self):\n        thickness = 0.05\n        clad = self.block.getComponent(Flags.CLAD)\n        ref = clad.getDimension(\"od\", cold=True) - 2.0 * thickness\n        adjustCladThicknessByID(self.block, thickness)\n        cur = clad.getDimension(\"id\", cold=True)\n        curThickness = (clad.getDimension(\"od\", cold=True) - clad.getDimension(\"id\", cold=True)) / 
2.0\n        self.assertAlmostEqual(cur, ref)\n        self.assertAlmostEqual(curThickness, thickness)\n\n\nclass MassConservationTests(unittest.TestCase):\n    r\"\"\"Tests designed to verify mass conservation during thermal expansion.\"\"\"\n\n    def setUp(self):\n        self.b = buildSimpleFuelBlock()\n\n    def test_adjustSmearDensity(self):\n        r\"\"\"Tests the getting, setting, and getting of smear density functions.\"\"\"\n        bolBlock = copy.deepcopy(self.b)\n\n        s = self.b.getSmearDensity(cold=False)\n\n        fuel = self.b.getComponent(Flags.FUEL)\n        clad = self.b.getComponent(Flags.CLAD)\n\n        self.assertAlmostEqual(s, (fuel.getDimension(\"od\") ** 2) / clad.getDimension(\"id\") ** 2, 8)\n\n        adjustSmearDensity(self.b, self.b.getSmearDensity(), bolBlock=bolBlock)\n\n        s2 = self.b.getSmearDensity(cold=False)\n\n        self.assertAlmostEqual(s, s2, 8)\n\n        adjustSmearDensity(self.b, 0.733, bolBlock=bolBlock)\n        self.assertAlmostEqual(0.733, self.b.getSmearDensity(), 8)\n\n        # try annular fuel\n        clad = self.b.getComponent(Flags.CLAD)\n        fuel = self.b.getComponent(Flags.FUEL)\n\n        fuel.setDimension(\"od\", clad.getDimension(\"id\", cold=True))\n        fuel.setDimension(\"id\", 0.0001)\n\n        adjustSmearDensity(self.b, 0.733, bolBlock=bolBlock)\n        self.assertAlmostEqual(0.733, self.b.getSmearDensity(), 8)\n"
  },
  {
    "path": "armi/reactor/converters/tests/test_uniformMesh.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for the uniform mesh geometry converter.\"\"\"\n\nimport collections\nimport copy\nimport os\nimport random\nimport unittest\nfrom unittest.mock import Mock\n\nimport numpy as np\n\nfrom armi.nuclearDataIO.cccc import isotxs\nfrom armi.physics.neutronics.settings import CONF_XS_KERNEL\nfrom armi.reactor.converters import uniformMesh\nfrom armi.reactor.flags import Flags\nfrom armi.reactor.tests import test_assemblies, test_blocks\nfrom armi.settings.fwSettings.globalSettings import CONF_UNIFORM_MESH_MINIMUM_SIZE\nfrom armi.testing import TESTING_ROOT, loadTestReactor, reduceTestReactorRings\nfrom armi.tests import ISOAA_PATH, TEST_ROOT\n\n_ISOTXS_CACHE = None\n\n\ndef _getIsotxsLibrary():\n    \"\"\"These tests don't modify the isotxs lib, so we only need to load it once.\"\"\"\n    global _ISOTXS_CACHE\n    if _ISOTXS_CACHE is None:\n        _ISOTXS_CACHE = isotxs.readBinary(ISOAA_PATH)\n    return _ISOTXS_CACHE\n\n\nclass DummyFluxOptions:\n    def __init__(self, cs):\n        self.cs = cs\n        self.photons = False\n        self.calcReactionRatesOnMeshConversion = True\n\n\nclass TestConverterFactory(unittest.TestCase):\n    def setUp(self):\n        self.o, self.r = loadTestReactor(\n            inputFilePath=TESTING_ROOT,\n            inputFileName=\"reactors/thirdSmallHexReactor/thirdSmallHexReactor.yaml\",\n        )\n\n        
self.dummyOptions = DummyFluxOptions(self.o.cs)\n\n    def test_converterFactory(self):\n        self.dummyOptions.photons = False\n        neutronConverter = uniformMesh.converterFactory(self.dummyOptions)\n        self.assertTrue(neutronConverter, uniformMesh.NeutronicsUniformMeshConverter)\n\n        self.dummyOptions.photons = True\n        gammaConverter = uniformMesh.converterFactory(self.dummyOptions)\n        self.assertTrue(gammaConverter, uniformMesh.GammaUniformMeshConverter)\n\n\nclass TestAssemblyUniformMesh(unittest.TestCase):\n    \"\"\"\n    Tests individual operations of the uniform mesh converter.\n\n    Uses the test reactor for detailedAxialExpansion\n    \"\"\"\n\n    @classmethod\n    def setUpClass(cls):\n        cls.o, cls.r = loadTestReactor(inputFilePath=os.path.join(TEST_ROOT, \"detailedAxialExpansion\"))\n\n        cls.converter = uniformMesh.NeutronicsUniformMeshConverter(cs=cls.o.cs)\n        cls.converter._sourceReactor = cls.r\n        cls.converter._setParamsToUpdate(\"in\")\n\n    def test_makeAssemWithUniformMesh(self):\n        sourceAssem = self.r.core.getFirstAssembly(Flags.IGNITER)\n        # assign different flags to test flag preservation\n        sourceAssem.p.flags = Flags.FUEL | Flags.IGNITER | Flags.TEST\n\n        self.converter._generateUniformMesh(minimumMeshSize=0.01)\n        b = sourceAssem.getFirstBlock(Flags.FUEL)\n        newAssem = self.converter.makeAssemWithUniformMesh(\n            sourceAssem,\n            self.converter._uniformMesh,\n            paramMapper=uniformMesh.ParamMapper([], [\"power\"], b),\n            mapNumberDensities=True,\n        )\n        self.assertEqual(newAssem.p.flags, sourceAssem.p.flags)\n        # chnage sourceAssem flags to verify that a unique copy was made\n        sourceAssem.p.flags = Flags.FUEL | Flags.IGNITER\n        self.assertNotEqual(newAssem.p.flags, sourceAssem.p.flags)\n\n        prevB = None\n        for newB in newAssem:\n            sourceB = 
sourceAssem.getBlockAtElevation(newB.p.z)\n            if newB.isFuel() and sourceB.isFuel():\n                self.assertEqual(newB.p[\"xsType\"], sourceB.p[\"xsType\"])\n            elif not newB.isFuel() and not sourceB.isFuel():\n                self.assertEqual(newB.p[\"xsType\"], sourceB.p[\"xsType\"])\n            elif newB.isFuel() and not sourceB.isFuel():\n                # a newB that is fuel can overwrite the xsType of a nonfuel sourceB;\n                # this is the expected behavior immediately above the fuel block\n                self.assertEqual(newB.p[\"xsType\"], prevB.p[\"xsType\"])\n            elif sourceB.isFuel() and not newB.isFuel():\n                raise ValueError(\n                    f\"The source block {sourceB} is fuel but uniform mesh convertercreated a nonfuel block {newB}.\"\n                )\n            prevB = newB\n\n        newAssemNumberDens = newAssem.getNumberDensities()\n        for nuc, val in sourceAssem.getNumberDensities().items():\n            self.assertAlmostEqual(val, newAssemNumberDens[nuc])\n\n        for nuc, val in sourceAssem.getNumberDensities().items():\n            if not val:\n                continue\n            self.assertAlmostEqual(newAssem.getNumberOfAtoms(nuc) / sourceAssem.getNumberOfAtoms(nuc), 1.0)\n\n    def test_makeAssemWithUniformMeshSubmesh(self):\n        \"\"\"If sourceAssem has submesh, check that newAssem splits into separate blocks.\"\"\"\n        # assign axMesh to blocks randomly\n        sourceAssem = self.r.core.refAssem\n        for i, b in enumerate(sourceAssem):\n            b.p.axMesh = i % 2 + 1\n\n        self.r.core.updateAxialMesh()\n        newAssem = self.converter.makeAssemWithUniformMesh(\n            sourceAssem,\n            self.r.core.p.axialMesh[1:],\n            paramMapper=uniformMesh.ParamMapper([], [\"power\"], b),\n        )\n\n        self.assertNotEqual(len(newAssem), len(sourceAssem))\n        newHeights = [b.getHeight() for b in newAssem]\n        
sourceHeights = [b.getHeight() / b.p.axMesh for b in sourceAssem for i in range(b.p.axMesh)]\n        self.assertListEqual(newHeights, sourceHeights)\n\n    def test_makeAssemUniformMeshParams(self):\n        \"\"\"Tests creating a uniform mesh assembly while mapping both number densities and specified parameters.\"\"\"\n        sourceAssem = self.r.core.getFirstAssembly(Flags.IGNITER)\n        for b in sourceAssem:\n            b.p.flux = 1.0\n            b.p.power = 10.0\n            b.p.mgFlux = [1.0, 2.0]\n\n        # Create a new assembly that has the same mesh as the source assem, but also demonstrates the transfer of number\n        # densities and parameter data as a 1:1 mapping without any volume integration/data migration based on a\n        # differing mesh.\n        bpNames = [\"flux\", \"power\", \"mgFlux\"]\n        newAssem = self.converter.makeAssemWithUniformMesh(\n            sourceAssem,\n            sourceAssem.getAxialMesh(),\n            paramMapper=uniformMesh.ParamMapper([], bpNames, b),\n            mapNumberDensities=True,\n        )\n        for b, origB in zip(newAssem, sourceAssem):\n            self.assertEqual(b.p.flux, 1.0)\n            self.assertEqual(b.p.power, 10.0)\n            self.assertListEqual(list(b.p.mgFlux), [1.0, 2.0])\n\n            self.assertEqual(b.p.flux, origB.p.flux)\n            self.assertEqual(b.p.power, origB.p.power)\n            self.assertListEqual(list(b.p.mgFlux), list(origB.p.mgFlux))\n            originalNDens = origB.getNumberDensities()\n            for nuc, val in b.getNumberDensities().items():\n                self.assertAlmostEqual(val, originalNDens[nuc])\n\n        # Now, let us update the flux, power, and mgFlux on the new assembly and test that it can be transferred back to\n        # the source assembly.\n        for b in newAssem:\n            b.p.flux = 2.0\n            b.p.power = 20.0\n            b.p.mgFlux = [2.0, 4.0]\n        bpNames = [\"flux\", \"power\", \"mgFlux\"]\n        
uniformMesh.UniformMeshGeometryConverter.setAssemblyStateFromOverlaps(\n            sourceAssembly=newAssem,\n            destinationAssembly=sourceAssem,\n            paramMapper=uniformMesh.ParamMapper([], bpNames, b),\n        )\n        for b, updatedB in zip(newAssem, sourceAssem):\n            self.assertEqual(b.p.flux, 2.0)\n            self.assertEqual(b.p.power, 20.0)\n            self.assertListEqual(list(b.p.mgFlux), [2.0, 4.0])\n\n            self.assertEqual(b.p.flux, updatedB.p.flux)\n            self.assertEqual(b.p.power, updatedB.p.power)\n            self.assertListEqual(list(b.p.mgFlux), list(updatedB.p.mgFlux))\n            originalNDens = updatedB.getNumberDensities()\n            for nuc, val in b.getNumberDensities().items():\n                self.assertAlmostEqual(val, originalNDens[nuc])\n\n    def test_clearAssemblyState(self):\n        \"\"\"Tests clearing the parameter state of an assembly and returning the cached parameters.\"\"\"\n        sourceAssem = self.r.core.getFirstAssembly(Flags.IGNITER)\n        for b in sourceAssem:\n            b.p.flux = 1.0\n            b.p.power = 10.0\n            b.p.mgFlux = [1.0, 2.0]\n\n        for b in sourceAssem:\n            self.assertEqual(b.p.flux, 1.0)\n            self.assertEqual(b.p.power, 10.0)\n            self.assertListEqual(list(b.p.mgFlux), [1.0, 2.0])\n\n        # Let's test the clearing of the assigned parameters on the source assembly.\n        cachedBlockParams = uniformMesh.UniformMeshGeometryConverter.clearStateOnAssemblies(\n            [sourceAssem],\n            blockParamNames=[\"flux\", \"power\", \"mgFlux\"],\n            cache=True,\n        )\n        for b in sourceAssem:\n            self.assertEqual(b.p.flux, b.p.pDefs[\"flux\"].default)\n            self.assertEqual(b.p.power, b.p.pDefs[\"flux\"].default)\n            self.assertEqual(b.p.mgFlux, b.p.pDefs[\"mgFlux\"].default)\n\n            self.assertEqual(cachedBlockParams[b][\"flux\"], 1.0)\n            
self.assertEqual(cachedBlockParams[b][\"power\"], 10.0)\n            self.assertListEqual(list(cachedBlockParams[b][\"mgFlux\"]), [1.0, 2.0])\n\n\nclass TestUniformMeshGenerator(unittest.TestCase):\n    @classmethod\n    def setUpClass(cls):\n        newSettings = {CONF_XS_KERNEL: \"MC2v2\", CONF_UNIFORM_MESH_MINIMUM_SIZE: 3.0}\n        cls.o, cls.r = loadTestReactor(\n            inputFilePath=TESTING_ROOT,\n            inputFileName=\"reactors/thirdSmallHexReactor/thirdSmallHexReactor.yaml\",\n            customSettings=newSettings,\n        )\n\n        cls.r.core.lib = _getIsotxsLibrary()\n\n        # make the mesh a little non-uniform\n        a4 = cls.r.core[4]\n        a4[2].setHeight(a4[2].getHeight() * 1.05)\n        a3 = cls.r.core[3]\n        a3[2].setHeight(a3[2].getHeight() * 1.20)\n\n    def setUp(self):\n        self.generator = uniformMesh.UniformMeshGenerator(self.r, self.o.cs[CONF_UNIFORM_MESH_MINIMUM_SIZE])\n\n    def test_computeAverageAxialMesh(self):\n        refMesh = self.r.core.findAllAxialMeshPoints([self.r.core.getFirstAssembly(Flags.FUEL)])[1:]\n        self.generator._computeAverageAxialMesh()\n        avgMesh = self.generator._commonMesh\n\n        self.assertEqual(len(refMesh), len(avgMesh))\n        self.assertEqual(refMesh[0], avgMesh[0])\n        self.assertNotEqual(refMesh[4], avgMesh[4], \"Not equal above the fuel.\")\n\n    def test_filterMesh(self):\n        \"\"\"\n        Test that the mesh can be correctly filtered.\n\n        .. 
test:: Produce a uniform mesh with a size no smaller than a user-specified value.\n            :id: T_ARMI_UMC_MIN_MESH1\n            :tests: R_ARMI_UMC_MIN_MESH\n        \"\"\"\n        meshList = [1.0, 3.0, 4.0, 7.0, 9.0, 12.0, 16.0, 19.0, 20.0]\n        anchorPoints = [4.0, 16.0]\n        combinedMesh = self.generator._filterMesh(\n            meshList,\n            self.generator.minimumMeshSize,\n            anchorPoints,\n            preference=\"bottom\",\n        )\n        self.assertListEqual(combinedMesh, [1.0, 4.0, 7.0, 12.0, 16.0, 19.0])\n\n        combinedMesh = self.generator._filterMesh(\n            meshList,\n            self.generator.minimumMeshSize,\n            anchorPoints,\n            preference=\"top\",\n        )\n        self.assertListEqual(combinedMesh, [1.0, 4.0, 9.0, 12.0, 16.0, 20.0])\n\n        anchorPoints = [3.0, 4.0]\n        with self.assertRaises(ValueError):\n            self.generator._filterMesh(\n                meshList,\n                self.generator.minimumMeshSize,\n                anchorPoints,\n                preference=\"top\",\n            )\n\n    def test_filteredTopAndBottom(self):\n        fuelBottoms, fuelTops = self.generator._getFilteredMeshTopAndBottom(Flags.FUEL)\n        self.assertListEqual(fuelBottoms, [15.0])\n        self.assertListEqual(fuelTops, [35.32])\n\n        # ctrlAndFuelBottoms and ctrlAndFuelTops include the fuelBottoms and fuelTops, respectively\n        (\n            ctrlAndFuelBottoms,\n            ctrlAndFuelTops,\n        ) = self.generator._getFilteredMeshTopAndBottom(Flags.CONTROL, fuelBottoms, fuelTops)\n        self.assertListEqual(ctrlAndFuelBottoms, [15.0])\n        self.assertListEqual(ctrlAndFuelTops, [35.32])\n\n    def test_generateCommonMesh(self):\n        \"\"\"\n        Covers generateCommonmesh() and _decuspAxialMesh().\n\n        .. 
test:: Produce a uniform mesh with a size no smaller than a user-specified value.\n            :id: T_ARMI_UMC_MIN_MESH0\n            :tests: R_ARMI_UMC_MIN_MESH\n\n        .. test:: Preserve the boundaries of fuel and control material.\n            :id: T_ARMI_UMC_NON_UNIFORM0\n            :tests: R_ARMI_UMC_NON_UNIFORM\n        \"\"\"\n        self.generator.generateCommonMesh()\n        expectedMesh = [\n            15.0,\n            25.16,\n            35.32,\n            60.06580357142856,\n            84.81160714285714,\n            109.55741071428572,\n            134.3032142857143,\n            159.04901785714287,\n            183.79482142857142,\n            208.540625,\n            233.2864285714286,\n        ]\n        for i, item in enumerate(list(self.generator._commonMesh)):\n            self.assertAlmostEqual(item, expectedMesh[i])\n\n\nclass TestUniformMeshComponents(unittest.TestCase):\n    \"\"\"Tests individual operations of the uniform mesh converter.\"\"\"\n\n    @classmethod\n    def setUpClass(cls):\n        cls.o, cls.r = loadTestReactor(\n            inputFilePath=TESTING_ROOT,\n            inputFileName=\"reactors/thirdSmallHexReactor/thirdSmallHexReactor.yaml\",\n        )\n        cls.r.core.lib = _getIsotxsLibrary()\n\n        # make the mesh a little non-uniform\n        a = cls.r.core[4]\n        a[2].setHeight(a[2].getHeight() * 1.05)\n\n    def setUp(self):\n        self.converter = uniformMesh.NeutronicsUniformMeshConverter(cs=self.o.cs)\n        self.converter._sourceReactor = self.r\n\n    def test_blueprintCopy(self):\n        \"\"\"Ensure that necessary blueprint attributes are set.\"\"\"\n        convReactor = self.converter.initNewReactor(self.converter._sourceReactor, self.o.cs)\n        converted = convReactor.blueprints\n        original = self.converter._sourceReactor.blueprints\n        # NOTE: items within toCompare must be list or \"list-like\", like an ordered set\n        toCompare = [\"activeNuclides\", 
\"allNuclidesInProblem\", \"elementsToExpand\", \"inertNuclides\"]\n        for attr in toCompare:\n            for c, o in zip(getattr(converted, attr), getattr(original, attr)):\n                self.assertEqual(c, o)\n\n        # ensure that the assemblies were copied over\n        self.assertTrue(converted.assemblies, msg=\"Assembly objects not copied!\")\n\n\ndef applyNonUniformHeightDistribution(reactor):\n    \"\"\"Modifies some assemblies to have non-uniform axial meshes.\"\"\"\n    for a in reactor.core:\n        delta = 0.0\n        for b in a[:-1]:\n            origHeight = b.getHeight()\n            newHeight = origHeight * (1 + 0.03 * random.uniform(-1, 1))\n            b.setHeight(newHeight)\n            delta += newHeight - origHeight\n\n        a[-1].setHeight(a[-1].getHeight() - delta)\n        a.calculateZCoords()\n\n\nclass TestUniformMesh(unittest.TestCase):\n    \"\"\"Tests full uniform mesh converter, using a smaller test reactor.\"\"\"\n\n    @classmethod\n    def setUpClass(cls):\n        # random seed to support random mesh in unit tests below\n        random.seed(987324987234)\n\n        cls.o, cls.r = loadTestReactor(\n            inputFilePath=TESTING_ROOT,\n            inputFileName=\"reactors/thirdSmallHexReactor/thirdSmallHexReactor.yaml\",\n            customSettings={CONF_XS_KERNEL: \"MC2v2\"},\n        )\n        cls.r.core.lib = _getIsotxsLibrary()\n        cls.r.core.p.keff = 1.0\n        cls.converter = uniformMesh.NeutronicsUniformMeshConverter(cs=cls.o.cs, calcReactionRates=True)\n\n        # reactor parameters\n        cls.r.core.p.beta = 700\n        cls.r.core.p.betaComponents = [100, 150, 150, 100, 100, 100]\n        cls.r.core.p.power = 10\n        cls.reactorParamNames = [\"beta\", \"betaComponents\", \"power\", \"keff\", \"keffUnc\"]\n        cls.converter._cachedReactorCoreParamData = {\"powerDensity\": 1.0}\n        cls.paramMapper = uniformMesh.ParamMapper(cls.reactorParamNames, [], cls.r.core.getFirstBlock())\n\n    
def test_convertNumberDensities(self):\n        \"\"\"\n        Test the reactor mass before and after conversion.\n\n        .. test:: Make a copy of the reactor where the new reactor core has a uniform axial mesh.\n            :id: T_ARMI_UMC\n            :tests: R_ARMI_UMC\n        \"\"\"\n        refMass = self.r.core.getMass(\"U235\")\n        # perturb the heights of the assemblies -> changes the mass of everything in the core\n        applyNonUniformHeightDistribution(self.r)\n        perturbedCoreMass = self.r.core.getMass(\"U235\")\n        self.assertNotEqual(refMass, perturbedCoreMass)\n        self.converter.convert(self.r)\n\n        uniformReactor = self.converter.convReactor\n        uniformMass = uniformReactor.core.getMass(\"U235\")\n\n        # conversion conserved mass\n        self.assertAlmostEqual(perturbedCoreMass, uniformMass)\n        # conversion didn't change source reactor mass\n        self.assertAlmostEqual(self.r.core.getMass(\"U235\"), perturbedCoreMass)\n        # conversion results in uniform axial mesh\n        refAssemMesh = self.converter.convReactor.core.refAssem.getAxialMesh()\n        for a in self.converter.convReactor.core:\n            mesh = a.getAxialMesh()\n            for ref, check in zip(refAssemMesh, mesh):\n                self.assertEqual(ref, check)\n\n\nclass TestUniformMeshLargeReactor(unittest.TestCase):\n    \"\"\"Tests full uniform mesh converter, using a larger test reactor.\"\"\"\n\n    @classmethod\n    def setUpClass(cls):\n        # random seed to support random mesh in unit tests below\n        random.seed(987324987234)\n\n        cls.o, cls.r = loadTestReactor(TEST_ROOT, customSettings={CONF_XS_KERNEL: \"MC2v2\"})\n        reduceTestReactorRings(cls.r, cls.o.cs, 2)\n        cls.r.core.lib = _getIsotxsLibrary()\n        cls.r.core.p.keff = 1.0\n        cls.converter = uniformMesh.NeutronicsUniformMeshConverter(cs=cls.o.cs, calcReactionRates=True)\n\n        # reactor parameters\n        
cls.r.core.p.beta = 700\n        cls.r.core.p.betaComponents = [100, 150, 150, 100, 100, 100]\n        cls.r.core.p.power = 10\n        cls.reactorParamNames = [\"beta\", \"betaComponents\", \"power\", \"keff\", \"keffUnc\"]\n        cls.converter._cachedReactorCoreParamData = {\"powerDensity\": 1.0}\n        cls.paramMapper = uniformMesh.ParamMapper(cls.reactorParamNames, [], cls.r.core.getFirstBlock())\n\n    def test_applyStateToOriginal(self):\n        \"\"\"\n        Test applyStateToOriginal() to revert mesh conversion.\n\n        .. test:: Map select parameters from composites on the new mesh to the original mesh.\n            :id: T_ARMI_UMC_PARAM_BACKWARD0\n            :tests: R_ARMI_UMC_PARAM_BACKWARD\n        \"\"\"\n        applyNonUniformHeightDistribution(self.r)  # NOTE: this perturbs the ref mass\n\n        self.converter.convert(self.r)\n        for ib, b in enumerate(self.converter.convReactor.core.iterBlocks()):\n            b.p.mgFlux = list(range(1, 34))\n            b.p.adjMgFlux = list(range(1, 34))\n            b.p.fastFlux = 2.0\n            b.p.flux = 5.0\n            b.p.power = 5.0\n            b.p.pdens = 0.5\n            b.p.fluxPeak = 10.0 + (-1) ** ib\n\n        # check integral and density params\n        assemblyPowers = [a.calcTotalParam(\"power\") for a in self.converter.convReactor.core]\n        totalPower = self.converter.convReactor.core.calcTotalParam(\"power\", generationNum=2)\n        totalPower2 = self.converter.convReactor.core.calcTotalParam(\"pdens\", volumeIntegrated=True, generationNum=2)\n\n        self.converter.applyStateToOriginal()\n\n        for b in self.r.core.iterBlocks():\n            self.assertAlmostEqual(b.p.fastFlux, 2.0)\n            self.assertAlmostEqual(b.p.flux, 5.0)\n            self.assertAlmostEqual(b.p.pdens, 0.5)\n\n            # fluxPeak is mapped differently as a ParamLocation.MAX value\n            # make sure that it's one of the two exact possible values\n            
self.assertIn(b.p.fluxPeak, [9.0, 11.0])\n\n        for expectedPower, a in zip(assemblyPowers, self.r.core):\n            self.assertAlmostEqual(a.calcTotalParam(\"power\"), expectedPower)\n\n        self.assertAlmostEqual(\n            self.r.core.calcTotalParam(\"pdens\", volumeIntegrated=True, generationNum=2),\n            totalPower2,\n        )\n        self.assertAlmostEqual(self.r.core.calcTotalParam(\"power\", generationNum=2), totalPower)\n\n        self.converter.updateReactionRates()\n        for a in self.r.core:\n            for b in a:\n                self.assertTrue(b.p.rateAbs)\n                self.assertTrue(b.p.rateCap)\n\n        # reactor parameters\n        self.assertEqual(self.r.core.p.power, 10)\n        self.assertEqual(self.r.core.p.beta, 700)\n        self.assertEqual(self.r.core.p.powerDensity, 1.0)\n        self.assertEqual(self.r.core.p.keff, 1.0)\n        self.assertEqual(self.r.core.p.keffUnc, 0.0)\n        self.assertListEqual(self.r.core.p.betaComponents, [100, 150, 150, 100, 100, 100])\n\n\nclass TestCalcReationRates(unittest.TestCase):\n    def test_calcReactionRatesBlockList(self):\n        \"\"\"\n        Test that the efficient reaction rate code executes and sets a param > 0.0.\n\n        .. 
test:: Return the reaction rates for a given list of ArmiObjects.\n            :id: T_ARMI_FLUX_RX_RATES_BY_XS_ID\n            :tests: R_ARMI_FLUX_RX_RATES\n        \"\"\"\n        b = test_blocks.loadTestBlock()\n        test_blocks.applyDummyData(b)\n        self.assertAlmostEqual(b.p.rateAbs, 0.0)\n        blockList = [copy.deepcopy(b) for _i in range(3)]\n        xsID = b.getMicroSuffix()\n        xsNucDict = {nuc: b.core.lib.getNuclide(nuc, xsID) for nuc in b.getNuclides()}\n        uniformMesh.UniformMeshGeometryConverter._calcReactionRatesBlockList(blockList, 1.01, xsNucDict)\n        for b in blockList:\n            self.assertGreater(b.p.rateAbs, 0.0)\n            vfrac = b.getComponentAreaFrac(Flags.FUEL)\n            self.assertEqual(b.p.fisDens, b.p.rateFis / vfrac)\n            self.assertEqual(b.p.fisDensHom, b.p.rateFis)\n\n\nclass TestGammaUniformMesh(unittest.TestCase):\n    \"\"\"Tests gamma uniform mesh converter.\"\"\"\n\n    @classmethod\n    def setUpClass(cls):\n        # random seed to support random mesh in unit tests below\n        random.seed(987324987234)\n\n        cls.o, cls.r = loadTestReactor(\n            inputFilePath=TESTING_ROOT,\n            inputFileName=\"reactors/thirdSmallHexReactor/thirdSmallHexReactor.yaml\",\n            customSettings={CONF_XS_KERNEL: \"MC2v2\"},\n        )\n        cls.r.core.lib = _getIsotxsLibrary()\n        cls.r.core.p.keff = 1.0\n        cls.converter = uniformMesh.GammaUniformMeshConverter(cs=cls.o.cs)\n\n    def test_convertNumberDensities(self):\n        refMass = self.r.core.getMass(\"U235\")\n        applyNonUniformHeightDistribution(self.r)  # this changes the mass of everything in the core\n        perturbedCoreMass = self.r.core.getMass(\"U235\")\n        self.assertNotEqual(refMass, perturbedCoreMass)\n        self.converter.convert(self.r)\n\n        uniformReactor = self.converter.convReactor\n        uniformMass = uniformReactor.core.getMass(\"U235\")\n\n        
self.assertAlmostEqual(perturbedCoreMass, uniformMass)  # conversion conserved mass\n        # conversion didn't change source reactor mass\n        self.assertAlmostEqual(self.r.core.getMass(\"U235\"), perturbedCoreMass)\n\n    def test_applyStateToOriginal(self):\n        \"\"\"\n        Test applyStateToOriginal() to revert mesh conversion.\n\n        .. test:: Map select parameters from composites on the new mesh to the original mesh.\n            :id: T_ARMI_UMC_PARAM_BACKWARD1\n            :tests: R_ARMI_UMC_PARAM_BACKWARD\n        \"\"\"\n        applyNonUniformHeightDistribution(self.r)  # note: this perturbs the ref. mass\n\n        # set original parameters on pre-mapped core with non-uniform assemblies\n        for b in self.r.core.iterBlocks():\n            b.p.mgFlux = list(range(33))\n            b.p.adjMgFlux = list(range(33))\n            b.p.fastFlux = 2.0\n            b.p.flux = 5.0\n            b.p.power = 5.0\n            b.p.linPow = 2.0\n\n        # set new parameters on core with uniform assemblies (emulate a physics kernel)\n        self.converter.convert(self.r)\n        for b in self.converter.convReactor.core.iterBlocks():\n            b.p.powerGamma = 0.5\n            b.p.powerNeutron = 0.5\n            b.p.linPow = 10.0\n            b.p.power = b.p.powerGamma + b.p.powerNeutron\n\n        # check integral and density params\n        assemblyPowers = [a.calcTotalParam(\"power\") for a in self.converter.convReactor.core]\n        assemblyGammaPowers = [a.calcTotalParam(\"powerGamma\") for a in self.converter.convReactor.core]\n        totalPower = self.converter.convReactor.core.calcTotalParam(\"power\", generationNum=2)\n        totalPowerGamma = self.converter.convReactor.core.calcTotalParam(\"powerGamma\", generationNum=2)\n\n        self.converter.applyStateToOriginal()\n\n        for b in self.r.core.iterBlocks():\n            # equal to original value because these were never mapped\n            self.assertEqual(b.p.fastFlux, 2.0)\n 
           self.assertEqual(b.p.flux, 5.0)\n\n            # not equal because blocks are different size\n            self.assertNotEqual(b.p.powerGamma, 0.5)\n            self.assertNotEqual(b.p.powerNeutron, 0.5)\n            self.assertNotEqual(b.p.power, 1.0)\n\n            # has updated value\n            self.assertAlmostEqual(b.p.linPow, 10.0)\n\n        # equal because these are mapped\n        for expectedPower, expectedGammaPower, a in zip(assemblyPowers, assemblyGammaPowers, self.r.core):\n            self.assertAlmostEqual(a.calcTotalParam(\"power\"), expectedPower)\n            self.assertAlmostEqual(a.calcTotalParam(\"powerGamma\"), expectedGammaPower)\n\n        self.assertAlmostEqual(self.r.core.calcTotalParam(\"powerGamma\", generationNum=2), totalPowerGamma)\n        self.assertAlmostEqual(self.r.core.calcTotalParam(\"power\", generationNum=2), totalPower)\n\n\nclass TestParamConversion(unittest.TestCase):\n    def setUp(self):\n        \"\"\"\n        Build two assemblies.\n\n        The source assembly has two blocks, heights 3 and 7 cm. The destination has one big block that's 10 cm. Flux is\n        set to 5 and 10 respectively on the two source blocks. 
They are populated with arbitrary flux and pdens values.\n        \"\"\"\n        self.sourceAssem, self.destinationAssem = test_assemblies.buildTestAssemblies()[2:]\n        self.height1 = 3.0\n        self.height2 = 7.0\n        self.sourceAssem[0].setHeight(self.height1)\n        self.sourceAssem[0].p.flux = 5.0\n        self.sourceAssem[1].setHeight(self.height2)\n        self.sourceAssem[1].p.flux = 10.0\n        self.sourceAssem.calculateZCoords()\n\n        self.destinationAssem[0].setHeight(self.height1 + self.height2)\n        self.destinationAssem.calculateZCoords()\n\n        # This sets up a caching for the `mgNeutronVelocity` block parameter on each of the blocks of the destination\n        # assembly without setting the data on the blocks of the source assembly to demonstrate that only new parameters\n        # set on the source assembly will be mapped to the destination assembly. This ensures that parameters that are\n        # not being set on the source assembly are not cleared out on the destination assembly with\n        # `setAssemblyStateFromOverlaps` is called.\n        self._cachedBlockParamData = collections.defaultdict(dict)\n        for b in self.destinationAssem:\n            self._cachedBlockParamData[b][\"mgNeutronVelocity\"] = [1.0] * 33\n            b.p[\"mgNeutronVelocity\"] = self._cachedBlockParamData[b][\"mgNeutronVelocity\"]\n\n    def test_setStateFromOverlaps(self):\n        \"\"\"\n        Test that state is translated correctly from source to dest assems.\n\n        Here we set flux and pdens to 3 on the source blocks.\n\n        .. 
test:: Map select parameters from composites on the original mesh to the new mesh.\n            :id: T_ARMI_UMC_PARAM_FORWARD\n            :tests: R_ARMI_UMC_PARAM_FORWARD\n        \"\"\"\n        paramList = [\"flux\", \"pdens\"]\n        for pName in paramList:\n            for b in self.sourceAssem:\n                b.p[pName] = 3\n\n        bpNames = paramList + [\"mgNeutronVelocity\"]\n        uniformMesh.UniformMeshGeometryConverter.setAssemblyStateFromOverlaps(\n            self.sourceAssem,\n            self.destinationAssem,\n            paramMapper=uniformMesh.ParamMapper([], bpNames, b),\n        )\n\n        for paramName in paramList:\n            sourceVal1 = self.sourceAssem[0].p[paramName]\n            sourceVal2 = self.sourceAssem[1].p[paramName]\n            self.assertAlmostEqual(\n                self.destinationAssem[0].p[paramName],\n                (sourceVal1 * self.height1 + sourceVal2 * self.height2) / (self.height1 + self.height2),\n            )\n\n        for b in self.sourceAssem:\n            self.assertIsNone(b.p.mgNeutronVelocity)\n\n        for b in self.destinationAssem:\n            self.assertListEqual(\n                b.p.mgNeutronVelocity,\n                self._cachedBlockParamData[b][\"mgNeutronVelocity\"],\n            )\n\n\nclass TestUMNonUAssemFlags(unittest.TestCase):\n    \"\"\"Tests a reactor conversion with only a subset of assemblies being defined as having a non-uniform mesh.\"\"\"\n\n    @classmethod\n    def setUpClass(cls):\n        # random seed to support random mesh in unit tests below\n        random.seed(987324987234)\n\n        cls.o, cls.r = loadTestReactor(\n            inputFilePath=TESTING_ROOT,\n            inputFileName=\"reactors/thirdSmallHexReactor/thirdSmallHexReactor.yaml\",\n            customSettings={\n                CONF_XS_KERNEL: \"MC2v2\",\n                \"nonUniformAssemFlags\": [\"primary control\"],\n            },\n        )\n        cls.r.core.lib = _getIsotxsLibrary()\n        
cls.r.core.p.keff = 1.0\n        cls.converter = uniformMesh.NeutronicsUniformMeshConverter(cs=cls.o.cs, calcReactionRates=True)\n\n    def test_reactorConversion(self):\n        \"\"\"Tests the reactor conversion to and from the original reactor.\"\"\"\n        self.assertTrue(self.converter._hasNonUniformAssems)\n        self.assertTrue(self.r.core.lib)\n        self.assertEqual(self.r.core.p.keff, 1.0)\n\n        controlAssems = self.r.core.getAssemblies(Flags.PRIMARY | Flags.CONTROL)\n        # Add a bunch of multi-group flux to the control assemblies in the core to demonstrate that data can be mapped\n        # back to the original control rod assemblies if they are changed. Additionally, this will check that\n        # block-level reaction rates are being calculated (i.e., `rateAbs`).\n        for a in controlAssems:\n            for b in a:\n                b.p.mgFlux = [1.0] * 33\n                self.assertFalse(b.p.rateAbs)\n\n        self.converter.convert(self.r)\n        self.assertEqual(len(controlAssems), len(self.converter._nonUniformAssemStorage))\n\n        self.converter.applyStateToOriginal()\n        self.assertEqual(len(self.converter._nonUniformAssemStorage), 0)\n        for a in controlAssems:\n            for b in a:\n                self.assertTrue(all(b.getMgFlux()))\n                self.assertTrue(b.p.rateAbs)\n\n        self.converter.updateReactionRates()\n        for a in controlAssems:\n            for b in a:\n                self.assertTrue(b.p.rateCap)\n                self.assertTrue(b.p.rateAbs)\n\n\nclass TestParamMapper(unittest.TestCase):\n    \"\"\"Test how the ParamMapper maps params.\"\"\"\n\n    def setUp(self):\n        sourceAssem, destinationAssem = test_assemblies.buildTestAssemblies()[2:]\n        self.sourceBlock = sourceAssem.getBlocks()[0]\n        self.destinationBlock = destinationAssem.getBlocks()[0]\n\n        # volume integrated parameters\n        self.sourceBlock.p.power = 2.0\n        
self.sourceBlock.p.mgFlux = np.array([2.0, 2.0, 2.0])\n        self.volumeIntegratedParameterNames = [\"power\", \"mgFlux\"]\n        # non-volume integrated parameters\n        self.sourceBlock.p.rateFis = 2.0\n        self.sourceBlock.p.linPowByPin = np.array([2.0, 2.0, 2.0])\n        self.regularParameterNames = [\"rateFis\", \"linPowByPin\"]\n        self.allParameterNames = self.volumeIntegratedParameterNames + self.regularParameterNames\n\n        self.sourceBlock.getSymmetryFactor = Mock()\n        self.destinationBlock.getSymmetryFactor = Mock()\n\n    def mappingTestHelper(self, expectedRatioVolumeIntegrated):\n        \"\"\"\n        Test helper to run block comparison when mapping parameters.\n\n        Parameters\n        ----------\n        expectedRatioVolumeIntegrated : int, float\n            The ratio expected for volume integrated parameters when dividing the destination value by the source value.\n        \"\"\"\n        paramMapper = uniformMesh.ParamMapper([], self.allParameterNames, self.sourceBlock)\n        sourceValues = paramMapper.paramGetter(self.sourceBlock, self.allParameterNames)\n        paramMapper.paramSetter(self.destinationBlock, sourceValues, self.allParameterNames)\n        for paramName in self.volumeIntegratedParameterNames:\n            ratio = self.destinationBlock.p[paramName] / self.sourceBlock.p[paramName]\n            np.testing.assert_equal(ratio, expectedRatioVolumeIntegrated)\n        for paramName in self.regularParameterNames:\n            ratio = self.destinationBlock.p[paramName] / self.sourceBlock.p[paramName]\n            np.testing.assert_equal(ratio, 1)\n\n    def test_mappingSameSymmetry(self):\n        \"\"\"Test mapping parameters between blocks with similar and dissimilar symmetry factors.\"\"\"\n        self.sourceBlock.getSymmetryFactor.return_value = 3\n        self.destinationBlock.getSymmetryFactor.return_value = 3\n        self.mappingTestHelper(1)\n\n    def test_mappingDifferentSymmetry(self):\n   
     \"\"\"Test mapping parameters between blocks with similar and dissimilar symmetry factors.\"\"\"\n        self.sourceBlock.getSymmetryFactor.return_value = 3\n        self.destinationBlock.getSymmetryFactor.return_value = 1\n        self.mappingTestHelper(3)\n"
  },
  {
    "path": "armi/reactor/converters/uniformMesh.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nConverts reactor with arbitrary axial meshing (e.g. multiple assemblies with different\naxial meshes) to one with a global uniform axial mesh.\n\nUseful for preparing inputs for physics codes that require structured meshes\nfrom a more flexible ARMI reactor mesh.\n\nThis is implemented generically but includes a concrete subclass for\nneutronics-specific parameters. This is used for build input files\nfor codes like DIF3D which require axially uniform meshes.\n\nRequirements\n------------\n1. Build an average reactor with aligned axial meshes from a reactor with arbitrarily\n   unaligned axial meshes in a way that conserves nuclide mass\n2. Translate state information computed on the uniform mesh back to the unaligned mesh.\n3. For neutronics cases, all neutronics-related block params should be translated, as\n   well as the multigroup real and adjoint flux.\n\n\n.. warning::\n    This procedure can cause numerical diffusion in some cases. For example,\n    if a control rod tip block has a large coolant block below it, things like peak\n    absorption rate can get lost into it. We recalculate some but not all\n    reaction rates in the re-mapping process based on a flux remapping. To avoid this,\n    finer meshes will help. 
Always perform mesh sensitivity studies to ensure appropriate\n    convergence for your needs.\n\nExamples\n--------\n    converter = uniformMesh.NeutronicsUniformMeshConverter()\n    converter.convert(reactor)\n    uniformReactor = converter.convReactor\n    # do calcs, then:\n    converter.applyStateToOriginal()\n\nThe mesh mapping happens as described in the figure:\n\n.. figure:: /.static/axial_homogenization.png\n\n\"\"\"\n\nimport collections\nimport copy\nimport typing\nfrom timeit import default_timer as timer\n\nimport numpy as np\n\nfrom armi import runLog\nfrom armi.physics.neutronics.globalFlux import RX_ABS_MICRO_LABELS, RX_PARAM_NAMES\nfrom armi.reactor import grids, parameters\nfrom armi.reactor.converters.geometryConverters import GeometryConverter\nfrom armi.reactor.flags import Flags\nfrom armi.reactor.reactors import Core, Reactor\nfrom armi.settings.fwSettings.globalSettings import CONF_UNIFORM_MESH_MINIMUM_SIZE\nfrom armi.utils import plotting\nfrom armi.utils.mathematics import average1DWithinTolerance\n\nif typing.TYPE_CHECKING:\n    from armi.reactor.blocks import Block\n\nHEAVY_METAL_PARAMS = [\"molesHmBOL\", \"massHmBOL\"]\n\n\ndef converterFactory(globalFluxOptions):\n    if globalFluxOptions.photons:\n        return GammaUniformMeshConverter(globalFluxOptions.cs)\n    else:\n        return NeutronicsUniformMeshConverter(\n            globalFluxOptions.cs,\n            calcReactionRates=globalFluxOptions.calcReactionRatesOnMeshConversion,\n        )\n\n\nclass UniformMeshGenerator:\n    \"\"\"\n    This class generates a common axial mesh to for the uniform mesh converter to use. The\n    generation algorithm starts with the simple ``average1DWithinTolerance`` utility function\n    to compute a representative \"average\" of the assembly meshes in the reactor. 
It then modifies\n    that mesh to more faithfully represent important material boundaries of fuel and control\n    absorber material.\n\n    The decusping feature is controlled with the case setting ``uniformMeshMinimumSize``. If no\n    value is provided for this setting, the uniform mesh generator will skip the decusping step\n    and just provide the result of ``_computeAverageAxialMesh``.\n    \"\"\"\n\n    def __init__(self, r, minimumMeshSize=None):\n        \"\"\"\n        Initialize an object to generate an appropriate common axial mesh to use for uniform mesh conversion.\n\n        Parameters\n        ----------\n        r : :py:class:`Reactor <armi.reactor.reactors.Reactor>` object.\n            Reactor for which a common mesh is generated\n        minimumMeshSize : float, optional\n            Minimum allowed separation between axial mesh points in cm\n            If no minimum mesh size is provided, no \"decusping\" is performed\n        \"\"\"\n        self._sourceReactor = r\n        self.minimumMeshSize = minimumMeshSize\n        self._commonMesh = None\n\n    def generateCommonMesh(self):\n        \"\"\"\n        Generate a common axial mesh to use.\n\n        .. impl:: Try to preserve the boundaries of fuel and control material.\n            :id: I_ARMI_UMC_NON_UNIFORM\n            :implements: R_ARMI_UMC_NON_UNIFORM\n\n            A core-wide mesh is computed via ``_computeAverageAxialMesh`` which\n            operates by first collecting all the mesh points for every assembly\n            (``allMeshes``) and then averaging them together using\n            ``average1DWithinTolerance``. An attempt to preserve fuel and control\n            material boundaries is accomplished by moving fuel region boundaries\n            to accommodate control rod boundaries. 
Note this behavior only occurs\n            by calling ``_decuspAxialMesh`` which is dependent on ``minimumMeshSize``\n            being defined (this is controlled by the ``uniformMeshMinimumSize`` setting).\n\n        .. impl:: Produce a mesh with a size no smaller than a user-specified value.\n            :id: I_ARMI_UMC_MIN_MESH\n            :implements: R_ARMI_UMC_MIN_MESH\n\n            If a minimum mesh size ``minimumMeshSize`` is provided, calls\n            ``_decuspAxialMesh`` on the core-wide mesh to maintain that minimum size\n            while still attempting to honor fuel and control material boundaries. Relies\n            ultimately on ``_filterMesh`` to remove mesh points that violate the minimum\n            size. Note that ``_filterMesh`` will always respect the minimum mesh size,\n            even if this means losing a mesh point that represents a fuel or control\n            material boundary.\n\n        Notes\n        -----\n        Attempts to reduce the effect of fuel and control rod absorber smearing\n        (\"cusping\" effect) by keeping important material boundaries in the common mesh.\n        \"\"\"\n        self._computeAverageAxialMesh()\n        if self.minimumMeshSize is not None:\n            self._decuspAxialMesh()\n\n    def _computeAverageAxialMesh(self, includeSubMesh: bool = True):\n        \"\"\"\n        Computes an average axial mesh based on the core's reference assembly.\n\n        Parameters\n        ----------\n        includeSubMesh: bool, optional\n            Whether to include the computational axial submesh in the average mesh.\n\n        Notes\n        -----\n        This iterates over all the assemblies in the core and collects all assembly meshes\n        that have the same number of fine-mesh points as the `refAssem` for the core. 
Based on\n        this, the proposed uniform mesh will be some average of many assemblies in the core.\n        The reason for this is to account for the fact that multiple assemblies (i.e., fuel assemblies)\n        may have a different mesh due to differences in thermal and/or burn-up expansion.\n\n        Averaging all the assembly meshes that have the same number of points can be undesirable\n        in certain corner cases because no preference is assigned based on assembly type. For\n        example: if the reflector assemblies have the same number of mesh points as the fuel\n        assemblies but the size of the blocks is slightly different, the reflector mesh can influence\n        the uniform mesh and effectively pull it away from the fuel mesh boundaries, potentially\n        resulting in smearing (i.e., homogenization) of fuel with non-fuel materials. This is an\n        undesirable outcome. In the future, it may be advantageous to determine a better way of\n        sorting and prioritizing assembly meshes for generating the uniform mesh.\n        \"\"\"\n        src = self._sourceReactor\n        refAssem = src.core.refAssem\n\n        refNumPoints = len(src.core.findAllAxialMeshPoints([refAssem], applySubMesh=includeSubMesh)[1:])\n        allMeshes = []\n        for a in src.core:\n            # Get the mesh points of the assembly, neglecting the first coordinate\n            # (typically zero).\n            aMesh = src.core.findAllAxialMeshPoints([a], applySubMesh=includeSubMesh)[1:]\n            if len(aMesh) == refNumPoints:\n                allMeshes.append(aMesh)\n\n        averageMesh = average1DWithinTolerance(np.array(allMeshes))\n        self._commonMesh = np.array(averageMesh)\n\n    def _decuspAxialMesh(self):\n        \"\"\"\n        Preserve control rod material boundaries to reduce control rod cusping effect.\n\n        Notes\n        -----\n        Uniform mesh conversion can lead to axial smearing of control assembly material, which 
causes\n        a pronounced control rod \"cusping\" affect in the differential rod worth. This function\n        modifies the uniform mesh to honor fuel and control rod material boundaries while avoiding excessively\n        small mesh sizes.\n\n        If adding control rod material boundaries to the mesh creates excessively small mesh regions,\n        this function will move internal fuel region boundaries to make room for the control rod boundaries.\n\n        This function operates by filtering out mesh points that are too close together while always holding on\n        to the specified \"anchor\" points in the mesh. The anchor points are built up progressively as the\n        appropriate bottom and top boundaries of fuel and control assemblies are determined.\n        \"\"\"\n        # filter fuel material boundaries to minimum mesh size\n        filteredBottomFuel, filteredTopFuel = self._getFilteredMeshTopAndBottom(Flags.FUEL)\n        materialBottoms, materialTops = self._getFilteredMeshTopAndBottom(\n            Flags.CONTROL, filteredBottomFuel, filteredTopFuel\n        )\n\n        # combine the bottoms and tops into one list with bottom preference\n        allMatBounds = materialBottoms + materialTops\n        materialAnchors = self._filterMesh(\n            allMatBounds,\n            self.minimumMeshSize,\n            filteredBottomFuel + filteredTopFuel,\n            preference=\"bottom\",\n            warn=True,\n        )\n\n        runLog.extra(\n            \"Attempting to honor control and fuel material boundaries in uniform mesh \"\n            f\"for {self} while also keeping minimum mesh size of {self.minimumMeshSize}. 
\"\n            f\"Material boundaries are: {allMatBounds}\"\n        )\n\n        # combine material bottom boundaries with full mesh using bottom preference\n        meshWithBottoms = self._filterMesh(\n            list(self._commonMesh) + materialBottoms,\n            self.minimumMeshSize,\n            materialBottoms,\n            preference=\"bottom\",\n        )\n        # combine material top boundaries with full mesh using top preference\n        meshWithTops = self._filterMesh(\n            list(self._commonMesh) + materialTops,\n            self.minimumMeshSize,\n            materialTops,\n            preference=\"top\",\n        )\n        # combine all mesh points using all material boundaries as anchors with top preference\n        # top vs. bottom preference is somewhat arbitrary here\n        combinedMesh = self._filterMesh(\n            list(set(meshWithBottoms + meshWithTops)),\n            self.minimumMeshSize,\n            materialAnchors,\n            preference=\"top\",\n        )\n\n        self._commonMesh = np.array(combinedMesh)\n\n    def _filterMesh(self, meshList, minimumMeshSize, anchorPoints, preference=\"bottom\", warn=False):\n        \"\"\"\n        Check for mesh violating the minimum mesh size and remove them if necessary.\n\n        Parameters\n        ----------\n        meshList : list of float, required\n            List of mesh points to be filtered by minimum mesh size\n        minimumMeshSize : float, required\n            Minimum allowed separation between axial mesh points in cm\n        anchorPoints : list of float, required\n            These mesh points will not be removed. 
Note that the anchor points must be separated by\n            at least the ``minimumMeshSize``.\n        preference : str, optional\n            When neither mesh point is in the list of ``anchorPoints``, which mesh point is given preference\n            (\"bottom\" or \"top\")\n        warn : bool, optional\n            Whether to log a warning when a mesh is removed. This is true if a\n            control material boundary is removed, but otherwise it is false.\n        \"\"\"\n        if preference == \"bottom\":\n            meshList = sorted(list(set(meshList)))\n        elif preference == \"top\":\n            meshList = sorted(list(set(meshList)), reverse=True)\n        else:\n            raise ValueError(\n                f\"Mesh filtering preference {preference} is not an option! Preference must be either bottom or top\"\n            )\n\n        while True:\n            for i in range(len(meshList) - 1):\n                difference = abs(meshList[i + 1] - meshList[i])\n                if difference < minimumMeshSize:\n                    if meshList[i] in anchorPoints and meshList[i + 1] in anchorPoints:\n                        errorMsg = (\n                            \"Attempting to remove two anchor points!\\n\"\n                            \"The uniform mesh minimum size for decusping is smaller than the \"\n                            \"gap between anchor points, which cannot be removed:\\n\"\n                            f\"{meshList[i]}, {meshList[i + 1]}, gap = {abs(meshList[i] - meshList[i + 1])}\"\n                        )\n                        runLog.error(errorMsg)\n                        raise ValueError(errorMsg)\n                    if meshList[i + 1] in anchorPoints:\n                        removeIndex = i\n                    else:\n                        removeIndex = i + 1\n\n                    if warn:\n                        runLog.warning(\n                            f\"{meshList[i + 1]} is too close to {meshList[i]}! 
\"\n                            f\"Difference = {difference} is less than mesh size \"\n                            f\"tolerance of {minimumMeshSize}. The uniform mesh will \"\n                            f\"remove {meshList[removeIndex]}.\"\n                        )\n                    break\n            else:\n                return sorted(meshList)\n            meshList.pop(removeIndex)\n\n    def _getFilteredMeshTopAndBottom(self, flags, bottoms=None, tops=None):\n        \"\"\"\n        Get the bottom and top boundaries of fuel assemblies and filter them based on the ``minimumMeshSize``.\n\n        Parameters\n        ----------\n        flags : armi.reactor.flags.Flags\n            The assembly and block flags for which to preserve material boundaries\n            ``getAssemblies()`` and ``getBlocks()`` are both called with the default, ``exact=False``\n        bottoms : list[float], optional\n            Mesh \"anchors\" for material bottom boundaries\n        tops : list[float], optional\n            Mesh \"anchors\" for material top boundaries\n\n        Returns\n        -------\n        filteredBottoms : the bottom of assembly materials, filtered to a minimum separation of\n            ``minimumMeshSize`` with preference for the lowest bounds\n        filteredTops : the top of assembly materials, filtered to a minimum separation of\n            ``minimumMeshSize`` with preference for the top bounds\n        \"\"\"\n\n        def firstBlockBottom(a, flags):\n            return a.getFirstBlock(flags).p.zbottom\n\n        def lastBlockTop(a, flags):\n            return a.getBlocks(flags)[-1].p.ztop\n\n        filteredBoundaries = dict()\n        for meshList, preference, meshGetter, extreme in [\n            (bottoms, \"bottom\", firstBlockBottom, min),\n            (tops, \"top\", lastBlockTop, max),\n        ]:\n            matBoundaries = set(meshList) if meshList is not None else set()\n            for a in 
self._sourceReactor.core.getAssemblies(flags):\n                matBoundaries.add(meshGetter(a, flags))\n            anchors = meshList if meshList is not None else [extreme(matBoundaries)]\n            filteredBoundaries[preference] = self._filterMesh(\n                matBoundaries, self.minimumMeshSize, anchors, preference=preference\n            )\n\n        return filteredBoundaries[\"bottom\"], filteredBoundaries[\"top\"]\n\n\nclass UniformMeshGeometryConverter(GeometryConverter):\n    \"\"\"\n    This geometry converter can be used to change the axial mesh structure of the\n    reactor core.\n\n    Notes\n    -----\n    There are several staticmethods available on this class that allow for:\n\n        - Creation of a new reactor without applying a new uniform axial mesh. See:\n          `<UniformMeshGeometryConverter.initNewReactor>`\n        - Creation of a new assembly with a new axial mesh applied. See:\n          `<UniformMeshGeometryConverter.makeAssemWithUniformMesh>`\n        - Resetting the parameter state of an assembly back to the defaults for the\n          provided block parameters. See:\n          `<UniformMeshGeometryConverter.clearStateOnAssemblies>`\n        - Mapping number densities and block parameters between one assembly to\n          another. See: `<UniformMeshGeometryConverter.setAssemblyStateFromOverlaps>`\n\n    This class is meant to be extended for specific physics calculations that require a\n    uniform mesh. The child types of this class should define custom\n    `reactorParamsToMap` and `blockParamsToMap` attributes, and the\n    `_setParamsToUpdate` method to specify the precise parameters that need to be\n    mapped in each direction between the non-uniform and uniform mesh assemblies. The\n    definitions should avoid mapping block parameters in both directions because the\n    mapping process will cause numerical diffusion. 
The behavior of\n    `setAssemblyStateFromOverlaps` is dependent on the direction in which the mapping\n    is being applied to prevent the numerical diffusion problem.\n\n    - \"in\" is used when mapping parameters into the uniform assembly\n      from the non-uniform assembly.\n    - \"out\" is used when mapping parameters from the uniform assembly back\n      to the non-uniform assembly.\n\n    .. warning::\n        If a parameter is calculated by a physics solver while the reactor is in its\n        converted (uniform mesh) state, that parameter *must* be included in the list\n        of `reactorParamNames` or `blockParamNames` to be mapped back to the non-uniform\n        reactor; otherwise, it will be lost. These lists are defined through the\n        `_setParamsToUpdate` method, which uses the `reactorParamMappingCategories` and\n        `blockParamMappingCategories` attributes and applies custom logic to create a list of\n        parameters to be mapped in each direction.\n    \"\"\"\n\n    reactorParamMappingCategories = {\n        \"in\": [],\n        \"out\": [],\n    }\n    blockParamMappingCategories = {\n        \"in\": [],\n        \"out\": [],\n    }\n    _TEMP_STORAGE_NAME_SUFFIX = \"-TEMP\"\n\n    def __init__(self, cs=None):\n        GeometryConverter.__init__(self, cs)\n        self._uniformMesh = None\n        self.calcReactionRates = False\n        self.includePinCoordinates = False\n\n        self.paramMapper = None\n\n        # These dictionaries represent back-up data from the source reactor\n        # that can be recovered if the data is not being brought back from\n        # the uniform mesh reactor when ``applyStateToOriginal`` to called.\n        # This prevents clearing out data on the original reactor that should\n        # be preserved since no changes were applied.\n        self._cachedReactorCoreParamData = {}\n\n        self._nonUniformMeshFlags = None\n        self._hasNonUniformAssems = None\n        
self._nonUniformAssemStorage = set()\n        self._minimumMeshSize = None\n\n        if cs is not None:\n            self._nonUniformMeshFlags = [Flags.fromStringIgnoreErrors(f) for f in cs[\"nonUniformAssemFlags\"]]\n            self._hasNonUniformAssems = any(self._nonUniformMeshFlags)\n            self._minimumMeshSize = cs[CONF_UNIFORM_MESH_MINIMUM_SIZE]\n\n    def convert(self, r=None):\n        \"\"\"\n        Create a new reactor core with a uniform mesh.\n\n        .. impl:: Make a copy of the reactor where the new core has a uniform axial mesh.\n            :id: I_ARMI_UMC\n            :implements: R_ARMI_UMC\n\n            Given a source Reactor, ``r``, as input and when ``_hasNonUniformAssems`` is ``False``,\n            a new Reactor is created in ``initNewReactor``. This new Reactor contains copies of select\n            information from the input source Reactor (e.g., Operator, Blueprints, cycle, timeNode, etc).\n            The uniform mesh to be applied to the new Reactor is calculated in ``_generateUniformMesh``\n            (see :need:`I_ARMI_UMC_NON_UNIFORM` and :need:`I_ARMI_UMC_MIN_MESH`). New assemblies with this\n            uniform mesh are created in ``_buildAllUniformAssemblies`` and added to the new Reactor.\n            Core-level parameters are then mapped from the source Reactor to the new Reactor in\n            ``_mapStateFromReactorToOther``. Finally, the core-wide axial mesh is updated on the new Reactor\n            via ``updateAxialMesh``.\n\n\n        .. impl:: Map select parameters from composites on the original mesh to the new mesh.\n            :id: I_ARMI_UMC_PARAM_FORWARD\n            :implements: R_ARMI_UMC_PARAM_FORWARD\n\n            In ``_mapStateFromReactorToOther``, Core-level parameters are mapped from the source Reactor\n            to the new Reactor. 
If requested, block-level parameters can be mapped using an averaging\n            equation as described in ``setAssemblyStateFromOverlaps``.\n        \"\"\"\n        if r is None:\n            raise ValueError(f\"No reactor provided in {self}\")\n\n        completeStartTime = timer()\n        self._sourceReactor = r\n        self._setParamsToUpdate(\"in\")\n\n        # Here we are taking a short cut to homogenizing the core by only focusing on the\n        # core assemblies that need to be homogenized. This will have a large speed up\n        # since we don't have to create an entirely new reactor perform the data mapping.\n        if self._hasNonUniformAssems:\n            runLog.extra(\n                f\"Replacing non-uniform assemblies in reactor {r}, \"\n                \"with assemblies whose axial mesh is uniform with \"\n                f\"the core's reference assembly mesh: {r.core.refAssem.getAxialMesh()}\"\n            )\n            self.convReactor = self._sourceReactor\n            self.convReactor.core.updateAxialMesh()\n            for assem in self.convReactor.core.getAssemblies(self._nonUniformMeshFlags):\n                homogAssem = self.makeAssemWithUniformMesh(\n                    assem,\n                    self.convReactor.core.p.axialMesh[1:],\n                    paramMapper=self.paramMapper,\n                    includePinCoordinates=self.includePinCoordinates,\n                )\n                homogAssem.spatialLocator = assem.spatialLocator\n\n                # Remove this assembly from the core and add it to the temporary storage\n                # so that it can be replaced with the homogenized assembly. 
Note that we\n                # do not call `removeAssembly()` because this will delete the core\n                # assembly from existence rather than only stripping its spatialLocator.\n                if assem.spatialLocator in self.convReactor.core.childrenByLocator:\n                    self.convReactor.core.childrenByLocator.pop(assem.spatialLocator)\n                self.convReactor.core.remove(assem)\n                self.convReactor.core.assembliesByName.pop(assem.getName(), None)\n                for b in assem:\n                    self.convReactor.core.blocksByName.pop(b.getName(), None)\n\n                assem.setName(assem.getName() + self._TEMP_STORAGE_NAME_SUFFIX)\n                self._nonUniformAssemStorage.add(assem)\n                self.convReactor.core.add(homogAssem)\n        else:\n            runLog.extra(f\"Building copy of {r} with a uniform axial mesh.\")\n            self.convReactor = self.initNewReactor(r, self._cs)\n            self._generateUniformMesh(minimumMeshSize=self._minimumMeshSize)\n            self._buildAllUniformAssemblies()\n            self._mapStateFromReactorToOther(self._sourceReactor, self.convReactor, mapBlockParams=False)\n            self._newAssembliesAdded = self.convReactor.core.getAssemblies()\n\n        self.convReactor.core.updateAxialMesh()\n        self.convReactor.core.zones = self._sourceReactor.core.zones\n        self._checkConversion()\n        completeEndTime = timer()\n        runLog.extra(f\"Reactor core conversion time: {completeEndTime - completeStartTime} seconds\")\n\n    def _generateUniformMesh(self, minimumMeshSize):\n        \"\"\"\n        Generate a common axial mesh to use for uniform mesh conversion.\n\n        Parameters\n        ----------\n        minimumMeshSize : float, required\n            Minimum allowed separation between axial mesh points in cm\n        \"\"\"\n        generator = UniformMeshGenerator(self._sourceReactor, minimumMeshSize=minimumMeshSize)\n        
generator.generateCommonMesh()\n        self._uniformMesh = generator._commonMesh\n\n    @staticmethod\n    def initNewReactor(sourceReactor, cs):\n        \"\"\"Build a new, yet empty, reactor with the same settings as sourceReactor.\n\n        Parameters\n        ----------\n        sourceReactor : :py:class:`Reactor <armi.reactor.reactors.Reactor>`\n            original reactor object to be copied\n        cs: Setting\n            Complete settings object\n        \"\"\"\n        # developer note: deepcopy on the blueprint object ensures that all relevant blueprints\n        # attributes are set. Simply calling blueprints.loadFromCs() just initializes\n        # a blueprints object and may not set all necessary attributes. E.g., some\n        # attributes are set when assemblies are added in coreDesign.construct(), however\n        # since we skip that here, they never get set; therefore the need for the deepcopy.\n        bp = copy.deepcopy(sourceReactor.blueprints)\n        newReactor = Reactor(sourceReactor.name, bp)\n        coreDesign = bp.systemDesigns[\"core\"]\n\n        coreDesign.construct(cs, bp, newReactor, loadComps=False)\n        newReactor.p.cycle = sourceReactor.p.cycle\n        newReactor.p.timeNode = sourceReactor.p.timeNode\n        newReactor.p.maxAssemNum = sourceReactor.p.maxAssemNum\n        newReactor.core.p.coupledIteration = sourceReactor.core.p.coupledIteration\n        newReactor.core.lib = sourceReactor.core.lib\n        newReactor.core.setPitchUniform(sourceReactor.core.getAssemblyPitch())\n        newReactor.o = sourceReactor.o  # This is needed later for geometry transformation\n\n        # check if the sourceReactor has been modified from the blueprints\n        if sourceReactor.core.isFullCore and not newReactor.core.isFullCore:\n            _geometryConverter = newReactor.core.growToFullCore(cs)\n\n        return newReactor\n\n    def applyStateToOriginal(self):\n        \"\"\"\n        Apply the state of the converted reactor 
back to the original reactor,\n        mapping number densities and block parameters.\n\n        .. impl:: Map select parameters from composites on the new mesh to the original mesh.\n            :id: I_ARMI_UMC_PARAM_BACKWARD\n            :implements: R_ARMI_UMC_PARAM_BACKWARD\n\n            To ensure that the parameters on the original Reactor are from the converted Reactor,\n            the first step is to clear the Reactor-level parameters on the original Reactor\n            (see ``_clearStateOnReactor``). ``_mapStateFromReactorToOther`` is then called\n            to map Core-level parameters and, optionally, averaged Block-level parameters\n            (see :need:`I_ARMI_UMC_PARAM_FORWARD`).\n        \"\"\"\n        runLog.extra(f\"Applying uniform neutronics results from {self.convReactor} to {self._sourceReactor}\")\n        completeStartTime = timer()\n\n        # map the block parameters back to the non-uniform assembly\n        self._setParamsToUpdate(\"out\")\n\n        # If we have non-uniform mesh assemblies then we need to apply a\n        # different approach to undo the geometry transformations on an\n        # assembly by assembly basis.\n        if self._hasNonUniformAssems:\n            for assem in self._sourceReactor.core.getAssemblies(self._nonUniformMeshFlags):\n                for storedAssem in self._nonUniformAssemStorage:\n                    if storedAssem.getName() == assem.getName() + self._TEMP_STORAGE_NAME_SUFFIX:\n                        self.setAssemblyStateFromOverlaps(\n                            assem,\n                            storedAssem,\n                            self.paramMapper,\n                            mapNumberDensities=False,\n                            calcReactionRates=self.calcReactionRates,\n                        )\n\n                        # Remove the stored assembly from the temporary storage list\n                        # and replace the current assembly with it.\n                        
storedAssem.spatialLocator = assem.spatialLocator\n                        storedAssem.setName(assem.getName())\n                        self._nonUniformAssemStorage.remove(storedAssem)\n                        self._sourceReactor.core.removeAssembly(assem, discharge=False)\n                        self._sourceReactor.core.add(storedAssem)\n                        break\n                else:\n                    runLog.error(\n                        f\"No assembly matching name {assem.getName()} \"\n                        f\"was found in the temporary storage list. {assem} \"\n                        \"will persist as an axially unified assembly. \"\n                        \"This is likely not intended.\"\n                    )\n\n            self._sourceReactor.core.updateAxialMesh()\n        else:\n            # Clear the state of the original source reactor to ensure that\n            # a clean mapping between the converted reactor for data that has been\n            # changed. In this case, we cache the original reactor's data so that\n            # after the mapping has been applied, we can recover data from any\n            # parameters that did not change.\n            self._cachedReactorCoreParamData = {}\n            self._clearStateOnReactor(self._sourceReactor, cache=True)\n            self._mapStateFromReactorToOther(self.convReactor, self._sourceReactor)\n\n            # We want to map the converted reactor core's library to the source reactor\n            # because in some instances this has changed (i.e., when generating cross sections).\n            self._sourceReactor.core.lib = self.convReactor.core.lib\n\n        completeEndTime = timer()\n        runLog.extra(f\"Parameter remapping time: {completeEndTime - completeStartTime} seconds\")\n\n    @staticmethod\n    def makeAssemWithUniformMesh(\n        sourceAssem,\n        newMesh,\n        paramMapper=None,\n        mapNumberDensities=True,\n        includePinCoordinates=False,\n    ):\n      
  \"\"\"\n        Build new assembly based on a source assembly but apply the uniform mesh.\n\n        Notes\n        -----\n        This creates a new assembly based on the provided source assembly, applies\n        a new uniform mesh and then maps number densities and block-level parameters\n        to the new assembly from the source assembly.\n\n        Parameters\n        ----------\n        sourceAssem : `Assembly <armi.reactor.assemblies.Assembly>` object\n            Assembly that is used to map number densities and block-level parameters to\n            a new mesh structure.\n        newMesh : List[float]\n            A list of the new axial mesh coordinates of the blocks. Note that these mesh\n            coordinates are in cm and should represent the top axial mesh coordinates of\n            the new blocks.\n        paramMapper : ParamMapper\n            Object that contains list of parameters to be mapped and has methods for mapping\n        mapNumberDensities : bool, optional\n            If True, number densities will be mapped from the source assembly to the new assembly.\n            This is True by default, but this can be set to False to only map block-level parameters if\n            the names are provided in `blockParamNames`. 
It can be useful to set this to False in circumstances\n            where the ``setNumberDensitiesFromOverlaps`` does not conserve mass and for some edge cases.\n            This can show up in specific instances with moving meshes (i.e., control rods) in some applications.\n            In those cases, the mapping of number densities can be treated independent of this more general\n            implementation.\n\n        See Also\n        --------\n        setAssemblyStateFromOverlaps\n            This can be used to reverse the number density and parameter mappings\n            between two assemblies.\n        \"\"\"\n        newAssem = UniformMeshGeometryConverter._createNewAssembly(sourceAssem)\n        newAssem.p.assemNum = sourceAssem.p.assemNum\n        runLog.debug(f\"Creating a uniform mesh of {newAssem}\")\n        bottom = 0.0\n\n        def checkPriorityFlags(b):\n            \"\"\"\n            Check that a block has the flags that are prioritized for uniform mesh conversion.\n\n            Also check that it's not different type of block that is a superset of the\n            priority flags, like \"Flags.FUEL | Flags.PLENUM\"\n            \"\"\"\n            priorityFlags = [Flags.FUEL, Flags.CONTROL, Flags.SHIELD | Flags.RADIAL]\n            return b.hasFlags(priorityFlags) and not b.hasFlags(Flags.PLENUM)\n\n        for topMeshPoint in newMesh:\n            overlappingBlockInfo = sourceAssem.getBlocksBetweenElevations(bottom, topMeshPoint)\n            # This is not expected to occur given that the assembly mesh is consistent with\n            # the blocks within it, but this is added for defensive programming and to\n            # highlight a developer issue.\n            if not overlappingBlockInfo:\n                raise ValueError(\n                    f\"No blocks found between {bottom:.3f} and {topMeshPoint:.3f} in {sourceAssem}. \"\n                    f\"Ensure a valid mesh is provided. 
Mesh given: {newMesh}\"\n                )\n\n            # Iterate over the blocks that are within this region and\n            # select one as a \"source\" for determining which cross section\n            # type to use. This uses the following rules:\n            #     1. Determine the total height corresponding to each XS type that\n            #     appears for blocks with FUEL, CONTROL, or SHIELD|RADIAL flags in this domain.\n            #     2. Determine the single XS type that represents the largest fraction\n            #     of the total height of FUEL, CONTROL, or SHIELD|RADIAL cross sections.\n            #     3. Use the first block of the majority XS type as the source block.\n            #     4. If none of the special block types are present(fuelOrAbsorber == False),\n            #     use the xs type that represents the largest fraction of the destination block.\n            typeHeight = collections.defaultdict(float)\n            blocks = [b for b, _h in overlappingBlockInfo]\n            fuelOrAbsorber = any(checkPriorityFlags(b) for b in blocks)\n            for b, h in overlappingBlockInfo:\n                if checkPriorityFlags(b) or not fuelOrAbsorber:\n                    typeHeight[b.p.xsType] += h\n\n            sourceBlock = None\n            # xsType is the one with the majority of overlap\n            xsType = next(k for k, v in typeHeight.items() if v == max(typeHeight.values()))\n            for b in blocks:\n                if checkPriorityFlags(b) or not fuelOrAbsorber:\n                    if b.p.xsType == xsType:\n                        sourceBlock = b\n                        break\n\n            if len(typeHeight) > 1:\n                if sourceBlock:\n                    totalHeight = sum(typeHeight.values())\n                    runLog.debug(\n                        f\"Multiple XS types exist between {bottom} and {topMeshPoint}. 
\"\n                        f\"Using the XS type from the largest region, {xsType}\"\n                    )\n                    for xs, h in typeHeight.items():\n                        heightFrac = h / totalHeight\n                        runLog.debug(f\"XSType {xs}: {heightFrac:.4f}\")\n\n            block = sourceBlock.createHomogenizedCopy(includePinCoordinates)\n            block.p.xsType = xsType\n            block.setHeight(topMeshPoint - bottom)\n            block.p.axMesh = 1\n            newAssem.add(block)\n            bottom = topMeshPoint\n\n        newAssem.reestablishBlockOrder()\n        newAssem.calculateZCoords()\n\n        UniformMeshGeometryConverter.setAssemblyStateFromOverlaps(\n            sourceAssem,\n            newAssem,\n            paramMapper,\n            mapNumberDensities,\n        )\n        return newAssem\n\n    @staticmethod\n    def setAssemblyStateFromOverlaps(\n        sourceAssembly,\n        destinationAssembly,\n        paramMapper,\n        mapNumberDensities=False,\n        calcReactionRates=False,\n    ):\n        r\"\"\"\n        Set state data (i.e., number densities and block-level parameters) on a assembly based on a source\n        assembly with a different axial mesh.\n\n        This solves an averaging equation from the source to the destination.\n\n        .. 
math::\n            <P> = \\frac{\\int_{z_1}^{z_2} P(z) dz}{\\int_{z_1}^{z_2} dz}\n\n        which can be solved piecewise for z-coordinates along the source blocks.\n\n        Notes\n        -----\n        * If the parameter is volume integrated (e.g., flux, linear power)\n          then calculate the fractional contribution from the source block.\n        * If the parameter is not volume integrated (e.g., volumetric reaction rate)\n          then calculate the fraction contribution on the destination block.\n          This smears the parameter over the destination block.\n\n        Parameters\n        ----------\n        sourceAssembly : Assembly\n            assem that has the state\n        destinationAssembly : Assembly\n            assem that has is getting the state from sourceAssembly\n        paramMapper : ParamMapper\n            Object that contains list of parameters to be mapped and has methods for mapping\n        mapNumberDensities : bool, optional\n            If True, number densities will be mapped from the source assembly to the destination assembly.\n            This is True by default, but this can be set to False to only map block-level parameters if\n            the names are provided in `blockParamNames`. It can be useful to set this to False in circumstances\n            where the ``setNumberDensitiesFromOverlaps`` does not conserve mass and for some edge cases.\n            This can show up in specific instances with moving meshes (i.e., control rods) in some applications.\n            In those cases, the mapping of number densities can be treated independent of this more general\n            implementation.\n        calcReactionRates : bool, optional\n            If True, the neutron reaction rates will be calculated on each block within the destination\n            assembly. 
Note that this will skip the reaction rate calculations for a block if it does\n            not contain a valid multi-group flux.\n\n        See Also\n        --------\n        setNumberDensitiesFromOverlaps : does this but does smarter caching for number densities.\n        \"\"\"\n        for destBlock in destinationAssembly:\n            zLower = destBlock.p.zbottom\n            zUpper = destBlock.p.ztop\n            destinationBlockHeight = destBlock.getHeight()\n            # Determine which blocks in the uniform mesh source assembly are\n            # within the lower and upper bounds of the destination block.\n            sourceBlocksInfo = sourceAssembly.getBlocksBetweenElevations(zLower, zUpper)\n\n            if abs(zUpper - zLower) < 1e-6 and not sourceBlocksInfo:\n                continue\n            elif not sourceBlocksInfo:\n                raise ValueError(\n                    \"An error occurred when attempting to map to the \"\n                    f\"results from {sourceAssembly} to {destinationAssembly}. \"\n                    f\"No blocks in {sourceAssembly} exist between the axial \"\n                    f\"elevations of {zLower:<12.5f} cm and {zUpper:<12.5f} cm. 
\"\n                    \"This a major bug in the uniform mesh converter that should \"\n                    \"be reported to the developers.\"\n                )\n\n            if mapNumberDensities:\n                setNumberDensitiesFromOverlaps(destBlock, sourceBlocksInfo)\n\n            # Iterate over each of the blocks that were found in the uniform mesh\n            # source assembly within the lower and upper bounds of the destination\n            # block and perform the parameter mapping.\n            if paramMapper is not None:\n                updatedDestVals = collections.defaultdict(float)\n                for sourceBlock, sourceBlockOverlapHeight in sourceBlocksInfo:\n                    sourceBlockVals = paramMapper.paramGetter(\n                        sourceBlock,\n                        paramMapper.blockParamNames,\n                    )\n                    sourceBlockHeight = sourceBlock.getHeight()\n\n                    for paramName, sourceBlockVal in zip(paramMapper.blockParamNames, sourceBlockVals):\n                        if sourceBlockVal is None:\n                            continue\n                        if paramMapper.isPeak[paramName]:\n                            updatedDestVals[paramName] = max(sourceBlockVal, updatedDestVals[paramName])\n                        else:\n                            if paramMapper.isVolIntegrated[paramName]:\n                                denominator = sourceBlockHeight\n                            else:\n                                denominator = destinationBlockHeight\n                            integrationFactor = sourceBlockOverlapHeight / denominator\n                            updatedDestVals[paramName] += sourceBlockVal * integrationFactor\n\n                paramMapper.paramSetter(destBlock, updatedDestVals.values(), updatedDestVals.keys())\n\n        # If requested, the reaction rates will be calculated based on the\n        # mapped neutron flux and the XS library.\n        if 
calcReactionRates:\n            if paramMapper is None:\n                runLog.warning(\n                    f\"Reaction rates requested for {destinationAssembly}, but no ParamMapper \"\n                    \"was provided to setAssemblyStateFromOverlaps(). Reaction rates calculated \"\n                    \"will reflect the intended result without new parameter values being mapped in.\"\n                )\n            core = sourceAssembly.getAncestor(lambda c: isinstance(c, Core))\n            if core is not None:\n                UniformMeshGeometryConverter._calculateReactionRates(\n                    lib=core.lib, keff=core.p.keff, assem=destinationAssembly\n                )\n            else:\n                runLog.warning(\n                    f\"Reaction rates requested for {destinationAssembly}, but no core object \"\n                    \"exists. This calculation will be skipped.\",\n                    single=True,\n                    label=\"Block reaction rate calculation skipped due to insufficient multi-group flux data.\",\n                )\n\n    def clearStateOnAssemblies(assems, blockParamNames=None, cache=True):\n        \"\"\"\n        Clears the parameter state of blocks for a list of assemblies.\n\n        Parameters\n        ----------\n        assems : List[`Assembly <armi.reactor.assemblies.Assembly>`]\n            List of assembly objects.\n        blockParamNames : List[str], optional\n            A list of block parameter names to clear on the given assemblies.\n        cache : bool\n            If True, the block parameters that were cleared are stored\n            and returned as a dictionary of ``{b: {param1: val1, param2: val2}, b2: {...}, ...}``\n        \"\"\"\n        if blockParamNames is None:\n            blockParamNames = []\n\n        cachedBlockParamData = collections.defaultdict(dict)\n\n        if not assems:\n            return cachedBlockParamData\n\n        blocks = []\n        for a in assems:\n            
blocks.extend(a)\n        firstBlock = blocks[0]\n        for paramName in blockParamNames:\n            defaultValue = firstBlock.p.pDefs[paramName].default\n            for b in blocks:\n                if cache:\n                    cachedBlockParamData[b][paramName] = b.p[paramName]\n                b.p[paramName] = defaultValue\n\n        return cachedBlockParamData\n\n    def plotConvertedReactor(self):\n        \"\"\"Generate a radial layout image of the converted reactor core. A pass-through to preserve the API.\"\"\"\n        plotting.plotRadialReactorLayouts(self.convReactor)\n\n    def reset(self):\n        \"\"\"Clear out stored attributes and reset the global assembly number.\"\"\"\n        self._cachedReactorCoreParamData = {}\n        super().reset()\n\n    def _setParamsToUpdate(self, direction):\n        \"\"\"\n        Activate conversion of the specified parameters.\n\n        Notes\n        -----\n        The parameters mapped into and out of the uniform mesh will vary depending on\n        the physics kernel using the uniform mesh. The parameters to be mapped in each\n        direction are defined as a class attribute. New options can be created by extending\n        the base class with different class attributes for parameters to map, and applying\n        special modifications to these categorized lists with the `_setParamsToUpdate` method.\n\n        This base class `_setParamsToUpdate()` method should not be called, so this raises a\n        NotImplementedError.\n\n        Parameters\n        ----------\n        direction : str\n            \"in\" or \"out\". 
The direction of mapping; \"in\" to the uniform mesh assembly, or \"out\" of it.\n            Different parameters are mapped in each direction.\n\n        Raises\n        ------\n        NotImplementedError\n        \"\"\"\n        raise NotImplementedError\n\n    def _checkConversion(self):\n        \"\"\"Perform checks to ensure conversion occurred properly.\"\"\"\n        pass\n\n    @staticmethod\n    def _createNewAssembly(sourceAssembly):\n        a = sourceAssembly.__class__(sourceAssembly.getType())\n        a.spatialGrid = grids.AxialGrid.fromNCells(len(sourceAssembly))\n        a.setName(sourceAssembly.getName())\n        a.p.flags = sourceAssembly.p.flags\n        return a\n\n    def _buildAllUniformAssemblies(self):\n        \"\"\"\n        Loop through each new block for each mesh point and apply conservation of atoms.\n        We use the submesh and allow blocks to be as small as the smallest submesh to\n        avoid unnecessarily diffusing small blocks into huge ones (e.g. 
control blocks\n        into plenum).\n        \"\"\"\n        runLog.debug(\n            f\"Creating new assemblies from {self._sourceReactor.core} with a uniform mesh of {self._uniformMesh}\"\n        )\n        for sourceAssem in self._sourceReactor.core:\n            newAssem = self.makeAssemWithUniformMesh(\n                sourceAssem,\n                self._uniformMesh,\n                paramMapper=self.paramMapper,\n                includePinCoordinates=self.includePinCoordinates,\n            )\n            src = sourceAssem.spatialLocator\n            newLoc = self.convReactor.core.spatialGrid[src.i, src.j, 0]\n            self.convReactor.core.add(newAssem, newLoc)\n\n    def _clearStateOnReactor(self, reactor, cache):\n        \"\"\"\n        Delete existing state that will be updated so they don't increment.\n\n        The summations should start at zero but will happen for all overlaps.\n        \"\"\"\n        runLog.debug(\"Clearing params from source reactor that will be converted.\")\n        for rp in self.paramMapper.reactorParamNames:\n            if cache:\n                self._cachedReactorCoreParamData[rp] = reactor.core.p[rp]\n            reactor.core.p[rp] = 0.0\n\n    def _mapStateFromReactorToOther(self, sourceReactor, destReactor, mapNumberDensities=False, mapBlockParams=True):\n        \"\"\"\n        Map parameters from one reactor to another.\n\n        Notes\n        -----\n        This is a basic parameter mapping routine that can be used by most sub-classes.\n        If special mapping logic is required, this method can be defined on sub-classes as necessary.\n        \"\"\"\n        # Map reactor core parameters\n        for paramName in self.paramMapper.reactorParamNames:\n            # Check if the source reactor has a value assigned for this\n            # parameter and if so, then apply it. 
Otherwise, revert back to\n            # the original value.\n            paramDefined = isinstance(sourceReactor.core.p[paramName], np.ndarray) or sourceReactor.core.p[paramName]\n            if paramDefined or paramName not in self._cachedReactorCoreParamData:\n                val = sourceReactor.core.p[paramName]\n            else:\n                val = self._cachedReactorCoreParamData[paramName]\n            destReactor.core.p[paramName] = val\n\n        if mapBlockParams:\n            # Map block parameters\n            for aSource in sourceReactor.core:\n                aDest = destReactor.core.getAssemblyByName(aSource.getName())\n                UniformMeshGeometryConverter.setAssemblyStateFromOverlaps(\n                    aSource,\n                    aDest,\n                    self.paramMapper,\n                    mapNumberDensities,\n                    calcReactionRates=False,\n                )\n\n            # If requested, the reaction rates will be calculated based on the\n            # mapped neutron flux and the XS library.\n            if self.calcReactionRates:\n                self._calculateReactionRatesEfficient(destReactor.core, sourceReactor.core.p.keff)\n\n        # Clear the cached data after it has been mapped to prevent issues with\n        # holding on to block data long-term.\n        self._cachedReactorCoreParamData = {}\n\n    @staticmethod\n    def _calculateReactionRatesEfficient(core, keff):\n        \"\"\"\n        First, sort blocks into groups by XS type. 
Then, we just need to grab micros for each XS type once.\n\n        Iterate over list of blocks with the given XS type; calculate reaction rates for these blocks\n        \"\"\"\n        xsTypeGroups = collections.defaultdict(list)\n        for b in core.iterBlocks():\n            xsTypeGroups[b.getMicroSuffix()].append(b)\n\n        for xsID, blockList in xsTypeGroups.items():\n            nucSet = set()\n            for b in blockList:\n                nucSet.update(nuc for nuc, ndens in b.getNumberDensities().items() if ndens > 0.0)\n            xsNucDict = {nuc: core.lib.getNuclide(nuc, xsID) for nuc in nucSet}\n            UniformMeshGeometryConverter._calcReactionRatesBlockList(blockList, keff, xsNucDict)\n\n    @staticmethod\n    def _calculateReactionRates(lib, keff, assem):\n        \"\"\"\n        Calculates the neutron reaction rates on the given assembly.\n\n        Notes\n        -----\n        If a block in the assembly does not contain any multi-group flux\n        than the reaction rate calculation for this block will be skipped.\n        \"\"\"\n        from armi.physics.neutronics.globalFlux import globalFluxInterface\n\n        for b in assem:\n            # Checks if the block has a multi-group flux defined and if it\n            # does not then this will skip the reaction rate calculation. This\n            # is captured by the TypeError, due to a `NoneType` divide by float\n            # error.\n            try:\n                b.getMgFlux()\n            except TypeError:\n                continue\n            globalFluxInterface.calcReactionRates(b, keff, lib)\n\n    @staticmethod\n    def _calcReactionRatesBlockList(objList, keff, xsNucDict):\n        r\"\"\"\n        Compute 1-group reaction rates for the objects in objList (usually a block).\n\n        :meta public:\n\n        .. 
impl:: Return the reaction rates for a given ArmiObject\n            :id: I_ARMI_FLUX_RX_RATES_BY_XS_ID\n            :implements: R_ARMI_FLUX_RX_RATES\n\n            This is an alternative implementation of :need:`I_ARMI_FLUX_RX_RATES` that\n            is more efficient when computing reaction rates for a large set of blocks\n            that share a common set of microscopic cross sections.\n\n            For more detail on the reation rate calculations, see :need:`I_ARMI_FLUX_RX_RATES`.\n\n        Parameters\n        ----------\n        objList : List[Block]\n            The list of objects to compute reaction rates on. Notionally this could be upgraded to be\n            any kind of ArmiObject but with params defined as they are it currently is only\n            implemented for a block.\n\n        keff : float\n            The keff of the core. This is required to get the neutron production rate correct\n            via the neutron balance statement (since nuSigF has a 1/keff term).\n\n        xsNucDict: Dict[str, XSNuclide]\n            Microscopic cross sections to use in computing the reaction rates. 
Keys are\n            nuclide names (e.g., \"U235\") and values are the associated XSNuclide objects\n            from the cross section library, which contain the microscopic cross section\n            data for a given nuclide in the current cross section group.\n        \"\"\"\n        for obj in objList:\n            rate = collections.defaultdict(float)\n\n            numberDensities = obj.getNumberDensities()\n            try:\n                mgFlux = np.array(obj.getMgFlux())\n            except TypeError:\n                continue\n\n            for nucName, numberDensity in numberDensities.items():\n                if numberDensity == 0.0:\n                    continue\n                nucRate = collections.defaultdict(float)\n\n                micros = xsNucDict[nucName].micros\n\n                # absorption is fission + capture (no n2n here)\n                for name in RX_ABS_MICRO_LABELS:\n                    volumetricRR = numberDensity * mgFlux.dot(micros[name])\n                    nucRate[\"rateAbs\"] += volumetricRR\n                    if name != \"fission\":\n                        nucRate[\"rateCap\"] += volumetricRR\n                    else:\n                        nucRate[\"rateFis\"] += volumetricRR\n                        # scale nu by keff.\n                        nusigmaF = micros[\"fission\"] * micros.neutronsPerFission\n                        nucRate[\"rateProdFis\"] += numberDensity * mgFlux.dot(nusigmaF) / keff\n\n                nucRate[\"rateProdN2n\"] += 2.0 * numberDensity * mgFlux.dot(micros.n2n)\n\n                for rx in RX_PARAM_NAMES:\n                    if nucRate[rx]:\n                        rate[rx] += nucRate[rx]\n\n            for paramName in RX_PARAM_NAMES:\n                obj.p[paramName] = rate[paramName]  # put in #/cm^3/s\n\n            if rate[\"rateFis\"] > 0.0:\n                fuelVolFrac = obj.getComponentAreaFrac(Flags.FUEL)\n                obj.p.fisDens = np.nan if fuelVolFrac == 0 else 
rate[\"rateFis\"] / fuelVolFrac\n                obj.p.fisDensHom = rate[\"rateFis\"]\n            else:\n                obj.p.fisDens = 0.0\n                obj.p.fisDensHom = 0.0\n\n    def updateReactionRates(self):\n        \"\"\"\n        Update reaction rates on converted assemblies.\n\n        Notes\n        -----\n        In some cases, we may want to read flux into a converted reactor from a\n        pre-existing physics output instead of mapping it in from the pre-conversion\n        source reactor. This method can be called after reading that flux in to\n        calculate updated reaction rates derived from that flux.\n        \"\"\"\n        if self._hasNonUniformAssems:\n            for assem in self.convReactor.core.getAssemblies(self._nonUniformMeshFlags):\n                self._calculateReactionRates(self.convReactor.core.lib, self.convReactor.core.p.keff, assem)\n        else:\n            self._calculateReactionRatesEfficient(self.convReactor.core, self.convReactor.core.p.keff)\n\n\nclass NeutronicsUniformMeshConverter(UniformMeshGeometryConverter):\n    \"\"\"\n    A uniform mesh converter that specifically maps neutronics parameters.\n\n    Notes\n    -----\n    This uniform mesh converter is intended for setting up an eigenvalue\n    (fission-source) neutronics solve. There are no block parameters that need\n    to be mapped in for a basic eigenvalue calculation, just number densities.\n    The results of the calculation are mapped out (i.e., back to the non-uniform\n    mesh). The results mapped out include things like flux, power, and reaction\n    rates.\n\n    .. warning::\n        If a parameter is calculated by a physics solver while the reactor is in its\n        converted (uniform mesh) state, that parameter *must* be included in the list\n        of `reactorParamNames` or `blockParamNames` to be mapped back to the non-uniform\n        reactor; otherwise, it will be lost. 
These lists are defined through the\n        `_setParamsToUpdate` method, which uses the `reactorParamMappingCategories` and\n        `blockParamMappingCategories` attributes and applies custom logic to create a list of\n        parameters to be mapped in each direction.\n    \"\"\"\n\n    reactorParamMappingCategories = {\n        \"in\": [parameters.Category.neutronics],\n        \"out\": [parameters.Category.neutronics],\n    }\n    blockParamMappingCategories = {\n        \"in\": [],\n        \"out\": [\n            parameters.Category.detailedAxialExpansion,\n            parameters.Category.multiGroupQuantities,\n            parameters.Category.pinQuantities,\n        ],\n    }\n\n    def __init__(self, cs=None, calcReactionRates=True):\n        \"\"\"\n        Parameters\n        ----------\n        cs : obj, optional\n            Case settings object.\n\n        calcReactionRates : bool, optional\n            Set to True by default, but if set to False the reaction\n            rate calculation after the neutron flux is remapped will\n            not be calculated.\n        \"\"\"\n        UniformMeshGeometryConverter.__init__(self, cs)\n        self.calcReactionRates = calcReactionRates\n\n    def _setParamsToUpdate(self, direction):\n        \"\"\"\n        Activate conversion of the specified parameters.\n\n        Notes\n        -----\n        For the fission-source neutronics calculation, there are no block parameters\n        that need to be mapped in. This function applies additional filters to the\n        list of categories defined in `blockParamMappingCategories[out]` to avoid mapping\n        out cumulative parameters like DPA or burnup. These parameters should not\n        exist on the neutronics uniform mesh assembly anyway, but this filtering\n        provides an added layer of safety to prevent data from being inadvertently\n        overwritten.\n\n        Parameters\n        ----------\n        direction : str\n            \"in\" or \"out\". 
The direction of mapping; \"in\" to the uniform mesh assembly, or \"out\" of it.\n            Different parameters are mapped in each direction.\n        \"\"\"\n        reactorParamNames = []\n        blockParamNames = []\n\n        for category in self.reactorParamMappingCategories[direction]:\n            reactorParamNames.extend(self._sourceReactor.core.p.paramDefs.inCategory(category).names)\n        b = self._sourceReactor.core.getFirstBlock()\n        excludedCategories = [parameters.Category.gamma]\n        if direction == \"out\":\n            excludedCategories.append(parameters.Category.cumulative)\n            excludedCategories.append(parameters.Category.cumulativeOverCycle)\n        excludedParamNames = []\n        for category in excludedCategories:\n            excludedParamNames.extend(b.p.paramDefs.inCategory(category).names)\n        for category in self.blockParamMappingCategories[direction]:\n            blockParamNames.extend(\n                [name for name in b.p.paramDefs.inCategory(category).names if name not in excludedParamNames]\n            )\n        if direction == \"in\":\n            # initial heavy metal masses are needed to calculate burnup in MWd/kg\n            blockParamNames.extend(HEAVY_METAL_PARAMS)\n\n        # remove any duplicates (from parameters that have multiple categories)\n        blockParamNames = list(set(blockParamNames))\n        self.paramMapper = ParamMapper(reactorParamNames, blockParamNames, b)\n\n\nclass GammaUniformMeshConverter(UniformMeshGeometryConverter):\n    \"\"\"\n    A uniform mesh converter that specifically maps gamma parameters.\n\n    Notes\n    -----\n    This uniform mesh converter is intended for setting up a fixed-source gamma transport solve.\n    Some block parameters from the neutronics solve, such as `b.p.mgFlux`, may need to be mapped\n    into the uniform mesh reactor so that the gamma source can be calculated by the ARMI plugin\n    performing gamma transport. 
Parameters that are updated with gamma transport results, such\n    as `powerGenerated`, `powerNeutron`, and `powerGamma`, need to be mapped back to the\n    non-uniform reactor.\n\n    .. warning::\n        If a parameter is calculated by a physics solver while the reactor is in its\n        converted (uniform mesh) state, that parameter *must* be included in the list\n        of `reactorParamNames` or `blockParamNames` to be mapped back to the non-uniform\n        reactor; otherwise, it will be lost. These lists are defined through the\n        `_setParamsToUpdate` method, which uses the `reactorParamMappingCategories` and\n        `blockParamMappingCategories` attributes and applies custom logic to create a list of\n        parameters to be mapped in each direction.\n    \"\"\"\n\n    reactorParamMappingCategories = {\n        \"in\": [parameters.Category.neutronics],\n        \"out\": [parameters.Category.neutronics],\n    }\n    blockParamMappingCategories = {\n        \"in\": [\n            parameters.Category.multiGroupQuantities,\n        ],\n        \"out\": [\n            parameters.Category.gamma,\n            parameters.Category.neutronics,\n        ],\n    }\n\n    def _setParamsToUpdate(self, direction):\n        \"\"\"\n        Activate conversion of the specified parameters.\n\n        Notes\n        -----\n        For gamma transport, only a small subset of neutronics parameters need to be\n        mapped out. The set is defined in this method. There are conditions on the\n        output blockParamMappingCategories: only non-cumulative, gamma parameters are mapped out.\n        This avoids numerical diffusion of cumulative parameters or those created by the\n        initial eigenvalue neutronics solve from being mapped in both directions by the\n        mesh converter for the fixed-source gamma run.\n\n        Parameters\n        ----------\n        direction : str\n            \"in\" or \"out\". 
The direction of mapping; \"in\" to the uniform mesh assembly, or \"out\" of it.\n            Different parameters are mapped in each direction.\n        \"\"\"\n        reactorParamNames = []\n        blockParamNames = []\n\n        for category in self.reactorParamMappingCategories[direction]:\n            reactorParamNames.extend(self._sourceReactor.core.p.paramDefs.inCategory(category).names)\n        b = self._sourceReactor.core.getFirstBlock()\n        if direction == \"out\":\n            excludeList = (\n                b.p.paramDefs.inCategory(parameters.Category.cumulative).names\n                + b.p.paramDefs.inCategory(parameters.Category.cumulativeOverCycle).names\n            )\n        else:\n            excludeList = b.p.paramDefs.inCategory(parameters.Category.gamma).names\n        for category in self.blockParamMappingCategories[direction]:\n            blockParamNames.extend(\n                [name for name in b.p.paramDefs.inCategory(category).names if name not in excludeList]\n            )\n\n        # remove any duplicates (from parameters that have multiple categories)\n        blockParamNames = list(set(blockParamNames))\n        self.paramMapper = ParamMapper(reactorParamNames, blockParamNames, b)\n\n\nclass ParamMapper:\n    \"\"\"\n    Utility for parameter setters/getters that can be used when\n    transferring data from one assembly to another during the mesh\n    conversion process. 
Stores some data like parameter defaults and\n    properties to save effort of accessing paramDefs many times for\n    the same data.\n    \"\"\"\n\n    def __init__(self, reactorParamNames: list[str], blockParamNames: list[str], b: \"Block\"):\n        \"\"\"\n        Initialize the list of parameter defaults.\n\n        The ParameterDefinitionCollection lookup is very slow, so this we do it once\n        and store it as a hashed list.\n        \"\"\"\n        self.paramDefaults = {paramName: b.p.pDefs[paramName].default for paramName in blockParamNames}\n\n        # Determine which parameters are volume integrated\n        self.isVolIntegrated = {\n            paramName: b.p.paramDefs[paramName].atLocation(parameters.ParamLocation.VOLUME_INTEGRATED)\n            for paramName in blockParamNames\n        }\n        # determine which parameters are peak/max\n        # Unfortunately, these parameters don't tell you WHERE in the block the peak\n        # value occurs. So when mapping block parameters in setAssemblyStateFromOverlaps(),\n        # we will just grab the maximum value over all of the source blocks. 
This effectively\n        # assumes that all of the source blocks overlap 100% with the destination block,\n        # although this is rarely actually the case.\n        self.isPeak = {\n            paramName: b.p.paramDefs[paramName].atLocation(parameters.ParamLocation.MAX)\n            for paramName in blockParamNames\n        }\n\n        self.reactorParamNames = reactorParamNames\n        self.blockParamNames = blockParamNames\n\n    def paramSetter(self, block: \"Block\", vals: list, paramNames: list[str]):\n        \"\"\"Sets block parameter data.\"\"\"\n        for paramName, val in zip(paramNames, vals):\n            # Skip setting None values.\n            if val is None:\n                continue\n\n            if isinstance(val, (tuple, list, np.ndarray)):\n                self._arrayParamSetter(block, [val], [paramName])\n            else:\n                self._scalarParamSetter(block, [val], [paramName])\n\n    def paramGetter(self, block: \"Block\", paramNames: list[str]):\n        \"\"\"Returns block parameter values as an array in the order of the parameter names given.\"\"\"\n        paramVals = []\n        symmetryFactor = block.getSymmetryFactor()\n        for paramName in paramNames:\n            multiplier = self.getFactorSymmetry(paramName, symmetryFactor)\n            val = block.p[paramName]\n            # list-like should be treated as a numpy array\n            if val is None:\n                paramVals.append(val)\n            elif isinstance(val, (tuple, list, np.ndarray)):\n                paramVals.append(np.array(val) * multiplier if len(val) > 0 else None)\n            else:\n                paramVals.append(val * multiplier)\n\n        return np.array(paramVals, dtype=object)\n\n    def _scalarParamSetter(self, block: \"Block\", vals: list, paramNames: list[str]):\n        \"\"\"Assigns a set of float/integer/string values to a given set of parameters on a block.\"\"\"\n        symmetryFactor = block.getSymmetryFactor()\n        
for paramName, val in zip(paramNames, vals):\n            if val is None:\n                block.p[paramName] = val\n            else:\n                block.p[paramName] = val / self.getFactorSymmetry(paramName, symmetryFactor)\n\n    def _arrayParamSetter(self, block: \"Block\", arrayVals: list, paramNames: list[str]):\n        \"\"\"Assigns a set of list/array values to a given set of parameters on a block.\"\"\"\n        symmetryFactor = block.getSymmetryFactor()\n        for paramName, vals in zip(paramNames, arrayVals):\n            if vals is None:\n                continue\n            block.p[paramName] = np.array(vals) / self.getFactorSymmetry(paramName, symmetryFactor)\n\n    def getFactorSymmetry(self, paramName: str, symmetryFactor: int):\n        \"\"\"Returns the symmetry factor if the parameter is volume integrated, returns 1 otherwise.\"\"\"\n        if self.isVolIntegrated[paramName]:\n            return symmetryFactor\n        else:\n            return 1\n\n\ndef setNumberDensitiesFromOverlaps(block, overlappingBlockInfo):\n    r\"\"\"\n    Set number densities on a block based on overlapping blocks.\n\n    A conservation of number of atoms technique is used to map the non-uniform number densities onto the uniform\n    neutronics mesh. When the number density of a height :math:`H` neutronics mesh block :math:`N^{\\prime}` is\n    being computed from one or more blocks in the ARMI mesh with number densities :math:`N_i` and\n    heights :math:`h_i`, the following formula is used:\n\n    .. 
math::\n\n        N^{\\prime} =  \\sum_i N_i \\frac{h_i}{H}\n    \"\"\"\n    totalDensities = collections.defaultdict(float)\n    block.clearNumberDensities()\n    blockHeightInCm = block.getHeight()\n    for overlappingBlock, overlappingHeightInCm in overlappingBlockInfo:\n        heightScaling = overlappingHeightInCm / blockHeightInCm\n        for nucName, numberDensity in overlappingBlock.getNumberDensities().items():\n            totalDensities[nucName] += numberDensity * heightScaling\n    block.setNumberDensities(dict(totalDensities))\n    # Set the volume of each component in the block to `None` so that the\n    # volume of each component is recomputed.\n    for c in block:\n        c.p.volume = None\n"
  },
  {
    "path": "armi/reactor/cores.py",
    "content": "# Copyright 2024 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nCore is a high-level object in the data model in ARMI.\n\nA Core frequently contain assemblies which in turn contain more refinement in representing the\nphysical reactor.\n\"\"\"\n\nimport collections\nimport copy\nimport itertools\nimport os\nimport time\nfrom typing import Callable, Iterator, Optional\n\nimport numpy as np\nfrom ruamel.yaml import YAML\n\nfrom armi import getPluginManagerOrFail, nuclearDataIO, runLog\nfrom armi.nuclearDataIO import xsLibraries\nfrom armi.reactor import (\n    assemblies,\n    blocks,\n    composites,\n    flags,\n    geometry,\n    grids,\n    parameters,\n    reactorParameters,\n    zones,\n)\nfrom armi.reactor.flags import Flags\nfrom armi.reactor.zones import Zone, Zones\nfrom armi.settings.fwSettings.globalSettings import (\n    CONF_CIRCULAR_RING_PITCH,\n    CONF_DETAILED_AXIAL_EXPANSION,\n    CONF_FRESH_FEED_TYPE,\n    CONF_MIN_MESH_SIZE_RATIO,\n    CONF_NON_UNIFORM_ASSEM_FLAGS,\n    CONF_STATIONARY_BLOCK_FLAGS,\n    CONF_TRACK_ASSEMS,\n    CONF_ZONE_DEFINITIONS,\n    CONF_ZONES_FILE,\n)\nfrom armi.utils import createFormattedStrWithDelimiter, tabulate, units\nfrom armi.utils.iterables import Sequence\nfrom armi.utils.mathematics import average1DWithinTolerance\n\n\nclass Core(composites.Composite):\n    \"\"\"\n    Reactor structure made up of assemblies. 
Could be a Core, spent fuel pool, reactor head, etc.\n\n    This has the bulk of the data management operations.\n\n    Attributes\n    ----------\n    params : dict\n        Core-level parameters are scalar values that have time dependence. Examples are keff,\n        maxPercentBu, etc.\n    assemblies : list\n        List of assembly objects that are currently in the core\n    \"\"\"\n\n    pDefs = reactorParameters.defineCoreParameters()\n\n    def __init__(self, name):\n        \"\"\"\n        Initialize the reactor object.\n\n        Parameters\n        ----------\n        name : str\n            Name of the object. Flags will inherit from this.\n        \"\"\"\n        composites.Composite.__init__(self, name)\n        self.assembliesByName = {}\n        self.circularRingList = {}\n        self.blocksByName = {}  # lookup tables\n        self.numRings = 0\n        self.spatialGrid = None\n        self.xsIndex = {}\n        self.p.numMoves = 0\n        self._lib = None  # placeholder for ISOTXS object\n        self.locParams = {}  # location-based parameters\n        # overridden in case.py to include pre-reactor time.\n        self.timeOfStart = time.time()\n        self.zones = zones.Zones()  # initialize with empty Zones object\n        # initialize the list that holds all shuffles\n        self.moves = {}\n        self.scalarVals = {}\n        self._nuclideCategories = {}\n        self.typeList = []  # list of block types to convert name - to -number.\n\n        # leftover default \"settings\" that are intended to eventually be elsewhere.\n        self._freshFeedType = \"feed fuel\"\n        self._trackAssems = False\n        self._circularRingMode = False\n        self._circularRingPitch = 1.0\n        self._minMeshSizeRatio = 0.15\n        self._detailedAxialExpansion = False\n\n    def setOptionsFromCs(self, cs):\n        from armi.physics.fuelCycle.settings import (\n            CONF_CIRCULAR_RING_MODE,\n            CONF_JUMP_RING_NUM,\n        )\n\n   
     # these are really \"user modifiable modeling constants\"\n        self.p.jumpRing = cs[CONF_JUMP_RING_NUM]\n        self._freshFeedType = cs[CONF_FRESH_FEED_TYPE]\n        self._trackAssems = cs[CONF_TRACK_ASSEMS]\n        self._circularRingMode = cs[CONF_CIRCULAR_RING_MODE]\n        self._circularRingPitch = cs[CONF_CIRCULAR_RING_PITCH]\n        self._minMeshSizeRatio = cs[CONF_MIN_MESH_SIZE_RATIO]\n        self._detailedAxialExpansion = cs[CONF_DETAILED_AXIAL_EXPANSION]\n\n    def __getstate__(self):\n        \"\"\"Applies a settings and parent to the core and components.\"\"\"\n        state = composites.Composite.__getstate__(self)\n        return state\n\n    def __setstate__(self, state):\n        composites.Composite.__setstate__(self, state)\n        self.regenAssemblyLists()\n\n    def __deepcopy__(self, memo):\n        memo[id(self)] = newC = self.__class__.__new__(self.__class__)\n        newC.__setstate__(copy.deepcopy(self.__getstate__(), memo))\n        newC.name = self.name + \"-copy\"\n        return newC\n\n    def __repr__(self):\n        return \"<{}: {} id:{}>\".format(self.__class__.__name__, self.name, id(self))\n\n    def __iter__(self):\n        \"\"\"Override the base Composite __iter__ to produce stable sort order.\"\"\"\n        return iter(self._children)\n\n    @property\n    def r(self):\n        from armi.reactor.reactors import Reactor\n\n        if isinstance(self.parent, Reactor):\n            return self.parent\n\n        return None\n\n    @property\n    def symmetry(self) -> geometry.SymmetryType:\n        \"\"\"Getter for symmetry type.\n\n        .. impl:: Get core symmetry.\n            :id: I_ARMI_R_SYMM\n            :implements: R_ARMI_R_SYMM\n\n            This property getter returns the symmetry attribute of the spatialGrid instance\n            attribute. The spatialGrid is an instance of a child of the abstract base class\n            :py:class:`Grid <armi.reactor.grids.grid.Grid>` type. 
The symmetry attribute is an\n            instance of the :py:class:`SymmetryType <armi.reactor.geometry.SymmetryType>` class,\n            which is a wrapper around the :py:class:`DomainType <armi.reactor.geometry.DomainType>`\n            and :py:class:`BoundaryType <armi.reactor.geometry.BoundaryType>` enumerations used to\n            classify the domain (e.g., 1/3 core, quarter core, full core) and symmetry boundary\n            conditions (e.g., periodic, reflective, none) of a reactor, respectively.\n\n            Only specific combinations of :py:class:`Grid <armi.reactor.grids.grid.Grid>` type,\n            :py:class:`DomainType <armi.reactor.geometry.DomainType>`, and :py:class:`BoundaryType\n            <armi.reactor.geometry.BoundaryType>` are valid. The validity of a user-specified\n            geometry and symmetry is verified by a settings :py:class:`Inspector\n            <armi.settings.settingsValidation.Inspector`.\n        \"\"\"\n        if not self.spatialGrid:\n            raise ValueError(\"Cannot access symmetry before a spatialGrid is attached.\")\n        return self.spatialGrid.symmetry\n\n    @symmetry.setter\n    def symmetry(self, val: str):\n        \"\"\"Setter for symmetry type.\"\"\"\n        self.spatialGrid.symmetry = str(val)\n        self.clearCache()\n\n    @property\n    def geomType(self) -> geometry.GeomType:\n        if not self.spatialGrid:\n            raise ValueError(\"Cannot access geomType before a spatialGrid is attached.\")\n        return self.spatialGrid.geomType\n\n    @property\n    def powerMultiplier(self):\n        \"\"\"\n        Symmetry factor for this model. 1 for full core, 3 for 1/3 core, etc.\n\n        Notes\n        -----\n        This should not be a state variable because it just reflects the current geometry.\n        It changes automatically if the symmetry changes (e.g. 
from a geometry conversion).\n        \"\"\"\n        return self.symmetry.symmetryFactor()\n\n    @property\n    def lib(self) -> Optional[xsLibraries.IsotxsLibrary]:\n        \"\"\"\n        Return the microscopic cross section library, if one exists.\n\n        - If there is a library currently associated with the Core, it will be returned\n        - Otherwise, an ``ISOTXS`` file will be searched for in the working directory, opened as ``ISOTXS`` object and\n          returned. If possible, it will find the correct file for the current cycle and timeNode.\n        - Finally, if no ``ISOTXS`` file exists in the working directory, a None value will be returned.\n        \"\"\"\n        # determine the current cycle and timeNode\n        cycle = None\n        node = None\n        if self.r is not None:\n            cycle = self.r.p.cycle\n            node = self.r.p.timeNode\n\n        # if self._lib is None, try to find a local file\n        isotxsFileName = nuclearDataIO.getExpectedISOTXSFileName(cycle, node)\n        if self._lib is None and os.path.exists(isotxsFileName):\n            # try to find the file for this specific cycle/node\n            runLog.info(f\"Loading microscopic cross section library `{isotxsFileName}` at {cycle}/{node}\")\n            self._lib = nuclearDataIO.isotxs.readBinary(isotxsFileName)\n        elif self._lib is None:\n            # try to find any local file, not labeled by cycle/node\n            isotxsFileName = nuclearDataIO.getExpectedISOTXSFileName()\n            if os.path.exists(isotxsFileName):\n                runLog.info(f\"Loading microscopic cross section library `{isotxsFileName}`\")\n                self._lib = nuclearDataIO.isotxs.readBinary(isotxsFileName)\n\n        return self._lib\n\n    @lib.setter\n    def lib(self, value):\n        \"\"\"Set the microscopic cross section library.\"\"\"\n        runLog.extra(f\"Updating cross section library on {self}.\\nInitial: {self._lib}\\nUpdated: {value}.\")\n        
self._lib = value\n\n    def hasLib(self):\n        \"\"\"Check if the microscopic cross section library is set.\n\n        Since the property ``lib`` will attempt to auto-load from a given ISOTXS file\n        in the working directory, checking ``r.core.lib is not None`` may result in unexpected\n        behavior. Use this instead.\n        \"\"\"\n        return self._lib is not None\n\n    @property\n    def isFullCore(self):\n        \"\"\"Return True if reactor is full core, otherwise False.\"\"\"\n        # Avoid using `not core.isFullCore` to check if third core geometry\n        # use `core.symmetry.domain == geometry.DomainType.THIRD_CORE\n        return self.symmetry.domain == geometry.DomainType.FULL_CORE\n\n    @property\n    def refAssem(self):\n        \"\"\"\n        Return the \"reference\" assembly for this Core.\n\n        The reference assembly is defined as the center-most assembly with a FUEL flag, if any are\n        present, or the center-most of any assembly otherwise.\n\n        Warning\n        -------\n        The convenience of this property should be weighed against it's somewhat arbitrary nature\n        for any particular client. 
The center-most fueled assembly is not particularly\n        representative of the state of the core as a whole.\n        \"\"\"\n        key = lambda a: a.spatialLocator.getRingPos()\n        assems = self.getAssemblies(Flags.FUEL, sortKey=key)\n        if not assems:\n            assems = self.getAssemblies(sortKey=key)\n\n        return assems[0]\n\n    def sortAssemsByRing(self):\n        \"\"\"Sorts the reactor assemblies by ring and position.\"\"\"\n        sortKey = lambda a: a.spatialLocator.getRingPos()\n        self._children = sorted(self._children, key=sortKey)\n\n    def summarizeReactorStats(self):\n        \"\"\"Writes a summary of the reactor to check the mass and volume of all of the blocks.\"\"\"\n        totalMass = 0.0\n        fissileMass = 0.0\n        heavyMetalMass = 0.0\n        totalVolume = 0.0\n        numBlocks = 0\n        for block in self.iterBlocks():\n            totalMass += block.getMass()\n            fissileMass += block.getFissileMass()\n            heavyMetalMass += block.getHMMass()\n            totalVolume += block.getVolume()\n            numBlocks += 1\n        totalMass = totalMass * self.powerMultiplier / 1000.0\n        fissileMass = fissileMass * self.powerMultiplier / 1000.0\n        heavyMetalMass = heavyMetalMass * self.powerMultiplier / 1000.0\n        totalVolume = totalVolume * self.powerMultiplier\n        runLog.extra(\n            \"Summary of {}\\n\".format(self)\n            + tabulate.tabulate(\n                [\n                    (\"Number of Blocks\", numBlocks),\n                    (\"Total Volume (cc)\", totalVolume),\n                    (\"Total Mass (kg)\", totalMass),\n                    (\"Fissile Mass (kg)\", fissileMass),\n                    (\"Heavy Metal Mass (kg)\", heavyMetalMass),\n                ],\n                tableFmt=\"armi\",\n            )\n        )\n\n    def setPowerFromDensity(self):\n        \"\"\"Set the power from the powerDensity.\"\"\"\n        self.p.power = 
self.p.powerDensity * self.getHMMass()\n\n    def setPowerIfNecessary(self):\n        \"\"\"Set the core power, from the power density.\n\n        If the power density is set, but the power isn't, calculate the total heavy metal mass of\n        the reactor, and set the total power. Which will then be the real source of truth again.\n        \"\"\"\n        if self.p.power == 0 and self.p.powerDensity > 0:\n            self.setPowerFromDensity()\n\n    def setBlockMassParams(self):\n        \"\"\"Set the parameters kgHM and kgFis for each block and calculate Pu fraction.\"\"\"\n        for b in self.iterBlocks():\n            b.p.kgHM = b.getHMMass() / units.G_PER_KG\n            b.p.kgFis = b.getFissileMass() / units.G_PER_KG\n            b.p.puFrac = b.getPuMoles() / b.p.molesHmBOL if b.p.molesHmBOL > 0.0 else 0.0\n\n    def getScalarEvolution(self, key):\n        return self.scalarVals[key]\n\n    def locateAllAssemblies(self):\n        \"\"\"\n        Store the current location of all assemblies.\n\n        This is required for shuffle printouts, repeat shuffling, and MCNP shuffling.\n        \"\"\"\n        for a in self.getAssemblies(includeAll=True):\n            a.lastLocationLabel = a.getLocation()\n\n    def removeAssembly(self, a1, discharge=True, addToSFP=False):\n        \"\"\"\n        Takes an assembly and puts it out of core.\n\n        Parameters\n        ----------\n        a1 : assembly\n            The assembly to remove\n        discharge : bool, optional\n            Discharge the assembly, including adding it to the SFP. Default: True\n        addToSFP : bool, optional\n            Store the discharged assembly in the SFP regardless of the\n            ``trackAssems`` setting. 
Default: False\n\n        Notes\n        -----\n        Please expect this method will delete your assembly (instead of moving it into a Spent Fuel\n        Pool) unless you set ``trackAssems`` to True or ``addToSFP`` is set to True.\n\n        Originally, this held onto all assemblies in the Spent Fuel Pool. However, they use memory.\n        And it is possible to have the history interface record only the parameters you need.\n        \"\"\"\n        from armi.reactor.reactors import Reactor\n\n        paramDefs = set(parameters.ALL_DEFINITIONS)\n        paramDefs.difference_update(set(parameters.forType(Core)))\n        paramDefs.difference_update(set(parameters.forType(Reactor)))\n        for paramDef in paramDefs:\n            if paramDef.assigned & parameters.SINCE_ANYTHING:\n                paramDef.assigned = parameters.SINCE_ANYTHING\n\n        if discharge:\n            runLog.debug(f\"Removing {a1} from {self}\")\n        else:\n            runLog.debug(f\"Purging  {a1} from {self}\")\n\n        self.childrenByLocator.pop(a1.spatialLocator)\n        a1.p.dischargeTime = self.r.p.time\n        self.remove(a1)\n\n        if discharge and (self._trackAssems or addToSFP):\n            if self.parent.excore.get(\"sfp\") is not None:\n                self.parent.excore.sfp.add(a1)\n            else:\n                runLog.info(\"No Spent Fuel Pool is found, can't track assemblies.\")\n        else:\n            self._removeListFromAuxiliaries(a1)\n\n    def removeAssembliesInRing(self, ringNum, cs, overrideCircularRingMode=False):\n        \"\"\"\n        Removes all of the assemblies in a given ring.\n\n        Parameters\n        ----------\n        ringNum : int\n            The ring to remove\n        cs: Settings\n            A relevant settings object\n        overrideCircularRingMode : bool, optional\n            False ~ default: use circular/square/hex rings, just as the reactor defines them\n            True ~ Turn off circular ring mode, and instead 
use square or hex.\n\n        See Also\n        --------\n        getAssembliesInRing : definition of a ring\n        \"\"\"\n        for a in self.getAssembliesInRing(ringNum, overrideCircularRingMode=overrideCircularRingMode):\n            self.removeAssembly(a)\n\n        self.processLoading(cs)\n\n    def _removeListFromAuxiliaries(self, assembly):\n        \"\"\"\n        Remove an assembly from all auxiliary reference tables and lists.\n\n        Otherwise it will get added back into assembliesByName, etc.\n\n        History will fail if it tries to summarize an assembly that has been purged.\n        \"\"\"\n        del self.assembliesByName[assembly.getName()]\n        for b in assembly:\n            try:\n                del self.blocksByName[b.getName()]\n            except KeyError:\n                runLog.warning(\n                    \"Cannot delete block {0}. It is not in the Core.blocksByName structure\".format(b),\n                    single=True,\n                    label=\"cannot dereference: lost block\",\n                )\n\n    def normalizeNames(self, startIndex=0):\n        \"\"\"\n        Renumber and rename all the Assemblies and Blocks.\n\n        Parameters\n        ----------\n        startIndex : int, optional\n            The default is to start counting at zero. 
But if you are renumbering assemblies across\n            the entire Reactor, you may want to start at a different number.\n\n        Returns\n        -------\n        int\n            The new max Assembly number.\n        \"\"\"\n        ind = startIndex\n        for a in self:\n            oldName = a.getName()\n            newName = a.makeNameFromAssemNum(ind)\n            if oldName == newName:\n                ind += 1\n                continue\n\n            a.p.assemNum = ind\n            a.setName(newName)\n\n            for b in a:\n                axialIndex = int(b.name.split(\"-\")[-1])\n                b.name = b.makeName(ind, axialIndex)\n\n            ind += 1\n\n        self.normalizeInternalBookeeping()\n\n        return ind\n\n    def normalizeInternalBookeeping(self):\n        \"\"\"Update some bookkeeping dictionaries of assembly and block names in this Core.\"\"\"\n        self.assembliesByName = {}\n        self.blocksByName = {}\n        for assem in self:\n            self.assembliesByName[assem.getName()] = assem\n            for b in assem:\n                self.blocksByName[b.getName()] = b\n\n    def add(self, a, spatialLocator=None):\n        \"\"\"\n        Adds an assembly to the reactor.\n\n        An object must be added before it is placed in a particular cell in the reactor's\n        spatialGrid. When an object is added to a Reactor it get placed in a generic location at the\n        center of the Reactor unless a spatialLocator is passed in as well.\n\n        Parameters\n        ----------\n        a : ArmiObject\n            The object to add to the reactor\n        spatialLocator : SpatialLocator object, optional\n            The location in the reactor to add the new object to. 
Must be unoccupied.\n\n        See Also\n        --------\n        removeAssembly : removes an assembly\n        \"\"\"\n        from armi.reactor.reactors import Reactor\n\n        # Negative assembly IDs are placeholders, and we need to renumber the assembly\n        if a.p.assemNum < 0:\n            a.renumber(self.r.incrementAssemNum())\n\n        # resetting .assigned forces database to be rewritten for shuffled core\n        paramDefs = set(parameters.ALL_DEFINITIONS)\n        paramDefs.difference_update(set(parameters.forType(Core)))\n        paramDefs.difference_update(set(parameters.forType(Reactor)))\n        for paramDef in paramDefs:\n            if paramDef.assigned & parameters.SINCE_ANYTHING:\n                paramDef.assigned = parameters.SINCE_ANYTHING\n\n        # could speed up output by passing format args as an arg and only process if verb good.\n        runLog.debug(\"Adding   {0} to {1}\".format(a, self))\n        composites.Composite.add(self, a)\n        aName = a.getName()\n\n        spatialLocator = spatialLocator or a.spatialLocator\n\n        if spatialLocator is not None and spatialLocator in self.childrenByLocator:\n            raise ValueError(\n                \"Cannot add {} because location {} is already filled by {}.\".format(\n                    aName, a.spatialLocator, self.childrenByLocator[a.spatialLocator]\n                )\n            )\n\n        if spatialLocator is not None:\n            # transfer spatialLocator to Core one\n            spatialLocator = self.spatialGrid[tuple(spatialLocator.indices)]\n            if not self.spatialGrid.locatorInDomain(spatialLocator, symmetryOverlap=True):\n                raise LookupError(\n                    \"Location `{}` outside of the represented domain: `{}`\".format(\n                        spatialLocator, self.spatialGrid.symmetry.domain\n                    )\n                )\n            a.moveTo(spatialLocator)\n\n        self.childrenByLocator[spatialLocator] = a\n 
       # build a lookup table for history tracking.\n        if aName in self.assembliesByName and self.assembliesByName[aName] != a:\n            # try to keep assem numbering correct\n            runLog.error(\n                \"The assembly {1} in the reactor already has the name {0}.\\nCannot add {2}. \"\n                \"Current assemNum is {3}\"\n                \"\".format(aName, self.assembliesByName[aName], a, self.r.p.maxAssemNum)\n            )\n            raise RuntimeError(\"Core already contains an assembly with the same name.\")\n\n        self.assembliesByName[aName] = a\n        for b in a:\n            self.blocksByName[b.getName()] = b\n\n        a.orientBlocks(parentSpatialGrid=self.spatialGrid)\n        if self.geomType == geometry.GeomType.HEX:\n            ring, _loc = self.spatialGrid.getRingPos(a.spatialLocator.getCompleteIndices())\n            if ring > self.numRings:\n                self.numRings = ring\n\n        # track the highest assem Num so when we load from a DB the future assemNums remain constant\n        aNum = a.p.assemNum\n        if aNum > self.p.maxAssemNum:\n            self.p.maxAssemNum = aNum\n\n        if a.lastLocationLabel != a.DATABASE:\n            # time the assembly enters the core in days\n            a.p.chargeTime = self.r.p.time\n            # cycle that the assembly enters the core\n            a.p.chargeCycle = self.r.p.cycle\n            # convert to kg\n            a.p.chargeFis = a.getFissileMass() / 1000.0\n            a.p.chargeBu = a.getMaxParam(\"percentBu\")\n\n    def genAssembliesAddedThisCycle(self):\n        \"\"\"\n        Yield the assemblies that have been added in the current cycle.\n\n        This uses the reactor's cycle parameter and the assemblies' chargeCycle parameters.\n        \"\"\"\n        for a in self:\n            if a.p.chargeCycle == self.r.p.cycle:\n                yield a\n\n    def getNumRings(self, indexBased=False):\n        \"\"\"\n        Returns the number of rings 
in this reactor. Based on location, so indexing will start at 1.\n\n        Circular ring shuffling changes the interpretation of this result.\n\n        Warning\n        -------\n        If you loop through range(maxRing) then ring+1 is the one you want!\n\n        Parameters\n        ----------\n        indexBased : bool, optional\n            If true, will force location-index interpretation, even if \"circular shuffling\" is enabled.\n        \"\"\"\n        if self.circularRingList and not indexBased:\n            return max(self.circularRingList)\n        else:\n            return self.getNumHexRings()\n\n    def getNumHexRings(self):\n        \"\"\"Return the number of hex rings in the core. Based on location so indexing starts at 1.\"\"\"\n        maxRing = 0\n        for a in self:\n            ring, _pos = self.spatialGrid.getRingPos(a.spatialLocator)\n            maxRing = max(maxRing, ring)\n\n        return maxRing\n\n    def getNumAssembliesWithAllRingsFilledOut(self, nRings):\n        \"\"\"\n        Returns nAssmWithBlanks (see description immediately below).\n\n        Parameters\n        ----------\n        nRings : int\n            The number of hex assembly rings in this core, including non-full rings.\n\n        Returns\n        -------\n        nAssmWithBlanks: int\n            The number of assemblies that WOULD exist in this core if all outer assembly hex rings\n            were \"filled out\".\n        \"\"\"\n        if self.powerMultiplier == 1:\n            return 3 * nRings * (nRings - 1) + 1\n        else:\n            return nRings * (nRings - 1) + (nRings + 1) // 2\n\n    def getNumEnergyGroups(self):\n        \"\"\"\n        Return the number of energy groups used in the problem.\n\n        See Also\n        --------\n        armi.nuclearDataIO.ISOTXS.read1D : reads the number of energy groups off the ISOTXS library.\n        \"\"\"\n        return self.lib.numGroups\n\n    def countBlocksWithFlags(self, blockTypeSpec, 
assemTypeSpec=None):\n        \"\"\"\n        Return the total number of blocks in an assembly in the reactor that\n        meets the specified type.\n\n        Parameters\n        ----------\n        blockTypeSpec : Flags or list of Flags\n            The types of blocks to be counted in a single assembly\n        assemTypeSpec : Flags or list of Flags\n            The types of assemblies that are to be examine for the blockTypes of interest. None is\n            every assembly.\n\n        Returns\n        -------\n        maxBlocks : int\n            The maximum number of blocks of the specified types in a single assembly in the core.\n        \"\"\"\n        assems = self.getAssemblies(typeSpec=assemTypeSpec)\n        try:\n            return max(sum(b.hasFlags(blockTypeSpec) for b in a) for a in assems)\n        except ValueError:\n            # In case assems is empty\n            return 0\n\n    def countFuelAxialBlocks(self):\n        \"\"\"\n        Return the maximum number of fuel type blocks in any assembly in the core.\n\n        See Also\n        --------\n        getFirstFuelBlockAxialNode\n        \"\"\"\n        fuelblocks = (a.getBlocks(Flags.FUEL) for a in self.getAssemblies(includeBolAssems=True))\n        try:\n            return max(len(fuel) for fuel in fuelblocks)\n        except ValueError:  # thrown when iterator is empty\n            return 0\n\n    def getFirstFuelBlockAxialNode(self):\n        \"\"\"\n        Determine the offset of the fuel from the grid plate in the assembly with the lowest fuel\n        block.\n\n        This assembly will dictate at what block level the SASSYS reactivity coefficients will start\n        to be generated\n        \"\"\"\n        try:\n            return min(\n                i\n                for a in self.getAssemblies(includeBolAssems=True)\n                for (i, b) in enumerate(a)\n                if b.hasFlags(Flags.FUEL)\n            )\n        except ValueError:\n            # ValueError is 
thrown if min is called on an empty sequence.\n            return float(\"inf\")\n\n    def getAssembliesInRing(\n        self,\n        ring,\n        typeSpec=None,\n        exactType=False,\n        exclusions=None,\n        overrideCircularRingMode=False,\n    ) -> list[assemblies.Assembly]:\n        \"\"\"\n        Returns the assemblies in a specified ring. Definitions of rings can change\n        with problem parameters.\n\n        This function acts as a switch between two separate functions that define what a\n        ring is based on a cs setting 'circularRingMode'\n\n        Parameters\n        ----------\n        ring : int\n            The ring number\n\n        typeSpec : str, list\n            a string or list of assembly types of interest\n\n        exactType : bool\n            flag to match the assembly type exactly\n\n        exclusions : list of assemblies\n            list of assemblies that are not to be considered\n\n        overrideCircularRingMode : bool, optional\n            False ~ default: use circular/square/hex rings, just as the reactor defines them\n            True ~ If you know you don't want to use the circular ring mode, and instead want square or hex.\n\n        Returns\n        -------\n        aList : list of assemblies\n            A list of assemblies that match the criteria within the ring\n        \"\"\"\n        if self._circularRingMode and not overrideCircularRingMode:\n            getter = self.getAssembliesInCircularRing\n        else:\n            getter = self.getAssembliesInSquareOrHexRing\n\n        return getter(ring=ring, typeSpec=typeSpec, exactType=exactType, exclusions=exclusions)\n\n    def getMaxAssembliesInHexRing(self, ring, fullCore=False):\n        \"\"\"\n        Returns the maximum number of assemblies possible for a given Hexagonal ring.\n\n        ring - The ring of interest to calculate the maximum number of assemblies.\n        numEdgeAssems - The number of edge assemblies in the reactor model 
(1/3 core).\n\n        Notes\n        -----\n        Assumes that odd rings do not have an edge assembly in third core geometry.\n        \"\"\"\n        numAssemsUpToOuterRing = self.getNumAssembliesWithAllRingsFilledOut(ring)\n        numAssemsUpToInnerRing = self.getNumAssembliesWithAllRingsFilledOut(ring - 1)\n        maxAssemsInRing = numAssemsUpToOuterRing - numAssemsUpToInnerRing\n\n        # See note*\n        if not fullCore:\n            ringMod = ring % 2\n            if ringMod == 1:\n                maxAssemsInRing -= 1\n\n        return maxAssemsInRing\n\n    def getAssembliesInSquareOrHexRing(\n        self, ring, typeSpec=None, exactType=False, exclusions=None\n    ) -> list[assemblies.Assembly]:\n        \"\"\"\n        Returns the assemblies in a specified ring. Definitions of rings can change with problem\n        parameters.\n\n        Parameters\n        ----------\n        ring : int\n            The ring number\n\n        typeSpec : Flags or [Flags], optional\n            a Flags instance or list of Flags with assembly types of interest\n\n        exactType : bool\n            flag to match the assembly type exactly\n\n        exclusions : list of assemblies\n            list of assemblies that are not to be considered\n\n        Returns\n        -------\n        assems : list of assemblies\n            A list of assemblies that match the criteria within the ring\n        \"\"\"\n        assems = Sequence(self)\n\n        if exclusions:\n            exclusions = set(exclusions)\n            assems.drop(lambda a: a in exclusions)\n\n        # filter based on geomType\n        if self.geomType == geometry.GeomType.CARTESIAN:  # a ring in cartesian is basically a square.\n            assems.select(lambda a: any(xy == ring for xy in abs(a.spatialLocator.indices[:2])))\n        else:\n            assems.select(lambda a: (a.spatialLocator.getRingPos()[0] == ring))\n\n        # filter based on typeSpec\n        if typeSpec:\n            
assems.select(lambda a: a.hasFlags(typeSpec, exact=exactType))\n\n        return list(assems)\n\n    def getAssembliesInCircularRing(\n        self, ring, typeSpec=None, exactType=False, exclusions=None\n    ) -> list[assemblies.Assembly]:\n        \"\"\"\n        Gets an assemblies within a circular range of the center of the core. This function allows\n        for more circular styled assembly shuffling instead of the current hex approach.\n\n        Parameters\n        ----------\n        ring : int\n            The ring number\n\n        typeSpec : Flags or list of Flags\n            a Flags instance or list of Flags with assembly types of interest\n\n        exactType : bool\n            flag to match the assembly type exactly\n\n        exclusions : list of assemblies\n            list of assemblies that are not to be considered\n\n        Returns\n        -------\n        assems : list of assemblies\n            A list of assemblies that match the criteria within the ring\n        \"\"\"\n        if self.geomType == geometry.GeomType.CARTESIAN:\n            # a ring in cartesian is basically a square.\n            raise RuntimeError(\"A circular ring in cartesian coordinates has not been defined yet.\")\n\n        # determine if the circularRingList has been generated\n        if not self.circularRingList:\n            self.circularRingList = self.buildCircularRingDictionary(self._circularRingPitch)\n\n        assems = Sequence(self)\n\n        # Remove exclusions\n        if exclusions:\n            exclusions = set(exclusions)\n            assems.drop(lambda a: a in exclusions)\n\n        # get assemblies at locations\n        locSet = self.circularRingList[ring]\n        assems.select(lambda a: a.getLocation() in locSet)\n\n        if typeSpec:\n            assems.select(lambda a: a.hasFlags(typeSpec, exact=exactType))\n\n        return list(assems)\n\n    def buildCircularRingDictionary(self, ringPitch=1.0):\n        \"\"\"\n        Builds a dictionary 
of all circular rings in the core. This is required information for\n        getAssembliesInCircularRing.\n\n        The purpose of this function is to allow for more circular core shuffling in the hex design.\n\n        Parameters\n        ----------\n        ringPitch : float, optional\n            The relative pitch that should be used to define the spacing between each ring.\n        \"\"\"\n        runLog.extra(\"Building a circular ring dictionary with ring pitch {}\".format(ringPitch))\n        referenceAssembly = self.childrenByLocator[self.spatialGrid[0, 0, 0]]\n        refLocation = referenceAssembly.spatialLocator\n        pitchFactor = ringPitch / self.spatialGrid.pitch\n\n        circularRingDict = collections.defaultdict(set)\n\n        for a in self:\n            dist = a.spatialLocator.distanceTo(refLocation)\n            # To reduce numerical sensitivity, round distance to 6 decimal places\n            # before truncating.\n            index = int(round(dist * pitchFactor, 6)) or 1  # 1 is the smallest ring.\n            circularRingDict[index].add(a.getLocation())\n\n        return circularRingDict\n\n    def _getAssembliesByName(self):\n        \"\"\"\n        If the assembly name-to-assembly object map is deleted or out of date, then this will\n        regenerate it.\n        \"\"\"\n        runLog.extra(\"Generating assemblies-by-name map.\")\n\n        # NOTE: eliminated unnecessary repeated lookups in self for self.assembliesByName\n        self.assembliesByName = assymap = {}\n        # don't includeAll b/c detailed ones are not ready yet\n        for assem in self.getAssemblies(includeBolAssems=True, includeSFP=True):\n            aName = assem.getName()\n            if aName in assymap and assymap[aName] != assem:\n                # dangerous situation that can occur in restart runs where the global assemNum isn't\n                # updated. 
!=assem clause added because sometimes an assem is in one of the\n                # includeAll lists that is also in the core and that's ok.\n                runLog.error(\n                    \"Two (or more) assemblies in the reactor (and associated lists) have the name \"\n                    \"{0},\\nincluding {1} and {2}.\".format(aName, assem, assymap[aName])\n                )\n                raise RuntimeError(\"Assembly name collision.\")\n\n            assymap[aName] = assem\n\n    def getAssemblyByName(self, name: str) -> assemblies.Assembly:\n        \"\"\"\n        Find the assembly that has this name.\n\n        .. impl:: Get assembly by name.\n            :id: I_ARMI_R_GET_ASSEM0\n            :implements: R_ARMI_R_GET_ASSEM\n\n            This method returns the :py:class:`assembly <armi.reactor.core.assemblies.Assembly>`\n            with a name matching the value provided as an input parameter to this function. The\n            ``name`` of an assembly is based on the ``assemNum`` parameter.\n\n        Parameters\n        ----------\n        name : str\n            the assembly name e.g. 'A0001'\n\n        Returns\n        -------\n        Assembly\n\n        See Also\n        --------\n        getAssembly : more general version of this method\n        \"\"\"\n        return self.assembliesByName[name]\n\n    def getAssemblies(\n        self,\n        typeSpec=None,\n        sortKey=None,\n        includeBolAssems=False,\n        includeSFP=False,\n        includeAll=False,\n        zones=None,\n        exact=False,\n    ) -> list[assemblies.Assembly]:\n        \"\"\"\n        Return a list of all the assemblies in the reactor.\n\n        Assemblies from the Core are sorted based on the location-based Assembly comparison\n        operators. 
This is done so that two reactors with physically identical properties are\n        more likely to behave similarly when their assemblies may have been added in different\n        orders.\n\n        (In the future this will likely be replaced by sorting the _children list itself internally,\n        as there is still opportunity for inconsistencies.)\n\n        Parameters\n        ----------\n        typeSpec : Flags or iterable of Flags, optional\n            List of assembly types that will be returned\n\n        sortKey : callable, optional\n            Sort predicate to use when sorting the assemblies.\n\n        includeBolAssems : bool, optional\n            Include the BOL assemblies as well as the ones that are in the core.\n            Default: False\n\n        includeSFP : bool, optional\n            Include assemblies in the SFP\n\n        includeAll : bool, optional\n            Will include ALL assemblies.\n\n        zones : iterable, optional\n            Only include assemblies that are in this these zones\n        \"\"\"\n        if includeAll:\n            includeBolAssems = includeSFP = True\n\n        assems = []\n        if includeBolAssems and self.parent is not None and self.parent.blueprints is not None:\n            assems.extend(self.parent.blueprints.assemblies.values())\n\n        assems.extend(a for a in sorted(self, key=sortKey))\n\n        if includeSFP and self.parent is not None and self.parent.excore.get(\"sfp\") is not None:\n            assems.extend(self.parent.excore.sfp.getChildren())\n\n        if typeSpec:\n            assems = [a for a in assems if a.hasFlags(typeSpec, exact=exact)]\n\n        if zones:\n            zoneLocs = self.zones.getZoneLocations(zones)\n            assems = [a for a in assems if a.getLocation() in zoneLocs]\n\n        return assems\n\n    def getNozzleTypes(self):\n        r\"\"\"\n        Get a dictionary of all of the assembly ``nozzleType``\\ s in the core.\n\n        Returns\n        -------\n    
    nozzles : dict\n            A dictionary of ``{nozzleType: nozzleID}`` pairs, where the nozzleIDs are\n            numbers corresponding to the alphabetical order of the ``nozzleType`` names.\n\n        Notes\n        -----\n        Getting the ``nozzleID`` by alphabetical order could cause a problem if a new\n        ``nozzleType`` is added during a run. This problem should not occur with the\n        ``includeBolAssems=True`` argument provided.\n        \"\"\"\n        nozzleList = list(set(a.p.nozzleType for a in self.getAssemblies(includeBolAssems=True)))\n        return {nozzleType: i for i, nozzleType in enumerate(sorted(nozzleList))}\n\n    def getBlockByName(self, name: str) -> blocks.Block:\n        \"\"\"\n        Finds a block based on its name.\n\n        Parameters\n        ----------\n        name : str\n            Block name e.g. A0001A\n\n        Returns\n        -------\n        Block : the block with the name\n\n        Notes\n        -----\n        The blocksByName structure must be up to date for this to work properly.\n        \"\"\"\n        try:\n            return self.blocksByName[name]\n        except AttributeError:\n            self._genBlocksByName()\n            return self.blocksByName[name]\n\n    def getBlocksByIndices(self, indices) -> list[blocks.Block]:\n        \"\"\"Get blocks in assemblies by block indices.\"\"\"\n        blocks = []\n        for i, j, k in indices:\n            assem = self.childrenByLocator[self.spatialGrid[i, j, 0]]\n            blocks.append(assem[k])\n        return blocks\n\n    def _genBlocksByName(self):\n        \"\"\"If self.blocksByName is deleted, then this will regenerate it.\"\"\"\n        self.blocksByName = {block.getName(): block for block in self.getBlocks(includeAll=True)}\n\n    # This will likely fail, but it will help diagnose why property approach wasn't working\n    # correctly\n    def genBlocksByLocName(self):\n        \"\"\"If self.blocksByLocName is deleted, then this will 
regenerate it or update it if things change.\"\"\"\n        self.blocksByLocName = {block.getLocation(): block for block in self.getBlocks(includeAll=True)}\n\n    def getBlocks(self, bType=None, **kwargs) -> list[blocks.Block]:\n        \"\"\"\n        Returns an iterator over all blocks in the reactor in order.\n\n        Parameters\n        ----------\n        bType : list or Flags, optional\n            Restrict results to a specific block type such as Flags.FUEL, Flags.SHIELD, etc.\n\n        includeBolAssems : bool, optional\n            Include the BOL-Assembly blocks as well. These blocks are created at BOL\n            and used to create new assemblies, etc. If true, the blocks in these\n            assemblies will be returned as well as the ones in the reactor.\n\n        kwargs : dict\n            Any keyword argument from :meth:`getAssemblies`\n\n        Returns\n        -------\n        blocks : iterator\n            all blocks in the reactor (or of type requested)\n\n        See Also\n        --------\n        * :meth:`iterBlocks`: iterator over blocks with limited filtering.\n        * :meth:`getAssemblies` : locates the assemblies in the search\n        \"\"\"\n        blocks = [b for a in self.getAssemblies(**kwargs) for b in a]\n        if bType:\n            blocks = [b for b in blocks if b.hasFlags(bType)]\n        return blocks\n\n    def getFirstBlock(self, blockType=None, exact=False) -> blocks.Block:\n        \"\"\"\n        Return the first block of the requested type in the reactor, or return first block.\n        exact=True will only match fuel, not testfuel, for example.\n\n        Parameters\n        ----------\n        blockType : Flags, optional\n            The type of block to return\n\n        exact : bool, optional\n            Requires an exact match on blockType\n\n        Returns\n        -------\n        b : Block object (or None if no such block exists)\n        \"\"\"\n        for a in self:\n            for b in a:\n        
        if b.hasFlags(blockType, exact):\n                    return b\n\n        return None\n\n    def getFirstAssembly(self, typeSpec=None, exact=False) -> assemblies.Assembly:\n        \"\"\"\n        Gets the first assembly in the reactor.\n\n        Warning\n        -------\n        This function should be used with great care. There are **very** few\n        circumstances in which one wants the \"first\" of a given sort of assembly,\n        `whichever that may happen to be`. Precisely which assembly is returned is\n        sensitive to all sorts of implementation details in Grids, etc., which make the\n        concept of \"first\" rather slippery. Prefer using some sort of precise logic to\n        pick a specific assembly from the Core.\n\n        Parameters\n        ----------\n        typeSpec : Flags or iterable of Flags, optional\n        \"\"\"\n        if typeSpec:\n            try:\n                return next(a for a in self if a.hasFlags(typeSpec, exact))\n            except StopIteration:\n                runLog.warning(\"No assem of type {0} in reactor\".format(typeSpec))\n                return None\n\n        # Assumes at least one assembly in `self`\n        return next(iter(self))\n\n    def regenAssemblyLists(self):\n        \"\"\"\n        If the attribute lists which contain assemblies are deleted (such as by reactors.detachAllAssemblies),\n        then this function will call the other functions to regrow them.\n        \"\"\"\n        self._getAssembliesByName()\n        self._genBlocksByName()\n        self._genChildByLocationLookupTable()\n\n    def getAllXsSuffixes(self):\n        \"\"\"Return all XS suffices (e.g. AA, AB, etc.) in the core.\"\"\"\n        return sorted(set(b.getMicroSuffix() for b in self.iterBlocks()))\n\n    def getNuclideCategories(self):\n        \"\"\"\n        Categorize nuclides as coolant, fuel and structure.\n\n        Notes\n        -----\n        This is used to categorize nuclides for Doppler broadening. 
Control nuclides are treated as structure.\n\n        The categories are defined in the following way:\n\n        1. Add nuclides from coolant components to coolantNuclides\n        2. Add nuclides from fuel components to fuelNuclides (this may be incomplete, e.g.\n           at BOL there are no fission products)\n        3. Add nuclides from all other components to structureNuclides\n        4. Since fuelNuclides may be incomplete, add anything else the user wants to model\n           that isn't already listed in coolantNuclides or structureNuclides.\n\n        Returns\n        -------\n        coolantNuclides : set\n            set of nuclide names\n\n        fuelNuclides : set\n            set of nuclide names\n\n        structureNuclides : set\n            set of nuclide names\n        \"\"\"\n        if not self._nuclideCategories:\n            coolantNuclides = set()\n            fuelNuclides = set()\n            structureNuclides = set()\n            for c in self.iterComponents():\n                compNuclides = []\n                # get only nuclides with non-zero number density\n                # nuclides could be present at 0.0 density just for XS generation\n                if c.p.numberDensities is None:\n                    continue\n                for nuc, dens in zip(c.p.nuclides, c.p.numberDensities):\n                    if dens > 0.0:\n                        compNuclides.append(nuc.decode())\n                if c.getName() == \"coolant\":\n                    coolantNuclides.update(compNuclides)\n                elif \"fuel\" in c.getName():\n                    fuelNuclides.update(compNuclides)\n                else:\n                    structureNuclides.update(compNuclides)\n            structureNuclides -= coolantNuclides\n            structureNuclides -= fuelNuclides\n            remainingNuclides = set(self.parent.blueprints.allNuclidesInProblem) - structureNuclides - coolantNuclides\n            fuelNuclides.update(remainingNuclides)\n   
         self._nuclideCategories[\"coolant\"] = coolantNuclides\n            self._nuclideCategories[\"fuel\"] = fuelNuclides\n            self._nuclideCategories[\"structure\"] = structureNuclides\n            self.summarizeNuclideCategories()\n\n        return (\n            self._nuclideCategories[\"coolant\"],\n            self._nuclideCategories[\"fuel\"],\n            self._nuclideCategories[\"structure\"],\n        )\n\n    def summarizeNuclideCategories(self):\n        \"\"\"Write summary table of the various nuclide categories within the reactor.\"\"\"\n        runLog.info(\n            \"Nuclide categorization for cross section temperature assignments:\\n\"\n            + tabulate.tabulate(\n                [\n                    (\n                        \"Fuel\",\n                        createFormattedStrWithDelimiter(self._nuclideCategories[\"fuel\"]),\n                    ),\n                    (\n                        \"Coolant\",\n                        createFormattedStrWithDelimiter(self._nuclideCategories[\"coolant\"]),\n                    ),\n                    (\n                        \"Structure\",\n                        createFormattedStrWithDelimiter(self._nuclideCategories[\"structure\"]),\n                    ),\n                ],\n                headers=[\"Nuclide Category\", \"Nuclides\"],\n                tableFmt=\"armi\",\n            )\n        )\n\n    def getLocationContents(self, locs, assemblyLevel=False, locContents=None):\n        \"\"\"\n        Given a list of locations, this goes through and finds the blocks or assemblies.\n\n        Parameters\n        ----------\n        locs : list of location objects or strings\n            The locations you'd like to find assemblies in\n        assemblyLevel : bool, optional\n            If True, will find assemblies rather than blocks\n        locContents : dict, optional\n            A lookup table with location string keys and block/assembly values\n            useful 
if you want to call this function many times and would like a speedup.\n\n        Returns\n        -------\n        blockList : iterable\n            List of blocks or assemblies that correspond to the locations passed in\n\n        Notes\n        -----\n        Useful in reading the db.\n\n        See Also\n        --------\n        makeLocationLookup : allows caching to speed this up if you call it a lot.\n        \"\"\"\n        # Why isn't locContents an attribute of reactor? It could be another\n        # property that is generated on demand\n        if not locContents:\n            locContents = self.makeLocationLookup(assemblyLevel)\n        try:\n            # now look 'em up\n            return [locContents[str(loc)] for loc in locs]\n        except KeyError as e:\n            raise KeyError(\"There is nothing in core location {0}.\".format(e))\n\n    def makeLocationLookup(self, assemblyLevel=False):\n        \"\"\"\n        Build a location-keyed lookup table to figure out which block (or\n        assembly, if assemblyLevel=True) is in which location. 
Used within\n        getLocationContents, but can also be used to pre-build a cache for that\n        function, speeding the lookup with a cache.\n\n        See Also\n        --------\n        getLocationContents : can use this lookup table to go faster.\n        \"\"\"\n        # build a lookup table one time.\n        if assemblyLevel:\n            return {a.getLocation(): a for a in self}\n        else:\n            return {b.getLocation(): b for a in self for b in a}\n\n    def getFluxVector(self, energyOrder=0, adjoint=False, extSrc=False, volumeIntegrated=True):\n        \"\"\"\n        Return the multigroup real or adjoint flux of the entire reactor as a vector.\n\n        Order of meshes is based on getBlocks\n\n        Parameters\n        ----------\n        energyOrder : int, optional\n            A value of 0 implies that the flux will have all energy groups for the first mesh point,\n            and then all energy groups for the next mesh point, etc.\n\n            A value of 1 implies that the flux will have values for all mesh points of the first\n            energy group first, followed by all mesh points for the second energy group, etc.\n\n        adjoint : bool, optional\n            If True, will return adjoint flux instead of real flux.\n\n        extSrc : bool, optional\n            If True, will return external source instead of real flux.\n\n        volumeIntegrated : bool, optional\n            If true (default), flux units will be #-cm/s. If false, they will be #-cm^2/s\n\n        Returns\n        -------\n        vals : list\n            The values you requested. 
length is NxG.\n        \"\"\"\n        flux = []\n        groups = range(self.lib.numGroups)\n\n        # build in order 0\n        for b in self.iterBlocks():\n            if adjoint:\n                vals = b.p.adjMgFlux\n            elif extSrc:\n                vals = b.p.extSrc\n            else:\n                vals = b.p.mgFlux\n\n            if not volumeIntegrated:\n                vol = b.getVolume()\n                vals = [v / vol for v in vals]\n\n            flux.extend(vals)\n\n        if energyOrder == 1:\n            # swap order\n            newFlux = []\n            for g in groups:\n                oneGroup = [flux[i] for i in range(g, len(flux), len(groups))]\n                newFlux.extend(oneGroup)\n            flux = newFlux\n\n        return np.array(flux)\n\n    def getAssembly(self, assemNum=None, locationString=None, assemblyName=None, *args, **kwargs):\n        \"\"\"\n        Finds an assembly in the core.\n\n        Parameters\n        ----------\n        assemNum : int, optional\n            Returns the assembly with this assemNum\n        locationString : str\n            A location string\n        assemblyName : str, optional\n            The assembly name\n        *args : additional optional arguments for self.getAssemblies\n\n        Returns\n        -------\n        a : Assembly\n            The assembly that matches, or None if nothing is found\n\n        See Also\n        --------\n        getAssemblyByName\n        getAssemblyWithStringLocation\n        getLocationContents : a much more efficient way to look up assemblies in a list of locations\n        \"\"\"\n        if assemblyName:\n            return self.getAssemblyByName(assemblyName)\n\n        for a in self.getAssemblies(*args, **kwargs):\n            if a.getLocation() == locationString:\n                return a\n            if a.getNum() == assemNum:\n                return a\n\n        return None\n\n    def getAssemblyWithAssemNum(self, assemNum):\n        
\"\"\"\n        Retrieve assembly with a particular assembly number from the core.\n\n        Parameters\n        ----------\n        assemNum : int\n            The assembly number of interest\n\n        Returns\n        -------\n        foundAssembly : Assembly object or None\n            The assembly found, or None\n        \"\"\"\n        return self.getAssembly(assemNum=assemNum)\n\n    def getAssemblyWithStringLocation(self, locationString):\n        \"\"\"Returns an assembly or None if given a location string like '001-001'.\n\n        .. impl:: Get assembly by location.\n            :id: I_ARMI_R_GET_ASSEM1\n            :implements: R_ARMI_R_GET_ASSEM\n\n            This method returns the :py:class:`assembly <armi.reactor.core.assemblies.Assembly>`\n            located in the requested location. The location is provided to this method as an input\n            parameter in a string with the format \"001-001\". For a :py:class:`HexGrid\n            <armi.reactor.grids.hexagonal.HexGrid>`, the first number indicates the hexagonal ring\n            and the second number indicates the position within that ring. For a\n            :py:class:`CartesianGrid <armi.reactor.grids.cartesian.CartesianGrid>`, the first number\n            represents the x index and the second number represents the y index. 
If there is no\n            assembly in the grid at the requested location, this method returns None.\n        \"\"\"\n        ring, pos, _ = grids.locatorLabelToIndices(locationString)\n        loc = self.spatialGrid.getLocatorFromRingAndPos(ring, pos)\n        assem = self.childrenByLocator.get(loc)\n        return assem\n\n    def _checkIfAssemAtRingPosCycle(self, a, ring, pos, cycleNum):\n        \"\"\"\n        Interrogate location history param of specified assembly object.\n\n        Return True if assembly was at specified (ring, pos) at specified cycleNum BOC.\n        \"\"\"\n        nCycles = len(a.p.ringPosHist)\n        if nCycles > cycleNum:  # requested cycleNum has data populated\n            rp = a.p.ringPosHist[cycleNum]\n            if rp[0] not in a.NOT_IN_CORE:\n                if (int(rp[0]), int(rp[1])) == (ring, pos):\n                    return True\n        return False\n\n    def getAssemblyWithRingPosHist(self, ring, pos, cycleNum):\n        \"\"\"\n        Search the Core and SFP for assembly which resided at specified ring and position at specified cycle.\n\n        This is an alternative to getting an assembly by number or string location.\n        \"\"\"\n        # search core\n        for a in self:\n            if self._checkIfAssemAtRingPosCycle(a, ring, pos, cycleNum):\n                return a\n\n        # search sfp\n        if self.parent.excore.get(\"sfp\") is not None:\n            for a in list(self.r.excore[\"sfp\"]):\n                if self._checkIfAssemAtRingPosCycle(a, ring, pos, cycleNum):\n                    return a\n\n        return None\n\n    def getAssemblyPitch(self):\n        \"\"\"\n        Find the assembly pitch for the whole core.\n\n        This returns the pitch according to the spatialGrid. 
To capture any thermal/hydraulic\n        feedback of the core pitch, T/H modules will need to modify the grid pitch directly based\n        on the relevant mechanical assumptions.\n\n        Returns\n        -------\n        pitch : float\n            The assembly pitch.\n        \"\"\"\n        return self.spatialGrid.pitch\n\n    def findNeighbors(self, a, showBlanks=True, duplicateAssembliesOnReflectiveBoundary=False):\n        r\"\"\"\n        Find assemblies that are next to this assembly.\n\n        Return a list of neighboring assemblies.\n\n        For a hexagonal grid, the list begins from the 30 degree point (point 1) then moves\n        counterclockwise around.\n\n        For a Cartesian grid, the order of the neighbors is east, north, west, south.\n\n        .. impl:: Retrieve neighboring assemblies of a given assembly.\n            :id: I_ARMI_R_FIND_NEIGHBORS\n            :implements: R_ARMI_R_FIND_NEIGHBORS\n\n            This method takes an :py:class:`Assembly\n            <armi.reactor.assemblies.Assembly>` as an input parameter and returns\n            a list of the assemblies neighboring that assembly. There are 6\n            neighbors in a hexagonal grid and 4 neighbors in a Cartesian grid.\n            The (i, j) indices of the neighbors are provided by\n            :py:meth:`getNeighboringCellIndices\n            <armi.reactor.grids.StructuredGrid.getNeighboringCellIndices>`. For\n            a hexagonal grid, the (i, j) indices are converted to (ring,\n            position) indexing using the ``core.spatialGrid`` instance attribute.\n\n            The ``showBlanks`` option determines whether non-existing assemblies\n            will be indicated with a ``None`` in the list or just excluded from\n            the list altogether.\n\n            The ``duplicateAssembliesOnReflectiveBoundary`` setting only works for\n            1/3 core symmetry with periodic boundary conditions. 
For these types\n            of geometries, if this setting is ``True``\\ , neighbor lists for\n            assemblies along a periodic boundary will include the assemblies\n            along the opposite periodic boundary that are effectively neighbors.\n\n        Parameters\n        ----------\n        a : Assembly object\n            The assembly to find neighbors of.\n\n        showBlanks : Boolean, optional\n            If True, the returned array of 6 neighbors will return \"None\" for\n            neighbors that do not explicitly exist in the 1/3 core model\n            (including many that WOULD exist in a full core model).\n\n            If False, the returned array will not include the \"None\" neighbors.\n            If one or more neighbors does not explicitly exist in the 1/3 core\n            model, the returned array will have a length of less than 6.\n\n        duplicateAssembliesOnReflectiveBoundary : Boolean, optional\n            If True, findNeighbors duplicates neighbor assemblies into their\n            \"symmetric identicals\" so that even assemblies that border symmetry\n            lines will have 6 neighbors. The only assemblies that will have\n            fewer than 6 neighbors are those that border the outer core boundary\n            (usually vacuum).\n\n            If False, findNeighbors returns None for assemblies that do not\n            exist in a 1/3 core model (but WOULD exist in a full core model).\n\n            For example, applying findNeighbors for the central assembly (ring,\n            pos) = (1, 1) in 1/3 core symmetry (with\n            duplicateAssembliesOnReflectiveBoundary = True) would return a list\n            of 6 assemblies, but those 6 would really only be assemblies (2, 1)\n            and (2, 2) repeated 3 times each.\n\n            Note that the value of duplicateAssembliesOnReflectiveBoundary only\n            really matters if showBlanks == True. 
This will have no effect if\n            the model is full core since asymmetric models could find many\n            duplicates in the other thirds\n\n        Notes\n        -----\n        The duplicateAssembliesOnReflectiveBoundary setting only works for third\n        core symmetry.\n\n        This uses the 'mcnp' index map (MCNP GEODST hex coordinates) instead of\n        the standard (ring, pos) map. because neighbors have consistent indices\n        this way. We then convert over to (ring, pos) using the lookup table\n        that a reactor has.\n\n        Returns\n        -------\n        neighbors : list of assembly objects\n            This is a list of \"nearest neighbors\" to assembly a.\n\n            If showBlanks = False, it will return fewer than the maximum number\n            of neighbors if not all neighbors explicitly exist in the core\n            model. For a hexagonal grid, the maximum number of neighbors is 6.\n            For a Cartesian grid, the maximum number is 4.\n\n            If showBlanks = True and duplicateAssembliesOnReflectiveBoundary =\n            False, it will have a \"None\" for assemblies that do not exist in the\n            1/3 model.\n\n            If showBlanks = True and duplicateAssembliesOnReflectiveBoundary =\n            True, it will return the existing \"symmetric identical\" assembly of\n            a non-existing assembly. 
It will only return \"None\" for an assembly\n            when that assembly is non-existing AND has no existing \"symmetric\n            identical\".\n\n        See Also\n        --------\n        grids.Grid.getSymmetricEquivalents\n        \"\"\"\n        neighborIndices = self.spatialGrid.getNeighboringCellIndices(*a.spatialLocator.getCompleteIndices())\n\n        dupReflectors = (\n            self.symmetry.domain == geometry.DomainType.THIRD_CORE\n            and self.symmetry.boundary == geometry.BoundaryType.PERIODIC\n            and duplicateAssembliesOnReflectiveBoundary\n        )\n\n        neighbors = []\n        for iN, jN, kN in neighborIndices:\n            neighborLoc = self.spatialGrid[iN, jN, kN]\n            neighbor = self.childrenByLocator.get(neighborLoc)\n            if neighbor is not None:\n                neighbors.append(neighbor)\n            elif showBlanks:\n                if dupReflectors:\n                    symmetricAssem = self._getReflectiveDuplicateAssembly(neighborLoc)\n                    neighbors.append(symmetricAssem)\n                else:\n                    neighbors.append(None)\n\n        return neighbors\n\n    def _getReflectiveDuplicateAssembly(self, neighborLoc):\n        \"\"\"\n        Return duplicate assemblies across symmetry line.\n\n        Notes\n        -----\n        If an existing symmetric identical has been found, return it.\n        If an existing symmetric identical has NOT been found, return a None (it's empty).\n        \"\"\"\n        duplicates = []\n        otherTwoLocations = self.spatialGrid.getSymmetricEquivalents(neighborLoc)\n        for i, j in otherTwoLocations:\n            neighborLocation2 = self.spatialGrid[i, j, 0]\n            duplicateAssem = self.childrenByLocator.get(neighborLocation2)\n            if duplicateAssem is not None:\n                duplicates.append(duplicateAssem)\n\n        # should always be 0 or 1\n        nDuplicates = len(duplicates)\n        if nDuplicates 
== 1:\n            return duplicates[0]\n        elif nDuplicates > 1:\n            raise ValueError(\"Too many neighbors found!\")\n        return None\n\n    def setMoveList(self, cycle, oldLoc, newLoc, enrichList, assemblyType, ringPosCycle=None):\n        \"\"\"Tracks the movements in terms of locations and enrichments.\"\"\"\n        from armi.physics.fuelCycle.fuelHandlers import AssemblyMove\n\n        data = AssemblyMove(oldLoc, newLoc, enrichList, assemblyType, ringPosCycle)\n        if self.moves.get(cycle) is None:\n            self.moves[cycle] = []\n        if data in self.moves[cycle]:\n            # remove the old version and throw the new one at the end.\n            self.moves[cycle].remove(data)\n        self.moves[cycle].append(data)\n\n    def createFreshFeed(self, cs=None):\n        \"\"\"\n        Creates a new feed assembly.\n\n        Parameters\n        ----------\n        cs : Settings\n            Global settings for the case\n\n        See Also\n        --------\n        createAssemblyOfType: creates an assembly\n        \"\"\"\n        return self.createAssemblyOfType(assemType=self._freshFeedType, cs=cs)\n\n    def createAssemblyOfType(self, assemType=None, enrichList=None, cs=None):\n        \"\"\"\n        Create an assembly of a specific type and apply enrichments if they are specified.\n\n        Parameters\n        ----------\n        assemType : str\n            The assembly type to create\n        enrichList : list\n            weight percent enrichments of each block\n        cs : Settings\n            Global settings for the case\n\n        Returns\n        -------\n        a : Assembly\n            A new assembly\n\n        Notes\n        -----\n        This and similar fuel shuffle-enabling functionality on the Core are responsible\n        for coupling between the Core and Blueprints. 
Technically, it should not be\n        required to involve Blueprints at all in the construction of a Reactor model.\n        Therefore in some circumstances, this function will not work. Ultimately, this\n        should be purely the domain of blueprints themselves, and may be migrated out of\n        Core in the future.\n\n        See Also\n        --------\n        armi.fuelHandler.doRepeatShuffle : uses this to repeat shuffling\n        \"\"\"\n        a = self.parent.blueprints.constructAssem(cs, name=assemType)\n\n        # check to see if a default bol assembly is being used or we are adding more information\n        if enrichList:\n            # got an enrichment list that should be the same height as the fuel blocks\n            if isinstance(enrichList, float):\n                # make endlessly iterable if float was passed in\n                enrichList = itertools.cycle([enrichList])\n            elif len(a) != len(enrichList):\n                raise RuntimeError(\"{0} and enrichment list do not have the same number of blocks.\".format(a))\n\n            for b, enrich in zip(a, enrichList):\n                if enrich == 0.0:\n                    # don't change blocks when enrich specified as 0\n                    continue\n                if abs(b.getUraniumMassEnrich() - enrich) > 1e-10:\n                    # only adjust block enrichment if it's different.\n                    # WARNING: If this is not fresh fuel, this messes up the number of moles of HM at BOL and\n                    # therefore breaks the burnup metric.\n                    b.adjustUEnrich(enrich)\n\n        if not self._detailedAxialExpansion:\n            # if detailedAxialExpansion: False, make sure that the assembly being created has the correct core mesh\n            a.setBlockMesh(self.p.referenceBlockAxialMesh[1:], conserveMassFlag=\"auto\")  # pass [1:] to skip 0.0\n\n        return a\n\n    def saveAllFlux(self, fName=\"allFlux.txt\"):\n        \"\"\"Dump all flux to file 
for debugging purposes.\"\"\"\n        groups = range(self.lib.numGroups)\n        with open(fName, \"w\") as f:\n            for block in self.iterBlocks():\n                for gi in groups:\n                    f.write(\n                        \"{:10s} {:10d} {:12.5E} {:12.5E} {:12.5E}\\n\".format(\n                            block.getName(),\n                            gi,\n                            block.p.mgFlux[gi],\n                            block.p.adjMgFlux[gi],\n                            block.getVolume(),\n                        )\n                    )\n                if len(block.p.mgFlux) > len(groups) or len(block.p.adjMgFlux) > len(groups):\n                    raise ValueError(\n                        \"Too many flux values: {}\\n{}\\n{}\".format(block, block.p.mgFlux, block.p.adjMgFlux)\n                    )\n\n    def getAssembliesOnSymmetryLine(self, symmetryLineID):\n        \"\"\"Find assemblies that are on a symmetry line in a symmetric core.\"\"\"\n        assembliesOnLine = []\n        for a in self:\n            if a.isOnWhichSymmetryLine() == symmetryLineID:\n                assembliesOnLine.append(a)\n\n        # in order of innermost to outermost (for averaging)\n        assembliesOnLine.sort(key=lambda a: a.spatialLocator.getRingPos())\n        return assembliesOnLine\n\n    def getCoreRadius(self):\n        \"\"\"Returns a radius that the core would fit into.\"\"\"\n        return self.getNumRings(indexBased=True) * self.getFirstBlock().getPitch()\n\n    def findAllMeshPoints(self, assems=None, applySubMesh=True):\n        \"\"\"\n        Return all mesh positions in core including both endpoints.\n\n        .. 
impl:: Construct a mesh based on core blocks.\n            :id: I_ARMI_R_MESH\n            :implements: R_ARMI_R_MESH\n\n            This method iterates through all of the assemblies provided, or all\n            assemblies in the core if no list of ``assems`` is provided, and\n            constructs a tuple of three lists which contain the unique i, j, and\n            k mesh coordinates, respectively. The ``applySubMesh`` setting\n            controls whether the mesh will include the submesh coordinates. For\n            a standard assembly-based reactor geometry with a hexagonal or\n            Cartesian assembly grid, this method is only used to produce axial\n            (k) mesh points. If multiple assemblies are provided with different\n            axial meshes, the axial mesh list will contain the union of all\n            unique mesh points. Duplicate mesh points are removed.\n\n        Parameters\n        ----------\n        assems : list, optional\n            assemblies to consider when determining the mesh points. If not given, all in-core assemblies are used.\n        applySubMesh : bool, optional\n            Apply submeshing parameters to make mesh points smaller than blocks. Default=True.\n\n        Returns\n        -------\n        meshVals : tuple\n            ((i-vals), (j-vals,), (k-vals,))\n\n        See Also\n        --------\n        armi.reactor.assemblies.Assembly.getAxialMesh : get block mesh\n\n        Notes\n        -----\n        These include all mesh points, not just block boundaries. There may be multiple mesh points\n        per block.\n\n        If a large block with multiple mesh points is in the same core as arbitrarily-expanded fuel blocks\n        from fuel performance, an imbalanced axial mesh may result.\n\n        There is a challenge with TRZ blocks because we need the mesh centroid in terms of RZT, not XYZ\n\n        When determining the submesh, it is important to not use too small of a rounding precision. 
It was\n        found that when using a precision of units.FLOAT_DIMENSION_DECIMALS, that the division in `step`\n        can produce mesh points that are the same up to the 9th or 10th digit, resulting in a repeated\n        mesh point. This repetition results in problems in downstream methods, such as the uniform mesh converter.\n        \"\"\"\n        runLog.debug(\"Finding all mesh points.\")\n        if assems is None:\n            assems = list(self)\n\n        iMesh, jMesh, kMesh = set(), set(), set()\n        for a in assems:\n            for b in a:\n                # these params should be combined into a new b.p.meshSubdivisions tuple\n                numPoints = (a.p.AziMesh, a.p.RadMesh, b.p.axMesh) if applySubMesh else (1, 1, 1)\n                base = b.spatialLocator.getGlobalCellBase()\n                # make sure this is in mesh coordinates (important to have TRZ, not XYZ in TRZ cases\n                top = b.spatialLocator.getGlobalCellTop()\n                for axis, (collection, subdivisions) in enumerate(zip((iMesh, jMesh, kMesh), numPoints)):\n                    axisVal = float(base[axis])  # convert from np.float64\n                    step = float(top[axis] - axisVal) / subdivisions\n                    for _subdivision in range(subdivisions):\n                        collection.add(round(axisVal, units.FLOAT_DIMENSION_DECIMALS))\n                        axisVal += step\n                    # add top too (only needed for last point)\n                    collection.add(round(axisVal, units.FLOAT_DIMENSION_DECIMALS))\n\n        iMesh, jMesh, kMesh = map(sorted, (iMesh, jMesh, kMesh))\n\n        return iMesh, jMesh, kMesh\n\n    def findAllAxialMeshPoints(self, assems=None, applySubMesh=True):\n        \"\"\"Return a list of all z-mesh positions in the core including zero and the top.\"\"\"\n        _i, _j, k = self.findAllMeshPoints(assems, applySubMesh)\n        return k\n\n    def updateAxialMesh(self):\n        \"\"\"\n        Update 
axial mesh based on perturbed meshes of the assemblies that are linked to the ref assem.\n\n        Notes\n        -----\n        While processLoading finds *all* axial mesh points, this method only updates the values of the\n        known mesh with the current assembly heights. **This does not change the number of mesh points**.\n\n        If ``detailedAxialExpansion`` is active, the global axial mesh param still only tracks the refAssem.\n        Otherwise, thousands upon thousands of mesh points would get created.\n\n        See Also\n        --------\n        processLoading : sets up the primary mesh that this perturbs.\n        \"\"\"\n        # most of the time, we want fuel, but they should mostly have the same number of blocks\n        # if this becomes a problem, we might find either the\n        #  1. mode: (len(a) for a in self).mode(), or\n        #  2. max: max(len(a) for a in self)\n        # depending on what makes the most sense\n        refAssem = self.refAssem\n        refMesh = self.findAllAxialMeshPoints([refAssem])\n        avgHeight = average1DWithinTolerance(\n            np.array(\n                [\n                    [h for b in a for h in [(b.p.ztop - b.p.zbottom) / b.p.axMesh] * b.p.axMesh]\n                    for a in self\n                    if self.findAllAxialMeshPoints([a]) == refMesh\n                ]\n            )\n        )\n        self.p.axialMesh = list(np.append([0.0], avgHeight.cumsum()))\n\n    def findAxialMeshIndexOf(self, heightCm):\n        \"\"\"\n        Return the axial index of the axial node corresponding to this height.\n\n        If the height lies on the boundary between two nodes, the lower node index\n        is returned.\n\n        Parameters\n        ----------\n        heightCm : float\n            The height (cm) from the assembly bottom.\n\n        Returns\n        -------\n        zIndex : int\n            The axial index (beginning with 0) of the mesh node containing the given height.\n        
\"\"\"\n        for zi, currentHeightCm in enumerate(self.p.axialMesh[1:]):\n            if currentHeightCm >= heightCm:\n                return zi\n        raise ValueError(\n            \"The value {} cm is not within range of the reactor axial mesh with max {}\".format(\n                heightCm, currentHeightCm\n            )\n        )\n\n    def addMoreNodes(self, meshList):\n        \"\"\"Add additional mesh points in the the meshList so that the ratio of mesh sizes does not vary too fast.\"\"\"\n        ratio = self._minMeshSizeRatio\n        for i, innerMeshVal in enumerate(meshList[1:-1], start=1):\n            dP0 = innerMeshVal - meshList[i - 1]\n            dP1 = meshList[i + 1] - innerMeshVal\n\n            if dP0 / (dP0 + dP1) < ratio:\n                runLog.warning(\"Mesh gap too small. Adjusting mesh to be more reasonable.\")\n                meshList.append(innerMeshVal + dP1 * ratio)\n                meshList.sort()\n                return meshList, False\n            elif dP0 / (dP0 + dP1) > (1.0 - ratio):\n                runLog.warning(\"Mesh gap too large. 
Adjusting mesh to be more reasonable.\")\n                meshList.append(meshList[i - 1] + dP0 * (1.0 - ratio))\n                meshList.sort()\n                return meshList, False\n\n        return meshList, True\n\n    def findAllAziMeshPoints(self, extraAssems=None, applySubMesh=True):\n        \"\"\"\n        Returns a list of all azimuthal (theta)-mesh positions in the core.\n\n        Parameters\n        ----------\n        extraAssems : list\n            additional assemblies to consider when determining the mesh points.\n            They may be useful in the MCPNXT models to represent the fuel management dummies.\n\n        applySubMesh : bool\n            generates submesh points to further discretize the theta reactor mesh\n        \"\"\"\n        i, _, _ = self.findAllMeshPoints(extraAssems, applySubMesh)\n        return i\n\n    def findAllRadMeshPoints(self, extraAssems=None, applySubMesh=True):\n        \"\"\"\n        Return a list of all radial-mesh positions in the core.\n\n        Parameters\n        ----------\n        extraAssems : list\n            additional assemblies to consider when determining the mesh points. They may be useful\n            in the MCPNXT models to represent the fuel management dummies.\n\n        applySubMesh : bool\n            (not implemented) generates submesh points to further discretize the radial reactor mesh\n        \"\"\"\n        _, j, _ = self.findAllMeshPoints(extraAssems, applySubMesh)\n        return j\n\n    def getMaxBlockParam(self, *args, **kwargs):\n        \"\"\"Get max param over blocks.\"\"\"\n        if \"generationNum\" in kwargs:\n            raise ValueError(\"Cannot getMaxBlockParam over anything but blocks. 
Prefer `getMaxParam`.\")\n        kwargs[\"generationNum\"] = 2\n        return self.getMaxParam(*args, **kwargs)\n\n    def getTotalBlockParam(self, *args, **kwargs):\n        \"\"\"Get total param over blocks.\"\"\"\n        if \"generationNum\" in kwargs:\n            raise ValueError(\"Cannot getTotalBlockParam over anything but blocks. Prefer `calcTotalParam`.\")\n        kwargs[\"generationNum\"] = 2\n        return self.calcTotalParam(*args, **kwargs)\n\n    def getMaxNumPins(self):\n        \"\"\"Find max number of pins of any block in the reactor.\"\"\"\n        return max(b.getNumPins() for b in self.iterBlocks())\n\n    def getMinimumPercentFluxInFuel(self, target=0.005):\n        \"\"\"\n        Starting with the outer ring, this method goes through the entire Reactor to determine what\n        percentage of flux occurs at each ring.\n\n        Parameters\n        ----------\n        target : float\n            This is the fraction of the total reactor fuel flux compared to the flux in a specific\n            assembly in a ring\n\n        Returns\n        -------\n        targetRing, fraction of flux : tuple\n            targetRing is the ring with the fraction of flux that best meets the target.\n        \"\"\"\n        # get the total number of assembly rings\n        numRings = self.getNumRings()\n\n        # old target assembly fraction\n        fluxFraction = 0\n        targetRing = numRings\n\n        allFuelBlocks = self.getBlocks(Flags.FUEL)\n\n        # loop there all of the rings\n        for ringNumber in range(numRings, 0, -1):\n            # Compare to outer most ring. 
flatten list into one list of all blocks\n            blocksInRing = list(\n                itertools.chain.from_iterable([a.iterBlocks(Flags.FUEL) for a in self.getAssembliesInRing(ringNumber)])\n            )\n\n            totalPower = self.getTotalBlockParam(\"flux\", objs=allFuelBlocks)\n            ringPower = self.getTotalBlockParam(\"flux\", objs=blocksInRing)\n\n            # make sure that there is a non zero return\n            if fluxFraction == 0 and ringPower > 0:\n                fluxFraction = ringPower / totalPower\n                targetRing = ringNumber\n\n            # this will only get the leakage if the target fraction isn't too low\n            if ringPower / totalPower < target and ringPower / totalPower > fluxFraction:\n                fluxFraction = ringPower / totalPower\n                targetRing = ringNumber\n\n        return targetRing, fluxFraction\n\n    def getAvgTemp(self, typeSpec, blockList=None, flux2Weight=False):\n        \"\"\"\n        Get the volume-average fuel, cladding, coolant temperature in core.\n\n        Parameters\n        ----------\n        typeSpec : Flags or list of Flags\n            Component types to consider. If typeSpec is a list, then you get the volume average\n            temperature of all components. For instance, getAvgTemp([Flags.CLAD, Flags.WIRE,\n            Flags.DUCT]) returns the avg. structure temperature.\n\n        blockList : list, optional\n            Blocks to consider. 
If None, all blocks in core will be considered\n\n        flux2Weight : bool, optional\n            If true, will weight temperature against flux**2\n\n        Returns\n        -------\n        avgTemp : float\n            The average temperature in C.\n        \"\"\"\n        num = 0.0\n        denom = 0.0\n        if not blockList:\n            blockList = self.getBlocks()\n\n        for b in blockList:\n            if flux2Weight:\n                weight = b.p.flux**2.0\n            else:\n                weight = 1.0\n            for c in b.iterComponents(typeSpec):\n                vol = c.getVolume()\n                num += c.temperatureInC * vol * weight\n                denom += vol * weight\n\n        if denom:\n            return num / denom\n        else:\n            raise RuntimeError(\"no temperature average for {0}\".format(typeSpec))\n\n    def growToFullCore(self, cs):\n        \"\"\"Copies symmetric assemblies to build a full core model out of a 1/3 core model.\n\n        Returns\n        -------\n        converter : GeometryConverter\n            Geometry converter used to do the conversion.\n        \"\"\"\n        from armi.reactor.converters.geometryConverters import (\n            ThirdCoreHexToFullCoreChanger,\n        )\n\n        converter = ThirdCoreHexToFullCoreChanger(cs)\n        converter.convert(self.r)\n\n        return converter\n\n    def setPitchUniform(self, pitchInCm):\n        \"\"\"Set the pitch in all blocks.\"\"\"\n        for b in self.iterBlocks():\n            b.setPitch(pitchInCm)\n\n        # have to update the 2-D reactor mesh too.\n        self.spatialGrid.changePitch(pitchInCm)\n\n    def calcBlockMaxes(self):\n        \"\"\"\n        Searches all blocks for maximum values of key params.\n\n        See Also\n        --------\n        armi.physics.optimize.OptimizationInterface.interactBOL : handles these maxes in optimization cases\n        \"\"\"\n        # restrict to fuel\n        for k in 
self.p.paramDefs.inCategory(\"block-max\").names:\n            try:\n                maxVal = self.getMaxBlockParam(k.replace(\"max\", \"\"), Flags.FUEL)\n                if maxVal != 0.0:\n                    self.p[k] = maxVal\n            except KeyError:\n                continue\n\n        # add maxes based on pin-level max if it exists, block level max otherwise.\n        self.p.maxBuF = max(\n            (a.getMaxParam(\"percentBu\") for a in self.getAssemblies(Flags.FEED | Flags.FUEL)),\n            default=0.0,\n        )\n        self.p.maxBuI = max(\n            (\n                a.getMaxParam(\"percentBu\")\n                for a in self.getAssemblies(\n                    [\n                        Flags.IGNITER | Flags.FUEL,\n                        Flags.DRIVER | Flags.FUEL,\n                        Flags.STARTER | Flags.FUEL,\n                    ]\n                )\n            ),\n            default=0.0,\n        )\n\n    def getFuelBottomHeight(self):\n        \"\"\"\n        Obtain the height of the lowest fuel in the core.\n\n        This is the \"axial coordinate shift\" between ARMI and SASSYS.\n        While ARMI sets z=0 at the bottom of the lowest block (usually the\n        grid plate), SASSYS sets z=0 at the bottom of the fuel.\n\n        Returns\n        -------\n        lowestFuelHeightInCm : float\n            The height (cm) of the lowest fuel in this core model.\n        \"\"\"\n        lowestFuelHeightInCm = self[0].getHeight()\n        fuelBottoms = []\n        for a in self.getAssemblies(Flags.FUEL):\n            fuelHeightInCm = 0.0\n            for b in a:\n                if b.hasFlags(Flags.FUEL):\n                    break\n                else:\n                    fuelHeightInCm += b.getHeight()\n            if fuelHeightInCm < lowestFuelHeightInCm:\n                lowestFuelHeightInCm = fuelHeightInCm\n            fuelBottoms.append(fuelHeightInCm)\n        return lowestFuelHeightInCm\n\n    def processLoading(self, 
cs, dbLoad: bool = False):\n        \"\"\"\n        After nuclide densities are loaded, this goes through and prepares the reactor.\n\n        Notes\n        -----\n        This does a few operations :\n         * It process boosters,\n         * sets axial snap lists,\n         * checks the geometry,\n         * sets up location tables (tracks where the initial feeds were (for moderation or something)\n\n        See Also\n        --------\n        updateAxialMesh : Perturbs the axial mesh originally set up here.\n        \"\"\"\n        self.setOptionsFromCs(cs)\n        runLog.header(\"=========== Initializing Mesh, Assembly Zones, and Nuclide Categories =========== \")\n\n        for b in self.iterBlocks():\n            if b.p.molesHmBOL > 0.0:\n                break\n        else:\n            # Good easter egg, but sometimes a user will want to use the framework do\n            # only decay analyses and heavy metals are not required.\n            runLog.warning(\n                \"The system has no heavy metal and therefore is not a nuclear reactor.\\n\"\n                \"Please make sure that this is intended and not a input error.\"\n            )\n\n        if dbLoad:\n            # reactor.blueprints.assemblies need to be populated this normally happens during\n            # blueprint constructAssem. 
But for DB load, this is not called so it must be here.\n            self.parent.blueprints._prepConstruction(cs)\n        else:\n            # set reactor level meshing params\n            nonUniformAssems = [Flags.fromStringIgnoreErrors(t) for t in cs[CONF_NON_UNIFORM_ASSEM_FLAGS]]\n            # Some assemblies, like control assemblies, have a non-conforming mesh and should not be\n            # included in self.p.referenceBlockAxialMesh and self.p.axialMesh\n            uniformAssems = [a for a in self.getAssemblies() if not any(a.hasFlags(f) for f in nonUniformAssems)]\n            self.p.referenceBlockAxialMesh = self.findAllAxialMeshPoints(\n                assems=uniformAssems,\n                applySubMesh=False,\n            )\n            self.p.axialMesh = self.findAllAxialMeshPoints(\n                assems=uniformAssems,\n                applySubMesh=True,\n            )\n\n        self.getNuclideCategories()\n\n        # Generate list of flags that are to be stationary during assembly shuffling\n        stationaryBlockFlags = []\n\n        for stationaryBlockFlagString in cs[CONF_STATIONARY_BLOCK_FLAGS]:\n            stationaryBlockFlags.append(Flags.fromString(stationaryBlockFlagString))\n\n        self.stationaryBlockFlagsList = stationaryBlockFlags\n        self.setBlockMassParams()\n        self.p.maxAssemNum = self.getMaxParam(\"assemNum\")\n\n        getPluginManagerOrFail().hook.onProcessCoreLoading(core=self, cs=cs, dbLoad=dbLoad)\n\n    def buildManualZones(self, cs):\n        \"\"\"\n        Build the Zones that are defined in the given Settings, in the `zoneDefinitions` or `zonesFile` case setting.\n\n        Parameters\n        ----------\n        cs : Settings\n            The standard ARMI settings object\n\n        Examples\n        --------\n        Manual zones will be defined in a special string format, e.g.:\n\n        >>> zoneDefinitions:\n        >>>     - \"ring-1: 001-001\"\n        >>>     - \"ring-2: 002-001, 002-002\"\n      
  >>>     - \"ring-3: 003-001, 003-002, 003-003\"\n\n        Notes\n        -----\n        This function will just define the Zones it sees in the settings, it does not do any validation against a Core\n        object to ensure those manual zones make sense.\n        \"\"\"\n        if cs[CONF_ZONE_DEFINITIONS]:\n            runLog.info(f\"Building Zones by manual definitions in {CONF_ZONE_DEFINITIONS} setting\")\n\n            stripper = lambda s: s.strip()\n            self.zones = zones.Zones()\n\n            # parse the special input string for zone definitions\n            for zoneString in cs[CONF_ZONE_DEFINITIONS]:\n                zoneName, zoneLocs = zoneString.split(\":\")\n                zoneLocs = zoneLocs.split(\",\")\n                zone = zones.Zone(zoneName.strip())\n                zone.addLocs(map(stripper, zoneLocs))\n                self.zones.addZone(zone)\n\n        elif cs[CONF_ZONES_FILE]:\n            runLog.info(f\"Custom zoning strategy applied from {CONF_ZONES_FILE}.\")\n\n            self.zones = Zones()\n            with open(cs[CONF_ZONES_FILE]) as stream:\n                zonesDict = YAML(typ=\"safe\").load(stream)\n\n            for location, zoneName in zonesDict[\"customZonesMap\"].items():\n                # if the the zoneName isn't already a Zones key, then add a new Zone\n                if zoneName not in self.zones:\n                    self.zones.addZone(Zone(zoneName, [location]))\n                # if the zoneName is already a Zones key, then add the location to the existing Zone\n                else:\n                    self.zones[zoneName].addLoc(location)\n\n            # sort the Zones\n            self.zones.sortZones()\n\n        else:\n            runLog.warning(f\"No zones defined in either {CONF_ZONE_DEFINITIONS} or {CONF_ZONES_FILE} settings\")\n\n    def iterBlocks(\n        self,\n        typeSpec: Optional[flags.TypeSpec] = None,\n        exact=False,\n        predicate: Callable[[blocks.Block], bool] = 
None,\n    ) -> Iterator[blocks.Block]:\n        \"\"\"Iterate over the blocks in the core.\n\n        Useful for operations that just want to find all the blocks in the core with light filtering.\n\n        Parameters\n        ----------\n        typeSpec: armi.reactor.flags.TypeSpec, optional\n            Limit the traversal to blocks that have these flags.\n        exact: bool, optional\n            Strictness on the usage of ``typeSpec`` used in :meth:`armi.reactor.composites.hasFlags`\n        predicate: f(block) -> bool, optional\n            Limit the traversal to blocks that pass this predicate. Can be used in addition to\n            ``typeSpec`` to perform more advanced filtering.\n\n        Returns\n        -------\n        iterator[Block]\n            Iterator over blocks in the core that meet the conditions provided.\n\n        Examples\n        --------\n        >>> for b in r.core.iterBlocks(Flags.FUEL):\n        ...     pass\n\n        See Also\n        --------\n        The :py:meth:`getBlocks` has more control over what is included in the returned list including looking at the\n        spent fuel pool and assemblies that may not exist now but existed at BOL (via :meth:`getAssemblies`). But if\n        you're just interested in the blocks in the core now, maybe with a flag attached to that block, this is what you\n        should use.\n\n        Notes\n        -----\n        Assumes your composite tree is structured ``Core`` -> ``Assembly`` -> ``Block``. If this is not the case,\n        consider using :meth:`iterChildren`.\n        \"\"\"\n        if typeSpec is not None:\n            typeChecker = lambda b: b.hasFlags(typeSpec, exact=exact)\n        else:\n            typeChecker = lambda _: True\n\n        if predicate is not None:\n            blockChecker = lambda b: typeChecker(b) and predicate(b)\n        else:\n            blockChecker = typeChecker\n\n        return self.iterChildren(generationNum=2, predicate=blockChecker)\n"
  },
  {
    "path": "armi/reactor/excoreStructure.py",
    "content": "# Copyright 2024 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"This module provides the simplest base-class tools for representing reactor objects that are\noutside the reactor core.\n\nThe idea here is that all ex-core objects will be represented first as a spatial grid, and then\narbitrary ArmiObjects can be added to that grid.\n\"\"\"\n\nimport copy\n\nfrom armi.reactor.composites import Composite\n\n\nclass ExcoreStructure(Composite):\n    \"\"\"This is meant as the simplest baseclass needed to represent an ex-core reactor thing.\n\n    An ex-core structure is expected to:\n\n    - be a child of the Reactor,\n    - have a grid associated with it,\n    - contain a hierarchical set of ArmiObjects.\n    \"\"\"\n\n    def __init__(self, name, parent=None):\n        Composite.__init__(self, name)\n        self.parent = parent\n        self.spatialGrid = None\n\n    def __repr__(self):\n        return \"<{}: {} id:{}>\".format(self.__class__.__name__, self.name, id(self))\n\n    @property\n    def r(self):\n        return self.getAncestor(fn=lambda x: x.__class__.__name__ == \"Reactor\")\n\n    def add(self, obj, loc=None):\n        \"\"\"Add an ArmiObject to a particular grid location, in this structure.\n\n        Parameters\n        ----------\n        assem : ArmiObject\n            Any generic ArmiObject to add to the structure.\n        loc : LocationBase, optional\n            The location on this structure's grid. 
If omitted, will come from the object.\n        \"\"\"\n        # if a location is not provided, we demand the object has one\n        if loc is None:\n            loc = obj.spatialLocator\n\n        if loc.grid is not self.spatialGrid:\n            raise ValueError(f\"An Composite cannot be added to {self} using a spatial locator from another grid.\")\n\n        # If an assembly is added and it has a negative ID, that is a placeholder, fix it.\n        if \"assemNum\" in obj.p and obj.p.assemNum < 0:\n            # update the assembly count in the Reactor\n            newNum = self.r.incrementAssemNum()\n            obj.renumber(newNum)\n\n        obj.spatialLocator = loc\n        super().add(obj)\n\n\nclass ExcoreCollection(dict):\n    \"\"\"\n    A collection that allows ex-core structures to be accessed like a dict, or class attributes.\n\n    Examples\n    --------\n    Build some sample data::\n\n        >>> sfp = ExcoreStructure(\"sfp\")\n        >>> ivs = ExcoreStructure(\"ivs\")\n\n    Build THIS collection::\n\n        >>> excore = ExcoreCollection()\n\n    Now you can add data to this collection like it were a dictionary, and access freely::\n\n        >>> excore[\"sfp\"] = sfp\n        >>> excore[\"sfp\"]\n        <ExcoreStructure: sfp id:2311582653024>\n        >>> excore.sfp\n        <ExcoreStructure: sfp id:2311582653024>\n\n    Or you can add data as if it were a class attribute, and still have dual access::\n\n        >>> excore.ivs = ivs\n        >>> excore.ivs\n        <ExcoreStructure: ivs id:2311590971136>\n        >>> excore[\"ivs\"]\n        <ExcoreStructure: ivs id:2311590971136>\n    \"\"\"\n\n    def __getattr__(self, key):\n        \"\"\"Override the class attribute getter.\n\n        First check if the class attribute exists. 
If not, check if the key is in the dictionary.\n        \"\"\"\n        try:\n            # try to get a real class attribute\n            return self.__dict__[key]\n        except KeyError:\n            try:\n                # if it's not a class attribute, maybe it is a dictionary key?\n                return self.__getitem__(key)\n            except Exception:\n                pass\n            # it is neither, just raise the usual error\n            raise\n\n    def __setattr__(self, key, value):\n        \"\"\"Override the class attribute setting.\n\n        If the value has an ExcoreStructure type, assume we want to store this in the dictionary.\n        \"\"\"\n        if type(value) is ExcoreStructure:\n            self.__setitem__(key, value)\n        else:\n            self.__dict__[key] = value\n\n    def __getstate__(self):\n        \"\"\"Needed to support pickling and unpickling the Reactor.\"\"\"\n        return self.__dict__.copy()\n\n    def __setstate__(self, state):\n        \"\"\"Needed to support pickling and unpickling the Reactor.\"\"\"\n        self.__dict__.update(state)\n\n    def __deepcopy__(self, memo):\n        \"\"\"Needed to support pickling and unpickling the Reactor.\"\"\"\n        memo[id(self)] = newE = self.__class__.__new__(self.__class__)\n        newE.__setstate__(copy.deepcopy(self.__getstate__(), memo))\n        return newE\n\n    def __repr__(self):\n        return \"<{}: {} id:{}>\".format(self.__class__.__name__, self.name, id(self))\n"
  },
  {
    "path": "armi/reactor/flags.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nHandles *flags* that formally bind certain categories to reactor parts.\n\n``Flags`` are used to formally categorize the various ``ArmiObject`` objects that make\nup a reactor model. These categories allow parts of the ARMI system to treat different\nAssemblies, Blocks, Components, etc. differently.\n\nBy default, the specific Flags that are bound to each object are derived by that\nobject's name when constructed; if the name contains any valid flag names, those Flags\nwill be assigned to the object. However, specific Flags may be specified within\nblueprints, in which case the name is ignored and only the explicitly-requested Flags\nare applied (see :ref:`bp-input-file` for more details).\n\nIndividual Flags tend to be various nouns and adjectives that describe common objects\nthat go into a reactor (e.g. \"fuel\", \"shield\", \"control\", \"duct\", \"plenum\", etc.). In\naddition, there are some generic Flags (e.g., \"A\", \"B\", \"C\", etc.) that aid in\ndisambiguating between objects that need to be targeted separately but would otherwise\nhave the same Flags. Flags are stored as integer bitfields within the parameter system,\nallowing them to be combined arbitrarily on any ARMI object. 
Since they are stored in\nbitfields, each new flag definition requires widening this bitfield; therefore, the\nnumber of defined Flags should be kept relatively small, and each flag should provide\nmaximum utility.\n\nWithin the code, Flags are usually combined into a \"type specification (``TypeSpec``)\",\nwhich is either a single combination of Flags, or a list of Flag combinations. More\ninformation about how ``TypeSpec`` is interpreted can be found in\n:py:meth:`armi.reactor.composites.ArmiObject.hasFlags`.\n\nFlags are intended to describe `what something is`, rather than `what something should\ndo`. Historically, Flags have been used to do both, which has led to confusion. The\nbinding of specific behavior to certain Flags should ideally be controlled through\nsettings with reasonable defaults, rather than being hard-coded. Currently, much of the\ncode still uses hard-coded ``TypeSpecs``, and certain Flags are clearly saying `what\nsomething should do` (e.g., ``Flags.DEPLETABLE``).\n\n.. note::\n    Flags have a rather storied history. Way back when, code that needed to operate on\n    specific objects would do substring searches against object names to decide if they\n    were relevant. This was very prone to error, and led to all sorts of surprising\n    behavior based on the names used in input files. To improve the situation, Flags\n    were developed to better formalize which strings mattered, and to define canonical\n    names for things. Still almost all flag checks were hard-coded, and\n    aside from up-front error checks, many of the original issues persisted. 
For\n    instance, developing a comprehensive manual of which Flags lead to which behavior\n    was very difficult.\n\n    Migrating the `meaning` of Flags into settings will allow us to better document how\n    those Flags/settings affect ARMI's behavior.\n\n    As mentioned above, plenty of code still hard-codes Flag ``TypeSpecs``, and certain\n    Flags do not follow the `what something is` convention. Future work should improve\n    upon this as possible.\n\n\nThings that Flags are used for include:\n\n* **Fuel management**: Different kinds of assemblies (LTAs, fuel, reflectors) have\n  different shuffling operations and must be distinguished. Certain blocks in an\n  assembly are stationary, and shouldn't be moved along with the rest of the assembly\n  when shuffling is performed. Filtering for stationary blocks can also be done using\n  Flags (e.g., ``Flags.GRID_PLATE``).\n\n* **Fuel performance**: Knowing what's fuel (``Flags.FUEL``) and what isn't (e.g.,\n  ``Flags.PLENUM``) is important to figure out what things to grow and where to move\n  fission gas to.\n\n* **Fluid fuel** reactors need to find all the fuel that ever circulates through the\n  reactor so it can be depleted with the average flux.\n\n* **Core Mechanical** analyses often need to know if an object is solid, fluid, or void\n  (material subclassing can handle this).\n\n* **T/H** needs to find the pin bundle in different kinds of assemblies (*radial shield*\n  block in *radial shield* assemblies, *fuel* in *fuel*, etc.). 
Also needs to generate\n  3-layer pin models with pin (fuel/control/shield/slug), then gap (liners/gap/bond),\n  then clad.\n\n\nExamples\n--------\n>>> block.hasFlags(Flags.PRIMARY | Flags.TEST | Flags.FUEL)\nTrue\n\n>>> block.hasFlags([Flags.PRIMARY, Flags.TEST, Flags.FUEL])\nTrue\n\n>>> block.getComponent(Flags.INTERDUCTCOOLANT)\n<component InterDuctCoolant>\n\n>>> block.getComponents(Flags.FUEL)\n[<component fuel1>, <component fuel2>, ...]\n\n\"\"\"\n\nimport re\nfrom typing import Optional, Sequence, Union\n\nfrom armi.utils.flags import Flag, FlagType, auto\n\n# Type alias used for passing type specifications to many of the composite methods. See\n# Composite::hasFlags() to understand the semantics for how TypeSpecs are interpreted.\n# Anything that interprets a TypeSpec should apply the same semantics.\nTypeSpec = Optional[Union[FlagType, Sequence[FlagType]]]\n\n\ndef __fromStringGeneral(cls, typeSpec, updateMethod):\n    \"\"\"Helper method to minimize code repeat in other fromString methods.\"\"\"\n    result = cls(0)\n    typeSpec = typeSpec.upper()\n    for conversion in _CONVERSIONS:\n        m = conversion.search(typeSpec)\n        if m:\n            typeSpec = re.sub(conversion, \"\", typeSpec)\n            result |= _CONVERSIONS[conversion]\n\n    for name in typeSpec.split():\n        try:\n            # first, check for an exact match, to cover flags with digits\n            result |= cls[name]\n        except KeyError:\n            # ignore numbers so we don't have to define flags up to the number of pins/assem\n            typeSpecWithoutNumbers = \"\".join([c for c in name if not c.isdigit()])\n            if not typeSpecWithoutNumbers:\n                continue\n            result |= updateMethod(typeSpecWithoutNumbers)\n\n    return result\n\n\ndef _fromStringIgnoreErrors(cls, typeSpec):\n    \"\"\"\n    Convert string into a set of flags.\n\n    Each word can be its own flag.\n\n    Notes\n    -----\n    This ignores words in the typeSpec 
that are not valid flags.\n\n    Complications arise when:\n\n    a. multiple-word flags are used such as *grid plate* or *inlet nozzle* so we use lookups.\n    b. Some flags have digits in them. We just strip those off.\n    \"\"\"\n\n    def updateMethodIgnoreErrors(typeSpec):\n        try:\n            return cls[typeSpec]\n        except KeyError:\n            return cls(0)\n\n    return __fromStringGeneral(cls, typeSpec, updateMethodIgnoreErrors)\n\n\ndef _fromString(cls, typeSpec):\n    \"\"\"Make flag from string and fail if any unknown words are encountered.\"\"\"\n\n    def updateMethod(typeSpec):\n        try:\n            return cls[typeSpec]\n        except KeyError:\n            raise InvalidFlagsError(\n                f\"The requested type specification `{typeSpec}` is invalid. See armi.reactor.flags documentation.\"\n            )\n\n    return __fromStringGeneral(cls, typeSpec, updateMethod)\n\n\ndef _toString(cls, typeSpec):\n    \"\"\"\n    Make flag from string and fail if any unknown words are encountered.\n\n    Notes\n    -----\n    This converts a flag from ``Flags.A|B`` to ``'A B'``\n    \"\"\"\n    strings = str(typeSpec).split(\"{}.\".format(cls.__name__))[1]\n    return \" \".join(sorted(strings.split(\"|\")))\n\n\nclass Flags(Flag):\n    \"\"\"Defines the valid flags used in the framework.\"\"\"\n\n    # basic classifiers\n    PRIMARY = auto()\n    SECONDARY = auto()\n    TERTIARY = auto()\n    ANNULAR = auto()  # ideally this info would be inferred from shape\n    A = auto()\n    B = auto()\n    C = auto()\n    D = auto()\n    E = auto()\n    HIGH = auto()\n    MEDIUM = auto()\n    LOW = auto()\n\n    # general kinds of assemblies or blocks\n    MATERIAL = auto()\n    FUEL = auto()\n    TEST = auto()\n    CONTROL = auto()\n    ULTIMATE = auto()\n    SHUTDOWN = auto()\n    SHIELD = auto()\n    SHIELD_BLOCK = auto()\n    SLUG = auto()\n    REFLECTOR = auto()\n\n    # different kinds of fuel\n    DRIVER = auto()\n    IGNITER = auto()\n    
FEED = auto()\n    STARTER = auto()\n    BLANKET = auto()\n    BOOSTER = auto()\n    TARGET = auto()\n    MOX = auto()\n\n    # radial positions\n    INNER = auto()\n    MIDDLE = auto()\n    OUTER = auto()\n    RADIAL = auto()\n\n    # axial positions\n    AXIAL = auto()\n    UPPER = auto()\n    LOWER = auto()\n\n    # assembly parts (including kinds of pins)\n    DUCT = auto()\n    GRID_PLATE = auto()\n    HANDLING_SOCKET = auto()\n    INLET_NOZZLE = auto()\n    PLENUM = auto()\n    BOND = auto()  # not empty\n    LINER = auto()  # Use PRIMARY or SECONDARY to get multiple liners\n    CLAD = auto()\n    PIN = auto()  # the \"meat\" inside the clad\n    GAP = auto()  # generally empty\n    WIRE = auto()\n    COOLANT = auto()\n    INTERCOOLANT = auto()\n    LOAD_PAD = auto()\n    ACLP = auto()  # above core load pad\n    SKID = auto()\n    VOID = auto()\n    INTERDUCTCOOLANT = auto()\n    DSPACERINSIDE = auto()\n    GUIDE_TUBE = auto()\n    FISSION_CHAMBER = auto()\n    MODERATOR = auto()\n    COLLAR = auto()\n\n    # more parts\n    CORE_BARREL = auto()\n    DUMMY = auto()\n    BATCHMASSADDITION = auto()\n\n    POISON = auto()\n\n    STRUCTURE = auto()\n    DEPLETABLE = auto()\n\n    # Allows movement of lower plenum with control rod\n    MOVEABLE = auto()\n\n    @classmethod\n    def fromStringIgnoreErrors(cls, typeSpec):\n        return _fromStringIgnoreErrors(cls, typeSpec)\n\n    @classmethod\n    def fromString(cls, typeSpec):\n        \"\"\"\n        Retrieve flag from a string.\n\n        .. impl:: Retrieve flag from a string.\n            :id: I_ARMI_FLAG_TO_STR0\n            :implements: R_ARMI_FLAG_TO_STR\n\n            For a string passed as ``typeSpec``, first converts the whole string to uppercase. 
Then\n            tries to parse the string for any special phrases, as defined in the module dictionary\n            ``_CONVERSIONS``, and converts those phrases to flags directly.\n\n            Then it splits the remaining string into words based on spaces. Looping over each of the\n            words, if any word exactly matches a flag name. Otherwise, any numbers are stripped out\n            and the remaining string is matched up to any class attribute names. If any matches are\n            found these are returned as flags.\n        \"\"\"\n        return _fromString(cls, typeSpec)\n\n    @classmethod\n    def toString(cls, typeSpec):\n        \"\"\"\n        Convert a flag to a string.\n\n        .. impl:: Convert a flag to string.\n            :id: I_ARMI_FLAG_TO_STR1\n            :implements: R_ARMI_FLAG_TO_STR\n\n            This converts the representation of a bunch of flags from ``typeSpec``, which might look\n            like ``Flags.A|B``, into a string with spaces in between the flag names, which would\n            look like  ``'A B'``. This is done via nesting string splitting and replacement actions.\n        \"\"\"\n        return _toString(cls, typeSpec)\n\n\nclass InvalidFlagsError(KeyError):\n    \"\"\"Raised when code attempts to look for an undefined flag.\"\"\"\n\n    pass\n\n\n# string conversions for multiple-word flags\n# Beware of how these may interact with the standard flag names! E.g., make sure NOZZLE\n# doesn't eat the NOZZLE in INLET_NOZZLE. 
Make sure that words that would otherwise be a\n# substring of a valid flag are wrapped in word-boundary `\\b`s\n_CONVERSIONS = {\n    re.compile(r\"\\bGRID\\s+PLATE\\b\"): Flags.GRID_PLATE,\n    re.compile(r\"\\bGRID\\b\"): Flags.GRID_PLATE,\n    re.compile(r\"\\bINLET\\s+NOZZLE\\b\"): Flags.INLET_NOZZLE,\n    re.compile(r\"\\bNOZZLE\\b\"): Flags.INLET_NOZZLE,\n    re.compile(r\"\\bLOAD\\s+PAD\\b\"): Flags.LOAD_PAD,\n    re.compile(r\"\\bHANDLING\\s+SOCKET\\b\"): Flags.HANDLING_SOCKET,\n    re.compile(r\"\\bGUIDE\\s+TUBE\\b\"): Flags.GUIDE_TUBE,\n    re.compile(r\"\\bFISSION\\s+CHAMBER\\b\"): Flags.FISSION_CHAMBER,\n    re.compile(r\"\\bSOCKET\\b\"): Flags.HANDLING_SOCKET,\n    re.compile(r\"\\bSHIELD\\s+BLOCK\\b\"): Flags.SHIELD_BLOCK,\n    re.compile(r\"\\bSHIELDBLOCK\\b\"): Flags.SHIELD_BLOCK,\n    re.compile(r\"\\bCORE\\s+BARREL\\b\"): Flags.CORE_BARREL,\n    re.compile(r\"\\bINNERDUCT\\b\"): Flags.INNER | Flags.DUCT,\n    re.compile(r\"\\bGAP1\\b\"): Flags.GAP | Flags.A,\n    re.compile(r\"\\bGAP2\\b\"): Flags.GAP | Flags.B,\n    re.compile(r\"\\bGAP3\\b\"): Flags.GAP | Flags.C,\n    re.compile(r\"\\bGAP4\\b\"): Flags.GAP | Flags.D,\n    re.compile(r\"\\bGAP5\\b\"): Flags.GAP | Flags.E,\n    re.compile(r\"\\bLINER1\\b\"): Flags.LINER | Flags.A,\n    re.compile(r\"\\bLINER2\\b\"): Flags.LINER | Flags.B,\n}\n"
  },
  {
    "path": "armi/reactor/geometry.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis module contains constants and enumerations that are useful for describing system\ngeometry.\n\"\"\"\n\nimport enum\nfrom typing import Optional, Union\n\n\nclass GeomType(enum.Enum):\n    \"\"\"\n    Enumeration of geometry types.\n\n    Historically, ARMI has used strings to specify and express things like geometry type\n    and symmetry conditions. This makes interpretation of user input straightforward,\n    but is less ergonomic, less efficient, and more error-prone within the code. For\n    instance:\n\n    * is \"quarter reflective\" the same as \"reflective quarter\"? Should it be?\n    * code that needs to interpret these need to use string operations, which are\n      non-trivial compared to enum comparisons.\n    * rules about mutual exclusion (hex and Cartesian can't both be used in the same\n      context) and composability (geometry type + domain + symmetry type) are harder to\n      enforce.\n\n    Instead, we hope to parse user input into a collection of enumerations and use those\n    internally throughout the code. 
Future work should expand this to satisfy all needs\n    of the geometry system and refactor to replace use of the string constants.\n    \"\"\"\n\n    HEX = 1\n    CARTESIAN = 2\n    RZT = 3\n    RZ = 4\n\n    @classmethod\n    def fromAny(cls, source: Union[str, \"GeomType\"]) -> \"GeomType\":\n        \"\"\"\n        Safely convert from string representation, no-op if already an enum instance.\n\n        This is useful as we transition to using enumerations more throughout the code.\n        There will remain situations where a geomType may be provided in string or enum\n        form, in which the consuming code would have to check the type before\n        proceeding. This function serves two useful purposes:\n\n        * Relieve client code from having to if/elif/else on ``isinstance()`` checks\n        * Provide a location to instrument these conversions for when we actually try\n          to deprecate the strings. E.g., produce a warning when this is called, or\n          eventually forbidding the conversion entirely.\n\n        \"\"\"\n        if isinstance(source, GeomType):\n            return source\n        elif isinstance(source, str):\n            return cls.fromStr(source)\n        else:\n            raise TypeError(\"Expected str or GeomType; got {}\".format(type(source)))\n\n    @classmethod\n    def fromStr(cls, geomStr: str) -> \"GeomType\":\n        # case-insensitive\n        canonical = geomStr.lower().strip()\n        if canonical in (HEX, HEX_CORNERS_UP):\n            # corners-up is used to rotate grids, but shouldn't be needed after the grid\n            # is appropriately oriented, so we collapse to HEX in the enumeration. 
If\n            # there is a good reason to make corners-up HEX its own geom type, we will\n            # need to figure out how to design around that.\n            return cls.HEX\n        elif canonical == CARTESIAN:\n            return cls.CARTESIAN\n        elif canonical == RZT:\n            return cls.RZT\n        elif canonical == RZ:\n            return cls.RZ\n\n        # use the original geomStr with preserved capitalization for better\n        # error-finding.\n        errorMsg = \"Unrecognized geometry type {}. Valid geometry options are: \".format(geomStr)\n        errorMsg += \", \".join([f\"{geom}\" for geom in geomTypes])\n        raise ValueError(errorMsg)\n\n    @property\n    def label(self):\n        \"\"\"Human-presentable label.\"\"\"\n        if self == self.HEX:\n            return \"Hexagonal\"\n        elif self == self.CARTESIAN:\n            return \"Cartesian\"\n        elif self == self.RZT:\n            return \"R-Z-Theta\"\n        else:\n            return \"R-Z\"\n\n    def __str__(self):\n        \"\"\"Inverse of fromStr().\"\"\"\n        if self == self.HEX:\n            return HEX\n        elif self == self.CARTESIAN:\n            return CARTESIAN\n        elif self == self.RZT:\n            return RZT\n        else:\n            return RZ\n\n\nclass DomainType(enum.Enum):\n    \"\"\"Enumeration of shape types.\"\"\"\n\n    NULL = 0\n    FULL_CORE = 1\n    THIRD_CORE = 3\n    QUARTER_CORE = 4\n    EIGHTH_CORE = 8\n    SIXTEENTH_CORE = 16\n\n    @classmethod\n    def fromAny(cls, source: Union[str, \"DomainType\"]) -> \"DomainType\":\n        if isinstance(source, DomainType):\n            return source\n        elif isinstance(source, str):\n            return cls.fromStr(source)\n        else:\n            raise TypeError(\"Expected str or DomainType; got {}\".format(type(source)))\n\n    @classmethod\n    def fromStr(cls, shapeStr: str) -> \"DomainType\":\n        # case-insensitive\n        canonical = 
shapeStr.lower().strip()\n        if canonical == FULL_CORE:\n            return cls.FULL_CORE\n        elif canonical == THIRD_CORE:\n            return cls.THIRD_CORE\n        elif canonical == QUARTER_CORE:\n            return cls.QUARTER_CORE\n        elif canonical == EIGHTH_CORE:\n            return cls.EIGHTH_CORE\n        elif canonical == SIXTEENTH_CORE:\n            return cls.SIXTEENTH_CORE\n        elif canonical == \"\":\n            return cls.NULL\n\n        errorMsg = \"{} is not a valid domain option. Valid domain options are:\".format(str(canonical))\n        errorMsg += \", \".join([f\"{sym}\" for sym in domainTypes])\n        raise ValueError(errorMsg)\n\n    @property\n    def label(self):\n        \"\"\"Human-presentable label.\"\"\"\n        if self == self.FULL_CORE:\n            return \"Full\"\n        elif self == self.THIRD_CORE:\n            return \"Third\"\n        elif self == self.QUARTER_CORE:\n            return \"Quarter\"\n        elif self == self.EIGHTH_CORE:\n            return \"Eighth\"\n        elif self == self.SIXTEENTH_CORE:\n            return \"Sixteenth\"\n        else:\n            # is NULL\n            return \"\"\n\n    def __str__(self):\n        \"\"\"Inverse of fromStr().\"\"\"\n        if self == self.FULL_CORE:\n            return FULL_CORE\n        elif self == self.THIRD_CORE:\n            return THIRD_CORE\n        elif self == self.QUARTER_CORE:\n            return QUARTER_CORE\n        elif self == self.EIGHTH_CORE:\n            return EIGHTH_CORE\n        elif self == self.SIXTEENTH_CORE:\n            return SIXTEENTH_CORE\n        else:\n            # is NULL\n            return \"\"\n\n    def symmetryFactor(self) -> float:\n        if self in (self.FULL_CORE, self.NULL):\n            return 1.0\n        elif self == self.THIRD_CORE:\n            return 3.0\n        elif self == self.QUARTER_CORE:\n            return 4.0\n        elif self == self.EIGHTH_CORE:\n            return 8.0\n        
elif self == self.SIXTEENTH_CORE:\n            return 16.0\n        else:\n            raise ValueError(\"Could not calculate symmetry factor for domain size {}. update logic.\".format(self.label))\n\n\nclass BoundaryType(enum.Enum):\n    \"\"\"Enumeration of boundary types.\"\"\"\n\n    NO_SYMMETRY = 0\n    PERIODIC = 1\n    REFLECTIVE = 2\n\n    @classmethod\n    def fromAny(cls, source: Union[str, \"BoundaryType\"]) -> \"BoundaryType\":\n        if isinstance(source, BoundaryType):\n            return source\n        elif isinstance(source, str):\n            return cls.fromStr(source)\n        else:\n            raise TypeError(\"Expected str or BoundaryType; got {}\".format(type(source)))\n\n    @classmethod\n    def fromStr(cls, symmetryStr: str) -> \"BoundaryType\":\n        # case-insensitive\n        canonical = symmetryStr.lower().strip()\n        if canonical == NO_SYMMETRY:\n            return cls.NO_SYMMETRY\n        elif canonical == PERIODIC:\n            return cls.PERIODIC\n        elif canonical == REFLECTIVE:\n            return cls.REFLECTIVE\n\n        errorMsg = \"{} is not a valid boundary option. 
Valid boundary options are:\".format(str(canonical))\n        errorMsg += \", \".join([f\"{sym}\" for sym in boundaryTypes])\n        raise ValueError(errorMsg)\n\n    @property\n    def label(self):\n        \"\"\"Human-presentable label.\"\"\"\n        if self == self.NO_SYMMETRY:\n            return \"No Symmetry\"\n        elif self == self.REFLECTIVE:\n            return \"Reflective\"\n        else:\n            return \"Periodic\"\n\n    def __str__(self):\n        \"\"\"Inverse of fromStr().\"\"\"\n        if self == self.NO_SYMMETRY:\n            return \"\"\n        elif self == self.PERIODIC:\n            return PERIODIC\n        else:\n            return REFLECTIVE\n\n    def hasSymmetry(self):\n        return self != self.NO_SYMMETRY\n\n\nclass SymmetryType:\n    \"\"\"\n    A wrapper for DomainType and BoundaryType enumerations.\n\n    The goal of this class is to provide simple functions for storing these options\n    in enumerations and using them to check symmetry conditions, while also providing\n    a standard string representation of the options that facilitates interfacing with\n    yaml and/or the database nicely.\n    \"\"\"\n\n    VALID_SYMMETRY = {\n        (DomainType.FULL_CORE, BoundaryType.NO_SYMMETRY, False),\n        (DomainType.FULL_CORE, BoundaryType.NO_SYMMETRY, True),\n        (DomainType.THIRD_CORE, BoundaryType.PERIODIC, False),\n        (DomainType.QUARTER_CORE, BoundaryType.PERIODIC, False),\n        (DomainType.QUARTER_CORE, BoundaryType.REFLECTIVE, False),\n        (DomainType.QUARTER_CORE, BoundaryType.PERIODIC, True),\n        (DomainType.QUARTER_CORE, BoundaryType.REFLECTIVE, True),\n        (DomainType.EIGHTH_CORE, BoundaryType.PERIODIC, False),\n        (DomainType.EIGHTH_CORE, BoundaryType.REFLECTIVE, False),\n        (DomainType.EIGHTH_CORE, BoundaryType.PERIODIC, True),\n        (DomainType.EIGHTH_CORE, BoundaryType.REFLECTIVE, True),\n        (DomainType.SIXTEENTH_CORE, BoundaryType.PERIODIC, False),\n        
(DomainType.SIXTEENTH_CORE, BoundaryType.REFLECTIVE, False),\n    }\n\n    @staticmethod\n    def _checkIfThroughCenter(centerString: str) -> bool:\n        return THROUGH_CENTER_ASSEMBLY in centerString\n\n    def __init__(\n        self,\n        domainType: \"DomainType\" = DomainType.THIRD_CORE,\n        boundaryType: \"BoundaryType\" = BoundaryType.PERIODIC,\n        throughCenterAssembly: Optional[bool] = False,\n    ):\n        self.domain = domainType\n        self.boundary = boundaryType\n        self.isThroughCenterAssembly = throughCenterAssembly\n\n        if not self.checkValidSymmetry():\n            errorMsg = \"{} is not a valid symmetry option. Valid symmetry options are: \".format(str(self))\n            errorMsg += \", \".join([f\"{sym}\" for sym in self.createValidSymmetryStrings()])\n            raise ValueError(errorMsg)\n\n    @classmethod\n    def createValidSymmetryStrings(cls):\n        \"\"\"Create a list of valid symmetry strings based on the set of tuples in VALID_SYMMETRY.\"\"\"\n        return [cls(domain, boundary, isThroughCenter) for domain, boundary, isThroughCenter in cls.VALID_SYMMETRY]\n\n    @classmethod\n    def fromStr(cls, symmetryString: str) -> \"SymmetryType\":\n        \"\"\"Construct a SymmetryType object from a valid string.\"\"\"\n        canonical = symmetryString.lower().strip()\n        # ignore \"assembly\" since it is unnecessary and overly-verbose and too specific\n        noAssembly = canonical.replace(\"assembly\", \"\").strip()\n        isThroughCenter = cls._checkIfThroughCenter(canonical)\n        coreString = noAssembly.replace(THROUGH_CENTER_ASSEMBLY, \"\").strip()\n        trimmedString = coreString.replace(\"core\", \"\").strip()\n        pieces = trimmedString.split()\n        domain = DomainType.fromStr(pieces[0])\n        if len(pieces) == 1:\n            # set the BoundaryType to a default for the DomainType\n            if domain == DomainType.FULL_CORE:\n                boundary = 
BoundaryType.NO_SYMMETRY\n            elif domain == DomainType.THIRD_CORE:\n                boundary = BoundaryType.PERIODIC\n            else:\n                boundary = BoundaryType.REFLECTIVE\n        elif len(pieces) == 2:\n            boundary = BoundaryType.fromStr(pieces[1])\n        else:\n            errorMsg = \"{} [{}] is not a valid symmetry option. Valid symmetry options are:\".format(\n                symmetryString, trimmedString\n            )\n            errorMsg += \", \".join([f\"{sym}\" for sym in cls.createValidSymmetryStrings()])\n            raise ValueError(errorMsg)\n        return cls(domain, boundary, isThroughCenter)\n\n    @classmethod\n    def fromAny(cls, source: Union[str, \"SymmetryType\"]) -> \"SymmetryType\":\n        if isinstance(source, SymmetryType):\n            return source\n        elif isinstance(source, str):\n            return cls.fromStr(source)\n        else:\n            raise TypeError(\"Expected str or SymmetryType; got {}\".format(type(source)))\n\n    def __str__(self):\n        \"\"\"Combined string of domain and boundary symmetry type.\"\"\"\n        strList = [str(self.domain)]\n        if self.boundary.hasSymmetry():\n            strList.append(str(self.boundary))\n        if self.isThroughCenterAssembly:\n            strList.append(THROUGH_CENTER_ASSEMBLY)\n        return \" \".join(strList)\n\n    def __eq__(self, other):\n        \"\"\"Compare two SymmetryType instances. 
False if other is not a SymmetryType.\"\"\"\n        if isinstance(other, SymmetryType):\n            return (\n                self.domain == other.domain\n                and self.boundary == other.boundary\n                and self.isThroughCenterAssembly == other.isThroughCenterAssembly\n            )\n        elif isinstance(other, str):\n            otherSym = SymmetryType.fromStr(other)\n            return (\n                self.domain == otherSym.domain\n                and self.boundary == otherSym.boundary\n                and self.isThroughCenterAssembly == otherSym.isThroughCenterAssembly\n            )\n        else:\n            raise NotImplementedError\n\n    def __hash__(self):\n        \"\"\"Hash a SymmetryType object based on a tuple of its options.\"\"\"\n        return hash((self.domain, self.boundary, self.isThroughCenterAssembly))\n\n    def checkValidSymmetry(self) -> bool:\n        \"\"\"Check if the tuple representation of the SymmetryType can be found in VALID_SYMMETRY.\"\"\"\n        return (\n            self.domain,\n            self.boundary,\n            self.isThroughCenterAssembly,\n        ) in self.VALID_SYMMETRY\n\n    def symmetryFactor(self) -> float:\n        return self.domain.symmetryFactor()\n\n\ndef checkValidGeomSymmetryCombo(\n    geomType: Union[str, \"GeomType\"],\n    symmetryInput: Union[str, \"SymmetryType\"],\n) -> bool:\n    \"\"\"\n    Check if the given combination of GeomType and SymmetryType is valid.\n    Return a boolean indicating the outcome of the check.\n    \"\"\"\n    symmetry = SymmetryType.fromAny(symmetryInput)\n    if (symmetry.domain, symmetry.boundary) in VALID_GEOM_SYMMETRY[GeomType.fromAny(geomType)]:\n        return True\n    else:\n        raise ValueError(\n            \"GeomType: {} and SymmetryType: {} is not a valid combination!\".format(str(geomType), str(symmetry))\n        )\n\n\nSYSTEMS = \"systems\"\nVERSION = \"version\"\n\nHEX = \"hex\"\nHEX_CORNERS_UP = \"hex_corners_up\"\nRZT = 
\"thetarz\"\nRZ = \"rz\"\nCARTESIAN = \"cartesian\"\n\nDODECAGON = \"dodecagon\"\nREC_PRISM = \"RecPrism\"\nHEX_PRISM = \"HexPrism\"\nCONCENTRIC_CYLINDER = \"ConcentricCylinder\"\nANNULUS_SECTOR_PRISM = \"AnnulusSectorPrism\"\n\nVALID_GEOMETRY_TYPE = {HEX, HEX_CORNERS_UP, RZT, RZ, CARTESIAN}\n\nVALID_GEOM_SYMMETRY = {\n    GeomType.HEX: [\n        (DomainType.FULL_CORE, BoundaryType.NO_SYMMETRY),\n        (DomainType.THIRD_CORE, BoundaryType.PERIODIC),\n    ],\n    GeomType.CARTESIAN: [\n        (DomainType.FULL_CORE, BoundaryType.NO_SYMMETRY),\n        (DomainType.QUARTER_CORE, BoundaryType.PERIODIC),\n        (DomainType.EIGHTH_CORE, BoundaryType.PERIODIC),\n        (DomainType.QUARTER_CORE, BoundaryType.REFLECTIVE),\n        (DomainType.EIGHTH_CORE, BoundaryType.REFLECTIVE),\n    ],\n    GeomType.RZT: [\n        (DomainType.FULL_CORE, BoundaryType.NO_SYMMETRY),\n        (DomainType.THIRD_CORE, BoundaryType.PERIODIC),\n        (DomainType.QUARTER_CORE, BoundaryType.PERIODIC),\n        (DomainType.EIGHTH_CORE, BoundaryType.PERIODIC),\n        (DomainType.SIXTEENTH_CORE, BoundaryType.PERIODIC),\n        (DomainType.QUARTER_CORE, BoundaryType.REFLECTIVE),\n        (DomainType.EIGHTH_CORE, BoundaryType.REFLECTIVE),\n        (DomainType.SIXTEENTH_CORE, BoundaryType.REFLECTIVE),\n    ],\n    GeomType.RZ: [(DomainType.FULL_CORE, BoundaryType.NO_SYMMETRY)],\n}\n\nFULL_CORE = \"full\"\nTHIRD_CORE = \"third\"\nQUARTER_CORE = \"quarter\"\nEIGHTH_CORE = \"eighth\"\nSIXTEENTH_CORE = \"sixteenth\"\nREFLECTIVE = \"reflective\"\nPERIODIC = \"periodic\"\nNO_SYMMETRY = \"no symmetry\"\n# through center assembly applies only to cartesian\nTHROUGH_CENTER_ASSEMBLY = \"through center\"\n\ngeomTypes = {HEX, CARTESIAN, RZT, RZ}\ndomainTypes = {FULL_CORE, THIRD_CORE, QUARTER_CORE, EIGHTH_CORE, SIXTEENTH_CORE}\nboundaryTypes = {NO_SYMMETRY, PERIODIC, REFLECTIVE}\n"
  },
  {
    "path": "armi/reactor/grids/__init__.py",
    "content": "# Copyright 2023 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nr\"\"\"\nThis contains structured meshes in multiple geometries and spatial locators (i.e. locations).\n\n:py:class:`Grids <Grid>` are objects that map indices (i, j, k) to spatial locations\n(x,y,z) or (t,r,z).  They are useful for arranging things in reactors, such as:\n\n* Fuel assemblies in a reactor\n* Plates in a heat exchanger\n* Pins in a fuel assembly\n* Blocks in a fuel assembly (1-D)\n\nFast reactors often use a hexagonal grid, while other reactors may be better suited for\nCartesian or RZT grids. This module contains representations of all these.\n\n``Grid``\\ s can be defined by any arbitrary combination of absolute grid boundaries and\nunit step directions.\n\nAssociated with grids are :py:class:`IndexLocations <IndexLocation>`. Each of these maps\nto a single cell in a grid, or to an arbitrary point in the continuous space represented\nby a grid. 
When a `Grid`` is built, it builds a collection of ``IndexLocation``\\ s, one\nfor each cell.\n\nIn the ARMI :py:mod:`armi.reactor` module, each object is assigned a locator either from\na grid or in arbitrary, continuous space (using a :py:class:`CoordinateLocation`) on the\n``spatialLocator`` attribute.\n\nBelow is a basic example of how to use a 2-D grid::\n\n    >>> grid = CartesianGrid.fromRectangle(1.0, 1.0)  # 1 cm square-pitch Cartesian grid\n    >>> location = grid[1,2,0]\n    >>> location.getGlobalCoordinates()\n    array([ 1.,  2.,  0.])\n\nGrids can be chained together in a parent-child relationship. This is often used in ARMI\nwhere a 1-D axial grid (e.g. in an assembly) is being positioned in a core or spent-fuel\npool. See example in\n:py:meth:`armi.reactor.tests.test_grids.TestSpatialLocator.test_recursion`.\n\nThe \"radial\" (ring, position) indexing used in DIF3D can be converted to and from the\nmore quasi-Cartesian indexing in a hex mesh easily with the utility methods\n:py:meth:`HexGrid.getRingPos` and :py:func:`indicesToRingPos`.\n\nThis module is designed to satisfy the spatial arrangement requirements of :py:mod:`the\nReactor package <armi.reactor>`.\n\nThroughout the module, the term **global** refers to the top-level coordinate system\nwhile the word **local** refers to within the current coordinate system defined by the\ncurrent grid.\n\"\"\"\n\n# ruff: noqa: F401\nfrom typing import Optional, Tuple\n\nfrom armi.reactor.grids.axial import AxialGrid\nfrom armi.reactor.grids.cartesian import CartesianGrid\nfrom armi.reactor.grids.constants import (\n    BOUNDARY_0_DEGREES,\n    BOUNDARY_60_DEGREES,\n    BOUNDARY_120_DEGREES,\n    BOUNDARY_CENTER,\n)\nfrom armi.reactor.grids.grid import Grid\nfrom armi.reactor.grids.hexagonal import COS30, SIN30, TRIANGLES_IN_HEXAGON, HexGrid\nfrom armi.reactor.grids.locations import (\n    CoordinateLocation,\n    IndexLocation,\n    LocationBase,\n    MultiIndexLocation,\n    addingIsValid,\n)\nfrom 
armi.reactor.grids.structuredGrid import GridParameters, StructuredGrid, _tuplify\nfrom armi.reactor.grids.thetarz import TAU, ThetaRZGrid\n\n\ndef locatorLabelToIndices(label: str) -> Tuple[int, int, Optional[int]]:\n    \"\"\"\n    Convert a locator label to numerical i,j,k indices.\n\n    If there are only i,j  indices, make the last item None\n    \"\"\"\n    intVals = tuple(int(idx) for idx in label.split(\"-\"))\n    if len(intVals) == 2:\n        intVals = (intVals[0], intVals[1], None)\n    return intVals\n"
  },
  {
    "path": "armi/reactor/grids/axial.py",
    "content": "# Copyright 2023 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import TYPE_CHECKING, List, NoReturn, Optional\n\nimport numpy as np\n\nfrom armi.reactor.grids.locations import IJType, LocationBase\nfrom armi.reactor.grids.structuredGrid import StructuredGrid\n\nif TYPE_CHECKING:\n    from armi.reactor.composites import ArmiObject\n\n\nclass AxialGrid(StructuredGrid):\n    \"\"\"1-D grid in the k-direction (z).\n\n    .. note::\n\n        It is recommended to use :meth:`fromNCells` rather than calling\n        the ``__init_`` constructor directly\n\n    \"\"\"\n\n    @classmethod\n    def fromNCells(cls, numCells: int, armiObject: Optional[\"ArmiObject\"] = None) -> \"AxialGrid\":\n        \"\"\"Produces an unit grid where each bin is 1-cm tall.\n\n        ``numCells + 1`` mesh boundaries are added, since one block would\n        require a bottom and a top.\n\n        \"\"\"\n        # Need float bounds or else we truncate integers\n        return cls(\n            bounds=(None, None, np.arange(numCells + 1, dtype=np.float64)),\n            armiObject=armiObject,\n        )\n\n    @staticmethod\n    def getSymmetricEquivalents(indices: IJType) -> List[IJType]:\n        return []\n\n    @staticmethod\n    def locatorInDomain(locator: LocationBase, symmetryOverlap: Optional[bool] = False) -> NoReturn:\n        raise NotImplementedError\n\n    @staticmethod\n    def getIndicesFromRingAndPos(ring: int, pos: int) -> 
NoReturn:\n        raise NotImplementedError\n\n    @staticmethod\n    def getMinimumRings(n: int) -> NoReturn:\n        raise NotImplementedError\n\n    @staticmethod\n    def getPositionsInRing(ring: int) -> NoReturn:\n        raise NotImplementedError\n\n    @staticmethod\n    def overlapsWhichSymmetryLine(indices: IJType) -> None:\n        return None\n\n    @property\n    def pitch(self) -> float:\n        \"\"\"Grid spacing in the z-direction.\n\n        Returns\n        -------\n        float\n            Pitch in cm\n\n        \"\"\"\n"
  },
  {
    "path": "armi/reactor/grids/cartesian.py",
    "content": "# Copyright 2023 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport itertools\nfrom typing import NoReturn, Optional, Tuple\n\nimport numpy as np\n\nfrom armi.reactor import geometry\nfrom armi.reactor.grids.locations import IJType\nfrom armi.reactor.grids.structuredGrid import StructuredGrid\n\n\nclass CartesianGrid(StructuredGrid):\n    \"\"\"\n    Grid class representing a conformal Cartesian mesh.\n\n    It is recommended to call :meth:`fromRectangle` to construct,\n    rather than directly constructing with ``__init__``\n\n    Notes\n    -----\n    In Cartesian, (i, j, k) indices map to (x, y, z) coordinates.\n    In an axial plane (i, j) are as follows::\n\n        (-1, 1)(0, 1)(1, 1)\n        (-1, 0)(0, 0)(1, 0)\n        (-1, -1)(0, -1)(1, -1)\n\n    The concepts of ring and position are a bit tricker in Cartesian grids than in Hex,\n    because unlike in the Hex case, there is no guaranteed center location. For example,\n    when using a CartesianGrid to lay out assemblies in a core, there is only a single\n    central location if the number of assemblies in the core is odd-by-odd; in an\n    even-by-even case, there are four center-most assemblies. 
Therefore, the number of\n    locations per ring will vary depending on the \"through center\" nature of\n    ``symmetry``.\n\n    Furthermore, notice that in the \"through center\" (odd-by-odd) case, the central\n    index location, (0,0) is typically centered at the origin (0.0, 0.0), whereas with\n    the \"not through center\" (even-by-even) case, the (0,0) index location is offset,\n    away from the origin.\n\n    These concepts are illustrated in the example drawings below.\n\n    .. figure:: ../.static/through-center.png\n        :width: 400px\n        :align: center\n\n        Grid example where the axes pass through the \"center assembly\" (odd-by-odd).\n        Note that ring 1 only has one location in it.\n\n    .. figure:: ../.static/not-through-center.png\n        :width: 400px\n        :align: center\n\n        Grid example where the axes lie between the \"center assemblies\" (even-by-even).\n        Note that ring 1 has four locations, and that the center of the (0, 0)-index\n        location is offset from the origin.\n    \"\"\"\n\n    @classmethod\n    def fromRectangle(cls, width, height, numRings=5, symmetry=\"\", isOffset=False, armiObject=None):\n        \"\"\"\n        Build a finite step-based 2-D Cartesian grid based on a width and height in cm.\n\n        Parameters\n        ----------\n        width : float\n            Width of the unit rectangle\n        height : float\n            Height of the unit rectangle\n        numRings : int\n            Number of rings that the grid should span\n        symmetry : str\n            The symmetry condition (see :py:mod:`armi.reactor.geometry`)\n        isOffset : bool\n            If True, the origin of the Grid's coordinate system will be placed at the\n            bottom-left corner of the center-most cell. 
Otherwise, the origin will be\n            placed at the center of the center-most cell.\n        armiObject : ArmiObject\n            An object in a Composite model that the Grid should be bound to.\n        \"\"\"\n        unitSteps = ((width, 0.0, 0.0), (0.0, height, 0.0), (0, 0, 0))\n        offset = np.array((width / 2.0, height / 2.0, 0.0)) if isOffset else None\n        return cls(\n            unitSteps=unitSteps,\n            unitStepLimits=((-numRings, numRings), (-numRings, numRings), (0, 1)),\n            offset=offset,\n            armiObject=armiObject,\n            symmetry=symmetry,\n        )\n\n    def overlapsWhichSymmetryLine(self, indices: IJType) -> None:\n        \"\"\"Return the line of symmetry on which the position at the given index can be found.\n\n        .. warning::\n\n            This is not really implemented, but parts of ARMI need it to\n            not fail, so it always returns None.\n\n        \"\"\"\n        return None\n\n    def getRingPos(self, indices):\n        \"\"\"\n        Return ring and position from indices.\n\n        Ring is the Manhattan distance from (0, 0) to the passed indices. Position\n        counts up around the ring counter-clockwise from the quadrant 1 diagonal, like\n        this::\n\n            7   6  5  4  3  2  1\n            8         |       24\n            9         |       23\n            10 -------|------ 22\n            11        |       21\n            12        |       20\n            13 14 15 16 17 18 19\n\n        Grids that split the central locations have 1 location in the inner-most ring,\n        whereas grids without split central locations will have 4.\n\n        Notes\n        -----\n        This is needed to support GUI, but should not often be used.\n        i, j (0-based) indices are much more useful. 
For example:\n\n        >>> locator = core.spatialGrid[i, j, 0]  # 3rd index is 0 for assembly\n        >>> a = core.childrenByLocator[locator]\n\n        >>> a = core.childrenByLocator[core.spatialGrid[i, j, 0]]  # one liner\n        \"\"\"\n        i, j = indices[0:2]\n        split = self._isThroughCenter()\n\n        if not split:\n            i += 0.5\n            j += 0.5\n\n        ring = max(abs(int(i)), abs(int(j)))\n\n        if not split:\n            ring += 0.5\n\n        if j == ring:\n            # region 1\n            pos = -i + ring\n        elif i == -ring:\n            # region 2\n            pos = 3 * ring - j\n        elif j == -ring:\n            # region 3\n            pos = 5 * ring + i\n        else:\n            # region 4\n            pos = 7 * ring + j\n        return (int(ring) + 1, int(pos) + 1)\n\n    @staticmethod\n    def getIndicesFromRingAndPos(ring: int, pos: int) -> NoReturn:\n        \"\"\"Not implemented for Cartesian-see getRingPos notes.\"\"\"\n        raise NotImplementedError(\n            \"Cartesian should not need need ring/pos, use i, j indices.\"\n            \"See getRingPos doc string notes for more information/example.\"\n        )\n\n    def getMinimumRings(self, n: int) -> int:\n        \"\"\"Return the minimum number of rings needed to fit ``n`` objects.\"\"\"\n        numPositions = 0\n        ring = 0\n        for ring in itertools.count(1):\n            ringPositions = self.getPositionsInRing(ring)\n            numPositions += ringPositions\n            if numPositions >= n:\n                break\n\n        return ring\n\n    def getPositionsInRing(self, ring: int) -> int:\n        \"\"\"\n        Return the number of positions within a ring.\n\n        Parameters\n        ----------\n        ring : int\n            Ring in question\n\n        Notes\n        -----\n        The number of positions within a ring will change\n        depending on whether the central position in the\n        grid is at origin, 
or if origin is the point\n        where 4 positions meet (i.e., the ``_isThroughCenter``\n        method returns True).\n        \"\"\"\n        if ring == 1:\n            ringPositions = 1 if self._isThroughCenter() else 4\n        else:\n            ringPositions = (ring - 1) * 8\n            if not self._isThroughCenter():\n                ringPositions += 4\n        return ringPositions\n\n    def locatorInDomain(self, locator, symmetryOverlap: Optional[bool] = False):\n        if self.symmetry.domain == geometry.DomainType.QUARTER_CORE:\n            return locator.i >= 0 and locator.j >= 0\n        else:\n            return True\n\n    def changePitch(self, xw: float, yw: float):\n        \"\"\"\n        Change the pitch of a Cartesian grid.\n\n        This also scales the offset.\n        \"\"\"\n        xwOld = self._unitSteps[0][0]\n        ywOld = self._unitSteps[1][1]\n        self._unitSteps = np.array(((xw, 0.0, 0.0), (0.0, yw, 0.0), (0, 0, 0)))[self._stepDims]\n        newOffsetX = self._offset[0] * xw / xwOld\n        newOffsetY = self._offset[1] * yw / ywOld\n        self._offset = np.array((newOffsetX, newOffsetY, 0.0))\n\n    def getSymmetricEquivalents(self, indices):\n        symmetry = self.symmetry  # construct the symmetry object once up top\n        isRotational = symmetry.boundary == geometry.BoundaryType.PERIODIC\n\n        i, j = indices[0:2]\n        if symmetry.domain == geometry.DomainType.FULL_CORE:\n            return []\n        elif symmetry.domain == geometry.DomainType.QUARTER_CORE:\n            if symmetry.isThroughCenterAssembly:\n                # some locations lie on the symmetric boundary\n                if i == 0 and j == 0:\n                    # on the split corner, so the location is its own symmetric\n                    # equivalent\n                    return []\n                elif i == 0:\n                    if isRotational:\n                        return [(j, i), (i, -j), (-j, i)]\n                    else:\n  
                      return [(i, -j)]\n                elif j == 0:\n                    if isRotational:\n                        return [(j, i), (-i, j), (j, -i)]\n                    else:\n                        return [(-i, j)]\n                else:\n                    # Math is a bit easier for the split case, since there is an actual\n                    # center location for (0, 0)\n                    if isRotational:\n                        return [(-j, i), (-i, -j), (j, -i)]\n                    else:\n                        return [(-i, j), (-i, -j), (i, -j)]\n            else:\n                # most objects have 3 equivalents. the bottom-left corner of Quadrant I\n                # is (0, 0), so to reflect, add one and negate each index in\n                # combination. To rotate, first flip the indices for the Quadrant II and\n                # Quadrant IV\n                if isRotational:\n                    # rotational\n                    #        QII           QIII          QIV\n                    return [(-j - 1, i), (-i - 1, -j - 1), (j, -i - 1)]\n                else:\n                    # reflective\n                    #        QII           QIII          QIV\n                    return [(-i - 1, j), (-i - 1, -j - 1), (i, -j - 1)]\n\n        elif symmetry.domain == geometry.DomainType.EIGHTH_CORE:\n            raise NotImplementedError(\"Eighth-core symmetry isn't fully implemented for grids yet!\")\n        else:\n            raise NotImplementedError(\n                \"Unhandled symmetry condition for {}: {}\".format(type(self).__name__, symmetry.domain)\n            )\n\n    def _isThroughCenter(self):\n        \"\"\"Return whether the central cells are split through the middle for symmetry.\"\"\"\n        return all(self._offset == [0, 0, 0])\n\n    @property\n    def pitch(self) -> Tuple[float, float]:\n        \"\"\"Grid pitch in the x and y dimension.\n\n        Returns\n        -------\n        float\n            x-pitch 
(cm)\n        float\n            y-pitch (cm)\n\n        \"\"\"\n        pitch = (self._unitSteps[0][0], self._unitSteps[1][1])\n        if pitch[0] == 0:\n            raise ValueError(f\"Grid {self} does not have a defined pitch.\")\n        return pitch\n"
  },
  {
    "path": "armi/reactor/grids/constants.py",
    "content": "# Copyright 2023 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Some constants often used in grid manipulation.\"\"\"\n\nBOUNDARY_0_DEGREES = 1\nBOUNDARY_60_DEGREES = 2\nBOUNDARY_120_DEGREES = 3\nBOUNDARY_CENTER = 4\n"
  },
  {
    "path": "armi/reactor/grids/grid.py",
    "content": "# Copyright 2023 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom abc import ABC, abstractmethod\nfrom typing import TYPE_CHECKING, Dict, Hashable, Iterable, List, Optional, Tuple, Union\n\nimport numpy as np\n\nfrom armi.reactor import geometry\nfrom armi.reactor.grids.locations import IJKType, IJType, IndexLocation, LocationBase\n\nif TYPE_CHECKING:\n    from armi.reactor.composites import ArmiObject\n\n\nclass Grid(ABC):\n    \"\"\"Base class that defines the interface for grids.\n\n    Most work will be done with structured grids, e.g., hexagonal grid, Cartesian grids,\n    but some physics codes accept irregular or unstructured grids. Consider\n    a Cartesian grid but with variable stepping between cells, where ``dx`` may not be\n    constant.\n\n    So here, we define an interface so things that rely on grids can worry less\n    about how the location data are stored.\n\n    .. impl:: Grids can nest.\n        :id: I_ARMI_GRID_NEST\n        :implements: R_ARMI_GRID_NEST\n\n        The reactor will usually have (i,j,k) coordinates to define a\n        simple mesh for locating objects in the reactor. 
But inside that mesh can\n        be a smaller mesh to define the layout of pins in a reactor, or fuel pellets in\n        a pin, or the layout of some intricate ex-core structure.\n\n        Every time the :py:class:`armi.reactor.grids.locations.IndexLocation` of an\n        object in the reactor is returned, ARMI will look to see if the grid this object\n        is in has a :py:meth:`parent <armi.reactor.grids.locations.IndexLocation.parentLocation>`,\n        and if so, ARMI will try to sum the\n        :py:meth:`indices <armi.reactor.grids.locations.IndexLocation.indices>` of the two\n        nested grids to give a resultant, more finely-grained grid position. ARMI can only\n        handle grids nested 3 deep.\n\n    Parameters\n    ----------\n    geomType : str or armi.reactor.geometry.GeomType\n        Underlying geometric representation\n    symmetry : str or armi.reactor.geometry.SymmetryType\n        Symmetry conditions\n    armiObject : optional, armi.reactor.composites.ArmiObject\n        If given, what is this grid attached to or what does it describe?\n        Something like a :class:`armi.reactor.Core`\n    \"\"\"\n\n    _geomType: str\n    _symmetry: str\n    armiObject: Optional[\"ArmiObject\"]\n\n    def __init__(\n        self,\n        geomType: Union[str, geometry.GeomType] = \"\",\n        symmetry: Union[str, geometry.SymmetryType] = \"\",\n        armiObject: Optional[\"ArmiObject\"] = None,\n    ):\n        # geometric metadata encapsulated here because it's related to the grid.\n        # They do not impact the grid object itself.\n        # Notice that these are stored using their string representations, rather than\n        # the GridType enum. 
This avoids the danger of deserializing an enum value from\n        # an old version of the code that may have had different numeric values.\n        self.geomType = geomType\n        self.symmetry = symmetry\n        self.armiObject = armiObject\n        self._backup = None\n\n    @property\n    def geomType(self) -> geometry.GeomType:\n        \"\"\"Geometric representation.\"\"\"\n        return geometry.GeomType.fromStr(self._geomType)\n\n    @geomType.setter\n    def geomType(self, geomType: Union[str, geometry.GeomType]):\n        if geomType:\n            self._geomType = str(geometry.GeomType.fromAny(geomType))\n        else:\n            self._geomType = \"\"\n\n    @property\n    def symmetry(self) -> str:\n        \"\"\"Symmetry applied to the grid.\n\n        .. impl:: Grids shall be able to represent 1/3 and full core symmetries.\n            :id: I_ARMI_GRID_SYMMETRY0\n            :implements: R_ARMI_GRID_SYMMETRY\n\n            Every grid contains a :py:class:`armi.reactor.geometry.SymmetryType` or\n            string that defines a grid as full core or a partial core: 1/3, 1/4, 1/8, or 1/16\n            core. The idea is that the user can define 1/3 or 1/4 of the reactor, so\n            the analysis can be run faster on a smaller reactor. And if a non-full\n            core reactor grid is defined, the boundaries of the grid can be reflective\n            or periodic, to determine what should happen at the boundaries of the\n            reactor core.\n\n            It is important to note that not all of these geometries will apply to\n            every reactor or core. If your core is made of hexagonal assemblies, then a\n            1/3 core grid would make sense, but not if your reactor core was made up of\n            square assemblies. Likewise, a hexagonal core would not be able to\n            support a 1/4 grid. 
You want to leave assemblies (and other objects) whole\n            when dividing a grid up fractionally.\n        \"\"\"\n        return geometry.SymmetryType.fromStr(self._symmetry)\n\n    @symmetry.setter\n    def symmetry(self, symmetry: Union[str, geometry.SymmetryType]):\n        if symmetry:\n            self._symmetry = str(geometry.SymmetryType.fromAny(symmetry))\n        else:\n            self._symmetry = \"\"\n\n    def __getstate__(self) -> Dict:\n        \"\"\"\n        Pickling removes reference to ``armiObject``.\n\n        Removing the ``armiObject`` allows us to pickle an assembly without pickling\n        the entire reactor. An ``Assembly.spatialLocator.grid.armiObject`` is the\n        reactor, by removing the link here, we still have spatial orientation, but are\n        not required to pickle the entire reactor to pickle an assembly.\n\n        This relies on the ``armiObject.__setstate__`` to assign itself.\n        \"\"\"\n        state = self.__dict__.copy()\n        state[\"armiObject\"] = None\n\n        return state\n\n    def __setstate__(self, state: Dict):\n        \"\"\"\n        Pickling removes reference to ``armiObject``.\n\n        This relies on the ``ArmiObject.__setstate__`` to assign itself.\n        \"\"\"\n        self.__dict__.update(state)\n\n        for _index, locator in self.items():\n            locator._grid = self\n\n    @property\n    @abstractmethod\n    def isAxialOnly(self) -> bool:\n        \"\"\"Indicate to parts of ARMI if this Grid handles only axial cells.\"\"\"\n\n    @abstractmethod\n    def __len__(self) -> int:\n        \"\"\"Number of items in the grid.\"\"\"\n\n    @abstractmethod\n    def items(self) -> Iterable[Tuple[IJKType, IndexLocation]]:\n        \"\"\"Return list of ((i, j, k), IndexLocation) tuples.\"\"\"\n\n    @abstractmethod\n    def locatorInDomain(self, locator: LocationBase, symmetryOverlap: Optional[bool] = False) -> bool:\n        \"\"\"\n        Return whether the passed locator is 
in the domain represented by the Grid.\n\n    For instance, if we have a 1/3rd core hex grid, this would return False for\n    locators that are outside of the first third of the grid.\n\n    Parameters\n    ----------\n    locator : LocationBase\n        The location to test\n    symmetryOverlap : bool, optional\n        Whether grid locations along the symmetry line should be considered \"in the\n        represented domain\". This can be useful when assemblies are split along the\n        domain boundary, with fractions of the assembly on either side.\n\n    Returns\n    -------\n    bool\n        If the given locator is within the given grid\n    \"\"\"\n\n    @abstractmethod\n    def getSymmetricEquivalents(self, indices: IJType) -> List[IJType]:\n        \"\"\"\n        Return a list of grid indices that contain matching contents based on symmetry.\n\n        The length of the list will depend on the type of symmetry being used, and\n        potentially the location of the requested indices. E.g.,\n        third-core will return the two sets of indices at the matching location in the\n        other two thirds of the grid, unless it is the central location, in which case\n        no indices will be returned.\n        \"\"\"\n\n    @abstractmethod\n    def overlapsWhichSymmetryLine(self, indices: IJType) -> Optional[int]:\n        \"\"\"Return the line of symmetry on which the position at the given index can be found.\n\n        Parameters\n        ----------\n        indices : tuple of [int, int]\n            Indices for the requested object\n\n        Returns\n        -------\n        None or int\n            None if no line of symmetry goes through the object at the\n            requested index. 
Otherwise, some grid constants like ``BOUNDARY_CENTER``\n            will be returned.\n        \"\"\"\n\n    @abstractmethod\n    def getCoordinates(\n        self,\n        indices: Union[IJKType, List[IJKType]],\n        nativeCoords: bool = False,\n    ) -> np.ndarray:\n        pass\n\n    @abstractmethod\n    def backUp(self):\n        \"\"\"Subclasses should modify the internal backup variable.\"\"\"\n\n    @abstractmethod\n    def restoreBackup(self):\n        \"\"\"Restore state from backup.\"\"\"\n\n    @abstractmethod\n    def getCellBase(self, indices: IJKType) -> np.ndarray:\n        \"\"\"Return the lower left case of this cell in cm.\"\"\"\n\n    @abstractmethod\n    def getCellTop(self, indices: IJKType) -> np.ndarray:\n        \"\"\"Get the upper right of this cell in cm.\"\"\"\n\n    @staticmethod\n    def getLabel(indices):\n        \"\"\"\n        Get a string label from a 0-based spatial locator.\n\n        Returns a string representing i, j, and k indices of the locator\n        \"\"\"\n        i, j = indices[:2]\n        label = f\"{i:03d}-{j:03d}\"\n        if len(indices) == 3:\n            label += f\"-{indices[2]:03d}\"\n        return label\n\n    @abstractmethod\n    def reduce(self) -> Tuple[Hashable, ...]:\n        \"\"\"\n        Return the set of arguments used to create this Grid.\n\n        This is very much like the argument tuple from ``__reduce__``, but we do not\n        implement ``__reduce__`` for real, because we are generally happy with\n        ``__getstate__`` and ``__setstate__`` for pickling purposes. 
However, getting\n        these arguments to ``__init__`` is useful for storing Grids to the database, as\n        they are more stable (less likely to change) than the actual internal state of\n        the objects.\n\n        The return value should be hashable, such that a set of these can be created.\n\n        The return type should be symmetric such that a similar grid can be\n        created just with the outputs of ``Grid.reduce``, e.g.,\n        ``type(grid)(*grid.reduce())``\n\n        Notes\n        -----\n        For consistency, the second to last argument **must** be the geomType\n        \"\"\"\n"
  },
  {
    "path": "armi/reactor/grids/hexagonal.py",
    "content": "# Copyright 2023 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom collections import deque\nfrom math import isclose, sqrt\nfrom typing import List, Optional, Tuple\n\nimport numpy as np\n\nfrom armi.reactor import geometry\nfrom armi.reactor.grids.constants import (\n    BOUNDARY_0_DEGREES,\n    BOUNDARY_60_DEGREES,\n    BOUNDARY_120_DEGREES,\n    BOUNDARY_CENTER,\n)\nfrom armi.reactor.grids.locations import IJKType, IJType, IndexLocation\nfrom armi.reactor.grids.structuredGrid import StructuredGrid\nfrom armi.utils import hexagon\n\nCOS30 = sqrt(3) / 2.0\nSIN30 = 1.0 / 2.0\n# going counter-clockwise from \"position 1\" (top right)\nTRIANGLES_IN_HEXAGON = np.array(\n    [\n        (+COS30, SIN30),\n        (+0, 1.0),\n        (-COS30, SIN30),\n        (-COS30, -SIN30),\n        (+0, -1.0),\n        (+COS30, -SIN30),\n    ]\n)\n\n\nclass HexGrid(StructuredGrid):\n    r\"\"\"\n    Has 6 neighbors in plane.\n\n    It is recommended to use :meth:`fromPitch` rather than calling the ``__init__`` onstructor directly.\n\n    .. impl:: Construct a hexagonal lattice.\n        :id: I_ARMI_GRID_HEX\n        :implements: R_ARMI_GRID_HEX\n\n        This class represents a hexagonal ``StructuredGrid``, that is one where the mesh maps to real, physical\n        coordinates. This hexagonal grid is 2D, and divides the plane up into regular hexagons. 
That is, each hexagon is\n        symmetric and is precisely flush with six neighboring hexagons. This class only allows for two rotational\n        options: flats up (where two sides of the hexagons are parallel with the X-axis), and points up (where two sides\n        are parallel with the Y-axis).\n\n    Notes\n    -----\n    In an axial plane (i, j) are as follows (flats up)::\n\n                 _____\n                /     \\\n          _____/  0,1  \\_____\n         /     \\       /     \\\n        / -1,1  \\_____/  1,0  \\\n        \\       /     \\       /\n         \\_____/  0,0  \\_____/\n         /     \\       /     \\\n        / -1,0  \\_____/  1,-1 \\\n        \\       /     \\       /\n         \\_____/  0,-1 \\_____/\n               \\       /\n                \\_____/\n\n    In an axial plane (i, j) are as follows (corners up)::\n\n               / \\     / \\\n             /     \\ /     \\\n            |  0,1  |  1,0  |\n            |       |       |\n           / \\     / \\     / \\\n         /     \\ /     \\ /     \\\n        | -1,1  |  0,0  |  1,-1 |\n        |       |       |       |\n         \\     / \\     / \\     /\n           \\ /     \\ /     \\ /\n            | -1,0  |  0,-1 |\n            |       |       |\n             \\     / \\     /\n               \\ /     \\ /\n\n    Basic hexagon geometry::\n\n        - pitch = sqrt(3) * side\n        - long diagonal = 2 * side\n        - Area = (sqrt(3) / 4) * side^2\n        - perimeter = 6 * side\n\n    \"\"\"\n\n    @property\n    def cornersUp(self) -> bool:\n        \"\"\"\n        Check whether the hexagonal grid is \"corners up\" or \"flats up\".\n\n        See the armi.reactor.grids.HexGrid class documentation for an illustration of the two types of grid indexing.\n        \"\"\"\n        return self._unitSteps[0][1] != 0.0\n\n    @staticmethod\n    def fromPitch(pitch, numRings=25, armiObject=None, cornersUp=False, symmetry=\"\"):\n        \"\"\"\n        Build a finite 
step-based 2D hex grid from a hex pitch in cm.\n\n        .. impl:: Hexagonal grids can be points-up or flats-up.\n            :id: I_ARMI_GRID_HEX_TYPE\n            :implements: R_ARMI_GRID_HEX_TYPE\n\n            When this method creates a ``HexGrid`` object, it can create a hexagonal grid with one of two rotations:\n            flats up (where two sides of the hexagons are parallel with the X-axis), and points up (where two sides are\n            parallel with the Y-axis). While it is possible to imagine the hexagons being rotated at other arbitrary\n            angles, those are not supported here.\n\n        .. impl:: When creating a hexagonal grid, the user can specify the symmetry.\n            :id: I_ARMI_GRID_SYMMETRY1\n            :implements: R_ARMI_GRID_SYMMETRY\n\n            When this method creates a ``HexGrid`` object, it takes as an input the symmetry of the resultant grid. This\n            symmetry can be a string (e.g. \"full\") or a ``SymmetryType`` object (e.g. ``FULL_CORE``). If the grid is not\n            full-core, the method ``getSymmetricEquivalents()`` will be usable to map any possible grid cell to the ones\n            that are being modeled in the sub-grid.\n\n        Parameters\n        ----------\n        pitch : float\n            Hex pitch (flat-to-flat) in cm\n        numRings : int, optional\n            The number of rings in the grid to pre-populate with locator objects. Even if positions are not\n            pre-populated, locators will be generated there on the fly.\n        armiObject : ArmiObject, optional\n            The object that this grid is anchored to (i.e. 
the reactor for a grid of assemblies)\n        cornersUp : bool, optional\n            Rotate the hexagons 30 degrees so that the corners point up instead of the flat faces.\n        symmetry : string, optional\n            A string representation of the symmetry options for the grid.\n\n        Returns\n        -------\n        HexGrid\n            A functional hexagonal grid object.\n        \"\"\"\n        unitSteps = HexGrid._getRawUnitSteps(pitch, cornersUp)\n\n        hex = HexGrid(\n            unitSteps=unitSteps,\n            unitStepLimits=((-numRings, numRings), (-numRings, numRings), (0, 1)),\n            armiObject=armiObject,\n            symmetry=symmetry,\n        )\n        return hex\n\n    @property\n    def pitch(self) -> float:\n        \"\"\"\n        Get the hex-pitch of a regular hexagonal array.\n\n        See Also\n        --------\n        armi.reactor.grids.HexGrid.fromPitch\n        \"\"\"\n        return sqrt(self._unitSteps[0][0] ** 2 + self._unitSteps[1][0] ** 2)\n\n    @staticmethod\n    def indicesToRingPos(i: int, j: int) -> Tuple[int, int]:\n        \"\"\"\n        Convert spatialLocator indices to ring/position.\n\n        One benefit it has is that it never has negative numbers.\n\n        Notes\n        -----\n        Ring, pos index system goes in counterclockwise hex rings.\n        \"\"\"\n        if i > 0 and j >= 0:\n            edge = 0\n            ring = i + j + 1\n            offset = j\n        elif i <= 0 and j > -i:\n            edge = 1\n            ring = j + 1\n            offset = -i\n        elif i < 0 and j > 0:\n            edge = 2\n            ring = -i + 1\n            offset = -j - i\n        elif i < 0:\n            edge = 3\n            ring = -i - j + 1\n            offset = -j\n        elif i >= 0 and j < -i:\n            edge = 4\n            ring = -j + 1\n            offset = i\n        else:\n            edge = 5\n            ring = i + 1\n            offset = i + j\n\n        positionBase = 1 + 
edge * (ring - 1)\n        return ring, positionBase + offset\n\n    @staticmethod\n    def getMinimumRings(n: int) -> int:\n        \"\"\"\n        Return the minimum number of rings needed to fit ``n`` objects.\n\n        Notes\n        -----\n        ``self`` is not used because hex grids always behave the same w.r.t. rings/positions.\n        \"\"\"\n        return hexagon.numRingsToHoldNumCells(n)\n\n    @staticmethod\n    def getPositionsInRing(ring: int) -> int:\n        \"\"\"Return the number of positions within a ring.\"\"\"\n        return hexagon.numPositionsInRing(ring)\n\n    def getNeighboringCellIndices(self, i: int, j: int = 0, k: int = 0) -> List[IJKType]:\n        \"\"\"\n        Return the indices of the immediate neighbors of a mesh point in the plane.\n\n        Note that these neighbors are ordered counter-clockwise beginning from the 30 or 60 degree direction. Exact\n        direction is dependent on cornersUp arg.\n        \"\"\"\n        return [\n            (i + 1, j, k),\n            (i, j + 1, k),\n            (i - 1, j + 1, k),\n            (i - 1, j, k),\n            (i, j - 1, k),\n            (i + 1, j - 1, k),\n        ]\n\n    def getLabel(self, indices):\n        \"\"\"\n        Hex labels start at 1, and are ring/position based rather than i,j.\n\n        This difference is partially because ring/pos is easier to understand in hex geometry, and partially because it\n        is used in some codes ARMI originally was focused on.\n        \"\"\"\n        ring, pos = self.getRingPos(indices)\n        if len(indices) == 2:\n            return super().getLabel((ring, pos))\n        else:\n            return super().getLabel((ring, pos, indices[2]))\n\n    @staticmethod\n    def _indicesAndEdgeFromRingAndPos(ring, position):\n        \"\"\"Given the ring and position, return the (I,J) coordinates, and which edge the grid cell is on.\n\n        Parameters\n        ----------\n        ring : int\n            Starting with 1 (not zero), 
the ring of the grid cell.\n        position : int\n            Starting with 1 (not zero), the position of the grid cell, in the ring.\n\n        Returns\n        -------\n        (int, int, int) : I coordinate, J coordinate, which edge of the hex ring\n\n        Notes\n        -----\n        - Edge indicates which edge of the ring in which the hexagon resides.\n        - Edge 0 is the NE edge, edge 1 is the N edge, etc.\n        - Offset is (0-based) index of the hexagon in that edge. For instance,\n          ring 3, pos 12 resides in edge 5 at index 1; it is the second hexagon\n          in ring 3, edge 5.\n        \"\"\"\n        # The inputs start counting at 1, but the grid starts counting at zero.\n        ring = ring - 1\n        pos = position - 1\n\n        # Handle the center grid cell.\n        if ring == 0:\n            if pos != 0:\n                raise ValueError(f\"Position in center ring must be 1, not {position}\")\n            return 0, 0, 0\n\n        # find the edge and offset (pos//ring or pos%ring)\n        edge, offset = divmod(pos, ring)\n\n        # find (I,J) based on the ring, edge, and offset\n        if edge == 0:\n            i = ring - offset\n            j = offset\n        elif edge == 1:\n            i = -offset\n            j = ring\n        elif edge == 2:\n            i = -ring\n            j = ring - offset\n        elif edge == 3:\n            i = offset - ring\n            j = -offset\n        elif edge == 4:\n            i = offset\n            j = -ring\n        elif edge == 5:\n            i = ring\n            j = offset - ring\n        else:\n            raise ValueError(f\"Edge {edge} is invalid. 
From ring {ring}, pos {pos}\")\n\n        return i, j, edge\n\n    @staticmethod\n    def getIndicesFromRingAndPos(ring: int, pos: int) -> IJType:\n        r\"\"\"Given the ring and position, return the (I,J) coordinates in the hex grid.\n\n        Parameters\n        ----------\n        ring : int\n            Starting with 1 (not zero), the ring of the grid cell.\n        position : int\n            Starting with 1 (not zero), the position of the grid cell, in the ring.\n\n        Returns\n        -------\n        (int, int) : I coordinate, J coordinate\n\n        Notes\n        -----\n        In an axial plane, the (ring, position) coordinates are as follows::\n\n                 Flat-to-Flat                    Corners Up\n                     _____\n                    /     \\                      / \\     / \\\n              _____/  2,2  \\_____              /     \\ /     \\\n             /     \\       /     \\            |  2,2  |  2,1  |\n            /  2,3  \\_____/  2,1  \\           |       |       |\n            \\       /     \\       /          / \\     / \\     / \\\n             \\_____/  1,1  \\_____/         /     \\ /     \\ /     \\\n             /     \\       /     \\        |  2,3  |  1,1  |  2,6  |\n            /  2,4  \\_____/  2,6  \\       |       |       |       |\n            \\       /     \\       /        \\     / \\     / \\     /\n             \\_____/  2,5  \\_____/           \\ /     \\ /     \\ /\n                   \\       /                  |  2,4  |  2,5  |\n                    \\_____/                   |       |       |\n                                               \\     / \\     /\n                                                 \\ /     \\ /\n\n        \"\"\"\n        i, j, _edge = HexGrid._indicesAndEdgeFromRingAndPos(ring, pos)\n        return i, j\n\n    def getRingPos(self, indices: IJKType) -> Tuple[int, int]:\n        \"\"\"\n        Get 1-based ring and position from normal indices.\n\n        See Also\n     
   --------\n        getIndicesFromRingAndPos : does the reverse\n        \"\"\"\n        i, j = indices[:2]\n        return self.indicesToRingPos(i, j)\n\n    def overlapsWhichSymmetryLine(self, indices: IJType) -> Optional[int]:\n        \"\"\"Return a list of which lines of symmetry this is on.\n\n        Parameters\n        ----------\n        indices : tuple of [int, int]\n            Indices for the requested object\n\n        Returns\n        -------\n        None or int\n            None if no line of symmetry goes through the object at the requested index. Otherwise, some grid constants\n            like ``BOUNDARY_CENTER`` will be returned.\n\n        Notes\n        -----\n        - Only the 1/3 core view geometry is actually coded in here right now.\n        - Being \"on\" a symmetry line means the line goes through the middle of you.\n        \"\"\"\n        i, j = indices[:2]\n\n        if i == 0 and j == 0:\n            symmetryLine = BOUNDARY_CENTER\n        elif i > 0 and i == -2 * j:\n            # edge 1: 1/3 symmetry line (bottom horizontal side in 1/3 core view, theta = 0)\n            symmetryLine = BOUNDARY_0_DEGREES\n        elif i == j and i > 0 and j > 0:\n            # edge 2: 1/6 symmetry line (bisects 1/3 core view, theta = pi/3)\n            symmetryLine = BOUNDARY_60_DEGREES\n        elif j == -2 * i and j > 0:\n            # edge 3: 1/3 symmetry line (left oblique side in 1/3 core view, theta = 2*pi/3)\n            symmetryLine = BOUNDARY_120_DEGREES\n        else:\n            symmetryLine = None\n\n        return symmetryLine\n\n    def getSymmetricEquivalents(self, indices: IJKType) -> List[IJType]:\n        \"\"\"Retrieve the equivalent indices. If full core return nothing, if 1/3-core grid, return the symmetric\n        equivalents, if any other grid, raise an error.\n\n        .. 
impl:: Equivalent contents in third-core geometries are retrievable.\n            :id: I_ARMI_GRID_EQUIVALENTS\n            :implements: R_ARMI_GRID_EQUIVALENTS\n\n            This method takes in (I,J,K) indices, and if this ``HexGrid`` is full core, it returns nothing. If this\n            ``HexGrid`` is third-core, this method will return the third-core symmetric equivalent of just (I,J). If\n            this grid is any other kind, this method will just return an error; a hexagonal grid with any other symmetry\n            is probably an error.\n        \"\"\"\n        if (\n            self.symmetry.domain == geometry.DomainType.THIRD_CORE\n            and self.symmetry.boundary == geometry.BoundaryType.PERIODIC\n        ):\n            return self._getSymmetricIdenticalsThird(indices)\n        elif self.symmetry.domain == geometry.DomainType.FULL_CORE:\n            return []\n        else:\n            raise NotImplementedError(f\"Unhandled symmetry condition for HexGrid: {self.symmetry}\")\n\n    @staticmethod\n    def _getSymmetricIdenticalsThird(indices) -> List[IJType]:\n        \"\"\"This works by rotating the indices by 120 degrees twice, counterclockwise.\"\"\"\n        i, j = indices[:2]\n        if i == 0 and j == 0:\n            return []\n\n        identicals = [(-i - j, i), (j, -i - j)]\n        return identicals\n\n    def triangleCoords(self, indices: IJKType) -> np.ndarray:\n        \"\"\"\n        Return 6 coordinate pairs representing the centers of the 6 triangles in a hexagon centered here.\n\n        Ignores z-coordinate and only operates in 2D for now.\n        \"\"\"\n        xy = self.getCoordinates(indices)[:2]\n        scale = self.pitch / 3.0\n        return xy + scale * TRIANGLES_IN_HEXAGON\n\n    @staticmethod\n    def _getRawUnitSteps(pitch, cornersUp=False):\n        \"\"\"Get the raw unit steps (ignore step dimensions), for a hex grid.\n\n        Parameters\n        ----------\n        pitch : float\n            The short 
diameter of the hexagons (flat to flat).\n        cornersUp : bool, optional\n            If True, the hexagons have a corner pointing in the Y direction. Default: False\n\n        Returns\n        -------\n        tuple : The full 3D set of derivatives of X,Y,Z in terms of i,j,k.\n        \"\"\"\n        side = hexagon.side(pitch)\n        if cornersUp:\n            # rotated 30 degrees counter-clockwise from normal\n            # increases in i moves you in x and y\n            # increases in j also moves you in x and y\n            unitSteps = (\n                (pitch / 2.0, -pitch / 2.0, 0),\n                (1.5 * side, 1.5 * side, 0),\n                (0, 0, 0),\n            )\n        else:\n            # x direction is only a function of i because j-axis is vertical.\n            # y direction is a function of both.\n            unitSteps = ((1.5 * side, 0.0, 0.0), (pitch / 2.0, pitch, 0.0), (0, 0, 0))\n\n        return unitSteps\n\n    def changePitch(self, newPitchCm: float):\n        \"\"\"Change the hex pitch.\"\"\"\n        unitSteps = np.array(HexGrid._getRawUnitSteps(newPitchCm, self.cornersUp))\n        self._unitSteps = unitSteps[self._stepDims]\n\n    def locatorInDomain(self, locator, symmetryOverlap: Optional[bool] = False) -> bool:\n        # This will include the \"top\" 120-degree symmetry lines. This is to support adding of edge\n        # assemblies.\n        if self.symmetry.domain == geometry.DomainType.THIRD_CORE:\n            return self.isInFirstThird(locator, includeTopEdge=symmetryOverlap)\n        else:\n            return True\n\n    def isInFirstThird(self, locator, includeTopEdge=False) -> bool:\n        \"\"\"Test if the given locator is in the first 1/3 of the HexGrid.\n\n        .. 
impl:: Determine if grid is in first third.\n            :id: I_ARMI_GRID_SYMMETRY_LOC\n            :implements: R_ARMI_GRID_SYMMETRY_LOC\n\n            This is a simple helper method to determine if a given locator (from an ArmiObject) is in the first 1/3 of\n            the ``HexGrid``. This method does not attempt to check if this grid is full or 1/3-core. It just does the\n            basic math of dividing up a hex-assembly reactor core into thirds and testing if the given location is in\n            the first 1/3 or not.\n        \"\"\"\n        ring, pos = self.getRingPos(locator.indices)\n        if ring == 1:\n            return True\n\n        maxPosTotal = self.getPositionsInRing(ring)\n\n        maxPos1 = ring + ring // 2 - 1\n        maxPos2 = maxPosTotal - ring // 2 + 1\n        if ring % 2:\n            # Odd ring; upper edge assem typically not included.\n            if includeTopEdge:\n                maxPos1 += 1\n        else:\n            # Even ring; upper edge assem included.\n            maxPos2 += 1\n\n        return bool(pos <= maxPos1 or pos >= maxPos2)\n\n    def generateSortedHexLocationList(self, nLocs: int):\n        \"\"\"\n        Generate a list of IndexLocations, sorted based on their distance from the center.\n\n        IndexLocations are taken from a full core.\n\n        Ties between locations with the same distance (e.g. 
A3001 and A3003) are broken by ring number then position\n        number.\n        \"\"\"\n        # first, roughly calculate how many rings need to be created to cover nLocs worth of assemblies\n        nLocs = int(nLocs)\n\n        # next, generate a list of locations and corresponding distances\n        locList = []\n        for ring in range(1, hexagon.numRingsToHoldNumCells(nLocs) + 1):\n            positions = self.getPositionsInRing(ring)\n            for position in range(1, positions + 1):\n                i, j = self.getIndicesFromRingAndPos(ring, position)\n                locList.append(self[(i, j, 0)])\n\n        # round to avoid differences due to floating point math\n        locList.sort(\n            key=lambda loc: (\n                round(np.linalg.norm(loc.getGlobalCoordinates()), 6),\n                loc.i,\n                loc.j,\n            )\n        )\n\n        return locList[:nLocs]\n\n    def rotateIndex(self, loc: IndexLocation, rotations: int) -> IndexLocation:\n        \"\"\"Find the new location of an index after some number of CCW rotations.\n\n        Parameters\n        ----------\n        loc : IndexLocation\n            Starting index\n        rotations : int\n            Number of counter clockwise rotations\n\n        Returns\n        -------\n        IndexLocation\n            Index in the grid after rotation\n\n        Notes\n        -----\n        Rotation uses a three-dimensional index in what can be known elsewhere by the confusing name\n        of \"cubic\" coordinate system for a hexagon. Cubic stems from the notion of using three\n        dimensions, ``(q, r, s)`` to describe a point in the hexagonal grid. 
The conversion from the\n        indexing used in the ARMI framework follows::\n\n            q = i\n            r = j\n            # s = - q - r = - (q + r)\n            s = -(i + j)\n\n        The motivation for the cubic notation is rotation is far simpler: a clockwise rotation by 60\n        degrees results in a shifting and negating of the coordinates. So the first rotation of\n        ``(q, r, s)`` would produce a new coordinate ``(-r, -s, -q)``. Another rotation would\n        produce ``(s, q, r)``, and so on.\n\n        Raises\n        ------\n        TypeError\n            If ``loc.grid`` is populated and not consistent with this grid. For example, it doesn't\n            make sense to rotate an index from a Cartesian grid in a hexagonal coordinate system,\n            nor hexagonal grid with different orientation (flats up vs. corners up)\n        \"\"\"\n        if self._roughlyEqual(loc.grid) or loc.grid is None:\n            i, j, k = loc[:3]\n            buffer = deque((i, j, -(i + j)))\n            buffer.rotate(-rotations)\n            newI = buffer[0]\n            newJ = buffer[1]\n            if rotations % 2:\n                newI *= -1\n                newJ *= -1\n            return IndexLocation(newI, newJ, k, loc.grid)\n        raise TypeError(f\"Refusing to rotate an index {loc} from a grid {loc.grid} that is not consistent with {self}\")\n\n    def _roughlyEqual(self, other) -> bool:\n        \"\"\"Check that two hex grids are nearly identical.\n\n        Would the same ``(i, j, k)`` index in ``self`` be the same location in ``other``?\n        \"\"\"\n        if other is self:\n            return True\n        return (\n            isinstance(other, HexGrid)\n            and isclose(self.pitch, other.pitch, rel_tol=1e-4)\n            and other.cornersUp == self.cornersUp\n        )\n"
  },
  {
    "path": "armi/reactor/grids/locations.py",
    "content": "# Copyright 2023 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport math\nfrom abc import ABC, abstractmethod\nfrom typing import TYPE_CHECKING, Hashable, Iterator, List, Optional, Tuple, Union\n\nimport numpy as np\n\nif TYPE_CHECKING:\n    # Avoid some circular imports\n    from armi.reactor.grids import Grid\n\n\nIJType = Tuple[int, int]\nIJKType = Tuple[int, int, int]\n\n\nclass LocationBase(ABC):\n    \"\"\"\n    A namedtuple-like object for storing location information.\n\n    It's immutable (you can't set things after construction) and has names.\n    \"\"\"\n\n    __slots__ = (\"_i\", \"_j\", \"_k\", \"_grid\")\n\n    def __init__(self, i: int, j: int, k: int, grid: Optional[\"Grid\"]):\n        self._i = i\n        self._j = j\n        self._k = k\n        self._grid = grid\n\n    def __repr__(self) -> str:\n        return \"<{} @ ({},{:},{})>\".format(self.__class__.__name__, self.i, self.j, self.k)\n\n    def __getstate__(self) -> Hashable:\n        \"\"\"Used in pickling and deepcopy, this detaches the grid.\"\"\"\n        return (self._i, self._j, self._k, None)\n\n    def __setstate__(self, state: Hashable):\n        \"\"\"Unpickle a locator, the grid will attach itself if it was also pickled, otherwise this will be detached.\"\"\"\n        self.__init__(*state)\n\n    @property\n    def i(self) -> int:\n        return self._i\n\n    @property\n    def j(self) -> int:\n        return self._j\n\n    
@property\n    def k(self) -> int:\n        return self._k\n\n    @property\n    def grid(self) -> Optional[\"Grid\"]:\n        return self._grid\n\n    def __getitem__(self, index: int) -> Union[int, \"Grid\"]:\n        return (self.i, self.j, self.k, self.grid)[index]\n\n    def __hash__(self) -> Hashable:\n        \"\"\"\n        Define a hash so we can use these as dict keys w/o having exact object.\n\n        Notes\n        -----\n        Including the ``grid`` attribute may be more robust; however, using only (i, j, k) allows dictionaries to use\n        IndexLocations and (i,j,k) tuples interchangeably.\n        \"\"\"\n        return hash((self.i, self.j, self.k))\n\n    def __eq__(self, other: Union[IJKType, \"LocationBase\"]) -> bool:\n        if isinstance(other, tuple):\n            return (self.i, self.j, self.k) == other\n        if isinstance(other, LocationBase):\n            return self.i == other.i and self.j == other.j and self.k == other.k and self.grid is other.grid\n        return NotImplemented\n\n    def __lt__(self, that: \"LocationBase\") -> bool:\n        \"\"\"\n        A Locationbase is less than another if the pseudo-radius is less, or if equal, in order any index is less.\n\n        Examples\n        --------\n        >>> grid = grids.HexGrid.fromPitch(1.0)\n        >>> grid[0, 0, 0] < grid[2, 3, 4]  # the \"radius\" is less\n        True\n        >>> grid[2, 3, 4] < grid[2, 3, 4]  # they are equal\n        False\n        >>> grid[2, 3, 4] < grid[-2, 3, 4]  # 2 is greater than -2\n        False\n        >>> grid[-2, 3, 4] < grid[2, 3, 4]  # -2 is less than 2\n        True\n        >>> grid[1, 3, 4] < grid[-2, 3, 4]  # the \"radius\" is less\n        True\n        \"\"\"\n        selfIndices = self.indices\n        thatIndices = that.indices\n        # this is not really r, but it is fast and consistent\n        selfR = abs(selfIndices).sum()\n        thatR = abs(thatIndices).sum()\n\n        # this cannot be reduced to\n        #   
return selfR < thatR or (selfIndices < thatIndices).any()\n        # because the comparison is not symmetric.\n        if selfR < thatR:\n            return True\n        else:\n            for lt, eq in zip(selfIndices < thatIndices, selfIndices == thatIndices):\n                if eq:\n                    continue\n\n                return lt\n\n            return False\n\n    def __len__(self) -> int:\n        \"\"\"Returns 3, the number of directions.\"\"\"\n        return 3\n\n    def associate(self, grid: \"Grid\"):\n        \"\"\"Re-assign locator to another Grid.\"\"\"\n        self._grid = grid\n\n    @property\n    @abstractmethod\n    def indices(self) -> np.ndarray:\n        \"\"\"Get the non-grid indices (i,j,k) of this locator.\n\n        This strips off the annoying ``grid`` tagalong which is there to ensure proper equality (i.e. (0,0,0) in a\n        storage rack is not equal to (0,0,0) in a core).\n\n        It is a numpy array for two reasons:\n\n        1. It can be added and subtracted for the recursive computations through different coordinate systems.\n        2. It can be written/read from the database.\n        \"\"\"\n\n\nclass IndexLocation(LocationBase):\n    \"\"\"\n    An immutable location representing one cell in a grid.\n\n    The locator is intimately tied to a grid and together, they represent a grid cell somewhere in\n    the coordinate system of the grid.\n\n    ``grid`` is not in the constructor (must be added after construction ) because the extra argument (grid) gives an\n    inconsistency between __init__ and __new__. Unfortunately this decision makes whipping up IndexLocations on the fly\n    awkward. 
But perhaps that's ok because they should only be created by their grids.\n    \"\"\"\n\n    __slots__ = ()\n\n    def __add__(self, that: Union[IJKType, \"IndexLocation\"]) -> \"IndexLocation\":\n        \"\"\"\n        Enable adding with other objects like this and/or 3-tuples.\n\n        Tuples are needed so we can terminate the recursive additions with a (0,0,0) basis.\n        \"\"\"\n        # New location is not associated with any particular grid.\n        return self.__class__(self[0] + that[0], self[1] + that[1], self[2] + that[2], None)\n\n    def __sub__(self, that: Union[IJKType, \"IndexLocation\"]) -> \"IndexLocation\":\n        return self.__class__(self[0] - that[0], self[1] - that[1], self[2] - that[2], None)\n\n    def detachedCopy(self) -> \"IndexLocation\":\n        \"\"\"\n        Make a copy of this locator that is not associated with a grid.\n\n        See Also\n        --------\n        armi.reactor.reactors.detach : uses this\n        \"\"\"\n        return self.__class__(self.i, self.j, self.k, None)\n\n    @property\n    def parentLocation(self):\n        \"\"\"\n        Get the spatialLocator of the ArmiObject that this locator's grid is anchored to.\n\n        For example, if this is one of many spatialLocators in a 2-D grid representing a reactor, then the\n        ``parentLocation`` is the spatialLocator of the reactor, which will often be a ``CoordinateLocation``.\n        \"\"\"\n        grid = self.grid  # performance matters a lot here so we remove a dot\n        # check for None rather than __nonzero__ for speed (otherwise it checks the length)\n        if grid is not None and grid.armiObject is not None and grid.armiObject.parent is not None:\n            return grid.armiObject.spatialLocator\n        return None\n\n    @property\n    def indices(self) -> np.ndarray:\n        \"\"\"\n        Get the non-grid indices (i,j,k) of this locator.\n\n        This strips off the annoying ``grid`` tagalong which is there to ensure proper 
equality (i.e. (0,0,0) in a\n        storage rack is not equal to (0,0,0) in a core).\n\n        It is a numpy array for two reasons:\n\n        1. It can be added and subtracted for the recursive computations through different coordinate systems.\n        2. It can be written/read from the database.\n\n        \"\"\"\n        return np.array(self[:3])\n\n    def getCompleteIndices(self) -> IJKType:\n        \"\"\"\n        Transform the indices of this object up to the top mesh.\n\n        The top mesh is either the one where there's no more parent (true top) or when an axis gets added twice. Unlike\n        with coordinates, you can only add each index axis one time. Thus a *complete* set of indices is one where an\n        index for each axis has been defined by a set of 1, 2, or 3 nested grids.\n\n        This is useful for getting the reactor-level (i,j,k) indices of an object in a multi-layered 2-D(assemblies in\n        core)/1-D(blocks in assembly) mesh like the one mapping blocks up to reactor in Hex reactors.\n\n        The benefit of that particular mesh over a 3-D one is that different assemblies can have different axial meshes,\n        a common situation.\n\n        It will just return local indices for pin-meshes inside of blocks.\n\n        A tuple is returned so that it is easy to compare pairs of indices.\n        \"\"\"\n        parentLocation = self.parentLocation  # to avoid evaluating property if's twice\n        indices = self.indices\n        if parentLocation is not None:\n            if parentLocation.grid is not None and addingIsValid(self.grid, parentLocation.grid):\n                indices += parentLocation.indices\n        return tuple(indices)\n\n    def getLocalCoordinates(self, nativeCoords=False):\n        \"\"\"Return the coordinates of the center of the mesh cell here in cm.\"\"\"\n        if self.grid is None:\n            raise ValueError(f\"Cannot get local coordinates of {self} because grid is None.\")\n        return 
self.grid.getCoordinates(self.indices, nativeCoords=nativeCoords)\n\n    def getGlobalCoordinates(self, nativeCoords=False):\n        \"\"\"Get coordinates in global 3D space of the centroid of this object.\"\"\"\n        parentLocation = self.parentLocation  # to avoid evaluating property if's twice\n        if parentLocation:\n            return self.getLocalCoordinates(nativeCoords=nativeCoords) + parentLocation.getGlobalCoordinates(\n                nativeCoords=nativeCoords\n            )\n        return self.getLocalCoordinates(nativeCoords=nativeCoords)\n\n    def getGlobalCellBase(self):\n        \"\"\"Return the cell base (i.e. \"bottom left\"), in global coordinate system.\"\"\"\n        parentLocation = self.parentLocation  # to avoid evaluating property if's twice\n        if parentLocation:\n            return parentLocation.getGlobalCellBase() + self.grid.getCellBase(self.indices)\n        return self.grid.getCellBase(self.indices)\n\n    def getGlobalCellTop(self):\n        \"\"\"Return the cell top (i.e. 
\"top right\"), in global coordinate system.\"\"\"\n        parentLocation = self.parentLocation  # to avoid evaluating property if's twice\n        if parentLocation:\n            return parentLocation.getGlobalCellTop() + self.grid.getCellTop(self.indices)\n        return self.grid.getCellTop(self.indices)\n\n    def getRingPos(self):\n        \"\"\"Return ring and position of this locator.\"\"\"\n        return self.grid.getRingPos(self.getCompleteIndices())\n\n    def getSymmetricEquivalents(self):\n        \"\"\"\n        Get symmetrically-equivalent locations, based on Grid symmetry.\n\n        See Also\n        --------\n        Grid.getSymmetricEquivalents\n        \"\"\"\n        return self.grid.getSymmetricEquivalents(self.indices)\n\n    def distanceTo(self, other: \"IndexLocation\") -> float:\n        \"\"\"Return the distance from this locator to another.\"\"\"\n        return math.sqrt(((np.array(self.getGlobalCoordinates()) - np.array(other.getGlobalCoordinates())) ** 2).sum())\n\n\nclass MultiIndexLocation(IndexLocation):\n    \"\"\"\n    A collection of index locations that can be used as a spatialLocator.\n\n    This allows components with multiplicity>1 to have location information within a parent grid. The implication is\n    that there are multiple discrete components, each one residing in one of the actual locators underlying this\n    collection.\n\n    .. impl:: Store components with multiplicity greater than 1\n        :id: I_ARMI_GRID_MULT\n        :implements: R_ARMI_GRID_MULT\n\n        As not all grids are \"full core symmetry\", ARMI will sometimes need to track multiple positions for a single\n        object: one for each symmetric portion of the reactor. This class doesn't calculate those positions in the\n        reactor, it just tracks the multiple positions given to it. 
In practice, this class is mostly just a list of\n        ``IndexLocation`` objects.\n    \"\"\"\n\n    # MIL's cannot be hashed, so we need to scrape off the implementation from LocationBase. This raises some\n    # interesting questions of substitutability of the various Location classes, which should be addressed.\n    __hash__ = None\n\n    _locations: List[IndexLocation]\n\n    def __init__(self, grid: \"Grid\"):\n        IndexLocation.__init__(self, 0, 0, 0, grid)\n        self._locations = []\n\n    def __eq__(self, other):\n        \"\"\"Considered equal if the grids are identical and contained locations are identical.\n\n        Two ``MultiIndexLocation`` objects with the same total collection of locations, but in\n        different orders, will not be considered equal.\n        \"\"\"\n        if isinstance(other, type(self)):\n            return self.grid == other.grid and self._locations == other._locations\n        # Different objects -> let other.__eq__(self) handle it\n        return NotImplemented\n\n    def __getstate__(self) -> List[IndexLocation]:\n        \"\"\"Used in pickling and deepcopy, this detaches the grid.\"\"\"\n        return self._locations\n\n    def __setstate__(self, state: List[IndexLocation]):\n        \"\"\"Unpickle a locator, the grid will attach itself if it was also pickled, otherwise this will be detached.\"\"\"\n        self.__init__(None)\n        self._locations = state\n\n    def __repr__(self) -> str:\n        return f\"<{self.__class__.__name__} with {len(self._locations)} locations>\"\n\n    def __getitem__(self, index: int) -> IndexLocation:\n        return self._locations[index]\n\n    def __setitem__(self, index: int, obj: IndexLocation):\n        self._locations[index] = obj\n\n    def __iter__(self) -> Iterator[IndexLocation]:\n        return iter(self._locations)\n\n    def __len__(self) -> int:\n        return len(self._locations)\n\n    def detachedCopy(self) -> \"MultiIndexLocation\":\n        loc = 
MultiIndexLocation(None)\n        loc.extend(self._locations)\n        return loc\n\n    def associate(self, grid: \"Grid\"):\n        self._grid = grid\n        for loc in self._locations:\n            loc.associate(grid)\n\n    def getCompleteIndices(self) -> IJKType:\n        raise NotImplementedError(\"Multi locations cannot do this yet.\")\n\n    def append(self, location: IndexLocation):\n        self._locations.append(location)\n\n    def extend(self, locations: List[IndexLocation]):\n        self._locations.extend(locations)\n\n    def pop(self, location: IndexLocation):\n        self._locations.pop(location)\n\n    @property\n    def indices(self) -> List[np.ndarray]:\n        \"\"\"\n        Return indices for all locations.\n\n        .. impl:: Return the location of all instances of grid components with multiplicity greater than 1.\n            :id: I_ARMI_GRID_ELEM_LOC\n            :implements: R_ARMI_GRID_ELEM_LOC\n\n            This method returns the indices of all the ``IndexLocation`` objects. To be clear, this does not return the\n            ``IndexLocation`` objects themselves. This is designed to be consistent with the Grid's ``__getitem__()``\n            method.\n        \"\"\"\n        return [loc.indices for loc in self._locations]\n\n\nclass CoordinateLocation(IndexLocation):\n    \"\"\"\n    A triple representing a point in space.\n\n    This is still associated with a grid. The grid defines the continuous coordinate space and axes that the location is\n    within. This also links to the composite tree.\n    \"\"\"\n\n    __slots__ = ()\n\n    def __eq__(self, other):\n        if isinstance(other, type(self)):\n            # Mainly to avoid comparing against MultiIndexLocations. 
Fuel pins may have a multi index location and the\n            # duct may have a coordinate location and we don't want them to be equal.\n            return self.grid == other.grid and self.i == other.i and self.j == other.j and self.k == other.k\n        return NotImplemented\n\n    def __hash__(self):\n        \"\"\"Hash based on the coordinates but not the grid.\"\"\"\n        return hash((self.i, self.j, self.k))\n\n    def getLocalCoordinates(self, nativeCoords=False):\n        \"\"\"Return x,y,z coordinates in cm within the grid's coordinate system.\"\"\"\n        return self.indices\n\n    def getCompleteIndices(self) -> IJKType:\n        \"\"\"Top of chain. Stop recursion and return basis.\"\"\"\n        return 0, 0, 0\n\n    def getGlobalCellBase(self):\n        return self.indices\n\n    def getGlobalCellTop(self):\n        return self.indices\n\n\ndef addingIsValid(myGrid: \"Grid\", parentGrid: \"Grid\"):\n    \"\"\"\n    True if adding a indices from one grid to another is considered valid.\n\n    In ARMI we allow the addition of a 1-D axial grid with a 2-D grid. We do not allow any other kind of adding. This\n    enables the 2D/1D grid layout in Assemblies/Blocks but does not allow 2D indexing in pins to become inconsistent.\n    \"\"\"\n    return myGrid.isAxialOnly and not parentGrid.isAxialOnly\n"
  },
  {
    "path": "armi/reactor/grids/structuredGrid.py",
"content": "# Copyright 2023 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport collections\nimport itertools\nfrom abc import abstractmethod\nfrom typing import Iterable, List, Optional, Sequence, Tuple, Union\n\nimport numpy as np\n\nfrom armi.reactor.grids.grid import Grid\nfrom armi.reactor.grids.locations import (\n    IJKType,\n    IndexLocation,\n    LocationBase,\n    MultiIndexLocation,\n)\n\n# data structure for database-serialization of grids\nGridParameters = collections.namedtuple(\n    \"GridParameters\",\n    (\"unitSteps\", \"bounds\", \"unitStepLimits\", \"offset\", \"geomType\", \"symmetry\"),\n)\n\n\nclass StructuredGrid(Grid):\n    \"\"\"\n    A connected set of cells characterized by indices mapping to space and vice versa.\n\n    The cells may be characterized by any mixture of regular repeating steps and\n    user-defined steps in any direction.\n\n    For example, a 2-D hex lattice has constant, regular steps whereas a 3-D hex mesh\n    may have user-defined axial meshes. Similar for Cartesian, RZT, etc.\n\n    Parameters\n    ----------\n    unitSteps : tuple of tuples, optional\n        Describes the grid spatially as a function on indices.\n        Each tuple describes how each ``(x,y,or z)`` dimension is influenced by\n        ``(i,j,k)``. 
In other words, it is::\n\n            (dxi, dxj, jxk), (dyi, dyj, dyk), (dzi, dzj, dzk)\n\n        where ``dmn`` is the distance (in cm) that dimension ``m`` will change as a\n        function of index ``n``.\n\n        Unit steps are used as a generic method for defining repetitive grids in a\n        variety of geometries, including hexagonal and Cartesian.  The tuples are not\n        vectors in the direction of the translation, but rather grouped by direction. If\n        the bounds argument is described for a direction, the bounds will be used rather\n        than the unit step information. The default of (0, 0, 0) makes all dimensions\n        insensitive to indices since the coordinates are calculated by the dot product\n        of this and the indices.  With this default, any dimension that is desired to\n        change with indices should be defined with bounds. RZtheta grids are created\n        exclusively with bounds.\n    bounds : 3-tuple\n        Absolute increasing bounds in cm including endpoints of a non-uniform grid.\n        Each item represents the boundaries in the associated direction.  Use Nones when\n        unitSteps should be applied instead. Most useful for thetaRZ grids or other\n        non-uniform grids.\n    unitStepLimits : 3-tuple\n        The limit of the steps in all three directions. This constrains step-defined\n        grids to be finite so we can populate them with SpatialLocator objects.\n    offset : 3-tuple, optional\n        Offset in cm for each axis. By default the center of the (0,0,0)-th object is in\n        the center of the grid. Offsets can move it so that the (0,0,0)-th object can\n        be fully within a quadrant (i.e. in a Cartesian grid).\n    armiObject : ArmiObject, optional\n        The ArmiObject that this grid describes. For example if it's a 1-D assembly\n        grid, the armiObject is the assembly. 
Note that ``self.armiObject.spatialGrid``\n        is ``self``.\n\n    Examples\n    --------\n    A 2D a rectangular grid with width (x) 2 and height (y) 3 would be::\n\n    >>> grid = Grid(unitSteps=((2, 0, 0), (0, 3, 0), (0, 0, 0)))\n\n    A regular hex grid with pitch 1 is::\n\n    >>> grid = Grid(unitSteps= ((sqrt(3)/2, 0.0, 0.0), (0.5, 1.0, 0.0), (0, 0, 0))\n\n    .. note:: For this unit hex the magnitude of the vector constructed using the\n              0th index of each tuple is 1.0.\n\n    Notes\n    -----\n    Each dimension must either be defined through unitSteps or bounds.\n    The combination of unitSteps with bounds was settled upon after some struggle to\n    have one unified definition of a grid (i.e. just bounds). A hexagonal grid is\n    somewhat challenging to represent with bounds because the axes are not orthogonal,\n    so a unit-direction vector plus bounds would be required. And then the bounds would\n    be wasted space because they can be derived simply by unit steps. Memory efficiency\n    is important in this object so the compact representation of\n    unitSteps-when-possible, bounds-otherwise was settled upon.\n\n    Design considerations include:\n\n    * unitSteps are more intuitive as operations starting from the center of a cell,\n      particularly with hexagons and rectangles. Otherwise the 0,0 position of a hexagon\n      in the center of 1/3-symmetric hexagon is at the phantom bottom left of the\n      hexagon.\n\n    * Users generally prefer to input mesh bounds rather than centers (e.g. starting at\n      0.5 instead of 0.0 in a unit mesh is weird).\n\n    * If we store bounds, computing bounds is simple and computing centers takes ~2x the\n      effort. If we store centers, it's the opposite.\n\n    * Regardless of how we store things, we'll need a Grid that has the lower-left\n      assembly fully inside the problem (i.e. 
for full core Cartesian) as well as\n      another one that has the lower-left assembly half-way or quarter-way sliced off\n      (for 1/2, 1/4, and 1/8 symmetries).  The ``offset`` parameter handles this.\n\n    * Looking up mesh boundaries (to define a mesh in another code) is generally more\n      common than looking up centers (for plotting or measuring distance).\n\n    * A grid can be anchored to the object that it is in with a backreference. This\n      gives it the ability to traverse the composite tree and map local to global\n      locations without having to duplicate the composite pattern on grids. This remains\n      optional so grids can be used for non-reactor-package reasons.  It may seem\n      slightly cleaner to set the armiObject to the parent's spatialLocator itself\n      but the major disadvantage of this is that when an object moves, the armiObject\n      would have to be updated. By anchoring directly to Composite objects, the parent\n      is always up to date no matter where or how things get moved.\n\n    * Unit step calculations use dot products and must not be polluted by the bound\n      indices. Thus we reduce the size of the unitSteps tuple accordingly.\n    \"\"\"\n\n    def __init__(\n        self,\n        unitSteps=(0, 0, 0),\n        bounds=(None, None, None),\n        unitStepLimits=((0, 1), (0, 1), (0, 1)),\n        offset=None,\n        geomType=\"\",\n        symmetry=\"\",\n        armiObject=None,\n    ):\n        super().__init__(geomType, symmetry, armiObject)\n        # these lists contain the indices representing which dimensions for which steps\n        # are used, or for which bounds are used. 
index 0 is x direction, etc.\n        self._boundDims = []\n        self._stepDims = []\n        for dimensionIndex, bound in enumerate(bounds):\n            if bound is None:\n                self._stepDims.append(dimensionIndex)\n            else:\n                self._boundDims.append(dimensionIndex)\n\n        # numpy prefers tuples like this to do slicing on arrays\n        self._boundDims = (tuple(self._boundDims),)\n        self._stepDims = (tuple(self._stepDims),)\n\n        unitSteps = _tuplify(unitSteps)\n\n        self._bounds = bounds\n        self._unitStepLimits = _tuplify(unitStepLimits)\n\n        # only represent unit steps in dimensions they're being used so as to not\n        # pollute the dot product. This may reduce the length of this from 3 to 2 or 1\n        self._unitSteps = np.array(unitSteps)[self._stepDims]\n        self._offset = np.zeros(3) if offset is None else np.array(offset)\n        self._locations = {}\n        self._buildLocations()  # locations are owned by a grid, so the grid builds them.\n\n        (_ii, iLen), (_ji, jLen), (_ki, kLen) = self.getIndexBounds()\n        # True if only contains k-cells.\n        self._isAxialOnly = iLen == jLen == 1 and kLen > 1\n\n    def __len__(self) -> int:\n        return len(self._locations)\n\n    @property\n    def isAxialOnly(self) -> bool:\n        return self._isAxialOnly\n\n    def reduce(self) -> GridParameters:\n        \"\"\"Recreate the parameter necessary to create this grid.\"\"\"\n        offset = None if not self._offset.any() else tuple(self._offset)\n\n        bounds = _tuplify(self._bounds)\n\n        # recreate a constructor-friendly version of `_unitSteps` from live data (may have been reduced from\n        # length 3 to length 2 or 1 based on mixing the step-based definition and the bounds-based definition\n        # described in Design Considerations above.)\n        # We don't just save the original tuple passed in because that may miss transformations that\n        
# occur between instantiation and reduction.\n        unitSteps = []\n        compressedSteps = list(self._unitSteps[:])\n        for i in range(3):\n            # Recall _stepDims are stored as a single-value tuple (for numpy indexing)\n            # So this just is grabbing the actual data.\n            if i in self._stepDims[0]:\n                unitSteps.append(compressedSteps.pop(0))\n            else:\n                # Add dummy value which will never get used (it gets reduced away)\n                unitSteps.append(0)\n        unitSteps = _tuplify(unitSteps)\n\n        return GridParameters(\n            unitSteps,\n            bounds,\n            self._unitStepLimits,\n            offset,\n            self._geomType,\n            self._symmetry,\n        )\n\n    @property\n    def offset(self) -> np.ndarray:\n        \"\"\"Offset in cm for each axis.\"\"\"\n        return self._offset\n\n    @offset.setter\n    def offset(self, offset: np.ndarray):\n        self._offset = offset\n\n    def __repr__(self) -> str:\n        msg = (\n            [\"<{} -- {}\\nBounds:\\n\".format(self.__class__.__name__, id(self))]\n            + [\"  {}\\n\".format(b) for b in self._bounds]\n            + [\"Steps:\\n\"]\n            + [\"  {}\\n\".format(b) for b in self._unitSteps]\n            + [\n                \"Anchor: {}\\n\".format(self.armiObject),\n                \"Offset: {}\\n\".format(self._offset),\n                \"Num Locations: {}>\".format(len(self)),\n            ]\n        )\n        return \"\".join(msg)\n\n    def __getitem__(self, ijk: Union[IJKType, List[IJKType]]) -> LocationBase:\n        \"\"\"\n        Get a location by (i, j, k) indices. If it does not exist, create a new one and return it.\n\n        Parameters\n        ----------\n        ijk : tuple of indices or list of the same\n            If provided a tuple, an IndexLocation will be created (if necessary) and\n            returned. 
If provided a list, each element will create a new IndexLocation\n            (if necessary), and a MultiIndexLocation containing all of the passed\n            indices will be returned.\n\n        Notes\n        -----\n        The method is defaultdict-like, in that it will create a new location on the fly. However,\n        the class itself is not really a dictionary, it is just index-able. For example, there is no\n        desire to have a ``__setitem__`` method, because the only way to create a location is by\n        retrieving it or through ``buildLocations``.\n        \"\"\"\n        try:\n            return self._locations[ijk]\n        except (KeyError, TypeError):\n            pass\n\n        if isinstance(ijk, tuple):\n            i, j, k = ijk\n            val = IndexLocation(i, j, k, self)\n            self._locations[ijk] = val\n        elif isinstance(ijk, list):\n            val = MultiIndexLocation(self)\n            locators = [self[idx] for idx in ijk]\n            val.extend(locators)\n        else:\n            raise TypeError(\"Unsupported index type `{}` for `{}`\".format(type(ijk), ijk))\n        return val\n\n    def items(self) -> Iterable[Tuple[IJKType, IndexLocation]]:\n        return self._locations.items()\n\n    def backUp(self):\n        \"\"\"Gather internal info that should be restored within a retainState.\"\"\"\n        self._backup = self._unitSteps, self._bounds, self._offset\n\n    def restoreBackup(self):\n        self._unitSteps, self._bounds, self._offset = self._backup\n\n    def getCoordinates(self, indices, nativeCoords=False) -> np.ndarray:\n        \"\"\"Return the coordinates of the center of the mesh cell at the given indices in cm.\n\n        .. 
impl:: Get the coordinates from a location in a grid.\n            :id: I_ARMI_GRID_GLOBAL_POS\n            :implements: R_ARMI_GRID_GLOBAL_POS\n\n            Probably the most common request of a structure grid will be to give the grid indices and return the\n            physical coordinates of the center of the mesh cell. This is super handy in any situation where the\n            coordinates have physical meaning.\n\n            The math for finding the centroid turns out to be very easy, as the mesh is defined on the coordinates. So\n            finding the mid-point along one axis is just taking the upper and lower bounds and dividing by two. And this\n            is done for all axes. There are no more complicated situations where we need to find the centroid of a\n            octagon on a rectangular mesh, or the like.\n        \"\"\"\n        indices = np.array(indices)\n        return self._evaluateMesh(indices, self._centroidBySteps, self._centroidByBounds)\n\n    def getCellBase(self, indices) -> np.ndarray:\n        \"\"\"Get the mesh base (lower left) of this mesh cell in cm.\"\"\"\n        indices = np.array(indices)\n        return self._evaluateMesh(indices, self._meshBaseBySteps, self._meshBaseByBounds)\n\n    def getCellTop(self, indices) -> np.ndarray:\n        \"\"\"Get the mesh top (upper right) of this mesh cell in cm.\"\"\"\n        indices = np.array(indices) + 1\n        return self._evaluateMesh(indices, self._meshBaseBySteps, self._meshBaseByBounds)\n\n    def _evaluateMesh(self, indices, stepOperator, boundsOperator) -> np.ndarray:\n        \"\"\"\n        Evaluate some function of indices on this grid.\n\n        Recall from above that steps are mesh-centered and bounds are mesh-edged.\n\n        Notes\n        -----\n        This method may be simplifiable. Complications arose from mixtures of bounds-based and step-based meshing. 
These\n        were separate subclasses, but in practice many cases have some mix of step-based (hexagons, squares), and bounds\n        based (radial, zeta).\n        \"\"\"\n        boundCoords = []\n        for ii, bounds in enumerate(self._bounds):\n            if bounds is not None:\n                boundCoords.append(boundsOperator(indices[ii], bounds))\n\n        # limit step operator to the step dimensions\n        stepCoords = stepOperator(np.array(indices)[self._stepDims])\n\n        # now mix/match bounds coords with step coords appropriately.\n        result = np.zeros(len(indices))\n        result[self._stepDims] = stepCoords\n        result[self._boundDims] = boundCoords\n\n        return result + self._offset\n\n    def _centroidBySteps(self, indices):\n        return np.dot(self._unitSteps, indices)\n\n    def _meshBaseBySteps(self, indices):\n        return (self._centroidBySteps(indices - 1) + self._centroidBySteps(indices)) / 2.0\n\n    @staticmethod\n    def _centroidByBounds(index, bounds):\n        if index < 0:\n            # avoid wrap-around\n            raise IndexError(\"Bounds-defined indices may not be negative.\")\n        return (bounds[index + 1] + bounds[index]) / 2.0\n\n    @staticmethod\n    def _meshBaseByBounds(index, bounds):\n        if index < 0:\n            raise IndexError(\"Bounds-defined indices may not be negative.\")\n        return bounds[index]\n\n    @staticmethod\n    def getNeighboringCellIndices(i, j=0, k=0):\n        \"\"\"Return the indices of the immediate neighbors of a mesh point in the plane.\"\"\"\n        return ((i + 1, j, k), (i, j + 1, k), (i - 1, j, k), (i, j - 1, k))\n\n    @staticmethod\n    def getAboveAndBelowCellIndices(indices):\n        i, j, k = indices\n        return ((i, j, k + 1), (i, j, k - 1))\n\n    def getIndexBounds(self):\n        \"\"\"\n        Get min index and number of indices in this grid.\n\n        Step-defined grids would be infinite but for the step limits defined in the 
constructor.\n\n        Notes\n        -----\n        This produces output that is intended to be passed to a ``range`` statement.\n        \"\"\"\n        indexBounds = []\n        for minMax, bounds in zip(self._unitStepLimits, self._bounds):\n            if bounds is None:\n                indexBounds.append(minMax)\n            else:\n                indexBounds.append((0, len(bounds)))\n        return tuple(indexBounds)\n\n    def getBounds(\n        self,\n    ) -> Tuple[Optional[Sequence[float]], Optional[Sequence[float]], Optional[Sequence[float]]]:\n        \"\"\"Return the grid bounds for each dimension, if present.\"\"\"\n        return self._bounds\n\n    def getLocatorFromRingAndPos(self, ring, pos, k=0):\n        \"\"\"\n        Return the location based on ring and position.\n\n        Parameters\n        ----------\n        ring : int\n            Ring number (1-based indexing)\n        pos : int\n            Position number (1-based indexing)\n        k : int, optional\n            Axial index (0-based indexing)\n\n        See Also\n        --------\n        getIndicesFromRingAndPos\n            This implements the transform into i, j indices based on ring and position.\n        \"\"\"\n        i, j = self.getIndicesFromRingAndPos(ring, pos)\n        return self[i, j, k]\n\n    @staticmethod\n    @abstractmethod\n    def getIndicesFromRingAndPos(ring: int, pos: int):\n        \"\"\"\n        Return i, j indices given ring and position.\n\n        Note\n        ----\n        This should be implemented as a staticmethod, since no Grids currently in\n        existence actually need any instance data to perform this task, and\n        staticmethods provide the convenience of calling the method without an instance\n        of the class in the first place.\n        \"\"\"\n\n    @abstractmethod\n    def getMinimumRings(self, n: int) -> int:\n        \"\"\"\n        Return the minimum number of rings needed to fit ``n`` objects.\n\n        Warning\n       
 -------\n        While this is useful and safe for answering the question of \"how many rings do I\n        need to hold N things?\", is generally not safe to use it to answer \"I have N\n        things; within how many rings are they distributed?\". This function provides a\n        lower bound, assuming that objects are densely-packed. If they are not actually\n        densely packed, this may be unphysical.\n        \"\"\"\n\n    @abstractmethod\n    def getPositionsInRing(self, ring: int) -> int:\n        \"\"\"Return the number of positions within a ring.\"\"\"\n\n    def getRingPos(self, indices) -> Tuple[int, int]:\n        \"\"\"\n        Get ring and position number in this grid.\n\n        For non-hex grids this is just i and j.\n\n        A tuple is returned so that it is easy to compare pairs of indices.\n        \"\"\"\n        # Regular grids don't know about ring and position. Check the parent.\n        if (\n            self.armiObject is not None\n            and self.armiObject.parent is not None\n            and self.armiObject.parent.spatialGrid is not None\n        ):\n            return self.armiObject.parent.spatialGrid.getRingPos(indices)\n\n        raise ValueError(\"No ring position found, because no spatial grid was found.\")\n\n    def getAllIndices(self):\n        \"\"\"Get all possible indices in this grid.\"\"\"\n        iBounds, jBounds, kBounds = self.getIndexBounds()\n        allIndices = tuple(itertools.product(range(*iBounds), range(*jBounds), range(*kBounds)))\n        return allIndices\n\n    def _buildLocations(self):\n        \"\"\"Populate all grid cells with a characteristic SpatialLocator.\"\"\"\n        for i, j, k in self.getAllIndices():\n            loc = IndexLocation(i, j, k, self)\n            self._locations[(i, j, k)] = loc\n\n    @property\n    @abstractmethod\n    def pitch(self) -> Union[float, Tuple[float, float]]:\n        \"\"\"Grid pitch.\n\n        Some implementations may rely on a single pitch, such\n   
     as axial or hexagonal grids. Cartesian grids may use\n        a single pitch between elements or separate pitches\n        for the x and y dimensions.\n\n        Returns\n        -------\n        float or tuple of (float, float)\n            Grid spacing in cm\n        \"\"\"\n\n\ndef _tuplify(maybeArray) -> tuple:\n    if isinstance(maybeArray, (np.ndarray, list, tuple)):\n        maybeArray = tuple(tuple(row) if isinstance(row, (np.ndarray, list)) else row for row in maybeArray)\n\n    return maybeArray\n"
  },
  {
    "path": "armi/reactor/grids/tests/__init__.py",
    "content": "# Copyright 2026 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"
  },
  {
    "path": "armi/reactor/grids/tests/test_grids.py",
    "content": "# Copyright 2023 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for grids.\"\"\"\n\nimport math\nimport pickle\nimport unittest\nfrom io import BytesIO\nfrom random import randint\n\nimport numpy as np\nfrom numpy.testing import assert_allclose, assert_array_equal\n\nfrom armi.reactor import geometry, grids\nfrom armi.utils import hexagon\n\n\nclass MockLocator(grids.IndexLocation):\n    \"\"\"\n    Locator subclass that with direct location -> location paternity (to avoid\n    needing blocks, assems).\n    \"\"\"\n\n    @property\n    def parentLocation(self):\n        return self._parent\n\n\nclass MockCoordLocator(grids.CoordinateLocation):\n    @property\n    def parentLocation(self):\n        return self._parent\n\n\nclass MockArmiObject:\n    \"\"\"Any sort of object that can serve as a grid's armiObject attribute.\"\"\"\n\n    def __init__(self, parent=None):\n        self.parent = parent\n\n\nclass MockStructuredGrid(grids.StructuredGrid):\n    \"\"\"Need a concrete class to test a lot of inherited methods.\n\n    Abstract methods from the parent now raise ``NotImplementedError``\n    \"\"\"\n\n\n# De-abstract the mock structured grid to test some basic\n# properties, but let the abstract methods error\ndef _throwsNotImplemented(*args, **kwargs):\n    raise NotImplementedError\n\n\nfor f in MockStructuredGrid.__abstractmethods__:\n    setattr(MockStructuredGrid, f, 
_throwsNotImplemented)\n\nMockStructuredGrid.__abstractmethods__ = ()\n\n\nclass TestSpatialLocator(unittest.TestCase):\n    def test_add(self):\n        loc1 = grids.IndexLocation(1, 2, 0, None)\n        loc2 = grids.IndexLocation(2, 2, 0, None)\n        self.assertEqual(loc1 + loc2, grids.IndexLocation(3, 4, 0, None))\n\n    def test_multiIndexEq(self):\n        \"\"\"Check multi index locations are only true if they live on the same grid and have the same locations.\"\"\"\n        a = grids.MultiIndexLocation(None)\n        a.append(grids.IndexLocation(0, 0, 0, None))\n        b = grids.MultiIndexLocation(None)\n        b.append(grids.IndexLocation(1, 1, 1, None))\n        self.assertNotEqual(a, b)\n\n        c = grids.MultiIndexLocation(None)\n        c.append(grids.IndexLocation(0, 0, 0, None))\n        self.assertEqual(a, c)\n\n    def test_multiIndexEqWithLocations(self):\n        \"\"\"Two multi index locators on the same grid are equal.\"\"\"\n        grid = MockStructuredGrid()\n        a = grids.MultiIndexLocation(grid)\n        a.extend((grids.IndexLocation(i, -i, i, grid) for i in range(5)))\n\n        b = grids.MultiIndexLocation(grid)\n        b.extend(a)\n\n        self.assertEqual(a, b)\n        # If the order differs but all the locations are the same, the locators are considered not equal\n        locs = list(a)\n        locs.insert(0, locs.pop())\n        c = grids.MultiIndexLocation(grid)\n        c.extend(locs)\n        self.assertNotEqual(c, a)\n\n    def test_coordinateLocationEq(self):\n        \"\"\"Test for equality on the coordinate location object.\"\"\"\n        base = grids.CoordinateLocation(1, -3, 5, MockStructuredGrid())\n        self.assertEqual(base, base)\n        self.assertEqual(base, grids.CoordinateLocation(base.i, base.j, base.k, base.grid))\n        self.assertNotEqual(base, grids.CoordinateLocation(base.i, base.j, base.k, None))\n        # Pick some points with different indices in one dimension\n        # Offsets are 
arbitrary\n        self.assertNotEqual(base, grids.CoordinateLocation(base.i + 1, base.j, base.k, base.grid))\n        self.assertNotEqual(base, grids.CoordinateLocation(base.i, base.j - 2, base.k, base.grid))\n        self.assertNotEqual(base, grids.CoordinateLocation(base.i, base.j, base.k + 13, base.grid))\n\n    def test_coordinateLocationHash(self):\n        \"\"\"Ensure we can hash the location based on it's position, not the grid.\"\"\"\n        a = grids.CoordinateLocation(5, 9, 1, MockStructuredGrid())\n        self.assertEqual(hash(a), hash((a.i, a.j, a.k)))\n        b = grids.CoordinateLocation(a.i, a.j, a.k, None)\n        self.assertEqual(hash(b), hash(a))\n\n    def test_recursion(self):\n        \"\"\"\n        Make sure things work as expected with a chain of locators/grids/locators.\n\n        This makes a Cartesian-like reactor out of unit cubes. The origin\n        is in the center of the central cube radially and the bottom axially due\n        to the different way steps and bounds are set up.\n        \"\"\"\n        core = MockArmiObject()\n        assem = MockArmiObject(core)\n        block = MockArmiObject(assem)\n\n        # build meshes just like how they're used on a regular system.\n        # 2-D grid\n        coreGrid = grids.CartesianGrid.fromRectangle(1.0, 1.0, armiObject=core)\n\n        # 1-D z-mesh\n        assemblyGrid = grids.AxialGrid.fromNCells(5, armiObject=assem)\n\n        # pins sit in this 2-D grid.\n        blockGrid = grids.CartesianGrid.fromRectangle(0.1, 0.1, armiObject=block)\n\n        coreLoc = grids.CoordinateLocation(0.0, 0.0, 0.0, None)\n        core.spatialLocator = coreLoc\n\n        assemblyLoc = grids.IndexLocation(2, 3, 0, coreGrid)\n        assem.spatialLocator = assemblyLoc\n\n        blockLoc = grids.IndexLocation(0, 0, 3, assemblyGrid)\n        block.spatialLocator = blockLoc\n\n        pinIndexLoc = grids.IndexLocation(1, 5, 0, blockGrid)\n        pinFree = grids.CoordinateLocation(1.0, 2.0, 3.0, 
blockGrid)\n\n        assert_allclose(blockLoc.getCompleteIndices(), np.array((2, 3, 3)))\n        assert_allclose(blockLoc.getGlobalCoordinates(), (2.0, 3.0, 3.5))\n        assert_allclose(blockLoc.getGlobalCellBase(), (1.5, 2.5, 3))\n        assert_allclose(blockLoc.getGlobalCellTop(), (2.5, 3.5, 4))\n\n        # check coordinates of pins in block\n        assert_allclose(pinFree.getGlobalCoordinates(), (2.0 + 1.0, 3.0 + 2.0, 3.5 + 3.0))  # epic\n        assert_allclose(pinIndexLoc.getGlobalCoordinates(), (2.0 + 0.1, 3.0 + 0.5, 3.5))  # wow\n\n        # pin indices should not combine with the parent indices.\n        assert_allclose(pinIndexLoc.getCompleteIndices(), (1, 5, 0))\n\n    def test_recursionPin(self):\n        \"\"\"Ensure pin the center assem has axial coordinates consistent with a pin in\n        an off-center assembly.\n        \"\"\"\n        core = MockArmiObject()\n        assem = MockArmiObject(core)\n        block = MockArmiObject(assem)\n\n        # 2-D grid\n        coreGrid = grids.CartesianGrid.fromRectangle(1.0, 1.0, armiObject=core)\n        # 1-D z-mesh\n        assemblyGrid = grids.AxialGrid.fromNCells(5, armiObject=assem)\n        # pins sit in this 2-D grid.\n        blockGrid = grids.CartesianGrid.fromRectangle(0.1, 0.1, armiObject=block)\n\n        coreLoc = grids.CoordinateLocation(0.0, 0.0, 0.0, None)\n        core.spatialLocator = coreLoc\n        assemblyLoc = grids.IndexLocation(0, 0, 0, coreGrid)\n        assem.spatialLocator = assemblyLoc\n        blockLoc = grids.IndexLocation(0, 0, 3, assemblyGrid)\n        block.spatialLocator = blockLoc\n        pinIndexLoc = grids.IndexLocation(1, 5, 0, blockGrid)\n\n        assert_allclose(pinIndexLoc.getCompleteIndices(), (1, 5, 0))\n\n\nclass TestGrid(unittest.TestCase):\n    def test_basicPosition(self):\n        \"\"\"\n        Ensure a basic Cartesian grid works as expected.\n\n        The default stepped grid defines zero at the center of the (0,0,0)th cell.\n        Its centroid 
is 0., 0., 0). This convention is nicely compatible with 120-degree hex grid.\n\n        Full core Cartesian meshes will want to be shifted to bottom left of 0th cell.\n        \"\"\"\n        grid = MockStructuredGrid(unitSteps=((1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0)))\n        assert_allclose(grid.getCoordinates((1, 1, 1)), (1, 1, 1))\n        assert_allclose(grid.getCoordinates((0, 0, 0)), (0.0, 0.0, 0.0))\n        assert_allclose(grid.getCoordinates((0, 0, -1)), (0, 0, -1))\n        assert_allclose(grid.getCoordinates((1, 0, 0)), (1, 0, 0))\n\n    def test_neighbors(self):\n        grid = MockStructuredGrid(unitSteps=((1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0)))\n        neighbs = grid.getNeighboringCellIndices(0, 0, 0)\n        self.assertEqual(len(neighbs), 4)\n\n    def test_label(self):\n        grid = MockStructuredGrid(unitSteps=((1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0)))\n        self.assertEqual(grid.getLabel((1, 1, 2)), \"001-001-002\")\n\n    def test_isAxialOnly(self):\n        grid = grids.HexGrid.fromPitch(1.0, numRings=3)\n        self.assertAlmostEqual(grid.pitch, 1.0)\n        self.assertEqual(grid.isAxialOnly, False)\n\n        grid2 = grids.AxialGrid.fromNCells(10)\n        self.assertEqual(grid2.isAxialOnly, True)\n\n    def test_lookupFactory(self):\n        grid = grids.HexGrid.fromPitch(1.0, numRings=3)\n        self.assertAlmostEqual(grid.pitch, 1.0)\n        self.assertEqual(grid[10, 5, 0].i, 10)\n\n    def test_quasiReduce(self):\n        \"\"\"Make sure our DB-friendly version of reduce works.\"\"\"\n        grid = grids.HexGrid.fromPitch(1.0, numRings=3)\n        self.assertAlmostEqual(grid.pitch, 1.0)\n        reduction = grid.reduce()\n        self.assertAlmostEqual(reduction[0][1][1], 1.0)\n\n    def test_generateSortedHexLocationList(self):\n        for pitch in [1.0, 3.14]:\n            for rings in [3, 12]:\n                grid = grids.HexGrid.fromPitch(pitch, numRings=rings)\n\n                lst = 
grid.generateSortedHexLocationList(1)\n                self.assertEqual(len(lst), 1)\n                self.assertEqual(lst[0].indices.tolist(), [0, 0, 0])\n\n                lst = grid.generateSortedHexLocationList(2)\n                self.assertEqual(len(lst), 2)\n                self.assertEqual(lst[0].indices.tolist(), [0, 0, 0])\n                self.assertEqual(lst[1].indices.tolist(), [-1, 0, 0])\n\n                lst = grid.generateSortedHexLocationList(4)\n                self.assertEqual(len(lst), 4)\n                self.assertEqual(lst[0].indices.tolist(), [0, 0, 0])\n                self.assertEqual(lst[1].indices.tolist(), [-1, 0, 0])\n                self.assertEqual(lst[2].indices.tolist(), [-1, 1, 0])\n                self.assertEqual(lst[3].indices.tolist(), [0, -1, 0])\n\n    def test_getitem(self):\n        \"\"\"\n        Test that locations are created on demand, and the multi-index locations are\n        returned when necessary.\n\n        .. test:: Return the locations of grid items with multiplicity greater than one.\n            :id: T_ARMI_GRID_ELEM_LOC\n            :tests: R_ARMI_GRID_ELEM_LOC\n        \"\"\"\n        grid = grids.HexGrid.fromPitch(1.0, numRings=0)\n        self.assertAlmostEqual(grid.pitch, 1.0)\n        self.assertNotIn((0, 0, 0), grid._locations)\n        _ = grid[0, 0, 0]\n        self.assertIn((0, 0, 0), grid._locations)\n\n        multiLoc = grid[[(0, 0, 0), (1, 0, 0), (0, 1, 0)]]\n        self.assertIsInstance(multiLoc, grids.MultiIndexLocation)\n        self.assertIn((1, 0, 0), grid._locations)\n\n        i = multiLoc.indices\n        i = [ii.tolist() for ii in i]\n        self.assertEqual(i, [[0, 0, 0], [1, 0, 0], [0, 1, 0]])\n\n    def test_ringPosFromIndicesIncorrect(self):\n        \"\"\"Test the getRingPos fails if there is no armiObect or parent.\"\"\"\n        grid = MockStructuredGrid(unitSteps=((1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0)))\n\n        grid.armiObject = None\n        with 
self.assertRaises(ValueError):\n            grid.getRingPos(((0, 0), (1, 1)))\n\n\nclass TestHexGrid(unittest.TestCase):\n    \"\"\"A set of tests for the Hexagonal Grid.\"\"\"\n\n    def test_getCoordinatesFlatsUp(self):\n        \"\"\"Test getCoordinates() for flats up hex grids.\"\"\"\n        grid = grids.HexGrid.fromPitch(1.0, cornersUp=False)\n        self.assertAlmostEqual(grid.pitch, 1.0)\n        side = 1.0 / math.sqrt(3)\n        assert_allclose(grid.getCoordinates((0, 0, 0)), (0.0, 0.0, 0.0))\n        assert_allclose(grid.getCoordinates((1, 0, 0)), (1.5 * side, 0.5, 0.0))\n        assert_allclose(grid.getCoordinates((-1, 0, 0)), (-1.5 * side, -0.5, 0.0))\n        assert_allclose(grid.getCoordinates((0, 1, 0)), (0, 1.0, 0.0))\n        assert_allclose(grid.getCoordinates((1, -1, 0)), (1.5 * side, -0.5, 0.0))\n\n        unitSteps = grid.reduce()[0]\n        iDirection = tuple(direction[0] for direction in unitSteps)\n        jDirection = tuple(direction[1] for direction in unitSteps)\n        for directionVector in (iDirection, jDirection):\n            self.assertAlmostEqual(\n                (sum(val**2 for val in directionVector)) ** 0.5,\n                1.0,\n                msg=f\"Direction vector {directionVector} should have magnitude 1 for pitch 1.\",\n            )\n        assert_allclose(grid.getCoordinates((1, 0, 0)), iDirection)\n        assert_allclose(grid.getCoordinates((0, 1, 0)), jDirection)\n\n    def test_getCoordinatesCornersUp(self):\n        \"\"\"Test getCoordinates() for corners up hex grids.\"\"\"\n        grid = grids.HexGrid.fromPitch(1.0, cornersUp=True)\n        self.assertAlmostEqual(grid.pitch, 1.0)\n        side = 1.0 / math.sqrt(3)\n        assert_allclose(grid.getCoordinates((0, 0, 0)), (0.0, 0.0, 0.0))\n        assert_allclose(grid.getCoordinates((1, 0, 0)), (0.5, 1.5 * side, 0.0))\n        assert_allclose(grid.getCoordinates((-1, 0, 0)), (-0.5, -1.5 * side, 0.0))\n        assert_allclose(grid.getCoordinates((0, 1, 0)), 
(-0.5, 1.5 * side, 0.0))\n        assert_allclose(grid.getCoordinates((1, -1, 0)), (1, 0.0, 0.0))\n\n        unitSteps = grid.reduce()[0]\n        iDirection = tuple(direction[0] for direction in unitSteps)\n        jDirection = tuple(direction[1] for direction in unitSteps)\n        for directionVector in (iDirection, jDirection):\n            self.assertAlmostEqual(\n                (sum(val**2 for val in directionVector)) ** 0.5,\n                1.0,\n                msg=f\"Direction vector {directionVector} should have magnitude 1 for pitch 1.\",\n            )\n        assert_allclose(grid.getCoordinates((1, 0, 0)), iDirection)\n        assert_allclose(grid.getCoordinates((0, 1, 0)), jDirection)\n\n    def test_getLocalCoordinatesHex(self):\n        \"\"\"Test getLocalCoordinates() is different for corners up vs flats up hex grids.\"\"\"\n        grid0 = grids.HexGrid.fromPitch(1.0, cornersUp=True)\n        grid1 = grids.HexGrid.fromPitch(1.0, cornersUp=False)\n        for i in range(3):\n            for j in range(3):\n                if i == 0 and j == 0:\n                    continue\n                coords0 = grid0[i, j, 0].getLocalCoordinates()\n                coords1 = grid1[i, j, 0].getLocalCoordinates()\n                self.assertNotEqual(coords0[0], coords1[0], msg=f\"X @ ({i}, {j})\")\n                self.assertNotEqual(coords0[1], coords1[1], msg=f\"Y @ ({i}, {j})\")\n                self.assertEqual(coords0[2], coords1[2], msg=f\"Z @ ({i}, {j})\")\n\n    def test_getLocalCoordinatesCornersUp(self):\n        \"\"\"Test getLocalCoordinates() for corners up hex grids.\"\"\"\n        # validate the first ring of a corners-up hex grid\n        grid = grids.HexGrid.fromPitch(1.0, cornersUp=True)\n        vals = []\n        for pos in range(grid.getPositionsInRing(2)):\n            i, j = grid.getIndicesFromRingAndPos(2, pos + 1)\n            vals.append(grid[i, j, 0].getLocalCoordinates())\n\n        # short in Y\n        maxY = max(v[1] for v in 
vals)\n        minY = min(v[1] for v in vals)\n        val = math.sqrt(3) / 2\n        self.assertAlmostEqual(maxY, val, delta=0.0001)\n        self.assertAlmostEqual(minY, -val, delta=0.0001)\n\n        # long in X\n        maxX = max(v[0] for v in vals)\n        minX = min(v[0] for v in vals)\n        self.assertAlmostEqual(maxX, 1)\n        self.assertAlmostEqual(minX, -1)\n\n    def test_getLocalCoordinatesFlatsUp(self):\n        \"\"\"Test getLocalCoordinates() for flats up hex grids.\"\"\"\n        # validate the first ring of a flats-up hex grid\n        grid = grids.HexGrid.fromPitch(1.0, cornersUp=False)\n        vals = []\n        for pos in range(grid.getPositionsInRing(2)):\n            i, j = grid.getIndicesFromRingAndPos(2, pos + 1)\n            vals.append(grid[i, j, 0].getLocalCoordinates())\n\n        # long in Y\n        maxY = max(v[1] for v in vals)\n        minY = min(v[1] for v in vals)\n        self.assertAlmostEqual(maxY, 1)\n        self.assertAlmostEqual(minY, -1)\n\n        # short in X\n        maxX = max(v[0] for v in vals)\n        minX = min(v[0] for v in vals)\n        val = math.sqrt(3) / 2\n        self.assertAlmostEqual(maxX, val, delta=0.0001)\n        self.assertAlmostEqual(minX, -val, delta=0.0001)\n\n    def test_neighbors(self):\n        grid = grids.HexGrid.fromPitch(1.0)\n        neighbs = grid.getNeighboringCellIndices(0, 0, 0)\n        self.assertEqual(len(neighbs), 6)\n        self.assertIn((1, -1, 0), neighbs)\n\n    def test_ringPosFromIndices(self):\n        \"\"\"Test conversion from<-->to ring/position based on hand-prepared right answers.\"\"\"\n        grid = grids.HexGrid.fromPitch(1.0)\n        for indices, ringPos in [\n            ((0, 0), (1, 1)),\n            ((1, 0), (2, 1)),\n            ((0, 1), (2, 2)),\n            ((-1, 1), (2, 3)),\n            ((-1, 0), (2, 4)),\n            ((0, -1), (2, 5)),\n            ((1, -1), (2, 6)),\n            ((1, 1), (3, 2)),\n            ((11, -7), (12, 60)),\n          
  ((-1, -2), (4, 12)),\n            ((-3, 1), (4, 9)),\n            ((-2, 3), (4, 6)),\n            ((1, 2), (4, 3)),\n            ((2, -4), (5, 19)),\n        ]:\n            self.assertEqual(indices, grid.getIndicesFromRingAndPos(*ringPos))\n            self.assertEqual(ringPos, grid.getRingPos(indices))\n\n    def test_label(self):\n        grid = grids.HexGrid.fromPitch(1.0)\n        indices = grid.getIndicesFromRingAndPos(12, 5)\n        label1 = grid.getLabel(indices)\n        self.assertEqual(label1, \"012-005\")\n        self.assertEqual(grids.locatorLabelToIndices(label1), (12, 5, None))\n\n        label2 = grid.getLabel(indices + (5,))\n        self.assertEqual(label2, \"012-005-005\")\n        self.assertEqual(grids.locatorLabelToIndices(label2), (12, 5, 5))\n\n    def test_overlapsWhichSymmetryLine(self):\n        grid = grids.HexGrid.fromPitch(1.0)\n        self.assertEqual(\n            grid.overlapsWhichSymmetryLine(grid.getIndicesFromRingAndPos(5, 3)),\n            grids.BOUNDARY_60_DEGREES,\n        )\n        self.assertEqual(\n            grid.overlapsWhichSymmetryLine(grid.getIndicesFromRingAndPos(5, 23)),\n            grids.BOUNDARY_0_DEGREES,\n        )\n        self.assertEqual(\n            grid.overlapsWhichSymmetryLine(grid.getIndicesFromRingAndPos(3, 4)),\n            grids.BOUNDARY_120_DEGREES,\n        )\n\n    def test_getSymmetricIdenticalsThird(self):\n        \"\"\"Retrieve equivalent contents based on third symmetry.\n\n        .. 
test:: Equivalent contents in third geometry are retrievable.\n            :id: T_ARMI_GRID_EQUIVALENTS\n            :tests: R_ARMI_GRID_EQUIVALENTS\n        \"\"\"\n        g = grids.HexGrid.fromPitch(1.0)\n        g.symmetry = str(geometry.SymmetryType(geometry.DomainType.THIRD_CORE, geometry.BoundaryType.PERIODIC))\n        self.assertEqual(g.getSymmetricEquivalents((3, -2)), [(-1, 3), (-2, -1)])\n        self.assertEqual(g.getSymmetricEquivalents((2, 1)), [(-3, 2), (1, -3)])\n\n        symmetrics = g.getSymmetricEquivalents(g.getIndicesFromRingAndPos(5, 3))\n        self.assertEqual([(5, 11), (5, 19)], [g.getRingPos(indices) for indices in symmetrics])\n\n    def test_thirdAndFullSymmetry(self):\n        \"\"\"Test that we can construct a full and a 1/3 core grid.\n\n        .. test:: Test 1/3 and full cores have the correct positions and rings.\n            :id: T_ARMI_GRID_SYMMETRY\n            :tests: R_ARMI_GRID_SYMMETRY\n        \"\"\"\n        full = grids.HexGrid.fromPitch(1.0, symmetry=\"full core\")\n        third = grids.HexGrid.fromPitch(1.0, symmetry=\"third core periodic\")\n\n        # check full core\n        self.assertEqual(full.getMinimumRings(2), 2)\n        self.assertEqual(full.getIndicesFromRingAndPos(2, 2), (0, 1))\n        self.assertEqual(full.getPositionsInRing(3), 12)\n        self.assertEqual(full.getSymmetricEquivalents((3, -2)), [])\n\n        # check 1/3 core\n        self.assertEqual(third.getMinimumRings(2), 2)\n        self.assertEqual(third.getIndicesFromRingAndPos(2, 2), (0, 1))\n        self.assertEqual(third.getPositionsInRing(3), 12)\n        self.assertEqual(third.getSymmetricEquivalents((3, -2)), [(-1, 3), (-2, -1)])\n\n    def test_cornersUpFlatsUp(self):\n        \"\"\"Test the cornersUp attribute of the fromPitch method.\n\n        .. 
test:: Build a points-up and a flats-up hexagonal grids.\n            :id: T_ARMI_GRID_HEX_TYPE\n            :tests: R_ARMI_GRID_HEX_TYPE\n        \"\"\"\n        flatsUp = grids.HexGrid.fromPitch(1.0, cornersUp=False)\n        self.assertAlmostEqual(flatsUp._unitSteps[0][0], math.sqrt(3) / 2)\n        self.assertAlmostEqual(flatsUp.pitch, 1.0)\n\n        cornersUp = grids.HexGrid.fromPitch(1.0, cornersUp=True)\n        self.assertAlmostEqual(cornersUp._unitSteps[0][0], 0.5)\n        self.assertAlmostEqual(cornersUp.pitch, 1.0)\n\n    def test_triangleCoords(self):\n        g = grids.HexGrid.fromPitch(8.15)\n        indices1 = g.getIndicesFromRingAndPos(5, 3) + (0,)\n        indices2 = g.getIndicesFromRingAndPos(5, 23) + (0,)\n        indices3 = g.getIndicesFromRingAndPos(3, 4) + (0,)\n        cur = g.triangleCoords(indices1)\n        ref = [\n            (16.468_916_428_634_078, 25.808_333_333_333_337),\n            (14.116_214_081_686_351, 27.166_666_666_666_67),\n            (11.763_511_734_738_627, 25.808_333_333_333_337),\n            (11.763_511_734_738_627, 23.091_666_666_666_67),\n            (14.116_214_081_686_351, 21.733_333_333_333_334),\n            (16.468_916_428_634_078, 23.091_666_666_666_67),\n        ]\n        assert_allclose(cur, ref)\n\n        cur = grids.HexGrid.fromPitch(2.5).triangleCoords(indices2)\n        ref = [\n            (9.381_941_874_331_42, 0.416_666_666_666_666_7),\n            (8.660_254_037_844_387, 0.833_333_333_333_333_4),\n            (7.938_566_201_357_355_5, 0.416_666_666_666_666_7),\n            (7.938_566_201_357_355_5, -0.416_666_666_666_666_7),\n            (8.660_254_037_844_387, -0.833_333_333_333_333_4),\n            (9.381_941_874_331_42, -0.416_666_666_666_666_7),\n        ]\n        assert_allclose(cur, ref)\n\n        cur = grids.HexGrid.fromPitch(3.14).triangleCoords(indices3)\n        ref = [\n            (-1.812_879_845_255_425, 5.233_333_333_333_333),\n            (-2.719_319_767_883_137_6, 
5.756_666_666_666_667),\n            (-3.625_759_690_510_850_2, 5.233_333_333_333_333),\n            (-3.625_759_690_510_850_2, 4.186_666_666_666_666_5),\n            (-2.719_319_767_883_137_6, 3.663_333_333_333_333),\n            (-1.812_879_845_255_425, 4.186_666_666_666_666_5),\n        ]\n        assert_allclose(cur, ref)\n\n    def test_getIndexBounds(self):\n        numRings = 5\n        g = grids.HexGrid.fromPitch(1.0, numRings=numRings)\n        boundsIJK = g.getIndexBounds()\n        self.assertEqual(boundsIJK, ((-numRings, numRings), (-numRings, numRings), (0, 1)))\n\n    def test_getAllIndices(self):\n        grid = grids.HexGrid.fromPitch(1.0, numRings=3)\n        indices = grid.getAllIndices()\n        self.assertIn((1, 2, 0), indices)\n\n    def test_buildLocations(self):\n        grid = grids.HexGrid.fromPitch(1.0, numRings=3)\n        loc1 = grid[1, 2, 0]\n        self.assertEqual(loc1.i, 1)\n        self.assertEqual(loc1.j, 2)\n\n    def test_is_pickleable(self):\n        grid = grids.HexGrid.fromPitch(1.0, numRings=3)\n        loc = grid[1, 1, 0]\n        for protocol in range(pickle.HIGHEST_PROTOCOL + 1):\n            buf = BytesIO()\n            pickle.dump(loc, buf, protocol=protocol)\n            buf.seek(0)\n            newLoc = pickle.load(buf)\n            assert_allclose(loc.indices, newLoc.indices)\n\n    def test_adjustPitchFlatsUp(self):\n        \"\"\"Adjust the pitch of a hexagonal lattice, for a flats up grid.\n\n        .. test:: Construct a hexagonal lattice with three rings.\n            :id: T_ARMI_GRID_HEX0\n            :tests: R_ARMI_GRID_HEX\n\n        .. 
test:: Return the grid coordinates of different locations.\n            :id: T_ARMI_GRID_GLOBAL_POS0\n            :tests: R_ARMI_GRID_GLOBAL_POS\n        \"\"\"\n        # run this test for a grid with no offset, and then a few random offset values\n        for offset in [0, 1, 1.123, 3.14]:\n            # build a hex grid with pitch=1, 3 rings, and the above offset\n            grid = grids.HexGrid(\n                unitSteps=((1.5 / math.sqrt(3), 0.0, 0.0), (0.5, 1, 0.0), (0, 0, 0)),\n                unitStepLimits=((-3, 3), (-3, 3), (0, 1)),\n                offset=np.array([offset, offset, offset]),\n            )\n\n            # test number of rings before converting pitch\n            self.assertEqual(grid._unitStepLimits[0][1], 3)\n\n            # test that we CAN change the pitch, and it scales the grid (but not the offset)\n            v1 = grid.getCoordinates((1, 0, 0))\n            grid.changePitch(2.0)\n            self.assertAlmostEqual(grid.pitch, 2.0)\n            v2 = grid.getCoordinates((1, 0, 0))\n            assert_allclose(2 * v1 - offset, v2)\n\n            # basic sanity: test number of rings has not changed\n            self.assertEqual(grid._unitStepLimits[0][1], 3)\n\n            # basic sanity: check the offset exists and is correct\n            for i in range(3):\n                self.assertEqual(grid.offset[i], offset)\n\n    def test_adjustPitchCornersUp(self):\n        \"\"\"Adjust the pich of a hexagonal lattice, for a \"corners up\" grid.\n\n        .. test:: Construct a hexagonal lattice with three rings.\n            :id: T_ARMI_GRID_HEX1\n            :tests: R_ARMI_GRID_HEX\n\n        .. 
test:: Return the grid coordinates of different locations.\n            :id: T_ARMI_GRID_GLOBAL_POS1\n            :tests: R_ARMI_GRID_GLOBAL_POS\n        \"\"\"\n        # run this test for a grid with no offset, and then a few random offset values\n        for offset in [0, 1, 1.123, 3.14]:\n            offsets = [offset, 0, 0]\n            # build a hex grid with pitch=1, 3 rings, and the above offset\n            grid = grids.HexGrid(\n                unitSteps=(\n                    (0.5, -0.5, 0),\n                    (1.5 / math.sqrt(3), 1.5 / math.sqrt(3), 0),\n                    (0, 0, 0),\n                ),\n                unitStepLimits=((-3, 3), (-3, 3), (0, 1)),\n                offset=np.array(offsets),\n            )\n\n            # test number of rings before converting pitch\n            self.assertEqual(grid._unitStepLimits[0][1], 3)\n\n            # test that we CAN change the pitch, and it scales the grid (but not the offset)\n            v1 = grid.getCoordinates((1, 0, 0))\n            grid.changePitch(2.0)\n            self.assertAlmostEqual(grid.pitch, 2.0, delta=1e-9)\n            v2 = grid.getCoordinates((1, 0, 0))\n            correction = np.array([0.5, math.sqrt(3) / 2, 0])\n            assert_allclose(v1 + correction, v2)\n\n            # basic sanity: test number of rings has not changed\n            self.assertEqual(grid._unitStepLimits[0][1], 3)\n\n            # basic sanity: check the offset exists and is correct\n            for i, off in enumerate(offsets):\n                self.assertEqual(grid.offset[i], off)\n\n    def test_badIndices(self):\n        grid = grids.HexGrid.fromPitch(1.0, numRings=3)\n\n        # this is actually ok because step-defined grids are infinite\n        self.assertEqual(grid.getCoordinates((-100, 2000, 5))[2], 0.0)\n\n        grid = grids.AxialGrid.fromNCells(10)\n        with self.assertRaises(IndexError):\n            grid.getCoordinates((0, 5, -1))\n\n    def test_isInFirstThird(self):\n        
\"\"\"Determine if grid is in the first third.\n\n        .. test:: Determine if grid is in the first third.\n            :id: T_ARMI_GRID_SYMMETRY_LOC\n            :tests: R_ARMI_GRID_SYMMETRY_LOC\n        \"\"\"\n        grid = grids.HexGrid.fromPitch(1.0, numRings=10)\n        self.assertTrue(grid.isInFirstThird(grid[0, 0, 0]))\n        self.assertTrue(grid.isInFirstThird(grid[1, 0, 0]))\n        self.assertTrue(grid.isInFirstThird(grid[3, -1, 0]))\n        self.assertFalse(grid.isInFirstThird(grid[1, -1, 0]))\n        self.assertFalse(grid.isInFirstThird(grid[-1, -1, 0]))\n        self.assertFalse(grid.isInFirstThird(grid[3, -2, 0]))\n\n    def test_indicesAndEdgeFromRingAndPos(self):\n        i, j, edge = grids.HexGrid._indicesAndEdgeFromRingAndPos(0, 0)\n        self.assertEqual(i, 0)\n        self.assertEqual(j, -1)\n        self.assertEqual(edge, 1)\n\n        i, j, edge = grids.HexGrid._indicesAndEdgeFromRingAndPos(1, 1)\n        self.assertEqual(i, 0)\n        self.assertEqual(j, 0)\n        self.assertEqual(edge, 0)\n\n        i, j, edge = grids.HexGrid._indicesAndEdgeFromRingAndPos(3, 11)\n        self.assertEqual(i, 2)\n        self.assertEqual(j, -2)\n        self.assertEqual(edge, 5)\n\n        i, j, edge = grids.HexGrid._indicesAndEdgeFromRingAndPos(3, 9)\n        self.assertEqual(i, 0)\n        self.assertEqual(j, -2)\n        self.assertEqual(edge, 4)\n\n        i, j, edge = grids.HexGrid._indicesAndEdgeFromRingAndPos(3, 7)\n        self.assertEqual(i, -2)\n        self.assertEqual(j, 0)\n        self.assertEqual(edge, 3)\n\n        i, j, edge = grids.HexGrid._indicesAndEdgeFromRingAndPos(3, 5)\n        self.assertEqual(i, -2)\n        self.assertEqual(j, 2)\n        self.assertEqual(edge, 2)\n\n        i, j, edge = grids.HexGrid._indicesAndEdgeFromRingAndPos(3, 3)\n        self.assertEqual(i, 0)\n        self.assertEqual(j, 2)\n        self.assertEqual(edge, 1)\n\n        i, j, edge = grids.HexGrid._indicesAndEdgeFromRingAndPos(7, 3)\n        
self.assertEqual(i, 4)\n        self.assertEqual(j, 2)\n        self.assertEqual(edge, 0)\n\n        with self.assertRaises(ValueError):\n            _ = grids.HexGrid._indicesAndEdgeFromRingAndPos(3, 13)\n\n        with self.assertRaises(ValueError):\n            _ = grids.HexGrid._indicesAndEdgeFromRingAndPos(1, 3)\n\n    def test_rotatedIndices(self):\n        \"\"\"Test that a hex grid can produce a rotated cell location.\"\"\"\n        g = grids.HexGrid.fromPitch(1.0, numRings=3)\n        center: grids.IndexLocation = g[(0, 0, 0)]\n        notRotated = self._rotateAndCheckAngle(g, center, 0)\n        self.assertEqual(notRotated, center)\n\n        # One rotation for a trivial check\n        northEast: grids.IndexLocation = g[(1, 0, 0)]\n        dueNorth: grids.IndexLocation = g[(0, 1, 0)]\n        northWest: grids.IndexLocation = g[(-1, 1, 0)]\n        actual = self._rotateAndCheckAngle(g, northEast, 1)\n        self.assertEqual(actual, dueNorth)\n        np.testing.assert_allclose(dueNorth.getLocalCoordinates(), [0.0, 1.0, 0.0])\n\n        actual = self._rotateAndCheckAngle(g, dueNorth, 1)\n        self.assertEqual(actual, northWest)\n        np.testing.assert_allclose(northWest.getLocalCoordinates(), [-hexagon.SQRT3 / 2, 0.5, 0])\n\n        # Two rotations from the \"first\" object in the first full ring\n        actual = self._rotateAndCheckAngle(g, northEast, 2)\n        self.assertEqual(actual, northWest)\n\n        # Fuzzy rotation: if we rotate an location, and then rotate it back, we get the same location\n        for _ in range(10):\n            startI = randint(-10, 10)\n            startJ = randint(-10, 10)\n            start = g[(startI, startJ, 0)]\n            rotations = randint(-10, 10)\n            postRotate = self._rotateAndCheckAngle(g, start, rotations)\n            if startI == 0 and startJ == 0:\n                self.assertEqual(postRotate, start)\n                continue\n            if rotations % 6:\n                
self.assertNotEqual(postRotate, start, msg=rotations)\n            else:\n                self.assertEqual(postRotate, start, msg=rotations)\n            reversed = self._rotateAndCheckAngle(g, postRotate, -rotations)\n            self.assertEqual(reversed, start)\n\n    def _rotateAndCheckAngle(self, g: grids.HexGrid, start: grids.IndexLocation, rotations: int) -> grids.IndexLocation:\n        \"\"\"Rotate a location and verify it lands where we expected.\"\"\"\n        finish = g.rotateIndex(start, rotations)\n        self._checkAngle(start, finish, rotations)\n        return finish\n\n    def _checkAngle(self, start: grids.IndexLocation, finish: grids.IndexLocation, rotations: int):\n        \"\"\"Compare two locations that should be some number of 60 degree CCW rotations apart.\"\"\"\n        startXY = start.getLocalCoordinates()[:2]\n        theta = math.pi / 3 * rotations\n        rotationMatrix = np.array(\n            [\n                [math.cos(theta), -math.sin(theta)],\n                [math.sin(theta), math.cos(theta)],\n            ]\n        )\n        expected = rotationMatrix.dot(startXY)\n        finishXY = finish.getLocalCoordinates()[:2]\n        np.testing.assert_allclose(finishXY, expected, atol=1e-8)\n\n    def test_inconsistentRotationGrids(self):\n        \"\"\"Test that only locations in consistent grids are rotatable.\"\"\"\n        base = grids.HexGrid.fromPitch(1, cornersUp=False)\n        larger = grids.HexGrid.fromPitch(base.pitch * 2, cornersUp=base.cornersUp)\n        fromLarger = larger[1, 0, 0]\n        with self.assertRaises(TypeError):\n            base.rotateIndex(fromLarger)\n\n        differentOrientation = grids.HexGrid.fromPitch(base.pitch, cornersUp=not base.cornersUp)\n        fromDiffOrientation = differentOrientation[0, 1, 0]\n        with self.assertRaises(TypeError):\n            base.rotateIndex(fromDiffOrientation)\n\n        axialGrid = grids.AxialGrid.fromNCells(5)\n        fromAxial = axialGrid[2, 0, 0]\n        
with self.assertRaises(TypeError):\n            base.rotateIndex(fromAxial)\n\n    def test_rotatedIndexGridAssignment(self):\n        \"\"\"Test that the grid of the rotated index is identical through rotation.\"\"\"\n        base = grids.HexGrid.fromPitch(1)\n        other = grids.HexGrid.fromPitch(base.pitch, cornersUp=base.cornersUp)\n\n        for i, j in ((0, 0), (1, 1), (2, 1), (-1, 3)):\n            loc = grids.IndexLocation(i, j, k=0, grid=other)\n            postRotate = base.rotateIndex(loc, rotations=2)\n            self.assertIs(postRotate.grid, loc.grid)\n\n    def test_rotatedIndexRoughEqualPitch(self):\n        \"\"\"Test indices can be rotated in close but not exactly equal grids.\"\"\"\n        base = grids.HexGrid.fromPitch(1.345)\n        other = grids.HexGrid.fromPitch(base.pitch * 1.00001)\n\n        for i, j in ((0, 0), (1, 1), (2, 1), (-1, 3)):\n            loc = grids.IndexLocation(i, j, k=0, grid=base)\n            fromBase = base.rotateIndex(loc, rotations=2)\n            fromOther = other.rotateIndex(loc, rotations=2)\n            self.assertEqual((fromBase.i, fromBase.j), (fromOther.i, fromOther.j))\n\n\nclass TestBoundsDefinedGrid(unittest.TestCase):\n    def test_positions(self):\n        grid = MockStructuredGrid(bounds=([0, 1, 2, 3, 4], [0, 10, 20, 50], [0, 20, 60, 90]))\n        assert_allclose(grid.getCoordinates((1, 1, 1)), (1.5, 15.0, 40.0))\n\n    def test_base(self):\n        grid = MockStructuredGrid(bounds=([0, 1, 2, 3, 4], [0, 10, 20, 50], [0, 20, 60, 90]))\n        assert_allclose(grid.getCellBase((1, 1, 1)), (1.0, 10.0, 20.0))\n\n    def test_positionsMixedDefinition(self):\n        grid = MockStructuredGrid(unitSteps=((1.0, 0.0), (0.0, 1.0)), bounds=(None, None, [0, 20, 60, 90]))\n        assert_allclose(grid.getCoordinates((1, 1, 1)), (1, 1, 40.0))\n\n    def test_getIndexBounds(self):\n        grid = MockStructuredGrid(bounds=([0, 1, 2, 3, 4], [0, 10, 20, 50], [0, 20, 60, 90]))\n        boundsIJK = 
grid.getIndexBounds()\n        self.assertEqual(boundsIJK, ((0, 5), (0, 4), (0, 4)))\n\n\nclass TestThetaRZGrid(unittest.TestCase):\n    \"\"\"A set of tests for the RZTheta Grid.\"\"\"\n\n    def test_positions(self):\n        grid = grids.ThetaRZGrid(bounds=(np.linspace(0, 2 * math.pi, 13), [0, 2, 2.5, 3], [0, 10, 20, 30]))\n        assert_allclose(grid.getCoordinates((1, 0, 1)), (math.sqrt(2) / 2, math.sqrt(2) / 2, 15.0))\n\n        # test round trip ring position\n        ringPos = (1, 1)\n        indices = grid.getIndicesFromRingAndPos(*ringPos)\n        ringPosFromIndices = grid.getRingPos(indices)\n        self.assertEqual(ringPos, ringPosFromIndices)\n\n\nclass TestCartesianGrid(unittest.TestCase):\n    \"\"\"A set of tests for the Cartesian Grid.\"\"\"\n\n    def test_ringPosNoSplit(self):\n        grid = grids.CartesianGrid.fromRectangle(1.0, 1.0, isOffset=True)\n\n        expectedRing = [\n            [3, 3, 3, 3, 3, 3],\n            [3, 2, 2, 2, 2, 3],\n            [3, 2, 1, 1, 2, 3],\n            [3, 2, 1, 1, 2, 3],\n            [3, 2, 2, 2, 2, 3],\n            [3, 3, 3, 3, 3, 3],\n        ]\n\n        expectedPos = [\n            [6, 5, 4, 3, 2, 1],\n            [7, 4, 3, 2, 1, 20],\n            [8, 5, 2, 1, 12, 19],\n            [9, 6, 3, 4, 11, 18],\n            [10, 7, 8, 9, 10, 17],\n            [11, 12, 13, 14, 15, 16],\n        ]\n        expectedPos.reverse()\n\n        for j in range(-3, 3):\n            for i in range(-3, 3):\n                ring, pos = grid.getRingPos((i, j))\n                self.assertEqual(ring, expectedRing[j + 3][i + 3])\n                self.assertEqual(pos, expectedPos[j + 3][i + 3])\n\n        # Bonus test of getMinimumRings() using the above grid\n        self.assertEqual(grid.getMinimumRings(7), 2)\n        self.assertEqual(grid.getMinimumRings(17), 3)\n\n    def test_ringPosSplit(self):\n        grid = grids.CartesianGrid.fromRectangle(1.0, 1.0)\n\n        expectedRing = [\n            [4, 4, 4, 4, 4, 4, 4],\n    
        [4, 3, 3, 3, 3, 3, 4],\n            [4, 3, 2, 2, 2, 3, 4],\n            [4, 3, 2, 1, 2, 3, 4],\n            [4, 3, 2, 2, 2, 3, 4],\n            [4, 3, 3, 3, 3, 3, 4],\n            [4, 4, 4, 4, 4, 4, 4],\n        ]\n\n        expectedPos = [\n            [7, 6, 5, 4, 3, 2, 1],\n            [8, 5, 4, 3, 2, 1, 24],\n            [9, 6, 3, 2, 1, 16, 23],\n            [10, 7, 4, 1, 8, 15, 22],\n            [11, 8, 5, 6, 7, 14, 21],\n            [12, 9, 10, 11, 12, 13, 20],\n            [13, 14, 15, 16, 17, 18, 19],\n        ]\n        expectedPos.reverse()\n\n        for j in range(-3, 4):\n            for i in range(-3, 4):\n                ring, pos = grid.getRingPos((i, j))\n                self.assertEqual(ring, expectedRing[j + 3][i + 3])\n                self.assertEqual(pos, expectedPos[j + 3][i + 3])\n\n    def test_symmetry(self):\n        # PERIODIC, no split\n        grid = grids.CartesianGrid.fromRectangle(\n            1.0,\n            1.0,\n            symmetry=str(geometry.SymmetryType(geometry.DomainType.QUARTER_CORE, geometry.BoundaryType.PERIODIC)),\n        )\n\n        expected = {\n            (0, 0): {(-1, 0), (-1, -1), (0, -1)},\n            (1, 0): {(-1, 1), (-2, -1), (0, -2)},\n            (2, 1): {(-2, 2), (-3, -2), (1, -3)},\n            (2, 2): {(-3, 2), (-3, -3), (2, -3)},\n            (0, 1): {(-2, 0), (-1, -2), (1, -1)},\n            (-2, 2): {(-3, -2), (1, -3), (2, 1)},\n        }\n\n        for idx, expectedEq in expected.items():\n            equivalents = {i for i in grid.getSymmetricEquivalents(idx)}\n\n            self.assertEqual(expectedEq, equivalents)\n\n        # PERIODIC, split\n        grid = grids.CartesianGrid.fromRectangle(\n            1.0,\n            1.0,\n            symmetry=geometry.SymmetryType(\n                geometry.DomainType.QUARTER_CORE,\n                geometry.BoundaryType.PERIODIC,\n                throughCenterAssembly=True,\n            ),\n        )\n\n        expected = {\n            (0, 0): 
set(),\n            (1, 0): {(0, 1), (-1, 0), (0, -1)},\n            (2, 2): {(-2, 2), (-2, -2), (2, -2)},\n            (2, 1): {(-1, 2), (-2, -1), (1, -2)},\n            (-1, 3): {(-3, -1), (1, -3), (3, 1)},\n            (0, 2): {(-2, 0), (0, -2), (2, 0)},\n        }\n\n        for idx, expectedEq in expected.items():\n            equivalents = {i for i in grid.getSymmetricEquivalents(idx)}\n\n            self.assertEqual(expectedEq, equivalents)\n\n        # REFLECTIVE, no split\n        grid = grids.CartesianGrid.fromRectangle(\n            1.0,\n            1.0,\n            symmetry=geometry.SymmetryType(geometry.DomainType.QUARTER_CORE, geometry.BoundaryType.REFLECTIVE),\n        )\n\n        expected = {\n            (0, 0): {(-1, 0), (-1, -1), (0, -1)},\n            (1, 0): {(-2, 0), (-2, -1), (1, -1)},\n            (-2, 2): {(-2, -3), (1, -3), (1, 2)},\n        }\n\n        for idx, expectedEq in expected.items():\n            equivalents = {i for i in grid.getSymmetricEquivalents(idx)}\n\n            self.assertEqual(expectedEq, equivalents)\n\n        # REFLECTIVE, split\n        grid = grids.CartesianGrid.fromRectangle(\n            1.0,\n            1.0,\n            symmetry=geometry.SymmetryType(\n                geometry.DomainType.QUARTER_CORE,\n                geometry.BoundaryType.REFLECTIVE,\n                throughCenterAssembly=True,\n            ),\n        )\n\n        expected = {\n            (0, 0): set(),\n            (1, 0): {(-1, 0)},\n            (-1, 2): {(-1, -2), (1, -2), (1, 2)},\n            (-2, 0): {(2, 0)},\n            (0, -2): {(0, 2)},\n        }\n\n        for idx, expectedEq in expected.items():\n            equivalents = {i for i in grid.getSymmetricEquivalents(idx)}\n\n            self.assertEqual(expectedEq, equivalents)\n\n        # Full core\n        grid = grids.CartesianGrid.fromRectangle(\n            1.0,\n            1.0,\n            symmetry=geometry.FULL_CORE,\n        )\n        
self.assertEqual(grid.getSymmetricEquivalents((5, 6)), [])\n\n        # 1/8 core not supported yet\n        grid = grids.CartesianGrid.fromRectangle(\n            1.0,\n            1.0,\n            symmetry=geometry.SymmetryType(\n                geometry.DomainType.EIGHTH_CORE,\n                geometry.BoundaryType.REFLECTIVE,\n            ),\n        )\n        with self.assertRaises(NotImplementedError):\n            grid.getSymmetricEquivalents((5, 6))\n\n\nclass TestAxialGrid(unittest.TestCase):\n    def test_simpleBounds(self):\n        N_CELLS = 5\n        g = grids.AxialGrid.fromNCells(N_CELLS)\n        _x, _y, z = g.getBounds()\n        self.assertEqual(len(z), N_CELLS + 1)\n        assert_array_equal(z, [0, 1, 2, 3, 4, 5])\n        self.assertTrue(g.isAxialOnly)\n\n    def test_getLocations(self):\n        N_CELLS = 10\n        g = grids.AxialGrid.fromNCells(N_CELLS)\n        for count in range(N_CELLS):\n            index = g[(0, 0, count)]\n            x, y, z = index.getLocalCoordinates()\n            self.assertEqual(x, 0.0)\n            self.assertEqual(y, 0.0)\n            self.assertEqual(z, count + 0.5)\n"
  },
  {
    "path": "armi/reactor/grids/thetarz.py",
    "content": "# Copyright 2023 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport math\nfrom typing import NoReturn\n\nimport numpy as np\n\nfrom armi.reactor.grids.locations import IJKType, IJType\nfrom armi.reactor.grids.structuredGrid import StructuredGrid\n\nTAU = math.tau\n\n\nclass ThetaRZGrid(StructuredGrid):\n    \"\"\"\n    A grid characterized by azimuthal, radial, and zeta indices.\n\n    The angular meshes are limited to 0 to 2pi radians. R and Zeta are as in other meshes.\n\n    See Figure 2.2 in Derstine 1984, ANL. [DIF3D]_.\n    \"\"\"\n\n    def getSymmetricEquivalents(self, indices: IJType) -> NoReturn:\n        raise NotImplementedError(f\"{self.__class__.__name__} does not support symmetric equivalents\")\n\n    def getRingPos(self, indices):\n        return (indices[1] + 1, indices[0] + 1)\n\n    @staticmethod\n    def getIndicesFromRingAndPos(ring: int, pos: int) -> IJType:\n        return (pos - 1, ring - 1)\n\n    def getCoordinates(self, indices, nativeCoords=False) -> np.ndarray:\n        meshCoords = theta, r, z = super().getCoordinates(indices, nativeCoords=nativeCoords)\n        if not 0 <= theta <= TAU:\n            raise ValueError(\"Invalid theta value: {}. 
Check mesh.\".format(theta))\n        if nativeCoords:\n            # return Theta, R, Z values directly.\n            return meshCoords\n        else:\n            # return x, y ,z\n            return np.array((r * math.cos(theta), r * math.sin(theta), z))\n\n    def indicesOfBounds(\n        self,\n        rad0: float,\n        rad1: float,\n        theta0: float,\n        theta1: float,\n        sigma: float = 1e-4,\n    ) -> IJKType:\n        \"\"\"\n        Return indices corresponding to upper and lower radial and theta bounds.\n\n        Parameters\n        ----------\n        rad0 : float\n            inner radius of control volume\n        rad1 : float\n            outer radius of control volume\n        theta0 : float\n            inner azimuthal location of control volume in radians\n        theta1 : float\n            inner azimuthal of control volume in radians\n        sigma: float\n            acceptable relative error (i.e. if one of the positions in the mesh are within\n            this error it'll act the same if it matches a position in the mesh)\n\n        Returns\n        -------\n        tuple : i, j, k of given bounds\n        \"\"\"\n        i = int(np.abs(self._bounds[0] - theta0).argmin())\n        j = int(np.abs(self._bounds[1] - rad0).argmin())\n\n        return (i, j, 0)\n\n    @staticmethod\n    def locatorInDomain(*args, **kwargs) -> bool:\n        \"\"\"\n        ThetaRZGrids do not check for bounds, though they could if that becomes a\n        problem.\n        \"\"\"\n        return True\n\n    @staticmethod\n    def getMinimumRings(n: int) -> NoReturn:\n        raise NotImplementedError\n\n    @staticmethod\n    def getPositionsInRing(ring: int) -> NoReturn:\n        raise NotImplementedError\n\n    @staticmethod\n    def overlapsWhichSymmetryLine(indices: IJType) -> None:\n        return None\n\n    @staticmethod\n    def pitch() -> NoReturn:\n        raise NotImplementedError()\n"
  },
  {
    "path": "armi/reactor/parameters/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"\nThe parameters hold state info for everything in ARMI's composites structure.\n\n.. list-table:: Example Parameters\n    :widths: 50 50\n    :header-rows: 1\n\n    * - Object\n      - Parameters\n    * - :py:class:`~armi.reactor.reactors.Reactor`\n      - :py:mod:`Reactor Parameters <armi.reactor.reactorParameters>`\n    * - :py:class:`~armi.reactor.assemblies.Assembly`\n      - :py:mod:`Assembly Parameters <armi.reactor.assemblyParameters>`\n    * - :py:class:`~armi.reactor.blocks.Block`\n      - :py:mod:`Block Parameters <armi.reactor.blockParameters>`\n    * - :py:class:`~armi.reactor.components.Component`\n      - :py:mod:`Component Parameters <armi.reactor.components.componentParameters>`\n\nBasic Usage\n===========\nGiven an ARMI reactor model object such as ``r``, one may set or get a parameter just\nlike any other instance attribute on ``r.p``::\n\n    >>> r.p.cycleLength\n    350.0\n\nAlternatively, dictionary-like access is supported::\n\n    >>> r.p[\"cycleLength\"]\n    350.0\n\n.. note::\n\n    The data themselves are stored in special hidden fields, which are typically\n    accessed through the ``Parameter`` definition that describes them. The name for such\n    a parameter field looks like ``\"_p_\" + paramName``. 
For example, to get\n    ``cycleLength`` one could do::\n\n        >>> r.core.p._p_cycleLength\n        350.0\n\n    However, it is not recommended to access parameters in this way, as it circumvents\n    the setters and getters that may have been implemented for a given parameter. One\n    should always use the style from the first two examples to access parameter values.\n\n    Furthermore, ``ParameterCollection`` classes have some extra controls to make sure\n    that someone doesn't try to set random extra attributes on them. Only parameters\n    that were defined before a particular ``ParameterCollection`` class is instantiated\n    may be accessed. The rationale behind this is documented in the Design\n    Considerations section below.\n\nMost parameters in ARMI are block parameters. These include flux, power, temperatures,\nnumber densities, etc. Parameters can be any basic type (float, int, str), or an array\nof any such types. The type within a given array should be homogeneous. Examples::\n\n    >>> b.p.flux = 2.5e13\n    >>> b.p.fuelTemp = numpy.array(range(217), dtype=float)\n    >>> b.p.fuelTemp[58] = 600\n\nThe parameter attributes can be accessed via the ``paramDefs`` property. Perhaps a user is\ncurious about the units of a block parameter:\n\n    >>> defs = b.p.paramDefs\n    >>> defs[\"heightBOL\"]\n    <ParamDef name:heightBOL collectionType:BlockParameterCollection units:cm assigned:29>\n\n    # Or, more simply:\n    >>> defs[\"heightBOL\"].units\n    'cm'\n\n.. note::\n\n    There have been many discussions on what the specific name of this module/system\n    should be. 
After great deliberation, the definition of parameter seemed very\n    suitable:\n\n        One of a set of measurable factors, such as temperature and pressure, that\n        define a system and determine its behavior and are varied in an experiment ~\n        `thefreedictionary`_\n\n        any of a set of physical properties whose values determine the characteristics\n        or behavior of something <parameters of the atmosphere such as temperature,\n        pressure, and density> ~ `Meriam-Webster`_\n\nThe parameters system is composed of several classes:\n\n:py:class:`~armi.reactor.parameters.parameterDefinitions.Parameter` :\n    These store metadata about each parameter including the name, description, its\n    units, etc. :py:class:`Parameters <parameterDefinitions.Parameter>` also define some\n    behaviors such as setters/getters, and what to do when retrieving a value that has\n    not been set, and whether or not to store the parameter in the database. The\n    :py:class:`parameterDefinitions.Parameter` object implement the Python descriptor\n    protocol (the magic behind ``@property``), and are stored on corresponding\n    :py:class:`parameterCollections.ParameterCollection` classes to access their\n    underlying values.\n\n:py:class:`~armi.reactor.parameters.parameterDefinitions.ParameterDefinitionCollection` :\n    As the name suggests, these represent a collection of parameter definitions. 
Each\n    :py:class:`ParameterCollection` gets a :py:class:`ParameterDefinitionCollection`,\n    and there are also module-global collections, such as ``ALL_DEFINITIONS``\n    (containing all defined parameters over all ``ArmiObject`` classes), and others\n    which break parameters down by their categories, associated composite types, etc.\n\n:py:class:`~armi.reactor.parameters.parameterDefinitions.ParameterBuilder` :\n    These are used to aid in the creation of :py:class:`Parameter` instances, and store\n    default arguments to the :py:class:`Parameter` constructor.\n\n:py:class:`~armi.reactor.parameters.parameterCollections.ParameterCollection` :\n    These are used to store parameter values for a specific instance of an item in the\n    ARMI composite structure, and have features for accessing those parameters and their\n    definitions. The actual parameter values are stored in secret `\"_p_\"+paramName`\n    fields, and accessed through the Parameter definition, which functions as a\n    descriptor. Parameter definitions are stored as class attributes so that they can be\n    shared amongst instances. All parameter fields are filled with an initial value in\n    their ``__init__()`` to benefit from the split-key dictionaries introduced in\n    PEP-412. This and protections to prevent setting any other attributes form a sort of\n    \"``__slots__`` lite\".\n\n:py:class:`~armi.reactor.parameters.resolveCollections.ResolveParametersMeta` :\n    This metaclass is used by the base ``ArmiObject`` class to aid in the creation of a\n    hierarchy of ``ParameterCollection`` classes that appropriately represent a specific\n    ``ArmiObject`` subclass's parameters. In short, it looks at the class attributes of\n    an ``ArmiObject`` subclass to see if there is a ``pDefs`` attribute (which should be\n    an instance of ``ParameterDefinitionCollection``). 
If the ``pDefs`` attribute\n    exists, the class will get its own ``ParameterCollection`` class, which will itself\n    be a subclass of the parameter collection class associated with the most immediate\n    ancestor that also had its own ``pDefs``. If an ``ArmiObject`` subclass has no\n    ``pDefs`` attribute of its own, it will simply be associated with the parameter\n    collection class of its parent.\n\nThis rather roundabout approach is used to address many of the design considerations\nlaid out below.  Namely that pains be taken to minimize memory consumption, properties\nbe used to control data access, and that it be relatively difficult to introduce\nprogramming errors related to improperly-defined or colliding parameters.\n\nDesign Considerations\n=====================\n\n.. list-table:: Design considerations\n    :header-rows: 1\n\n    * - Issue\n      - Resolution/Consequences\n    * - Metadata about parameters is necessary for determining whether a parameter\n        should be stored in the database, and to allow the user to toggle this switch.\n      - Parameters must be uniquely named within a ``Composite`` subclass.\n\n        Also, we need to have :py:class:`Parameter` classes to store this metadata.\n    * - There should not be any naming restrictions between different ``Composite`` subclasses.\n      - Parameters must be defined or associated with a specific ``ParameterCollection`` subclass.\n    * - PyLint cannot find programming errors related to incorrect strings.\n      - We would like to use methods/functions for controlling state information.\n\n        This also eliminated the possibility of using resource files to define the\n        properties, otherwise we would be mapping names between some resource file and\n        the associated parameter/property definition.\n    * - Creating getters and setters for every parameter would be overwhelming and\n        unsustainable.\n      - We will use Python descriptors, which have *most* of the 
functionality used in\n        getters and setters.\n\n        :py:class:`ParameterCollection` knows how to generate descriptors for itself,\n        based on a :py:class:`ParameterDefinitionCollection`.\n    * - The majority of memory consumption occurs in parameters, strings and\n        dictionaries.  Minimizing the storage requirements of the parameters is desirable.\n      - Python ``__slots__`` are a language feature which eliminates the need for each\n        class instance to have a ``__dict__``. This saves memory when there are many\n        instances of a class. Slot access can sometimes be faster as well.\n\n        In the past, ``__slots__`` were used to store parameter values. This became\n        rather onerous when we wanted to support parameter definitions from plugins. We\n        now use the traditional ``__dict__``, but take pains to make sure that we can\n        get the memory savings from the key-sharing dicts provided by PEP-412. Namely,\n        all attributes from the parameter definitions and other state are initialized to\n        __something__ within the ``__init__()`` routine.\n    * - Parameters are just fancy properties with meta data.\n      - Implementing the descriptor interface on a :py:class:`Parameter` removes the\n        need to construct a :py:class:`Parameter` without a name, then come back through\n        with the ``applyParameters()`` method to apply the\n        :py:class:`Parameter` as a descriptor.\n\n.. _thefreedictionary: http://www.thefreedictionary.com/parameter\n.. 
_Meriam-Webster: http://www.merriam-webster.com/dictionary/parameter\n\"\"\"\n\n# ruff: noqa: F401\nfrom armi.reactor.parameters.exceptions import (\n    ParameterDefinitionError,\n    ParameterError,\n    UnknownParameterError,\n)\nfrom armi.reactor.parameters.parameterCollections import (\n    ParameterCollection,\n    applyAllParameters,\n    collectPluginParameters,\n)\nfrom armi.reactor.parameters.parameterDefinitions import (\n    ALL_DEFINITIONS,\n    NEVER,\n    SINCE_ANYTHING,\n    SINCE_BACKUP,\n    SINCE_INITIALIZATION,\n    SINCE_LAST_DISTRIBUTE_STATE,\n    SINCE_LAST_GEOMETRY_TRANSFORMATION,\n    Category,\n    NoDefault,\n    Parameter,\n    ParameterDefinitionCollection,\n    ParamLocation,\n    Serializer,\n)\n\nforType = ALL_DEFINITIONS.forType\ninCategory = ALL_DEFINITIONS.inCategory\nbyNameAndType = ALL_DEFINITIONS.byNameAndType\nresetAssignmentFlag = ALL_DEFINITIONS.resetAssignmentFlag\nsince = ALL_DEFINITIONS.since\n\n\ndef reset():\n    \"\"\"Reset the status of all parameter definitions.\n\n    This may become necessary when the state of the global parameter definitions becomes\n    invalid.  Typically this happens when running multiple cases for the same import of\n    this module, e.g. in unit tests. In this case things like the assigned flags will\n    persist across test cases, leading to strange and incorrect behavior.\n    \"\"\"\n    for pd in ALL_DEFINITIONS:\n        pd.assigned = NEVER\n"
  },
  {
    "path": "armi/reactor/parameters/exceptions.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nclass ParameterDefinitionError(Exception):\n    \"\"\"Exception raised due to a programming error.\n\n    Programming errors include:\n\n    * Attempting to create two parameters with the same name.\n    * Attempting to create a parameter outside of a :py:class:`ParameterFactory`\n      ``with`` statement.\n\n    \"\"\"\n\n    def __init__(self, message):\n        Exception.__init__(\n            self,\n            \"This is a programming error, and needs to be addressed by the developer encountering it:\\n\" + message,\n        )\n\n\nclass ParameterError(Exception):\n    \"\"\"Exception raised due to a usage error.\n\n    Usage errors include:\n\n    * Attempting to get the value of a parameter that has not been defined a value, and\n      has no default.\n    * Attempting to set the value of a parameter that cannot be set through\n      ``setParam``.\n\n    \"\"\"\n\n\nclass UnknownParameterError(ParameterError):\n    \"\"\"Exception raised due to a usage error.\n\n    Usage errors include:\n\n    * Attempting to set the value of a parameter that has no definition and no rename\n\n    \"\"\"\n"
  },
  {
    "path": "armi/reactor/parameters/parameterCollections.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport copy\nimport pickle\nimport sys\nfrom typing import Any, Callable, Iterator, List, Optional, Set\n\nimport numpy as np\n\nfrom armi import runLog\nfrom armi.reactor.parameters import exceptions, parameterDefinitions\nfrom armi.reactor.parameters.parameterDefinitions import (\n    NEVER,\n    SINCE_ANYTHING,\n    SINCE_BACKUP,\n    SINCE_LAST_DISTRIBUTE_STATE,\n)\nfrom armi.utils import units\n\nGLOBAL_SERIAL_NUM = -1\n\"\"\"\nThe serial number for all ParameterCollections\n\nThis is a counter of the number of instances of all types. They are useful for tracking items\nthrough the history of a database.\n\nWarning\n-------\nThis is not MPI safe. We also have not done anything to make it thread safe, except that the GIL\nexists.\n\"\"\"\n\n\ndef _getBaseParameterDefinitions():\n    pDefs = parameterDefinitions.ParameterDefinitionCollection()\n    pDefs.add(\n        parameterDefinitions.Parameter(\n            \"serialNum\",\n            units=units.UNITLESS,\n            description=(\n                \"Unique serial integer for all objects in the ARMI Composite Tree. 
\"\n                \"The numbers are only unique for a simulation, on an MPI rank.\"\n            ),\n            location=None,\n            saveToDB=True,\n            default=parameterDefinitions.NoDefault,\n            setter=parameterDefinitions.NoDefault,\n            categories=set(),\n        )\n    )\n\n    return pDefs\n\n\nclass _ParameterCollectionType(type):\n    \"\"\"\n    Simple metaclass to make sure that expected class attributes are present.\n\n    These attributes shouldn't  be shared among different subclasses, so this makes sure that each\n    subclass gets its own.\n    \"\"\"\n\n    def __new__(mcl, name, bases, attrs):\n        attrs[\"pDefs\"] = attrs.get(\"pDefs\") or None\n        attrs[\"_ArmiObject\"] = None\n        attrs[\"_allFields\"] = []\n\n        return type.__new__(mcl, name, bases, attrs)\n\n\nclass ParameterCollection(metaclass=_ParameterCollectionType):\n    \"\"\"An empty class for holding state information in the ARMI data structure.\n\n    A parameter collection stores one or more formally-defined values (\"parameters\"). Until a given\n    ParameterCollection subclass has been instantiated, new parameters may be added to its parameter\n    definitions (e.g., from plugins). Upon first instantiation, ``applyParameters()`` will be\n    called, binding the parameter definitions to the Collection class as descriptors.\n\n    It is illegal to redefine a parameter with the same name in the same class, or its subclasses,\n    and attempting to do so should result in exceptions in ``applyParameters()``.\n\n    Attributes\n    ----------\n    _backup : str\n        A pickle dump of the __getstate__, or None.\n\n    _hist : dict\n        Keys are ``(paramName, timeStep)``.\n\n    assigned : int Flag\n        indicates the synchronization state of the parameter collection. 
This is used to reduce the\n        amount of information that is transmitted during database, and MPI operations as well as\n        determine the collection's state when exiting a ``Composite.retainState``.\n\n        This attribute when used with the ``Parameter.assigned`` attribute allows us to efficiently\n        perform many operations.\n\n    See Also\n    --------\n    armi.reactors.parameters\n    \"\"\"\n\n    pDefs: parameterDefinitions.ParameterDefinitionCollection = _getBaseParameterDefinitions()\n    _allFields: List[str] = []\n\n    _ArmiObject = None\n    \"\"\"The ArmiObject class that this ParameterCollection belongs to.\n\n    Crucially **not** the instance that owns this collection. For any\n    ``ArmiObject``, the following are true::\n\n        >>> self.p._ArmiObject is not self\n        >>> isinstance(self, self.p._ArmiObject)\n\n    \"\"\"\n\n    # A set of all instance attributes that are settable on an instance. This prevents inadvertent\n    # setting of values that aren't proper parameters. Named _slots, as it is used to emulate some\n    # of the behaviors of __slots__.\n    _slots: Set[str] = set()\n\n    def __init__(self, _state: Optional[List[Any]] = None):\n        \"\"\"\n        Create a new ParameterCollection instance.\n\n        Parameters\n        ----------\n        _state:\n            Optional list of parameter values, ordered by _allFields. Passed values\n            should come from a call to __getstate__(). 
This should only be used\n            internally to this model.\n        \"\"\"\n        # add a hook to make this readOnly\n        self._slots.add(\"readOnly\")\n        self.readOnly = False\n\n        if self.pDefs is None or not self.pDefs.locked:\n            type(self).applyParameters()\n\n        assert self.pDefs.locked, (\n            \"It looks like parameter definitions haven't been \"\n            \"set up yet for {}; be sure that applyAllParameters() is being called \"\n            \"somewhere.\".format(type(self))\n        )\n\n        self._backup = None\n        # used by the history tracker when a parameter key is a tuple (name, timestep)\n        self._hist = {}\n\n        # Initialize all parameter values to **something**. This is crucial to getting\n        # the split-key dictionary memory savings in lieu of using __slots__!\n        if _state is None:\n            for pDef in self.paramDefs:\n                setattr(self, pDef.fieldName, pDef.default)\n        else:\n            for key, val in zip(self._allFields, _state):\n                self.__dict__[key] = val\n\n        self.assigned = NEVER\n\n        global GLOBAL_SERIAL_NUM\n        self.serialNum = GLOBAL_SERIAL_NUM = GLOBAL_SERIAL_NUM + 1\n\n        if self.serialNum > sys.maxsize:\n            runLog.warning(\"Created serial number larger than an integer. Current serial: {}\".format(GLOBAL_SERIAL_NUM))\n\n    @classmethod\n    def applyParameters(cls):\n        \"\"\"\n        Apply the definitions from a ParameterDefinitionCollection as properties.\n\n        This places the parameter definitions in the associated\n        ParameterDefinitionCollection onto this ParameterCollection class as class\n        attributes. In the process it recursively calls the same method on base classes,\n        and adds their parameter definitions as well. 
Since each instance of Parameter\n        implements the descriptor protocol, these are effectively behaving as\n        ``@property``-style accessors.\n\n        This function must act on each ParameterCollection subclass before the first\n        instance is created. Subsequent calls will short-circuit. Before calling this\n        method, it is possible to add more Parameters to the associated\n        ParameterDefinitionCollection, ``cls.pDefs``. After calling this method, the\n        ParameterDefinitionCollection will be locked, preventing any further additions.\n\n        This method is called in the ``__init__()`` method, but can also be called\n        proactively to compile the parameter definitions earlier, if desired.\n\n        See Also\n        --------\n        armi.reactor.parameters.parameterDefinitions.ParameterDefinitionCollection\n        \"\"\"\n        if bool(cls._allFields):\n            # Short-circuit if this has already been done\n            return\n\n        # Ensure that we have at least something to start with\n        cls.pDefs = cls.pDefs or parameterDefinitions.ParameterDefinitionCollection()\n\n        # Collect definitions from base ParameterCollection classes. 
E.g.,\n        # HelixParameterCollection also gets parameter definitions from\n        # ComponentParameterCollection.\n        if not cls.pDefs.locked:\n            basePDefs = parameterDefinitions.ParameterDefinitionCollection()\n            for base in [b for b in cls.__bases__ if issubclass(b, ParameterCollection)]:\n                base.applyParameters()\n                if base.pDefs is not None:\n                    basePDefs.extend(base.pDefs)\n\n            # Check for duplicate parameter definitions\n            seen = set()\n            duplicates = set()\n            for name in cls.pDefs.names:\n                if name in seen:\n                    duplicates.add(name)\n                seen.add(name)\n            if duplicates:\n                raise exceptions.ParameterDefinitionError(\n                    \"The following parameters were multiply-defined:\\n    {}\".format(duplicates)\n                )\n            overriddenParameters = set(cls.pDefs.names).intersection(set(basePDefs.names))\n            if overriddenParameters:\n                raise exceptions.ParameterDefinitionError(\n                    \"The following parameters \"\n                    \"have been redefined in a subclass: {}\\n\"\n                    \"current type: {}\\n\"\n                    \"bases: {}\".format(overriddenParameters, cls, cls.__bases__)\n                )\n\n        # Bind the parameter definitions as descriptors to the collection\n        for pd in cls.pDefs:\n            pd.collectionType = cls\n            setattr(cls, pd.name, pd)\n            parameterDefinitions.ALL_DEFINITIONS.add(pd)\n\n        cls.pDefs.extend(basePDefs)\n\n        # prevent the addition of new parameter definitions. 
This will lead to errors\n        # early, rather than mysterious attribute access errors later.\n        cls.pDefs.lock()\n        cls._allFields = list(sorted([\"_backup\", \"_hist\", \"assigned\"] + [pd.fieldName for pd in cls.pDefs]))\n\n        cls._slots = set(cls._allFields).union({pd.name for pd in cls.pDefs})\n        cls._slots.add(\"readOnly\")\n\n    def __repr__(self):\n        return \"<{} assigned:{}>\".format(self.__class__.__name__, self.assigned)\n\n    def __setattr__(self, key, value):\n        assert key in self._slots, \"Trying to set undefined attribute `{}` on a ParameterCollection!\".format(key)\n\n        if getattr(self, \"readOnly\", False):\n            if key == \"readOnly\":\n                raise RuntimeError(\"A read-only Parameter Collection cannot be made writeable.\")\n            else:\n                raise RuntimeError(f\"Cannot set a read-only parameter {key}.\")\n\n        object.__setattr__(self, key, value)\n\n    def __deepcopy__(self, memo):\n        \"\"\"\n        Returns a new instance of ParameterCollection with a new ``serialNum``.\n\n        Notes\n        -----\n        This operates under the assumption that ``__deepcopy__`` is used when needing a\n        new instance, which should get its own serial number. This follows from the\n        assumption that parameter collections are typically copied when copying an\n        ArmiObject to which it may belong. In this case, serialNum needs to be\n        incremented so that the objects are unique. serialNum is special.\n        \"\"\"\n        # Grabbing state first and passing it into __init__() as a performance\n        # optimization. This avoids the extra work in __init__() of defaulting all of\n        # the parameters, only to set them in __setstate__(). 
Instead we pass them in,\n        # so that __init__() can set them.\n        state = copy.deepcopy(self.__getstate__(), memo)\n        memo[id(self)] = newPC = self.__class__(_state=state)\n        return newPC\n\n    def __reduce__(self):\n        \"\"\"\n        Implement pickle __reduce__ protocol.\n\n        We need to do this because most subclasses of ParameterCollection are created\n        from a metaclass, and are therefore not top-level objects and not trivially\n        picklable. This implementation works by asking the ArmiObject itself to give an\n        instance of its associated ParameterCollection class, then setting its state.\n        \"\"\"\n        assert type(self)._ArmiObject is not None, (\n            \"Cannot reduce {}, since it does not have an associated ArmiObject, and is \"\n            \"therefore not tied to the world of the living.\".format(type(self))\n        )\n        return type(self)._ArmiObject.getParameterCollection, (), self.__getstate__()\n\n    def __getstate__(self):\n        # reduce data to one giant list, ordered by _allFields (sorted). 
Use NoDefault\n        # when a value is missing\n        data = [getattr(self, fieldName, parameterDefinitions.NoDefault) for fieldName in self._allFields]\n        return data\n\n    def __setstate__(self, state):\n        # does the reverse of __getstate__\n        for key, val in zip(self._allFields, state):\n            setattr(self, key, val)\n\n    def __getitem__(self, name):\n        try:\n            return getattr(self, name)\n        except TypeError:  # allows for history parameter tuples\n            return self._hist[name]\n        except AttributeError:\n            raise exceptions.UnknownParameterError(\"Parameter {} is not defined for {}\".format(name, type(self)))\n\n    def __setitem__(self, name, value):\n        try:\n            setattr(self, name, value)\n        except TypeError:  # allows for history parameter tuples\n            if isinstance(name, tuple):\n                self._hist[name] = value\n            else:\n                raise\n        except AttributeError:  # for clarity\n            raise exceptions.UnknownParameterError(\n                \"Cannot locate definition for parameter {} in {}\".format(name, type(self))\n            )\n\n    def __delitem__(self, name):\n        if isinstance(name, str):\n            pd = self.paramDefs[name]\n            if hasattr(self, pd.fieldName):\n                pd.assigned = SINCE_ANYTHING\n                delattr(self, pd.fieldName)\n        else:\n            del self._hist[name]\n\n    def __contains__(self, name):\n        if isinstance(name, str):\n            return hasattr(self, \"_p_\" + name)\n        else:\n            return name in self._hist\n\n    def __eq__(self, other: \"ParameterCollection\"):\n        if not isinstance(other, self.__class__):\n            return False\n\n        for pd in self.paramDefs:\n            fieldName = pd.fieldName\n            haveValue = (hasattr(self, fieldName), hasattr(other, fieldName))\n            if all(haveValue):\n                
if getattr(self, fieldName) != getattr(other, fieldName):\n                    return False\n            elif any(haveValue):\n                return False\n\n        return True\n\n    def __iter__(self) -> Iterator[str]:\n        \"\"\"Iterate over names of assigned parameters defined on this collection.\"\"\"\n        return (\n            pd.name\n            for pd in self.paramDefs\n            if pd.assigned != NEVER and getattr(self, pd.fieldName) is not parameterDefinitions.NoDefault\n        )\n\n    def items(self):\n        keys = list(iter(self))\n        return zip(keys, (getattr(self, key) for key in keys))\n\n    def get(self, key, default=None):\n        \"\"\"Return a requested parameter value, if possible.\n\n        This functions similarly to the same method on a dict or similar. If there is a\n        value present for the requested parameter on this parameter collection, return\n        it. Otherwise, return the supplied default. The main reason for using this is\n        for safely attempting to access a parameter that doesn't have a default value,\n        and may not have been set. 
Other methods for accessing parameters would raise\n        an exception.\n        \"\"\"\n        try:\n            return self[key]\n        except exceptions.ParameterError:\n            return default\n\n    def keys(self):\n        return list(iter(self)) + list(self._hist.keys())\n\n    def values(self):\n        paramVals = list(getattr(self, pd.fieldName) for pd in self.paramDefs if hasattr(self, pd.fieldName))\n        return paramVals + list(self._hist.values())\n\n    def update(self, someDict):\n        for k, val in someDict.items():\n            self[k] = val\n\n    @property\n    def paramDefs(self) -> parameterDefinitions.ParameterDefinitionCollection:\n        r\"\"\"\n        Get the :py:class:`ParameterDefinitionCollection` associated with this instance.\n\n        This serves as both an alias for the pDefs class attribute, and as a read-only\n        accessor for them. Most non-parameter-system related interactions with an\n        object's ``ParameterCollection`` should go through this. 
In the future, it\n        probably makes sense to make the ``pDefs`` that the ``applyDefinitions`` and\n        ``ResolveParametersMeta`` things are sensitive to more hidden from outside the\n        parameter system.\n        \"\"\"\n        return type(self).pDefs\n\n    def getSyncData(self):\n        \"\"\"\n        Get all changed parameters SINCE_LAST_DISTRIBUTE_STATE (or ``syncMpiState``).\n\n        If this ParameterCollection (proxy for a ``Composite``) has been modified\n        ``SINCE_LAST_DISTRIBUTE_STATE``, this will return a dictionary of parameter name\n        keys and values, otherwise ``None``.\n        \"\"\"\n        if self.assigned & SINCE_LAST_DISTRIBUTE_STATE:\n            syncData = {\n                paramDef.name: getattr(self, paramDef.fieldName)\n                for paramDef in self.paramDefs\n                if paramDef.assigned & SINCE_LAST_DISTRIBUTE_STATE and paramDef.name in self\n            }\n            return syncData\n        return None\n\n    def backUp(self):\n        \"\"\"Back up the state in a Pickle.\"\"\"\n        try:\n            self._backup = pickle.dumps(self.__getstate__())\n            # this reads as assigned & everything_but(SINCE_BACKUP)\n            self.assigned &= ~SINCE_BACKUP\n        except:\n            runLog.error(\"Attempted to pickle {}.\".format(self))\n            raise\n\n    def restoreBackup(self, paramsToApply):\n        \"\"\"Restore the backed up state from a pickle.\n\n        Parameters\n        ----------\n        paramsToApply : list of ParameterDefinitions\n            restores the state of all parameters not in `paramsToApply`\n        \"\"\"\n        currentData = dict()\n\n        if self.assigned & SINCE_BACKUP:\n            compParams = (pd for pd in paramsToApply.intersection(set(self.paramDefs)))\n            currentData = {pd: getattr(self, pd.fieldName) for pd in compParams if hasattr(self, pd.fieldName)}\n\n        self.__setstate__(pickle.loads(self._backup))\n\n   
     for pd, currentValue in currentData.items():\n            # correct for global paramDef.assigned assumption\n            retainedValue = getattr(self, pd.fieldName)\n            if isinstance(retainedValue, np.ndarray) or isinstance(currentValue, np.ndarray):\n                if (retainedValue != currentValue).any():\n                    setattr(self, pd.fieldName, currentValue)\n                    pd.assigned = SINCE_ANYTHING\n                    self.assigned = SINCE_ANYTHING\n            elif retainedValue != currentValue:\n                setattr(self, pd.fieldName, currentValue)\n                pd.assigned = SINCE_ANYTHING\n                self.assigned = SINCE_ANYTHING\n\n    def where(self, f: Callable[[parameterDefinitions.Parameter], bool]) -> Iterator[parameterDefinitions.Parameter]:\n        \"\"\"Produce an iterator over parameters that meet some criteria.\n\n        Parameters\n        ----------\n        f : callable function f(parameter) -> bool\n            Function to check if a parameter should be fetched during the iteration.\n\n        Returns\n        -------\n        iterator of :class:`armi.reactor.parameters.Parameter`\n            Iterator, **not** list or tuple, that produces each parameter that\n            meets ``f(parameter) == True``.\n\n        Examples\n        --------\n        >>> block = r.core[0][0]\n        >>> pdef = block.p.paramDefs\n        >>> for param in pdef.where(lambda pd: pd.atLocation(ParamLocation.EDGES)):\n        ...     
print(param.name, block.p[param.name])\n\n        \"\"\"\n        return filter(f, self.paramDefs)\n\n\ndef collectPluginParameters(pm):\n    \"\"\"Apply parameters from plugins to their respective object classes.\"\"\"\n    for pluginParamDefnCollections in pm.hook.defineParameters():\n        for klass, pDefs in pluginParamDefnCollections.items():\n            klass.pDefs.extend(pDefs)\n\n\ndef applyAllParameters(klass=None):\n    klass = klass or ParameterCollection\n    klass.applyParameters()\n    for derived in klass.__subclasses__():\n        applyAllParameters(derived)\n"
  },
  {
    "path": "armi/reactor/parameters/parameterDefinitions.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"\nThis module contains the code necessary to represent parameter definitions.\n\n``ParameterDefinition``\\ s are the metadata that describe specific parameters, and aid in enforcing\ncertain rules upon the parameters themselves and the parameter collections that contain them.\n\nThis module also describes the ``ParameterDefinitionCollection`` class, which serves as a\nspecialized container to manage related parameter definitions.\n\nSee Also\n--------\narmi.reactor.parameters\n\"\"\"\n\nimport enum\nimport functools\nimport re\nfrom typing import Any, Dict, Optional, Sequence, Tuple, Type\n\nimport numpy as np\n\nfrom armi.reactor.flags import Flags\nfrom armi.reactor.parameters.exceptions import ParameterDefinitionError, ParameterError\n\n# bitwise masks for high-speed operations on the `assigned` attribute\n# see: https://web.archive.org/web/20120225043338/http://www.vipan.com/htdocs/bitwisehelp.html\n# Note that the various operations are responsible for clearing the flags on the events.\n# These should be interpreted as:\n#   The Parameter or ParameterCollection has been modified SINCE_<time-description>\n# In order for that to happen, the flags need to be cleared when the <time-description> begins.\nSINCE_INITIALIZATION = 1\nSINCE_LAST_DISTRIBUTE_STATE = 4\nSINCE_LAST_GEOMETRY_TRANSFORMATION = 8\nSINCE_BACKUP = 16\nSINCE_ANYTHING = 
SINCE_LAST_DISTRIBUTE_STATE | SINCE_INITIALIZATION | SINCE_LAST_GEOMETRY_TRANSFORMATION | SINCE_BACKUP\nNEVER = 32\n\n\nclass Category:\n    \"\"\"\n    A \"namespace\" for storing parameter categories.\n\n    Notes\n    -----\n    * `cumulative` parameters are accumulated over many time steps\n    * `pinQuantities` parameters are defined on the pin level within a block\n    * `multiGroupQuantities` parameters have group dependence (often a 1D numpy array)\n    * `fluxQuantities` parameters are related to neutron or gamma flux\n    * `neutronics` parameters are calculated in a neutronics global flux solve\n    * `gamma` parameters are calculated in a fixed-source gamma solve\n    * `detailedAxialExpansion` parameters are marked as such so that they are mapped from the\n       uniform mesh back to the non-uniform mesh\n    * `reactivity coefficients` parameters are related to reactivity coefficient or kinetics\n       parameters for kinetics solutions\n    * `thermal hydraulics` parameters come from a thermal hydraulics physics plugin (e.g., flow\n       rates, temperatures, etc.)\n    \"\"\"\n\n    depletion = \"depletion\"\n    cumulative = \"cumulative\"\n    cumulativeOverCycle = \"cumulative over cycle\"\n    assignInBlueprints = \"assign in blueprints\"\n    retainOnReplacement = \"retain on replacement\"\n    pinQuantities = \"pinQuantities\"\n    fluxQuantities = \"fluxQuantities\"\n    multiGroupQuantities = \"multi-group quantities\"\n    neutronics = \"neutronics\"\n    gamma = \"gamma\"\n    detailedAxialExpansion = \"detailedAxialExpansion\"\n    reactivityCoefficients = \"reactivity coefficients\"\n    thermalHydraulics = \"thermal hydraulics\"\n\n\nclass ParamLocation(enum.Flag):\n    \"\"\"Represents the point on which a parameter is physically meaningful.\"\"\"\n\n    TOP = 1\n    CENTROID = 2\n    BOTTOM = 4\n    AVERAGE = 10  # 2 + 8\n    MAX = 16\n    CORNERS = 32\n    EDGES = 64\n    VOLUME_INTEGRATED = 128\n    CHILDREN = 256  # on some child 
of a composite, like a pin\n    NA = 512  # no location\n\n\nclass NoDefault:\n    \"\"\"Class used to allow distinction between not setting a default and setting a default of\n    ``None``.\n    \"\"\"\n\n    def __init__(self):\n        raise NotImplementedError(\"You cannot create an instance of NoDefault\")\n\n\nclass _Undefined:\n    \"\"\"Class used to identify a parameter property as being in the undefined state.\"\"\"\n\n    def __init__(self):\n        raise NotImplementedError(\"You cannot create an instance of _Undefined.\")\n\n\nclass Serializer:\n    r\"\"\"\n    Abstract class describing serialize/deserialize operations for Parameter data.\n\n    Parameters need to be stored to and read from database files. This currently requires that the\n    Parameter data be converted to a numpy array of a datatype supported by the ``h5py`` package.\n    Some parameters may contain data that are not trivially representable in numpy/HDF5, and need\n    special treatment. Subclassing ``Serializer`` and setting it as a ``Parameter``\\ s\n    ``serializer`` allows for special operations to be performed on the parameter values as they are\n    stored to the database or read back in.\n\n    The ``Database`` already knows how to handle certain cases where the data are not\n    straightforward to get into a numpy array, such as when:\n\n      - There are ``None``\\ s.\n\n      - The dimensions of the values stored on each object are inconsistent (e.g.,\n        \"jagged\" arrays)\n\n    So, in these cases, a Serializer is not needed. Serializers are necessary for when the actual\n    data need to be converted to a native data type (e.g., int, float, etc). For example, we use a\n    Serializer to handle writing ``Flags`` to the Database, as they tend to be too big to fit into a\n    system-native integer.\n\n    .. 
important::\n\n        Defining a Serializer for a Parameter in part defines the underlying representation of the\n        data within a database file; the data stored in a database are sensitive to the code that\n        wrote them. Changing the method that a Serializer uses to pack or unpack data may break\n        compatibility with old database files. Therefore, Serializers should be diligent about\n        signaling changes by updating their version. It is also good practice, whenever possible,\n        to support reading old versions so that database files written by old versions can still be\n        read.\n\n    .. impl:: Users can define custom parameter serializers.\n        :id: I_ARMI_PARAM_SERIALIZE\n        :implements: R_ARMI_PARAM_SERIALIZE\n\n        Important physical parameters are stored in every ARMI object. These parameters represent\n        the plant's state during execution of the model. Currently, this requires that the\n        parameters be serializable to a numpy array of a datatype supported by the ``h5py`` package\n        so that the data can be written to, and subsequently read from, an HDF5 file.\n\n        This class allows for these parameters to be serialized in a custom manner by providing\n        interfaces for packing and unpacking parameter data. The user or downstream plugin is able\n        to specify how data is serialized if that data is not naturally serializable.\n\n    See Also\n    --------\n    armi.bookkeeping.db.database.packSpecialData\n    armi.bookkeeping.db.database.unpackSpecialData\n    armi.reactor.flags.FlagSerializer\n    \"\"\"\n\n    # This will accompany the packed data as an attribute when written, and will be provided to the\n    # unpack() method when reading. 
If the underlying format of the data changes, make sure to\n    # change this.\n    version: Optional[str] = None\n\n    @staticmethod\n    def pack(data: Sequence[any]) -> Tuple[np.ndarray, Dict[str, any]]:\n        \"\"\"\n        Given unpacked data, return packed data and a dictionary of attributes needed to unpack it.\n\n        This should perform the fundamental packing operation, returning the packed data and any\n        metadata (\"attributes\") that would be necessary to unpack the data. The class's version is\n        always stored, so no need to provide it as an attribute.\n\n        See Also\n        --------\n        armi.reactor.flags.FlagSerializer.pack\n        \"\"\"\n        raise NotImplementedError()\n\n    @classmethod\n    def unpack(cls, data: np.ndarray, version: Any, attrs: Dict[str, any]) -> Sequence[any]:\n        \"\"\"Given packed data and attributes, return the unpacked data.\"\"\"\n        raise NotImplementedError()\n\n\ndef isNumpyArray(paramStr):\n    \"\"\"Helper meta-function to create a method that sets a Parameter value to a NumPy array.\n\n    Parameters\n    ----------\n    paramStr : str\n        Name of the Parameter we want to set.\n\n    Returns\n    -------\n    function\n        A setter method on the Parameter class to force the value to be a NumPy array.\n    \"\"\"\n\n    def setParameter(selfObj, value):\n        if value is None or isinstance(value, np.ndarray):\n            setattr(selfObj, \"_p_\" + paramStr, value)\n        else:\n            setattr(selfObj, \"_p_\" + paramStr, np.array(value))\n\n    return setParameter\n\n\ndef isNumpyF32Array(paramStr: str):\n    \"\"\"Helper meta-function to create a method that sets a Parameter value to a 32 bit float NumPy array.\n\n    Parameters\n    ----------\n    paramStr\n        Name of the Parameter we want to set.\n\n    Returns\n    -------\n    function\n        A setter method on the Parameter class to force the value to be a 32 bit NumPy array.\n    
\"\"\"\n\n    def setParameter(selfObj, value):\n        if value is None:\n            # allow default of None to exist\n            setattr(selfObj, \"_p_\" + paramStr, value)\n        else:\n            # force to 32 bit\n            setattr(selfObj, \"_p_\" + paramStr, np.array(value, dtype=np.float32))\n\n    return setParameter\n\n\n@functools.total_ordering\nclass Parameter:\n    \"\"\"Metadata about a specific parameter.\"\"\"\n\n    _validName = re.compile(\"^[a-zA-Z0-9_]+$\")\n\n    # Using slots because Parameters are pretty static and mostly POD. __slots__ make this official,\n    # and offer some performance benefits in memory (not too important; there aren't that many\n    # instances of Parameter to begin with) and attribute access time (more important, since we need\n    # to go through Parameter objects to get to a specific parameter's value in a\n    # ParameterCollection)\n    __slots__ = (\n        \"name\",\n        \"fieldName\",\n        \"collectionType\",\n        \"location\",\n        \"saveToDB\",\n        \"serializer\",\n        \"units\",\n        \"default\",\n        \"_getter\",\n        \"_setter\",\n        \"description\",\n        \"categories\",\n        \"assigned\",\n        \"_backup\",\n    )\n\n    def __init__(\n        self,\n        name,\n        units,\n        description,\n        location,\n        saveToDB,\n        default,\n        setter,\n        categories,\n        serializer: Optional[Type[Serializer]] = None,\n    ):\n        # nonsensical to have a serializer with no intention of saving to DB\n        assert not (serializer is not None and not saveToDB)\n        assert serializer is None or saveToDB\n        assert self._validName.match(name), \"{} is not a valid param name\".format(name)\n        assert len(description), f\"Parameter {name} defined without description.\"\n\n        self.collectionType = _Undefined\n        self.name = name\n        self.fieldName = \"_p_\" + name\n        self.location 
= location\n        self.saveToDB = saveToDB\n        self.serializer = serializer\n        self.description = description\n        self.units = units\n        self.default = default\n        self.categories = categories\n        self.assigned = NEVER\n        self._backup = None\n\n        if self.default is not NoDefault:\n\n            def paramGetter(p_self):\n                return getattr(p_self, self.fieldName, self.default)\n\n        else:\n\n            def paramGetter(p_self):\n                value = getattr(p_self, self.fieldName)\n                if value is NoDefault:\n                    raise ParameterError(\n                        \"Cannot get value for parameter `{}` in `{}` as no default has been \"\n                        \"defined, and no value has been assigned.\".format(self.name, type(p_self))\n                    )\n                return value\n\n        self._getter = paramGetter\n        self._setter = None  # actually, it gets assigned with this:\n        self.setter(setter)\n\n    def __repr__(self):\n        return \"<ParamDef name:{} collectionType:{} units:{} assigned:{}>\".format(\n            self.name, self.collectionType.__name__, self.units, self.assigned\n        )\n\n    def __eq__(self, other):\n        \"\"\"Name defines equality.\"\"\"\n        return self.name == other.name\n\n    def __ne__(self, other):\n        return not (self == other)\n\n    def __lt__(self, other):\n        \"\"\"Sort alphabetically by name.\"\"\"\n        return self.name < other.name\n\n    def __hash__(self):\n        return hash(self.name) + id(self)\n\n    def __setstate__(self, state):\n        self._backup = state[0]  # a tuple of 1 element.\n\n    def __set__(self, obj, val):\n        \"\"\"This is a property setter, see Python documentation for \"descriptor\".\"\"\"\n        self._setter(obj, val)\n\n    def __get__(self, obj, cls=None):\n        \"\"\"This is a property getter, see Python documentation for \"descriptor\".\n\n        
Notes\n        -----\n        We do not check to see if ``cls != None``. This is an optimization choice, that someone may\n        deem unnecessary. As a result, unlike Python's ``property`` class, a subclass cannot\n        override the getter method.\n        \"\"\"\n        return self._getter(obj)\n\n    def setter(self, setter):\n        \"\"\"Decorator method for assigning setter.\n\n        .. impl:: Provide a way to signal if a parameter needs updating across processes.\n            :id: I_ARMI_PARAM_PARALLEL\n            :implements: R_ARMI_PARAM_PARALLEL\n\n            Parameters need to be handled properly during parallel code execution. This includes\n            notifying processes if a parameter has been updated by another process. This method\n            allows for setting a parameter's value as well as an attribute that signals whether this\n            parameter has been updated. Future processes will be able to query this attribute so\n            that the parameter's status is properly communicated.\n\n        Notes\n        -----\n        Unlike the traditional Python ``property`` class, this does not return a new instance of a\n        ``Parameter``; therefore it cannot be reassigned in the same way that a Python ``property``\n        can be.\n\n        Examples\n        --------\n        >>> class MyParameterCollection(parameters.ParameterCollection):\n        ...     mass = parameters.Parameter(...)\n        ...\n        ...     @mass.setter\n        ...     def mass(self, value):\n        ...         if value < 0:\n        ...             raise ValueError(\"Negative mass is not possible, consider a diet.\")\n        ...         
self._p_speed = value\n        \"\"\"\n        if setter is NoDefault:\n\n            def paramSetter(p_self, value):\n                self.assigned = SINCE_ANYTHING\n                p_self.assigned = SINCE_ANYTHING\n                setattr(p_self, self.fieldName, value)\n\n        elif setter is None:\n\n            def paramSetter(p_self, value):\n                raise ParameterError(\n                    \"Cannot set value for parameter `{}` on {} to `{}`, it has a restricted setter.\".format(\n                        self.name, p_self, value\n                    )\n                )\n\n        elif callable(setter):\n\n            def paramSetter(p_self, value):\n                self.assigned = SINCE_ANYTHING\n                p_self.assigned = SINCE_ANYTHING\n                setter(p_self, value)\n\n        else:\n            raise ParameterDefinitionError(\n                \"The setter for parameter `{}` must be callable. Setter attribute: {}\".format(self.name, setter)\n            )\n\n        self._setter = paramSetter\n\n        return self\n\n    def backUp(self):\n        \"\"\"Back up the assigned state.\"\"\"\n        self._backup = (self._backup, self.assigned)\n\n    def restoreBackup(self, paramsToApply):\n        \"\"\"Restore the backed up state.\"\"\"\n        if self in paramsToApply:\n            # retain self.assigned if self in a category\n            self._backup, _assigned = self._backup\n        else:\n            self._backup, self.assigned = self._backup\n\n    def atLocation(self, loc):\n        \"\"\"True if parameter is defined at location.\"\"\"\n        return self.location and self.location & loc\n\n    def hasCategory(self, category: str) -> bool:\n        \"\"\"True if a parameter has a specific category.\"\"\"\n        return category in self.categories\n\n\nclass ParameterDefinitionCollection:\n    \"\"\"\n    A very specialized container for managing parameter definitions.\n\n    Notes\n    -----\n    ``_representedTypes`` is 
used to detect if this ``ParameterDefinitionCollection`` contains\n    definitions for only one type. If the collection only exists for 1 type, the lookup\n    (``__getitem__``) can short circuit O(n) logic for O(1) dictionary lookup.\n    \"\"\"\n\n    # Slots are not being used here as an attempt at optimization. Rather, they serve to add some\n    # needed rigidity to the parameter system.\n    __slots__ = (\"_paramDefs\", \"_paramDefDict\", \"_representedTypes\", \"_locked\")\n\n    def __init__(self):\n        self._paramDefs = list()\n        self._paramDefDict = dict()\n        self._representedTypes = set()\n        self._locked = False\n\n    def __iter__(self):\n        return iter(self._paramDefs)\n\n    def __len__(self):\n        return len(self._paramDefs)\n\n    def __getitem__(self, name):\n        \"\"\"Get a parameter by name.\n\n        Notes\n        -----\n        This method might break if the collection is for multiple composite types, and there exists\n        a parameter with the same name in multiple types.\n        \"\"\"\n        # O(1) lookup if there is only 1 type, could still raise a KeyError\n        if len(self._representedTypes) == 1:\n            return self._paramDefDict[name, next(iter(self._representedTypes))]\n\n        # \"matches\" only checks for the same name, while the add method checks both name and\n        # collectionType\n        matches = [pd for pd in self if pd.name == name]\n        if len(matches) != 1:\n            raise KeyError(\n                \"Too {} parameters with the name `{}`. 
Matches:\\n{}\".format(\n                    \"many\" if len(matches) > 1 else \"few\",\n                    name,\n                    \"\\n\".join(str(pd) for pd in matches),\n                )\n            )\n        return matches[0]\n\n    def add(self, paramDef):\n        \"\"\"Add a :py:class:`Parameter` to this collection.\"\"\"\n        assert not self._locked, \"This ParameterDefinitionCollection has been locked.\"\n        self._paramDefs.append(paramDef)\n        self._paramDefDict[paramDef.name, paramDef.collectionType] = paramDef\n        self._representedTypes.add(paramDef.collectionType)\n\n    def _filter(self, filterFunc):\n        pdc = ParameterDefinitionCollection()\n        pdc.extend(filter(filterFunc, self._paramDefs))\n        return pdc\n\n    def items(self):\n        return self._paramDefDict.items()\n\n    def extend(self, other):\n        \"\"\"Grow a parameter definition collection by another parameter definition collection.\"\"\"\n        assert not self._locked, \"This ParameterDefinitionCollection ({}) has been locked.\".format(\n            self._representedTypes\n        )\n        assert self is not other\n        if other is None:\n            raise ValueError(\n                f\"Cannot extend {self} with `None`. Ensure return value of parameter definitions returns something.\"\n            )\n        for pd in other:\n            self.add(pd)\n\n    def inCategory(self, categoryName):\n        \"\"\"\n        Create a :py:class:`ParameterDefinitionCollection` that contains definitions that are in a\n        specific category.\n        \"\"\"\n        return self._filter(lambda pd: categoryName in pd.categories)\n\n    def atLocation(self, paramLoc):\n        \"\"\"\n        Make a param definition collection with all defs defined at a specific location.\n\n        Parameters can be defined at various locations within their container based on\n        :py:class:`ParamLocation`. 
This allows selection by those values.\n        \"\"\"\n        return self._filter(lambda pd: pd.atLocation(paramLoc))\n\n    def since(self, mask):\n        \"\"\"\n        Create a :py:class:`ParameterDefinitionCollection` that contains definitions that have been\n        modified since a specific set of actions.\n        \"\"\"\n        return self._filter(lambda pd: pd.assigned & mask)\n\n    def unchanged_since(self, mask):\n        \"\"\"\n        Create a :py:class:`ParameterDefinitionCollection` that contains definitions that have not\n        been modified since a specific set of actions. This is the complementary set of the\n        collection returned by `since`.\n        \"\"\"\n        return self._filter(lambda pd: not (pd.assigned & mask))\n\n    def forType(self, compositeType):\n        \"\"\"\n        Create a :py:class:`ParameterDefinitionCollection` that contains definitions for a\n        specific composite type.\n        \"\"\"\n        return self._filter(lambda pd: issubclass(compositeType.paramCollectionType, pd.collectionType))\n\n    def resetAssignmentFlag(self, mask):\n        \"\"\"\n        Clear the `assigned` flag for a certain operation on all parameters.\n\n        These flags will get set by the param definition setters if they get changed again.\n\n        Notes\n        -----\n        See http://www.vipan.com/htdocs/bitwisehelp.html to understand the bitwise operations\n        \"\"\"\n        for pd in self._paramDefs:\n            pd.assigned &= ~mask\n\n    def setAssignmentFlag(self, mask):\n        for pd in self._paramDefs:\n            pd.assigned |= mask\n\n    def byNameAndType(self, name, compositeType):\n        \"\"\"Get a :py:class:`Parameter` by compositeType and name.\"\"\"\n        return self._paramDefDict[name, compositeType.paramCollectionType]\n\n    def byNameAndCollectionType(self, name, collectionType):\n        \"\"\"Get a :py:class:`Parameter` by collectionType and name.\"\"\"\n        return 
self._paramDefDict[name, collectionType]\n\n    @property\n    def categories(self):\n        \"\"\"Get the categories of all the :py:class:`~Parameter` instances within this collection.\"\"\"\n        categories = set()\n        for paramDef in self:\n            categories |= paramDef.categories\n        return categories\n\n    @property\n    def names(self):\n        return [pd.name for pd in self]\n\n    def lock(self):\n        self._locked = True\n\n    @property\n    def locked(self):\n        return self._locked\n\n    def toWriteToDB(self, assignedMask: Optional[int] = None):\n        \"\"\"\n        Get a list of acceptable parameters to store to the database for a level of the data model.\n\n        .. impl:: Filter parameters to write to DB.\n            :id: I_ARMI_PARAM_DB\n            :implements: R_ARMI_PARAM_DB\n\n            This method is called when writing the parameters to the database file. It queries the\n            parameter's ``saveToDB`` attribute to ensure that this parameter is desired for saving\n            to the database file. It returns a list of parameters that should be included in the\n            database write operation.\n\n        Parameters\n        ----------\n        assignedMask : int\n            A bitmask to down-filter which params to use based on how \"stale\" they are.\n        \"\"\"\n        mask = assignedMask or SINCE_ANYTHING\n        return [p for p in self if p.saveToDB and p.assigned & mask]\n\n    def createBuilder(self, *args, **kwargs):\n        \"\"\"\n        Create an associated object that can create definitions into this collection.\n\n        Using the returned ParameterBuilder will add all defined parameters to this\n        ParameterDefinitionCollection, using the passed arguments as defaults. 
Arguments should be\n        valid arguments to ``ParameterBuilder.__init__()``\n        \"\"\"\n        paramBuilder = ParameterBuilder(*args, **kwargs)\n        paramBuilder.associateParameterDefinitionCollection(self)\n        return paramBuilder\n\n\nclass ParameterBuilder:\n    \"\"\"Factory for creating Parameter and parameter properties.\"\"\"\n\n    def __init__(\n        self,\n        location=ParamLocation.AVERAGE,\n        default=NoDefault,\n        categories=None,\n        saveToDB=True,\n    ):\n        \"\"\"Create a :py:class:`ParameterBuilder`.\"\"\"\n        self._entered = False\n        self._defaultLocation = location\n        self._defaultCategories = set(categories or [])  # make sure it is always a set\n        self._defaultValue = default\n        self._assertDefaultIsProperType(default)\n        self._saveToDB = saveToDB\n        self._paramDefs = None\n\n    def __enter__(self):\n        self._entered = True\n        return self\n\n    def __exit__(self, exc_type, exc_value, tracebac):\n        if exc_type is not None:\n            # allow exceptions to be raised normally, to prevent confusing stack traces\n            return\n        self._entered = False\n\n    @staticmethod\n    def _assertDefaultIsProperType(default):\n        if default in (NoDefault, None) or isinstance(default, (int, str, float, bool, Flags)):\n            return\n        raise AssertionError(\n            \"Cannot specify a default mutable type ({}) value to a parameter; all instances would \"\n            \"share the same list.\".format(type(default))\n        )\n\n    def associateParameterDefinitionCollection(self, paramDefs):\n        \"\"\"\n        Associate this parameter factory with a specific ParameterDefinitionCollection.\n\n        Subsequent calls to defParam will automatically add the created ParameterDefinitions to this\n        ParameterDefinitionCollection. 
This results in a cleaner syntax when defining many\n        ParameterDefinitions.\n        \"\"\"\n        self._paramDefs = paramDefs\n\n    def defParam(\n        self,\n        name,\n        units,\n        description,\n        location=None,\n        saveToDB=NoDefault,\n        default=NoDefault,\n        setter=NoDefault,\n        categories=None,\n        serializer: Optional[Type[Serializer]] = None,\n    ):\n        r\"\"\"Create a parameter as a property (with get/set) on a class.\n\n        Parameters\n        ----------\n        name: str\n            the official name of the parameter\n\n        units: str\n            string representation of the units\n\n        description: str\n            a brief, but precise-as-possible description of what the parameter is used\n            for.\n\n        location: str\n            string representation of the location the attribute is applicable to, such\n            as average, max, etc.\n\n        saveToDB: bool\n            indicator as to whether the parameter should be written to the database. The\n            actual default is defined by the :py:class:`ParameterBuilder`, and is\n            :code:`True`.\n\n        default: immutable type\n            a default value for this parameter which must be an immutable type. If the\n            type is mutable, e.g. a list, dict, an exception should be raised, or\n            unknown behavior.\n\n        setter: None or callable\n            If ``None``, there is no direct way to set the parameter. If some other\n            callable method, (which may have the same name as the property!) then the\n            setter method is used instead.\n\n        categories: List of str\n            A list of categories to which this Parameter should belong. 
Categories are\n            typically used to engage special treatment for certain Parameters.\n\n        serializer: Optional subclass of Serializer\n            A class describing how the parameter data should be stored to the database.\n            This is usually only needed in exceptional cases where it is difficult to\n            store a parameter in a numpy array.\n\n        Notes\n        -----\n        It is not possible to initialize the parameter on the class this method would be used on,\n        because there is no instance (i.e. self) when this method is run. However, this method could\n        access a globally available set of definitions, if one existed.\n        \"\"\"\n        self._assertDefaultIsProperType(default)\n        if location is None and self._defaultLocation is None:\n            raise ParameterDefinitionError(\n                \"The default location is not specified for {}; a parameter-specific location is required.\".format(self)\n            )\n\n        paramDef = Parameter(\n            name,\n            units=units,\n            description=description,\n            location=location or self._defaultLocation,\n            saveToDB=saveToDB if saveToDB is not NoDefault else self._saveToDB,\n            default=default if default is not NoDefault else self._defaultValue,\n            setter=setter,\n            categories=set(categories or []).union(self._defaultCategories),\n            serializer=serializer,\n        )\n\n        if self._paramDefs is not None:\n            self._paramDefs.add(paramDef)\n        return paramDef\n\n\n# Container for all parameter definition collections that have been bound to an ArmiObject or\n# subclass. These are added from the applyParameters() method on the ParameterCollection class.\nALL_DEFINITIONS = ParameterDefinitionCollection()\n"
  },
  {
    "path": "armi/reactor/parameters/resolveCollections.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis module contains the magic that makes the Parameter system and ARMI composite model\nplay nicely together.\n\nThe contained metaclass is useful for maintaining a hierarchy of ``ParameterCollection``\nclasses, which mimic the hierarchy of ``ArmiObject`` s to which they apply. Some\n``ArmiObject`` subclasses define their own parameters, while others do not, so we do not\nwant to blindly create a ``ParameterCollectionClass`` for each ``ArmiObject`` subclass.\nInstead, we want to be able to skip generations when no additional parameters were\nrequested for that level. For instance if we have a hierarchy like: ``ArmiObject`` <-\n``A`` <- ``B``, where ``ArmiObject`` and ``B`` define parameters, while ``A`` does not\ndefine any parameters of its own, we want to have a ``ArmiObjectParameterCollection``\nand a ``BParameterCollection`` (with ``BParameterCollection`` being a subclass of\n``ArmiObjectParameterCollection``).  
``ArmiObject`` and ``A`` will both *share* the\n``ArmiObjectParameterCollection``, while ``B`` will use ``BParameterCollection``.\n``BParameterCollection`` will contain all of the parameters defined in\n``ArmiObjectParameterCollection``, plus whatever additional parameters were defined on\n``B``.\n\nThe above scenario should behave rather intuitively for someone used to classes and\ninheritance, but maintaining this hierarchy by hand would be onerous and error-prone.\nWhat if one day we decide to add some parameters to ``A``? We need to remember to add a\nnew class for its parameters, and make sure to make ``BParameterCollection`` a subclass\nof that new ``ParameterCollection`` class. With the below metaclass, we needn't worry\nourselves with any of that; it is taken care of automatically.\n\nIf you want to know how the sausage is made, the ``ResolveParametersMeta`` metaclass is\nresponsible for forming a hierarchy of ``ParameterCollection`` classes that correspond\nto the related hierarchy of classes inheriting the root ``ArmiObject`` class. It should\nbe rare for an ARMI developer not engaged directly with Framework development to need to\nknow exactly how this works, but a proficient ARMI developer must keep in mind the\nfollowing rules about how this system behaves in practice:\n\n* When defining subclasses of ``ArmiObject``, defining a class attribute called\n  ``pDefs`` of the ``ParameterDefinitionCollection`` type signals to the system that\n  this is a *Parameter Class*.\n* When defining a *Parameter Class*, it will trigger the creation of a new\n  ``ParameterCollection`` class, which will be derived from the\n  ``ParameterCollection`` class of the most immediate *Parameter Class* ancestor\n  the new class's inheritance tree.\n* All classes derived from ``ArmiObject`` will receive an associated subclass of\n  ``ParameterCollection``, which will ultimately include all of the relevant\n  Parameters for that class. 
The specific class is the ``ParameterCollection``\n  subclass defined for the most immediate *Parameter Class* in the classes\n  inheritance tree.\n* Parameter definitions can be added to a *Parameter Class*'s ``pDefs`` until\n  Parameters have been \"compiled\" for it. After compiling parameters, the ``pDefs``\n  are locked, and any attempts at defining additional parameters will cause an\n  error.\n* ``ArmiObject`` s cannot be instantiated until after parameters have been compiled.\n\n\"\"\"\n\nfrom armi.reactor.parameters.parameterCollections import ParameterCollection\nfrom armi.reactor.parameters.parameterDefinitions import ParameterDefinitionCollection\n\n\nclass ResolveParametersMeta(type):\n    \"\"\"Metaclass for automatically defining associated ParameterCollection classes.\n\n    Any class invoking this metaclass will automatically create an associated sub-class\n    of the ``ParameterCollection`` type, if it has a class attribute called ``pDefs``\n    that is an instance of ``ParameterDefinitionCollection``. This new class will itself\n    be a subclass of the ``ParameterCollection`` class that is associated with the\n    invoking class's parent.\n\n    If no ``pDefs`` class attribute is present, the invoking class will adopt the\n    ``ParameterCollection`` class associated with it's parent, or ``None`` if it cannot\n    find one.\n\n    The associated ``ParameterCollection`` will be stored on the new class's\n    ``paramCollectionType`` attribute.\n\n    For example, when this metaclass is applied to the ``Block`` class it will create a\n    new class named ``BlockParameterCollection``, and add it as a class attribute called\n    ``Block.paramCollectionType``. The ``BlockParameterCollection`` class will itself be\n    a subclass of ``ArmiObjectParameterCollection``, which it would have found from the\n    ``Composite`` class from which the ``Block`` class inherits. 
The ``Composite``\n    class, on the other hand, would have obtained the ``ArmiObjectParameterCollection``\n    from it's parent (``ArmiObject``), since it does not have a ``pDefs`` attribute of\n    its own.\n    \"\"\"\n\n    def __new__(mcl, name, bases, attrs):\n        assert attrs.get(\"paramCollectionType\") is None, \"{} already has parameter collection\".format(name)\n        baseCollections = [b.paramCollectionType for b in bases if hasattr(b, \"paramCollectionType\")]\n        # Make sure that these are what we expect them to be\n        assert all([issubclass(c, ParameterCollection) for c in baseCollections if c is not None])\n\n        # Pull out the one element of the list if it exists\n        inferredBaseCollection = next(iter(baseCollections), None)\n\n        # pDefs can be defined in the class definition; if it is, this is is a Parameter\n        # Class!\n        pDefs = attrs.get(\"pDefs\")\n        makeNewPC = pDefs is not None\n        if makeNewPC:\n            # We may have our own parameters, so we need to spin up a new\n            # XParameterCollection class to store them.\n            assert isinstance(pDefs, ParameterDefinitionCollection)\n\n            collectionName = name + \"ParameterCollection\"\n            collectionBase = inferredBaseCollection or ParameterCollection\n\n            # Note that we also give a reference to the pDefs to the parameter\n            # collection. 
This is so that the ParmameterCollection hierarchy can do all\n            # of the parameter definitions work, while plugins can associate definitions\n            # with the ArmiObjects\n            paramCollectionType = type(\n                collectionName,\n                (collectionBase,),\n                {\n                    \"pDefs\": pDefs,\n                },\n            )\n        else:\n            # We will not be defining our own parameters, so we will defer to to those\n            # of our parent classes if they have any\n            paramCollectionType = inferredBaseCollection\n\n        attrs[\"paramCollectionType\"] = paramCollectionType\n\n        nt = type.__new__(mcl, name, bases, attrs)\n        if makeNewPC:\n            paramCollectionType._ArmiObject = nt\n\n        return nt\n"
  },
  {
    "path": "armi/reactor/reactorParameters.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Reactor parameter definitions.\"\"\"\n\nfrom armi.reactor import parameters\nfrom armi.reactor.parameters import ParamLocation\nfrom armi.utils import units\n\n\ndef defineReactorParameters():\n    pDefs = parameters.ParameterDefinitionCollection()\n\n    with pDefs.createBuilder(location=ParamLocation.AVERAGE, default=0.0) as pb:\n        pb.defParam(\n            \"cycle\",\n            units=units.UNITLESS,\n            description=\"Current cycle of the simulation (integer)\",\n            default=0,\n        )\n\n        pb.defParam(\n            \"cycleLength\",\n            units=units.DAYS,\n            description=\"Length of the cycle, including outage time described by availabilityFactor\",\n        )\n\n        pb.defParam(\"stepLength\", units=units.DAYS, description=\"Length of current step\")\n\n        pb.defParam(\n            \"availabilityFactor\",\n            units=units.UNITLESS,\n            description=\"Availability factor of the plant. 
This is the fraction of the time that \"\n            \"the plant is operating.\",\n            default=1.0,\n        )\n\n        pb.defParam(\n            \"capacityFactor\",\n            units=units.UNITLESS,\n            description=\"The fraction of power produced by the plant this cycle over the \"\n            \"full-power, 100% uptime potential of the plant.\",\n            default=1.0,\n        )\n\n        pb.defParam(\n            \"time\",\n            units=units.YEARS,\n            description=\"Time of reactor life from BOL to current time node\",\n            categories=[\"depletion\"],\n        )\n\n        pb.defParam(\"timeNode\", units=units.UNITLESS, description=\"Integer timeNode\", default=0)\n\n        pb.defParam(\n            \"maxAssemNum\",\n            units=units.UNITLESS,\n            description=\"Max number of assemblies created so far in the Reactor (integer)\",\n            default=0,\n        )\n\n    return pDefs\n\n\ndef defineCoreParameters():\n    pDefs = parameters.ParameterDefinitionCollection()\n\n    with pDefs.createBuilder(location=ParamLocation.CENTROID) as pb:\n        pb.defParam(\n            \"orientation\",\n            units=units.DEGREES,\n            description=(\n                \"Triple representing rotations counterclockwise around each spatial axis. 
For \"\n                \"example, a hex assembly rotated by 1/6th has orientation (0,0,60.0)\"\n            ),\n            default=None,\n        )\n\n    with pDefs.createBuilder(location=ParamLocation.AVERAGE, default=0.0) as pb:\n        pb.defParam(\n            \"maxAssemNum\",\n            units=units.UNITLESS,\n            description=\"Maximum assembly number\",\n            default=0,\n        )\n\n        pb.defParam(\"numMoves\", units=units.UNITLESS, description=\"numMoves\", default=0)\n\n    with pDefs.createBuilder(location=ParamLocation.NA, categories=[\"control rods\"]) as pb:\n        pb.defParam(\n            \"crMostValuablePrimaryRodLocation\",\n            default=\"\",\n            units=units.UNITLESS,\n            saveToDB=True,\n            description=(\"Core assembly location for the most valuable primary control rod.\"),\n        )\n        pb.defParam(\n            \"crMostValuableSecondaryRodLocation\",\n            default=\"\",\n            units=units.UNITLESS,\n            saveToDB=True,\n            description=(\"Core assembly location for the most valuable secondary control rod.\"),\n        )\n        pb.defParam(\n            \"crTransientOverpowerWorth\",\n            default=0.0,\n            units=units.PCM,\n            saveToDB=True,\n            description=(\n                \"Reactivity worth introduced by removal of the highest worth primary control rod \"\n                \"from the core, starting from its critical position\"\n            ),\n        )\n\n    with pDefs.createBuilder() as pb:\n        pb.defParam(\n            \"axialMesh\",\n            units=units.CM,\n            description=\"Global axial mesh of the reactor core from bottom to top.\",\n            default=None,\n            location=ParamLocation.TOP,\n        )\n\n    with pDefs.createBuilder(default=0.0, location=ParamLocation.NA) as pb:\n        pb.defParam(\n            \"referenceBlockAxialMesh\",\n            units=units.CM,\n           
 description=(\"The axial block boundaries that assemblies should conform to in a uniform mesh case.\"),\n            default=None,\n        )\n\n        pb.defParam(\"fissileMass\", units=units.GRAMS, description=\"Fissile mass of the reactor\")\n\n        pb.defParam(\n            \"heavyMetalMass\",\n            units=units.GRAMS,\n            description=\"Heavy Metal mass of the reactor\",\n        )\n\n        pb.defParam(\n            \"keffUnc\",\n            units=units.UNITLESS,\n            saveToDB=True,\n            default=0.0,\n            description=\"Uncontrolled k-effective for the reactor core (with control rods fully removed).\",\n        )\n\n        pb.defParam(\n            \"maxDPA\",\n            units=units.DPA,\n            description=\"Maximum DPA based on pin-level max if it exists, block level max otherwise\",\n        )\n\n        pb.defParam(\"maxGridDpa\", units=units.DPA, description=\"Grid plate max dpa\")\n\n        pb.defParam(\n            \"maxProcessMemoryInMB\",\n            units=units.MB,\n            description=\"Maximum memory used by an ARMI process\",\n        )\n\n        pb.defParam(\n            \"minProcessMemoryInMB\",\n            units=units.MB,\n            description=\"Minimum memory used by an ARMI process\",\n        )\n\n        pb.defParam(\n            \"minutesSinceStart\",\n            units=units.MINUTES,\n            description=\"Run time since the beginning of the calculation\",\n        )\n\n        pb.defParam(\n            \"peakGridDpaAt60Years\",\n            units=units.DPA,\n            description=\"Grid plate peak dpa after 60 years irradiation\",\n        )\n\n    with pDefs.createBuilder(location=ParamLocation.AVERAGE, default=0.0, categories=[\"neutronics\"]) as pb:\n        pb.defParam(\n            \"power\",\n            units=units.WATTS,\n            description=\"Thermal power of the reactor core. 
Corresponds to the nuclear power generated in the core.\",\n        )\n\n        pb.defParam(\n            \"powerDensity\",\n            units=f\"{units.WATTS}/{units.GRAMS}\",\n            description=\"BOL Power density of the reactor core, in units of Watts per\"\n            \"grams of Heavy Metal Mass. After the BOL, the power parameter will be set, \"\n            \"and this will entirely overridden by that.\",\n        )\n\n        pb.defParam(\n            \"maxdetailedDpaPeak\",\n            units=units.DPA,\n            description=\"Highest peak dpa of any block in the problem\",\n        )\n\n        pb.defParam(\n            \"maxFlux\",\n            units=f\"n/{units.CM}^2/{units.SECONDS}\",\n            description=\"Max neutron flux in the core\",\n        )\n\n        pb.defParam(\n            \"maxDetailedDpaThisCycle\",\n            units=units.DPA,\n            description=\"Max increase in dpa this cycle (only defined at EOC)\",\n        )\n\n        pb.defParam(\n            \"dpaFullWidthHalfMax\",\n            units=units.CM,\n            description=\"Full width at half max of the detailedDpa distribution\",\n        )\n\n        pb.defParam(\n            \"elevationOfACLP3Cycles\",\n            units=units.CM,\n            description=\"minimum axial location of the ACLP for 3 cycles at peak dose\",\n        )\n\n        pb.defParam(\n            \"elevationOfACLP7Cycles\",\n            units=units.CM,\n            description=\"minimum axial location of the ACLP for 7 cycles at peak dose\",\n        )\n\n        pb.defParam(\n            \"maxpercentBu\",\n            units=units.PERCENT_FIMA,\n            description=\"Max percent burnup on any block in the problem\",\n        )\n\n        pb.defParam(\"rxSwing\", units=units.PCM, description=\"Reactivity swing\")\n\n        pb.defParam(\n            \"maxBuF\",\n            units=units.PERCENT,\n            description=\"Maximum burnup seen in any feed assemblies\",\n        )\n\n      
  pb.defParam(\n            \"maxBuI\",\n            units=units.PERCENT,\n            description=\"Maximum burnup seen in any igniter assemblies\",\n        )\n\n        pb.defParam(\"keff\", units=units.UNITLESS, description=\"Global multiplication factor\")\n\n        pb.defParam(\n            \"peakKeff\",\n            units=units.UNITLESS,\n            description=\"Maximum keff in the simulation\",\n        )\n\n        pb.defParam(\n            \"fastFluxFrAvg\",\n            units=units.UNITLESS,\n            description=\"Fast flux fraction average\",\n        )\n\n        pb.defParam(\n            \"maxpdens\",\n            units=f\"{units.WATTS}/{units.CM}^3\",\n            description=\"Maximum avg. volumetric power density of all blocks\",\n        )\n\n        pb.defParam(\n            \"maxPD\",\n            units=f\"{units.MW}/{units.METERS}^2\",\n            description=\"Maximum areal power density of all assemblies\",\n        )\n\n        pb.defParam(\n            \"jumpRing\",\n            units=units.UNITLESS,\n            description=(\n                \"Radial ring number where bred-up fuel assemblies shuffle jump from the low power \"\n                \"to the high power region.\"\n            ),\n        )\n\n    with pDefs.createBuilder(\n        default=0.0,\n        location=ParamLocation.AVERAGE,\n        categories=[\"reactivity coefficients\", \"kinetics\"],\n    ) as pb:\n        pb.defParam(\n            \"beta\",\n            units=units.UNITLESS,\n            description=\"Effective delayed neutron fraction\",\n            default=None,\n        )\n\n        pb.defParam(\n            \"betaComponents\",\n            units=units.UNITLESS,\n            description=\"Group-wise delayed neutron fractions\",\n            default=None,\n        )\n\n        pb.defParam(\n            \"betaDecayConstants\",\n            units=f\"1/{units.SECONDS}\",\n            description=\"Group-wise precursor decay constants\",\n            
default=None,\n        )\n\n    with pDefs.createBuilder(\n        default=0.0,\n        location=ParamLocation.AVERAGE,\n        categories=[\"reactivity coefficients\", \"core wide\"],\n    ) as pb:\n        # CORE WIDE REACTIVITY COEFFICIENTS\n        pb.defParam(\n            \"rxFuelAxialExpansionCoeffPerTemp\",\n            units=f\"{units.REACTIVITY}/{units.DEGK}\",\n            description=\"Fuel Axial Expansion Coefficient\",\n        )\n\n        pb.defParam(\n            \"rxGridPlateRadialExpansionCoeffPerTemp\",\n            units=f\"{units.REACTIVITY}/{units.DEGK}\",\n            description=\"Grid Plate Radial Expansion Coefficient\",\n        )\n\n    with pDefs.createBuilder(location=ParamLocation.AVERAGE, categories=[\"equilibrium\"]) as pb:\n        pb.defParam(\n            \"cyclics\",\n            units=units.UNITLESS,\n            description=(\"The integer number of cyclic mode equilibrium-cycle iterations that have occurred so far\"),\n            default=0,\n        )\n\n    with pDefs.createBuilder(location=ParamLocation.AVERAGE, categories=[\"equilibrium\"]) as pb:\n        pb.defParam(\n            \"axialExpansionPercent\",\n            units=units.PERCENT,\n            description=\"Percent of axial growth of fuel blocks\",\n            default=0.0,\n        )\n\n        pb.defParam(\n            \"coupledIteration\",\n            units=units.UNITLESS,\n            description=\"Pre-defined number of tightly coupled iterations.\",\n            default=0,\n        )\n\n    return pDefs\n\n\ndef makeParametersReadOnly(r):\n    \"\"\"Convert all the parameters in a Reactor to read-only.\n\n    This method is pretty simple. It goes through all the children of a Reactor object,\n    recursively, and converts the parameters to read-only mode. 
This will affect the Core, but also\n    any Spent Fuel Pools or other high-level reactor systems.\n\n    Parameters\n    ----------\n    r : Reactor\n        Full reactor object, to be modified.\n\n    Notes\n    -----\n    Once you make one Reactor read-only, you cannot make it writeable again.\n    \"\"\"\n    r.p.readOnly = True\n    for child in r.iterChildren(deep=True):\n        child.p.readOnly = True\n"
  },
  {
    "path": "armi/reactor/reactors.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Reactor objects represent the highest level in the hierarchy of structures that compose the system to be modeled.\"\"\"\n\nimport copy\n\nfrom armi import getPluginManagerOrFail, runLog\nfrom armi.reactor import composites, reactorParameters\nfrom armi.reactor.cores import Core\nfrom armi.reactor.excoreStructure import ExcoreCollection, ExcoreStructure\nfrom armi.settings.fwSettings.globalSettings import CONF_SORT_REACTOR\nfrom armi.utils import directoryChangers\n\n\nclass Reactor(composites.Composite):\n    \"\"\"\n    Top level of the composite structure, potentially representing all components in a reactor.\n\n    This class contains the core and any ex-core structures that are to be represented in the ARMI\n    model. Historically, the ``Reactor`` contained only the core. To support better representation\n    of ex-core structures, the old ``Reactor`` functionality was moved to the newer `Core` class,\n    which has a ``Reactor`` parent.\n\n    .. impl:: The user-specified reactor.\n        :id: I_ARMI_R\n        :implements: R_ARMI_R\n\n        The :py:class:`Reactor <armi.reactor.reactors.Reactor>` is the top level of the composite\n        structure, which can represent all components within a reactor core. 
The reactor contains a\n        :py:class:`Core <armi.reactor.reactors.Core>`, which contains a collection of\n        :py:class:`Assembly <armi.reactor.assemblies.Assembly>` objects arranged in a hexagonal or\n        Cartesian grid. Each Assembly consists of a stack of\n        :py:class:`Block <armi.reactor.blocks.Block>` objects, which are each composed of one or\n        more :py:class:`Component <armi.reactor.components.component.Component>` objects. Each\n        :py:class:`Interface <armi.interfaces.Interface>` is able to interact with the reactor and\n        its child :py:class:`Composites <armi.reactor.composites.Composite>` by retrieving data from\n        it or writing new data to it. This is the main medium through which input information and\n        the output of physics calculations is exchanged between interfaces and written to an ARMI\n        database.\n    \"\"\"\n\n    pDefs = reactorParameters.defineReactorParameters()\n\n    def __init__(self, name, blueprints):\n        composites.Composite.__init__(self, \"R-{}\".format(name))\n        self.o = None\n        self.spatialGrid = None\n        self.spatialLocator = None\n        self.p.maxAssemNum = 0\n        self.p.cycle = 0\n        self.core = None\n        self.excore = ExcoreCollection()\n        self.blueprints = blueprints\n\n    def __getstate__(self):\n        \"\"\"Applies a settings and parent to the reactor and components.\"\"\"\n        state = composites.Composite.__getstate__(self)\n        state[\"o\"] = None\n        return state\n\n    def __setstate__(self, state):\n        composites.Composite.__setstate__(self, state)\n\n    def __deepcopy__(self, memo):\n        memo[id(self)] = newR = self.__class__.__new__(self.__class__)\n        newR.__setstate__(copy.deepcopy(self.__getstate__(), memo))\n        newR.name = f\"{self.name}-copy\"\n        return newR\n\n    def __repr__(self):\n        return f\"<{self.__class__.__name__}: {self.name} id:{id(self)}>\"\n\n    
@property\n    def nuclideBases(self):\n        from armi.nucDirectory import nuclideBases\n\n        if nuclideBases.nuclideBases is None:\n            nuclideBases.factory()\n\n        return nuclideBases.nuclideBases\n\n    def add(self, container):\n        composites.Composite.add(self, container)\n        cores = [c for c in self.getChildren(deep=True) if isinstance(c, Core)]\n        if cores:\n            if len(cores) != 1:\n                raise ValueError(\n                    f\"Only 1 core may be specified at this time. Please adjust input. {len(cores)} cores found.\"\n                )\n            self.core = cores[0]\n\n        if isinstance(container, ExcoreStructure):\n            nomen = container.name.replace(\" \", \"\").lower()\n            if nomen == \"spentfuelpool\":\n                nomen = \"sfp\"\n            self.excore[nomen] = container\n\n    def incrementAssemNum(self):\n        \"\"\"\n        Increase the max assembly number by one and returns the current value.\n\n        Notes\n        -----\n        The \"max assembly number\" is not currently used in the Reactor. 
So the idea is that we return the current\n        number, then iterate it for the next assembly.\n\n        Obviously, this method will be unused for non-assembly-based reactors.\n\n        Returns\n        -------\n        int\n            The new max Assembly number.\n        \"\"\"\n        val = int(self.p.maxAssemNum)\n        self.p.maxAssemNum += 1\n        return val\n\n    def normalizeNames(self):\n        \"\"\"\n        Renumber and rename all the Assemblies and Blocks.\n\n        This method normalizes the names in the Core then the SFP.\n\n        Returns\n        -------\n        int\n            The new max Assembly number.\n        \"\"\"\n        self.p.maxAssemNum = 0\n\n        ind = self.core.normalizeNames(self.p.maxAssemNum)\n        self.p.maxAssemNum = ind\n\n        if self.excore.sfp is not None:\n            ind = self.excore.sfp.normalizeNames(self.p.maxAssemNum)\n            self.p.maxAssemNum = ind\n\n        return ind\n\n\ndef loadFromCs(cs) -> Reactor:\n    \"\"\"\n    Load a Reactor based on the input settings.\n\n    Parameters\n    ----------\n    cs: Settings\n        A relevant settings object\n\n    Returns\n    -------\n    Reactor\n        Reactor loaded from settings file\n    \"\"\"\n    from armi.reactor import blueprints\n\n    bp = blueprints.loadFromCs(cs)\n    return factory(cs, bp)\n\n\ndef factory(cs, bp) -> Reactor:\n    \"\"\"Build a reactor from input settings and blueprints.\"\"\"\n    runLog.header(\"=========== Constructing Reactor and Verifying Inputs ===========\")\n    getPluginManagerOrFail().hook.beforeReactorConstruction(cs=cs)\n\n    r = Reactor(cs.caseTitle, bp)\n\n    # For now, ARMI will create a default Spent Fuel Pool and add it to every reactor.\n    if not any(structure.typ == \"sfp\" for structure in bp.systemDesigns.values()):\n        bp.addDefaultSFP()\n\n    with directoryChangers.DirectoryChanger(cs.inputDirectory, dumpOnException=False):\n        # always construct the core first (for 
assembly serial number purposes)\n        if not bp.systemDesigns:\n            raise ValueError(\"The input must define a `core` system, but does not. Update inputs\")\n\n        for structure in bp.systemDesigns:\n            structure.construct(cs, bp, r)\n\n    runLog.debug(f\"Reactor: {r}\")\n\n    # return a Reactor object\n    if cs[CONF_SORT_REACTOR]:\n        r.sort()\n    else:\n        runLog.info(f\"Due to the setting {CONF_SORT_REACTOR}, this Reactor is unsorted.\")\n\n    return r\n"
  },
  {
    "path": "armi/reactor/spentFuelPool.py",
    "content": "# Copyright 2024 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"A nuclear reactor frequently has storage pools (or 'ponds') for spent fuel.\n\nThis file implements a simple/default representation of such as an ARMI \"system\". ARMI systems, like the core are grids\nfilled with ArmiObjects. This module also includes some helper tools to aid transferring spent fuel assemblies from the\ncore to the SFP.\n\"\"\"\n\nimport itertools\n\nfrom armi.reactor.excoreStructure import ExcoreStructure\n\n\nclass SpentFuelPool(ExcoreStructure):\n    \"\"\"The Spent Fuel Pool (SFP) is a place to store discharged assemblies.\n\n    This class is a core-like system object, so it has a spatial grid that Assemblies can fit in.\n\n    .. impl:: The user-specified spent fuel pool.\n        :id: I_ARMI_SFP\n        :implements: R_ARMI_SFP\n\n        The SpentFuelPool is a composite structure meant to represent storage ponds for used fuel\n        assemblies. As a data structure, it is little more than a container for ``Assembly``\n        objects. It should be able to easily support adding or removing ``Assembly`` objects. 
And at\n        every time node the current state of the SFP will be written to the database.\n    \"\"\"\n\n    def __init__(self, name, parent=None):\n        ExcoreStructure.__init__(self, name)\n        self.parent = parent\n        self.spatialGrid = None\n        self.numColumns = None\n\n    def add(self, assem, loc=None):\n        \"\"\"\n        Add an Assembly to the list.\n\n        Parameters\n        ----------\n        assem : Assembly\n            The Assembly to add to the spent fuel pool\n        loc : LocationBase, optional\n            If provided, the assembly is inserted at this location.\n            If it is not provided, the locator on the Assembly object will be used.\n            If the Assembly's loc belongs to ``self.spatialGrid``, it will not be used.\n        \"\"\"\n        if loc is not None and loc.grid is not self.spatialGrid:\n            raise ValueError(f\"An assembly cannot be added to {self} using a spatial locator from another grid.\")\n\n        if self.numColumns is None:\n            self._updateNumberOfColumns()\n\n        # If the assembly added has a negative ID, that is a placeholder, fix it.\n        if assem.p.assemNum < 0:\n            newNum = self.r.incrementAssemNum()\n            assem.renumber(newNum)\n\n        # Make sure the location of the new assembly is valid\n        locProvided = loc is not None or (\n            assem.spatialLocator is not None and assem.spatialLocator.grid is self.spatialGrid\n        )\n        if locProvided:\n            loc = loc or assem.spatialLocator\n        else:\n            loc = self._getNextLocation()\n\n        # orient the blocks to match this grid\n        assem.orientBlocks(parentSpatialGrid=self.spatialGrid)\n\n        super().add(assem, loc)\n\n    def getAssembly(self, name):\n        \"\"\"Get a specific assembly by name.\"\"\"\n        for a in self:\n            if a.getName() == name:\n                return a\n\n        return None\n\n    def 
_updateNumberOfColumns(self):\n        \"\"\"Determine the number of columns in the spatial grid.\"\"\"\n        locs = self.spatialGrid.items()\n        self.numColumns = len(set([ll[0][0] for ll in locs]))\n\n    def _getNextLocation(self):\n        \"\"\"Helper method to allow each discharged assembly to be easily dropped into the SFP.\n\n        The logic here is that we assume that the SFP is a rectangular-ish grid, with a set number of columns per row.\n        So when you add an Assembly here, if you don't provide a location, the grid is filled in a col/row order with\n        whatever grid cell is found open first.\n        \"\"\"\n        filledLocations = {a.spatialLocator for a in self}\n        grid = self.spatialGrid\n\n        for idx in itertools.count():\n            j = idx // self.numColumns\n            i = idx % self.numColumns\n            loc = grid[i, j, 0]\n            if loc not in filledLocations:\n                return loc\n\n        return None\n\n    def normalizeNames(self, startIndex=0):\n        \"\"\"\n        Renumber and rename all the Assemblies and Blocks.\n\n        Parameters\n        ----------\n        startIndex : int, optional\n            The default is to start counting at zero. But if you are renumbering assemblies across the entire Reactor,\n            you may want to start at a different number.\n\n        Returns\n        -------\n        int\n            The new max Assembly number.\n        \"\"\"\n        ind = startIndex\n        for a in self:\n            oldName = a.getName()\n            newName = a.makeNameFromAssemNum(ind)\n            if oldName == newName:\n                ind += 1\n                continue\n\n            a.p.assemNum = ind\n            a.setName(newName)\n\n            for b in a:\n                axialIndex = int(b.name.split(\"-\")[-1])\n                b.name = b.makeName(ind, axialIndex)\n\n            ind += 1\n\n        return ind\n"
  },
  {
    "path": "armi/reactor/tests/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"
  },
  {
    "path": "armi/reactor/tests/test_assemblies.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests assemblies.py.\"\"\"\n\nimport math\nimport pathlib\nimport random\nimport unittest\nfrom unittest.mock import patch\n\nimport numpy as np\nfrom numpy.testing import assert_allclose\n\nfrom armi import settings, tests\nfrom armi.physics.neutronics.settings import CONF_LOADING_FILE, CONF_XS_KERNEL\nfrom armi.reactor import assemblies, blocks, blueprints, components, geometry, parameters, reactors\nfrom armi.reactor.assemblies import Flags, HexAssembly, copy, grids, runLog\nfrom armi.reactor.parameters import ParamLocation\nfrom armi.reactor.tests import test_reactors\nfrom armi.tests import TEST_ROOT, mockRunLogs\nfrom armi.utils import directoryChangers, textProcessors\n\nNUM_BLOCKS = 3\n\n\ndef buildTestAssemblies():\n    \"\"\"\n    Build some assembly objects that will be used in testing.\n\n    This builds 2 HexBlocks:\n        * One with half UZr pins and half UTh pins\n        * One with all UZr pins\n    \"\"\"\n    settings.Settings()\n\n    temperature = 273.0\n    fuelID = 0.0\n    fuelOD = 1.0\n    cladOD = 1.1\n    # generate a reactor with assemblies\n    # generate components with materials\n    nPins = 100\n\n    fuelDims = {\n        \"Tinput\": temperature,\n        \"Thot\": temperature,\n        \"od\": fuelOD,\n        \"id\": fuelID,\n        \"mult\": nPins,\n    }\n\n    fuelUZr = components.Circle(\"fuel\", \"UZr\", 
**fuelDims)\n    fuelUTh = components.Circle(\"fuel UTh\", \"ThU\", **fuelDims)\n\n    fuelDims2nPins = {\n        \"Tinput\": temperature,\n        \"Thot\": temperature,\n        \"od\": fuelOD,\n        \"id\": fuelID,\n        \"mult\": 2 * nPins,\n    }\n\n    fuelUZrB = components.Circle(\"fuel B\", \"UZr\", **fuelDims2nPins)\n\n    cladDims = {\n        \"Tinput\": temperature,\n        \"Thot\": temperature,\n        \"od\": cladOD,\n        \"id\": fuelOD,\n        \"mult\": 2 * nPins,\n    }\n\n    clad = components.Circle(\"clad\", \"HT9\", **cladDims)\n\n    interDims = {\n        \"Tinput\": temperature,\n        \"Thot\": temperature,\n        \"op\": 16.8,\n        \"ip\": 16.0,\n        \"mult\": 1.0,\n    }\n\n    interSodium = components.Hexagon(\"interCoolant\", \"Sodium\", **interDims)\n\n    block = blocks.HexBlock(\"fuel\")\n    block2 = blocks.HexBlock(\"fuel\")\n    block.setType(\"fuel\")\n    block.setHeight(10.0)\n    block.add(fuelUZr)\n    block.add(fuelUTh)\n    block.add(clad)\n    block.add(interSodium)\n    block.p.axMesh = 1\n    block.p.molesHmBOL = 1.0\n    block.p.molesHmNow = 1.0\n\n    block2.setType(\"fuel\")\n    block2.setHeight(10.0)\n    block2.add(fuelUZrB)\n    block2.add(clad)\n    block2.add(interSodium)\n    block2.p.axMesh = 1\n    block2.p.molesHmBOL = 2\n    block2.p.molesHmNow = 1.0\n\n    assemblieObjs = []\n    for numBlocks, blockTemplate in zip([1, 1, 5, 4], [block, block2, block, block]):\n        assembly = assemblies.HexAssembly(\"testAssemblyType\")\n        assembly.spatialGrid = grids.AxialGrid.fromNCells(numBlocks)\n        assembly.spatialGrid.armiObject = assembly\n        for _i in range(numBlocks):\n            newBlock = copy.deepcopy(blockTemplate)\n            assembly.add(newBlock)\n        assembly.calculateZCoords()\n        assembly.reestablishBlockOrder()\n        assemblieObjs.append(assembly)\n\n    return assemblieObjs\n\n\nclass MaterialInAssembly_TestCase(unittest.TestCase):\n    
@classmethod\n    def setUpClass(cls):\n        cls.assembly, cls.assembly2, cls.assembly3, cls.assembly4 = buildTestAssemblies()\n\n    def test_sortNoLocator(self):\n        self.assembly.spatialLocator = None\n        self.assembly2.spatialLocator = None\n        self.assertFalse(self.assembly < self.assembly2)\n        self.assertFalse(self.assembly2 < self.assembly)\n        grid = grids.HexGrid()\n        self.assembly.spatialLocator = grid[0, 0, 0]\n        self.assembly2.spatialLocator = grid[0, 1, 0]\n        self.assertTrue(self.assembly < self.assembly2)\n        self.assertFalse(self.assembly2 < self.assembly)\n\n    def test_UThZrMaterial(self):\n        \"\"\"Test the ternary UZr material.\"\"\"\n        b2 = self.assembly2[0]\n        uZrFuel = b2.getComponent(Flags.FUEL | Flags.B)\n        mat = uZrFuel.getProperties()\n        mat.applyInputParams(0.1, 0.0)\n        self.assertAlmostEqual(uZrFuel.getMass(\"U235\") / (uZrFuel.getMass(\"U238\") + uZrFuel.getMass(\"U235\")), 0.1)\n\n\ndef makeTestAssembly(numBlocks, assemNum, spatialGrid=grids.HexGrid.fromPitch(1.0), r=None):\n    coreGrid = r.core.spatialGrid if r is not None else spatialGrid\n    a = HexAssembly(\"TestAssem\", assemNum=assemNum)\n    a.spatialGrid = grids.AxialGrid.fromNCells(numBlocks)\n    a.spatialGrid.armiObject = a\n    a.spatialLocator = coreGrid[2, 2, 0]\n    return a\n\n\nclass AssemblyReadOnlyTests(unittest.TestCase):\n    \"\"\"These tests of Assemblies do not modify the test assembly, which can be created in a setUpClass method.\"\"\"\n\n    @classmethod\n    def setUpClass(cls):\n        cls.name = \"A0015\"\n        cls.assemNum = 15\n        cls.height = 10\n        cls.cs = settings.Settings()\n        # Print nothing to the screen that would normally go to the log.\n        runLog.setVerbosity(\"error\")\n\n        cls.r = tests.getEmptyHexReactor()\n        cls.r.core.symmetry = geometry.SymmetryType(geometry.DomainType.THIRD_CORE, 
geometry.BoundaryType.PERIODIC)\n\n        cls.assembly = makeTestAssembly(NUM_BLOCKS, cls.assemNum, r=cls.r)\n\n        # Use these if they are needed\n        cls.blockParams = {\n            \"height\": cls.height,\n            \"bondRemoved\": 0.0,\n            \"envGroupNum\": 0,\n            \"buLimit\": 35,\n            \"buRate\": 0.0,\n            \"eqRegion\": -1,\n            \"id\": 212.0,\n            \"pdens\": 10.0,\n            \"percentBu\": 25.3,\n            \"power\": 100000.0,\n            \"residence\": 4.0,\n            \"smearDensity\": 0.6996721711791459,\n            \"timeToLimit\": 2.7e5,\n            \"xsTypeNum\": 65,\n            \"zbottom\": 97.3521,\n            \"ztop\": 111.80279999999999,\n        }\n\n        # add some blocks with a component\n        cls.blockList = []\n        for i in range(NUM_BLOCKS):\n            b = blocks.HexBlock(\"TestHexBlock\")\n            b.setHeight(cls.height)\n\n            cls.hexDims = {\n                \"Tinput\": 273.0,\n                \"Thot\": 273.0,\n                \"op\": 0.76,\n                \"ip\": 0.0,\n                \"mult\": 1.0,\n            }\n            h = components.Hexagon(\"fuel\", \"UZr\", **cls.hexDims)\n\n            # non-flaggy name important for testing\n            b.setType(\"igniter fuel unitst\")\n            b.add(h)\n            b.parent = cls.assembly\n            b.setName(b.makeName(cls.assembly.getNum(), i))\n            cls.assembly.add(b)\n            cls.blockList.append(b)\n\n        cls.r.core.add(cls.assembly)\n        cls.assembly.calculateZCoords()\n\n    def test_isOnWhichSymmetryLine(self):\n        line = self.assembly.isOnWhichSymmetryLine()\n        self.assertEqual(line, 2)\n\n    def test_iter(self):\n        cur = []\n        for block in self.assembly:\n            cur.append(block)\n        ref = self.blockList\n        self.assertEqual(cur, ref)\n\n    def test_len(self):\n        cur = len(self.assembly)\n        ref = 
len(self.blockList)\n        self.assertEqual(cur, ref)\n\n    def test_getName(self):\n        cur = self.assembly.getName()\n        ref = self.name\n        self.assertEqual(cur, ref)\n\n    def test_getNum(self):\n        cur = self.assembly.getNum()\n        ref = self.assemNum\n        self.assertEqual(cur, ref)\n\n    def test_getLocation(self):\n        \"\"\"\n        Test for getting string location of assembly.\n\n        .. test:: Assembly location is retrievable.\n            :id: T_ARMI_ASSEM_POSI0\n            :tests: R_ARMI_ASSEM_POSI\n        \"\"\"\n        cur = self.assembly.getLocation()\n        ref = str(\"005-003\")\n        self.assertEqual(cur, ref)\n\n    def test_getArea(self):\n        \"\"\"Tests area calculation for hex assembly.\"\"\"\n        # Default case: for assemblies with no blocks\n        a = HexAssembly(\"TestAssem\", assemNum=10)\n        self.assertIsNone(a.getArea())\n\n        # more realistic case: a hex block/assembly\n        cur = self.assembly.getArea()\n        ref = math.sqrt(3) / 2.0 * self.hexDims[\"op\"] ** 2\n        self.assertAlmostEqual(cur, ref, places=6)\n\n    def test_getVolume(self):\n        \"\"\"Tests volume calculation for hex assembly.\"\"\"\n        cur = self.assembly.getVolume()\n        ref = math.sqrt(3) / 2.0 * self.hexDims[\"op\"] ** 2 * self.height * NUM_BLOCKS\n        places = 6\n        self.assertAlmostEqual(cur, ref, places=places)\n\n    def test_getAxialMesh(self):\n        cur = self.assembly.getAxialMesh()\n        ref = [i * self.height + self.height for i in range(NUM_BLOCKS)]\n        self.assertEqual(cur, ref)\n\n    def test_calculateZCoords(self):\n        self.assembly.calculateZCoords()\n\n        places = 6\n        bottom = 0.0\n        for b in self.assembly:\n            top = bottom + self.height\n\n            cur = b.p.z\n            ref = bottom + (top - bottom) / 2.0\n            self.assertAlmostEqual(cur, ref, places=places)\n\n            cur = b.p.zbottom\n   
         ref = bottom\n            self.assertAlmostEqual(cur, ref, places=places)\n\n            cur = b.p.ztop\n            ref = top\n            self.assertAlmostEqual(cur, ref, places=places)\n\n            bottom = top\n\n    def test_getTotalHeight(self):\n        cur = self.assembly.getTotalHeight()\n        ref = self.height * NUM_BLOCKS\n        places = 6\n        self.assertAlmostEqual(cur, ref, places=places)\n\n    def test_getHeight(self):\n        \"\"\"Test height of assembly calculation.\"\"\"\n        cur = self.assembly.getHeight()\n        ref = self.height * NUM_BLOCKS\n        places = 6\n        self.assertAlmostEqual(cur, ref, places=places)\n\n    def test_getReactiveHeight(self):\n        self.assembly[2].getComponent(Flags.FUEL).adjustMassEnrichment(0.01)\n        self.assembly[2].setNumberDensity(\"PU239\", 0.0)\n        bottomElevation, reactiveHeight = self.assembly.getReactiveHeight(enrichThresh=0.02)\n        self.assertEqual(bottomElevation, 0.0)\n        self.assertEqual(reactiveHeight, 20.0)\n\n    def test_hasFlags(self):\n        self.assembly.setType(\"fuel\")\n\n        cur = self.assembly.hasFlags(Flags.FUEL)\n        self.assertTrue(cur)\n\n    def test_getBlocks(self):\n        cur = self.assembly.getBlocks()\n        ref = self.blockList\n        self.assertEqual(cur, ref)\n\n    def test_getFirstBlock(self):\n        cur = self.assembly.getFirstBlock()\n        ref = self.blockList[0]\n        self.assertAlmostEqual(cur, ref)\n\n    def test_getFirstBlockByType(self):\n        b = self.assembly.getFirstBlockByType(\"igniter fuel unitst\")\n        self.assertEqual(b.getType(), \"igniter fuel unitst\")\n        b = self.assembly.getFirstBlockByType(\"i do not exist\")\n        self.assertIsNone(b)\n\n    def test_getDim(self):\n        \"\"\"Tests dimensions are retrievable.\"\"\"\n        # quick test, if there are no blocks\n        a = HexAssembly(\"TestAssem\", assemNum=10)\n        
self.assertIsNone(a.getDim(Flags.FUEL, \"op\"))\n\n        # more interesting test, with blocks\n        cur = self.assembly.getDim(Flags.FUEL, \"op\")\n        ref = self.hexDims[\"op\"]\n        places = 6\n        self.assertAlmostEqual(cur, ref, places=places)\n\n    def test_getDominantMaterial(self):\n        cur = self.assembly.getDominantMaterial(Flags.FUEL).getName()\n        ref = \"UZr\"\n        self.assertEqual(cur, ref)\n        self.assertEqual(self.assembly.getDominantMaterial().getName(), ref)\n\n    def test_countBlocksOfType(self):\n        cur = self.assembly.countBlocksWithFlags(Flags.IGNITER | Flags.FUEL)\n        self.assertEqual(cur, 3)\n\n    def test_iteration(self):\n        \"\"\"Tests the ability to doubly-loop over assemblies (under development).\"\"\"\n        a = self.assembly\n\n        for bi, b in enumerate(a):\n            if bi == 2:\n                h = 0.0\n                for bi2, b2 in enumerate(a):\n                    if bi2 == 0:\n                        self.assertEqual(\n                            b2,\n                            a[0],\n                            msg=\"First block in new iteration is not the first block of assembly\",\n                        )\n                    h += b2.getHeight()\n\n            # make sure the loop continues with the right counter\n            self.assertEqual(\n                b,\n                a[bi],\n                msg=\"The {0}th block in the loop ({1}) is not equal to the {0}th block in the assembly {2}\".format(\n                    bi, b, \"dummy\"\n                ),\n            )\n\n    def test_getBlocksAndZ(self):\n        blocksAndCenters = self.assembly.getBlocksAndZ()\n        lastZ = -1.0\n        for b, c in blocksAndCenters:\n            self.assertIn(b, self.assembly.getBlocks())\n            self.assertGreater(c, lastZ)\n            lastZ = c\n\n        self.assertRaises(TypeError, self.assembly.getBlocksAndZ, 1.0)\n\n    def 
test_getBlocksBetweenElevations(self):\n        # assembly should have 3 blocks of 10 cm in it\n        blocksAndHeights = self.assembly.getBlocksBetweenElevations(0, 10)\n        self.assertEqual(blocksAndHeights[0], (self.assembly[0], 10.0))\n\n        blocksAndHeights = self.assembly.getBlocksBetweenElevations(0, 5.0)\n        self.assertEqual(blocksAndHeights[0], (self.assembly[0], 5.0))\n\n        blocksAndHeights = self.assembly.getBlocksBetweenElevations(1.0, 5.0)\n        self.assertEqual(blocksAndHeights[0], (self.assembly[0], 4.0))\n\n        blocksAndHeights = self.assembly.getBlocksBetweenElevations(9.0, 21.0)\n        self.assertEqual(blocksAndHeights[0], (self.assembly[0], 1.0))\n        self.assertEqual(blocksAndHeights[1], (self.assembly[1], 10.0))\n        self.assertEqual(blocksAndHeights[2], (self.assembly[2], 1.0))\n\n        blocksAndHeights = self.assembly.getBlocksBetweenElevations(-10, 1000.0)\n        self.assertEqual(len(blocksAndHeights), len(self.assembly))\n        self.assertAlmostEqual(sum([height for _b, height in blocksAndHeights]), self.assembly.getHeight())\n\n    def test_hasContinuousCoolantChannel(self):\n        self.assertFalse(self.assembly.hasContinuousCoolantChannel())\n        modifiedAssem = self.assembly\n        coolantDims = {\"Tinput\": 273.0, \"Thot\": 273.0}\n        h = components.DerivedShape(\"coolant\", \"Sodium\", **coolantDims)\n        for b in modifiedAssem:\n            b.add(h)\n        self.assertTrue(modifiedAssem.hasContinuousCoolantChannel())\n\n    def test_carestianCoordinates(self):\n        \"\"\"Check the coordinates of the assembly within the core with a CarestianGrid.\n\n        .. 
test:: Cartesian coordinates are retrievable.\n            :id: T_ARMI_ASSEM_POSI1\n            :tests: R_ARMI_ASSEM_POSI\n        \"\"\"\n        a = makeTestAssembly(\n            numBlocks=1,\n            assemNum=1,\n            spatialGrid=grids.CartesianGrid.fromRectangle(1.0, 1.0),\n        )\n        self.assertEqual(a.coords(), (2.0, 2.0))\n\n    def test_assem_hex_type(self):\n        \"\"\"Test that all children of a hex assembly are hexagons.\"\"\"\n        for b in self.assembly:\n            # For a hex assem, confirm they are of type \"Hexagon\"\n            pitch_comp_type = b.PITCH_COMPONENT_TYPE[0]\n            self.assertEqual(pitch_comp_type.__name__, \"Hexagon\")\n\n    def test_getElevationBoundariesByBlockType(self):\n        elevations = self.assembly.getElevationBoundariesByBlockType()\n        self.assertEqual(elevations, [0.0, 10.0, 10.0, 20.0, 20.0, 30.0])\n\n\nclass AssemblyTests(unittest.TestCase):\n    \"\"\"These tests of Assemblies modify the test assembly, so each unit tests needs a fresh test assembly.\"\"\"\n\n    def setUp(self):\n        self.name = \"A0015\"\n        self.assemNum = 15\n        self.height = 10\n        self.cs = settings.Settings()\n        # Print nothing to the screen that would normally go to the log.\n        runLog.setVerbosity(\"error\")\n\n        self.r = tests.getEmptyHexReactor()\n        self.r.core.symmetry = geometry.SymmetryType(geometry.DomainType.THIRD_CORE, geometry.BoundaryType.PERIODIC)\n\n        self.assembly = makeTestAssembly(NUM_BLOCKS, self.assemNum, r=self.r)\n\n        # Use these if they are needed\n        self.blockParams = {\n            \"height\": self.height,\n            \"bondRemoved\": 0.0,\n            \"envGroupNum\": 0,\n            \"buLimit\": 35,\n            \"buRate\": 0.0,\n            \"eqRegion\": -1,\n            \"id\": 212.0,\n            \"pdens\": 10.0,\n            \"percentBu\": 25.3,\n            \"power\": 100000.0,\n            \"residence\": 4.0,\n    
        \"smearDensity\": 0.6996721711791459,\n            \"timeToLimit\": 2.7e5,\n            \"xsTypeNum\": 65,\n            \"zbottom\": 97.3521,\n            \"ztop\": 111.80279999999999,\n        }\n\n        # add some blocks with a component\n        self.blockList = []\n        for i in range(NUM_BLOCKS):\n            b = blocks.HexBlock(\"TestHexBlock\")\n            b.setHeight(self.height)\n\n            self.hexDims = {\n                \"Tinput\": 273.0,\n                \"Thot\": 273.0,\n                \"op\": 0.76,\n                \"ip\": 0.0,\n                \"mult\": 1.0,\n            }\n            h = components.Hexagon(\"fuel\", \"UZr\", **self.hexDims)\n\n            # non-flaggy name important for testing\n            b.setType(\"igniter fuel unitst\")\n            b.add(h)\n            b.parent = self.assembly\n            b.setName(b.makeName(self.assembly.getNum(), i))\n            self.assembly.add(b)\n            self.blockList.append(b)\n\n        self.r.core.add(self.assembly)\n        self.assembly.calculateZCoords()\n\n    def test_notesParameter(self):\n        self.assertEqual(self.assembly.p.notes, \"\")\n\n        with self.assertRaises(ValueError):\n            # try to assign a non-string\n            self.assembly.p.notes = 1\n\n        note = \"This is a short, acceptable not about the assembly\"\n        self.assembly.p.notes = note\n        self.assertEqual(self.assembly.p.notes, note)\n\n        tooLongNote = \"a\" * 1001\n\n        self.assembly.p.notes = tooLongNote\n        self.assertEqual(self.assembly.p.notes, tooLongNote[0:1000])\n\n    def test_append(self):\n        b = blocks.HexBlock(\"TestBlock\")\n        self.blockList.append(b)\n        self.assembly.append(b)\n        cur = self.assembly.getBlocks()\n        ref = self.blockList\n        self.assertEqual(cur, ref)\n\n    def test_extend(self):\n        blockList = []\n        for _ in range(2):\n            b = blocks.HexBlock(\"TestBlock\")\n            
self.blockList.append(b)\n            blockList.append(b)\n\n        self.assembly.extend(blockList)\n        cur = self.assembly.getBlocks()\n        ref = self.blockList\n        self.assertEqual(cur, ref)\n\n        for c in self.assembly:\n            self.assertIs(c.parent, self.assembly)\n\n    def test_add(self):\n        a = makeTestAssembly(1, 1)\n\n        # successfully add some Blocks to an Assembly\n        for n in range(3):\n            self.assertEqual(len(a), n)\n            b = blocks.HexBlock(\"TestBlock\")\n            a.add(b)\n            self.assertIn(b, a)\n            self.assertEqual(b.parent, a)\n            self.assertEqual(len(a), n + 1)\n\n        with self.assertRaises(TypeError):\n            a.add(blocks.CartesianBlock(\"Test Cart Block\"))\n\n    def test_moveTo(self):\n        ref = self.r.core.spatialGrid.getLocatorFromRingAndPos(3, 10)\n        i, j = grids.HexGrid.getIndicesFromRingAndPos(3, 10)\n        locator = self.r.core.spatialGrid[i, j, 0]\n        self.assembly.moveTo(locator)\n\n        cur = self.assembly.spatialLocator\n        self.assertEqual(cur, ref)\n\n    def test_scaleParamsWhenMoved(self):\n        \"\"\"Volume integrated parameters must be scaled when an assembly is placed on a core boundary.\"\"\"\n        with patch.object(self.assembly.p.paramDefs[\"chargeFis\"], \"location\", ParamLocation.VOLUME_INTEGRATED):\n            # patch makes chargeFis look volume integrated\n            assemblyParams = {\"chargeFis\": 6.0, \"chargeTime\": 2}\n            blockParams = {\n                # volume integrated parameters\n                \"massHmBOL\": 9.0,\n                \"molesHmBOL\": np.array([[1, 2, 3], [4, 5, 6]]),  # ndarray for testing\n                \"adjMgFlux\": [1, 2, 3],  # Should normally be an ndarray, list for testing\n                \"lastMgFlux\": \"foo\",  # Should normally be an ndarray, str for testing\n            }\n            self.assembly.p.update(assemblyParams)\n            for b 
in self.assembly.iterBlocks(Flags.FUEL):\n                b.p.update(blockParams)\n\n            i, j = grids.HexGrid.getIndicesFromRingAndPos(1, 1)\n            locator = self.r.core.spatialGrid[i, j, 0]\n            self.assertEqual(self.assembly.getSymmetryFactor(), 1)\n            self.assembly.moveTo(locator)\n            self.assertEqual(self.assembly.getSymmetryFactor(), 3)\n            for b in self.assembly.iterBlocks(Flags.FUEL):\n                # float\n                assert_allclose(b.p[\"massHmBOL\"] / blockParams[\"massHmBOL\"], 1 / 3)\n                # np.ndarray\n                assert_allclose(b.p[\"molesHmBOL\"] / blockParams[\"molesHmBOL\"], 1 / 3)\n                # list\n                assert_allclose(np.array(b.p[\"adjMgFlux\"]) / np.array(blockParams[\"adjMgFlux\"]), 1 / 3)\n                # string\n                self.assertEqual(b.p[\"lastMgFlux\"], blockParams[\"lastMgFlux\"])\n            self.assertEqual(self.assembly.p[\"chargeFis\"] / assemblyParams[\"chargeFis\"], 1 / 3)\n            self.assertEqual(self.assembly.p[\"chargeTime\"] / assemblyParams[\"chargeTime\"], 1)\n\n    def test_adjustResolution(self):\n        # Make a second assembly with 4 times the resolution\n        assemNum2 = self.assemNum * 4\n        height2 = self.height / 4.0\n        assembly2 = makeTestAssembly(assemNum2, assemNum2)\n\n        # add some blocks with a component\n        for _ in range(assemNum2):\n            b = blocks.HexBlock(\"TestBlock\")\n            b.setHeight(height2)\n            assembly2.add(b)\n\n        self.assembly.adjustResolution(assembly2)\n\n        cur = len(self.assembly.getBlocks())\n        ref = 4.0 * len(self.blockList)\n        self.assertEqual(cur, ref)\n\n        cur = self.assembly.getBlocks()[0].getHeight()\n        ref = self.height / 4.0\n        places = 6\n        self.assertAlmostEqual(cur, ref, places=places)\n\n    def test_getFissileMass(self):\n        for b in self.assembly:\n            b.p.massHmBOL = 
b.getHMMass()\n            b.p.enrichmentBOL = b.getFissileMassEnrich()\n        cur = self.assembly.getFissileMass()\n        ref = sum(bi.getMass([\"U235\", \"PU239\"]) for bi in self.assembly)\n        self.assertAlmostEqual(cur, ref)\n\n    def test_getMass(self):\n        mass0 = self.assembly.getMass(\"U235\")\n        mass1 = sum(bi.getMass(\"U235\") for bi in self.assembly)\n        self.assertAlmostEqual(mass0, mass1)\n\n        fuelBlock = next(self.assembly.iterBlocks(Flags.FUEL))\n        blockU35Mass = fuelBlock.getMass(\"U235\")\n        fuelBlock.setMass(\"U235\", 2 * blockU35Mass)\n        self.assertAlmostEqual(fuelBlock.getMass(\"U235\"), blockU35Mass * 2)\n        self.assertAlmostEqual(self.assembly.getMass(\"U235\"), mass0 + blockU35Mass)\n\n        fuelBlock.setMass(\"U238\", 0.0)\n        self.assertAlmostEqual(blockU35Mass * 2, fuelBlock.getMass(\"U235\"))\n\n    def test_getAge(self):\n        res = 5.0\n        for b in self.assembly:\n            b.p.residence = res\n\n        cur = self.assembly.getAge()\n        ref = res\n        places = 6\n        self.assertAlmostEqual(cur, ref, places=places)\n\n    def test_makeAxialSnapList(self):\n        # Make a second assembly with 4 times the resolution\n        assemNum2 = self.assemNum * 4\n        height2 = self.height / 4.0\n        assembly2 = makeTestAssembly(assemNum2, assemNum2)\n\n        # add some blocks with a component\n        for _i in range(assemNum2):\n            self.hexDims = {\n                \"Tinput\": 273.0,\n                \"Thot\": 273.0,\n                \"op\": 0.76,\n                \"ip\": 0.0,\n                \"mult\": 1.0,\n            }\n\n            h = components.Hexagon(\"fuel\", \"UZr\", **self.hexDims)\n            b = blocks.HexBlock(\"fuel\")\n            b.setType(\"igniter fuel\")\n            b.add(h)\n            b.setHeight(height2)\n            assembly2.add(b)\n\n        self.assembly.makeAxialSnapList(assembly2)\n\n        cur = []\n        
for b in self.assembly:\n            cur.append(b.p.topIndex)\n\n        ref = [3, 7, 11]\n        self.assertEqual(cur, ref)\n\n    def test_snapAxialMeshToReference(self):\n        ref = [11, 22, 33]\n        for b, i in zip(self.assembly, range(self.assemNum)):\n            b.p.topIndex = i\n\n        self.assembly.setBlockMesh(ref)\n\n        cur = []\n        for b in self.assembly:\n            cur.append(b.p.ztop)\n\n        self.assertEqual(cur, ref)\n\n    def test_updateFromAssembly(self):\n        assembly2 = makeTestAssembly(self.assemNum, self.assemNum)\n\n        params = {}\n        params[\"maxPercentBu\"] = 30.0\n        params[\"numMoves\"] = 5.0\n        params[\"maxPercentBu\"] = 0\n        params[\"timeToLimit\"] = 2.7e5\n        params[\"arealPd\"] = 110.0\n        params[\"maxDpaPeak\"] = 14.0\n        params[\"kInf\"] = 60.0\n\n        for key, param in params.items():\n            assembly2.p[key] = param\n\n        self.assembly.updateParamsFrom(assembly2)\n\n        for key, param in params.items():\n            cur = self.assembly.p[key]\n            ref = param\n            self.assertEqual(cur, ref)\n\n    def _setup_blueprints(self, filename=\"refSmallReactor.yaml\"):\n        # need this for the getAllNuclides call\n        with directoryChangers.DirectoryChanger(TEST_ROOT):\n            newSettings = {CONF_LOADING_FILE: filename}\n            self.cs = self.cs.modified(newSettings=newSettings)\n\n            with open(self.cs[CONF_LOADING_FILE], \"r\") as y:\n                y = textProcessors.resolveMarkupInclusions(y, pathlib.Path(self.cs.inputDirectory))\n                self.r.blueprints = blueprints.Blueprints.load(y)\n\n            self.r.blueprints._prepConstruction(self.cs)\n\n    def test_duplicate(self):\n        self._setup_blueprints()\n\n        # Perform the copy\n        assembly2 = copy.deepcopy(self.assembly)\n\n        for refBlock, curBlock in zip(self.assembly, assembly2):\n            numNucs = 0\n            
for nuc in self.assembly.getAncestor(\n                lambda c: isinstance(c, reactors.Reactor)\n            ).blueprints.allNuclidesInProblem:\n                numNucs += 1\n                # Block level density\n                ref = refBlock.getNumberDensity(nuc)\n                cur = curBlock.getNumberDensity(nuc)\n                self.assertEqual(cur, ref)\n\n            self.assertGreater(numNucs, 5)\n\n            refFracs = refBlock.getVolumeFractions()\n            curFracs = curBlock.getVolumeFractions()\n\n            # Block level area fractions\n            for ref, cur in zip(refFracs, curFracs):\n                ref = ref[1]\n                cur = cur[1]\n                places = 6\n                self.assertAlmostEqual(cur, ref, places=places)\n\n            # Block level params\n            for refParam in refBlock.p:\n                if refParam == \"serialNum\":\n                    continue\n                ref = refBlock.p[refParam]\n                cur = curBlock.p[refParam]\n                if isinstance(cur, np.ndarray):\n                    self.assertTrue((cur == ref).all())\n                else:\n                    if refParam == \"location\":\n                        ref = str(ref)\n                        cur = str(cur)\n                    self.assertEqual(\n                        cur,\n                        ref,\n                        msg=\"The {} param differs: {} vs. 
{}\".format(refParam, cur, ref),\n                    )\n\n        # Block level height\n        for b, b2 in zip(self.assembly, assembly2):\n            ref = b.getHeight()\n            cur = b2.getHeight()\n            self.assertEqual(cur, ref)\n            assert_allclose(b.spatialLocator.indices, b2.spatialLocator.indices)\n\n        # Assembly level params\n        for param in self.assembly.p:\n            if param == \"serialNum\":\n                continue\n            ref = self.assembly.p[param]\n            cur = assembly2.p[param]\n            if isinstance(cur, np.ndarray):\n                assert_allclose(cur, ref)\n            else:\n                self.assertEqual(cur, ref)\n\n        # Block level core and parent\n        for b in assembly2:\n            self.assertEqual(b.core, None)\n            self.assertEqual(b.parent, assembly2)\n\n    def test_pinPlenumVolume(self):\n        \"\"\"Test the volume of a pin in the assembly's plenum.\"\"\"\n        pinPlenumVolume = 5.951978e-05\n\n        self._setup_blueprints(\"refSmallReactorBase.yaml\")\n        assembly = self.r.blueprints.assemblies.get(\"igniter fuel\")\n        self.assertAlmostEqual(pinPlenumVolume, assembly.getPinPlenumVolumeInCubicMeters())\n\n    def test_renameBlocksAccordingToAssemblyNum(self):\n        self.assembly.p.assemNum = 55\n        self.assembly.renameBlocksAccordingToAssemblyNum()\n        self.assertIn(\"{0:04d}\".format(self.assembly.getNum()), self.assembly[1].getName())\n\n    def test_getBlockData(self):\n        paramDict = {\n            \"timeToLimit\": 40.0,\n            \"power\": 10000.0,\n            \"envGroup\": 4,\n            \"residence\": 3.145,\n            \"eqRegion\": -1,\n            \"id\": 299.0,\n            \"bondRemoved\": 0.337,\n            \"buRate\": 42.0,\n        }\n        # Set some params\n        for b in self.assembly:\n            for param, paramVal in paramDict.items():\n                b.p[param] = paramVal\n\n        for 
param in paramDict:\n            cur = list(self.assembly.getChildParamValues(param))\n            ref = []\n            for i, b in enumerate(self.blockList):\n                ref.append(self.blockList[i].p[param])\n            self.assertAlmostEqual(cur, ref, places=6)\n\n    def test_getMaxParam(self):\n        for bi, b in enumerate(self.assembly):\n            b.p.power = bi\n\n        self.assertAlmostEqual(self.assembly.getMaxParam(\"power\"), len(self.assembly) - 1)\n\n    def test_getElevationsMatchingParamValue(self):\n        self.assembly[0].p.power = 0.0\n        self.assembly[1].p.power = 20.0\n        self.assembly[2].p.power = 10.0\n\n        heights = self.assembly.getElevationsMatchingParamValue(\"power\", 15.0)\n        self.assertListEqual(heights, [12.5, 20.0])\n\n    def test_calcAvgParam(self):\n        nums = []\n        for b in self.assembly:\n            nums.append(random.random())\n            b.p.power = nums[-1]\n        self.assertGreater(len(nums), 2)\n        self.assertAlmostEqual(self.assembly.calcAvgParam(\"power\"), sum(nums) / len(nums))\n\n    def test_calcTotalParam(self):\n        # Remake original assembly\n        self.assembly = makeTestAssembly(self.assemNum, self.assemNum)\n\n        # add some blocks with a component\n        for i in range(self.assemNum):\n            b = blocks.HexBlock(\"TestBlock\")\n\n            # Set the 1st block to have higher params than the rest.\n            self.blockParamsTemp = {}\n            for key, val in self.blockParams.items():\n                # Iterate with i in self.assemNum, so higher assemNums get the high values.\n                if key != \"xsTypeNum\":  # must keep valid\n                    b.p[key] = self.blockParamsTemp[key] = val * i\n\n            b.setHeight(self.height)\n            b.setType(\"fuel\")\n\n            self.hexDims = {\"Tinput\": 273.0, \"Thot\": 273.0, \"op\": 0.76, \"ip\": 0.0, \"mult\": 1.0}\n\n            h = components.Hexagon(\"intercoolant\", 
\"Sodium\", **self.hexDims)\n            b.add(h)\n\n            self.assembly.add(b)\n\n        for param in self.blockParamsTemp:\n            tot = 0.0\n            for b in self.assembly:\n                try:\n                    tot += b.p[param]\n                except TypeError:\n                    pass\n            ref = tot\n\n            try:\n                cur = self.assembly.calcTotalParam(param)\n                places = 6\n                self.assertAlmostEqual(cur, ref, places=places)\n            except TypeError:\n                pass\n\n    def test_reattach(self):\n        # Remake original assembly\n        self.assembly = makeTestAssembly(self.assemNum, self.assemNum)\n        self.assertEqual(0, len(self.assembly))\n\n        # add some blocks with a component\n        for i in range(self.assemNum):\n            b = blocks.HexBlock(\"TestBlock\")\n\n            # Set the 1st block to have higher params than the rest.\n            self.blockParamsTemp = {}\n            for key, val in self.blockParams.items():\n                # Iterate with i in self.assemNum, so higher assemNums get the high values.\n                b.p[key] = self.blockParamsTemp[key] = val * (i + 1)\n\n            b.setHeight(self.height)\n            b.setType(\"fuel\")\n\n            self.hexDims = {\n                \"Tinput\": 273.0,\n                \"Thot\": 273.0,\n                \"op\": 0.76,\n                \"ip\": 0.0,\n                \"mult\": 1.0,\n            }\n\n            h = components.Hexagon(\"intercoolant\", \"Sodium\", **self.hexDims)\n            b.add(h)\n\n            self.assembly.add(b)\n\n        self.assertEqual(self.assemNum, len(self.assembly))\n        for b in self.assembly:\n            self.assertEqual(\"fuel\", b.getType())\n\n    def test_reestablishBlockOrder(self):\n        self.assertEqual(self.assembly.spatialLocator.indices[0], 2)\n        self.assertEqual(self.assembly[0].spatialLocator.getRingPos(), (5, 3))\n        
self.assertEqual(self.assembly[0].spatialLocator.indices[2], 0)\n        axialIndices = [2, 1, 0]\n        for ai, b in zip(axialIndices, self.assembly):\n            b.spatialLocator = self.assembly.spatialGrid[0, 0, ai]\n        self.assembly.reestablishBlockOrder()\n        cur = []\n        for b in self.assembly:\n            cur.append(b.getLocation())\n        ref = [\"005-003-000\", \"005-003-001\", \"005-003-002\"]\n        self.assertEqual(cur, ref)\n\n    def test_getParamValuesAtZ(self):\n        # single value param\n        for b, temp in zip(self.assembly, [80, 85, 90]):\n            b.p.percentBu = temp\n        percentBuDef = b.p.paramDefs[\"percentBu\"]\n        originalLoc = percentBuDef.location\n        try:\n            self.assertAlmostEqual(87.5, self.assembly.getParamValuesAtZ(\"percentBu\", 20.0))\n            percentBuDef.location = parameters.ParamLocation.BOTTOM\n            self.assertAlmostEqual(\n                82.5,\n                self.assembly.getParamValuesAtZ(\"percentBu\", 5.0, fillValue=\"extend\"),\n            )\n            percentBuDef.location = parameters.ParamLocation.TOP\n            self.assertAlmostEqual(82.5, self.assembly.getParamValuesAtZ(\"percentBu\", 15.0))\n            for b in self.assembly:\n                b.p.percentBu = None\n            self.assertTrue(np.isnan(self.assembly.getParamValuesAtZ(\"percentBu\", 25.0)))\n\n            # multiDimensional param\n            for b, flux in zip(self.assembly, [[1, 10], [2, 8], [3, 6]]):\n                b.p.mgFlux = flux\n            self.assertTrue(np.allclose([2.5, 7.0], self.assembly.getParamValuesAtZ(\"mgFlux\", 20.0)))\n            self.assertTrue(np.allclose([1.5, 9.0], self.assembly.getParamValuesAtZ(\"mgFlux\", 10.0)))\n            for b in self.assembly:\n                b.p.mgFlux = [0.0] * 2\n            self.assertTrue(np.allclose([0.0, 0.0], self.assembly.getParamValuesAtZ(\"mgFlux\", 10.0)))\n\n            # single value param at corner\n          
  for b, temp in zip(self.assembly, [100, 200, 300]):\n                b.p.THcornTemp = [temp + iCorner for iCorner in range(6)]\n            value = self.assembly.getParamValuesAtZ(\"THcornTemp\", 20.0)\n            self.assertTrue(np.allclose([300, 301, 302, 303, 304, 305], value))\n        finally:\n            percentBuDef.location = originalLoc\n\n    def test_averagePlenumTemperature(self):\n        \"\"\"Test an assembly's average plenum temperature with a single block outlet.\"\"\"\n        averagePlenumTemp = 42.0\n        plenumBlock = makeTestAssembly(1, 2, grids.CartesianGrid.fromRectangle(1.0, 1.0))\n\n        plenumBlock.setType(\"plenum\", Flags.PLENUM)\n        plenumBlock.p.THcoolantOutletT = averagePlenumTemp\n        self.assembly.setBlockMesh([10.0, 20.0, 30.0], conserveMassFlag=\"auto\")\n        self.assembly.append(plenumBlock)\n\n        self.assertEqual(averagePlenumTemp, self.assembly.getAveragePlenumTemperature())\n\n    def test_rotate(self):\n        \"\"\"Test rotation of an assembly spatial objects.\n\n        .. 
test:: An assembly can be rotated about its z-axis.\n            :id: T_ARMI_ROTATE_HEX_ASSEM\n            :tests: R_ARMI_ROTATE_HEX\n        \"\"\"\n        a = makeTestAssembly(1, 1)\n        b = blocks.HexBlock(\"TestBlock\")\n        b.p.THcornTemp = [400, 450, 500, 550, 600, 650]\n        rotTemp = [600, 650, 400, 450, 500, 550]\n        b.p.displacementX = 0\n        b.p.displacementY = 1\n        rotX = -math.sqrt(3) / 2\n        rotY = -0.5\n        a.add(b)\n        a.rotate(math.radians(120))\n        # test list rotation\n        b = a[0]\n        self.assertEqual(b.p.THcornTemp, rotTemp)\n        self.assertAlmostEqual(b.p.displacementX, rotX)\n        self.assertAlmostEqual(b.p.displacementY, rotY)\n\n        b.p.THcornTemp = np.array([400, 450, 500, 550, 600, 650])\n        rotTemp = np.array([600, 650, 400, 450, 500, 550])\n        a.rotate(math.radians(120))\n        # test np.ndarray rotation\n        for i in range(len(b.p.THcornTemp)):\n            self.assertEqual(b.p.THcornTemp[i], rotTemp[i])\n\n        # test that floats and ints are left alone\n        b.p.THcornTemp = 3\n        a.rotate(math.radians(120))\n        self.assertEqual(b.p.THcornTemp, 3)\n        b.p.THcornTemp = 4.0\n        a.rotate(math.radians(120))\n        self.assertEqual(b.p.THcornTemp, 4.0)\n\n        # check that TypeError is raised for unexpected data type\n        b.p.THcornTemp = \"bad data\"\n        with self.assertRaises(TypeError):\n            a.rotate(math.radians(120))\n\n        # check that list of len != 6 ends up in runlog warning\n        # list len=5\n        b.p.THcornTemp = [400, 450, 500, 550, 600]\n        with mockRunLogs.BufferLog() as mock:\n            self.assertEqual(\"\", mock.getStdout())\n            a.rotate(math.radians(120))\n            self.assertIn(\"No rotation method defined\", mock.getStdout())\n        # np.ndarray len=5\n        b.p.THcornTemp = np.array([400, 450, 500, 550, 600])\n        with mockRunLogs.BufferLog() as mock:\n 
           self.assertEqual(\"\", mock.getStdout())\n            a.rotate(math.radians(120))\n            self.assertIn(\"No rotation method defined\", mock.getStdout())\n        # list len=7\n        b.p.THcornTemp = [400, 450, 500, 550, 600, 650, 700]\n        with mockRunLogs.BufferLog() as mock:\n            self.assertEqual(\"\", mock.getStdout())\n            a.rotate(math.radians(120))\n            self.assertIn(\"No rotation method defined\", mock.getStdout())\n        # np.ndarray len=7\n        b.p.THcornTemp = np.array([400, 450, 500, 550, 600, 650, 700])\n        with mockRunLogs.BufferLog() as mock:\n            self.assertEqual(\"\", mock.getStdout())\n            a.rotate(math.radians(120))\n            self.assertIn(\"No rotation method defined\", mock.getStdout())\n\n        with self.assertRaisesRegex(ValueError, expected_regex=\"60 degree\"):\n            a.rotate(math.radians(40))\n\n    def test_assemBlockTypes(self):\n        \"\"\"Test that all children of an assembly are blocks, ordered from top to bottom.\n\n        .. 
test:: Validate child types of assembly are blocks, ordered from top to bottom.\n            :id: T_ARMI_ASSEM_BLOCKS\n            :tests: R_ARMI_ASSEM_BLOCKS\n        \"\"\"\n        coords = []\n        for b in self.assembly.iterBlocks():\n            # Confirm children are blocks\n            self.assertIsInstance(b, blocks.Block)\n\n            # get coords from the child blocks\n            coords.append(b.getLocation())\n\n        # get the Z-coords for each block\n        zCoords = [int(c.split(\"-\")[-1]) for c in coords]\n\n        # verify the blocks are ordered top-to-bottom, vertically\n        for i in range(1, len(zCoords)):\n            self.assertGreater(zCoords[i], zCoords[i - 1])\n\n    def test_getBIndexFromZIndex(self):\n        # make sure the axMesh parameters are set in our test block\n        for b in self.assembly:\n            b.p.axMesh = 1\n\n        for zIndex in range(6):\n            bIndex = self.assembly.getBIndexFromZIndex(zIndex * 0.5)\n            self.assertEqual(bIndex, math.ceil(zIndex / 2) if zIndex < 5 else -1)\n\n\nclass AssemblyInReactor_TestCase(unittest.TestCase):\n    def setUp(self):\n        self.o, self.r = test_reactors.loadTestReactor(TEST_ROOT)\n\n    def test_snapAxialMesViaBlockIgn(self):\n        \"\"\"Snap axial mesh to a reference mesh should conserve mass based on Block igniter fuel.\"\"\"\n        originalMesh = [25.0, 50.0, 75.0, 100.0, 175.0]\n        refMesh = [26.0, 52.0, 79.0, 108.0, 175.0]\n\n        grid = self.r.core.spatialGrid\n\n        # 1. 
examine mass change in igniterFuel\n\n        igniterFuel = self.r.core.childrenByLocator[grid[0, 0, 0]]\n        # gridplate, fuel, fuel, fuel, plenum\n        for b in igniterFuel.iterBlocks(Flags.FUEL):\n            fuelComp = b.getComponent(Flags.FUEL)\n            # add isotopes from clad and coolant to fuel component to test mass conservation\n            # mass should only be conserved within fuel component, not over the whole block\n            fuelComp.setNumberDensity(\"FE56\", 1e-10)\n            fuelComp.setNumberDensity(\"NA23\", 1e-10)\n        b = igniterFuel[0]\n        coolantNucs = b.getComponent(Flags.COOLANT).getNuclides()\n        coolMass = 0\n        for nuc in coolantNucs:\n            coolMass += b.getMass(nuc)\n        igniterMassGrid = b.getMass() - coolMass\n        igniterMassGridTotal = b.getMass()\n\n        b = igniterFuel[1]\n        igniterHMMass1 = b.getHMMass()\n        igniterZircMass1 = b.getMass(\"ZR\")\n        igniterFuelBlockMass = b.getMass()\n        igniterDuctMass = b.getComponent(Flags.DUCT).getMass()\n        igniterCoolMass = b.getComponent(Flags.COOLANT).getMass()\n\n        coolMass = 0\n        b = igniterFuel[4]\n        for nuc in coolantNucs:\n            coolMass += b.getMass(nuc)\n        igniterPlenumMass = b.getMass() - coolMass\n\n        # expand the core to the new reference mesh\n        for a in self.r.core:\n            a.setBlockMesh(refMesh, conserveMassFlag=\"auto\")\n\n        # 2. 
check igniter mass after expansion\n\n        # gridplate, fuel, fuel, fuel, plenum\n        b = igniterFuel[0]\n        coolantNucs = b.getComponent(Flags.COOLANT).getNuclides()\n        coolMass = 0\n        for nuc in coolantNucs:\n            coolMass += b.getMass(nuc)\n        igniterMassGridAfterExpand = b.getMass() - coolMass\n\n        b = igniterFuel[1]\n        igniterHMMass1AfterExpand = b.getHMMass()\n        igniterZircMass1AfterExpand = b.getMass(\"ZR\")\n        igniterDuctMassAfterExpand = b.getComponent(Flags.DUCT).getMass()\n        igniterCoolMassAfterExpand = b.getComponent(Flags.COOLANT).getMass()\n\n        coolMass = 0\n        b = igniterFuel[4]\n        for nuc in coolantNucs:\n            coolMass += b.getMass(nuc)\n        igniterPlenumMassAfterExpand = b.getMass() - coolMass\n\n        self.assertAlmostEqual(igniterMassGrid, igniterMassGridAfterExpand, 7)\n        self.assertAlmostEqual(igniterHMMass1, igniterHMMass1AfterExpand, 7)\n        self.assertAlmostEqual(igniterZircMass1, igniterZircMass1AfterExpand, 7)\n        # demonstrate that the duct and coolant mass are not conserved.\n        # number density stays constant, mass is scaled by ratio of new to old height\n        self.assertAlmostEqual(igniterDuctMass, igniterDuctMassAfterExpand * 25.0 / 26.0, 7)\n        self.assertAlmostEqual(igniterCoolMass, igniterCoolMassAfterExpand * 25.0 / 26.0, 7)\n        # Note the masses are linearly different by the amount that the plenum shrunk\n        self.assertAlmostEqual(igniterPlenumMass, igniterPlenumMassAfterExpand * 75 / 67.0, 7)\n\n        # Shrink the core back to the original mesh size to see if mass is conserved\n        for a in self.r.core:\n            a.setBlockMesh(originalMesh, conserveMassFlag=\"auto\")\n\n        # 3. 
check igniter mass after shrink to original\n\n        # gridplate, fuel, fuel, fuel, plenum\n        b = igniterFuel[0]\n        coolantNucs = b.getComponent(Flags.COOLANT).getNuclides()\n        coolMass = 0\n        for nuc in coolantNucs:\n            coolMass += b.getMass(nuc)\n        igniterMassGridAfterShrink = b.getMass() - coolMass\n        igniterMassGridTotalAfterShrink = b.getMass()\n\n        b = igniterFuel[1]\n        igniterHMMass1AfterShrink = b.getHMMass()\n        igniterZircMass1AfterShrink = b.getMass(\"ZR\")\n        igniterFuelBlockMassAfterShrink = b.getMass()\n        igniterDuctMassAfterShrink = b.getComponent(Flags.DUCT).getMass()\n        igniterCoolMassAfterShrink = b.getComponent(Flags.COOLANT).getMass()\n\n        coolMass = 0\n        b = igniterFuel[4]\n        for nuc in coolantNucs:\n            coolMass += b.getMass(nuc)\n        igniterPlenumMassAfterShrink = b.getMass() - coolMass\n\n        self.assertAlmostEqual(igniterMassGrid, igniterMassGridAfterShrink, 7)\n        self.assertAlmostEqual(igniterMassGridTotal, igniterMassGridTotalAfterShrink, 7)\n        self.assertAlmostEqual(igniterHMMass1, igniterHMMass1AfterShrink, 7)\n        self.assertAlmostEqual(igniterZircMass1, igniterZircMass1AfterShrink, 7)\n        self.assertAlmostEqual(igniterFuelBlockMass, igniterFuelBlockMassAfterShrink, 7)\n        self.assertAlmostEqual(igniterDuctMass, igniterDuctMassAfterShrink, 7)\n        self.assertAlmostEqual(igniterCoolMass, igniterCoolMassAfterShrink, 7)\n        self.assertAlmostEqual(igniterPlenumMass, igniterPlenumMassAfterShrink, 7)\n\n    def test_snapAxialMeshViaBlockShield(self):\n        \"\"\"Snap axial mesh to a reference mesh should conserve mass based on Block shield.\"\"\"\n        originalMesh = [25.0, 50.0, 75.0, 100.0, 175.0]\n        refMesh = [26.0, 52.0, 79.0, 108.0, 175.0]\n\n        # access the shield in ring 9, pos 2\n        grid = self.r.core.spatialGrid\n        i, j = grid.getIndicesFromRingAndPos(9, 
2)\n\n        # 1. examine mass change in radial shield\n\n        a = self.r.core.childrenByLocator[grid[i, j, 0]]\n        # gridplate, axial shield, axial shield, axial shield, plenum\n        b = a[0]\n        coolantNucs = b.getComponent(Flags.COOLANT).getNuclides()\n        coolMass = 0\n        for nuc in coolantNucs:\n            coolMass += b.getMass(nuc)\n        shieldMassGrid = b.getMass() - coolMass\n\n        b = a[1]\n        coolantNucs = b.getComponent(Flags.COOLANT).getNuclides()\n        coolMass = 0\n        for nuc in coolantNucs:\n            coolMass += b.getMass(nuc)\n        shieldShieldMass = b.getMass() - coolMass\n\n        b = a[4]\n        coolantNucs = b.getComponent(Flags.COOLANT).getNuclides()\n        coolMass = 0\n        for nuc in coolantNucs:\n            coolMass += b.getMass(nuc)\n        shieldPlenumMass = b.getMass() - coolMass\n\n        # expand the core to the new reference mesh\n        for a in self.r.core:\n            a.setBlockMesh(refMesh, conserveMassFlag=\"auto\")\n\n        # 2. 
examine mass change in radial shield after expansion\n\n        # gridplate, axial shield, axial shield, axial shield, plenum\n        b = a[0]\n        coolantNucs = b.getComponent(Flags.COOLANT).getNuclides()\n        coolMass = 0\n        for nuc in coolantNucs:\n            coolMass += b.getMass(nuc)\n        shieldMassGridAfterExpand = b.getMass() - coolMass\n\n        b = a[1]\n        coolantNucs = b.getComponent(Flags.COOLANT).getNuclides()\n        coolMass = 0\n        for nuc in coolantNucs:\n            coolMass += b.getMass(nuc)\n        shieldShieldMassAfterExpand = b.getMass() - coolMass\n\n        b = a[4]\n        coolantNucs = b.getComponent(Flags.COOLANT).getNuclides()\n        coolMass = 0\n        for nuc in coolantNucs:\n            coolMass += b.getMass(nuc)\n        shieldPlenumMassAfterExpand = b.getMass() - coolMass\n\n        # non mass conserving expansions\n        self.assertAlmostEqual(shieldMassGrid * 26.0 / 25.0, shieldMassGridAfterExpand, 7)\n        self.assertAlmostEqual(shieldShieldMass * 26.0 / 25.0, shieldShieldMassAfterExpand, 7)\n        self.assertAlmostEqual(shieldPlenumMass, shieldPlenumMassAfterExpand * 75.0 / 67.0, 7)\n\n        # Shrink the core back to the original mesh size to see if mass is conserved\n        for a in self.r.core:\n            a.setBlockMesh(originalMesh, conserveMassFlag=\"auto\")\n\n        # 3. 
examine mass change in radial shield after shrink to original\n\n        # gridplate, axial shield, axial shield, axial shield, plenum\n        b = a[0]\n        coolantNucs = b.getComponent(Flags.COOLANT).getNuclides()\n        coolMass = 0\n        for nuc in coolantNucs:\n            coolMass += b.getMass(nuc)\n        shieldMassGridAfterShrink = b.getMass() - coolMass\n\n        b = a[1]\n        coolantNucs = b.getComponent(Flags.COOLANT).getNuclides()\n        coolMass = 0\n        for nuc in coolantNucs:\n            coolMass += b.getMass(nuc)\n        shieldShieldMassAfterShrink = b.getMass() - coolMass\n\n        b = a[4]\n        coolantNucs = b.getComponent(Flags.COOLANT).getNuclides()\n        coolMass = 0\n        for nuc in coolantNucs:\n            coolMass += b.getMass(nuc)\n        shieldPlenumMassAfterShrink = b.getMass() - coolMass\n\n        # non mass conserving expansions\n        self.assertAlmostEqual(shieldMassGrid, shieldMassGridAfterShrink, 7)\n        self.assertAlmostEqual(shieldShieldMass, shieldShieldMassAfterShrink, 7)\n        self.assertAlmostEqual(shieldPlenumMass, shieldPlenumMassAfterShrink, 7)\n\n\nclass AnnularFuelTestCase(unittest.TestCase):\n    \"\"\"Test fuel with a whole in the center.\"\"\"\n\n    def setUp(self):\n        self.cs = settings.Settings()\n        newSettings = {CONF_XS_KERNEL: \"MC2v2\"}  # don't try to expand elementals\n        self.cs = self.cs.modified(newSettings=newSettings)\n\n        bp = blueprints.Blueprints()\n        self.r = reactors.Reactor(\"test\", bp)\n        self.r.add(reactors.Core(\"Core\"))\n\n        inputStr = \"\"\"blocks:\n    ann fuel: &block_ann_fuel\n        gap:\n            shape: Circle\n            material: Void\n            Tinput: 20.0\n            Thot: 435.0\n            id: 0.0\n            mult: fuel.mult\n            od: fuel.id\n        fuel:\n            shape: Circle\n            material: UZr\n            Tinput: 20.0\n            Thot: 600.0\n            id: 
0.1\n            mult: 127\n            od: 0.8\n        gap1:\n            shape: Circle\n            material: Void\n            Tinput: 20.0\n            Thot: 435.0\n            id: fuel.od\n            mult: fuel.mult\n            od: clad.id\n        clad:\n            shape: Circle\n            material: HT9\n            Tinput: 20.0\n            Thot: 435.0\n            id: .85\n            mult: fuel.mult\n            od: .95\n        duct: &component_type2_fuel_duct\n            shape: Hexagon\n            material: HT9\n            Tinput: 20.0\n            Thot: 435.0\n            ip: 13.00\n            op: 13.9\n            mult: 1\n        intercoolant: &component_type2_fuel_intercoolant\n            shape: Hexagon\n            material: Sodium\n            Tinput: 435.0\n            Thot: 435.0\n            ip: duct.op\n            mult: 1\n            op: 16\n        coolant: &component_type2_fuel_coolant\n            shape: DerivedShape\n            material: Sodium\n            Tinput: 435.0\n            Thot: 435.0\nassemblies:\n    heights: &standard_heights [30.0]\n    axial mesh points: &standard_axial_mesh_points [2]\n    ann fuel:\n        specifier: FA\n        blocks: &inner_igniter_fuel_blocks [*block_ann_fuel]\n        height: *standard_heights\n        axial mesh points: *standard_axial_mesh_points\n        hotChannelFactors: TWRPclad\n        xs types:  &inner_igniter_fuel_xs_types [D]\n\"\"\"\n        self.blueprints = blueprints.Blueprints.load(inputStr)\n        self.blueprints._prepConstruction(self.cs)\n\n    def test_areaCheck(self):\n        assembly = list(self.blueprints.assemblies.values())[0]\n        fuelBlock = assembly.getFirstBlock(Flags.FUEL)\n        intercoolant = fuelBlock.getComponent(Flags.INTERCOOLANT)\n\n        bpAssemblyArea = assembly.getArea()\n        actualAssemblyArea = math.sqrt(3) / 2.0 * intercoolant.p.op**2\n\n        self.assertAlmostEqual(bpAssemblyArea, actualAssemblyArea)\n"
  },
  {
    "path": "armi/reactor/tests/test_blocks.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests blocks.py.\"\"\"\n\nimport copy\nimport io\nimport logging\nimport math\nimport os\nimport shutil\nimport unittest\nfrom glob import glob\nfrom unittest.mock import MagicMock, patch\n\nimport numpy as np\nfrom numpy.testing import assert_allclose, assert_array_equal\n\nfrom armi import materials, runLog, settings, tests\nfrom armi.nucDirectory import nucDir\nfrom armi.nucDirectory.nuclideBases import NuclideBases\nfrom armi.nuclearDataIO import xsCollections\nfrom armi.nuclearDataIO.cccc import isotxs\nfrom armi.physics.neutronics import GAMMA, NEUTRON\nfrom armi.physics.neutronics.settings import (\n    CONF_LOADING_FILE,\n    CONF_XS_KERNEL,\n)\nfrom armi.reactor import blocks, blueprints, components, geometry, grids\nfrom armi.reactor.components import basicShapes, complexShapes\nfrom armi.reactor.flags import Flags\nfrom armi.reactor.grids.cartesian import CartesianGrid\nfrom armi.reactor.tests.test_assemblies import makeTestAssembly\nfrom armi.testing import getEmptyCartesianReactor, loadTestReactor\nfrom armi.testing.singleMixedAssembly import buildMixedPinAssembly\nfrom armi.tests import ISOAA_PATH, TEST_ROOT, mockRunLogs\nfrom armi.utils import densityTools, hexagon, units\nfrom armi.utils.directoryChangers import TemporaryDirectoryChanger\nfrom armi.utils.units import (\n    ASCII_LETTER_A,\n    ASCII_LETTER_Z,\n    
MOLES_PER_CC_TO_ATOMS_PER_BARN_CM,\n    ASCII_LETTER_a,\n)\n\nNUM_PINS_IN_TEST_BLOCK = 217\n\n\ndef buildSimpleFuelBlock():\n    \"\"\"Return a simple hex block containing fuel, clad, duct, and coolant.\"\"\"\n    b = blocks.HexBlock(\"fuel\", height=10.0)\n\n    fuelDims = {\"Tinput\": 25.0, \"Thot\": 600, \"od\": 0.76, \"id\": 0.00, \"mult\": 127.0}\n    cladDims = {\"Tinput\": 25.0, \"Thot\": 450, \"od\": 0.80, \"id\": 0.77, \"mult\": 127.0}\n    ductDims = {\"Tinput\": 25.0, \"Thot\": 400, \"op\": 16, \"ip\": 15.3, \"mult\": 1.0}\n    intercoolantDims = {\n        \"Tinput\": 400,\n        \"Thot\": 400,\n        \"op\": 17.0,\n        \"ip\": ductDims[\"op\"],\n        \"mult\": 1.0,\n    }\n    coolDims = {\"Tinput\": 25.0, \"Thot\": 400}\n\n    fuel = components.Circle(\"fuel\", \"UZr\", **fuelDims)\n    clad = components.Circle(\"clad\", \"HT9\", **cladDims)\n    duct = components.Hexagon(\"duct\", \"HT9\", **ductDims)\n    coolant = components.DerivedShape(\"coolant\", \"Sodium\", **coolDims)\n    intercoolant = components.Hexagon(\"intercoolant\", \"Sodium\", **intercoolantDims)\n\n    b.add(fuel)\n    b.add(clad)\n    b.add(duct)\n    b.add(coolant)\n    b.add(intercoolant)\n\n    return b\n\n\ndef buildLinkedFuelBlock():\n    \"\"\"Return a simple hex block containing linked bond.\"\"\"\n    b = blocks.HexBlock(\"fuel\", height=10.0)\n\n    fuelDims = {\"Tinput\": 25.0, \"Thot\": 600, \"od\": 0.76, \"id\": 0.00, \"mult\": 127.0}\n    bondDims = {\n        \"Tinput\": 25.0,\n        \"Thot\": 450,\n        \"od\": \"clad.id\",\n        \"id\": \"fuel.od\",\n        \"mult\": 127.0,\n    }\n    cladDims = {\"Tinput\": 25.0, \"Thot\": 450, \"od\": 0.80, \"id\": 0.77, \"mult\": 127.0}\n    ductDims = {\"Tinput\": 25.0, \"Thot\": 400, \"op\": 16, \"ip\": 15.3, \"mult\": 1.0}\n    intercoolantDims = {\n        \"Tinput\": 400,\n        \"Thot\": 400,\n        \"op\": 17.0,\n        \"ip\": ductDims[\"op\"],\n        \"mult\": 1.0,\n    }\n    coolDims = 
{\"Tinput\": 25.0, \"Thot\": 400}\n\n    fuel = components.Circle(\"fuel\", \"UZr\", **fuelDims)\n    clad = components.Circle(\"clad\", \"HT9\", **cladDims)\n    bondDims[\"components\"] = {\"clad\": clad, \"fuel\": fuel}\n    bond = components.Circle(\"bond\", \"HT9\", **bondDims)\n    duct = components.Hexagon(\"duct\", \"HT9\", **ductDims)\n    coolant = components.DerivedShape(\"coolant\", \"Sodium\", **coolDims)\n    intercoolant = components.Hexagon(\"intercoolant\", \"Sodium\", **intercoolantDims)\n\n    b.add(fuel)\n    b.add(bond)\n    b.add(clad)\n    b.add(duct)\n    b.add(coolant)\n    b.add(intercoolant)\n\n    return b\n\n\ndef loadTestBlock(cold=True, depletable=False) -> blocks.HexBlock:\n    \"\"\"Build an annular test block for evaluating unit tests.\"\"\"\n    caseSetting = settings.Settings()\n    caseSetting[CONF_XS_KERNEL] = \"MC2v2\"\n    runLog.setVerbosity(\"error\")\n    caseSetting[\"nCycles\"] = 1\n    r = tests.getEmptyHexReactor()\n\n    assemNum = 3\n    block = blocks.HexBlock(\"TestHexBlock\")\n    block.setType(\"defaultType\")\n    block.p.nPins = NUM_PINS_IN_TEST_BLOCK\n    assembly = makeTestAssembly(assemNum, 1, r=r)\n\n    # NOTE: temperatures are supposed to be in C\n    coldTemp = 25.0\n    hotTempCoolant = 430.0\n    hotTempStructure = 25.0 if cold else hotTempCoolant\n    hotTempFuel = 25.0 if cold else 600.0\n\n    fuelDims = {\n        \"Tinput\": coldTemp,\n        \"Thot\": hotTempFuel,\n        \"od\": 0.84,\n        \"id\": 0.6,\n        \"mult\": NUM_PINS_IN_TEST_BLOCK,\n    }\n    fuel = components.Circle(\"fuel\", \"UZr\", **fuelDims)\n    if depletable:\n        fuel.p.flags = Flags.fromString(\"fuel depletable\")\n\n    bondDims = {\n        \"Tinput\": coldTemp,\n        \"Thot\": hotTempCoolant,\n        \"od\": \"fuel.id\",\n        \"id\": 0.3,\n        \"mult\": NUM_PINS_IN_TEST_BLOCK,\n    }\n    bondDims[\"components\"] = {\"fuel\": fuel}\n    bond = components.Circle(\"bond\", \"Sodium\", 
**bondDims)\n\n    annularVoidDims = {\n        \"Tinput\": hotTempStructure,\n        \"Thot\": hotTempStructure,\n        \"od\": \"bond.id\",\n        \"id\": 0.0,\n        \"mult\": NUM_PINS_IN_TEST_BLOCK,\n    }\n    annularVoidDims[\"components\"] = {\"bond\": bond}\n    annularVoid = components.Circle(\"annular void\", \"Void\", **annularVoidDims)\n\n    innerLinerDims = {\n        \"Tinput\": coldTemp,\n        \"Thot\": hotTempStructure,\n        \"od\": 0.90,\n        \"id\": 0.85,\n        \"mult\": NUM_PINS_IN_TEST_BLOCK,\n    }\n    innerLiner = components.Circle(\"inner liner\", \"Graphite\", **innerLinerDims)\n\n    fuelLinerGapDims = {\n        \"Tinput\": hotTempStructure,\n        \"Thot\": hotTempStructure,\n        \"od\": \"inner liner.id\",\n        \"id\": \"fuel.od\",\n        \"mult\": NUM_PINS_IN_TEST_BLOCK,\n    }\n    fuelLinerGapDims[\"components\"] = {\"inner liner\": innerLiner, \"fuel\": fuel}\n    fuelLinerGap = components.Circle(\"gap1\", \"Void\", **fuelLinerGapDims)\n\n    outerLinerDims = {\n        \"Tinput\": coldTemp,\n        \"Thot\": hotTempStructure,\n        \"od\": 0.95,\n        \"id\": 0.90,\n        \"mult\": NUM_PINS_IN_TEST_BLOCK,\n    }\n    outerLiner = components.Circle(\"outer liner\", \"HT9\", **outerLinerDims)\n\n    linerLinerGapDims = {\n        \"Tinput\": hotTempStructure,\n        \"Thot\": hotTempStructure,\n        \"od\": \"outer liner.id\",\n        \"id\": \"inner liner.od\",\n        \"mult\": NUM_PINS_IN_TEST_BLOCK,\n    }\n    linerLinerGapDims[\"components\"] = {\n        \"outer liner\": outerLiner,\n        \"inner liner\": innerLiner,\n    }\n    linerLinerGap = components.Circle(\"gap2\", \"Void\", **linerLinerGapDims)\n\n    claddingDims = {\n        \"Tinput\": coldTemp,\n        \"Thot\": hotTempStructure,\n        \"od\": 1.05,\n        \"id\": 0.95,\n        \"mult\": NUM_PINS_IN_TEST_BLOCK,\n    }\n    cladding = components.Circle(\"clad\", \"HT9\", **claddingDims)\n    if 
depletable:\n        cladding.p.flags = Flags.fromString(\"clad depletable\")\n\n    linerCladGapDims = {\n        \"Tinput\": hotTempStructure,\n        \"Thot\": hotTempStructure,\n        \"od\": \"clad.id\",\n        \"id\": \"outer liner.od\",\n        \"mult\": NUM_PINS_IN_TEST_BLOCK,\n    }\n    linerCladGapDims[\"components\"] = {\"clad\": cladding, \"outer liner\": outerLiner}\n    linerCladGap = components.Circle(\"gap3\", \"Void\", **linerCladGapDims)\n\n    wireDims = {\n        \"Tinput\": coldTemp,\n        \"Thot\": hotTempStructure,\n        \"od\": 0.1,\n        \"id\": 0.0,\n        \"axialPitch\": 30.0,\n        \"helixDiameter\": 1.1,\n        \"mult\": NUM_PINS_IN_TEST_BLOCK,\n    }\n    wire = components.Helix(\"wire\", \"HT9\", **wireDims)\n    if depletable:\n        wire.p.flags = Flags.fromString(\"wire depletable\")\n\n    coolantDims = {\"Tinput\": hotTempCoolant, \"Thot\": hotTempCoolant}\n    coolant = components.DerivedShape(\"coolant\", \"Sodium\", **coolantDims)\n\n    ductDims = {\n        \"Tinput\": coldTemp,\n        \"Thot\": hotTempStructure,\n        \"ip\": 16.6,\n        \"op\": 17.3,\n        \"mult\": 1,\n    }\n    duct = components.Hexagon(\"duct\", \"HT9\", **ductDims)\n    if depletable:\n        duct.p.flags = Flags.fromString(\"duct depletable\")\n\n    interDims = {\n        \"Tinput\": hotTempCoolant,\n        \"Thot\": hotTempCoolant,\n        \"op\": 17.8,\n        \"ip\": \"duct.op\",\n        \"mult\": 1,\n    }\n    interDims[\"components\"] = {\"duct\": duct}\n    interSodium = components.Hexagon(\"interCoolant\", \"Sodium\", **interDims)\n\n    block.add(annularVoid)\n    block.add(bond)\n    block.add(fuel)\n    block.add(fuelLinerGap)\n    block.add(innerLiner)\n    block.add(linerLinerGap)\n    block.add(outerLiner)\n    block.add(linerCladGap)\n    block.add(cladding)\n\n    block.add(wire)\n    block.add(coolant)\n    block.add(duct)\n    block.add(interSodium)\n\n    block.setHeight(16.0)\n\n    
block.autoCreateSpatialGrids(r.core.spatialGrid)\n    assembly.add(block)\n    r.core.add(assembly)\n    return block\n\n\ndef applyDummyData(block):\n    \"\"\"Add some dummy data to a block for physics-like tests.\"\"\"\n    # typical SFR-ish flux in 1/cm^2/s\n    flux = [\n        161720716762.12997,\n        2288219224332.647,\n        11068159130271.139,\n        26473095948525.742,\n        45590249703180.945,\n        78780459664094.23,\n        143729928505629.06,\n        224219073208464.06,\n        229677567456769.22,\n        267303906113313.16,\n        220996878365852.22,\n        169895433093246.28,\n        126750484612975.31,\n        143215138794766.53,\n        74813432842005.5,\n        32130372366225.85,\n        21556243034771.582,\n        6297567411518.368,\n        22365198294698.45,\n        12211256796917.86,\n        5236367197121.363,\n        1490736020048.7847,\n        1369603135573.731,\n        285579041041.55945,\n        73955783965.98692,\n        55003146502.73623,\n        18564831886.20426,\n        4955747691.052108,\n        3584030491.076041,\n        884015567.3986057,\n        4298964991.043116,\n        1348809158.0353086,\n        601494405.293505,\n    ]\n    xslib = isotxs.readBinary(ISOAA_PATH)\n    # Slight hack here because the test block was created by hand rather than via blueprints and so\n    # elemental expansion of isotopics did not occur. 
But, the ISOTXS library being used did go\n    # through an isotopic expansion, so we map nuclides here.\n    xslib._nuclides[\"NAAA\"] = xslib._nuclides[\"NA23AA\"]\n    xslib._nuclides[\"WAA\"] = xslib._nuclides[\"W184AA\"]\n    xslib._nuclides[\"MNAA\"] = xslib._nuclides[\"MN55AA\"]\n    block.p.mgFlux = flux\n    block.core.lib = xslib\n\n\ndef getComponentData(component):\n    density = 0.0\n    for nuc in component.getNuclides():\n        density += (\n            component.getNumberDensity(nuc) * nucDir.getAtomicWeight(nuc) / units.MOLES_PER_CC_TO_ATOMS_PER_BARN_CM\n        )\n    volume = component.getVolume()\n    mass = component.getMass()\n    return component, density, volume, mass\n\n\nclass TestDetailedNDensUpdate(unittest.TestCase):\n    def test_updateDetailedNdens(self):\n        from armi.reactor.blueprints.tests.test_blockBlueprints import FULL_BP\n\n        cs = settings.Settings()\n        with io.StringIO(FULL_BP) as stream:\n            bps = blueprints.Blueprints.load(stream)\n            bps._prepConstruction(cs)\n            self.r = tests.getEmptyHexReactor()\n            self.r.blueprints = bps\n            a = makeTestAssembly(numBlocks=1, assemNum=0)\n            a.add(buildSimpleFuelBlock())\n            self.r.core.add(a)\n\n        # get first block in assembly with 'fuel' key\n        block = self.r.core[0][0]\n        # get nuclides in first component in block\n        adjList = block[0].getNuclides()\n        block.p.detailedNDens = np.array([1.0])\n        block.p.pdensDecay = 1.0\n        block._updateDetailedNdens(frac=0.5, adjustList=adjList)\n        self.assertEqual(block.p.pdensDecay, 0.5)\n        self.assertEqual(block.p.detailedNDens, np.array([0.5]))\n\n\nclass TestValidateSFPSpatialGrids(unittest.TestCase):\n    def test_noSFPExists(self):\n        \"\"\"Validate the spatial grid for a new SFP is None if it was not provided.\"\"\"\n        # copy the inputs, so we can modify them\n        with 
TemporaryDirectoryChanger() as newDir:\n            oldDir = os.path.join(TEST_ROOT, \"smallestTestReactor\")\n            newDir2 = os.path.join(newDir.destination, \"smallestTestReactor\")\n            shutil.copytree(oldDir, newDir2)\n\n            # cut out the SFP grid in the input file\n            testFile = os.path.join(newDir2, \"refSmallestReactor.yaml\")\n            txt = open(testFile, \"r\").read()\n            txt = txt.split(\"symmetry: full\")[0]\n            open(testFile, \"w\").write(txt)\n\n            # verify there is no spatial grid defined\n            _o, r = loadTestReactor(newDir2, inputFileName=\"armiRunSmallest.yaml\")\n            self.assertIsNone(r.excore.sfp.spatialGrid)\n\n    def test_SFPSpatialGridExists(self):\n        \"\"\"Validate the spatial grid for a new SFP is not None if it was provided.\"\"\"\n        _o, r = loadTestReactor(os.path.join(TEST_ROOT, \"smallestTestReactor\"), inputFileName=\"armiRunSmallest.yaml\")\n        self.assertIsNotNone(r.excore.sfp.spatialGrid)\n\n    def test_orientationBOL(self):\n        _o, r = loadTestReactor(os.path.join(TEST_ROOT, \"smallestTestReactor\"), inputFileName=\"armiRunSmallest.yaml\")\n\n        # Test the null-case; these should all be zero.\n        for a in r.core.iterChildren():\n            self.assertEqual(a.p.orientation[0], 0.0)\n            self.assertEqual(a.p.orientation[1], 0.0)\n            self.assertEqual(a.p.orientation[2], 0.0)\n\n\nclass Block_TestCase(unittest.TestCase):\n    def setUp(self):\n        self.block = loadTestBlock()\n        self._hotBlock = loadTestBlock(cold=False)\n        self._deplBlock = loadTestBlock(depletable=True)\n\n    def test_getSmearDensity(self):\n        cur = self.block.getSmearDensity()\n        ref = (self.block.getDim(Flags.FUEL, \"od\") ** 2 - self.block.getDim(Flags.FUEL, \"id\") ** 2) / self.block.getDim(\n            Flags.LINER, \"id\"\n        ) ** 2\n        places = 10\n        self.assertAlmostEqual(cur, ref, 
places=places)\n\n        # test with liner instead of clad\n        ref = (self.block.getDim(Flags.FUEL, \"od\") ** 2 - self.block.getDim(Flags.FUEL, \"id\") ** 2) / self.block.getDim(\n            Flags.LINER, \"id\"\n        ) ** 2\n        cur = self.block.getSmearDensity()\n        self.assertAlmostEqual(\n            cur,\n            ref,\n            places=places,\n            msg=\"Incorrect getSmearDensity with liner. Got {0}. Should be {1}\".format(cur, ref),\n        )\n\n        # test with annular fuel.\n        fuelDims = {\n            \"Tinput\": 273.0,\n            \"Thot\": 273.0,\n            \"od\": 0.87,\n            \"id\": 0.2,\n            \"mult\": 271.0,\n        }\n        self.fuelComponent = components.Circle(\"fuel\", \"UZr\", **fuelDims)\n\n        ref = (self.block.getDim(Flags.FUEL, \"od\") ** 2 - self.block.getDim(Flags.FUEL, \"id\") ** 2) / self.block.getDim(\n            Flags.LINER, \"id\"\n        ) ** 2\n        cur = self.block.getSmearDensity()\n        self.assertAlmostEqual(\n            cur,\n            ref,\n            places=places,\n            msg=\"Incorrect getSmearDensity with annular fuel. Got {0}. 
Should be {1}\".format(cur, ref),\n        )\n\n    def test_getSmearDensityMultipleClads(self):\n        # add clad of different size\n        clad = self.block.getComponent(Flags.CLAD)\n        self.block.remove(clad)\n        cladDims = {\n            \"Tinput\": 273.0,\n            \"Thot\": 273.0,\n            \"od\": clad.getDimension(\"od\") + 0.02,\n            \"id\": clad.getDimension(\"id\"),\n            \"mult\": 117.0,\n        }\n        self.block.add(components.Circle(\"clad test\", \"HT9\", **cladDims))\n\n        # add clad of different size\n        cladDims = {\n            \"Tinput\": 273.0,\n            \"Thot\": 273.0,\n            \"od\": clad.getDimension(\"od\"),\n            \"id\": clad.getDimension(\"id\") + 0.02,\n            \"mult\": 100.0,\n        }\n        self.block.add(components.Circle(\"clad\", \"HT9\", **cladDims))\n\n        cur = self.block.getSmearDensity()\n        fuel = self.block.getComponent(Flags.FUEL, exact=True)\n        liner = self.block.getComponent(Flags.LINER | Flags.INNER)\n        clads = self.block.getComponents(Flags.CLAD)\n        ref = (fuel.getDimension(\"od\", cold=True) ** 2 - fuel.getDimension(\"id\", cold=True) ** 2) / liner.getDimension(\n            \"id\", cold=True\n        ) ** 2\n        fuelArea = fuel.getArea(cold=True)\n        innerArea = 0.0\n        for clad in clads:\n            innerArea += math.pi / 4.0 * clad.getDimension(\"id\", cold=True) ** 2 * clad.getDimension(\"mult\")\n        for liner in self.block.getComponents(Flags.LINER):\n            innerArea -= liner.getArea(cold=True)\n\n        ref = fuelArea / innerArea\n        self.assertAlmostEqual(cur, ref, places=10)\n\n    def test_getSmearDensityMixedPin(self):\n        fuel = self.block.getComponent(Flags.FUEL)\n        self.block.remove(fuel)\n\n        fuelDims = {\n            \"Tinput\": 273.0,\n            \"Thot\": 273.0,\n            \"od\": fuel.getDimension(\"od\"),\n            \"id\": 
fuel.getDimension(\"id\"),\n            \"mult\": 117.0,\n        }\n        self.block.add(components.Circle(\"fuel annular\", \"UZr\", **fuelDims))\n\n        # add non-annular fuel\n        fuelDims = {\n            \"Tinput\": 273.0,\n            \"Thot\": 273.0,\n            \"od\": 0.75,\n            \"id\": 0.0,\n            \"mult\": 100.0,\n        }\n        self.block.add(components.Circle(\"fuel\", \"UZr\", **fuelDims))\n\n        # add clad of different size\n        clad = self.block.getComponent(Flags.CLAD)\n        self.block.remove(clad)\n        cladDims = {\n            \"Tinput\": 273.0,\n            \"Thot\": 273.0,\n            \"od\": clad.getDimension(\"od\") + 0.02,\n            \"id\": clad.getDimension(\"id\"),\n            \"mult\": 117.0,\n        }\n        self.block.add(components.Circle(\"clad test\", \"HT9\", **cladDims))\n\n        # add clad of different size\n        cladDims = {\n            \"Tinput\": 273.0,\n            \"Thot\": 273.0,\n            \"od\": clad.getDimension(\"od\"),\n            \"id\": clad.getDimension(\"id\") + 0.02,\n            \"mult\": 100.0,\n        }\n        self.block.add(components.Circle(\"clad\", \"HT9\", **cladDims))\n\n        # calculate reference smear density\n        fuel = self.block.getComponent(Flags.FUEL, exact=True)\n        annularFuel = self.block.getComponent(Flags.FUEL | Flags.ANNULAR)\n        liner = self.block.getComponent(Flags.LINER | Flags.INNER)\n        clads = self.block.getComponents(Flags.CLAD)\n        fuelArea = math.pi / 4.0 * fuel.getDimension(\"od\", cold=True) ** 2 * fuel.getDimension(\"mult\")\n        fuelArea += (\n            math.pi\n            / 4.0\n            * (annularFuel.getDimension(\"od\", cold=True) ** 2 - annularFuel.getDimension(\"id\", cold=True) ** 2)\n            * annularFuel.getDimension(\"mult\")\n        )\n        innerArea = 0.0\n        for clad in clads:\n            innerArea += math.pi / 4.0 * clad.getDimension(\"id\", cold=True) 
** 2 * clad.getDimension(\"mult\")\n        for liner in self.block.getComponents(Flags.LINER):\n            innerArea -= liner.getArea(cold=True)\n\n        ref = fuelArea / innerArea\n        cur = self.block.getSmearDensity()\n        self.assertAlmostEqual(cur, ref, places=10)\n\n    def test_getSmearDensityMultipleLiner(self):\n        numLiners = sum(1 for c in self.block if \"liner\" in c.name and \"gap\" not in c.name)\n        self.assertEqual(\n            numLiners,\n            2,\n            \"self.block needs at least 2 liners for this test to be functional.\",\n        )\n        cur = self.block.getSmearDensity()\n        ref = (self.block.getDim(Flags.FUEL, \"od\") ** 2 - self.block.getDim(Flags.FUEL, \"id\") ** 2) / self.block.getDim(\n            Flags.INNER | Flags.LINER, \"id\"\n        ) ** 2\n        self.assertAlmostEqual(cur, ref, places=10)\n\n    def test_getSmearDensityEdgeCases(self):\n        # show smear density is not computed for non-fuel blocks\n        b0 = blocks.Block(\"DummyReflectorBlock\")\n        self.assertEqual(b0.getSmearDensity(), 0.0)\n\n        # show smear density is only defined for pinned fuel blocks\n        b1 = blocks.HexBlock(\"TestFuelHexBlock\")\n        b1.setType(\"fuel\")\n        b1.p.nPins = 0\n        fuel = components.Circle(\"fuel\", \"UZr\", Tinput=25.0, Thot=25.0, od=0.84, id=0.6, mult=0)\n        b1.add(fuel)\n        self.assertEqual(b1.getSmearDensity(), 0.0)\n\n    def test_computeSmearDensity(self):\n        # test the null case\n        smearDensity = blocks.Block.computeSmearDensity(123.4, [], True)\n        self.assertEqual(smearDensity, 0.0)\n\n        smearDensity = blocks.Block.computeSmearDensity(123.4, [], False)\n        self.assertEqual(smearDensity, 0.0)\n\n        # test one circle component\n        circles = self.block.getComponentsOfShape(components.Circle)\n        smearDensity = blocks.Block.computeSmearDensity(123.4, [circles[0]], True)\n        self.assertEqual(smearDensity, 
0.0)\n\n        # use the test block\n        clads = set(self.block.getComponents(Flags.CLAD)).intersection(set(circles))\n        cladID = np.mean([clad.getDimension(\"id\", cold=True) for clad in clads])\n        sortedCircles = self.block.getSortedComponentsInsideOfComponent(circles.pop())\n\n        fuelCompArea = sum(f.getArea(cold=True) for f in self.block.getComponents(Flags.FUEL))\n        innerCladdingArea = math.pi * (cladID**2) / 4.0 * self.block.getNumComponents(Flags.FUEL)\n        unmovableCompArea = sum(\n            c.getArea(cold=True)\n            for c in sortedCircles\n            if not c.isFuel() and not c.hasFlags([Flags.SLUG, Flags.DUMMY]) and c.containsSolidMaterial()\n        )\n\n        refSmearDensity = fuelCompArea / (innerCladdingArea - unmovableCompArea)\n        smearDensity = blocks.Block.computeSmearDensity(153.81433981516477, sortedCircles, True)\n        self.assertAlmostEqual(smearDensity, refSmearDensity, places=10)\n\n    def test_timeNodeParams(self):\n        self.block.p[\"buRate\", 3] = 0.1\n        self.assertEqual(0.1, self.block.p[(\"buRate\", 3)])\n\n    def test_getType(self):\n        ref = \"plenum pin\"\n        self.block.setType(ref)\n        cur = self.block.getType()\n        self.assertEqual(cur, ref)\n        self.assertTrue(self.block.hasFlags(Flags.PLENUM))\n        self.assertTrue(self.block.hasFlags(Flags.PLENUM | Flags.PIN))\n        self.assertTrue(self.block.hasFlags(Flags.PLENUM | Flags.PIN, exact=True))\n        self.assertFalse(self.block.hasFlags(Flags.PLENUM, exact=True))\n\n    def test_hasFlags(self):\n        self.block.setType(\"feed fuel\")\n\n        cur = self.block.hasFlags(Flags.FEED | Flags.FUEL)\n        self.assertTrue(cur)\n\n        cur = self.block.hasFlags(Flags.PLENUM)\n        self.assertFalse(cur)\n\n    def test_setType(self):\n        self.block.setType(\"igniter fuel\")\n\n        self.assertEqual(\"igniter fuel\", self.block.getType())\n        
self.assertTrue(self.block.hasFlags(Flags.IGNITER | Flags.FUEL))\n\n        self.block.adjustUEnrich(0.0001)\n        self.block.setType(\"feed fuel\")\n\n        self.assertTrue(self.block.hasFlags(Flags.FEED | Flags.FUEL))\n        self.assertTrue(self.block.hasFlags(Flags.FUEL))\n        self.assertFalse(self.block.hasFlags(Flags.IGNITER | Flags.FUEL))\n\n    def test_duplicate(self):\n        Block2 = blocks.Block.createHomogenizedCopy(self.block)\n        originalComponents = self.block.getComponents()\n        newComponents = Block2.getComponents()\n        for c1, c2 in zip(originalComponents, newComponents):\n            self.assertEqual(c1.getName(), c2.getName())\n            a1, a2 = c1.getArea(), c2.getArea()\n            self.assertIsNot(c1, c2)\n            self.assertAlmostEqual(\n                a1,\n                a2,\n                msg=\"The area of {0}={1} but the area of {2} in the copy={3}\".format(c1, a1, c2, a2),\n            )\n            for key in c2.DIMENSION_NAMES:\n                dim = c2.p[key]\n                if isinstance(dim, tuple):\n                    self.assertNotIn(dim[0], originalComponents)\n                    self.assertIn(dim[0], newComponents)\n\n        ref = self.block.getMass()\n        cur = Block2.getMass()\n        places = 6\n        self.assertAlmostEqual(ref, cur, places=places)\n\n        ref = self.block.getArea()\n        cur = Block2.getArea()\n        places = 6\n        self.assertAlmostEqual(ref, cur, places=places)\n\n        ref = self.block.getHeight()\n        cur = Block2.getHeight()\n        places = 6\n        self.assertAlmostEqual(ref, cur, places=places)\n\n        self.assertEqual(self.block.p.flags, Block2.p.flags)\n\n    def test_homogenizedMixture(self):\n        \"\"\"\n        Confirms homogenized blocks have correct properties.\n\n        .. 
test:: Homogenize the compositions of a block.\n            :id: T_ARMI_BLOCK_HOMOG\n            :tests: R_ARMI_BLOCK_HOMOG\n        \"\"\"\n        args = [False, True]  # pinSpatialLocator argument\n        expectedShapes = [\n            [basicShapes.Hexagon],\n            [basicShapes.Hexagon, basicShapes.Circle],\n        ]\n\n        for arg, shapes in zip(args, expectedShapes):\n            homogBlock = self.block.createHomogenizedCopy(pinSpatialLocators=arg)\n            for shapeType in shapes:\n                for c in homogBlock.getComponents():\n                    if isinstance(c, shapeType):\n                        break\n                else:\n                    # didn't find the homogenized hex in the block copy\n                    self.assertTrue(False, f\"{self.block} does not have a {shapeType} component!\")\n            if arg:\n                # check that homogenized block has correct pin coordinates\n                self.assertEqual(self.block.getNumPins(), homogBlock.getNumPins())\n                self.assertEqual(self.block.p.nPins, homogBlock.p.nPins)\n                pinCoords = self.block.getPinCoordinates()\n                homogPinCoords = homogBlock.getPinCoordinates()\n                for refXYZ, homogXYZ in zip(list(pinCoords), list(homogPinCoords)):\n                    self.assertListEqual(list(refXYZ), list(homogXYZ))\n\n            cur = homogBlock.getMass()\n            self.assertAlmostEqual(self.block.getMass(), homogBlock.getMass())\n\n            self.assertEqual(homogBlock.getType(), self.block.getType())\n            self.assertEqual(homogBlock.p.flags, self.block.p.flags)\n            self.assertEqual(homogBlock.macros, self.block.macros)\n            self.assertEqual(homogBlock._lumpedFissionProducts, self.block._lumpedFissionProducts)\n\n            ref = self.block.getArea()\n            cur = homogBlock.getArea()\n            places = 6\n            self.assertAlmostEqual(ref, cur, places=places)\n\n            
ref = self.block.getHeight()\n            cur = homogBlock.getHeight()\n            places = 6\n            self.assertAlmostEqual(ref, cur, places=places)\n\n    def test_getXsType(self):\n        self.cs = settings.Settings()\n        newSettings = {CONF_LOADING_FILE: os.path.join(TEST_ROOT, \"refSmallReactor.yaml\")}\n        self.cs = self.cs.modified(newSettings=newSettings)\n\n        self.block.p.xsType = \"B\"\n        cur = self.block.p.xsType\n        ref = \"B\"\n        self.assertEqual(cur, ref)\n\n        _oldBuGroups = self.cs[\"buGroups\"]\n        newSettings = {\"buGroups\": [100]}\n        self.cs = self.cs.modified(newSettings=newSettings)\n\n        self.block.p.xsType = \"BB\"\n        cur = self.block.p.xsType\n        ref = \"BB\"\n        self.assertEqual(cur, ref)\n\n    def test_27b_setEnvGroup(self):\n        type_ = \"A\"\n        self.block.p.envGroup = type_\n        cur = self.block.p.envGroupNum\n        ref = ord(type_) - ASCII_LETTER_A\n        self.assertEqual(cur, ref)\n\n        typeNumber = 25  # this is Z due to 0 based numbers\n        self.block.p.envGroupNum = typeNumber\n        cur = self.block.p.envGroup\n        ref = chr(typeNumber + ASCII_LETTER_A)\n        self.assertEqual(cur, ref)\n        self.assertEqual(cur, \"Z\")\n\n        before_a = ASCII_LETTER_a - 1\n        type_ = \"a\"\n        self.block.p.envGroup = type_\n        cur = self.block.p.envGroupNum\n        ref = ord(type_) - (before_a) + (ASCII_LETTER_Z - ASCII_LETTER_A)\n        self.assertEqual(cur, ref)\n\n        typeNumber = 26  # this is a due to 0 based numbers\n        self.block.p.envGroupNum = typeNumber\n        cur = self.block.p.envGroup\n        self.assertEqual(cur, \"a\")\n\n        type_ = \"z\"\n        self.block.p.envGroup = type_\n        cur = self.block.p.envGroupNum\n        ref = ord(type_) - before_a + (ASCII_LETTER_Z - ASCII_LETTER_A)\n        self.assertEqual(cur, ref)\n\n        typeNumber = 26 * 2 - 1  # 2x letters in alpha 
with 0 based index\n        self.block.p.envGroupNum = typeNumber\n        cur = self.block.p.envGroup\n        ref = chr((typeNumber - 26) + ASCII_LETTER_a)\n        self.assertEqual(cur, ref)\n        self.assertEqual(cur, \"z\")\n\n    def test_setZeroHeight(self):\n        \"\"\"Test that demonstrates that a block's height can be set to zero.\"\"\"\n        b = buildSimpleFuelBlock()\n\n        # Check for a DerivedShape component\n        self.assertEqual(len([c for c in b if c.__class__ is components.DerivedShape]), 1)\n        m1 = b.getMass()\n        v1 = b.getVolume()\n        a1 = b.getArea()\n        nd1 = copy.deepcopy(b.getNumberDensities())\n        h1 = b.getHeight()\n        self.assertNotEqual(h1, 0.0)\n\n        # Set height to 0.0\n        b.setHeight(0.0)\n        m2 = b.getMass()\n        v2 = b.getVolume()\n        a2 = b.getArea()\n        nd2 = copy.deepcopy(b.getNumberDensities())\n        h2 = b.getHeight()\n\n        self.assertEqual(m2, 0.0)\n        self.assertEqual(v2, 0.0)\n        self.assertEqual(h2, 0.0)\n        self.assertAlmostEqual(a2, a1)\n        for nuc, ndens in nd2.items():\n            self.assertEqual(ndens, 0.0, msg=(f\"Number density of {nuc} is expected to be zero.\"))\n\n        # Set height back to the original height\n        b.setHeight(h1)\n        m3 = b.getMass()\n        v3 = b.getVolume()\n        a3 = b.getArea()\n        nd3 = copy.deepcopy(b.getNumberDensities())\n        h3 = b.getHeight()\n\n        self.assertAlmostEqual(m3, m1)\n        self.assertAlmostEqual(v3, v1)\n        self.assertAlmostEqual(a3, a1)\n        self.assertEqual(h3, h1)\n\n        for nuc in nd3.keys():\n            self.assertAlmostEqual(nd3[nuc], nd1[nuc])\n\n    def test_getVolumeFractionsWithZeroHeight(self):\n        \"\"\"Tests that the component fractions are the same with a zero height block.\"\"\"\n        b = buildSimpleFuelBlock()\n\n        h1 = b.getHeight()\n        originalVolFracs = b.getVolumeFractions()\n        
for _c, vf in originalVolFracs:\n            self.assertNotEqual(vf, 0.0)\n\n        b.setHeight(0.0)\n        volFracs = b.getVolumeFractions()\n        for (_c, vf1), (_c, vf2) in zip(volFracs, originalVolFracs):\n            self.assertAlmostEqual(vf1, vf2)\n\n        b.setHeight(h1)\n        volFracs = b.getVolumeFractions()\n        for (_c, vf1), (_c, vf2) in zip(volFracs, originalVolFracs):\n            self.assertAlmostEqual(vf1, vf2)\n\n    def test_getVolumeFractionWithoutParent(self):\n        \"\"\"Tests that the volume fraction of a block with no parent is zero.\"\"\"\n        b = buildSimpleFuelBlock()\n        self.assertIsNone(b.parent)\n        with self.assertRaises(ValueError):\n            b.getVolumeFraction()\n\n    def test_clearDensity(self):\n        self.block.clearNumberDensities()\n\n        for nuc in self.block.getNuclides():\n            cur = self.block.getNumberDensity(nuc)\n            ref = 0.0\n            places = 5\n            self.assertAlmostEqual(cur, ref, places=places)\n\n    def test_getNumberDensity(self):\n        refDict = {\n            \"U235\": 0.00275173784234,\n            \"U238\": 0.0217358415457,\n            \"W182\": 1.09115150103e-05,\n            \"W183\": 5.89214392093e-06,\n            \"W184\": 1.26159558164e-05,\n            \"W186\": 1.17057432664e-05,\n            \"ZR\": 0.00709003962772,\n        }\n\n        self.block.setNumberDensities(refDict)\n\n        for nucKey, nucItem in refDict.items():\n            cur = self.block.getNumberDensity(nucKey)\n            ref = nucItem\n            places = 6\n            self.assertAlmostEqual(ref, cur, places=places)\n\n    def test_getMasses(self):\n        masses = sorted(self.block.getMasses())\n        self.assertEqual(len(masses), 13)\n        self.assertEqual(masses[0], \"C\")\n\n    def test_removeMass(self):\n        mass0 = self.block.getMass(\"U238\")\n        self.assertGreater(mass0, 0.1)\n        self.block.removeMass(\"U238\", 0.1)\n        
mass1 = self.block.getMass(\"U238\")\n        self.assertGreater(mass1, 0)\n        self.assertGreater(mass0, mass1)\n\n    def test_setNumberDensity(self):\n        ref = 0.05\n        self.block.setNumberDensity(\"U235\", ref)\n\n        cur = self.block.getNumberDensity(\"U235\")\n        places = 5\n        self.assertAlmostEqual(cur, ref, places=places)\n\n    def test_setNumberDensities(self):\n        \"\"\"Make sure we can set multiple number densities at once.\"\"\"\n        b = self.block\n        b.setNumberDensity(\"NA\", 0.5)\n        refDict = {\n            \"U235\": 0.00275173784234,\n            \"U238\": 0.0217358415457,\n            \"W\": 1.09115150103e-05,\n            \"ZR\": 0.00709003962772,\n        }\n\n        b.setNumberDensities(refDict)\n\n        for nucKey, nucItem in refDict.items():\n            cur = self.block.getNumberDensity(nucKey)\n            ref = nucItem\n            places = 6\n            self.assertAlmostEqual(cur, ref, places=places)\n\n        # make sure U235 stayed fully contained in the fuel component\n        fuelC = b.getComponent(Flags.FUEL)\n        self.assertAlmostEqual(\n            b.getNumberDensity(\"U235\"),\n            fuelC.getNumberDensity(\"U235\") * fuelC.getVolumeFraction(),\n        )\n\n        # make sure other vals were zeroed out\n        self.assertAlmostEqual(b.getNumberDensity(\"NA23\"), 0.0)\n\n    def test_getMass(self):\n        self.block.setHeight(100.0)\n\n        nucName = \"U235\"\n        d = self.block.getNumberDensity(nucName)\n        v = self.block.getVolume()\n        A = nucDir.getAtomicWeight(nucName)\n\n        ref = d * v * A / MOLES_PER_CC_TO_ATOMS_PER_BARN_CM\n        cur = self.block.getMass(nucName)\n\n        places = 6\n        self.assertAlmostEqual(cur, ref, places=places)\n\n    def test_setMass(self):\n        self.block.setHeight(100.0)\n\n        mass = 100.0\n        nuc = \"U238\"\n        self.block.setMass(nuc, mass)\n\n        cur = 
self.block.getMass(nuc)\n        ref = mass\n        places = 6\n        self.assertAlmostEqual(cur, ref, places=places)\n\n        cur = self.block.getNumberDensity(nuc)\n        v = self.block.getVolume()\n        A = nucDir.getAtomicWeight(nuc)\n        ref = MOLES_PER_CC_TO_ATOMS_PER_BARN_CM * mass / (v * A)\n\n        places = 6\n        self.assertAlmostEqual(cur, ref, places=places)\n\n    def test_getTotalMass(self):\n        self.block.setHeight(100.0)\n\n        self.block.clearNumberDensities()\n        refDict = {\n            \"U235\": 0.00275173784234,\n            \"U238\": 0.0217358415457,\n            \"W182\": 1.09115150103e-05,\n            \"W183\": 5.89214392093e-06,\n            \"W184\": 1.26159558164e-05,\n            \"W186\": 1.17057432664e-05,\n            \"ZR\": 0.00709003962772,\n        }\n        self.block.setNumberDensities(refDict)\n\n        cur = self.block.getMass()\n\n        tot = 0.0\n        for nucName, nucItem in refDict.items():\n            d = nucItem\n            A = nucDir.getAtomicWeight(nucName)\n            tot += d * A\n\n        v = self.block.getVolume()\n        ref = tot * v / MOLES_PER_CC_TO_ATOMS_PER_BARN_CM\n\n        places = 9\n        self.assertAlmostEqual(cur, ref, places=places)\n\n    def test_replaceBlockWithBlock(self):\n        \"\"\"Tests conservation of mass flag in replaceBlockWithBlock.\"\"\"\n        block = self.block\n        ductBlock = block.__class__(\"duct\")\n        ductBlock.add(block.getComponent(Flags.COOLANT, exact=True))\n        ductBlock.add(block.getComponent(Flags.DUCT, exact=True))\n        ductBlock.add(block.getComponent(Flags.INTERCOOLANT, exact=True))\n\n        # get reference data\n        refLoc = block.spatialLocator\n        refName = block.name\n        refHeight = block.p.height\n        ductBlock.p.height = 99 * block.p.height\n\n        self.assertGreater(len(block), 3)\n\n        block.replaceBlockWithBlock(ductBlock)\n\n        
self.assertEqual(block.spatialLocator, refLoc)\n        self.assertEqual(refName, block.name)\n        self.assertEqual(3, len(block))\n        self.assertEqual(block.p.height, refHeight)\n\n    def test_getWettedPerimeterDepletable(self):\n        # calculate the reference value\n        wire = self._deplBlock.getComponent(Flags.WIRE)\n        correctionFactor = np.hypot(\n            1.0,\n            math.pi * wire.getDimension(\"helixDiameter\") / wire.getDimension(\"axialPitch\"),\n        )\n        wireDiam = wire.getDimension(\"od\") * correctionFactor\n\n        ipDim = self.block.getDim(Flags.DUCT, \"ip\")\n        odDim = self.block.getDim(Flags.CLAD, \"od\")\n        mult = self.block.getDim(Flags.CLAD, \"mult\")\n        ref = math.pi * (odDim + wireDiam) * mult + 6 * ipDim / math.sqrt(3)\n\n        # test getWettedPerimeter\n        cur = self._deplBlock.getWettedPerimeter()\n        self.assertAlmostEqual(cur, ref)\n\n    def test_getWettedPerimeter(self):\n        # calculate the reference value\n        wire = self.block.getComponent(Flags.WIRE)\n        correctionFactor = np.hypot(\n            1.0,\n            math.pi * wire.getDimension(\"helixDiameter\") / wire.getDimension(\"axialPitch\"),\n        )\n        wireDiam = wire.getDimension(\"od\") * correctionFactor\n\n        ipDim = self.block.getDim(Flags.DUCT, \"ip\")\n        odDim = self.block.getDim(Flags.CLAD, \"od\")\n        mult = self.block.getDim(Flags.CLAD, \"mult\")\n        ref = math.pi * (odDim + wireDiam) * mult + 6 * ipDim / math.sqrt(3)\n\n        # test getWettedPerimeter\n        cur = self.block.getWettedPerimeter()\n        self.assertAlmostEqual(cur, ref)\n\n    def test_getWettedPerimeterCircularInnerDuct(self):\n        \"\"\"Calculate the wetted perimeter for a HexBlock with circular inner duct.\"\"\"\n        # build a test block with a Hex inner duct\n        fuelDims = {\"Tinput\": 400, \"Thot\": 400, \"od\": 0.76, \"id\": 0.00, \"mult\": 127.0}\n        cladDims 
= {\"Tinput\": 400, \"Thot\": 400, \"od\": 0.80, \"id\": 0.77, \"mult\": 127.0}\n        ductDims = {\"Tinput\": 400, \"Thot\": 400, \"od\": 16, \"id\": 15.3, \"mult\": 1.0}\n        intercoolantDims = {\n            \"Tinput\": 400,\n            \"Thot\": 400,\n            \"od\": 17.0,\n            \"id\": ductDims[\"od\"],\n            \"mult\": 1.0,\n        }\n\n        fuel = components.Circle(\"fuel\", \"UZr\", **fuelDims)\n        clad = components.Circle(\"clad\", \"HT9\", **cladDims)\n        duct = components.Circle(\"inner duct\", \"HT9\", **ductDims)\n        intercoolant = components.Circle(\"intercoolant\", \"Sodium\", **intercoolantDims)\n\n        b = blocks.HexBlock(\"fuel\", height=10.0)\n        b.add(fuel)\n        b.add(clad)\n        b.add(duct)\n        b.add(intercoolant)\n\n        # calculate the reference value\n        ref = (ductDims[\"id\"] + ductDims[\"od\"]) * math.pi\n        ref += b.getNumPins() * cladDims[\"od\"] * math.pi\n\n        # test getWettedPerimeter\n        cur = b.getWettedPerimeter()\n        self.assertAlmostEqual(cur, ref)\n\n    def test_getWettedPerimeterHexInnerDuct(self):\n        \"\"\"Calculate the wetted perimeter for a HexBlock with hexagonal inner duct.\"\"\"\n        # build a test block with a Hex inner duct\n        fuelDims = {\"Tinput\": 400, \"Thot\": 400, \"od\": 0.76, \"id\": 0.00, \"mult\": 127.0}\n        cladDims = {\"Tinput\": 400, \"Thot\": 400, \"od\": 0.80, \"id\": 0.77, \"mult\": 127.0}\n        ductDims = {\"Tinput\": 400, \"Thot\": 400, \"op\": 16, \"ip\": 15.3, \"mult\": 1.0}\n        intercoolantDims = {\n            \"Tinput\": 400,\n            \"Thot\": 400,\n            \"op\": 17.0,\n            \"ip\": ductDims[\"op\"],\n            \"mult\": 1.0,\n        }\n\n        fuel = components.Circle(\"fuel\", \"UZr\", **fuelDims)\n        clad = components.Circle(\"clad\", \"HT9\", **cladDims)\n        duct = components.Hexagon(\"inner duct\", \"HT9\", **ductDims)\n        intercoolant 
= components.Hexagon(\"intercoolant\", \"Sodium\", **intercoolantDims)\n\n        b = blocks.HexBlock(\"fuel\", height=10.0)\n        b.add(fuel)\n        b.add(clad)\n        b.add(duct)\n        b.add(intercoolant)\n\n        # calculate the reference value\n        ref = 6 * (ductDims[\"ip\"] + ductDims[\"op\"]) / math.sqrt(3)\n        ref += b.getNumPins() * cladDims[\"od\"] * math.pi\n\n        # test getWettedPerimeter\n        cur = b.getWettedPerimeter()\n        self.assertAlmostEqual(cur, ref)\n\n    def test_getWettedPerimeterMultiPins(self):\n        assembly = buildMixedPinAssembly()\n        block = assembly.getFirstBlock(Flags.FUEL)\n        # calculate the reference value\n        wires = block.getComponents(Flags.WIRE)\n        clads = block.getComponents(Flags.CLAD)\n        ref = 0\n        for wire in wires:\n            mult = wire.getDimension(\"mult\")\n            correctionFactor = np.hypot(\n                1.0,\n                math.pi * wire.getDimension(\"helixDiameter\") / wire.getDimension(\"axialPitch\"),\n            )\n            wireDiam = wire.getDimension(\"od\") * correctionFactor\n            ref += math.pi * wireDiam * mult\n        ref += sum(math.pi * clad.getDimension(\"od\") * clad.getDimension(\"mult\") for clad in clads)\n\n        ipDim = block.getDim(Flags.DUCT, \"ip\")\n        ref += 6 * ipDim / math.sqrt(3)\n\n        # test getWettedPerimeter\n        cur = block.getWettedPerimeter()\n        self.assertAlmostEqual(cur, ref)\n\n    def test_getFlowAreaPerPin(self):\n        area = self.block.getComponent(Flags.COOLANT).getArea()\n        nPins = self.block.getNumPins()\n        cur = self.block.getFlowAreaPerPin()\n        ref = area / nPins\n        self.assertAlmostEqual(cur, ref)\n\n    def test_getFlowArea(self):\n        \"\"\"Test Block.getFlowArea() for a Block with just coolant.\"\"\"\n        ref = self.block.getComponent(Flags.COOLANT).getArea()\n        cur = self.block.getFlowArea()\n        
self.assertAlmostEqual(cur, ref)\n\n    def test_getFlowAreaInterDuctCoolant(self):\n        \"\"\"Test Block.getFlowArea() for a Block with coolant and interductcoolant.\"\"\"\n        # build a test block with a Hex inter duct collant\n        fuelDims = {\"Tinput\": 400, \"Thot\": 400, \"od\": 0.76, \"id\": 0.00, \"mult\": 127.0}\n        ductDims = {\"Tinput\": 400, \"Thot\": 400, \"op\": 16, \"ip\": 15.3, \"mult\": 1.0}\n        coolDims = {\"Tinput\": 400, \"Thot\": 400}\n        iCoolantDims = {\"Tinput\": 400, \"Thot\": 400, \"op\": 17.0, \"ip\": 16, \"mult\": 1.0}\n\n        fuel = components.Circle(\"fuel\", \"UZr\", **fuelDims)\n        duct = components.Hexagon(\"inner duct\", \"HT9\", **ductDims)\n        coolant = components.DerivedShape(\"coolant\", \"Sodium\", **coolDims)\n        iCoolant = components.Hexagon(\"interductcoolant\", \"Sodium\", **iCoolantDims)\n\n        b = blocks.HexBlock(\"fuel\", height=10.0)\n        b.add(fuel)\n        b.add(coolant)\n        b.add(duct)\n        b.add(iCoolant)\n\n        ref = b.getComponent(Flags.COOLANT).getArea()\n        ref += b.getComponent(Flags.INTERDUCTCOOLANT).getArea()\n        cur = b.getFlowArea()\n        self.assertAlmostEqual(cur, ref)\n\n    def test_getHydraulicDiameter(self):\n        cur = self.block.getHydraulicDiameter()\n        ref = 4.0 * self.block.getFlowArea() / self.block.getWettedPerimeter()\n        self.assertAlmostEqual(cur, ref)\n\n    def test_adjustUEnrich(self):\n        self.block.setHeight(100.0)\n\n        ref = 0.25\n        self.block.adjustUEnrich(ref)\n\n        cur = self.block.getComponent(Flags.FUEL).getEnrichment()\n        places = 5\n        self.assertAlmostEqual(cur, ref, places=places)\n\n    def test_setLocation(self):\n        \"\"\"\n        Retrieve a blocks location.\n\n        .. 
test:: Location of a block is retrievable.\n            :id: T_ARMI_BLOCK_POSI0\n            :tests: R_ARMI_BLOCK_POSI\n        \"\"\"\n        b = self.block\n        # a bit obvious, but location is a property now...\n        i, j = grids.HexGrid.getIndicesFromRingAndPos(2, 3)\n        b.spatialLocator = b.core.spatialGrid[i, j, 0]\n        self.assertEqual(b.getLocation(), \"002-003-000\")\n        self.assertEqual(0, b.spatialLocator.k)\n        self.assertEqual(b.getSymmetryFactor(), 1.0)\n\n        # now if we don't specify axial, it will move to the new xy, location and have original z index\n        i, j = grids.HexGrid.getIndicesFromRingAndPos(4, 4)\n        b.spatialLocator = b.core.spatialGrid[i, j, 0]\n        self.assertEqual(0, b.spatialLocator.k)\n        self.assertEqual(b.getSymmetryFactor(), 1.0)\n\n        # center blocks have a different symmetry factor for 1/3rd core\n        for symmetry, powerMult in (\n            (geometry.FULL_CORE, 1),\n            (\n                geometry.SymmetryType(geometry.DomainType.THIRD_CORE, geometry.BoundaryType.PERIODIC),\n                3,\n            ),\n        ):\n            self.block.core.symmetry = geometry.SymmetryType.fromAny(symmetry)\n            i, j = grids.HexGrid.getIndicesFromRingAndPos(1, 1)\n            b.spatialLocator = b.core.spatialGrid[i, j, 0]\n            self.assertEqual(0, b.spatialLocator.k)\n            self.assertEqual(b.getSymmetryFactor(), powerMult)\n\n    def test_setBuLimitInfo(self):\n        self.block.adjustUEnrich(0.1)\n        self.block.setType(\"igniter fuel\")\n\n        self.block.setBuLimitInfo()\n\n        cur = self.block.p.buLimit\n        ref = 0.0\n        self.assertEqual(cur, ref)\n\n    def test_getTotalNDens(self):\n        self.block.setType(\"fuel\")\n\n        self.block.clearNumberDensities()\n        refDict = {\n            \"U235\": 0.00275173784234,\n            \"U238\": 0.0217358415457,\n            \"W182\": 1.09115150103e-05,\n            
\"W183\": 5.89214392093e-06,\n            \"W184\": 1.26159558164e-05,\n            \"W186\": 1.17057432664e-05,\n            \"ZR\": 0.00709003962772,\n        }\n        self.block.setNumberDensities(refDict)\n\n        cur = self.block.getTotalNDens()\n\n        tot = 0.0\n        for nucName in refDict.keys():\n            ndens = self.block.getNumberDensity(nucName)\n            tot += ndens\n\n        ref = tot\n        places = 6\n        self.assertAlmostEqual(cur, ref, places=places)\n\n    def test_getHMDens(self):\n        self.block.setType(\"fuel\")\n        self.block.clearNumberDensities()\n        refDict = {\n            \"U235\": 0.00275173784234,\n            \"U238\": 0.0217358415457,\n            \"W182\": 1.09115150103e-05,\n            \"W183\": 5.89214392093e-06,\n            \"W184\": 1.26159558164e-05,\n            \"W186\": 1.17057432664e-05,\n            \"ZR\": 0.00709003962772,\n        }\n        self.block.setNumberDensities(refDict)\n\n        cur = self.block.getHMDens()\n\n        hmDens = 0.0\n        for nuclide in refDict.keys():\n            if nucDir.isHeavyMetal(nuclide):\n                # then nuclide is a HM\n                hmDens += self.block.getNumberDensity(nuclide)\n\n        ref = hmDens\n\n        places = 6\n        self.assertAlmostEqual(cur, ref, places=places)\n\n    def test_getFissileMassEnrich(self):\n        fuelDims = {\"Tinput\": 273.0, \"Thot\": 273.0, \"od\": 0.76, \"id\": 0.0, \"mult\": 1.0}\n        self.fuelComponent = components.Circle(\"fuel\", \"UZr\", **fuelDims)\n        self.block.add(self.fuelComponent)\n        self.block.setHeight(100.0)\n\n        self.block.clearNumberDensities()\n        refDict = {\n            \"U235\": 0.00275173784234,\n            \"U238\": 0.0217358415457,\n            \"W182\": 1.09115150103e-05,\n            \"W183\": 5.89214392093e-06,\n            \"W184\": 1.26159558164e-05,\n            \"W186\": 1.17057432664e-05,\n            \"ZR\": 0.00709003962772,\n     
   }\n        self.block.setNumberDensities(refDict)\n\n        cur = self.block.getFissileMassEnrich()\n\n        ref = self.block.getFissileMass() / self.block.getHMMass()\n        places = 4\n        self.assertAlmostEqual(cur, ref, places=places)\n        self.block.remove(self.fuelComponent)\n\n    def test_getMicroSuffix(self):\n        self.assertEqual(self.block.getMicroSuffix(), \"AA\")\n\n        self.block.p.xsType = \"Z\"\n        self.assertEqual(self.block.getMicroSuffix(), \"ZA\")\n\n        self.block.p.xsType = \"RS\"\n        self.assertEqual(self.block.getMicroSuffix(), \"RS\")\n\n        self.block.p.envGroup = \"X\"\n        self.block.p.xsType = \"AB\"\n        with self.assertRaises(ValueError):\n            self.block.getMicroSuffix()\n\n    def test_getUraniumMassEnrich(self):\n        fuel = self.block.getComponent(Flags.FUEL)\n        fuel.setNumberDensity(\"U234\", 1.0e-4)\n        self.block.adjustUEnrich(0.25)\n\n        ref = 0.25\n\n        self.block.adjustUEnrich(ref)\n        cur = self.block.getUraniumMassEnrich()\n\n        places = 6\n        self.assertAlmostEqual(cur, ref, places=places)\n\n    def test_getUraniumNumEnrich(self):\n        fuel = self.block.getComponent(Flags.FUEL)\n        fuel.setNumberDensity(\"U234\", 1.0e-4)\n        self.block.adjustUEnrich(0.25)\n\n        cur = self.block.getUraniumNumEnrich()\n\n        u8 = self.block.getNumberDensity(\"U238\")\n        u5 = self.block.getNumberDensity(\"U235\")\n        u4 = self.block.getNumberDensity(\"U234\")\n        ref = u5 / (u8 + u5 + u4)\n\n        self.assertAlmostEqual(cur, ref, places=6)\n\n        # test the zero edge case\n        self.block.adjustUEnrich(0)\n        cur = self.block.getUraniumNumEnrich()\n        self.assertEqual(cur, 0.0)\n\n        self.block.setNumberDensity(\"U238\", 0.0)\n        cur = self.block.getUraniumNumEnrich()\n        self.assertEqual(cur, 0.0)\n\n    def test_getUraniumNumEnrichWith233(self):\n        fuel = 
self.block.getComponent(Flags.FUEL)\n        u5 = fuel.getNumberDensity(\"U235\")\n        fuel.setNumberDensity(\"U233\", 0.005)\n        self.block.adjustUEnrich(0.25)\n\n        cur = self.block.getUraniumNumEnrich()\n\n        u3 = self.block.getNumberDensity(\"U233\")\n        u5 = self.block.getNumberDensity(\"U235\")\n        u8 = self.block.getNumberDensity(\"U238\")\n        ref = (u3 + u5) / (u3 + u5 + u8)\n\n        self.assertAlmostEqual(cur, ref, places=6)\n\n    def test_getNumberOfAtoms(self):\n        self.block.clearNumberDensities()\n        refDict = {\n            \"U235\": 0.00275173784234,\n            \"U238\": 0.0217358415457,\n            \"W182\": 1.09115150103e-05,\n            \"W183\": 5.89214392093e-06,\n            \"W184\": 1.26159558164e-05,\n            \"W186\": 1.17057432664e-05,\n            \"ZR\": 0.00709003962772,\n        }\n        self.block.setNumberDensities(refDict)\n\n        nucName = \"U238\"\n        moles = self.block.getNumberOfAtoms(nucName) / units.AVOGADROS_NUMBER  # about 158 moles\n        refMoles = refDict[\"U238\"] * self.block.getVolume() / (units.MOLES_PER_CC_TO_ATOMS_PER_BARN_CM)\n        self.assertAlmostEqual(moles, refMoles)\n\n    def test_getPu(self):\n        fuel = self.block.getComponent(Flags.FUEL)\n        vFrac = fuel.getVolumeFraction()\n        refDict = {\n            \"AM241\": 2.695633500634074e-05,\n            \"U238\": 0.015278429635341755,\n            \"O16\": 0.04829586365251901,\n            \"U235\": 0.004619446966056436,\n            \"PU239\": 0.0032640382635406515,\n            \"PU238\": 4.266845903720035e-06,\n            \"PU240\": 0.000813669265183342,\n            \"PU241\": 0.00011209296581262849,\n            \"PU242\": 2.3078961257395204e-05,\n        }\n        fuel.setNumberDensities({nuc: v / vFrac for nuc, v in refDict.items()})\n\n        # test moles\n        cur = self.block.getPuMoles()\n        ndens = 0.0\n        for nucName in refDict.keys():\n            
if nucName in [\"PU238\", \"PU239\", \"PU240\", \"PU241\", \"PU242\"]:\n                ndens += self.block.getNumberDensity(nucName)\n        ref = ndens / units.MOLES_PER_CC_TO_ATOMS_PER_BARN_CM * self.block.getVolume() * self.block.getSymmetryFactor()\n        self.assertAlmostEqual(cur, ref, places=6)\n\n    def test_adjustDensity(self):\n        u235Dens = 0.003\n        u238Dens = 0.010\n        self.block.setNumberDensity(\"U235\", u235Dens)\n        self.block.setNumberDensity(\"U238\", u238Dens)\n        mass1 = self.block.getMass([\"U235\", \"U238\"])\n        densAdj = 0.9\n        nucList = [\"U235\", \"U238\"]\n        massDiff = self.block.adjustDensity(densAdj, nucList, returnMass=True)\n        mass2 = self.block.getMass([\"U235\", \"U238\"])\n\n        cur = self.block.getNumberDensity(\"U235\")\n        ref = densAdj * u235Dens\n        self.assertAlmostEqual(cur, ref, places=9)\n\n        cur = self.block.getNumberDensity(\"U238\")\n        ref = densAdj * u238Dens\n        self.assertAlmostEqual(cur, ref, places=9)\n\n        self.assertAlmostEqual(mass2 - mass1, massDiff)\n\n    @patch.object(blocks.HexBlock, \"getSymmetryFactor\")\n    def test_getMgFlux(self, mock_sf):\n        # calculate Mg Flux with a Symmetry Factor of 3\n        mock_sf.return_value = 3\n        neutronFlux = 1.0\n        gammaFlux = 2.0\n        self.block.p.mgFlux = np.full(5, neutronFlux)\n        self.block.p.mgFluxGamma = np.full(4, gammaFlux)\n        fuel = self.block.getComponent(Flags.FUEL)\n        blockVol = self.block.getVolume()\n        fuelVol = fuel.getVolume()\n        # compute volume fraction of component; need symmetry factor\n        volFrac = fuelVol / blockVol / self.block.getSymmetryFactor()\n        neutronFluxInt = fuel.getIntegratedMgFlux()\n        gammaFluxInt = fuel.getIntegratedMgFlux(gamma=True)\n        # getIntegratedMgFlux should be scaled by the component volume fraction\n        np.testing.assert_almost_equal(neutronFluxInt, 
np.full(5, neutronFlux * volFrac))\n        np.testing.assert_almost_equal(gammaFluxInt, np.full(4, gammaFlux * volFrac))\n\n        # getMgFlux should return regular, non-integrated flux\n        neutronMgFlux = fuel.getMgFlux()\n        gammaMgFlux = fuel.getMgFlux(gamma=True)\n        np.testing.assert_almost_equal(neutronMgFlux, np.full(5, neutronFlux / blockVol))\n        np.testing.assert_almost_equal(gammaMgFlux, np.full(4, gammaFlux / blockVol))\n\n        # calculate Mg Flux with a Symmetry Factor of 1\n        mock_sf.return_value = 1\n        self.block.p.mgFlux = np.full(5, neutronFlux)\n        self.block.p.mgFluxGamma = np.full(4, gammaFlux)\n        fuel = self.block.getComponent(Flags.FUEL)\n        blockVol = self.block.getVolume()\n        fuelVol = fuel.getVolume()\n        volFrac = fuelVol / blockVol / self.block.getSymmetryFactor()\n        neutronFluxInt = fuel.getIntegratedMgFlux()\n        gammaFluxInt = fuel.getIntegratedMgFlux(gamma=True)\n        # getIntegratedMgFlux should be scaled by the component volume fraction\n        np.testing.assert_almost_equal(neutronFluxInt, np.full(5, neutronFlux * volFrac))\n        np.testing.assert_almost_equal(gammaFluxInt, np.full(4, gammaFlux * volFrac))\n\n        # getMgFlux should return regular, non-integrated flux\n        neutronMgFlux = fuel.getMgFlux()\n        gammaMgFlux = fuel.getMgFlux(gamma=True)\n        np.testing.assert_almost_equal(neutronMgFlux, np.full(5, neutronFlux / blockVol))\n        np.testing.assert_almost_equal(gammaMgFlux, np.full(4, gammaFlux / blockVol))\n\n    @patch.object(blocks.HexBlock, \"getSymmetryFactor\")\n    def test_completeInitialLoading(self, mock_sf):\n        \"\"\"Ensure that some BOL block and component params are populated properly.\n\n        Notes\n        -----\n        - When checking component-level BOL params, puFrac is skipped due to 1) there's no Pu in the block, and 2)\n          getPuMoles is functionally identical to getHMMoles (just limits 
nuclides from heavy metal to just Pu).\n        - getSymmetryFactor is mocked to return 3. This indicates that the block is in the center-most assembly.\n          Providing this mock ensures that symmetry factors are tested as well (otherwise it's just a factor of 1\n          and it is a less robust test).\n        \"\"\"\n        mock_sf.return_value = 3\n        area = self.block.getArea()\n        height = 2.0\n        self.block.setHeight(height)\n\n        self.block.clearNumberDensities()\n        self.block.setNumberDensities(\n            {\n                \"U238\": 0.018518936996911595,\n                \"ZR\": 0.006040713762820692,\n                \"U235\": 0.0023444806416701184,\n                \"NA23\": 0.009810163826158255,\n            }\n        )\n\n        self.block.completeInitialLoading()\n\n        sf = self.block.getSymmetryFactor()\n        cur = self.block.p.molesHmBOL\n        ref = self.block.getHMDens() / MOLES_PER_CC_TO_ATOMS_PER_BARN_CM * height * area\n        self.assertAlmostEqual(cur, ref, places=12)\n\n        totalHMMass = 0.0\n        for c in self.block:\n            nucs = c.getNuclides()\n            hmNucs = [nuc for nuc in nucs if nucDir.isHeavyMetal(nuc)]\n            hmNDens = {hmNuc: c.getNumberDensity(hmNuc) for hmNuc in hmNucs}\n            # use sf to account for only a 1/sf portion of the component being in the block\n            hmMass = densityTools.calculateMassDensity(hmNDens) * c.getVolume() / sf\n            totalHMMass += hmMass\n            if hmMass:\n                self.assertAlmostEqual(c.p.massHmBOL, hmMass, places=12)\n                self.assertAlmostEqual(\n                    c.p.molesHmBOL,\n                    sum(ndens for ndens in hmNDens.values()) / units.MOLES_PER_CC_TO_ATOMS_PER_BARN_CM * c.getVolume(),\n                    places=12,\n                )\n                self.assertAlmostEqual(c.p.enrichmentBOL, c.getFissileMassEnrich(), places=12)\n            else:\n                
self.assertEqual(c.p.massHmBOL, 0.0)\n                self.assertEqual(c.p.molesHmBOL, 0.0)\n                self.assertEqual(c.p.enrichmentBOL, 0.0)\n\n        self.assertAlmostEqual(self.block.p.massHmBOL, totalHMMass)\n        self.assertAlmostEqual(self.block.p.enrichmentBOL, self.block.getFissileMassEnrich(), places=12)\n\n    def test_add(self):\n        numComps = len(self.block.getComponents())\n\n        fuelDims = {\"Tinput\": 25.0, \"Thot\": 600, \"od\": 0.76, \"id\": 0.00, \"mult\": 127.0}\n\n        newComp = components.Circle(\"fuel\", \"UZr\", **fuelDims)\n        self.block.add(newComp)\n        self.assertEqual(numComps + 1, len(self.block.getComponents()))\n\n        self.assertIn(newComp, self.block.getComponents())\n        self.block.remove(newComp)\n\n    def test_extend(self):\n        # generate a list of composites to extend onto this block\n        comps = []\n        nunComps = 3\n        for i in range(nunComps):\n            fuelDims = {\n                \"Tinput\": 25.0 * i,\n                \"Thot\": 600,\n                \"od\": 0.76,\n                \"id\": 0.00,\n                \"mult\": 127.0,\n            }\n            comps.append(components.Circle(\"fuel\", \"UZr\", **fuelDims))\n\n        # show the composites have no parents\n        for c in comps:\n            self.assertIsNone(c.parent)\n\n        # add the composites to the block\n        lenBlock = len(self.block)\n        self.block.extend(comps)\n        self.assertEqual(len(self.block), lenBlock + nunComps)\n\n        # show all the composites in the block have the block as the parent\n        for c in self.block:\n            self.assertIs(c.parent, self.block)\n\n    def test_hasComponents(self):\n        self.assertTrue(self.block.hasComponents([Flags.FUEL, Flags.CLAD]))\n        self.assertTrue(self.block.hasComponents(Flags.FUEL))\n        self.assertFalse(self.block.hasComponents([Flags.FUEL, Flags.CLAD, Flags.DUMMY]))\n\n    def 
test_getComponentNames(self):\n        cur = self.block.getComponentNames()\n        ref = set(\n            [\n                \"annular void\",\n                \"bond\",\n                \"fuel\",\n                \"gap1\",\n                \"inner liner\",\n                \"gap2\",\n                \"outer liner\",\n                \"gap3\",\n                \"clad\",\n                \"wire\",\n                \"coolant\",\n                \"duct\",\n                \"interCoolant\",\n            ]\n        )\n        self.assertEqual(cur, ref)\n\n    def test_getComponents(self):\n        cur = self.block.getComponents(Flags.FUEL)\n        self.assertEqual(len(cur), 1)\n\n        comps = self.block.getComponents(Flags.FUEL) + self.block.getComponents(Flags.CLAD)\n        self.assertEqual(len(comps), 2)\n\n        inter = self.block.getComponents(Flags.INTERCOOLANT)\n        self.assertEqual(len(inter), 1)\n\n        inter = self.block.getComponents(Flags.INTERCOOLANT, exact=True)  # case insensitive\n        self.assertEqual(inter, [self.block.getComponent(Flags.INTERCOOLANT)])\n\n        cool = self.block.getComponents(Flags.COOLANT, exact=True)\n        self.assertEqual(len(cool), 1)\n\n    def test_getComponent(self):\n        cur = self.block.getComponent(Flags.FUEL)\n        self.assertIsInstance(cur, components.Component)\n\n        inter = self.block.getComponent(Flags.INTERCOOLANT)\n        self.assertIsInstance(inter, components.Component)\n\n        with self.assertRaises(KeyError):\n            # this really isn't the responsibility of block, more of Flags, but until this refactor\n            # is over...\n            inter = self.block.getComponent(Flags.fromString(\"intercoolantlala\"), exact=True)\n\n        cool = self.block.getComponent(Flags.COOLANT, exact=True)\n        self.assertIsInstance(cool, components.Component)\n\n    def test_getComponentsOfShape(self):\n        ref = [\n            \"annular void\",\n            \"bond\",\n       
     \"fuel\",\n            \"gap1\",\n            \"inner liner\",\n            \"gap2\",\n            \"outer liner\",\n            \"gap3\",\n            \"clad\",\n        ]\n        cur = [c.name for c in self.block.getComponentsOfShape(components.Circle)]\n        self.assertEqual(sorted(ref), sorted(cur))\n\n    def test_getComponentsOfMaterial(self):\n        cur = self.block.getComponentsOfMaterial(materials.UZr())\n        ref = self.block.getComponent(Flags.FUEL)\n        self.assertEqual(cur[0], ref)\n\n        self.assertEqual(\n            self.block.getComponentsOfMaterial(materials.HT9()),\n            [\n                self.block.getComponent(Flags.OUTER | Flags.LINER),\n                self.block.getComponent(Flags.CLAD),\n                self.block.getComponent(Flags.WIRE),\n                self.block.getComponent(Flags.DUCT),\n            ],\n        )\n\n        # test edge case\n        cur = self.block.getComponentsOfMaterial(None, \"UZr\")\n        self.assertEqual(cur[0], ref)\n\n    def test_getComponentByName(self):\n        \"\"\"Test children by name.\"\"\"\n        self.assertIsNone(self.block.getComponentByName(\"not the droid you are looking for\"))\n        self.assertIsNotNone(self.block.getComponentByName(\"annular void\"))\n\n    def test_getSortedCompsInClad(self):\n        \"\"\"Test that components can be sorted within a block and returned in the correct order.\n\n        For an arbitrary example: a clad component.\n        \"\"\"\n        expected = [\n            self.block.getComponentByName(c)\n            for c in [\n                \"annular void\",\n                \"bond\",\n                \"fuel\",\n                \"gap1\",\n                \"inner liner\",\n                \"gap2\",\n                \"outer liner\",\n                \"gap3\",\n            ]\n        ]\n        clad = self.block.getComponent(Flags.CLAD)\n        actual = self.block.getSortedComponentsInsideOfComponent(clad)\n        
self.assertListEqual(actual, expected)\n\n    def test_getSortedCompsInDuct(self):\n        \"\"\"Test that components can be sorted within a block and returned in the correct order.\n\n        For an arbitrary example: a duct.\n        \"\"\"\n        expected = [\n            self.block.getComponentByName(c)\n            for c in [\n                \"annular void\",\n                \"bond\",\n                \"fuel\",\n                \"gap1\",\n                \"inner liner\",\n                \"gap2\",\n                \"outer liner\",\n                \"gap3\",\n                \"clad\",\n                \"wire\",\n                \"coolant\",\n            ]\n        ]\n        clad = self.block.getComponent(Flags.DUCT)\n        actual = self.block.getSortedComponentsInsideOfComponent(clad)\n        self.assertListEqual(actual, expected)\n\n    def test_getNumComponents(self):\n        cur = self.block.getNumComponents(Flags.FUEL)\n        ref = self.block.getDim(Flags.FUEL, \"mult\")\n        self.assertEqual(cur, ref)\n\n        self.assertEqual(ref, self.block.getNumComponents(Flags.CLAD))\n\n        self.assertEqual(1, self.block.getNumComponents(Flags.DUCT))\n\n    def test_getNumPins(self):\n        \"\"\"Test that we can get the number of pins from various blocks.\n\n        .. 
test:: Retrieve the number of pins from various blocks.\n            :id: T_ARMI_BLOCK_NPINS\n            :tests: R_ARMI_BLOCK_NPINS\n        \"\"\"\n        cur = self.block.getNumPins()\n        ref = self.block.getDim(Flags.FUEL, \"mult\")\n        self.assertEqual(cur, ref)\n\n        emptyBlock = blocks.HexBlock(\"empty\")\n        self.assertEqual(emptyBlock.getNumPins(), 0)\n\n        holedRectangle = complexShapes.HoledRectangle(\"holedRectangle\", \"HT9\", 100, 100, 0.5, 1.0, 1.0)\n        holedRectangle.setType(\"component\", flags=Flags.CONTROL)\n        emptyBlock.add(holedRectangle)\n        self.assertEqual(emptyBlock.getNumPins(), 0)\n\n        hexagon = basicShapes.Hexagon(\"hexagon\", \"HT9\", 100, 100, 1)\n        hexagon.setType(\"component\", flags=Flags.SHIELD)\n        emptyBlock.add(hexagon)\n        self.assertEqual(emptyBlock.getNumPins(), 0)\n\n        pins = basicShapes.Circle(\"circle\", \"HT9\", 100, 100, 1, 0, 8)\n        pins.setType(\"component\", flags=Flags.PLENUM)\n        emptyBlock.add(pins)\n        self.assertEqual(emptyBlock.getNumPins(), 8)\n\n    def test_setLinPowByPin(self):\n        numPins = self.block.getNumPins()\n        neutronPower = [10.0 * i for i in range(numPins)]\n        gammaPower = [1.0 * i for i in range(numPins)]\n        totalPower = [x + y for x, y in zip(neutronPower, gammaPower)]\n\n        totalPowerKey = \"linPowByPin\"\n        neutronPowerKey = f\"linPowByPin{NEUTRON}\"\n        gammaPowerKey = f\"linPowByPin{GAMMA}\"\n\n        # Try setting gamma power too early and then reset\n        with self.assertRaises(UnboundLocalError) as context:\n            self.block.setPinPowers(\n                gammaPower,\n                powerKeySuffix=GAMMA,\n            )\n        errorMsg = f\"Neutron power has not been set yet. 
Cannot set total power for {self.block}.\"\n        self.assertTrue(errorMsg in str(context.exception))\n        self.block.p[gammaPowerKey] = None\n\n        # Test with no powerKeySuffix\n        self.block.setPinPowers(neutronPower)\n        assert_allclose(self.block.p[totalPowerKey], np.array(neutronPower))\n        self.assertIsNone(self.block.p[neutronPowerKey])\n        self.assertIsNone(self.block.p[gammaPowerKey])\n\n        # Test with neutron powers\n        self.block.setPinPowers(\n            neutronPower,\n            powerKeySuffix=NEUTRON,\n        )\n        assert_allclose(self.block.p[totalPowerKey], np.array(neutronPower))\n        assert_allclose(self.block.p[neutronPowerKey], np.array(neutronPower))\n        self.assertIsNone(self.block.p[gammaPowerKey])\n\n        # Test with gamma powers\n        self.block.setPinPowers(\n            gammaPower,\n            powerKeySuffix=GAMMA,\n        )\n        assert_allclose(self.block.p[totalPowerKey], np.array(totalPower))\n        assert_allclose(self.block.p[neutronPowerKey], np.array(neutronPower))\n        assert_allclose(self.block.p[gammaPowerKey], np.array(gammaPower))\n\n    def test_getComponentAreaFrac(self):\n        def calcFracManually(names):\n            tFrac = 0.0\n            for n in names:\n                for c, frac in fracs:\n                    if c.getName() == n:\n                        tFrac += frac\n            return tFrac\n\n        self.block.setHeight(2.0)\n\n        refList = [Flags.BOND, Flags.COOLANT]\n        cur = self.block.getComponentAreaFrac(refList)\n        fracs = self.block.getVolumeFractions()\n\n        ref = calcFracManually((\"bond\", \"coolant\"))\n        places = 6\n        self.assertAlmostEqual(cur, ref, places=places)\n\n        # allow inexact for things like fuel1, fuel2 or clad vs. 
cladding\n        val = self.block.getComponentAreaFrac([Flags.COOLANT, Flags.INTERCOOLANT])\n        ref = calcFracManually([\"coolant\", \"interCoolant\"])\n        refWrong = calcFracManually(\n            [\"coolant\", \"interCoolant\", \"clad\"]\n        )  # can't use 'clad' b/c ``calcFracManually`` is exact only\n        self.assertAlmostEqual(ref, val)\n        self.assertNotAlmostEqual(refWrong, val)\n\n    def test_100_getPinPitch(self):\n        cur = self.block.getPinPitch()\n        ref = self.block.getDim(Flags.CLAD, \"od\") + self.block.getDim(Flags.WIRE, \"od\")\n        places = 6\n        self.assertAlmostEqual(cur, ref, places=places)\n\n    def test_101_getPitch(self):\n        cur = self.block.getPitch(returnComp=True)\n        ref = (\n            self.block.getDim(Flags.INTERCOOLANT, \"op\"),\n            self.block.getComponent(Flags.INTERCOOLANT),\n        )\n        self.assertEqual(cur, ref)\n\n        newb = copy.deepcopy(self.block)\n        p1, c1 = self.block.getPitch(returnComp=True)\n        p2, c2 = newb.getPitch(returnComp=True)\n\n        self.assertNotEqual(c1, c2)\n        self.assertEqual(newb.getLargestComponent(\"op\"), c2)\n        self.assertEqual(p1, p2)\n\n    def test_102_setPitch(self):\n        pitch = 17.5\n        self.block.setPitch(pitch)\n        cur = self.block.getPitch()\n        self.assertEqual(cur, pitch)\n        self.assertEqual(self.block.getComponent(Flags.INTERCOOLANT).getDimension(\"op\"), pitch)\n\n    def test_106_getAreaFractions(self):\n        cur = self.block.getVolumeFractions()\n        tot = 0.0\n        areas = []\n        for c in self.block.iterComponents():\n            a = c.getArea()\n            tot += a\n            areas.append((c, a))\n        fracs = {}\n        for c, a in areas:\n            fracs[c.getName()] = a / tot\n\n        places = 6\n        for c, a in cur:\n            self.assertAlmostEqual(a, fracs[c.getName()], places=places)\n\n        
self.assertAlmostEqual(sum(fracs.values()), sum([a for c, a in cur]))\n\n    def test_expandElementalToIsotopics(self):\n        \"\"\"Tests the expand to elementals capability.\"\"\"\n        initialN = {}\n        initialM = {}\n        byName = NuclideBases().byName\n        elementals = [byName[nn] for nn in [\"FE\", \"CR\", \"SI\", \"V\", \"MO\"]]\n        for elemental in elementals:\n            initialN[elemental] = self.block.getNumberDensity(elemental.name)  # homogenized\n            initialM[elemental] = self.block.getMass(elemental.name)\n\n        for elemental in elementals:\n            self.block.expandElementalToIsotopics(elemental)\n            newDens = 0.0\n            newMass = 0.0\n            for natNuc in elemental.getNaturalIsotopics():\n                newDens += self.block.getNumberDensity(natNuc.name)\n                newMass += self.block.getMass(natNuc.name)\n\n            self.assertAlmostEqual(\n                initialN[elemental],\n                newDens,\n                msg=\"Isotopic {2} ndens does not add up to {0}. It adds to {1}\".format(\n                    initialN[elemental], newDens, elemental\n                ),\n            )\n            self.assertAlmostEqual(\n                initialM[elemental],\n                newMass,\n                msg=\"Isotopic {2} mass does not add up to {0} g. 
It adds to {1}\".format(\n                    initialM[elemental], newMass, elemental\n                ),\n            )\n\n    def test_expandAllElementalsToIsotopics(self):\n        \"\"\"Tests the expand all elementals simlutaneously capability.\"\"\"\n        initialN = {}\n        initialM = {}\n        byName = NuclideBases().byName\n        elementals = [byName[nn] for nn in [\"FE\", \"CR\", \"SI\", \"V\", \"MO\"]]\n        for elemental in elementals:\n            initialN[elemental] = self.block.getNumberDensity(elemental.name)  # homogenized\n            initialM[elemental] = self.block.getMass(elemental.name)\n\n        self.block.expandAllElementalsToIsotopics()\n\n        for elemental in elementals:\n            newDens = 0.0\n            newMass = 0.0\n            for natNuc in elemental.getNaturalIsotopics():\n                newDens += self.block.getNumberDensity(natNuc.name)\n                newMass += self.block.getMass(natNuc.name)\n\n            self.assertAlmostEqual(\n                initialN[elemental],\n                newDens,\n                msg=\"Isotopic {2} ndens does not add up to {0}. It adds to {1}\".format(\n                    initialN[elemental], newDens, elemental\n                ),\n            )\n            self.assertAlmostEqual(\n                initialM[elemental],\n                newMass,\n                msg=\"Isotopic {2} mass does not add up to {0} g. 
It adds to {1}\".format(\n                    initialM[elemental], newMass, elemental\n                ),\n            )\n\n    def test_setPitch(self):\n        \"\"\"\n        Checks consistency after adjusting pitch.\n\n        Needed to verify fix to Issue #165.\n        \"\"\"\n        b = self.block\n        moles1 = b.p.molesHmBOL\n        b.setPitch(17.5)\n        moles2 = b.p.molesHmBOL\n        self.assertAlmostEqual(moles1, moles2)\n        b.setPitch(20.0)\n        moles3 = b.p.molesHmBOL\n        self.assertAlmostEqual(moles2, moles3)\n\n    def test_setImportantParams(self):\n        \"\"\"Confirm that important block parameters can be set and get.\"\"\"\n        # Test ability to set and get flux\n        applyDummyData(self.block)\n        self.assertEqual(self.block.p.mgFlux[0], 161720716762.12997)\n        self.assertEqual(self.block.p.mgFlux[-1], 601494405.293505)\n\n        # Test ability to set and get number density\n        fuel = self.block.getComponent(Flags.FUEL)\n\n        u235_dens = fuel.getNumberDensity(\"U235\")\n        self.assertEqual(u235_dens, 0.003695461770836022)\n\n        fuel.setNumberDensity(\"U235\", 0.5)\n        u235_dens = fuel.getNumberDensity(\"U235\")\n        self.assertEqual(u235_dens, 0.5)\n\n        # TH parameter test\n        self.assertEqual(0, self.block.p.THmassFlowRate)\n        self.block.p.THmassFlowRate = 10\n        self.assertEqual(10, self.block.p.THmassFlowRate)\n\n    def test_getMfp(self):\n        \"\"\"Test mean free path.\"\"\"\n        applyDummyData(self.block)\n        # These are unverified numbers, just the result of this calculation.\n        mfp, mfpAbs, diffusionLength = self.block.getMfp()\n        # no point testing these number to high accuracy.\n        assert_allclose(3.9, mfp, rtol=0.1)\n        assert_allclose(235.0, mfpAbs, rtol=0.1)\n        assert_allclose(17.0, diffusionLength, rtol=0.1)\n\n    def test_consistentMassDensVolCold(self):\n        \"\"\"Consistent mass density 
and volume betwen cold block and component.\"\"\"\n        block = self.block\n        expectedData = []\n        actualData = []\n        for c in block:\n            expectedData.append(getComponentData(c))\n            actualData.append((c, c.density(), c.getVolume(), c.density() * c.getVolume()))\n\n        for expected, actual in zip(expectedData, actualData):\n            msg = (\n                \"Data (component, density, volume, mass) for component {} does not match. \"\n                \"Expected: {}, Actual: {}\".format(expected[0], expected, actual)\n            )\n            for expectedVal, actualVal in zip(expected, actual):\n                self.assertAlmostEqual(expectedVal, actualVal, msg=msg)\n\n    def test_consistentMassDensVolHot(self):\n        \"\"\"Consistent mass density and volume betwen hot block and component.\"\"\"\n        block = self._hotBlock\n        expectedData = []\n        actualData = []\n        for c in block:\n            expectedData.append(getComponentData(c))\n            actualData.append((c, c.density(), c.getVolume(), c.density() * c.getVolume()))\n\n        for expected, actual in zip(expectedData, actualData):\n            msg = (\n                \"Data (component, density, volume, mass) for component {} does not match. \"\n                \"Expected: {}, Actual: {}\".format(expected[0], expected, actual)\n            )\n            for expectedVal, actualVal in zip(expected, actual):\n                self.assertAlmostEqual(expectedVal, actualVal, msg=msg)\n\n    def test_consistentAreaWithOverlappingComp(self):\n        \"\"\"\n        Test that negative gap areas correctly account for area overlapping upon thermal expansion.\n\n        Notes\n        -----\n        This test calculates a reference coolant area by subtracting the areas of the intercoolant, duct, wire wrap, and\n        pins from the total hex block area. 
The area of the pins is calculated using only the outer radius of the clad.\n        This avoids the use of negative areas as implemented in Block.getVolumeFractions. Na-23 mass will not be\n        conserved as when duct/clad expands sodium is evacuated.\n\n        See Also\n        --------\n        armi.reactor.blocks.Block.getVolumeFractions\n        \"\"\"\n        numFE56 = self.block.getNumberOfAtoms(\"FE56\")\n        numU235 = self.block.getNumberOfAtoms(\"U235\")\n        for c in self.block:\n            c.setTemperature(700)\n        hasNegativeArea = any(c.getArea() < 0 for c in self.block)\n        self.assertTrue(hasNegativeArea)\n        self.block.getVolumeFractions()  # sets coolant area\n        self._testDimensionsAreLinked()  # linked dimensions are needed for this test to work\n\n        blockPitch = self.block.getPitch()\n        self.assertAlmostEqual(blockPitch, self.block.getComponent(Flags.INTERCOOLANT).getDimension(\"op\"))\n        totalHexArea = blockPitch**2 * math.sqrt(3) / 2.0\n\n        clad = self.block.getComponent(Flags.CLAD)\n        pinArea = math.pi / 4.0 * clad.getDimension(\"od\") ** 2 * clad.getDimension(\"mult\")\n        ref = (\n            totalHexArea\n            - self.block.getComponent(Flags.INTERCOOLANT).getArea()\n            - self.block.getComponent(Flags.DUCT).getArea()\n            - self.block.getComponent(Flags.WIRE).getArea()\n            - pinArea\n        )\n\n        self.assertAlmostEqual(totalHexArea, self.block.getArea())\n        self.assertAlmostEqual(ref, self.block.getComponent(Flags.COOLANT).getArea())\n\n        self.assertTrue(np.allclose(numFE56, self.block.getNumberOfAtoms(\"FE56\")))\n        self.assertTrue(np.allclose(numU235, self.block.getNumberOfAtoms(\"U235\")))\n\n    def _testDimensionsAreLinked(self):\n        prevC = None\n        for c in self.block.getComponentsOfShape(components.Circle):\n            if prevC:\n                self.assertAlmostEqual(prevC.getDimension(\"od\"), 
c.getDimension(\"id\"))\n            prevC = c\n        self.assertAlmostEqual(\n            self.block.getComponent(Flags.DUCT).getDimension(\"op\"),\n            self.block.getComponent(Flags.INTERCOOLANT).getDimension(\"ip\"),\n        )\n\n    def test_pinMgFluxes(self):\n        \"\"\"Test setting/getting of pin-wise multigroup fluxes.\"\"\"\n        self.assertIsNone(self.block.p.pinMgFluxes)\n        self.assertIsNone(self.block.p.pinMgFluxesAdj)\n        self.assertIsNone(self.block.p.pinMgFluxesGamma)\n\n        nFlux = np.random.rand(10, 33)\n        aFlux = np.random.random(nFlux.shape)\n        gFlux = np.random.random(nFlux.shape)\n\n        self.block.setPinMgFluxes(nFlux)\n        assert_array_equal(self.block.p.pinMgFluxes, nFlux)\n        self.assertIsNone(self.block.p.pinMgFluxesAdj)\n        self.assertIsNone(self.block.p.pinMgFluxesGamma)\n\n        self.block.setPinMgFluxes(aFlux, adjoint=True)\n        assert_array_equal(self.block.p.pinMgFluxesAdj, aFlux)\n        # Make sure we didn't modify anything else\n        assert_array_equal(self.block.p.pinMgFluxes, nFlux)\n        self.assertIsNone(self.block.p.pinMgFluxesGamma)\n\n        self.block.setPinMgFluxes(gFlux, gamma=True)\n        assert_array_equal(self.block.p.pinMgFluxesGamma, gFlux)\n        assert_array_equal(self.block.p.pinMgFluxesAdj, aFlux)\n        assert_array_equal(self.block.p.pinMgFluxes, nFlux)\n\n    def test_getComponentsInLinkedOrder(self):\n        comps = self.block.getComponentsInLinkedOrder()\n        self.assertEqual(len(comps), len(self.block))\n\n        comps.pop(0)\n        with self.assertRaises(RuntimeError):\n            _ = self.block.getComponentsInLinkedOrder(comps)\n\n    def test_mergeWithBlock(self):\n        fuel1 = self.block.getComponent(Flags.FUEL)\n        fuel1.setNumberDensity(\"CM246\", 0.0)\n        block2 = loadTestBlock()\n        fuel2 = block2.getComponent(Flags.FUEL)\n        fuel2.setNumberDensity(\"CM246\", 0.02)\n        
self.assertEqual(self.block.getNumberDensity(\"CM246\"), 0.0)\n        self.block.mergeWithBlock(block2, 0.1)\n        self.assertGreater(self.block.getNumberDensity(\"CM246\"), 0.0)\n        self.assertLess(self.block.getNumberDensity(\"CM246\"), 0.02)\n\n    def test_getDimensions(self):\n        dims = self.block.getDimensions(\"od\")\n        self.assertIn(self.block.getComponent(Flags.FUEL).p.od, dims)\n\n    def test_getPlenumPin(self):\n        pin = self.block.getPlenumPin()\n        self.assertIsNone(pin)\n\n        b = copy.deepcopy(self.block)\n        b.p.flags = Flags.fromString(\"plenum aclp\")\n        pinDims = {\n            \"Tinput\": 25,\n            \"Thot\": 250,\n            \"od\": 1.0,\n            \"id\": 0,\n            \"mult\": 1,\n        }\n        pin = components.Circle(\"plenum pin\", \"HT9\", **pinDims)\n        pin.p.flags = Flags.fromString(\"gap\")\n        b.add(pin)\n        pin = b.getPlenumPin()\n        self.assertTrue(pin)\n\n    def test_pinPitches(self):\n        self.assertTrue(self.block.hasPinPitch())\n        self.assertAlmostEqual(self.block.getPinPitch(cold=True), 1.15)\n        self.assertAlmostEqual(self.block.getPinPitch(cold=False), 1.15)\n\n    def test_getReactionRates(self):\n        block = blocks.HexBlock(\"HexBlock\")\n        block.setType(\"defaultType\")\n        comp = basicShapes.Hexagon(\"hexagon\", \"MOX\", 1, 1, 1)\n        block.add(comp)\n        block.setHeight(1)\n        block.p.xsType = \"A\"\n\n        r = tests.getEmptyHexReactor()\n        assembly = makeTestAssembly(1, 1, r=r)\n        assembly.add(block)\n        r.core.add(assembly)\n        r.core.lib = isotxs.readBinary(ISOAA_PATH)\n        block.p.mgFlux = 1\n\n        self.assertAlmostEqual(\n            block.getReactionRates(\"PU239\")[\"nG\"],\n            block.getNumberDensity(\"PU239\") * sum(r.core.lib[\"PU39AA\"].micros.nGamma),\n        )\n\n        # the key is invalid, so should get back all zeros\n        
self.assertEqual(\n            block.getReactionRates(\"PU39\"),\n            {\"nG\": 0, \"nF\": 0, \"n2n\": 0, \"nA\": 0, \"nP\": 0, \"n3n\": 0},\n        )\n\n    def test_getComponentsThatAreLinkedTo(self):\n        c = self.block.getFirstComponent(Flags.FUEL)\n        linked = self.block.getComponentsThatAreLinkedTo(c, \"id\")\n        self.assertEqual(linked[0][1], \"od\")\n\n        c = self.block.getFirstComponent(Flags.CLAD)\n        linked = self.block.getComponentsThatAreLinkedTo(c, \"id\")\n        self.assertEqual(linked[0][1], \"od\")\n\n        c = self.block.getFirstComponent(Flags.DUCT)\n        linked = self.block.getComponentsThatAreLinkedTo(c, \"ip\")\n        self.assertEqual(len(linked), 0)\n\n\nclass BlockInputHeightsTests(unittest.TestCase):\n    def test_foundReactor(self):\n        \"\"\"Test the input height is pullable from blueprints.\"\"\"\n        r = loadTestReactor()[1]\n        msg = \"Input height from blueprints differs. Did a blueprint get updated and not this test?\"\n\n        # Grab a block from an assembly, so long as we have the height\n        assem = r.core.getFirstAssembly(Flags.IGNITER | Flags.FUEL)\n        lowerB = assem[0]\n        self.assertEqual(\n            lowerB.getInputHeight(),\n            25,\n            msg=msg,\n        )\n        # Grab another block just for good measure\n        midBlock = assem[2]\n        self.assertEqual(\n            midBlock.getInputHeight(),\n            25,\n            msg=msg,\n        )\n        # Top block has a different height. 
Make sure we don't just\n        # return 25 all the time\n        topBlock = assem[4]\n        self.assertEqual(topBlock.getInputHeight(), 75, msg=msg)\n\n    def test_noBlueprints(self):\n        \"\"\"Verify an error is raised if there are no blueprints.\"\"\"\n        b = buildSimpleFuelBlock()\n        with self.assertRaisesRegex(AttributeError, \"No ancestor.*blueprints\"):\n            b.getInputHeight()\n\n\nclass BlockEnergyDepositionConstants(unittest.TestCase):\n    \"\"\"Tests the energy deposition methods.\n\n    MagicMocks xsCollections.compute*Constants() -- we're not testing those methods specifically\n    so just make sure they're hit\n    \"\"\"\n\n    @classmethod\n    def setUpClass(cls):\n        cls.block = loadTestBlock()\n\n    def setUp(self):\n        self.block.core.lib = MagicMock()\n\n    @patch.object(xsCollections, \"computeFissionEnergyGenerationConstants\")\n    @patch.object(xsCollections, \"computeCaptureEnergyGenerationConstants\")\n    def test_getTotalEnergyGenerationConstants(self, mock_capture, mock_fission):\n        \"\"\"Mock both xsCollections methods so you get complete coverage.\"\"\"\n        _x = self.block.getTotalEnergyGenerationConstants()\n        self.assertEqual(mock_fission.call_count, 1)\n        self.assertEqual(mock_capture.call_count, 1)\n\n    @patch.object(xsCollections, \"computeFissionEnergyGenerationConstants\")\n    def test_getFissionEnergyDepositionConstants(self, mock_method):\n        \"\"\"Test RuntimeError and that it gets to the deposition constant call.\"\"\"\n        # make sure xsCollections.compute* gets hit\n        _x = self.block.getFissionEnergyGenerationConstants()\n        self.assertEqual(mock_method.call_count, 1)\n        # set core.lib to None and get RuntimeError\n        self.block.core.lib = None\n        with self.assertRaises(RuntimeError):\n            # fails because this test reactor does not have a cross-section library\n            _x = 
self.block.getFissionEnergyGenerationConstants()\n\n    @patch.object(xsCollections, \"computeCaptureEnergyGenerationConstants\")\n    def test_getCaptureEnergyGenerationConstants(self, mock_method):\n        \"\"\"Test RuntimeError and that it gets to the deposition constant call.\"\"\"\n        # make sure xsCollections.compute* gets hit\n        _x = self.block.getCaptureEnergyGenerationConstants()\n        self.assertEqual(mock_method.call_count, 1)\n        # set core.lib to None and get RuntimeError\n        self.block.core.lib = None\n        with self.assertRaises(RuntimeError):\n            # fails because this test reactor does not have a cross-section library\n            _x = self.block.getCaptureEnergyGenerationConstants()\n\n    @patch.object(xsCollections, \"computeNeutronEnergyDepositionConstants\")\n    def test_getNeutronEnergyDepositionConstants(self, mock_method):\n        \"\"\"Test RuntimeError and that it gets to the deposition constant call.\"\"\"\n        # make sure xsCollections.compute* gets hit\n        _x = self.block.getNeutronEnergyDepositionConstants()\n        self.assertEqual(mock_method.call_count, 1)\n        # set core.lib to None and get RuntimeError\n        self.block.core.lib = None\n        with self.assertRaises(RuntimeError):\n            _x = self.block.getNeutronEnergyDepositionConstants()\n\n    @patch.object(xsCollections, \"computeGammaEnergyDepositionConstants\")\n    def test_getGammaEnergyDepositionConstants(self, mock_method):\n        \"\"\"Test RuntimeError and that it gets to the deposition constant call.\"\"\"\n        # make sure xsCollections.compute* gets hit\n        _x = self.block.getGammaEnergyDepositionConstants()\n        self.assertEqual(mock_method.call_count, 1)\n        # set core.lib to None and get RuntimeError\n        self.block.core.lib = None\n        with self.assertRaises(RuntimeError):\n            # fails because this test reactor does not have a cross-section library\n            _x = 
self.block.getGammaEnergyDepositionConstants()\n\n\nclass TestNegativeVolume(unittest.TestCase):\n    def test_negativeVolume(self):\n        \"\"\"Build a Block with WAY too many fuel pins & show that the derived volume is negative.\"\"\"\n        block = blocks.HexBlock(\"TestHexBlock\")\n\n        coldTemp = 20\n        hotTemp = 200\n\n        fuelDims = {\n            \"Tinput\": coldTemp,\n            \"Thot\": hotTemp,\n            \"od\": 0.84,\n            \"id\": 0.6,\n            \"mult\": 1000.0,  # pack in too many fuels\n        }\n        fuel = components.Circle(\"fuel\", \"UZr\", **fuelDims)\n\n        coolantDims = {\"Tinput\": hotTemp, \"Thot\": hotTemp}\n        coolant = components.DerivedShape(\"coolant\", \"Sodium\", **coolantDims)\n\n        interDims = {\n            \"Tinput\": hotTemp,\n            \"Thot\": hotTemp,\n            \"op\": 17.8,\n            \"ip\": 17.3,\n            \"mult\": 1.0,\n        }\n        interSodium = components.Hexagon(\"interCoolant\", \"Sodium\", **interDims)\n\n        block.add(fuel)\n        block.add(coolant)\n        block.add(interSodium)\n        block.setHeight(16.0)\n        with self.assertRaises(ValueError):\n            block.getVolumeFractions()\n\n\nclass TestHexBlock(unittest.TestCase):\n    def setUp(self):\n        self.hexBlock = blocks.HexBlock(\"TestHexBlock\")\n        hexDims = {\"Tinput\": 273.0, \"Thot\": 273.0, \"op\": 70.6, \"ip\": 70.0, \"mult\": 1.0}\n        self.hexComponent = components.Hexagon(\"duct\", \"UZr\", **hexDims)\n        self.hexBlock.add(self.hexComponent)\n        self.hexBlock.add(components.Circle(\"clad\", \"HT9\", Tinput=273.0, Thot=273.0, od=0.1, mult=169.0))\n        self.hexBlock.add(components.Circle(\"wire\", \"HT9\", Tinput=273.0, Thot=273.0, od=0.01, mult=169.0))\n        self.hexBlock.add(components.DerivedShape(\"coolant\", \"Sodium\", Tinput=273.0, Thot=273.0))\n        self.r = tests.getEmptyHexReactor()\n        
self.hexBlock.autoCreateSpatialGrids(self.r.core.spatialGrid)\n        a = makeTestAssembly(1, 1)\n        a.add(self.hexBlock)\n        loc1 = self.r.core.spatialGrid[0, 1, 0]\n        self.r.core.add(a, loc1)\n\n    def test_getArea(self):\n        \"\"\"Test that we can correctly calculate the area of a hexagonal block.\n\n        .. test:: Users can create blocks that have the correct hexagonal area.\n            :id: T_ARMI_BLOCK_HEX0\n            :tests: R_ARMI_BLOCK_HEX\n        \"\"\"\n        # Test for various outer and inner pitches for HexBlocks with hex holes\n        for op in (20.0, 20.4, 20.1234, 25.001):\n            for ip in (0.0, 5.0001, 7.123, 10.0):\n                # generate a block with a different outer pitch\n                hBlock = blocks.HexBlock(\"TestAreaHexBlock\")\n                hexDims = {\n                    \"Tinput\": 273.0,\n                    \"Thot\": 273.0,\n                    \"op\": op,\n                    \"ip\": ip,\n                    \"mult\": 1.0,\n                }\n                hComponent = components.Hexagon(\"duct\", \"UZr\", **hexDims)\n                hBlock.add(hComponent)\n\n                # verify the area of the hexagon (with a hex hole) is correct\n                cur = hBlock.getArea()\n                ref = math.sqrt(3) / 2.0 * op**2\n                ref -= math.sqrt(3) / 2.0 * ip**2\n                self.assertAlmostEqual(cur, ref, places=6, msg=str(op))\n\n    def test_component_type(self):\n        \"\"\"\n        Test that a hex block has the proper \"hexagon\" __name__.\n\n        .. test:: Users can create blocks with a hexagonal shape.\n            :id: T_ARMI_BLOCK_HEX1\n            :tests: R_ARMI_BLOCK_HEX\n        \"\"\"\n        pitch_comp_type = self.hexBlock.PITCH_COMPONENT_TYPE[0]\n        self.assertEqual(pitch_comp_type.__name__, \"Hexagon\")\n\n    def test_coords(self):\n        \"\"\"\n        Test that coordinates are retrievable from a block.\n\n        .. 
test:: Coordinates of a block are queryable.\n            :id: T_ARMI_BLOCK_POSI1\n            :tests: R_ARMI_BLOCK_POSI\n        \"\"\"\n        core = self.hexBlock.core\n        a = self.hexBlock.parent\n        loc1 = core.spatialGrid[0, 1, 0]\n        a.spatialLocator = loc1\n        x0, y0 = self.hexBlock.coords()\n        a.spatialLocator = core.spatialGrid[0, -1, 0]  # symmetric\n        x2, y2 = self.hexBlock.coords()\n        a.spatialLocator = loc1\n        self.hexBlock.p.displacementX = 0.01\n        self.hexBlock.p.displacementY = 0.02\n        x1, y1 = self.hexBlock.coords()\n\n        # make sure displacements are working\n        self.assertAlmostEqual(x1 - x0, 1.0)\n        self.assertAlmostEqual(y1 - y0, 2.0)\n\n        # make sure location symmetry is working\n        self.assertAlmostEqual(x0, -x2)\n        self.assertAlmostEqual(y0, -y2)\n\n    def test_getNumPins(self):\n        self.assertEqual(self.hexBlock.getNumPins(), 169)\n\n    def test_block_dims(self):\n        \"\"\"Tests that the block class can provide basic dimensionality information about itself.\"\"\"\n        self.assertAlmostEqual(4316.582, self.hexBlock.getVolume(), 3)\n        self.assertAlmostEqual(70.6, self.hexBlock.getPitch(), 1)\n        self.assertAlmostEqual(4316.582, self.hexBlock.getMaxArea(), 3)\n\n        self.assertEqual(70, self.hexBlock.getDuctIP())\n        self.assertEqual(70.6, self.hexBlock.getDuctOP())\n\n        self.assertAlmostEqual(34.273, self.hexBlock.getPinToDuctGap(), 3)\n        self.assertEqual(0.11, self.hexBlock.getPinPitch())\n        self.assertAlmostEqual(300.889, self.hexBlock.getWettedPerimeter(), 3)\n        self.assertAlmostEqual(4242.184, self.hexBlock.getFlowArea(), 3)\n        self.assertAlmostEqual(56.395, self.hexBlock.getHydraulicDiameter(), 3)\n\n    def test_symmetryFactor(self):\n        # full hex\n        self.hexBlock.spatialLocator = self.hexBlock.core.spatialGrid[2, 0, 0]\n        self.hexBlock.clearCache()\n        
self.assertEqual(1.0, self.hexBlock.getSymmetryFactor())\n        a0 = self.hexBlock.getArea()\n        v0 = self.hexBlock.getVolume()\n        m0 = self.hexBlock.getMass()\n\n        # 1/3 symmetric\n        self.hexBlock.spatialLocator = self.hexBlock.core.spatialGrid[0, 0, 0]\n        self.hexBlock.clearCache()\n        self.assertEqual(3.0, self.hexBlock.getSymmetryFactor())\n        self.assertEqual(a0 / 3.0, self.hexBlock.getArea())\n        self.assertEqual(v0 / 3.0, self.hexBlock.getVolume())\n        self.assertAlmostEqual(m0 / 3.0, self.hexBlock.getMass())\n\n    def test_retainState(self):\n        \"\"\"Ensure retainState restores params and spatialGrids.\"\"\"\n        self.hexBlock.spatialGrid = grids.HexGrid.fromPitch(1.0)\n        self.hexBlock.setType(\"intercoolant\")\n        with self.hexBlock.retainState():\n            self.hexBlock.setType(\"fuel\")\n            self.hexBlock.spatialGrid.changePitch(2.0)\n        self.assertAlmostEqual(self.hexBlock.spatialGrid.pitch, 1.0)\n        self.assertTrue(self.hexBlock.hasFlags(Flags.INTERCOOLANT))\n\n    def test_getPinLocations(self):\n        \"\"\"Test pin locations can be obtained.\"\"\"\n        locs = set(self.hexBlock.getPinLocations())\n        nPins = self.hexBlock.getNumPins()\n        self.assertEqual(len(locs), nPins)\n        for l in locs:\n            self.assertIs(l.grid, self.hexBlock.spatialGrid)\n\n        # Check all clad components are represented\n        for c in self.hexBlock.getChildrenWithFlags(Flags.CLAD):\n            if isinstance(c.spatialLocator, grids.MultiIndexLocation):\n                for l in c.spatialLocator:\n                    locs.remove(l)\n            else:\n                locs.remove(c.spatialLocator)\n        self.assertFalse(\n            locs,\n            msg=\"Some clad locations were not found but returned by getPinLocations\",\n        )\n\n    def test_getPinCoordsAndLocsAgree(self):\n        \"\"\"Ensure consistency in ordering of pin locations 
and coordinates.\"\"\"\n        locs = self.hexBlock.getPinLocations()\n        coords = self.hexBlock.getPinCoordinates()\n        self.assertEqual(len(locs), len(coords))\n        for loc, coord in zip(locs, coords):\n            convertedCoords = loc.getLocalCoordinates()\n            np.testing.assert_array_equal(coord, convertedCoords, err_msg=f\"{loc=}\")\n\n    def test_getPinCoords(self):\n        blockPitch = self.hexBlock.getPitch()\n        pinPitch = self.hexBlock.getPinPitch()\n        nPins = self.hexBlock.getNumPins()\n        side = hexagon.side(blockPitch)\n        xyz = self.hexBlock.getPinCoordinates()\n        x, y, z = xyz.T\n\n        # these two pins should be side by side\n        self.assertTrue(self.hexBlock.spatialGrid.cornersUp)\n        self.assertAlmostEqual(y[1], y[2])\n        self.assertAlmostEqual(x[1], -x[2])\n        self.assertEqual(len(xyz), self.hexBlock.getNumPins())\n\n        # ensure all pins are within the proper bounds of a\n        # flats-up oriented hex block\n        self.assertLess(max(y), blockPitch / 2.0)\n        self.assertGreater(min(y), -blockPitch / 2.0)\n        self.assertLess(max(x), side)\n        self.assertGreater(min(x), -side)\n\n        # center pin should be at 0\n        mags = x * x + y * y\n        minIndex = mags.argmin()\n        cx = x[minIndex]\n        cy = y[minIndex]\n        self.assertAlmostEqual(cx, 0.0)\n        self.assertAlmostEqual(cy, 0.0)\n\n        # extreme pin should be at proper radius\n        cornerMag = mags.max()\n        nRings = hexagon.numRingsToHoldNumCells(nPins) - 1\n        self.assertAlmostEqual(math.sqrt(cornerMag), nRings * pinPitch)\n\n        # all z coords equal to zero\n        np.testing.assert_equal(z, 0)\n\n    def test_getPitchHomogeneousBlock(self):\n        \"\"\"\n        Demonstrate how to communicate pitch on a hex block with unshaped components.\n\n        Notes\n        -----\n        This assumes there are 3 materials in the homogeneous block, one 
with half the area\n        fraction, and 2 with 1/4 each.\n        \"\"\"\n        desiredPitch = 14.0\n        hexTotalArea = hexagon.area(desiredPitch)\n\n        compArgs = {\"Tinput\": 273.0, \"Thot\": 273.0}\n        areaFractions = [0.5, 0.25, 0.25]\n        materials = [\"HT9\", \"UZr\", \"Sodium\"]\n\n        # There are 2 ways to do this, the first is to pick a component to be the pitch defining\n        # component, and given it the shape of a hexagon to define the pitch. The hexagon outer\n        # pitch (op) is defined by the pitch of the block/assembly. The ip is defined by whatever\n        # thickness is necessary to have the desired area fraction. The second way is shown in the\n        # second half of this test.\n        hexBlock = blocks.HexBlock(\"TestHexBlock\")\n\n        hexComponentArea = areaFractions[0] * hexTotalArea\n\n        # Picking 1st material to use for the hex component here, but really the choice is\n        # arbitrary. area grows quadratically with op\n        ipNeededForCorrectArea = desiredPitch * areaFractions[0] ** 0.5\n        self.assertEqual(hexComponentArea, hexTotalArea - hexagon.area(ipNeededForCorrectArea))\n\n        hexArgs = {\"op\": desiredPitch, \"ip\": ipNeededForCorrectArea, \"mult\": 1.0}\n        hexArgs.update(compArgs)\n        pitchDefiningComponent = components.Hexagon(\"pitchComp\", materials[0], **hexArgs)\n        hexBlock.add(pitchDefiningComponent)\n\n        # hex component is added, now add the rest as unshaped.\n        for aFrac, material in zip(areaFractions[1:], materials[1:]):\n            unshapedArgs = {\"area\": hexTotalArea * aFrac}\n            unshapedArgs.update(compArgs)\n            name = f\"unshaped {material}\"\n            comp = components.UnshapedComponent(name, material, **unshapedArgs)\n            hexBlock.add(comp)\n\n        self.assertEqual(desiredPitch, hexBlock.getPitch())\n        self.assertAlmostEqual(hexTotalArea, hexBlock.getMaxArea())\n        
self.assertAlmostEqual(sum(c.getArea() for c in hexBlock), hexTotalArea)\n\n        # For this second way, we will simply define the 3 components as unshaped, with  the desired\n        # area fractions, and make a 4th component that is an infinitely thin hexagon with the the\n        # desired pitch. The downside of this method is that now the block has a fourth component\n        # with no volume.\n        hexBlock = blocks.HexBlock(\"TestHexBlock\")\n        for aFrac, material in zip(areaFractions, materials):\n            unshapedArgs = {\"area\": hexTotalArea * aFrac}\n            unshapedArgs.update(compArgs)\n            name = f\"unshaped {material}\"\n            comp = components.UnshapedComponent(name, material, **unshapedArgs)\n            hexBlock.add(comp)\n\n        # We haven't set a pitch defining component this time so set it now with 0 area.\n        pitchDefiningComponent = components.Hexagon(\n            \"pitchComp\", \"Void\", op=desiredPitch, ip=desiredPitch, mult=1, **compArgs\n        )\n        hexBlock.add(pitchDefiningComponent)\n        self.assertEqual(desiredPitch, hexBlock.getPitch())\n        self.assertAlmostEqual(hexTotalArea, hexBlock.getMaxArea())\n        self.assertAlmostEqual(sum(c.getArea() for c in hexBlock), hexTotalArea)\n\n    def test_getDuctPitch(self):\n        ductIP = self.hexBlock.getDuctIP()\n        self.assertAlmostEqual(70.0, ductIP)\n        ductOP = self.hexBlock.getDuctOP()\n        self.assertAlmostEqual(70.6, ductOP)\n\n    def test_getPinCenterFlatToFlat(self):\n        nRings = hexagon.numRingsToHoldNumCells(self.hexBlock.getNumPins())\n        pinPitch = self.hexBlock.getPinPitch()\n        pinCenterCornerToCorner = 2 * (nRings - 1) * pinPitch\n        pinCenterFlatToFlat = math.sqrt(3.0) / 2.0 * pinCenterCornerToCorner\n        f2f = self.hexBlock.getPinCenterFlatToFlat()\n        self.assertAlmostEqual(pinCenterFlatToFlat, f2f)\n\n    def test_gridCreation(self):\n        \"\"\"Create a grid for a 
block, and show that it can handle components with multiplicity > 1.\n\n        .. test:: Grids can handle components with multiplicity > 1.\n            :id: T_ARMI_GRID_MULT\n            :tests: R_ARMI_GRID_MULT\n        \"\"\"\n        b = self.hexBlock\n        # The block should have a spatial grid at construction,\n        # since it has mults = 1 or 169 from setup\n        b.autoCreateSpatialGrids(self.r.core.spatialGrid)\n        self.assertIsNotNone(b.spatialGrid)\n        for c in b:\n            if c.getDimension(\"mult\", cold=True) == 169:\n                # Then it's spatialLocator must be of size 169\n                locations = c.spatialLocator\n                self.assertEqual(type(locations), grids.MultiIndexLocation)\n\n                mult = 0\n                uniqueLocations = set()\n                for loc in locations:\n                    mult = mult + 1\n\n                    # test for the uniqueness of the locations (since mult > 1)\n                    if loc not in uniqueLocations:\n                        uniqueLocations.add(loc)\n                    else:\n                        self.assertTrue(False, msg=\"Duplicate location found!\")\n\n                self.assertEqual(mult, 169)\n\n    def test_gridNumPinsAndLocations(self):\n        b = blocks.HexBlock(\"fuel\", height=10.0)\n\n        fuelDims = {\"Tinput\": 25.0, \"Thot\": 600, \"od\": 0.76, \"id\": 0.00, \"mult\": 168.0}\n        cladDims = {\"Tinput\": 25.0, \"Thot\": 450, \"od\": 0.80, \"id\": 0.77, \"mult\": 168.0}\n        ductDims = {\"Tinput\": 25.0, \"Thot\": 400, \"op\": 16, \"ip\": 15.3, \"mult\": 1.0}\n        wireDims = {\n            \"Tinput\": 25.0,\n            \"Thot\": 600,\n            \"od\": 0.1,\n            \"id\": 0.0,\n            \"axialPitch\": 30.0,\n            \"helixDiameter\": 0.9,\n            \"mult\": 168.0,\n        }\n        wire = components.Helix(\"wire\", \"HT9\", **wireDims)\n        fuel = components.Circle(\"fuel\", \"UZr\", 
**fuelDims)\n        clad = components.Circle(\"clad\", \"HT9\", **cladDims)\n        duct = components.Hexagon(\"duct\", \"HT9\", **ductDims)\n        b.add(fuel)\n        b.add(clad)\n        b.add(duct)\n        b.add(wire)\n        with self.assertRaises(ValueError):\n            b.autoCreateSpatialGrids(self.r.core.spatialGrid)\n        self.assertIsNone(b.spatialGrid)\n\n    def test_gridNotCreatedMultipleMultiplicities(self):\n        wireDims = {\n            \"Tinput\": 200,\n            \"Thot\": 200,\n            \"od\": 0.1,\n            \"id\": 0.0,\n            \"axialPitch\": 30.0,\n            \"helixDiameter\": 1.1,\n            \"mult\": 21.0,\n        }\n        # add a wire only some places in the block, so grid should not be created.\n        wire = components.Helix(\"wire\", \"HT9\", **wireDims)\n        self.hexBlock.add(wire)\n        self.hexBlock.spatialGrid = None  # clear existing\n        self.hexBlock.autoCreateSpatialGrids(self.r.core.spatialGrid)\n        self.assertIsNone(self.hexBlock.spatialGrid)\n\n    def test_assignPinIndicesToFullGrid(self):\n        \"\"\"Ensure we can assign pin indices to fuel if it occupies the entire spatial grid.\"\"\"\n        b = blocks.HexBlock(\"fuel\")\n        fuel = components.Circle(\n            \"fuel\",\n            \"UZr\",\n            Tinput=25.0,\n            Thot=600.0,\n            od=0.76,\n            mult=169,\n        )\n        b.add(fuel)\n\n        clad = components.Circle(\n            \"clad\",\n            \"HT9\",\n            Tinput=25.0,\n            Thot=450.0,\n            id=0.77,\n            od=0.80,\n            mult=169,\n        )\n        b.add(clad)\n\n        wire = components.Helix(\n            \"wire\",\n            \"HT9\",\n            Tinput=25.0,\n            Thot=600,\n            id=0,\n            od=0.1,\n            axialPitch=30,\n            helixDiameter=0.9,\n            mult=169,\n        )\n        b.add(wire)\n\n        duct = 
components.Hexagon(\"duct\", \"HT9\", Tinput=25.0, Thot=400, ip=15.3, op=16, mult=1)\n        b.add(duct)\n\n        b.autoCreateSpatialGrids(self.r.core.spatialGrid)\n        self.assertIsNotNone(b.spatialGrid)\n\n        b.assignPinIndices()\n        self.assertIsNotNone(fuel.p.pinIndices)\n        indices = fuel.getPinIndices()\n        self.assertIsNotNone(indices)\n        np.testing.assert_allclose(indices, np.arange(169, dtype=int))\n\n    def test_pinPitches(self):\n        self.assertTrue(self.hexBlock.hasPinPitch())\n        self.assertAlmostEqual(self.hexBlock.getPinPitch(cold=True), 0.11)\n        self.assertAlmostEqual(self.hexBlock.getPinPitch(cold=False), 0.11)\n\n    def test_hasPinPitch(self):\n        # A HexBlock with no components inside should return False\n        b = blocks.HexBlock(\"EmptyHexBlock\")\n        self.assertFalse(b.hasPinPitch())\n\n        # A HexBlock with only a clad or a wire component, but not both, should return False\n        b.add(components.Circle(\"clad\", \"HT9\", Tinput=273.0, Thot=273.0, od=0.1, mult=169.0))\n        self.assertFalse(b.hasPinPitch())\n\n        # A HexBlock with a clad and a wire component should return True\n        b.add(components.Circle(\"wire\", \"HT9\", Tinput=273.0, Thot=273.0, od=0.01, mult=169.0))\n        self.assertTrue(b.hasPinPitch())\n\n    def test_getBlocks(self):\n        self.assertEqual(len(self.hexBlock.getBlocks()), 1)\n\n    def test_getBoronMassEnrich(self):\n        self.assertAlmostEqual(self.hexBlock.getBoronMassEnrich(), 0.0)\n\n    def test_rotationNumbers(self):\n        self.assertEqual(self.hexBlock.getRotationNum(), 0.0)\n        self.hexBlock.setRotationNum(1)\n        self.assertEqual(self.hexBlock.getRotationNum(), 1.0)\n        self.hexBlock.setRotationNum(2)\n        self.assertEqual(self.hexBlock.getRotationNum(), 2.0)\n\n\nclass MultiPinIndicesTests(unittest.TestCase):\n    BP_STR = \"\"\"\nblocks:\n    fuel: &fuel_block\n        grid name: fuel grid\n        
fuel 1: &fuel_def\n            shape: Circle\n            # Use void material because we don't need nuclides, just components with flags\n            material: Void\n            od: 0.68\n            Tinput: 25\n            Thot: 600\n            latticeIDs: [1]\n            flags: primary fuel\n        clad 1: &clad_def\n            shape: Circle\n            material: Void\n            id: 0.7\n            od: 0.71\n            Tinput: 600\n            Thot: 450\n            latticeIDs: [1]\n        # Smaller pin so it gets placed earlier in the sorting\n        fuel 2:\n            <<: *fuel_def\n            id: 0.6\n            latticeIDs: [2]\n            flags: secondary fuel\n        clad 2:\n            <<: *clad_def\n            id: 0.62\n            od: 0.65\n            latticeIDs: [2]\n        duct:\n            shape: Hexagon\n            material: Void\n            Tinput: 25\n            Thot: 450\n            ip: 15.3\n            op: 16\nassemblies:\n    fuel:\n        specifier: F\n        blocks: [*fuel_block]\n        height: [10]\n        axial mesh points: [1]\n        xs types: [A]\ngrids:\n    fuel grid:\n        geom: hex_corners_up\n        symmetry: full\n        # Kind of a convoluted map but helps test a lot of edge conditions\n        lattice map: |\n            - - -  1 1 1 1\n              - - 1 1 1 1 1\n               - 1 1 2 2 1 1\n                1 1 2 1 2 1 1\n                 1 1 2 2 1 1\n                  1 1 1 1 1\n                   1 2 1 1\nnuclide flags:\n\n\"\"\"\n\n    @classmethod\n    def setUpClass(cls):\n        cs = settings.Settings()\n        bp: blueprints.Blueprints = blueprints.Blueprints.load(cls.BP_STR)\n        bp._prepConstruction(cs)\n        cls._originalBlock: blocks.HexBlock = bp.blockDesigns[\"fuel\"].construct(cs, bp, 0, 2, 10, \"A\", {})\n\n    def setUp(self):\n        self.block = copy.deepcopy(self._originalBlock)\n        self.block.assignPinIndices()\n        self.allLocations = 
self.block.getPinLocations()\n        self.fuelPins = self.block.getComponents(Flags.FUEL)\n\n    def test_nonOverlappingIndices(self):\n        \"\"\"Test pin indices are complete and non-overlapping.\"\"\"\n        foundIndices: set[int] = set()\n        for fp in self.fuelPins:\n            actualIndices = fp.getPinIndices()\n            self.assertIsNotNone(actualIndices, fp)\n            overlap = foundIndices.intersection(actualIndices)\n            self.assertFalse(overlap, msg=\"Found overlapping indices on unique fuel pin\")\n            foundIndices.update(actualIndices)\n        # Make sure we have all the indices covered\n        for i in range(len(self.allLocations)):\n            self.assertIn(i, foundIndices)\n\n    def test_consistentPinOrdering(self):\n        \"\"\"Test values of pin indices on a component align with pin locations of that component within the block.\"\"\"\n        for fp in self.fuelPins:\n            locations: list[grids.IndexLocation] = list(fp.spatialLocator)\n            indices = fp.getPinIndices()\n            self.assertEqual(len(locations), len(indices), msg=fp)\n            for loc, ix in zip(locations, indices):\n                indexInBlock = self.allLocations.index(loc)\n                self.assertEqual(ix, indexInBlock, msg=f\"{loc=} in {fp}\")\n\n    def test_noPinIndicesForHexes(self):\n        \"\"\"Test we never get pin indices for hexagons.\"\"\"\n        duct = self.block.getComponent(Flags.DUCT)\n        self.assertIsNone(duct.p.pinIndices)\n        with self.assertRaisesRegex(ValueError, \"no pin indices\"):\n            duct.getPinIndices()\n\n    def test_recoverCladIndicesFromFuel(self):\n        \"\"\"Show the same indices for cladding are found for fuel that it wraps.\"\"\"\n        clad = self.block.getComponents(Flags.CLAD)[0]\n        cladIndices = clad.getPinIndices()\n        fuel = self.block.getComponents(Flags.FUEL)[0]\n        fuelIndices = fuel.getPinIndices()\n        # Show not only are they 
equal, we get literally the same object\n        # through the dimension linking. This only works if the fuel pin\n        # is not at all the lattice sites, or else they'd both be equal\n        # equivalent to np.arange(0, N - 1) but different instances of the same data\n        self.assertIs(cladIndices, fuelIndices)\n\n    def test_locations(self):\n        \"\"\"Ensure we have locations consistent with the lattice map.\"\"\"\n        primary: components.Circle = self.block.getComponent(Flags.PRIMARY)\n        # Count the number of primary pins in the blueprint above\n        nPrimary = 30\n        expectedPrimaryRingPos = {\n            (1, 1),\n        }\n        # 12 and 18 pins in one-indexed rings three and four.\n        # remember that range is exclusive of the stop\n        expectedPrimaryRingPos.update((3, i) for i in range(1, 13))\n        expectedPrimaryRingPos.update((4, i) for i in range(1, 19))\n        # special pin designed to poke some edge cases\n        # remember ARMI hex positions start at 1 in the north east corner and go counterclockwise\n        trickyPin = (4, 11)\n        # drop the tricky pin in the fourth ring\n        expectedPrimaryRingPos.remove(trickyPin)\n        self._checkPinLocationsAndIndices(primary, nPrimary, expectedPrimaryRingPos)\n\n        secondary: components.Circle = self.block.getComponent(Flags.SECONDARY)\n        nSecondary = 7\n        # six pins in one-indexed ring two\n        expectedSecondaryRingPos = {(2, i) for i in range(1, 7)}\n        expectedSecondaryRingPos.add(trickyPin)\n        self._checkPinLocationsAndIndices(secondary, nSecondary, expectedSecondaryRingPos)\n\n    def _checkPinLocationsAndIndices(\n        self,\n        pin: components.Circle,\n        expectedNumPins: int,\n        expectedRingPos: set[tuple[int, int]],\n    ):\n        self.assertEqual(\n            len(expectedRingPos),\n            expectedNumPins,\n            msg=\"Expected pins and locations differ. 
Your test inputs are not setup correct.\",\n        )\n        self.assertEqual(pin.getDimension(\"mult\"), expectedNumPins)\n        self.assertEqual(len(pin.spatialLocator), expectedNumPins)\n        primaryIndices = pin.getPinIndices()\n        self.assertIsNotNone(primaryIndices)\n        self.assertEqual(primaryIndices.size, expectedNumPins)\n        allLocations = self.block.getPinLocations()\n        for ix in primaryIndices:\n            loc = allLocations[ix]\n            ringPos = loc.getRingPos()\n            self.assertIn(ringPos, expectedRingPos, msg=f\"{ix=} : {loc=}\")\n\n    def test_nonFueledBlock(self):\n        \"\"\"If we have no fuel, but we have clad, we should still have pin indices.\"\"\"\n        nonFuel = copy.deepcopy(self._originalBlock)\n        # strip out fuel flags\n        for c in nonFuel.iterComponents(Flags.FUEL):\n            c.p.flags &= ~Flags.FUEL\n        nonFuel.assignPinIndices()\n        # Should still have what ARMI considers pins\n        self.assertTrue(nonFuel.getPinLocations())\n        for c in nonFuel.iterComponents(Flags.CLAD):\n            self.assertIsNotNone(c.getPinIndices())\n\n    def test_assignmentChangesPreviousPinIndices(self):\n        \"\"\"Show successive calls to assignPinIndices clear out previous state.\"\"\"\n        # assign pin indices to something that maybe doesn't need it\n        firstFuel = self.block.getFirstComponent(Flags.FUEL)\n        firstClad = self.block.getFirstComponent(Flags.CLAD)\n        self.assertIsNone(firstClad.p.pinIndices)\n        self.assertIsNotNone(firstFuel.p.pinIndices)\n        firstClad.p.pinIndices = firstFuel.p.pinIndices\n        self.block.assignPinIndices()\n        self.assertIsNone(firstClad.p.pinIndices)\n\n    def test_fuelAndNonFuel(self):\n        \"\"\"If you have fuel and non-fuel pins in the block, all pins should have indices still.\"\"\"\n        firstBefore = self.fuelPins[0].getPinIndices()\n        secondBefore = 
self.fuelPins[1].getPinIndices()\n\n        for c in self.block:\n            c.p.pinIndices = None\n\n        self.fuelPins[1].p.flags &= ~Flags.FUEL\n\n        self.block.assignPinIndices()\n        firstAfter = self.fuelPins[0].getPinIndices()\n        assert_array_equal(firstAfter, firstBefore)\n\n        secondAfter = self.fuelPins[1].getPinIndices()\n        assert_array_equal(secondAfter, secondBefore)\n\n    def test_reassignOnSort(self):\n        \"\"\"Show the pin indices are reassigned when the block is sorted.\"\"\"\n        # Make sure we get new block-level pin locations or else this test is meaningless\n        with patch.object(self.block, \"assignPinIndices\") as patchAssign:\n            self.block.sort()\n        newPinLocations = self.block.getPinLocations()\n        self.assertNotEqual(\n            newPinLocations,\n            self.allLocations,\n            msg=\"Test requires new pin locations post-sort.\",\n        )\n        # Make sure we called it. Other tests confirm that assignPinIndices is correct.\n        # this makes sure we've called it where we want to call it\n        patchAssign.assert_called_once()\n\n\nclass TestHexBlockOrientation(unittest.TestCase):\n    def setUp(self):\n        self.td = TemporaryDirectoryChanger()\n        self.td.__enter__()\n\n    def tearDown(self):\n        self.td.__exit__(None, None, None)\n\n    @staticmethod\n    def getLocalCoordinatesBlockBounds(b: blocks.HexBlock):\n        \"\"\"Call getLocalCoordinates() for every Component in the Block and find the X/Y bounds.\"\"\"\n        maxX = -111\n        minX = 999\n        maxY = -111\n        minY = 999\n        for comp in b:\n            locs = comp.spatialLocator\n            if not isinstance(locs, grids.MultiIndexLocation):\n                locs = [locs]\n\n            for loc in locs:\n                x, y, _ = loc.getLocalCoordinates()\n                if x > maxX:\n                    maxX = x\n                elif x < minX:\n             
       minX = x\n\n                if y > maxY:\n                    maxY = y\n                elif y < minY:\n                    minY = y\n\n        return minX, maxX, minY, maxY\n\n    def test_validateReactorCornersUp(self):\n        \"\"\"Validate the spatial grid for a corners up HexBlock and its children.\"\"\"\n        # load a corners up reactor\n        _o, r = loadTestReactor(\n            os.path.join(TEST_ROOT, \"smallestTestReactor\"),\n            inputFileName=\"armiRunSmallest.yaml\",\n        )\n\n        # grab a pinned fuel block, and verify it is flats up\n        b = r.core.getFirstBlock(Flags.FUEL)\n        self.assertTrue(r.core.spatialGrid.cornersUp)\n        self.assertFalse(b.spatialGrid.cornersUp)\n        self.assertNotEqual(r.core.spatialGrid.cornersUp, b.spatialGrid.cornersUp)\n\n        # for a flats up block-grid, the hex centroids should stretch more in Y than X\n        minX, maxX, minY, maxY = self.getLocalCoordinatesBlockBounds(b)\n        ratio = (maxY - minY) / (maxX - minX)\n        self.assertAlmostEqual(ratio, 2 / math.sqrt(3), delta=0.0001)\n\n    def test_validateReactorFlatsUp(self):\n        \"\"\"Validate the spatial grid for a flats up HexBlock and its children.\"\"\"\n        # copy the files over\n        inDir = os.path.join(TEST_ROOT, \"smallestTestReactor\")\n        for filePath in glob(os.path.join(inDir, \"*.yaml\")):\n            outPath = os.path.join(self.td.destination, os.path.basename(filePath))\n            shutil.copyfile(filePath, outPath)\n\n        # modify the reactor to make it flats up\n        testFile = os.path.join(self.td.destination, \"refSmallestReactor.yaml\")\n        txt = open(testFile, \"r\").read()\n        txt = txt.replace(\"geom: hex_corners_up\", \"geom: hex\")\n        open(testFile, \"w\").write(txt)\n\n        # load a flats up reactor\n        _o, r = loadTestReactor(self.td.destination, inputFileName=\"armiRunSmallest.yaml\")\n\n        # grab a pinned fuel block, and verify 
it is corners up\n        b = r.core.getFirstBlock(Flags.FUEL)\n        self.assertFalse(r.core.spatialGrid.cornersUp)\n        self.assertTrue(b.spatialGrid.cornersUp)\n        self.assertNotEqual(r.core.spatialGrid.cornersUp, b.spatialGrid.cornersUp)\n\n        # for a corners up block-grid, the hex centroids should stretch more in X than Y\n        minX, maxX, minY, maxY = self.getLocalCoordinatesBlockBounds(b)\n        ratio = (maxX - minX) / (maxY - minY)\n        self.assertAlmostEqual(ratio, 2 / math.sqrt(3), delta=0.0001)\n\n\nclass ThRZBlock_TestCase(unittest.TestCase):\n    def setUp(self):\n        self.ThRZBlock = blocks.ThRZBlock(\"TestThRZBlock\")\n        self.ThRZBlock.add(\n            components.DifferentialRadialSegment(\n                \"fuel\",\n                \"UZr\",\n                Tinput=273.0,\n                Thot=273.0,\n                inner_radius=0.0,\n                radius_differential=40.0,\n                inner_theta=0.0,\n                azimuthal_differential=1.5 * math.pi,\n                inner_axial=5.0,\n                height=10.0,\n                mult=1.0,\n            )\n        )\n        self.ThRZBlock.add(\n            components.DifferentialRadialSegment(\n                \"coolant\",\n                \"Sodium\",\n                Tinput=273.0,\n                Thot=273.0,\n                inner_radius=40.0,\n                radius_differential=10.0,\n                inner_theta=0.0,\n                azimuthal_differential=1.5 * math.pi,\n                inner_axial=5.0,\n                height=10.0,\n                mult=1.0,\n            )\n        )\n        self.ThRZBlock.add(\n            components.DifferentialRadialSegment(\n                \"clad\",\n                \"HT9\",\n                Tinput=273.0,\n                Thot=273.0,\n                inner_radius=50.0,\n                radius_differential=7.0,\n                inner_theta=0.0,\n                azimuthal_differential=1.5 * math.pi,\n        
        inner_axial=5.0,\n                height=10.0,\n                mult=1.0,\n            )\n        )\n        self.ThRZBlock.add(\n            components.DifferentialRadialSegment(\n                \"wire\",\n                \"HT9\",\n                Tinput=273.0,\n                Thot=273.0,\n                inner_radius=57.0,\n                radius_differential=3.0,\n                inner_theta=0.0,\n                azimuthal_differential=1.5 * math.pi,\n                inner_axial=5.0,\n                height=10.0,\n                mult=1.0,\n            )\n        )\n        # random 1/4 chunk taken out to exercise Theta-RZ block capabilities\n        self.ThRZBlock.add(\n            components.DifferentialRadialSegment(\n                \"chunk\",\n                \"Sodium\",\n                Tinput=273.0,\n                Thot=273.0,\n                inner_radius=0.0,\n                radius_differential=60.0,\n                inner_theta=1.5 * math.pi,\n                azimuthal_differential=0.5 * math.pi,\n                inner_axial=5.0,\n                height=10.0,\n                mult=1.0,\n            )\n        )\n\n    def test_radii(self):\n        radialInner = self.ThRZBlock.radialInner()\n        self.assertEqual(0.0, radialInner)\n        radialOuter = self.ThRZBlock.radialOuter()\n        self.assertEqual(60.0, radialOuter)\n\n    def test_theta(self):\n        thetaInner = self.ThRZBlock.thetaInner()\n        self.assertEqual(0.0, thetaInner)\n        thetaOuter = self.ThRZBlock.thetaOuter()\n        self.assertEqual(2.0 * math.pi, thetaOuter)\n\n    def test_axial(self):\n        axialInner = self.ThRZBlock.axialInner()\n        self.assertEqual({5.0}, axialInner)\n        axialOuter = self.ThRZBlock.axialOuter()\n        self.assertEqual({15.0}, axialOuter)\n\n    def test_verifyBlockDims(self):\n        with mockRunLogs.BufferLog() as mock:\n            # we should start with a clean slate, before debug logging\n            
self.assertEqual(\"\", mock.getStdout())\n            runLog.LOG.setVerbosity(logging.WARNING)\n            runLog.LOG.startLog(\"test_updateComponentDims\")\n\n            # the verify method throws a ton of warnings or raises errors when there are problems\n            self.ThRZBlock.verifyBlockDims()\n            self.assertEqual(\"\", mock.getStdout())\n\n    def test_getThetaRZGrid(self):\n        \"\"\"Since not applicable to ThetaRZ Grids.\"\"\"\n        b = self.ThRZBlock\n        self.assertIsNone(b.spatialGrid)\n        b.autoCreateSpatialGrids(\"FakeSpatilGrid\")\n        self.assertIsNotNone(b.spatialGrid)\n\n    def test_getWettedPerimeter(self):\n        with self.assertRaises(NotImplementedError):\n            _ = self.ThRZBlock.getWettedPerimeter()\n\n    def test_getHydraulicDiameter(self):\n        with self.assertRaises(NotImplementedError):\n            _ = self.ThRZBlock.getHydraulicDiameter()\n\n    def test_pinPitches(self):\n        self.assertFalse(self.ThRZBlock.hasPinPitch())\n\n        with self.assertRaises(AttributeError):\n            self.ThRZBlock.getPinPitch(cold=False)\n\n        with self.assertRaises(AttributeError):\n            self.ThRZBlock.getPinPitch(cold=True)\n\n    def test_updateComponentDims(self):\n        with mockRunLogs.BufferLog() as mock:\n            # we should start with a clean slate, before logging\n            self.assertEqual(\"\", mock.getStdout())\n            runLog.LOG.setVerbosity(logging.WARNING)\n            runLog.LOG.startLog(\"test_updateComponentDims\")\n\n            # if this fails, we get a warning. 
Here we just test the warning isn't thrown.\n            self.ThRZBlock.updateComponentDims()\n            self.assertEqual(\"\", mock.getStdout())\n\n    def test_getBoronMassEnrich(self):\n        self.assertAlmostEqual(self.ThRZBlock.getBoronMassEnrich(), 0.0)\n\n\nclass CartesianBlockTests(unittest.TestCase):\n    \"\"\"Tests for blocks with rectangular/square outer shape.\"\"\"\n\n    PITCH = 70\n\n    def setUp(self):\n        self.cartesianBlock = blocks.CartesianBlock(\"TestCartesianBlock\")\n\n        self.cartesianComponent = components.HoledSquare(\n            \"duct\",\n            \"UZr\",\n            Tinput=273.0,\n            Thot=273.0,\n            holeOD=68.0,\n            widthOuter=self.PITCH,\n            mult=1.0,\n        )\n        self.cartesianBlock.add(self.cartesianComponent)\n        self.cartesianBlock.add(components.Circle(\"clad\", \"HT9\", Tinput=273.0, Thot=273.0, od=68.0, mult=169.0))\n\n        self.rCenter = getEmptyCartesianReactor(throughCenterAssembly=True)\n        self.rBorder = getEmptyCartesianReactor(throughCenterAssembly=False)\n        self.cartesianBlock.parent = self.rCenter.core\n        self.cartesianBlock.autoCreateSpatialGrids(self.rCenter.core.spatialGrid)\n\n    def test_getPitchSquare(self):\n        self.assertEqual(self.cartesianBlock.getPitch(), (self.PITCH, self.PITCH))\n\n    def test_getPitchHomogeneousBlock(self):\n        \"\"\"\n        Demonstrate how to communicate pitch on a hex block with unshaped components.\n\n        Notes\n        -----\n        This assumes there are 3 materials in the homogeneous block, one with half the area fraction, and 2 with 1/4\n        each.\n        \"\"\"\n        desiredPitch = (10.0, 12.0)\n        rectTotalArea = desiredPitch[0] * desiredPitch[1]\n\n        compArgs = {\"Tinput\": 273.0, \"Thot\": 273.0}\n        areaFractions = [0.5, 0.25, 0.25]\n        materials = [\"HT9\", \"UZr\", \"Sodium\"]\n\n        # There are 2 ways to do this, the first is to pick a 
component to be the pitch defining component, and given it\n        # the shape of a rectangle to define the pitch. The rectangle outer dimensions is defined by the pitch of the\n        # block/assembly. The inner dimensions is defined by whatever thickness is necessary to have the desired area\n        # fraction. The second way is to define all physical material components as unshaped, and add an additional\n        # infinitely thin Void component (no area) that defines pitch. See second part of\n        # HexBlock_TestCase.test_getPitchHomogeneousBlock for demonstration.\n        cartBlock = blocks.CartesianBlock(\"TestCartBlock\")\n\n        hexComponentArea = areaFractions[0] * rectTotalArea\n\n        # Picking 1st material to use for the hex component here, but really the choice is arbitrary.\n        # area grows quadratically with outer dimensions.\n        # Note there are infinitely many inner dims that would preserve area, this is just one.\n        innerDims = [dim * areaFractions[0] ** 0.5 for dim in desiredPitch]\n        self.assertAlmostEqual(hexComponentArea, rectTotalArea - innerDims[0] * innerDims[1])\n\n        rectArgs = {\n            \"lengthOuter\": desiredPitch[0],\n            \"lengthInner\": innerDims[0],\n            \"widthOuter\": desiredPitch[1],\n            \"widthInner\": innerDims[1],\n            \"mult\": 1.0,\n        }\n        rectArgs.update(compArgs)\n        pitchDefiningComponent = components.Rectangle(\"pitchComp\", materials[0], **rectArgs)\n        cartBlock.add(pitchDefiningComponent)\n\n        # Rectangle component is added, now add the rest as unshaped.\n        for aFrac, material in zip(areaFractions[1:], materials[1:]):\n            unshapedArgs = {\"area\": rectTotalArea * aFrac}\n            unshapedArgs.update(compArgs)\n            name = f\"unshaped {material}\"\n            comp = components.UnshapedComponent(name, material, **unshapedArgs)\n            cartBlock.add(comp)\n\n        
self.assertEqual(desiredPitch, cartBlock.getPitch())\n        self.assertAlmostEqual(rectTotalArea, cartBlock.getMaxArea())\n        self.assertAlmostEqual(sum(c.getArea() for c in cartBlock), rectTotalArea)\n\n    def test_getCartesianGrid(self):\n        \"\"\"Since not applicable to Cartesian Grids.\"\"\"\n        b = self.cartesianBlock\n        self.assertIsNotNone(b.spatialGrid)\n        b.autoCreateSpatialGrids(\"FakeSpatialGrid\")\n        self.assertIsInstance(b.spatialGrid, CartesianGrid)\n\n    def test_getWettedPerimeter(self):\n        with self.assertRaises(NotImplementedError):\n            _ = self.cartesianBlock.getWettedPerimeter()\n\n    def test_getHydraulicDiameter(self):\n        with self.assertRaises(NotImplementedError):\n            _ = self.cartesianBlock.getHydraulicDiameter()\n\n    def test_pinPitches(self):\n        self.assertTrue(self.cartesianBlock.hasPinPitch())\n        pinPitch = self.cartesianBlock.getPinPitch(cold=True)\n        self.assertAlmostEqual(pinPitch[0], 10.0)\n        self.assertAlmostEqual(pinPitch[1], 16.0)\n\n        pinPitch = self.cartesianBlock.getPinPitch(cold=False)\n        self.assertAlmostEqual(pinPitch[0], 10.0)\n        self.assertAlmostEqual(pinPitch[1], 16.0)\n\n    def test_getBoronMassEnrich(self):\n        self.assertAlmostEqual(self.cartesianBlock.getBoronMassEnrich(), 0.0)\n\n    def test_getPinCenterFlatToFlat(self):\n        # test with isThroughCenterAssembly=True\n        self.cartesianBlock.parent = self.rCenter.core\n        self.assertAlmostEqual(self.cartesianBlock.getPinCenterFlatToFlat(), 226.4155471693585, delta=1e-6)\n\n        # test with isThroughCenterAssembly=False\n        self.cartesianBlock.parent = self.rBorder.core\n        self.assertAlmostEqual(self.cartesianBlock.getPinCenterFlatToFlat(), 245.2835094334717, delta=1e-6)\n\n    def test_getNumCellsGivenRings(self):\n        \"\"\"\n        Testing CartesianBlock.getNumCellsGivenRings, in the two different origin 
locations.\n\n        There are some diagrams in the docstrings for Cartesian Grids and docs explaining this, but the number of\n        cells in a ring on a Cartesian grid changes depending on if the origin is at the center of a grid cell, or at\n        the boundary between 4 grid cells.\n        \"\"\"\n        # test with isThroughCenterAssembly=True\n        self.cartesianBlock.parent = self.rCenter.core\n        self.assertEqual(self.cartesianBlock.getNumCellsGivenRings(1), 1)\n        self.assertEqual(self.cartesianBlock.getNumCellsGivenRings(2), 9)\n        self.assertEqual(self.cartesianBlock.getNumCellsGivenRings(3), 25)\n        self.assertEqual(self.cartesianBlock.getNumCellsGivenRings(4), 49)\n\n        # test with isThroughCenterAssembly=False\n        self.cartesianBlock.parent = self.rBorder.core\n        self.assertEqual(self.cartesianBlock.getNumCellsGivenRings(1), 4)\n        self.assertEqual(self.cartesianBlock.getNumCellsGivenRings(2), 16)\n        self.assertEqual(self.cartesianBlock.getNumCellsGivenRings(3), 36)\n        self.assertEqual(self.cartesianBlock.getNumCellsGivenRings(4), 64)\n\n    def test_numRingsToHoldNumCells(self):\n        \"\"\"\n        Testing CartesianBlock.numRingsToHoldNumCells, in the two different origin locations.\n\n        There are some diagrams in the docstrings for Cartesian Grids and docs explaining this, but the number of\n        cells in a ring on a Cartesian grid changes depending on if the origin is at the center of a grid cell, or at\n        the boundary between 4 grid cells.\n        \"\"\"\n        # test with isThroughCenterAssembly=True\n        self.cartesianBlock.parent = self.rCenter.core\n        self.assertEqual(self.cartesianBlock.numRingsToHoldNumCells(1), 1)\n        self.assertEqual(self.cartesianBlock.numRingsToHoldNumCells(9), 2)\n        self.assertEqual(self.cartesianBlock.numRingsToHoldNumCells(24), 3)\n        self.assertEqual(self.cartesianBlock.numRingsToHoldNumCells(26), 4)\n       
 self.assertEqual(self.cartesianBlock.numRingsToHoldNumCells(50), 5)\n\n        # test with isThroughCenterAssembly=False\n        self.cartesianBlock.parent = self.rBorder.core\n        self.assertEqual(self.cartesianBlock.numRingsToHoldNumCells(3), 1)\n        self.assertEqual(self.cartesianBlock.numRingsToHoldNumCells(16), 2)\n        self.assertEqual(self.cartesianBlock.numRingsToHoldNumCells(36), 3)\n        self.assertEqual(self.cartesianBlock.numRingsToHoldNumCells(64), 4)\n        self.assertEqual(self.cartesianBlock.numRingsToHoldNumCells(65), 5)\n\n\nclass MassConservationTests(unittest.TestCase):\n    \"\"\"Tests designed to verify mass conservation during thermal expansion.\"\"\"\n\n    def setUp(self):\n        self.b = buildSimpleFuelBlock()\n\n    def test_heightExpansionDifferences(self):\n        \"\"\"The point of this test is to determine if the number densities stay the same with two different heights of\n        the same block.  Since we want to expand a block from cold temperatures to hot using the fuel expansion\n        coefficient (most important neutronicall), other components are not grown correctly. This means that on the\n        block level, axial expansion will NOT conserve mass of non-fuel components. However, the excess mass is simply\n        added to the top of the reactor in the plenum regions (or any non fueled region).\n        \"\"\"\n        # Assume the default block height is 'cold' height.  Now we must determine what the hot height should be based\n        # on thermal expansion. 
Change the height of the block based on the different thermal expansions of the\n        # components then see the effect on number densities.\n        fuel = self.b.getComponent(Flags.FUEL)\n        height = self.b.getHeight()\n        Thot = fuel.temperatureInC\n        Tcold = fuel.inputTemperatureInC\n\n        dllHot = fuel.getProperties().linearExpansionFactor(Tc=Thot, T0=Tcold)\n        hotFuelHeight = height * (1 + dllHot)\n\n        self.b.setHeight(hotFuelHeight)\n\n        hotFuelU238 = self.b.getNumberDensity(\"U238\")\n        hotFuelIRON = self.b.getNumberDensity(\"FE\")\n\n        # look at clad\n        clad = self.b.getComponent(Flags.CLAD)\n\n        Thot = clad.temperatureInC\n        Tcold = clad.inputTemperatureInC\n\n        dllHot = fuel.getProperties().linearExpansionFactor(Tc=Thot, T0=Tcold)\n        hotCladHeight = height * (1 + dllHot)\n\n        self.b.setHeight(hotCladHeight)\n\n        hotCladU238 = self.b.getNumberDensity(\"U238\")\n        hotCladIRON = self.b.getNumberDensity(\"FE\")\n\n        self.assertAlmostEqual(\n            hotFuelU238,\n            hotCladU238,\n            10,\n            \"Number Density of fuel in one height ({0}) != number density of fuel at another \"\n            \"height {1}. Number density conservation violated during thermal \"\n            \"expansion\".format(hotFuelU238, hotCladU238),\n        )\n\n        self.assertAlmostEqual(\n            hotFuelIRON,\n            hotCladIRON,\n            10,\n            \"Number Density of clad in one height ({0}) != number density of clad at another \"\n            \"height {1}. 
Number density conservation violated during thermal \"\n            \"expansion\".format(hotFuelIRON, hotCladIRON),\n        )\n\n    def test_massFuelHeatup(self):\n        fuel = self.b.getComponent(Flags.FUEL)\n        massCold = fuel.getMass()\n        fuel.setTemperature(100)\n        massHot = fuel.getMass()\n\n        self.assertAlmostEqual(\n            massCold,\n            massHot,\n            10,\n            \"Cold mass of fuel ({0}) != hot mass {1}. Mass conservation violated during thermal expansion\".format(\n                massCold, massHot\n            ),\n        )\n\n    def test_massCladHeatup(self):\n        cladding = self.b.getComponent(Flags.CLAD)\n        massCold = cladding.getMass()\n        cladding.setTemperature(100)\n        massHot = cladding.getMass()\n\n        self.assertAlmostEqual(\n            massCold,\n            massHot,\n            10,\n            \"Cold mass of clad ({0}) != hot mass {1}. Mass conservation violated during thermal expansion\".format(\n                massCold, massHot\n            ),\n        )\n\n    def test_massDuctHeatup(self):\n        duct = self.b.getComponent(Flags.DUCT)\n        massCold = duct.getMass()\n        duct.setTemperature(100)\n        massHot = duct.getMass()\n\n        self.assertAlmostEqual(\n            massCold,\n            massHot,\n            10,\n            \"Cold mass of duct ({0}) != hot mass {1}. Mass conservation violated during thermal expansion\".format(\n                massCold, massHot\n            ),\n        )\n\n    def test_massCoolHeatup(self):\n        \"\"\"Make sure mass of coolant goes down when it heats up.\"\"\"\n        coolant = self.b.getComponent(Flags.COOLANT)\n        massCold = coolant.getMass()\n        coolant.setTemperature(coolant.temperatureInC + 100)\n        massHot = coolant.getMass()\n\n        self.assertGreater(\n            massCold,\n            massHot,\n            \"Cold mass of coolant ({0}) <= hot mass {1}. 
Mass conservation not violated during \"\n            \"thermal expansion of coolant\".format(massCold, massHot),\n        )\n\n    def test_dimensionDuctHeatup(self):\n        duct = self.b.getComponent(Flags.DUCT)\n        pitchCold = duct.getDimension(\"op\", cold=True)\n        duct.setTemperature(100)\n        pitchHot = duct.getDimension(\"op\")\n        dLL = duct.getProperties().linearExpansionFactor(100, 25)\n        correctHot = pitchCold * (1 + dLL)\n        self.assertAlmostEqual(\n            correctHot,\n            pitchHot,\n            10,\n            \"Theoretical pitch of duct ({0}) != hot pitch {1}. Linear expansion violated during \"\n            \"heatup. \\nTc={tc} Tref={tref} dLL={dLL} cold={pcold}\".format(\n                correctHot,\n                pitchHot,\n                tc=duct.temperatureInC,\n                tref=duct.inputTemperatureInC,\n                dLL=dLL,\n                pcold=pitchCold,\n            ),\n        )\n\n    def test_coldMass(self):\n        \"\"\"\n        Verify that the cold mass is what it should be, even though the hot height is input.\n\n        At the cold temperature (but with hot height), the mass should be the same as at hot\n        temperature and hot height.\n        \"\"\"\n        fuel = self.b.getComponent(Flags.FUEL)\n        # set ref (input/cold) temperature.\n        Thot = fuel.temperatureInC\n        Tcold = fuel.inputTemperatureInC\n\n        # change temp to cold\n        fuel.setTemperature(Tcold)\n        massCold = fuel.getMass()\n        fuelArea = fuel.getArea()\n        # we are at cold temp so cold and hot area are equal\n        self.assertAlmostEqual(fuel.getArea(cold=True), fuel.getArea())\n        height = self.b.getHeight()  # hot height.\n        rho = fuel.getProperties().density(Tc=Tcold)\n        # can't use getThermalExpansionFactor since hot=cold so it would be 0\n        dllHot = fuel.getProperties().linearExpansionFactor(Tc=Thot, T0=Tcold)\n        coldHeight = 
height / (1 + dllHot)\n        theoreticalMass = fuelArea * coldHeight * rho\n\n        self.assertAlmostEqual(\n            massCold,\n            theoreticalMass,\n            7,\n            msg=\"Cold mass of fuel ({0}) != theoretical mass {1}.  Check calculation of cold mass\".format(\n                massCold, theoreticalMass\n            ),\n        )\n\n    def test_massConsistency(self):\n        \"\"\"Verify that the sum of the component masses equals the total mass.\"\"\"\n        tMass = 0.0\n        for child in self.b:\n            tMass += child.getMass()\n        bMass = self.b.getMass()\n        self.assertAlmostEqual(\n            tMass,\n            bMass,\n            10,\n            \"Sum of component mass {0} != total block mass {1}. \".format(tMass, bMass),\n        )\n"
  },
  {
    "path": "armi/reactor/tests/test_components.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests functionalities of components within ARMI.\"\"\"\n\nimport copy\nimport math\nimport random\nimport unittest\n\nimport numpy as np\nfrom numpy.testing import assert_allclose, assert_equal\n\nfrom armi.materials import air, alloy200\nfrom armi.materials.material import Material\nfrom armi.reactor import components, flags\nfrom armi.reactor.blocks import Block\nfrom armi.reactor.components import (\n    Circle,\n    Component,\n    ComponentType,\n    Cube,\n    DerivedShape,\n    DifferentialRadialSegment,\n    FilletedHexagon,\n    Helix,\n    Hexagon,\n    HexHoledCircle,\n    HoledHexagon,\n    HoledRectangle,\n    HoledSquare,\n    NullComponent,\n    RadialSegment,\n    Rectangle,\n    SolidRectangle,\n    Sphere,\n    Square,\n    Triangle,\n    UnshapedComponent,\n    UnshapedVolumetricComponent,\n    materials,\n)\nfrom armi.reactor.reactors import Reactor\nfrom armi.testing import loadTestReactor\nfrom armi.utils.units import getTc\n\n\nclass MockCompositionDependentExpander(materials.Material):\n    \"\"\"Dummy material that has a composition-dependent thermal expansion coefficient.\"\"\"\n\n    def linearExpansionPercent(self, Tk: float = None, Tc: float = None) -> float:\n        \"\"\"\n        Composition-dependent linear expansion coefficient.\n\n        Parameters\n        ----------\n        Tk : float, optional\n            
Temperature in Kelvin.\n        Tc : float, optional\n            Temperature in Celsius.\n        \"\"\"\n        alpha = 1.0e-5\n        beta = 1.0e-5 * self.parent.getMassFrac(\"C\")\n        refTemp = 20\n        return (alpha + beta) * getTc(Tc=Tc, Tk=Tk) * (Tc - refTemp)\n\n\nclass TestComponentFactory(unittest.TestCase):\n    def getCircleVoidDict(self):\n        return dict(\n            shape=\"circle\",\n            name=\"gap\",\n            Tinput=25,\n            Thot=600,\n            od=2.1,\n            id=0.0,\n            mult=7,\n            material=\"Void\",\n            isotopics=\"\",\n        )\n\n    def getCircleFuelDict(self):\n        return dict(\n            shape=\"circle\",\n            name=\"fuel\",\n            Tinput=25,\n            Thot=600,\n            od=2.1,\n            id=0.0,\n            mult=7,\n            material=\"UZr\",\n            isotopics=\"\",\n        )\n\n    def test_factory(self):\n        \"\"\"Creating and verifying void and fuel components.\n\n        .. test:: Example void and fuel components are initialized.\n            :id: T_ARMI_COMP_DEF0\n            :tests: R_ARMI_COMP_DEF\n        \"\"\"\n        voidAttrs = self.getCircleVoidDict()\n        voidComp = components.factory(voidAttrs.pop(\"shape\"), [], voidAttrs)\n        fuelAttrs = self.getCircleFuelDict()\n        fuelComp = components.factory(fuelAttrs.pop(\"shape\"), [], fuelAttrs)\n        self.assertIsInstance(voidComp, components.Circle)\n        self.assertIsInstance(voidComp.material, materials.Void)\n        self.assertIsInstance(fuelComp, components.Circle)\n        self.assertIsInstance(fuelComp.material, materials.UZr)\n\n    def test_componentInitializationAndDuplication(self):\n        \"\"\"Initialize and duplicate a component, veifying the parameters.\n\n        .. 
test:: Verify the parameters of an initialized component.\n            :id: T_ARMI_COMP_DEF1\n            :tests: R_ARMI_COMP_DEF\n        \"\"\"\n        # populate the class/signature dict, and create a basis attrs\n        attrs = {\n            \"name\": \"gap\",\n            \"Tinput\": 25,\n            \"Thot\": 600,\n            \"material\": \"Void\",\n            \"isotopics\": \"\",\n        }\n\n        for i, (name, klass) in enumerate(ComponentType.TYPES.items()):\n            # hack together a dictionary input\n            thisAttrs = {k: 1.0 for k in set(klass.INIT_SIGNATURE).difference(attrs)}\n            if \"oR\" in thisAttrs:\n                thisAttrs[\"oR\"] /= 20.0\n            if \"iR\" in thisAttrs:\n                thisAttrs[\"iR\"] /= 20.0\n            del thisAttrs[\"components\"]\n            thisAttrs.update(attrs)\n            thisAttrs[\"name\"] = f\"banana{i}\"\n            if \"modArea\" in thisAttrs:\n                thisAttrs[\"modArea\"] = None\n            component = components.factory(name, [], thisAttrs)\n            duped = copy.deepcopy(component)\n            for key, val in component.p.items():\n                if key in [\"numberDensities\", \"nuclides\"]:\n                    for i in range(len(val)):\n                        self.assertEqual(val[i], duped.p[key][i])\n                elif key not in [\"area\", \"volume\", \"serialNum\"]:\n                    # they get recomputed\n                    self.assertEqual(\n                        val,\n                        duped.p[key],\n                        msg=f\"Key: {key}, val1: {val}, val2: {duped.p[key]}\",\n                    )\n\n    def test_factoryBadShapeName(self):\n        badDict = self.getCircleFuelDict()\n        with self.assertRaises(ValueError):\n            components.factory(\"turtle\", [], badDict)\n\n\nclass TestGeneralComponents(unittest.TestCase):\n    \"\"\"Base test for all individual component tests.\"\"\"\n\n    componentCls = 
Component\n    componentMaterial = \"HT9\"\n    componentDims = {\"Tinput\": 25.0, \"Thot\": 25.0}\n\n    def setUp(self, component=None):\n        \"\"\"\n        Most of the time nothing will be passed as `component` and the result will be stored in\n        self, but you can also pass a component object as `component`, in which case the object will\n        be returned with the `parent` attribute assigned.\n        \"\"\"\n\n        class _Parent:\n            def getSymmetryFactor(self):\n                return 1.0\n\n            def getHeight(self):\n                return 1.0\n\n            def clearCache(self):\n                pass\n\n            def __iter__(self):\n                \"\"\"Act like an iterator but don't actually iterate.\"\"\"\n                return iter(())\n\n            derivedMustUpdate = False\n\n        if component is None:\n            self.component = self.componentCls(\"TestComponent\", self.componentMaterial, **self.componentDims)\n            self.component.parent = _Parent()\n        else:\n            component.parent = _Parent()\n            return component\n\n\nclass TestComponentNDens(TestGeneralComponents):\n    \"\"\"Test component number density setting.\"\"\"\n\n    componentCls = Circle\n    componentDims = {\"Tinput\": 25.0, \"Thot\": 25.0, \"id\": 0.0, \"od\": 0.5}\n\n    def test_setNumberDensity(self):\n        \"\"\"Test setting a single number density.\n\n        .. test:: Users can set Component number density.\n            :id: T_ARMI_COMP_NUCLIDE_FRACS0\n            :tests: R_ARMI_COMP_NUCLIDE_FRACS\n        \"\"\"\n        component = self.component\n        self.assertAlmostEqual(component.getNumberDensity(\"C\"), 0.000780, 6)\n        component.setNumberDensity(\"C\", 0.57)\n        self.assertEqual(component.getNumberDensity(\"C\"), 0.57)\n\n    def test_setNumberDensities(self):\n        \"\"\"Test setting multiple number densities.\n\n        .. 
test:: Users can set Component number densities.\n            :id: T_ARMI_COMP_NUCLIDE_FRACS1\n            :tests: R_ARMI_COMP_NUCLIDE_FRACS\n        \"\"\"\n        component = self.component\n        self.assertAlmostEqual(component.getNumberDensity(\"MN\"), 0.000426, 6)\n        component.setNumberDensities({\"C\": 1, \"MN\": 0.58})\n        self.assertEqual(component.getNumberDensity(\"C\"), 1.0)\n        self.assertEqual(component.getNumberDensity(\"MN\"), 0.58)\n\n    def test_setNumberDensitiesWithExpansion(self):\n        expansionMaterial = MockCompositionDependentExpander()\n        expansionMaterial.parent = self.component\n        self.component.material = expansionMaterial\n        component = self.component\n        initialVolume = component.getVolume()\n        component.temperatureInC = 50\n        self.assertAlmostEqual(component.getNumberDensity(\"MN\"), 0.000426, 6)\n        component.setNumberDensities({\"C\": 1, \"MN\": 0.58})\n        newVolume = component.getVolume()\n        expansionFactor = initialVolume / newVolume\n        self.assertEqual(component.getNumberDensity(\"C\"), 1.0 * expansionFactor)\n        self.assertEqual(component.getNumberDensity(\"MN\"), 0.58 * expansionFactor)\n\n    def test_changeNDensByFactor(self):\n        \"\"\"Test the ability to change just the component number densities.\"\"\"\n        referenceDensity = self.component.getNumberDensities()\n        self.component.p.detailedNDens = None\n        self.component.p.pinNDens = None\n        scalingFactor = random.uniform(0, 10)\n        self.component.changeNDensByFactor(scalingFactor)\n        for nuc, refDens in referenceDensity.items():\n            actual = self.component.getNumberDensity(nuc)\n            self.assertEqual(actual, refDens * scalingFactor, msg=nuc)\n        self.assertIsNone(self.component.p.detailedNDens)\n        self.assertIsNone(self.component.p.pinNDens)\n\n    def test_changeNDensByFactorWithExtraParams(self):\n        \"\"\"Test scaling 
other parameters when component number density is scaled.\"\"\"\n        referenceDensity = self.component.getNumberDensities()\n        refDetailedNDens = np.random.random(100)\n        # Use copy to avoid spoiling the reference data with in-place multiplication\n        self.component.p.detailedNDens = refDetailedNDens.copy()\n        # Array of number densities per pin\n        refPinDens = np.random.random(size=(50, 10))\n        self.component.p.pinNDens = refPinDens.copy()\n\n        scalingFactor = random.uniform(0, 10)\n        self.component.changeNDensByFactor(scalingFactor)\n\n        for nuc, refDens in referenceDensity.items():\n            actual = self.component.getNumberDensity(nuc)\n            self.assertEqual(actual, refDens * scalingFactor)\n\n        assert_allclose(self.component.p.detailedNDens, refDetailedNDens * scalingFactor, rtol=1e-6)\n        assert_allclose(self.component.p.pinNDens, refPinDens * scalingFactor, rtol=1e-6)\n\n\nclass TestComponent(TestGeneralComponents):\n    \"\"\"Test the base component.\"\"\"\n\n    componentCls = Component\n\n    def test_initializeComponentMaterial(self):\n        \"\"\"Creating component with single material.\n\n        .. test:: Components are made of one material.\n            :id: T_ARMI_COMP_1MAT0\n            :tests: R_ARMI_COMP_1MAT\n        \"\"\"\n        expectedName = \"TestComponent\"\n        actualName = self.component.getName()\n        expectedMaterialName = \"HT9\"\n        actualMaterialName = self.component.material.getName()\n        self.assertEqual(expectedName, actualName)\n        self.assertEqual(expectedMaterialName, actualMaterialName)\n\n    def test_solid_material(self):\n        \"\"\"Determine if material is solid.\n\n        .. 
test:: Components have material properties.\n            :id: T_ARMI_COMP_MAT\n            :tests: R_ARMI_COMP_MAT\n        \"\"\"\n        self.assertTrue(isinstance(self.component.getProperties(), Material))\n        self.assertTrue(hasattr(self.component.material, \"density\"))\n        self.assertIn(\"HT9\", str(self.component.getProperties()))\n\n        self.component.material = air.Air()\n        self.assertFalse(self.component.containsSolidMaterial())\n\n        self.component.material = alloy200.Alloy200()\n        self.assertTrue(self.component.containsSolidMaterial())\n\n        self.assertTrue(isinstance(self.component.getProperties(), Material))\n        self.assertTrue(hasattr(self.component.material, \"density\"))\n        self.assertIn(\"Alloy200\", str(self.component.getProperties()))\n\n\nclass TestNullComponent(TestGeneralComponents):\n    componentCls = NullComponent\n\n    def test_cmp(self):\n        \"\"\"Test null component.\"\"\"\n        cur = self.component\n        ref = DerivedShape(\"DerivedShape\", \"Material\", 0, 0)\n        self.assertLess(cur, ref)\n\n    def test_nonzero(self):\n        cur = bool(self.component)\n        ref = False\n        self.assertEqual(cur, ref)\n\n    def test_getDimension(self):\n        \"\"\"Test getting empty component.\n\n        .. 
test:: Retrieve a null dimension.\n            :id: T_ARMI_COMP_DIMS0\n            :tests: R_ARMI_COMP_DIMS\n        \"\"\"\n        for temp in range(400, 901, 25):\n            self.assertEqual(self.component.getDimension(\"\", Tc=temp), 0.0)\n\n\nclass TestUnshapedComponent(TestGeneralComponents):\n    componentCls = UnshapedComponent\n    componentMaterial = \"HT9\"\n    componentDims = {\"Tinput\": 25.0, \"Thot\": 430.0, \"area\": math.pi}\n\n    def test_getComponentArea(self):\n        # a case without thermal expansion\n        self.assertEqual(self.component.getComponentArea(cold=True), math.pi)\n\n        # a case with thermal expansion\n        self.assertEqual(\n            self.component.getComponentArea(cold=False),\n            math.pi * self.component.getThermalExpansionFactor(self.component.temperatureInC) ** 2,\n        )\n\n        # Passing temperature directly\n        self.assertEqual(\n            self.component.getComponentArea(cold=False),\n            self.component.getComponentArea(Tc=self.component.temperatureInC),\n        )\n\n        # show that area expansion is consistent with the density change in the material\n        hotDensity = self.component.density()\n        hotArea = self.component.getArea()\n        thermalExpansionFactor = self.component.getThermalExpansionFactor(self.component.temperatureInC)\n\n        coldComponent = self.setUp(\n            UnshapedComponent(\n                name=\"coldComponent\",\n                material=self.componentMaterial,\n                Tinput=self.component.inputTemperatureInC,\n                Thot=self.component.inputTemperatureInC,\n                area=math.pi,\n            )\n        )\n        coldDensity = coldComponent.density()\n        coldArea = coldComponent.getArea()\n\n        self.assertGreater(thermalExpansionFactor, 1)\n        # thermalExpansionFactor accounts for density being 3D while area is 2D\n        self.assertAlmostEqual(\n            (coldDensity * coldArea),\n  
          (thermalExpansionFactor * hotDensity * hotArea),\n        )\n\n    def test_getBoundingCircleOuterDiameter(self):\n        # a case without thermal expansion\n        self.assertEqual(self.component.getBoundingCircleOuterDiameter(cold=True), 2.0)\n\n        # a case with thermal expansion\n        self.assertEqual(\n            self.component.getBoundingCircleOuterDiameter(cold=False),\n            2.0 * self.component.getThermalExpansionFactor(self.component.temperatureInC),\n        )\n\n    def test_component_less_than(self):\n        \"\"\"Ensure that comparisons between components properly reference bounding circle outer diameter.\n\n        .. test:: Order components by their outermost diameter\n            :id: T_ARMI_COMP_ORDER\n            :tests: R_ARMI_COMP_ORDER\n        \"\"\"\n        componentCls = UnshapedComponent\n        componentMaterial = \"HT9\"\n\n        smallDims = {\"Tinput\": 25.0, \"Thot\": 430.0, \"area\": 0.5 * math.pi}\n        sameDims = {\"Tinput\": 25.0, \"Thot\": 430.0, \"area\": 1.0 * math.pi}\n        bigDims = {\"Tinput\": 25.0, \"Thot\": 430.0, \"area\": 2.0 * math.pi}\n\n        smallComponent = componentCls(\"TestComponent\", componentMaterial, **smallDims)\n        sameComponent = componentCls(\"TestComponent\", componentMaterial, **sameDims)\n        bigComponent = componentCls(\"TestComponent\", componentMaterial, **bigDims)\n\n        self.assertTrue(smallComponent < self.component)\n        self.assertFalse(bigComponent < self.component)\n        self.assertFalse(sameComponent < self.component)\n\n    def test_fromComponent(self):\n        circle = components.Circle(\"testCircle\", \"HT9\", 25, 500, 1.0)\n        unshaped = components.UnshapedComponent.fromComponent(circle)\n        self.assertEqual(circle.getComponentArea(), unshaped.getComponentArea())\n\n\nclass TestShapedComponent(TestGeneralComponents):\n    \"\"\"Abstract class for all shaped components.\"\"\"\n\n    def 
test_preserveMassDuringThermalExpansion(self):\n        \"\"\"Test that when we thermally expand any arbitrary shape, mass is conserved.\"\"\"\n        if not self.component.THERMAL_EXPANSION_DIMS:\n            return\n        temperatures = [25.0, 30.0, 40.0, 60.0, 80.0, 430.0]\n        masses = []\n        report = \"Temperature, mass, volume, dLL\\n\"\n        for ht in temperatures:\n            self.component.setTemperature(ht)\n            mass = self.component.getMass()\n            masses.append(mass)\n            report += \"{:10.1f}, {:7.5e}, {:7.5e}, {:7.5e}\\n\".format(\n                ht,\n                mass,\n                self.component.getVolume(),\n                self.component.getThermalExpansionFactor(),\n            )\n\n        for mass in masses:\n            self.assertNotAlmostEqual(mass, 0.0)\n            self.assertAlmostEqual(\n                masses[0],\n                mass,\n                msg=\"Masses are not preserved during thermal expansion of component {} at {} C. \"\n                \"Original Mass: {}, Thermally Expanded Mass: {}\\n{}\"\n                \"\".format(self.component, ht, masses[0], mass, report),\n            )\n\n    def test_volumeAfterClearCache(self):\n        \"\"\"\n        Test volume after cache has been cleared.\n\n        .. 
test:: Clear cache after a dimensions updated.\n            :id: T_ARMI_COMP_VOL0\n            :tests: R_ARMI_COMP_VOL\n        \"\"\"\n        c = UnshapedVolumetricComponent(\"testComponent\", \"Custom\", 0, 0, volume=1)\n        self.assertAlmostEqual(c.getVolume(), 1, 6)\n        c.clearCache()\n        self.assertAlmostEqual(c.getVolume(), 1, 6)\n\n    def test_densityConsistent(self):\n        \"\"\"Testing the Component matches quick hand calc.\"\"\"\n        c = self.component\n\n        # no volume defined\n        if isinstance(c, (DerivedShape, UnshapedVolumetricComponent)):\n            return\n        elif isinstance(c, Component):\n            return\n\n        # basic density sanity test\n        self.assertAlmostEqual(c.density(), c.getMass() / c.getVolume())\n\n        # test 2D expanding density\n        if c.temperatureInC == c.inputTemperatureInC:\n            self.assertAlmostEqual(c.density(), c.material.pseudoDensity(Tc=c.temperatureInC), delta=0.001)\n\n        if not c.is3D:\n            self.assertAlmostEqual(\n                c.getArea() * c.parent.getHeight() * c.density(),\n                self.component.getMass(),\n            )\n\n    def test_density(self):\n        \"\"\"Testing the Component density gets the correct 3D material density.\"\"\"\n\n        class StrangeMaterial(Material):\n            \"\"\"material designed to make the test easier to understand.\"\"\"\n\n            def pseduoDensity(self, Tk=None, Tc=None):\n                return 1.0\n\n            def density(self, Tk=None, Tc=None):\n                return 3.0\n\n        c = Sphere(\n            name=\"strangeBall\",\n            material=StrangeMaterial(),\n            Tinput=200,\n            Thot=500,\n            od=1,\n            id=0,\n            mult=1,\n        )\n\n        # we expect to see the 3D material density here\n        self.assertEqual(c.density(), 3.0)\n\n\nclass TestDerivedShape(TestShapedComponent):\n    componentCls = DerivedShape\n    
componentMaterial = \"Sodium\"\n    componentDims = {\"Tinput\": 25.0, \"Thot\": 400.0, \"area\": 1.0}\n\n    def test_getBoundingCircleOuterDiameter(self):\n        self.assertGreater(self.component.getBoundingCircleOuterDiameter(cold=True), 0.0)\n\n    def test_computeVolume(self):\n        \"\"\"Test the computeVolume method on a number of components in a block.\n\n        .. test:: Compute the volume of a DerivedShape inside solid shapes.\n            :id: T_ARMI_COMP_FLUID\n            :tests: R_ARMI_COMP_FLUID\n        \"\"\"\n        from armi.reactor.tests.test_blocks import buildSimpleFuelBlock\n\n        # Calculate the total volume of the block\n        b = buildSimpleFuelBlock()\n        totalVolume = b.getVolume()\n\n        # calculate the total volume by adding up all the components\n        c = b.getComponent(flags.Flags.COOLANT)\n        totalByParts = 0\n        for co in b.getComponents():\n            totalByParts += co.computeVolume()\n\n        self.assertAlmostEqual(totalByParts, totalVolume)\n\n        # test the computeVolume method on the one DerivedShape in this block\n        self.assertAlmostEqual(c.computeVolume(), 1386.5232044586771)\n\n\nclass TestDerivedShapeGetArea(unittest.TestCase):\n    def test_getAreaColdTrue(self):\n        \"\"\"Prove that the DerivedShape.getArea() works at cold=True.\"\"\"\n        # load one-block test reactor\n        _o, r = loadTestReactor(inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\")\n        b = r.core[0][0]\n\n        # ensure there is a DerivedShape in this Block\n        shapes = set([type(c) for c in b])\n        self.assertIn(Circle, shapes)\n        self.assertIn(DerivedShape, shapes)\n        self.assertIn(Helix, shapes)\n        self.assertIn(Hexagon, shapes)\n\n        # prove that getArea works on the block level\n        self.assertAlmostEqual(b.getArea(cold=True), b.getArea(cold=False), delta=1e-10)\n\n        # prove that getArea preserves the sum of all the areas, even if 
there is a DerivedShape\n        totalAreaCold = sum([c.getArea(cold=True) for c in b])\n        totalAreaHot = sum([c.getArea(cold=False) for c in b])\n        self.assertAlmostEqual(totalAreaCold, totalAreaHot, delta=1e-10)\n\n    def test_getAreaTemp(self):\n        \"\"\"Prove that the DerivedShape.getArea() works for an arbitrary temperature.\"\"\"\n        # load one-block test reactor\n        _o, r = loadTestReactor(inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\")\n        b = r.core[0][0]\n        b.clearCache()\n\n        # ensure there is a DerivedShape in this Block\n        shapes = set([type(c) for c in b])\n        self.assertIn(Circle, shapes)\n        self.assertIn(DerivedShape, shapes)\n        self.assertIn(Helix, shapes)\n        self.assertIn(Hexagon, shapes)\n\n        blockArea = b.getMaxArea()\n        compArea = sum([c.getArea(Tc=300) for c in b if not isinstance(c, DerivedShape)])\n\n        comp = [c for c in b if isinstance(c, DerivedShape)][0]\n\n        self.assertAlmostEqual(blockArea - compArea, comp.getComponentArea(Tc=300))\n\n\nclass TestComponentSort(unittest.TestCase):\n    def setUp(self):\n        self.components = []\n        pinComp = components.Circle(\"pin\", \"UZr\", Tinput=273.0, Thot=273.0, od=0.08, mult=169.0)\n        gapComp = components.Circle(\"gap\", \"Sodium\", Tinput=273.0, Thot=273.0, id=0.08, od=0.08, mult=169.0)\n        ductComp = components.Hexagon(\"duct\", \"HT9\", Tinput=273.0, Thot=273.0, op=2.6, ip=2.0, mult=1.0)\n        cladComp = components.Circle(\"clad\", \"HT9\", Tinput=273.0, Thot=273.0, id=0.08, od=0.1, mult=169.0)\n        wireComp = components.Helix(\n            \"wire\",\n            \"HT9\",\n            Tinput=273.0,\n            Thot=273.0,\n            axialPitch=10.0,\n            helixDiameter=0.11,\n            od=0.01,\n            mult=169.0,\n        )\n        self.components = [\n            wireComp,\n            cladComp,\n            ductComp,\n            
pinComp,\n            gapComp,\n        ]\n\n    def test_sorting(self):\n        \"\"\"Test that components are sorted as expected.\"\"\"\n        sortedComps = sorted(self.components)\n        currentMaxOd = 0.0\n        for c in sortedComps:\n            self.assertGreaterEqual(c.getBoundingCircleOuterDiameter(cold=True), currentMaxOd)\n            currentMaxOd = c.getBoundingCircleOuterDiameter(cold=True)\n        self.assertEqual(sortedComps[1].name, \"gap\")\n        self.assertEqual(sortedComps[2].name, \"clad\")\n\n\nclass TestCircle(TestShapedComponent):\n    \"\"\"Test circle shaped component.\"\"\"\n\n    componentCls = Circle\n    _id = 5.0\n    _od = 10\n    _coldTemp = 25.0\n    componentDims = {\n        \"Tinput\": _coldTemp,\n        \"Thot\": 25.0,\n        \"od\": _od,\n        \"id\": _id,\n        \"mult\": 1.5,\n    }\n\n    def test_copy(self):\n        circle2 = copy.copy(self.component)\n        self.assertIsNot(circle2, self.component)\n\n        self.assertAlmostEqual(circle2.getDimension(\"id\"), self.component.getDimension(\"id\"))\n        self.assertAlmostEqual(circle2.getDimension(\"od\"), self.component.getDimension(\"od\"))\n        self.assertAlmostEqual(circle2.getDimension(\"mult\"), self.component.getDimension(\"mult\"))\n\n    def test_circleExpansionWorks(self):\n        \"\"\"Test that when ARMI thermally expands a circle, mass is conserved.\n\n        .. test:: Calculate thermal expansion.\n            :id: T_ARMI_COMP_EXPANSION0\n            :tests: R_ARMI_COMP_EXPANSION\n        \"\"\"\n        hotTemp = 700.0\n        dLL = self.component.material.linearExpansionFactor(Tc=hotTemp, T0=self._coldTemp)\n        ref = 1.0 + dLL\n        cur = self.component.getThermalExpansionFactor(Tc=hotTemp)\n        self.assertAlmostEqual(cur, ref)\n\n    def test_getDimension(self):\n        \"\"\"Test getting component dimension at specific temperature.\n\n        .. 
test:: Retrieve a dimension at a temperature.\n            :id: T_ARMI_COMP_DIMS1\n            :tests: R_ARMI_COMP_DIMS\n\n        .. test:: Calculate thermal expansion.\n            :id: T_ARMI_COMP_EXPANSION1\n            :tests: R_ARMI_COMP_EXPANSION\n        \"\"\"\n        for hotTemp in range(200, 400, 25):\n            ref = self._od * self.component.getThermalExpansionFactor(Tc=hotTemp)\n            cur = self.component.getDimension(\"od\", Tc=hotTemp)\n            self.assertAlmostEqual(cur, ref)\n\n    def test_thermallyExpands(self):\n        \"\"\"Test that ARMI can thermally expands a circle.\"\"\"\n        self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)\n\n    def test_getBoundingCircleOuterDiam(self):\n        ref = self._od\n        cur = self.component.getBoundingCircleOuterDiameter(cold=True)\n        self.assertAlmostEqual(ref, cur)\n\n    def test_getCircleInnerDiameter(self):\n        cur = self.component.getCircleInnerDiameter(cold=True)\n        self.assertAlmostEqual(self._id, cur)\n\n    def test_dimensionThermallyExpands(self):\n        expandedDims = [\"od\", \"id\", \"mult\"]\n        ref = [True, True, False]\n        for i, d in enumerate(expandedDims):\n            cur = d in self.component.THERMAL_EXPANSION_DIMS\n            self.assertEqual(cur, ref[i])\n\n    def test_getArea(self):\n        \"\"\"Calculate area of circle.\n\n        .. 
test:: Calculate area of circle.\n            :id: T_ARMI_COMP_VOL1\n            :tests: R_ARMI_COMP_VOL\n        \"\"\"\n        # show we can calculate the area once\n        od = self.component.getDimension(\"od\")\n        idd = self.component.getDimension(\"id\")\n        mult = self.component.getDimension(\"mult\")\n        ref = math.pi * ((od / 2) ** 2 - (idd / 2) ** 2) * mult\n        cur = self.component.getArea()\n        self.assertAlmostEqual(cur, ref)\n\n        # show we can clear the cache, change the temp, and correctly re-calc the area\n        for newTemp in range(500, 690, 19):\n            self.component.clearCache()\n\n            # re-calc area\n            self.component.temperatureInC = newTemp\n            od = self.component.getDimension(\"od\", Tc=newTemp)\n            idd = self.component.getDimension(\"id\", Tc=newTemp)\n            ref = math.pi * ((od / 2) ** 2 - (idd / 2) ** 2) * mult\n            cur = self.component.getArea()\n            self.assertAlmostEqual(cur, ref)\n\n    def test_compInteractionsLinkingByDims(self):\n        \"\"\"Tests linking of Components by dimensions.\n\n        The component ``gap``, representing the fuel-clad gap filled with Void, is defined with\n        dimensions that depend on the fuel outer diameter and clad inner diameter. The\n        :py:meth:`~armi.reactor.components.component.Component.resolveLinkedDims` method links the\n        gap dimensions appropriately when the Component is constructed, and the test shows the area\n        of the gap is calculated correctly based on the thermally-expanded dimensions of the fuel\n        and clad Components.\n\n        .. 
test:: Show the dimensions of a liquid Component can be defined to depend on the solid\n            Components that bound it.\n            :id: T_ARMI_COMP_FLUID1\n            :tests: R_ARMI_COMP_FLUID\n        \"\"\"\n        nPins = 217\n        fuelDims = {\"Tinput\": 25.0, \"Thot\": 430.0, \"od\": 0.9, \"id\": 0.0, \"mult\": nPins}\n        cladDims = {\"Tinput\": 25.0, \"Thot\": 430.0, \"od\": 1.1, \"id\": 1.0, \"mult\": nPins}\n        fuel = Circle(\"fuel\", \"UZr\", **fuelDims)\n        clad = Circle(\"clad\", \"HT9\", **cladDims)\n        gapDims = {\n            \"Tinput\": 25.0,\n            \"Thot\": 430.0,\n            \"od\": \"clad.id\",\n            \"id\": \"fuel.od\",\n            \"mult\": nPins,\n        }\n        gapDims[\"components\"] = {\"clad\": clad, \"fuel\": fuel}\n        gap = Circle(\"gap\", \"Void\", **gapDims)\n        mult = gap.getDimension(\"mult\")\n        od = gap.getDimension(\"od\")\n        idd = gap.getDimension(\"id\")\n        ref = mult * math.pi * ((od / 2.0) ** 2 - (idd / 2.0) ** 2)\n        cur = gap.getArea()\n        self.assertAlmostEqual(cur, ref)\n\n    def test_badComponentName(self):\n        \"\"\"This shows that resolveLinkedDims cannot support names with periods in them.\"\"\"\n        nPins = 12\n        fuelDims = {\"Tinput\": 25.0, \"Thot\": 430.0, \"od\": 0.9, \"id\": 0.0, \"mult\": nPins}\n        cladDims = {\"Tinput\": 25.0, \"Thot\": 430.0, \"od\": 1.1, \"id\": 1.0, \"mult\": nPins}\n        fuel = Circle(\"fuel\", \"UZr\", **fuelDims)\n        clad = Circle(\"clad_4.2.3\", \"HT9\", **cladDims)\n        gapDims = {\n            \"Tinput\": 25.0,\n            \"Thot\": 430.0,\n            \"od\": \"clad_4.2.3.id\",\n            \"id\": \"fuel.od\",\n            \"mult\": nPins,\n        }\n        gapDims[\"components\"] = {\"clad_4.2.3\": clad, \"fuel\": fuel}\n        with self.assertRaises(ValueError):\n            _gap = Circle(\"gap\", \"Void\", **gapDims)\n\n    def 
test_compInteractionsLinkingBySubt(self):\n        \"\"\"Tests linking of components by subtraction.\"\"\"\n        nPins = 217\n        gapDims = {\"Tinput\": 25.0, \"Thot\": 430.0, \"od\": 1.0, \"id\": 0.9, \"mult\": nPins}\n        gap = Circle(\"gap\", \"Void\", **gapDims)\n        fuelDims = {\n            \"Tinput\": 25.0,\n            \"Thot\": 430.0,\n            \"od\": 0.9,\n            \"id\": 0.0,\n            \"mult\": nPins,\n            \"modArea\": \"gap.sub\",\n        }\n        fuel = Circle(\"fuel\", \"UZr\", components={\"gap\": gap}, **fuelDims)\n        gapArea = (\n            gap.getDimension(\"mult\")\n            * math.pi\n            * ((gap.getDimension(\"od\") / 2.0) ** 2 - (gap.getDimension(\"id\") / 2.0) ** 2)\n        )\n        fuelArea = (\n            fuel.getDimension(\"mult\")\n            * math.pi\n            * ((fuel.getDimension(\"od\") / 2.0) ** 2 - (fuel.getDimension(\"id\") / 2.0) ** 2)\n        )\n        ref = fuelArea - gapArea\n        cur = fuel.getArea()\n        self.assertAlmostEqual(cur, ref)\n\n    def test_getNumberDensities(self):\n        \"\"\"Test that demonstrates that number densities can be retrieved on from component.\"\"\"\n        self.component.p.numberDensities = np.ones(1, dtype=np.float64)\n        self.component.p.nuclides = np.array([\"NA23\"], dtype=\"S6\")\n        self.assertEqual(self.component.getNumberDensity(\"NA23\"), 1.0)\n\n    def test_changeNumberDensities(self):\n        \"\"\"Test that demonstrates that the number densities on a component can be modified.\"\"\"\n        self.component.p.numberDensities = np.ones(1, dtype=np.float64)\n        self.component.p.nuclides = np.array([\"NA23\"], dtype=\"S6\")\n        self.component.p.detailedNDens = [1.0]\n        self.component.p.pinNDens = [1.0]\n        self.assertEqual(self.component.getNumberDensity(\"NA23\"), 1.0)\n        self.component.changeNDensByFactor(3.0)\n        
    def test_fuelMass(self):
        """getFuelMass returns the full mass only when the component is flagged FUEL."""
        nominalMass = self.component.getMass()
        self.component.p.flags = flags.Flags.FUEL
        self.assertEqual(self.component.getFuelMass(), nominalMass)
        # a non-fuel flag means zero fuel mass
        self.component.p.flags = flags.Flags.MODERATOR
        self.assertEqual(self.component.getFuelMass(), 0.0)

    def test_theoreticalDensitySetter(self):
        """Ensure only fraction theoretical densities are supported."""
        self.assertEqual(self.component.p.theoreticalDensityFrac, 1)
        # values above 1.0 are rejected and leave the parameter unchanged
        with self.assertRaises(ValueError):
            self.component.p.theoreticalDensityFrac = 2.0
        self.assertEqual(self.component.p.theoreticalDensityFrac, 1)
        self.component.p.theoreticalDensityFrac = 0.2
        self.assertEqual(self.component.p.theoreticalDensityFrac, 0.2)
        # negative values are rejected as well
        with self.assertRaises(ValueError):
            self.component.p.theoreticalDensityFrac = -1.0
        self.assertEqual(self.component.p.theoreticalDensityFrac, 0.2)
        # the boundary values 1.0 and 0.0 are both accepted
        self.component.p.theoreticalDensityFrac = 1.0
        self.assertEqual(self.component.p.theoreticalDensityFrac, 1)
        self.component.p.theoreticalDensityFrac = 0.0
        self.assertEqual(self.component.p.theoreticalDensityFrac, 0)


class TestComponentExpansion(unittest.TestCase):
    # shared temperatures (degC) and cold dimension used by all expansion scenarios
    tCold = 25
    tWarm = 50
    tHot = 500
    coldOuterDiameter = 1.0

    def test_HT9Expansion(self):
        self.runExpansionTests(mat="HT9", isotope="FE")

    def test_UZrExpansion(self):
        self.runExpansionTests(mat="UZr", isotope="U235")

    def test_B4CExpansion(self):
        self.runExpansionTests(mat="B4C", isotope="B10")

    def runExpansionTests(self, mat: str, isotope: str):
        # run the three conservation scenarios for one material
        self.componentMassIndependentOfInputTemp(mat)
        self.expansionConservationHotHeightDefined(mat, isotope)
        self.expansionConservationColdHeightDefined(mat)

    def componentMassIndependentOfInputTemp(self, mat: str):
        """Two inputs at different cold temps that describe the same hot component must agree."""
        circle1 = Circle("circle", mat, self.tCold, self.tHot, self.coldOuterDiameter)
        # pick the input dimension to get the same hot component
        hotterDim = self.coldOuterDiameter * (1 + circle1.material.linearExpansionFactor(self.tCold + 200, self.tCold))
        circle2 = Circle("circle", mat, self.tCold + 200, self.tHot, hotterDim)
        self.assertAlmostEqual(circle1.getDimension("od"), circle2.getDimension("od"))
        self.assertAlmostEqual(circle1.getArea(), circle2.getArea())
        self.assertAlmostEqual(circle1.density(), circle2.density())

    def expansionConservationHotHeightDefined(self, mat: str, isotope: str):
        """
        Demonstrate tutorial for how to expand and relationships conserved at during expansion.

        Notes
        -----
        - height taken as hot height and show how quantity is conserved with
          inputHeightsConsideredHot = True (the default)
        """
        hotHeight = 1.0

        circle1 = Circle("circle", mat, self.tCold, self.tWarm, self.coldOuterDiameter)
        circle2 = Circle("circle", mat, self.tCold, self.tHot, self.coldOuterDiameter)

        # mass density is proportional to Fe number density and derived from
        # all the number densities and atomic masses
        self.assertAlmostEqual(
            circle1.getNumberDensity(isotope) / circle2.getNumberDensity(isotope),
            circle1.density() / circle2.density(),
        )

        # the colder one has more because it is the same cold outer diameter but it would be taller
        # at the same temperature
        mass1 = circle1.density() * circle1.getArea() * hotHeight
        mass2 = circle2.density() * circle2.getArea() * hotHeight
        self.assertGreater(mass1, mass2)

        # they are off by factor of thermal exp
        self.assertAlmostEqual(
            mass1 * circle1.getThermalExpansionFactor(),
            mass2 * circle2.getThermalExpansionFactor(),
        )

        # material.pseudoDensity is the 2D density of a material
        # material.density is true density and not equal in this case
        for circle in [circle1, circle2]:
            # 2D density is not equal after application of coldMatAxialExpansionFactor
            # which happens during construction
            self.assertNotAlmostEqual(
                circle.density(),
                circle.material.pseudoDensity(Tc=circle.temperatureInC),
            )
            # 2D density is off by the material thermal exp factor
            percent = circle.material.linearExpansionPercent(Tc=circle.temperatureInC)
            thermalExpansionFactorFromColdMatTemp = 1 + percent / 100
            self.assertAlmostEqual(
                circle.density() * thermalExpansionFactorFromColdMatTemp,
                circle.material.pseudoDensity(Tc=circle.temperatureInC),
            )
            self.assertAlmostEqual(
                circle.density(),
                circle.material.density(Tc=circle.temperatureInC),
            )

        # brief 2D expansion with set temp to show mass is conserved hot height would come from
        # block value
        warmMass = circle1.density() * circle1.getArea() * hotHeight
        circle1.setTemperature(self.tHot)
        hotMass = circle1.density() * circle1.getArea() * hotHeight
        self.assertAlmostEqual(warmMass, hotMass)
        # restore the warm state before the height-adjustment scenario below
        circle1.setTemperature(self.tWarm)

        # Change temp to circle 2 temp  to show equal to circle2 and then change back to show
        # recoverable to original values
        oldArea = circle1.getArea()
        initialDens = circle1.density()

        # when block.setHeight is called (which effectively changes component height)
        # component.setNumberDensity is called (for solid isotopes) to adjust the number density so
        # that now the 2D expansion will be approximated/expanded around the hot temp which is akin
        # to these adjustments
        heightFactor = circle1.getHeightFactor(self.tHot)
        circle1.adjustDensityForHeightExpansion(self.tHot)  # apply temp at new height
        circle1.setTemperature(self.tHot)

        # now its density is same as hot component
        self.assertAlmostEqual(circle1.density(), circle2.density())

        # show that mass is conserved after expansion
        circle1NewHotHeight = hotHeight * heightFactor
        self.assertAlmostEqual(mass1, circle1.density() * circle1.getArea() * circle1NewHotHeight)

        self.assertAlmostEqual(
            circle1.density(),
            circle1.material.density(Tc=circle1.temperatureInC),
        )
        # change back to old temp
        circle1.adjustDensityForHeightExpansion(self.tWarm)
        circle1.setTemperature(self.tWarm)

        # check for consistency
        self.assertAlmostEqual(initialDens, circle1.density())
        self.assertAlmostEqual(oldArea, circle1.getArea())
        self.assertAlmostEqual(mass1, circle1.density() * circle1.getArea() * hotHeight)

    def expansionConservationColdHeightDefined(self, mat: str):
        """
        Demonstrate that material is conserved at during expansion.

        Notes
        -----
        - height taken as cold height and show how quantity is conserved with
          inputHeightsConsideredHot = False
        """
        coldHeight = 1.0
        circle1 = Circle("circle", mat, self.tCold, self.tWarm, self.coldOuterDiameter)
        circle2 = Circle("circle", mat, self.tCold, self.tHot, self.coldOuterDiameter)
        # same as 1 but we will make like 2
        circle1AdjustTo2 = Circle("circle", mat, self.tCold, self.tWarm, self.coldOuterDiameter)

        # make it hot like 2
        circle1AdjustTo2.adjustDensityForHeightExpansion(self.tHot)
        circle1AdjustTo2.setTemperature(self.tHot)
        # check that its like 2
        self.assertAlmostEqual(circle2.density(), circle1AdjustTo2.density())
        self.assertAlmostEqual(circle2.getArea(), circle1AdjustTo2.getArea())

        for circle in [circle1, circle2, circle1AdjustTo2]:
            self.assertAlmostEqual(
                circle.density(),
                circle.material.density(Tc=circle.temperatureInC),
            )
            # total mass consistent between hot and cold. Hot height will be taller
            hotHeight = coldHeight * circle.getThermalExpansionFactor()
            self.assertAlmostEqual(
                coldHeight * circle.getArea(cold=True) * circle.material.density(Tc=circle.inputTemperatureInC),
                hotHeight * circle.getArea() * circle.density(),
            )


class TestTriangle(TestShapedComponent):
    """Test triangle shaped component."""

    componentCls = Triangle
    componentDims = {
        "Tinput": 25.0,
        "Thot": 430.0,
        "base": 3.0,
        "height": 2.0,
        "mult": 30,
    }

    def test_getArea(self):
        """Calculate area of triangle.

        .. test:: Calculate area of triangle.
            :id: T_ARMI_COMP_VOL2
            :tests: R_ARMI_COMP_VOL

        .. test:: Triangle shaped component
            :id: T_ARMI_COMP_SHAPES1
            :tests: R_ARMI_COMP_SHAPES
        """
        b = self.component.getDimension("base")
        h = self.component.getDimension("height")
        mult = self.component.getDimension("mult")
        # area of a triangle (b*h/2) times multiplicity
        ref = mult * 0.5 * b * h
        cur = self.component.getArea()
        self.assertAlmostEqual(cur, ref)

    def test_thermallyExpands(self):
        """Test that ARMI can thermally expands a triangle."""
        self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)

    def test_dimensionThermallyExpands(self):
        # geometric dims expand; "mult" does not
        expandedDims = ["base", "height", "mult"]
        ref = [True, True, False]
        for i, d in enumerate(expandedDims):
            cur = d in self.component.THERMAL_EXPANSION_DIMS
            self.assertEqual(cur, ref[i])
class TestRectangle(TestShapedComponent):
    """Test rectangle shaped component."""

    componentCls = Rectangle
    componentDims = {
        "Tinput": 25.0,
        "Thot": 430.0,
        "lengthOuter": 6.0,
        "lengthInner": 4.0,
        "widthOuter": 5.0,
        "widthInner": 3.0,
        "mult": 2,
    }

    def test_negativeArea(self):
        """A Void rectangle may carry negative area; a solid material may not."""
        dims = {
            "Tinput": 25.0,
            "Thot": 430.0,
            "lengthOuter": 1.0,
            "lengthInner": 2.0,
            "widthOuter": 5.0,
            "widthInner": 6.0,
            "mult": 2,
        }
        # the inner box is larger than the outer one, so the net area is negative
        expected = dims["mult"] * (dims["lengthOuter"] * dims["widthOuter"] - dims["lengthInner"] * dims["widthInner"])
        voidRect = Rectangle("test", "Void", **dims)
        self.assertAlmostEqual(voidRect.getArea(), expected)
        with self.assertRaises(ArithmeticError):
            solidRect = Rectangle("test", "UZr", **dims)
            solidRect.getArea()

    def test_getBoundingCircleOuterDiam(self):
        """Get outer diameter bounding circle.

        .. test:: Rectangle shaped component
            :id: T_ARMI_COMP_SHAPES2
            :tests: R_ARMI_COMP_SHAPES
        """
        # bounding diameter is the outer diagonal: sqrt(6^2 + 5^2)
        self.assertAlmostEqual(math.sqrt(61.0), self.component.getBoundingCircleOuterDiameter(cold=True))

        # verify the area of the rectangle is correct
        dims = self.componentDims
        expected = dims["lengthOuter"] * dims["widthOuter"] - dims["lengthInner"] * dims["widthInner"]
        expected *= dims["mult"]
        self.assertAlmostEqual(self.component.getArea(cold=True), expected)

    def test_getCircleInnerDiam(self):
        # inner diagonal: sqrt(4^2 + 3^2)
        self.assertAlmostEqual(math.sqrt(25.0), self.component.getCircleInnerDiameter(cold=True))

    def test_getArea(self):
        """Calculate area of rectangle.

        .. test:: Calculate area of rectangle.
            :id: T_ARMI_COMP_VOL3
            :tests: R_ARMI_COMP_VOL
        """
        getDim = self.component.getDimension
        expected = getDim("mult") * (
            getDim("lengthOuter") * getDim("widthOuter") - getDim("lengthInner") * getDim("widthInner")
        )
        self.assertAlmostEqual(self.component.getArea(), expected)

    def test_thermallyExpands(self):
        """Test that ARMI can thermally expands a rectangle."""
        self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)

    def test_dimensionThermallyExpands(self):
        # all geometric dims expand; "mult" does not
        for dim, shouldExpand in zip(
            ["lengthInner", "lengthOuter", "widthInner", "widthOuter", "mult"],
            [True, True, True, True, False],
        ):
            self.assertEqual(dim in self.component.THERMAL_EXPANSION_DIMS, shouldExpand)


class TestSolidRectangle(TestShapedComponent):
    componentCls = SolidRectangle
    componentDims = {
        "Tinput": 25.0,
        "Thot": 430.0,
        "lengthOuter": 5.0,
        "widthOuter": 5.0,
        "mult": 1,
    }

    def test_getBoundingCircleOuterDiam(self):
        """Test get bounding circle of the outer diameter."""
        # diagonal of the 5x5 box
        self.assertAlmostEqual(math.sqrt(50), self.component.getBoundingCircleOuterDiameter(cold=True))

    def test_getArea(self):
        """Calculate area of solid rectangle.

        .. test:: Calculate area of solid rectangle.
            :id: T_ARMI_COMP_VOL4
            :tests: R_ARMI_COMP_VOL
        """
        getDim = self.component.getDimension
        expected = getDim("mult") * (getDim("lengthOuter") * getDim("widthOuter"))
        self.assertAlmostEqual(self.component.getArea(), expected)

    def test_thermallyExpands(self):
        """Test that ARMI can thermally expands a solid rectangle."""
        self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)

    def test_dimensionThermallyExpands(self):
        for dim, shouldExpand in zip(["lengthOuter", "widthOuter", "mult"], [True, True, False]):
            self.assertEqual(dim in self.component.THERMAL_EXPANSION_DIMS, shouldExpand)


class TestSquare(TestShapedComponent):
    """Test square shaped component."""

    componentCls = Square
    componentDims = {
        "Tinput": 25.0,
        "Thot": 430.0,
        "widthOuter": 3.0,
        "widthInner": 2.0,
        "mult": 1,
    }

    def test_negativeArea(self):
        """A Void square may carry negative area; a solid material may not."""
        dims = {
            "Tinput": 25.0,
            "Thot": 430.0,
            "widthOuter": 1.0,
            "widthInner": 5.0,
            "mult": 1,
        }
        # inner width exceeds outer width, so the net area is negative
        expected = dims["mult"] * (dims["widthOuter"] * dims["widthOuter"] - dims["widthInner"] * dims["widthInner"])
        voidSquare = Square("test", "Void", **dims)
        self.assertAlmostEqual(voidSquare.getArea(), expected)
        with self.assertRaises(ArithmeticError):
            solidSquare = Square("test", "UZr", **dims)
            solidSquare.getArea()

    def test_getBoundingCircleOuterDiam(self):
        """Get bounding circle outer diameter.

        .. test:: Square shaped component
            :id: T_ARMI_COMP_SHAPES3
            :tests: R_ARMI_COMP_SHAPES
        """
        # outer diagonal: sqrt(3^2 + 3^2)
        self.assertAlmostEqual(math.sqrt(18.0), self.component.getBoundingCircleOuterDiameter(cold=True))

        # verify the area of the circle is correct
        expected = self.componentDims["widthOuter"] ** 2 - self.componentDims["widthInner"] ** 2
        self.assertAlmostEqual(self.component.getComponentArea(cold=True), expected)

    def test_getCircleInnerDiam(self):
        # inner diagonal: sqrt(2^2 + 2^2)
        self.assertAlmostEqual(math.sqrt(8.0), self.component.getCircleInnerDiameter(cold=True))

    def test_getArea(self):
        """Calculate area of square.

        .. test:: Calculate area of square.
            :id: T_ARMI_COMP_VOL5
            :tests: R_ARMI_COMP_VOL
        """
        getDim = self.component.getDimension
        expected = getDim("mult") * (getDim("widthOuter") * getDim("widthOuter") - getDim("widthInner") * getDim("widthInner"))
        self.assertAlmostEqual(self.component.getArea(), expected)

    def test_thermallyExpands(self):
        """Test that ARMI can thermally expands a square."""
        self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)

    def test_dimensionThermallyExpands(self):
        for dim, shouldExpand in zip(["widthOuter", "widthInner", "mult"], [True, True, False]):
            self.assertEqual(dim in self.component.THERMAL_EXPANSION_DIMS, shouldExpand)
class TestCube(TestShapedComponent):
    componentCls = Cube
    componentDims = {
        "Tinput": 25.0,
        "Thot": 430.0,
        "lengthOuter": 5.0,
        "lengthInner": 4.0,
        "widthOuter": 5.0,
        "widthInner": 3.0,
        "heightOuter": 20.0,
        "heightInner": 10.0,
        "mult": 2,
    }

    def test_negativeVolume(self):
        """A Void cube may carry negative volume; a solid material may not."""
        dims = {
            "Tinput": 25.0,
            "Thot": 430.0,
            "lengthOuter": 5.0,
            "lengthInner": 20.0,
            "widthOuter": 5.0,
            "widthInner": 30.0,
            "heightOuter": 20.0,
            "heightInner": 30.0,
            "mult": 2,
        }
        # the inner box exceeds the outer one, so the net volume is negative
        expectedVolume = dims["mult"] * (
            dims["lengthOuter"] * dims["widthOuter"] * dims["heightOuter"]
            - dims["lengthInner"] * dims["widthInner"] * dims["heightInner"]
        )
        voidCube = Cube("test", "Void", **dims)
        self.assertAlmostEqual(voidCube.getVolume(), expectedVolume)
        with self.assertRaises(ArithmeticError):
            solidCube = Cube("test", "UZr", **dims)
            solidCube.getVolume()

    def test_getVolume(self):
        """Calculate area of cube.

        .. test:: Calculate area of cube.
            :id: T_ARMI_COMP_VOL6
            :tests: R_ARMI_COMP_VOL
        """
        getDim = self.component.getDimension
        outerVol = getDim("lengthOuter") * getDim("widthOuter") * getDim("heightOuter")
        innerVol = getDim("lengthInner") * getDim("widthInner") * getDim("heightInner")
        self.assertAlmostEqual(self.component.getVolume(), getDim("mult") * (outerVol - innerVol))

    def test_thermallyExpands(self):
        """Test that ARMI can thermally expands a cube."""
        self.assertFalse(self.component.THERMAL_EXPANSION_DIMS)


class TestHexagon(TestShapedComponent):
    """Test hexagon shaped component."""

    componentCls = Hexagon
    componentDims = {"Tinput": 25.0, "Thot": 430.0, "op": 10.0, "ip": 5.0, "mult": 1}

    def test_getBoundingCircleOuterDiam(self):
        # corner-to-corner distance for an outer flat-to-flat pitch of 10
        self.assertAlmostEqual(2.0 * 10 / math.sqrt(3), self.component.getBoundingCircleOuterDiameter(cold=True))

    def test_getCircleInnerDiameter(self):
        # corner-to-corner distance for an inner flat-to-flat pitch of 5
        self.assertAlmostEqual(2.0 * 5.0 / math.sqrt(3), self.component.getCircleInnerDiameter(cold=True))

    def test_getArea(self):
        """Calculate area of hexagon.

        .. test:: Calculate area of hexagon.
            :id: T_ARMI_COMP_VOL7
            :tests: R_ARMI_COMP_VOL
        """
        getDim = self.component.getDimension
        # hex area is sqrt(3)/2 * pitch^2; subtract the inner hex and scale by mult
        expected = math.sqrt(3.0) / 2.0 * (getDim("op") ** 2 - getDim("ip") ** 2) * getDim("mult")
        self.assertAlmostEqual(self.component.getArea(), expected)

    def test_thermallyExpands(self):
        """Test that ARMI can thermally expands a hexagon."""
        self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)

    def test_dimensionThermallyExpands(self):
        for dim, shouldExpand in zip(["op", "ip", "mult"], [True, True, False]):
            self.assertEqual(dim in self.component.THERMAL_EXPANSION_DIMS, shouldExpand)
class TestFilletedHexagon(TestShapedComponent):
    """Test FilletedHexagon shaped component."""

    componentCls = FilletedHexagon
    componentDims = {
        "Tinput": 25.0,
        "Thot": 430.0,
        "op": 10.0,
        "ip": 5.0,
        "mult": 1,
        "oR": 0.2,
        "iR": 0.1,
    }

    def test_getBoundingCircleOuterDiameter(self):
        # corner-to-corner distance for an outer flat-to-flat pitch of 10
        self.assertAlmostEqual(2.0 * 10 / math.sqrt(3), self.component.getBoundingCircleOuterDiameter(cold=True))

    def test_getCircleInnerDiameter(self):
        # corner-to-corner distance for an inner flat-to-flat pitch of 5
        self.assertAlmostEqual(2.0 * 5.0 / math.sqrt(3), self.component.getCircleInnerDiameter(cold=True))

    def test_getComponentArea(self):
        """Component area is the filleted outer hex minus the filleted inner hex."""
        getDim = self.component.getDimension
        expected = getDim("mult") * (
            FilletedHexagon._area(getDim("op"), getDim("oR")) - FilletedHexagon._area(getDim("ip"), getDim("iR"))
        )
        self.assertAlmostEqual(self.component.getComponentArea(), expected)

    def test_thermallyExpands(self):
        """Test that ARMI can thermally expands a Hexagon."""
        self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)

    def test_dimensionThermallyExpands(self):
        for dim, shouldExpand in zip(["op", "ip", "iR", "oR", "mult"], [True, True, True, True, False]):
            self.assertEqual(dim in self.component.THERMAL_EXPANSION_DIMS, shouldExpand)

    def test_filletedMatchesNormal(self):
        """Prove that if the radius of curvature is 0.0, FilletedHexagon is just a hexagon."""
        for innerPitch in np.arange(0.1, 1, 0.1):
            for outerPitch in np.arange(1.1, 5, 0.4):
                dims = {
                    "Tinput": 25.0,
                    "Thot": 430.0,
                    "op": outerPitch,
                    "ip": innerPitch,
                    "mult": 1.0,
                }
                filleted = FilletedHexagon("xyz", "HT9", **dims)
                plain = Hexagon("xyz", "HT9", **dims)

                # with no fillet radii given, areas agree and filleting never adds area
                self.assertAlmostEqual(filleted.getComponentArea(), plain.getComponentArea(), delta=1e-7)
                self.assertGreaterEqual(plain.getArea(), filleted.getArea() - 1e-7)

    def test_filletedBecomesACircle(self):
        """Prove that as the radius of curvature becomes D/2, the shape becomes a circle."""
        for outerPitch in np.arange(1.0, 5.0, 0.5):
            dims = {
                "Tinput": 425.0,
                "Thot": 425.0,
                "op": outerPitch,
                "ip": 0.0,
                "oR": outerPitch / 2.0,
                "iR": 0.0,
                "mult": 1.0,
            }
            filleted = FilletedHexagon("circleHex", "HT9", **dims)
            self.assertAlmostEqual(filleted.getComponentArea(), math.pi * (outerPitch / 2.0) ** 2, delta=1e-7)


class TestHoledHexagon(TestShapedComponent):
    """Test holed hexagon shaped component."""

    componentCls = HoledHexagon
    componentDims = {
        "Tinput": 25.0,
        "Thot": 430.0,
        "op": 16.5,
        "holeOD": 3.6,
        "nHoles": 7,
        "mult": 1.0,
    }

    def test_getBoundingCircleOuterDiameter(self):
        self.assertAlmostEqual(2.0 * 16.5 / math.sqrt(3), self.component.getBoundingCircleOuterDiameter(cold=True))

    def test_getCircleInnerDiameter(self):
        # with multiple holes there is no single inscribed circle, so 0 is returned
        self.assertEqual(0, self.component.getCircleInnerDiameter(cold=True))

        # make and test another one with just 1 hole
        singleHole = HoledHexagon(
            "hex",
            "Void",
            self.componentDims["Tinput"],
            self.componentDims["Thot"],
            self.componentDims["op"],
            self.componentDims["holeOD"],
            nHoles=1,
        )
        self.assertEqual(self.componentDims["holeOD"], singleHole.getCircleInnerDiameter(cold=True))

    def test_getArea(self):
        """Calculate area of holed hexagon.

        .. test:: Calculate area of holed hexagon.
            :id: T_ARMI_COMP_VOL8
            :tests: R_ARMI_COMP_VOL
        """
        getDim = self.component.getDimension
        hexArea = math.sqrt(3.0) / 2.0 * (getDim("op") ** 2)
        holesArea = getDim("nHoles") * math.pi * ((getDim("holeOD") / 2.0) ** 2)
        self.assertAlmostEqual(self.component.getArea(), getDim("mult") * (hexArea - holesArea))

    def test_thermallyExpands(self):
        """Test that ARMI can thermally expands a holed hexagon."""
        self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)

    def test_dimensionThermallyExpands(self):
        for dim, shouldExpand in zip(["op", "holeOD", "mult"], [True, True, False]):
            self.assertEqual(dim in self.component.THERMAL_EXPANSION_DIMS, shouldExpand)
class TestHexHoledCircle(TestShapedComponent):
    componentCls = HexHoledCircle
    componentDims = {
        "Tinput": 25.0,
        "Thot": 430.0,
        "od": 16.5,
        "holeOP": 3.6,
        "mult": 1.0,
    }

    def test_getCircleInnerDiameter(self):
        hexHoledCircle = HexHoledCircle(
            "Circle",
            "Void",
            self.componentDims["Tinput"],
            self.componentDims["Thot"],
            self.componentDims["od"],
            self.componentDims["holeOP"],
        )
        self.assertEqual(self.componentDims["holeOP"], hexHoledCircle.getCircleInnerDiameter(cold=True))

    def test_getArea(self):
        """Calculate area of hex holed circle.

        .. test:: Calculate area of hex holed circle.
            :id: T_ARMI_COMP_VOL9
            :tests: R_ARMI_COMP_VOL
        """
        getDim = self.component.getDimension
        # a circle of diameter "od" with a hexagonal hole of pitch "holeOP" removed
        circleArea = math.pi * ((getDim("od") / 2.0) ** 2)
        hexHoleArea = math.sqrt(3.0) / 2.0 * (getDim("holeOP") ** 2)
        self.assertAlmostEqual(self.component.getArea(), getDim("mult") * (circleArea - hexHoleArea))

    def test_thermallyExpands(self):
        """Test that ARMI can thermally expands a holed hexagon."""
        self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)

    def test_dimensionThermallyExpands(self):
        for dim, shouldExpand in zip(["od", "holeOP", "mult"], [True, True, False]):
            self.assertEqual(dim in self.component.THERMAL_EXPANSION_DIMS, shouldExpand)


class TestHoledRectangle(TestShapedComponent):
    """Tests HoledRectangle, and provides much support for HoledSquare test."""

    componentCls = HoledRectangle
    componentDims = {
        "Tinput": 25.0,
        "Thot": 430.0,
        "lengthOuter": 16.0,
        "widthOuter": 10.0,
        "holeOD": 3.6,
        "mult": 1.0,
    }

    dimsToTestExpansion = ["lengthOuter", "widthOuter", "holeOD", "mult"]

    def setUp(self):
        TestShapedComponent.setUp(self)
        self.setClassDims()

    def setClassDims(self):
        # This enables subclassing testing for square
        self.length = self.component.getDimension("lengthOuter")
        self.width = self.component.getDimension("widthOuter")

    def test_getBoundingCircleOuterDiameter(self):
        # bounding diameter is the hypotenuse of the outer rectangle
        hypotenuse = (self.length**2 + self.width**2) ** 0.5
        self.assertAlmostEqual(hypotenuse, self.component.getBoundingCircleOuterDiameter())

    def test_getCircleInnerDiameter(self):
        self.assertEqual(self.componentDims["holeOD"], self.component.getCircleInnerDiameter(cold=True))

    def test_getArea(self):
        """Calculate area of holed rectangle.

        .. test:: Calculate area of holed rectangle.
            :id: T_ARMI_COMP_VOL10
            :tests: R_ARMI_COMP_VOL
        """
        holeDiam = self.component.getDimension("holeOD")
        mult = self.component.getDimension("mult")
        expected = mult * (self.length * self.width - math.pi * ((holeDiam / 2.0) ** 2))
        self.assertAlmostEqual(self.component.getArea(), expected)

    def test_thermallyExpands(self):
        self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)

    def test_dimensionThermallyExpands(self):
        # every listed dimension expands except the trailing "mult"
        for dim in self.dimsToTestExpansion[:-1]:
            self.assertIn(dim, self.component.THERMAL_EXPANSION_DIMS)
        self.assertNotIn(self.dimsToTestExpansion[-1], self.component.THERMAL_EXPANSION_DIMS)


class TestHoledSquare(TestHoledRectangle):
    """Test holed square shaped component."""

    componentCls = HoledSquare

    componentDims = {
        "Tinput": 25.0,
        "Thot": 430.0,
        "widthOuter": 16.0,
        "holeOD": 3.6,
        "mult": 1.0,
    }

    dimsToTestExpansion = ["widthOuter", "holeOD", "mult"]

    def setClassDims(self):
        # This enables subclassing testing for square
        self.width = self.length = self.component.getDimension("widthOuter")

    def test_thermallyExpands(self):
        self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)

    def test_getCircleInnerDiameter(self):
        self.assertEqual(self.componentDims["holeOD"], self.component.getCircleInnerDiameter(cold=True))
class TestHelix(TestShapedComponent):
    """Test helix shaped component."""

    componentCls = Helix
    componentDims = {
        "Tinput": 25.0,
        "Thot": 430.0,
        "od": 0.25,
        "axialPitch": 1.0,
        "mult": 1.5,
        "helixDiameter": 2.0,
        "id": 0.1,
    }

    def test_getBoundingCircleOuterDiameter(self):
        # helix diameter plus the wire OD
        self.assertAlmostEqual(2.0 + 0.25, self.component.getBoundingCircleOuterDiameter(cold=True))

    def test_getCircleInnerDiameter(self):
        # helix diameter minus the wire OD
        self.assertAlmostEqual(2.0 - 0.25, self.component.getCircleInnerDiameter(cold=True))

    def test_getArea(self):
        """Calculate area of helix.

        .. test:: Calculate area of helix.
            :id: T_ARMI_COMP_VOL11
            :tests: R_ARMI_COMP_VOL
        """
        getDim = self.component.getDimension
        # the wire's annular cross section is stretched by the helical path-length factor
        c = getDim("axialPitch") / (2.0 * math.pi)
        helixFactor = math.sqrt((getDim("helixDiameter") / 2.0) ** 2 + c**2) / c
        expected = getDim("mult") * math.pi * (getDim("od") ** 2 / 4.0 - getDim("id") ** 2 / 4.0) * helixFactor
        self.assertAlmostEqual(self.component.getArea(), expected)

    def test_thermallyExpands(self):
        self.assertTrue(self.component.THERMAL_EXPANSION_DIMS)

    def test_dimensionThermallyExpands(self):
        for dim, shouldExpand in zip(
            ["od", "id", "axialPitch", "helixDiameter", "mult"],
            [True, True, True, True, False],
        ):
            self.assertEqual(dim in self.component.THERMAL_EXPANSION_DIMS, shouldExpand)

    def test_validParameters(self):
        """Testing the Helix class performs as expected with various inputs."""
        # stupid/simple inputs
        h = Helix("thing", "Cu", 0, 0, 1, 1, 1)
        self.assertEqual(h.getDimension("axialPitch"), 1)

        # standard case / inputs ordered well
        h = Helix(
            "what",
            "Cu",
            Tinput=25.0,
            Thot=425.0,
            id=0.1,
            od=0.35,
            mult=1.0,
            axialPitch=1.123,
            helixDiameter=1.5,
        )
        # hot axial pitch grows slightly from the 1.123 input value
        self.assertTrue(1.123 < h.getDimension("axialPitch") < 1.15)

        # inputs ordered crazy
        h = Helix(
            material="Cu",
            id=0.1,
            mult=1.0,
            Tinput=25.0,
            Thot=425.0,
            axialPitch=1.123,
            name="stuff",
            od=0.35,
            helixDiameter=1.5,
        )
        self.assertTrue(1.123 < h.getDimension("axialPitch") < 1.15)

        # missing helixDiameter input
        with self.assertRaises(TypeError):
            h = Helix(
                name="helix",
                material="Cu",
                Tinput=25.0,
                Thot=425.0,
                id=0.1,
                od=0.35,
                mult=1.0,
                axialPitch=1.123,
            )


class TestSphere(TestShapedComponent):
    componentCls = Sphere
    componentDims = {"Tinput": 25.0, "Thot": 430.0, "od": 1.0, "id": 0.0, "mult": 3}

    def test_getVolume(self):
        """Calculate area of sphere.

        .. test:: Calculate volume of sphere.
            :id: T_ARMI_COMP_VOL12
            :tests: R_ARMI_COMP_VOL
        """
        getDim = self.component.getDimension
        # spherical-shell volume times multiplicity
        expected = getDim("mult") * 4.0 / 3.0 * math.pi * ((getDim("od") / 2.0) ** 3 - (getDim("id") / 2.0) ** 3)
        self.assertAlmostEqual(self.component.getVolume(), expected)

    def test_thermallyExpands(self):
        self.assertFalse(self.component.THERMAL_EXPANSION_DIMS)


class TestRadialSegment(TestShapedComponent):
    componentCls = RadialSegment
    componentDims = {
        "Tinput": 25.0,
        "Thot": 430.0,
        "inner_radius": 110,
        "outer_radius": 170,
        "height": 160,
        "mult": 1,
    }

    def test_getVolume(self):
        getDim = self.component.getDimension
        # full annulus area scaled to the azimuthal fraction spanned by the segment
        annulusArea = math.pi * (getDim("outer_radius") ** 2 - getDim("inner_radius") ** 2)
        aziFraction = (getDim("outer_theta") - getDim("inner_theta")) / (math.pi * 2.0)
        expected = getDim("mult") * annulusArea * aziFraction * getDim("height")
        self.assertAlmostEqual(self.component.getVolume(), expected)

    def test_thermallyExpands(self):
        self.assertFalse(self.component.THERMAL_EXPANSION_DIMS)

    def test_getBoundingCircleOuterDiameter(self):
        # twice the outer radius of 170
        self.assertEqual(self.component.getBoundingCircleOuterDiameter(cold=True), 340.0)
\"radius_differential\": 60,\n        \"inner_axial\": 60,\n        \"height\": 160,\n    }\n\n    def test_getVolume(self):\n        mult = self.component.getDimension(\"mult\")\n        outerRad = self.component.getDimension(\"outer_radius\")\n        innerRad = self.component.getDimension(\"inner_radius\")\n        outerTheta = self.component.getDimension(\"outer_theta\")\n        innerTheta = self.component.getDimension(\"inner_theta\")\n        height = self.component.getDimension(\"height\")\n        radialArea = math.pi * (outerRad**2 - innerRad**2)\n        aziFraction = (outerTheta - innerTheta) / (math.pi * 2.0)\n        ref = mult * radialArea * aziFraction * height\n        cur = self.component.getVolume()\n        self.assertAlmostEqual(cur, ref)\n\n    def test_updateDims(self):\n        \"\"\"\n        Test Update dimensions.\n\n        .. test:: Dimensions can be updated.\n            :id: T_ARMI_COMP_VOL13\n            :tests: R_ARMI_COMP_VOL\n        \"\"\"\n        self.assertEqual(self.component.getDimension(\"inner_radius\"), 110)\n        self.assertEqual(self.component.getDimension(\"radius_differential\"), 60)\n        self.component.updateDims()\n        self.assertEqual(self.component.getDimension(\"outer_radius\"), 170)\n        self.assertEqual(self.component.getDimension(\"outer_axial\"), 220)\n        self.assertEqual(self.component.getDimension(\"outer_theta\"), 2 * math.pi)\n\n    def test_thermallyExpands(self):\n        self.assertFalse(self.component.THERMAL_EXPANSION_DIMS)\n\n    def test_getBoundingCircleOuterDiameter(self):\n        self.assertEqual(self.component.getBoundingCircleOuterDiameter(cold=True), 340)\n\n\nclass TestMaterialAdjustments(unittest.TestCase):\n    \"\"\"Tests to make sure enrichment and mass fractions can be adjusted properly.\"\"\"\n\n    def setUp(self):\n        dims = {\"Tinput\": 25.0, \"Thot\": 600.0, \"od\": 10.0, \"id\": 5.0, \"mult\": 1.0}\n        self.fuel = Circle(\"fuel\", \"UZr\", 
**dims)\n\n        class FakeBlock:\n            reactor = Reactor(\"testMatReactor\", None)\n\n            def getHeight(self):  # unit height\n                return 1.0\n\n            def getSymmetryFactor(self):\n                return 1.0\n\n            def getAncestor(self, fn):\n                return self.reactor\n\n        self.fuel.parent = FakeBlock()\n\n    def test_setMassFrac(self):\n        \"\"\"Make sure we can set a mass fraction properly.\"\"\"\n        target35 = 0.2\n        self.fuel.setMassFrac(\"U235\", target35)\n        self.assertAlmostEqual(self.fuel.getMassFrac(\"U235\"), target35)\n\n    def test_setMassFracOnComponentMaterial(self):\n        \"\"\"Checks for valid and invalid mass fraction assignments on a component's material.\"\"\"\n        # Negative value is not acceptable.\n        with self.assertRaises(ValueError):\n            self.fuel.material.setMassFrac(\"U235\", -0.1)\n\n        # Greater than 1.0 value is not acceptable.\n        with self.assertRaises(ValueError):\n            self.fuel.material.setMassFrac(\"U235\", 1.1)\n\n        # String is not acceptable.\n        with self.assertRaises(TypeError):\n            self.fuel.material.setMassFrac(\"U235\", \"\")\n\n        # `NoneType` is not acceptable.\n        with self.assertRaises(TypeError):\n            self.fuel.material.setMassFrac(\"U235\", None)\n\n        # Zero is acceptable\n        self.fuel.material.setMassFrac(\"U235\", 0.0)\n        self.assertAlmostEqual(self.fuel.material.getMassFrac(\"U235\"), 0.0)\n\n        # One is acceptable\n        self.fuel.material.setMassFrac(\"U235\", 1.0)\n        self.assertAlmostEqual(self.fuel.material.getMassFrac(\"U235\"), 1.0)\n\n    def test_adjustMassFrac_invalid(self):\n        with self.assertRaises(ValueError):\n            self.fuel.adjustMassFrac(nuclideToAdjust=\"ZR\", val=-0.23)\n\n        with self.assertRaises(ValueError):\n            self.fuel.adjustMassFrac(nuclideToAdjust=\"ZR\", val=1.12)\n\n        
alwaysFalse = lambda a: False\n        self.fuel.parent = None\n        self.assertIsNone(self.fuel.getAncestorAndDistance(alwaysFalse))\n\n    def test_adjustMassFrac_U235(self):\n        zrMass = self.fuel.getMass(\"ZR\")\n        uMass = self.fuel.getMass(\"U\")\n        zrFrac = zrMass / (uMass + zrMass)\n\n        enrichmentFrac = 0.3\n        u235Frac = enrichmentFrac * uMass / (uMass + zrMass)\n        u238Frac = (1.0 - enrichmentFrac) * uMass / (uMass + zrMass)\n\n        self.fuel.adjustMassFrac(nuclideToAdjust=\"U235\", elementToHoldConstant=\"ZR\", val=u235Frac)\n        self.assertAlmostEqual(self.fuel.getMassFrac(\"U235\"), u235Frac)\n        self.assertAlmostEqual(self.fuel.getMassFrac(\"U238\"), u238Frac)\n        self.assertAlmostEqual(self.fuel.getMassFrac(\"ZR\"), zrFrac)\n\n    def test_adjustMassFrac_U(self):\n        self.fuel.adjustMassFrac(elementToAdjust=\"U\", val=0.7)\n        uFrac = self.fuel.getMassFrac(\"U\")\n        u235Enrichment = 0.1\n        u238Frac = (1.0 - u235Enrichment) * uFrac\n        u235Frac = u235Enrichment * uFrac\n\n        self.assertAlmostEqual(self.fuel.getMassFrac(\"U235\"), u235Frac)\n        self.assertAlmostEqual(self.fuel.getMassFrac(\"U238\"), u238Frac)\n        self.assertAlmostEqual(self.fuel.getMassFrac(\"ZR\"), 0.30)\n\n    def test_adjustMassFrac_clear_ZR(self):\n        self.fuel.adjustMassFrac(nuclideToAdjust=\"ZR\", val=0.0)\n        self.assertAlmostEqual(self.fuel.getMassFrac(\"ZR\"), 0.0)\n        self.assertAlmostEqual(self.fuel.getNumberDensity(\"ZR\"), 0.0)\n        self.assertAlmostEqual(self.fuel.getMassFrac(\"U235\") + self.fuel.getMassFrac(\"U238\"), 1.0)\n\n    def test_adjustMassFrac_set_ZR(self):\n        u235Enrichment = 0.1\n        zrFrac = 0.1\n        uFrac = 1.0 - zrFrac\n        u238Frac = (1.0 - u235Enrichment) * uFrac\n        u235Frac = u235Enrichment * uFrac\n\n        self.fuel.adjustMassFrac(nuclideToAdjust=\"ZR\", val=zrFrac)\n        
self.assertAlmostEqual(self.fuel.getMassFrac(\"U235\"), u235Frac)\n        self.assertAlmostEqual(self.fuel.getMassFrac(\"U238\"), u238Frac)\n        self.assertAlmostEqual(self.fuel.getMassFrac(\"ZR\"), zrFrac)\n\n    def test_adjustMassFrac_leave_same(self):\n        zrFrac = 0.1\n        u238Enrichment = 0.9\n        uFrac = 1.0 - zrFrac\n        u238Frac = uFrac * u238Enrichment\n\n        self.fuel.adjustMassFrac(nuclideToAdjust=\"ZR\", val=zrFrac)\n        self.assertAlmostEqual(self.fuel.getMassFrac(\"U238\"), u238Frac)\n        self.assertAlmostEqual(self.fuel.getMassFrac(\"ZR\"), zrFrac)\n\n    def test_adjustMassEnrichment(self):\n        self.fuel.adjustMassEnrichment(0.2)\n        self.assertAlmostEqual(self.fuel.getMassFrac(\"U235\"), 0.18)\n        self.assertAlmostEqual(self.fuel.getMassFrac(\"U238\"), 0.72)\n        self.assertAlmostEqual(self.fuel.getMassFrac(\"ZR\"), 0.1)\n\n    def test_getEnrichment(self):\n        self.fuel.adjustMassEnrichment(0.3)\n        self.assertAlmostEqual(self.fuel.getEnrichment(), 0.3)\n\n    def test_finalizeLoadDBAdjustsTD(self):\n        \"\"\"Ensure component is fully loaded through finalize methods.\"\"\"\n        tdFrac = 0.54321\n        comp = self.fuel\n        comp.p.theoreticalDensityFrac = tdFrac\n        comp.finalizeLoadingFromDB()\n        self.assertEqual(comp.material.getTD(), tdFrac)\n\n\nclass TestPinQuantities(unittest.TestCase):\n    \"\"\"Test methods that involve retrieval of pin quantities.\"\"\"\n\n    def setUp(self):\n        self.r = loadTestReactor(inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\")[1]\n\n    def test_getPinMgFluxes(self):\n        \"\"\"Test proper retrieval of pin multigroup flux for fuel component.\"\"\"\n        # Get a fuel block and its fuel component from the core\n        fuelBlock: Block = self.r.core.getFirstBlock(flags.Flags.FUEL)\n        fuelComponent: Component = fuelBlock.getComponent(flags.Flags.FUEL)\n        numPins = int(fuelComponent.p.mult)\n   
     self.assertEqual(numPins, 169)\n\n        # Set pin fluxes at block level\n        fuelBlock.assignPinIndices()\n        pinMgFluxes = np.random.rand(numPins, 33)\n        pinMgFluxesAdj = np.random.rand(numPins, 33)\n        pinMgFluxesGamma = np.random.rand(numPins, 33)\n        fuelBlock.setPinMgFluxes(pinMgFluxes)\n        fuelBlock.setPinMgFluxes(pinMgFluxesAdj, adjoint=True)\n        fuelBlock.setPinMgFluxes(pinMgFluxesGamma, gamma=True)\n\n        # Retrieve from component to ensure they match\n        simPinMgFluxes = fuelComponent.getPinMgFluxes()\n        simPinMgFluxesAdj = fuelComponent.getPinMgFluxes(adjoint=True)\n        simPinMgFluxesGamma = fuelComponent.getPinMgFluxes(gamma=True)\n        assert_equal(pinMgFluxes, simPinMgFluxes)\n        assert_equal(pinMgFluxesAdj, simPinMgFluxesAdj)\n        assert_equal(pinMgFluxesGamma, simPinMgFluxesGamma)\n\n        # Check assertion for adjoint gamma flux\n        with self.assertRaisesRegex(ValueError, \"Adjoint gamma flux is currently unsupported.\"):\n            fuelComponent.getPinMgFluxes(adjoint=True, gamma=True)\n\n        # Check assertion for not-found parameter\n        fuelBlock.p.pinMgFluxes = None\n        with self.assertRaisesRegex(\n            ValueError,\n            f\"Failure getting pinMgFluxes from {fuelComponent} via parent {fuelBlock}\",\n        ):\n            fuelComponent.getPinMgFluxes()\n"
  },
  {
    "path": "armi/reactor/tests/test_composites.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for the composite pattern.\"\"\"\n\nimport itertools\nimport logging\nimport unittest\nfrom copy import deepcopy\n\nfrom armi import nuclearDataIO, runLog, settings, utils\nfrom armi.nucDirectory import nucDir\nfrom armi.nucDirectory.nuclideBases import NuclideBase, NuclideBases\nfrom armi.physics.neutronics.fissionProductModel.tests.test_lumpedFissionProduct import (\n    getDummyLFPFile,\n)\nfrom armi.reactor import assemblies, components, composites, grids, parameters\nfrom armi.reactor.blueprints import assemblyBlueprint\nfrom armi.reactor.components import basicShapes\nfrom armi.reactor.flags import Flags, TypeSpec\nfrom armi.reactor.tests.test_blocks import loadTestBlock\nfrom armi.testing import loadTestReactor\nfrom armi.tests import ISOAA_PATH, TEST_ROOT, mockRunLogs\n\n\nclass MockBP:\n    nuclideBases = NuclideBases()\n    allNuclidesInProblem = set(nuclideBases.byName.keys())\n    \"\"\":meta hide-value:\"\"\"\n    activeNuclides = allNuclidesInProblem\n    \"\"\":meta hide-value:\"\"\"\n    inactiveNuclides = set()\n    elementsToExpand = set()\n    customIsotopics = {}\n\n\ndef getDummyParamDefs():\n    dummyDefs = parameters.ParameterDefinitionCollection()\n    with dummyDefs.createBuilder() as pb:\n        pb.defParam(\"type\", units=utils.units.UNITLESS, description=\"Fake type\")\n    return dummyDefs\n\n\n_testGrid = 
grids.CartesianGrid.fromRectangle(0.01, 0.01)\n\n\nclass DummyComposite(composites.Composite):\n    pDefs = getDummyParamDefs()\n\n    def __init__(self, name, i=0):\n        composites.Composite.__init__(self, name)\n        self.p.type = name\n        self.spatialLocator = grids.IndexLocation(i, i, i, _testGrid)\n\n\nclass DummyLeaf(composites.Composite):\n    pDefs = getDummyParamDefs()\n\n    def __init__(self, name, i=0):\n        composites.Composite.__init__(self, name)\n        self.p.type = name\n        self.spatialLocator = grids.IndexLocation(i, i, i, _testGrid)\n        # Some special material attribute for testing getChildren(includeMaterials=True)\n        self.material = (\"hello\", \"world\")\n\n    def getChildren(self, deep=False, generationNum=1, includeMaterials=False, predicate=None):\n        \"\"\"Return empty list, representing that this object has no children.\"\"\"\n        return []\n\n    def getChildrenWithFlags(self, typeSpec: TypeSpec, exactMatch=True):\n        \"\"\"Return empty list, representing that this object has no children.\"\"\"\n        return []\n\n    def getBoundingCircleOuterDiameter(self, Tc=None, cold=False):\n        return 1.0\n\n    def iterComponents(self, typeSpec=None, exact=False):\n        if self.hasFlags(typeSpec, exact):\n            yield self\n\n\nclass TestCompositePattern(unittest.TestCase):\n    def setUp(self):\n        self.cs = settings.Settings()\n        runLog.setVerbosity(\"error\")\n        self.container = DummyComposite(\"inner test fuel\", 99)\n\n        # Make sure the Composite is within the Reactor\n        _o, r = loadTestReactor(TEST_ROOT, inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\")\n        r.core.getFirstBlock().add(self.container)\n        lib = nuclearDataIO.isotxs.readBinary(ISOAA_PATH)\n        r.core.lib = lib\n\n        for i in range(5):\n            leaf = DummyLeaf(f\"duct {i}\", i + 100)\n            leaf.setType(\"duct\")\n            
self.container.add(leaf)\n        nested = DummyComposite(\"clad\", 98)\n        nested.setType(\"clad\")\n        self.cladChild = nested\n        self.secondGen = DummyComposite(\"liner\", 97)\n        self.thirdGen = DummyLeaf(\"pin 77\", 33)\n        self.secondGen.add(self.thirdGen)\n        nested.add(self.secondGen)\n        self.container.add(nested)\n        # Composite tree structure in list of lists for testing. tree[i] contains the children at generation / depth i\n        self.tree: list[list[composites.Composite]] = [\n            [self.container],\n            list(self.container),\n            [self.secondGen],\n            [self.thirdGen],\n        ]\n\n    def test_composite(self):\n        \"\"\"Test basic Composite things.\n\n        .. test:: Composites are part of a hierarchical model.\n            :id: T_ARMI_CMP0\n            :tests: R_ARMI_CMP\n        \"\"\"\n        container = self.container\n\n        children = container.getChildren()\n        for child in children:\n            self.assertEqual(child.parent, container)\n\n        allChildren = container.getChildren(deep=True)\n        self.assertEqual(len(allChildren), 8)\n\n    def test_printContents(self):\n        with mockRunLogs.BufferLog() as mock:\n            self.assertEqual(\"\", mock.getStdout())\n            testName = \"test_printContents\"\n            runLog.LOG.startLog(testName)\n            runLog.LOG.setVerbosity(logging.IMPORTANT)\n\n            self.container.printContents(includeNuclides=True)\n            logMsg = mock.getStdout()\n\n        self.assertIn(\"DummyComposite\", logMsg)\n        self.assertIn(\"DummyLeaf\", logMsg)\n\n    def test_iterComponents(self):\n        self.assertIn(self.thirdGen, list(self.container.iterComponents()))\n\n    def test_getChildren(self):\n        \"\"\"Test the get children method.\n\n        .. 
test:: Composites are part of a hierarchical model.\n            :id: T_ARMI_CMP1\n            :tests: R_ARMI_CMP\n        \"\"\"\n        firstGen = self.container.getChildren()\n        self.assertEqual(firstGen, self.tree[1])\n\n        secondGen = self.container.getChildren(generationNum=2)\n        self.assertEqual(secondGen, self.tree[2])\n\n        self.assertIs(secondGen[0], self.secondGen)\n        third = self.container.getChildren(generationNum=3)\n        self.assertEqual(third, self.tree[3])\n        self.assertIs(third[0], self.thirdGen)\n\n        allC = self.container.getChildren(deep=True)\n        expected = self.tree[1] + self.tree[2] + self.tree[3]\n        self.assertTrue(\n            all(a is e for a, e in itertools.zip_longest(allC, expected)),\n            msg=f\"Deep traversal differs: {allC=} != {expected=}\",\n        )\n\n        onlyLiner = self.container.getChildren(deep=True, predicate=lambda o: o.p.type == \"liner\")\n        self.assertEqual(len(onlyLiner), 1)\n        self.assertIs(onlyLiner[0], self.secondGen)\n\n    def test_getChildrenWithMaterials(self):\n        \"\"\"Test the ability for getChildren to place the material after the object.\"\"\"\n        withMaterials = self.container.getChildren(deep=True, includeMaterials=True)\n        # Grab the iterable so we can control the progression\n        items = iter(withMaterials)\n        for item in items:\n            expectedMat = getattr(item, \"material\", None)\n            if expectedMat is None:\n                continue\n            # Material should be the next item in the list\n            actualMat = next(items)\n            self.assertIs(actualMat, expectedMat)\n            break\n        else:\n            raise RuntimeError(\"No materials found with includeMaterials=True\")\n\n    def test_iterChildren(self):\n        \"\"\"Detailed testing on Composite.iterChildren.\"\"\"\n\n        def compareIterables(actual, expected: list[composites.Composite]):\n            
for e in expected:\n                a = next(actual)\n                self.assertIs(a, e)\n            # Ensure we've consumed the actual iterator and there's nothing left\n            with self.assertRaises(StopIteration):\n                next(actual)\n\n        compareIterables(self.container.iterChildren(), self.tree[1])\n        compareIterables(self.container.iterChildren(generationNum=2), self.tree[2])\n        compareIterables(self.container.iterChildren(generationNum=3), self.tree[3])\n        compareIterables(\n            self.container.iterChildren(deep=True),\n            self.tree[1] + self.tree[2] + self.tree[3],\n        )\n\n    def test_iterAndGetChildren(self):\n        \"\"\"Compare that iter children and get children are consistent.\"\"\"\n        self._compareIterGetChildren()\n        self._compareIterGetChildren(deep=True)\n        self._compareIterGetChildren(generationNum=2)\n        # Some wacky predicate just to check we can use that too\n        self._compareIterGetChildren(deep=True, predicate=lambda c: len(c.name) % 3)\n\n    def _compareIterGetChildren(self, **kwargs):\n        fromIter = self.container.iterChildren(**kwargs)\n        fromGetter = self.container.getChildren(**kwargs)\n        msg = repr(kwargs)\n        # Use zip longest just in case one iterator comes up short\n        for count, (it, gt) in enumerate(itertools.zip_longest(fromIter, fromGetter)):\n            self.assertIs(it, gt, msg=f\"{count=} :: {msg}\")\n\n    def test_simpleIterChildren(self):\n        \"\"\"Test that C.iterChildren() is identical to iter(C).\"\"\"\n        for count, (fromNative, fromIterChildren) in enumerate(\n            itertools.zip_longest(self.container, self.container.iterChildren())\n        ):\n            self.assertIs(fromIterChildren, fromNative, msg=count)\n\n    def test_iterChildrenWithMaterials(self):\n        \"\"\"Test that C.iterChildrenWithMaterials gets materials following their parent component.\"\"\"\n        items = 
iter(self.container.iterChildrenWithMaterials(deep=True))\n        for item in items:\n            if isinstance(item, components.Component):\n                mat = next(items)\n                self.assertIs(mat, item.material)\n\n    def test_getName(self):\n        \"\"\"Test the getName method.\"\"\"\n        self.assertEqual(self.secondGen.getName(), \"liner\")\n        self.assertEqual(self.thirdGen.getName(), \"pin 77\")\n        self.assertEqual(self.secondGen.getName(), \"liner\")\n        self.assertEqual(self.container.getName(), \"inner test fuel\")\n\n    def test_sort(self):\n        # in this case, the children should start sorted\n        c0 = [c.name for c in self.container]\n        self.container.sort()\n        c1 = [c.name for c in self.container]\n        self.assertNotEqual(c0, c1)\n\n        # verify repeated sorting behave\n        for _ in range(3):\n            self.container.sort()\n            ci = [c.name for c in self.container]\n            self.assertEqual(c1, ci)\n\n        # break the order\n        children = self.container.getChildren()\n        self.container._children = children[2:] + children[:2]\n        c2 = [c.name for c in self.container]\n        self.assertNotEqual(c1, c2)\n\n        # verify the sort order\n        self.container.sort()\n        c3 = [c.name for c in self.container]\n        self.assertEqual(c1, c3)\n\n    def test_areChildernOfType(self):\n        expectedResults = [False, False, False, False, False, True]\n        for i, b in enumerate(self.container.doChildrenHaveFlags(Flags.CLAD)):\n            self.assertEqual(b, expectedResults[i])\n\n    def test_containsAtLeastOneChildOfType(self):\n        c = self.container\n        self.assertTrue(c.containsAtLeastOneChildWithFlags(Flags.DUCT))\n        self.assertTrue(c.containsAtLeastOneChildWithFlags(Flags.CLAD))\n\n    def test_containsOnlyChildrenOfType(self):\n        c = self.container\n        for b in c:\n            b.setType(\"bond\")\n        
self.assertTrue(c.containsOnlyChildrenWithFlags(Flags.BOND))\n\n    def test_nameContains(self):\n        c = self.container\n        c.setName(\"test one two three\")\n        self.assertTrue(c.nameContains(\"one\"))\n        self.assertTrue(c.nameContains(\"One\"))\n        self.assertTrue(c.nameContains(\"THREE\"))\n        self.assertFalse(c.nameContains(\"nope\"))\n        self.assertFalse(c.nameContains([\"nope\"]))\n        self.assertTrue(c.nameContains([\"one\", \"TWO\", \"three\"]))\n        self.assertTrue(c.nameContains([\"nope\", \"dope\", \"three\"]))\n\n    def test_nucSpec(self):\n        self.assertEqual(self.container._getNuclidesFromSpecifier(\"U235\"), [\"U235\"])\n        uNucs = self.container._getNuclidesFromSpecifier(\"U\")\n        self.assertIn(\"U235\", uNucs)\n        self.assertIn(\"U241\", uNucs)\n        self.assertIn(\"U227\", uNucs)\n        self.assertEqual(self.container._getNuclidesFromSpecifier([\"U238\", \"U235\"]), [\"U235\", \"U238\"])\n\n        uzr = self.container._getNuclidesFromSpecifier([\"U238\", \"U235\", \"ZR\"])\n        self.assertIn(\"U235\", uzr)\n        self.assertIn(\"ZR92\", uzr)\n        self.assertNotIn(\"ZR\", uzr)\n\n        puIsos = self.container._getNuclidesFromSpecifier([\"PU\"])  # PU is special because it has no natural isotopics\n        self.assertIn(\"PU239\", puIsos)\n        self.assertNotIn(\"PU\", puIsos)\n\n        self.assertEqual(self.container._getNuclidesFromSpecifier([\"FE\", \"FE56\"]).count(\"FE56\"), 1)\n\n    def test_hasFlags(self):\n        \"\"\"Ensure flags are queryable.\n\n        .. 
test:: Flags can be queried.\n            :id: T_ARMI_CMP_FLAG\n            :tests: R_ARMI_CMP_FLAG\n        \"\"\"\n        self.container.setType(\"fuel\")\n        self.assertFalse(self.container.hasFlags(Flags.SHIELD | Flags.FUEL, exact=True))\n        self.assertTrue(self.container.hasFlags(Flags.FUEL))\n        self.assertTrue(self.container.hasFlags(None))\n\n    def test_hasFlagsSubstring(self):\n        \"\"\"Make sure typespecs with the same word in them no longer match.\"\"\"\n        self.container.setType(\"intercoolant\")\n        self.assertFalse(self.container.hasFlags(Flags.COOLANT))\n        self.assertFalse(self.container.hasFlags(Flags.COOLANT, exact=True))\n        self.assertTrue(self.container.hasFlags(Flags.INTERCOOLANT, exact=True))\n\n        self.container.setType(\"innerduct\")\n        self.assertFalse(self.container.hasFlags(Flags.DUCT, exact=True))\n\n    def test_hasFlagsNoTypeSpecified(self):\n        self.container.setType(\"fuel\")\n        types = [None, [], [None]]\n        for t in types:\n            self.assertTrue(self.container.hasFlags(t))\n            self.assertFalse(self.container.hasFlags(t, exact=True))\n\n    def test_calcTotalParam(self):\n        minSerialNumberCount = 21.0\n        kids = self.container.getChildren()\n\n        tot = self.container.calcTotalParam(\"serialNum\", kids)\n        self.assertGreaterEqual(tot, minSerialNumberCount)\n\n        tot = self.container.calcTotalParam(\"serialNum\", kids, calcBasedOnFullObj=True)\n        self.assertGreaterEqual(tot, minSerialNumberCount)\n\n        tot = self.container.calcTotalParam(\"serialNum\", kids, typeSpec=Flags.FUEL)\n        self.assertEqual(tot, 0.0)\n\n        with self.assertRaises(ValueError):\n            self.container.calcTotalParam(\n                \"power\", self.container.getChildren(), addSymmetricPositions=True, calcBasedOnFullObj=True\n            )\n\n    def test_getBoundingCirlceOuterDiameter(self):\n        od = 
self.container.getBoundingCircleOuterDiameter()\n        self.assertAlmostEqual(od, len(list(self.container.iterComponents())))\n\n    def test_getParamNames(self):\n        params = self.container.getParamNames()\n        self.assertEqual(len(params), 3)\n        self.assertIn(\"flags\", params)\n        self.assertIn(\"serialNum\", params)\n        self.assertIn(\"type\", params)\n\n    def test_updateVolume(self):\n        self.assertAlmostEqual(self.container.getVolume(), 0)\n        self.container._updateVolume()\n        self.assertAlmostEqual(self.container.getVolume(), 0)\n\n    def test_expandLFPs(self):\n        # simple test, with no lumped fission product mappings\n        numDens = {\"NA23\": 1.0}\n        numDens = self.container._expandLFPs(numDens)\n        self.assertEqual(len(numDens), 1)\n\n        # set the lumped fission product mapping\n        fpd = getDummyLFPFile()\n        lfps = fpd.createLFPsFromFile()\n        self.container.setLumpedFissionProducts(lfps)\n\n        # get back the lumped fission product mapping, just to check\n        lfp = self.container.getLumpedFissionProductCollection()\n        self.assertEqual(len(lfp), 3)\n        self.assertIn(\"LFP35\", lfp)\n        self.assertIn(\"LFP38\", lfp)\n        self.assertIn(\"LFP39\", lfp)\n\n        # quick test WITH some lumped fission products in the mix\n        numDens = {\"NA23\": 1.0, \"LFP35\": 2.0}\n        numDens = self.container._expandLFPs(numDens)\n        self.assertEqual(len(numDens), 9)\n        self.assertEqual(numDens[\"MO99\"], 0)\n\n    def test_setChildrenLumpedFissionProducts(self):\n        # build a lumped fission product collection\n        fpd = getDummyLFPFile()\n        lfps = fpd.createLFPsFromFile()\n\n        # validate that the LFP collection is None\n        self.container.setChildrenLumpedFissionProducts(None)\n        for c in self.container:\n            self.assertIsNone(c._lumpedFissionProducts)\n\n        # validate that the LFP collection is 
not None\n        self.container.setChildrenLumpedFissionProducts(lfps)\n        for c in self.container:\n            self.assertIsNotNone(c._lumpedFissionProducts)\n\n    def test_requiresLumpedFissionProds(self):\n        # build a lumped fission product collection\n        fpd = getDummyLFPFile()\n        lfps = fpd.createLFPsFromFile()\n        self.container.setChildrenLumpedFissionProducts(lfps)\n\n        # test the null case\n        result = self.container.requiresLumpedFissionProducts(None)\n        self.assertFalse(result)\n\n        # test the usual case\n        result = self.container.requiresLumpedFissionProducts(set())\n        self.assertFalse(result)\n\n        # test a positive case\n        result = self.container.requiresLumpedFissionProducts([\"LFP35\"])\n        self.assertTrue(result)\n\n    def test_getLumpedFissionProdsIfNullCase(self):\n        # build a lumped fission product collection\n        fpd = getDummyLFPFile()\n        lfps = fpd.createLFPsFromFile()\n        self.container.setChildrenLumpedFissionProducts(lfps)\n\n        # test the null case\n        result = self.container.getLumpedFissionProductsIfNecessary(None)\n        self.assertEqual(len(result), 0)\n\n        # test a positive case\n        result = self.container.getLumpedFissionProductsIfNecessary([\"LFP35\"])\n        self.assertGreater(len(result), 0)\n\n    def test_getIntegratedMgFlux(self):\n        mgFlux = self.container.getIntegratedMgFlux()\n        self.assertEqual(mgFlux, [0.0])\n\n    def test_getReactionRates(self):\n        # test the null case\n        rRates = self.container.getReactionRates(\"U235\")\n        self.assertEqual(len(rRates), 6)\n        self.assertEqual(sum([r for r in rRates.values()]), 0)\n\n        # init reactor\n        _o, r = loadTestReactor(inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\")\n        lib = nuclearDataIO.isotxs.readBinary(ISOAA_PATH)\n        r.core.lib = lib\n\n        # test on a Component\n        b = 
r.core.getFirstAssembly().getFirstBlock()\n        b.p.mgFlux = 1\n        c = b.getComponents()[0]\n        rRatesComp = c.getReactionRates(\"U235\")\n        self.assertEqual(len(rRatesComp), 6)\n        self.assertGreater(sum([r for r in rRatesComp.values()]), 0)\n\n        # test on a Block\n        rRatesBlock = b.getReactionRates(\"U235\")\n        self.assertEqual(len(rRatesBlock), 6)\n        self.assertGreater(sum([r for r in rRatesBlock.values()]), 0)\n\n        # test on an Assembly\n        assem = r.core.getFirstAssembly()\n        rRatesAssem = assem.getReactionRates(\"U235\")\n        self.assertEqual(len(rRatesAssem), 6)\n        self.assertGreater(sum([r for r in rRatesAssem.values()]), 0)\n\n        # test on a Core\n        rRatesCore = r.core.getReactionRates(\"U235\")\n        self.assertEqual(len(rRatesCore), 6)\n        self.assertGreater(sum([r for r in rRatesCore.values()]), 0)\n\n        # test on a Reactor\n        rRatesReactor = r.getReactionRates(\"U235\")\n        self.assertEqual(len(rRatesReactor), 6)\n        self.assertGreater(sum([r for r in rRatesReactor.values()]), 0)\n\n        # test that all different levels of the hierarchy have the same reaction rates\n        for key, val in rRatesBlock.items():\n            self.assertAlmostEqual(rRatesAssem[key], val)\n            self.assertAlmostEqual(rRatesCore[key], val)\n            self.assertAlmostEqual(rRatesReactor[key], val)\n\n    def test_getFirstComponent(self):\n        c = self.container.getComponents()[0]\n        c0 = self.container.getFirstComponent()\n        self.assertIs(c, c0)\n        self.assertIsInstance(c0, composites.Composite)\n\n        c = self.cladChild.getComponents()[0]\n        c0 = self.cladChild.getFirstComponent()\n        self.assertIs(c, c0)\n        self.assertIsInstance(c0, composites.Composite)\n\n        c = self.secondGen.getComponents()[0]\n        c0 = self.secondGen.getFirstComponent()\n        self.assertIs(c, c0)\n        
self.assertIsInstance(c0, composites.Composite)\n\n        b = loadTestBlock()\n        c = b.getComponents()[0]\n        c0 = b.getFirstComponent()\n        self.assertIs(c, c0)\n        self.assertIsInstance(c0, composites.Composite)\n\n        # covering edge case: someone passes in a flag that doesn't exist on on the object\n        with self.assertRaises(ValueError):\n            b.getFirstComponent(typeSpec=Flags.POISON)\n\n    def test_getReactionRateDict(self):\n        lib = nuclearDataIO.isotxs.readBinary(ISOAA_PATH)\n        rxRatesDict = self.container._getReactionRateDict(nucName=\"PU239\", lib=lib, xsSuffix=\"AA\", mgFlux=1, nDens=1)\n        self.assertEqual(rxRatesDict[\"nG\"], sum(lib[\"PU39AA\"].micros.nGamma))\n\n    def test_syncParameters(self):\n        data = [{\"serialNum\": 123}, {\"flags\": \"FAKE\"}]\n        numSynced = self.container._syncParameters(data, {})\n        self.assertEqual(numSynced, 2)\n\n    def test_iterChildrenWithFlags(self):\n        expectedChildren = {c for c in self.container if c.hasFlags(Flags.DUCT)}\n        found = set()\n        for c in self.container.iterChildrenWithFlags(Flags.DUCT):\n            self.assertIn(c, expectedChildren)\n            found.add(c)\n        self.assertSetEqual(found, expectedChildren)\n\n    def test_iterChildrenOfType(self):\n        clads = self.container.iterChildrenOfType(\"clad\")\n        first = next(clads)\n        self.assertIs(first, self.cladChild)\n        with self.assertRaises(StopIteration):\n            next(clads)\n\n    def test_removeAll(self):\n        \"\"\"Test the ability to remove all children of a composite.\"\"\"\n        self.container.removeAll()\n        self.assertEqual(len(self.container), 0)\n        # Nothing to iterate over\n        items = iter(self.container)\n        with self.assertRaises(StopIteration):\n            next(items)\n        for child in self.tree[1]:\n            self.assertIsNone(child.parent)\n\n    def test_setChildren(self):\n   
     \"\"\"Test the ability to override children on a composite.\"\"\"\n        newChildren = self.tree[2] + self.tree[3]\n        oldChildren = list(self.container)\n        self.container.setChildren(newChildren)\n        self.assertEqual(len(self.container), len(newChildren))\n        for old in oldChildren:\n            self.assertIsNone(old.parent)\n        for actualNew, expectedNew in zip(newChildren, self.container):\n            self.assertIs(actualNew, expectedNew)\n\n    def test_add(self):\n        # get the size of the container at the start\n        lenContainer = len(self.container)\n\n        # add a dummy leaf to the container\n        leaf = DummyLeaf(\"duct 9\", 99)\n        leaf.setType(\"duct\")\n        self.container.add(leaf)\n\n        # verify the container's size has increased by one\n        self.assertEqual(len(self.container), lenContainer + 1)\n\n    def test_extend(self):\n        # generate a list of elements to add to this container\n        elements = []\n        lenElements = 5\n        for i in range(lenElements):\n            leaf = DummyLeaf(f\"duct {i}\", i + 100)\n            leaf.setType(\"duct\")\n            elements.append(leaf)\n\n        # extend the container by the above list\n        lenContainer = len(self.container)\n        self.container.extend(elements)\n        self.assertEqual(len(self.container), lenContainer + lenElements)\n\n        # show all the composites in the block have the block as the parent\n        for c in self.container:\n            self.assertIs(c.parent, self.container)\n\n\nclass TestCompositeTree(unittest.TestCase):\n    blueprintYaml = \"\"\"\n    name: test assembly\n    height: [1, 1]  # 2 blocks\n    axial mesh points: [1, 1]\n    xs types: [A, A]\n    specifier: AA\n    blocks:\n    - &block_metal_fuel\n        name: metal fuel\n        fuel: &component_metal_fuel_fuel\n            shape: Circle\n            material: UZr\n            Tinput: 500\n            Thot: 500.0\n            
id: 0.0\n            od: 1.0\n            mult: 7\n        clad: &component_metal_fuel_clad\n            shape: Circle\n            material: HT9\n            Tinput: 450.0\n            Thot: 450.0\n            id: 1.09\n            od: 1.1\n            mult: 7\n        bond: &component_metal_fuel_bond\n            shape: Circle\n            material: Sodium\n            Tinput: 450.0\n            Thot: 450.0\n            id: fuel.od\n            od: clad.id\n            mult: 7\n        coolant: &component_metal_fuel_coolant\n            shape: DerivedShape\n            material: Sodium\n            Tinput: 450.0\n            Thot: 450.0\n        duct: &component_metal_fuel_duct\n            shape: Hexagon\n            material: HT9\n            Tinput: 25.0\n            Thot: 450.0\n            ip: 16.0\n            mult: 1.0\n            op: 16.6\n    - &block_oxide_fuel\n        name: mox fuel\n        fuel:\n            <<: *component_metal_fuel_fuel\n            material: MOX\n        clad: *component_metal_fuel_clad\n        bond: *component_metal_fuel_bond\n        coolant: *component_metal_fuel_coolant\n        duct: *component_metal_fuel_duct\n        \"\"\"\n\n    def setUp(self):\n        self.block = loadTestBlock()\n        self.r = self.block.core.r\n        self.block.setHeight(100.0)\n        self.refDict = {\n            \"U235\": 0.00275173784234,\n            \"U238\": 0.0217358415457,\n            \"W182\": 1.09115150103e-05,\n            \"W183\": 5.89214392093e-06,\n            \"W184\": 1.26159558164e-05,\n            \"W186\": 1.17057432664e-05,\n            \"V\": 2e-2,\n            \"NA23\": 2e-2,\n            \"ZR\": 0.00709003962772,\n        }\n        self.block.setNumberDensities(self.refDict)\n\n    def test_ordering(self):\n        a = assemblies.Assembly(\"dummy\")\n        a.spatialGrid = grids.AxialGrid.fromNCells(2, armiObject=a)\n        otherBlock = deepcopy(self.block)\n        a.add(self.block)\n        a.add(otherBlock)\n  
      self.assertTrue(self.block < otherBlock)\n        locator = self.block.spatialLocator\n        self.block.spatialLocator = otherBlock.spatialLocator\n        otherBlock.spatialLocator = locator\n        self.assertTrue(otherBlock < self.block)\n\n        # test some edge cases\n        otherBlock.spatialLocator._grid = None\n        with self.assertRaises(ValueError):\n            otherBlock < self.block\n\n        otherBlock.spatialLocator = None\n        with self.assertRaises(ValueError):\n            otherBlock < self.block\n\n    def test_getAncestorWithFlags(self):\n        # this test block is not part of an assembly, so it should not have a parent/ancestor\n        parent = self.block.getAncestorWithFlags(Flags.FUEL)\n        self.assertIsNone(parent)\n\n        # pick a component that is not part of a fuel composite, so it should not have a fuel ancestor\n        grandchild = self.block.getFirstComponent()\n        child = grandchild.getAncestorWithFlags(Flags.FUEL)\n        self.assertIsNone(child)\n\n        # test the usual case: get a ancestor with the fuel flag\n        child = self.block.getChildrenWithFlags(Flags.FUEL)[0]\n        grandchild = child.getFirstComponent()\n        child1 = grandchild.getAncestorWithFlags(Flags.FUEL)\n        self.assertEqual(child1, grandchild)\n\n        # default case: the only ancestor with the fuel flag is the composite itself, so return that\n        child2 = child.getAncestorWithFlags(Flags.FUEL)\n        self.assertEqual(child2, child)\n\n    def test_changeNDensByFactor(self):\n        b = deepcopy(self.block.getChildrenWithFlags(Flags.FUEL)[0])\n\n        # test inital state\n        dens = b.getNumberDensities()\n        zrDens = dens[\"ZR\"]\n        u235Dens = dens[\"U235\"]\n        u238Dens = dens[\"U238\"]\n\n        b.changeNDensByFactor(0.5)\n\n        # test new state\n        dens = b.getNumberDensities()\n        self.assertAlmostEqual(dens[\"ZR\"], zrDens / 2, delta=1e-6)\n        
self.assertAlmostEqual(dens[\"U235\"], u235Dens / 2, delta=1e-6)\n        self.assertAlmostEqual(dens[\"U238\"], u238Dens / 2, delta=1e-6)\n\n    def test_summing(self):\n        a = assemblies.Assembly(\"dummy\")\n        a.spatialGrid = grids.AxialGrid.fromNCells(2, armiObject=a)\n        otherBlock = deepcopy(self.block)\n        a.add(self.block)\n        a.add(otherBlock)\n\n        b = self.block + otherBlock\n        self.assertEqual(len(b), 26)\n        self.assertFalse(b[0].is3D)\n        self.assertIn(\"Circle\", str(b[0]))\n        self.assertFalse(b[-1].is3D)\n        self.assertIn(\"Hexagon\", str(b[-1]))\n\n    def test_constituentReport(self):\n        runLog.info(self.r.core.constituentReport())\n        runLog.info(self.r.core.getFirstAssembly().constituentReport())\n        runLog.info(self.r.core.getFirstBlock().constituentReport())\n        runLog.info(self.r.core.getFirstBlock().getComponents()[0].constituentReport())\n\n    def test_getNuclides(self):\n        \"\"\"\n        The getNuclides should return all keys that have ever been in this block, including values\n        that are at trace.\n        \"\"\"\n        cur = self.block.getNuclides()\n        ref = self.refDict.keys()\n        for key in ref:\n            self.assertIn(key, cur)\n        self.assertIn(\"FE\", cur)  # this is in at trace value.\n\n    def test_getFuelMass(self):\n        \"\"\"\n        This test creates a dummy assembly and ensures that the assembly, block, and fuel component\n        masses are consistent. 
`getFuelMass` ensures that the fuel component is used to `getMass`.\n        \"\"\"\n        cs = settings.Settings()\n        assemDesign = assemblyBlueprint.AssemblyBlueprint.load(self.blueprintYaml)\n        a = assemDesign.construct(cs, MockBP)\n\n        fuelMass = 0.0\n        for b in a:\n            fuel = b.getComponent(Flags.FUEL)\n            fuelMass += fuel.getMass()\n            self.assertEqual(b.getFuelMass(), fuel.getMass())\n\n        self.assertEqual(fuelMass, a.getFuelMass())\n\n    def test_getChildrenIncludeMaterials(self):\n        \"\"\"Test that the ``StateRetainer`` retains material properties when they are modified.\"\"\"\n        cs = settings.Settings()\n        assemDesign = assemblyBlueprint.AssemblyBlueprint.load(self.blueprintYaml)\n        a = assemDesign.construct(cs, MockBP)\n        component = a[0][0]\n        referenceDensity = component.material.pseudoDensity(Tc=200)\n        self.assertEqual(component.material.pseudoDensity(Tc=200), referenceDensity)\n\n    def test_getHMMass(self):\n        fuelDims = {\"Tinput\": 273.0, \"Thot\": 273.0, \"od\": 0.76, \"id\": 0.0, \"mult\": 1.0}\n        self.fuelComponent = components.Circle(\"fuel\", \"UZr\", **fuelDims)\n        self.block.add(self.fuelComponent)\n\n        self.block.clearNumberDensities()\n        self.refDict = {\n            \"U235\": 0.00275173784234,\n            \"U238\": 0.0217358415457,\n            \"W182\": 1.09115150103e-05,\n            \"W183\": 5.89214392093e-06,\n            \"W184\": 1.26159558164e-05,\n            \"W186\": 1.17057432664e-05,\n            \"V\": 3e-2,\n            \"NA23\": 2e-2,\n            \"ZR\": 0.00709003962772,\n        }\n        self.block.setNumberDensities(self.refDict)\n\n        cur = self.block.getHMMass()\n\n        mass = 0.0\n        for nucName in self.refDict.keys():\n            if nucDir.isHeavyMetal(nucName):\n                mass += self.block.getMass(nucName)\n\n        places = 6\n        
self.assertAlmostEqual(cur, mass, places=places)\n\n    def test_getFPMass(self):\n        fuelDims = {\"Tinput\": 273.0, \"Thot\": 273.0, \"od\": 0.76, \"id\": 0.0, \"mult\": 1.0}\n        self.fuelComponent = components.Circle(\"fuel\", \"UZr\", **fuelDims)\n        self.fuelComponent.material.setMassFrac(\"LFP38\", 0.25)\n        self.block.add(self.fuelComponent)\n\n        refDict = {\"LFP35\": 0.1, \"LFP38\": 0.05, \"LFP39\": 0.7}\n        self.fuelComponent.setNumberDensities(refDict)\n\n        cur = self.block.getFPMass()\n\n        mass = 0.0\n        for nucName in refDict.keys():\n            mass += self.block.getMass(nucName)\n        ref = mass\n\n        places = 6\n        self.assertAlmostEqual(cur, ref, places=places)\n\n    def test_setMassFrac(self):\n        # build test component\n        c = DummyComposite(\"test_setMassFrac\")\n        c.getHeight = lambda: 1.0\n\n        fuelDims = {\"Tinput\": 273.0, \"Thot\": 273.0, \"od\": 0.76, \"id\": 0.0, \"mult\": 1.0}\n        fuelComponent = components.Circle(\"fuel\", \"UZr\", **fuelDims)\n        c.add(fuelComponent)\n\n        # test initial state\n        self.assertEqual(c.getFPMass(), 0.0)\n        self.assertAlmostEqual(c.getHMMass(), 6.468105962375698, delta=1e-6)\n        self.assertAlmostEqual(c.getMass(), 7.186784402639664, delta=1e-6)\n\n        # use setMassFrac\n        c.setMassFrac(\"U235\", 0.99)\n        c.setMassFrac(\"U238\", 0.01)\n\n        # test new state\n        self.assertEqual(c.getFPMass(), 0.0)\n        self.assertAlmostEqual(c.getHMMass(), 7.178895593948443, delta=1e-6)\n        self.assertAlmostEqual(c.getMass(), 7.186784402639666, delta=1e-6)\n\n        # test edge case were zero density\n        c.setNumberDensities({})\n        with self.assertRaises(ValueError):\n            c.setMassFrac(\"U235\", 0.98)\n\n    def test_getFissileMass(self):\n        cur = self.block.getFissileMass()\n\n        mass = 0.0\n        for nucName in self.refDict.keys():\n            
if nucName in NuclideBase.fissile:\n                mass += self.block.getMass(nucName)\n        ref = mass\n\n        places = 6\n        self.assertAlmostEqual(cur, ref, places=places)\n\n    def test_getMaxParam(self):\n        \"\"\"Test getMaxParam().\n\n        .. test:: Composites have parameter collections.\n            :id: T_ARMI_CMP_PARAMS0\n            :tests: R_ARMI_CMP_PARAMS\n        \"\"\"\n        for ci, c in enumerate(self.block):\n            if isinstance(c, basicShapes.Circle):\n                c.p.id = ci\n                lastSeen = c\n                lastIndex = ci\n        cMax, comp = self.block.getMaxParam(\"id\", returnObj=True)\n        self.assertEqual(cMax, lastIndex)\n        self.assertIs(comp, lastSeen)\n\n    def test_getMinParam(self):\n        \"\"\"Test getMinParam().\n\n        .. test:: Composites have parameter collections.\n            :id: T_ARMI_CMP_PARAMS1\n            :tests: R_ARMI_CMP_PARAMS\n        \"\"\"\n        for ci, c in reversed(list(enumerate(self.block))):\n            if isinstance(c, basicShapes.Circle):\n                c.p.id = ci\n                lastSeen = c\n                lastIndex = ci\n        cMax, comp = self.block.getMinParam(\"id\", returnObj=True)\n        self.assertEqual(cMax, lastIndex)\n        self.assertIs(comp, lastSeen)\n\n\nclass TestFlagSerializer(unittest.TestCase):\n    class TestFlagsA(utils.Flag):\n        A = utils.flags.auto()\n        B = utils.flags.auto()\n        C = utils.flags.auto()\n        D = utils.flags.auto()\n\n    class TestFlagsB(utils.Flag):\n        A = utils.flags.auto()\n        B = utils.flags.auto()\n        BPRIME = utils.flags.auto()\n        C = utils.flags.auto()\n        D = utils.flags.auto()\n\n    def test_flagSerialization(self):\n        data = [\n            Flags.FUEL,\n            Flags.FUEL | Flags.INNER,\n            Flags.A | Flags.B | Flags.CONTROL,\n        ]\n\n        flagsArray, attrs = composites.FlagSerializer.pack(data)\n\n        
data2 = composites.FlagSerializer.unpack(flagsArray, composites.FlagSerializer.version, attrs)\n        self.assertEqual(data, data2)\n\n        # discrepant versions\n        with self.assertRaises(ValueError):\n            data2 = composites.FlagSerializer.unpack(flagsArray, \"0\", attrs)\n\n        # missing flags in current version Flags\n        attrs[\"flag_order\"].append(\"NONEXISTANTFLAG\")\n        with mockRunLogs.BufferLog() as mock:\n            self.assertEqual(\"\", mock.getStdout())\n            testName = \"test_flagSerialization\"\n            runLog.LOG.startLog(testName)\n            runLog.LOG.setVerbosity(logging.WARNING)\n\n            data2 = composites.FlagSerializer.unpack(flagsArray, composites.FlagSerializer.version, attrs)\n            flagLog = mock.getStdout()\n\n        self.assertIn(\"The set of flags\", flagLog)\n        self.assertIn(\"NONEXISTANTFLAG\", flagLog)\n\n    def test_flagConversion(self):\n        data = [\n            self.TestFlagsA.A,\n            self.TestFlagsA.A | self.TestFlagsA.C,\n            self.TestFlagsA.A | self.TestFlagsA.C | self.TestFlagsA.D,\n        ]\n\n        serialized, attrs = composites.FlagSerializer._packImpl(data, self.TestFlagsA)\n\n        data2 = composites.FlagSerializer._unpackImpl(\n            serialized, composites.FlagSerializer.version, attrs, self.TestFlagsB\n        )\n\n        expected = [\n            self.TestFlagsB.A,\n            self.TestFlagsB.A | self.TestFlagsB.C,\n            self.TestFlagsB.A | self.TestFlagsB.C | self.TestFlagsB.D,\n        ]\n\n        self.assertEqual(data2, expected)\n\n\nclass TestMiscMethods(unittest.TestCase):\n    \"\"\"\n    Test a variety of methods on the composite.\n\n    these may get moved to composted classes in the future.\n    \"\"\"\n\n    def setUp(self):\n        self.obj = loadTestBlock()\n\n    def test_setMass(self):\n        \"\"\"Test setting and retrieving mass.\n\n        .. 
test:: Mass of a composite is retrievable.\n            :id: T_ARMI_CMP_GET_MASS\n            :tests: R_ARMI_CMP_GET_MASS\n        \"\"\"\n        masses = {\"U235\": 5.0, \"U238\": 3.0}\n        self.obj.setMasses(masses)\n        self.assertAlmostEqual(self.obj.getMass(\"U235\"), 5.0)\n        self.assertAlmostEqual(self.obj.getMass(\"U238\"), 3.0)\n        self.assertAlmostEqual(self.obj.getMass(), 8.0)\n\n        self.obj.addMasses(masses)\n        self.assertAlmostEqual(self.obj.getMass(\"U238\"), 6.0)\n\n        # make sure it works with groups of groups\n        group = composites.Composite(\"group\")\n        group.add(self.obj)\n        group.add(loadTestBlock())\n        group.setMass(\"U235\", 5)\n        self.assertAlmostEqual(group.getMass(\"U235\"), 5)\n\n        # ad a second block, and confirm it works\n        group.add(loadTestBlock())\n        self.assertGreater(group.getMass(\"U235\"), 5)\n        self.assertAlmostEqual(group.getMass(\"U235\"), 1364.28376185)\n\n    def test_getNumberDensities(self):\n        \"\"\"Get number densities from composite.\n\n        .. 
test:: Number density of composite is retrievable.\n            :id: T_ARMI_CMP_GET_NDENS0\n            :tests: R_ARMI_CMP_GET_NDENS\n        \"\"\"\n        # verify the number densities from the composite\n        ndens = self.obj.getNumberDensities()\n        self.assertAlmostEqual(0.0001096, ndens[\"SI\"], 7)\n        self.assertAlmostEqual(0.0000368, ndens[\"W\"], 7)\n\n        ndens = self.obj.getNumberDensity(\"SI\")\n        self.assertAlmostEqual(0.0001096, ndens, 7)\n\n        # sum nuc densities from children components\n        totalVolume = self.obj.getVolume()\n        childDensities = {}\n        for o in self.obj:\n            m = o.getVolume()\n            d = o.getNumberDensities()\n            for nuc, val in d.items():\n                if nuc not in childDensities:\n                    childDensities[nuc] = val * (m / totalVolume)\n                else:\n                    childDensities[nuc] += val * (m / totalVolume)\n\n        # verify the children match this composite\n        for nuc in [\"FE\", \"SI\"]:\n            self.assertAlmostEqual(self.obj.getNumberDensity(nuc), childDensities[nuc], 4, msg=nuc)\n\n    def test_getNumDensWithExpandedFissProds(self):\n        \"\"\"Get number densities from composite.\n\n        .. 
test:: Get number densities.\n            :id: T_ARMI_CMP_NUC\n            :tests: R_ARMI_CMP_NUC\n        \"\"\"\n        # verify the number densities from the composite\n        ndens = self.obj.getNumberDensities(expandFissionProducts=True)\n        self.assertAlmostEqual(0.0001096, ndens[\"SI\"], 7)\n        self.assertAlmostEqual(0.0000368, ndens[\"W\"], 7)\n\n        ndens = self.obj.getNumberDensity(\"SI\")\n        self.assertAlmostEqual(0.0001096, ndens, 7)\n\n        # set the lumped fission product mapping\n        fpd = getDummyLFPFile()\n        lfps = fpd.createLFPsFromFile()\n        self.obj.setLumpedFissionProducts(lfps)\n\n        # sum nuc densities from children components\n        totalVolume = self.obj.getVolume()\n        childDensities = {}\n        for o in self.obj:\n            # get the number densities with and without fission products\n            d0 = o.getNumberDensities(expandFissionProducts=False)\n            d = o.getNumberDensities(expandFissionProducts=True)\n\n            # prove that the expanded fission products have more isotopes\n            if len(d0) > 0:\n                self.assertGreater(len(d), len(d0))\n\n            # sum the child nuclide densites (weighted by mass fraction)\n            m = o.getVolume()\n            for nuc, val in d.items():\n                if nuc not in childDensities:\n                    childDensities[nuc] = val * (m / totalVolume)\n                else:\n                    childDensities[nuc] += val * (m / totalVolume)\n\n        # verify the children match this composite\n        for nuc in [\"FE\", \"SI\"]:\n            self.assertAlmostEqual(self.obj.getNumberDensity(nuc), childDensities[nuc], 4, msg=nuc)\n\n    def test_dimensionReport(self):\n        report = self.obj.setComponentDimensionsReport()\n        self.assertEqual(len(report), len(self.obj))\n\n    def test_getAtomicWeight(self):\n        weight = self.obj.getAtomicWeight()\n        self.assertTrue(50 < weight < 100)\n\n  
  def test_containsHeavyMetal(self):\n        self.assertTrue(self.obj.containsHeavyMetal())\n\n    def test_copyParamsToChildren(self):\n        self.obj.p.percentBu = 5\n        self.obj.copyParamsToChildren([\"percentBu\"])\n        for child in self.obj:\n            self.assertEqual(child.p.percentBu, self.obj.p.percentBu)\n\n    def test_copyParamsFrom(self):\n        obj2 = loadTestBlock()\n        obj2.p.percentBu = 15.2\n        self.obj.copyParamsFrom(obj2)\n        self.assertEqual(obj2.p.percentBu, self.obj.p.percentBu)\n"
  },
  {
    "path": "armi/reactor/tests/test_cores.py",
    "content": "# Copyright 2025 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport itertools\nimport random\nimport typing\nimport unittest\nfrom unittest import mock\nfrom unittest.mock import patch\n\nfrom armi.nuclearDataIO.xsLibraries import IsotxsLibrary\nfrom armi.reactor.assemblies import HexAssembly\nfrom armi.reactor.blocks import Block\nfrom armi.reactor.flags import Flags\nfrom armi.reactor.tests.test_reactors import TEST_ROOT, loadTestReactor\nfrom armi.testing import TESTING_ROOT\nfrom armi.tests import ISOAA_PATH\nfrom armi.utils import directoryChangers\n\n\nclass HexCoreTests(unittest.TestCase):\n    \"\"\"Tests on a hex reactor core.\"\"\"\n\n    @classmethod\n    def setUpClass(cls):\n        cls.directoryChanger = directoryChangers.DirectoryChanger(TEST_ROOT)\n        cls.directoryChanger.open()\n        r = loadTestReactor(\n            inputFilePath=TESTING_ROOT, inputFileName=\"reactors/thirdSmallHexReactor/thirdSmallHexReactor.yaml\"\n        )[1]\n        cls.core = r.core\n\n    def assertAllIs(self, actuals: typing.Iterable[typing.Any], expecteds: typing.Iterable[typing.Any], fill=None):\n        \"\"\"Assert that all items in two iterables are the same objects.\"\"\"\n        for actual, expected in itertools.zip_longest(actuals, expecteds, fillvalue=fill):\n            self.assertIs(actual, expected)\n\n    @classmethod\n    def tearDownClass(cls):\n        cls.directoryChanger.close()\n\n    def 
test_getAllAssem(self):\n        \"\"\"Test the ability to produce all assemblies.\"\"\"\n        expectedAll = list(self.core)\n        actualAll = self.core.getAssemblies()\n        self.assertAllIs(actualAll, expectedAll)\n\n    def test_getAllAssemWithFlag(self):\n        \"\"\"Test the ability to produce assemblies with a flag.\"\"\"\n        for spec in (Flags.FUEL, Flags.CONTROL):\n            expected = self.core.getChildrenWithFlags(spec)\n            actual = self.core.getAssemblies(typeSpec=spec)\n            for a in actual:\n                self.assertIsInstance(a, HexAssembly)\n                self.assertTrue(a.hasFlags(spec))\n            self.assertAllIs(actual, expected)\n\n    def test_iterChildrenWithFlags(self):\n        aa = list(self.core.iterChildrenWithFlags(Flags.BOOSTER))\n        self.assertEqual(len(aa), 0)\n\n        aa = list(self.core.iterChildrenWithFlags(Flags.FUEL))\n        self.assertTrue(1 < len(aa) < 10)\n\n        aa = list(self.core.iterChildrenWithFlags(Flags.CONTROL))\n        self.assertEqual(len(aa), 0)\n\n    def test_getAssemsInZones(self):\n        \"\"\"Test the ability to produce assemblies in a zone.\"\"\"\n        # Grab a few assemblies and add their locations to those in the zones\n        selection = random.choices(self.core.getAssemblies(), k=5)\n        locations = [a.getLocation() for a in selection]\n        fakeZones = [\"hot\", \"cold\"]\n        with mock.patch.object(self.core.zones, \"getZoneLocations\", mock.Mock(return_value=locations)):\n            actuals = self.core.getAssemblies(zones=fakeZones)\n        for a in actuals:\n            self.assertIn(a.getLocation(), locations, msg=str(a))\n\n    def test_getBlocks(self):\n        \"\"\"Test the ability to get all blocks in the core.\"\"\"\n        blocks = []\n        for a in self.core:\n            blocks.extend(a)\n        actual = self.core.iterBlocks()\n        self.assertAllIs(actual, blocks)\n\n    def test_getBlocksWithFlag(self):\n        
\"\"\"Test the ability to get all blocks with a flag in the core.\"\"\"\n        blocks = []\n        for a in self.core:\n            blocks.extend(filter(lambda b: b.hasFlags(Flags.FUEL), a))\n        actual = self.core.getBlocks(Flags.FUEL)\n        self.assertAllIs(actual, blocks)\n\n    def test_traverseAllBlocks(self):\n        \"\"\"Test the ability to iterate over all blocks in the core.\"\"\"\n        blocks = []\n        for a in self.core:\n            blocks.extend(a)\n        actual = self.core.iterBlocks()\n        self.assertAllIs(actual, blocks)\n\n    def test_traverseAllBlocksWithFlag(self):\n        \"\"\"Test the ability to traverse blocks in the core with a flag.\"\"\"\n        blocks: list[Block] = []\n        for a in self.core:\n            blocks.extend(a)\n        for spec in (Flags.FUEL, Flags.CONTROL, Flags.FUEL | Flags.CONTROL):\n            expected = list(filter(lambda b: b.hasFlags(spec), blocks))\n            actual = self.core.iterBlocks(spec)\n            self.assertAllIs(actual, expected)\n            # Fake the flag check with hasFlags as predicate\n            actual = self.core.iterBlocks(predicate=lambda b: b.hasFlags(spec))\n            self.assertAllIs(actual, expected)\n\n    def test_traverseBlocksWithPredicate(self):\n        \"\"\"Test the ability to traverse blocks that meet some criteria with a flag.\"\"\"\n        fuelBlocks: list[Block] = []\n        for a in self.core:\n            fuelBlocks.extend(filter(lambda b: b.hasFlags(Flags.FUEL), a))\n        # Make some contrived condition to exclude some blocks\n        meanElevation = sum(b.p.z for b in fuelBlocks) / len(fuelBlocks)\n        checker = lambda b: b.p.z >= meanElevation\n        expected = list(filter(checker, fuelBlocks))\n        actual = self.core.iterBlocks(Flags.FUEL, predicate=checker)\n        self.assertAllIs(actual, expected)\n\n    @patch(\"armi.nuclearDataIO.getExpectedISOTXSFileName\")\n    def test_lib(self, mockFileName):\n        # the 
default case will look something like this\n        mockFileName.return_value = \"ISOTXS-c0n0\"\n        self.assertIsNone(self.core.lib)\n        self.assertFalse(self.core.hasLib())\n\n        # we can inject some mock data, and retrieve it\n        mockFileName.return_value = ISOAA_PATH\n        self.assertTrue(isinstance(self.core.lib, IsotxsLibrary))\n        self.assertTrue(self.core.hasLib())\n\n    def test_getAssembliesInRing(self):\n        assems = self.core.getAssembliesInRing(0)\n        self.assertEqual(len(assems), 0)\n\n        assems = self.core.getAssembliesInRing(1)\n        self.assertEqual(len(assems), 1)\n        self.assertIsInstance(assems[0], HexAssembly)\n\n    def test_getAssembliesInSquareOrHexRing(self):\n        assems = self.core.getAssembliesInSquareOrHexRing(0)\n        self.assertEqual(len(assems), 0)\n\n        assems = self.core.getAssembliesInSquareOrHexRing(1)\n        self.assertEqual(len(assems), 1)\n        self.assertIsInstance(assems[0], HexAssembly)\n\n    def test_getAssembliesInCircularRing(self):\n        assems = self.core.getAssembliesInCircularRing(0)\n        self.assertEqual(len(assems), 0)\n\n        assems = self.core.getAssembliesInCircularRing(1)\n        self.assertEqual(len(assems), 5)\n        self.assertIsInstance(assems[0], HexAssembly)\n\n    def test_getBlockByName(self):\n        with self.assertRaises(KeyError):\n            self.core.getBlockByName(\"badName\")\n\n        b = self.core.getBlockByName(\"B0004-000\")\n        self.assertIsInstance(b, Block)\n\n    def test_getFirstBlock(self):\n        b = self.core.getFirstBlock()\n        self.assertIsInstance(b, Block)\n\n    def test_getFirstAssembly(self):\n        a = self.core.getFirstAssembly()\n        self.assertIsInstance(a, HexAssembly)\n"
  },
  {
    "path": "armi/reactor/tests/test_excoreStructures.py",
    "content": "# Copyright 2024 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Direct tests of the Excore Structures and Spent Fuel Pools.\"\"\"\n\nfrom unittest import TestCase\nfrom unittest.mock import MagicMock\n\nfrom armi.reactor import grids\nfrom armi.reactor.composites import Composite\nfrom armi.reactor.excoreStructure import ExcoreCollection, ExcoreStructure\nfrom armi.reactor.reactors import Reactor\nfrom armi.reactor.spentFuelPool import SpentFuelPool\nfrom armi.reactor.tests.test_assemblies import makeTestAssembly\n\n\nclass TestExcoreStructure(TestCase):\n    def test_constructor(self):\n        evst1 = ExcoreStructure(\"evst1\")\n        self.assertEqual(evst1.name, \"evst1\")\n        self.assertIsNone(evst1.parent)\n        self.assertIsNone(evst1.spatialGrid)\n\n        evst2 = ExcoreStructure(\"evst2\", parent=evst1)\n        self.assertEqual(evst2.name, \"evst2\")\n        self.assertEqual(evst2.parent, evst1)\n        self.assertIsNone(evst2.spatialGrid)\n\n    def test_representation(self):\n        evst7 = ExcoreStructure(\"evst7\")\n        rep = evst7.__repr__()\n        self.assertIn(\"ExcoreStructure\", rep)\n        self.assertIn(\"evst7\", rep)\n        self.assertIn(\"id:\", rep)\n\n    def test_parentReactor(self):\n        fr = Reactor(\"Reactor\", MagicMock())\n        evst3 = ExcoreStructure(\"evst3\", parent=fr)\n        self.assertEqual(evst3.r, fr)\n\n    def test_add(self):\n        # build an 
ex-core structure\n        ivs = ExcoreStructure(\"ivs\")\n        ivs.spatialGrid = grids.CartesianGrid.fromRectangle(1.0, 1.0)\n\n        # add one composite object and validate\n        comp1 = Composite(\"thing1\")\n        loc = ivs.spatialGrid[(-5, -5, 0)]\n\n        self.assertEqual(len(ivs.getChildren()), 0)\n        ivs.add(comp1, loc)\n        self.assertEqual(len(ivs.getChildren()), 1)\n\n        # add another composite object and validate\n        comp1 = Composite(\"thing2\")\n        loc = ivs.spatialGrid[(1, -4, 0)]\n\n        ivs.add(comp1, loc)\n        self.assertEqual(len(ivs.getChildren()), 2)\n\n\nclass TestSpentFuelPool(TestCase):\n    def setUp(self):\n        self.sfp = SpentFuelPool(\"sfp\")\n        self.sfp.spatialGrid = grids.CartesianGrid.fromRectangle(1.0, 1.0)\n\n    def test_constructor(self):\n        \"\"\"Show that the spent fuel pool is a composite.\n\n        .. test:: The spent fuel pool is a Composite structure.\n            :id: T_ARMI_SFP0\n            :tests: R_ARMI_SFP\n        \"\"\"\n        self.assertEqual(self.sfp.name, \"sfp\")\n        self.assertIsNone(self.sfp.parent)\n        self.assertIsNone(self.sfp.numColumns)\n        self.assertTrue(isinstance(self.sfp, Composite))\n        self.assertTrue(isinstance(self.sfp, ExcoreStructure))\n        self.assertTrue(isinstance(self.sfp.spatialGrid, grids.CartesianGrid))\n\n    def test_representation(self):\n        rep = self.sfp.__repr__()\n        self.assertIn(\"SpentFuelPool\", rep)\n        self.assertIn(\"sfp\", rep)\n        self.assertIn(\"id:\", rep)\n\n    def test_addRemove(self):\n        \"\"\"Show that we can add and remove Assemblies from the spent fuel pool.\n\n        .. 
test:: Show that we can add and remove Assemblies from the spent fuel pool.\n            :id: T_ARMI_SFP1\n            :tests: R_ARMI_SFP\n        \"\"\"\n        self.assertEqual(len(self.sfp.getChildren()), 0)\n\n        # add one assembly object and validate\n        a0 = makeTestAssembly(1, 987, spatialGrid=self.sfp.spatialGrid)\n        self.sfp.add(a0)\n        self.assertEqual(len(self.sfp.getChildren()), 1)\n\n        # add another assembly object and validate\n        a1 = makeTestAssembly(1, 988, spatialGrid=self.sfp.spatialGrid)\n        loc = self.sfp.spatialGrid[(1, -4, 0)]\n        self.sfp.add(a1, loc)\n        self.assertEqual(len(self.sfp.getChildren()), 2)\n\n        # remove the first assembly we added and validate\n        self.sfp.remove(a0)\n        self.assertEqual(len(self.sfp.getChildren()), 1)\n\n    def test_getAssembly(self):\n        a0 = makeTestAssembly(1, 678, spatialGrid=self.sfp.spatialGrid)\n        self.sfp.add(a0)\n\n        aReturn = self.sfp.getAssembly(\"A0678\")\n        self.assertEqual(aReturn, a0)\n\n    def test_updateNumberOfColumns(self):\n        self.assertIsNone(self.sfp.numColumns)\n        self.sfp._updateNumberOfColumns()\n        self.assertEqual(self.sfp.numColumns, 10)\n\n    def test_getNextLocation(self):\n        self.sfp._updateNumberOfColumns()\n\n        # test against an empty grid\n        loc = self.sfp._getNextLocation()\n        self.assertEqual(loc._i, 0)\n        self.assertEqual(loc._j, 0)\n        self.assertEqual(loc._k, 0)\n\n        # test against a non-empty grid\n        a0 = makeTestAssembly(1, 234, spatialGrid=self.sfp.spatialGrid)\n        self.sfp.add(a0)\n\n    def test_normalizeNames(self):\n        # test against an empty grid\n        self.assertEqual(self.sfp.normalizeNames(), 0)\n        self.assertEqual(self.sfp.normalizeNames(17), 17)\n\n        # test against a non-empty grid\n        a0 = makeTestAssembly(1, 456, spatialGrid=self.sfp.spatialGrid)\n        self.sfp.add(a0)\n    
    self.assertEqual(self.sfp.normalizeNames(), 1)\n        self.assertEqual(self.sfp.normalizeNames(17), 18)\n\n\nclass TestExcoreCollection(TestCase):\n    def test_addLikeDict(self):\n        sfp = SpentFuelPool(\"sfp\")\n\n        excore = ExcoreCollection()\n        excore[\"sfp\"] = sfp\n\n        self.assertTrue(isinstance(excore[\"sfp\"], SpentFuelPool))\n        self.assertTrue(isinstance(excore.sfp, SpentFuelPool))\n\n    def test_addLikeAttribute(self):\n        ivs = ExcoreStructure(\"ivs\")\n\n        excore = ExcoreCollection()\n        excore.ivs = ivs\n\n        self.assertTrue(isinstance(excore[\"ivs\"], ExcoreStructure))\n        self.assertTrue(isinstance(excore.ivs, ExcoreStructure))\n"
  },
  {
    "path": "armi/reactor/tests/test_flags.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for flags.\"\"\"\n\nimport pickle\nimport unittest\n\nfrom armi.reactor import flags\n\n\nclass TestFlags(unittest.TestCase):\n    \"\"\"Tests for flags system.\"\"\"\n\n    def test_fromString(self):\n        self._help_fromString(flags.Flags.fromStringIgnoreErrors)\n        self.assertEqual(flags.Flags.fromStringIgnoreErrors(\"invalid\"), flags.Flags(0))\n\n    def test_fromStringWithNumbers(self):\n        # testing pure numbers\n        self.assertEqual(flags.Flags.fromStringIgnoreErrors(\"1\"), flags.Flags(0))\n        self.assertEqual(flags.Flags.fromStringIgnoreErrors(\"7\"), flags.Flags(0))\n\n        # testing fuel naming logic\n        self.assertEqual(flags.Flags.fromStringIgnoreErrors(\"Fuel1\"), flags.Flags.FUEL)\n        self.assertEqual(flags.Flags.fromStringIgnoreErrors(\"Fuel123\"), flags.Flags.FUEL)\n        self.assertEqual(flags.Flags.fromStringIgnoreErrors(\"fuel 1\"), flags.Flags.FUEL)\n        self.assertEqual(flags.Flags.fromStringIgnoreErrors(\"fuel 123\"), flags.Flags.FUEL)\n\n    def test_flagsDefinedWithNumbers(self):\n        \"\"\"Test that if we DEFINE flags with numbers in them, those are treated as exceptions.\"\"\"\n        # define flags TYPE1 and TYPE1B (arbitrary example)\n        flags.Flags.extend({\"TYPE1\": flags.auto(), \"TYPE1B\": flags.auto()})\n\n        # verify that these flags are correctly found\n       
 self.assertEqual(flags.Flags[\"TYPE1\"], flags.Flags.TYPE1)\n        self.assertEqual(flags.Flags[\"TYPE1B\"], flags.Flags.TYPE1B)\n        self.assertEqual(flags.Flags.fromStringIgnoreErrors(\"type1\"), flags.Flags.TYPE1)\n        self.assertEqual(flags.Flags.fromStringIgnoreErrors(\"Type1b\"), flags.Flags.TYPE1B)\n\n        # the more complicated situation where our exceptions are mixed with the usual flag logic\n        self.assertEqual(flags.Flags.fromString(\"type1 fuel\"), flags.Flags.TYPE1 | flags.Flags.FUEL)\n\n        self.assertEqual(\n            flags.Flags.fromString(\"type1 fuel 123 bond\"),\n            flags.Flags.TYPE1 | flags.Flags.FUEL | flags.Flags.BOND,\n        )\n\n        self.assertEqual(\n            flags.Flags.fromString(\"type1 fuel123 bond\"),\n            flags.Flags.TYPE1 | flags.Flags.FUEL | flags.Flags.BOND,\n        )\n\n    def test_flagsToAndFromString(self):\n        \"\"\"\n        Convert flag to and from string for serialization.\n\n        .. test:: Convert flag to a string.\n            :id: T_ARMI_FLAG_TO_STR\n            :tests: R_ARMI_FLAG_TO_STR\n        \"\"\"\n        f = flags.Flags.FUEL\n        self.assertEqual(flags.Flags.toString(f), \"FUEL\")\n        self.assertEqual(f, flags.Flags.fromString(\"FUEL\"))\n\n    def test_toStringAlphabetical(self):\n        \"\"\"Ensure that, for multiple flags, toString() returns them in alphabetical order.\"\"\"\n        flagz = flags.Flags.AXIAL | flags.Flags.LOWER\n        self.assertEqual(flags.Flags.toString(flagz), \"AXIAL LOWER\")\n\n        flagz = flags.Flags.LOWER | flags.Flags.AXIAL\n        self.assertEqual(flags.Flags.toString(flagz), \"AXIAL LOWER\")\n\n    def test_fromStringStrict(self):\n        self._help_fromString(flags.Flags.fromString)\n        with self.assertRaises(flags.InvalidFlagsError):\n            flags.Flags.fromString(\"invalid\")\n        with self.assertRaises(flags.InvalidFlagsError):\n            flags.Flags.fromString(\"fuel invalid\")\n\n  
  def _help_fromString(self, method):\n        self.assertEqual(method(\"bond\"), flags.Flags.BOND)\n        self.assertEqual(method(\"bond1\"), flags.Flags.BOND)\n        self.assertEqual(method(\"bond 2\"), flags.Flags.BOND)\n        self.assertEqual(method(\"fuel test\"), flags.Flags.FUEL | flags.Flags.TEST)\n        # test the more strict GRID conversion, which can cause collisions with GRID_PLATE\n        self.assertEqual(flags.Flags.fromStringIgnoreErrors(\"grid_plate\"), flags.Flags.GRID_PLATE)\n        # test that \"nozzle\" is not consumed in the conversion, leaving behind \"inlet_\"\n        # and leading to an error. Interesting thing here is that if the IgnoreErrors\n        # variant is used, this works out fine since the \"inlet_\" is ignored and\n        # \"nozzle\" -> INLET_NOZZLE.\n        self.assertEqual(flags.Flags.fromString(\"inlet_nozzle\"), flags.Flags.INLET_NOZZLE)\n\n    def test_lookup(self):\n        \"\"\"Make sure lookup table is working.\"\"\"\n        self.assertEqual(flags.Flags.fromString(\"GAP1\"), flags.Flags.GAP | flags.Flags.A)\n        self.assertEqual(flags.Flags.fromString(\"handLing sOcket\"), flags.Flags.HANDLING_SOCKET)\n        # order in CONVERSIONS can matter for multi word flags.\n        # tests that order is good.\n        for conv, flag in flags._CONVERSIONS.items():\n            # the conversions are specified as RE patterns, so we need to do a little\n            # work to get them into something that can serve as candidate input (i.e. a\n            # string that the pattern would match). Since we are only using \\b and \\s+,\n            # this is pretty straightforward. If any more complicated patterns work\n            # their way in there, this will need to become more sophisticated. 
One might\n            # be tempted to bake the plain-text versions of the conversions in the\n            # collection in the flags module, but this is pretty much only needed for\n            # testing, so that wouldn't be appropriate.\n            exampleInput = conv.pattern.replace(r\"\\b\", \"\")\n            exampleInput = exampleInput.replace(r\"\\s+\", \" \")\n            self.assertEqual(flags.Flags.fromString(exampleInput), flag)\n\n    def test_convertsStringsWithNonFlags(self):\n        # Useful for verifying block / assembly names convert to Flags.\n        self.assertEqual(flags.Flags.fromStringIgnoreErrors(\"banana bond banana\"), flags.Flags.BOND)\n        self.assertEqual(\n            flags.Flags.fromStringIgnoreErrors(\"banana socket\"),\n            flags.Flags.HANDLING_SOCKET,\n        )\n        self.assertEqual(\n            flags.Flags.fromStringIgnoreErrors(\"grid plate banana\"),\n            flags.Flags.GRID_PLATE,\n        )\n        self.assertEqual(\n            flags.Flags.fromStringIgnoreErrors(\"handling socket socket\"),\n            flags.Flags.HANDLING_SOCKET,\n        )\n\n    def test_defaultIsFalse(self):\n        self.assertFalse(flags.Flags(0))\n\n    def test_isPickleable(self):\n        \"\"\"Must be pickleable to use syncMpiState.\"\"\"\n        stream = pickle.dumps(flags.Flags.BOND | flags.Flags.A)\n        flag = pickle.loads(stream)\n        self.assertEqual(flag, flags.Flags.BOND | flags.Flags.A)\n"
  },
  {
    "path": "armi/reactor/tests/test_geometry.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests the geometry (loading input) file.\"\"\"\n\nimport unittest\n\nfrom armi.reactor import geometry\n\n\nclass TestGeomType(unittest.TestCase):\n    def test_fromStr(self):\n        # note the bonkers case and extra whitespace to exercise the canonicalization\n        self.assertEqual(geometry.GeomType.fromStr(\"HeX\"), geometry.GeomType.HEX)\n        self.assertEqual(geometry.GeomType.fromStr(\"cARTESIAN\"), geometry.GeomType.CARTESIAN)\n        self.assertEqual(geometry.GeomType.fromStr(\" thetaRZ\"), geometry.GeomType.RZT)\n        self.assertEqual(geometry.GeomType.fromStr(\"rZ  \"), geometry.GeomType.RZ)\n\n        with self.assertRaises(ValueError):\n            geometry.GeomType.fromStr(\"what even is this?\")\n\n    def test_label(self):\n        gt = geometry.GeomType.fromStr(\"hex\")\n        self.assertEqual(gt.label, \"Hexagonal\")\n        gt = geometry.GeomType.fromStr(\"cartesian\")\n        self.assertEqual(gt.label, \"Cartesian\")\n        gt = geometry.GeomType.fromStr(\"rz\")\n        self.assertEqual(gt.label, \"R-Z\")\n        gt = geometry.GeomType.fromStr(\"thetarz\")\n        self.assertEqual(gt.label, \"R-Z-Theta\")\n\n    def test_str(self):\n        for geom in {geometry.HEX, geometry.CARTESIAN, geometry.RZ, geometry.RZT}:\n            self.assertEqual(str(geometry.GeomType.fromStr(geom)), geom)\n\n\nclass 
TestSymmetryType(unittest.TestCase):\n    def test_fromStr(self):\n        # note the bonkers case and extra whitespace to exercise the canonicalization\n        self.assertEqual(\n            geometry.SymmetryType.fromStr(\"thiRd periodic \").domain,\n            geometry.DomainType.THIRD_CORE,\n        )\n        st = geometry.SymmetryType.fromStr(\"sixteenth reflective\")\n        self.assertEqual(st.boundary, geometry.BoundaryType.REFLECTIVE)\n        self.assertEqual(str(st), \"sixteenth reflective\")\n\n        with self.assertRaises(ValueError):\n            geometry.SymmetryType.fromStr(\"what even is this?\")\n\n    def test_fromAny(self):\n        st = geometry.SymmetryType.fromAny(\"eighth reflective through center assembly\")\n        self.assertTrue(st.isThroughCenterAssembly)\n        self.assertEqual(st.domain, geometry.DomainType.EIGHTH_CORE)\n        self.assertEqual(st.boundary, geometry.BoundaryType.REFLECTIVE)\n\n        st = geometry.SymmetryType(geometry.DomainType.EIGHTH_CORE, geometry.BoundaryType.REFLECTIVE, True)\n        self.assertTrue(st.isThroughCenterAssembly)\n        self.assertEqual(st.domain, geometry.DomainType.EIGHTH_CORE)\n        self.assertEqual(st.boundary, geometry.BoundaryType.REFLECTIVE)\n\n        newST = geometry.SymmetryType.fromAny(st)\n        self.assertTrue(newST.isThroughCenterAssembly)\n        self.assertEqual(newST.domain, geometry.DomainType.EIGHTH_CORE)\n        self.assertEqual(newST.boundary, geometry.BoundaryType.REFLECTIVE)\n\n    def test_baseConstructor(self):\n        self.assertEqual(\n            geometry.SymmetryType(geometry.DomainType.SIXTEENTH_CORE, geometry.BoundaryType.REFLECTIVE).domain,\n            geometry.DomainType.SIXTEENTH_CORE,\n        )\n        self.assertEqual(\n            str(geometry.SymmetryType(geometry.DomainType.FULL_CORE, geometry.BoundaryType.NO_SYMMETRY).boundary),\n            \"\",\n        )\n\n    def test_label(self):\n        st = 
geometry.SymmetryType(geometry.DomainType.FULL_CORE, geometry.BoundaryType.NO_SYMMETRY)\n        self.assertEqual(st.domain.label, \"Full\")\n        self.assertEqual(st.boundary.label, \"No Symmetry\")\n        st = geometry.SymmetryType(geometry.DomainType.THIRD_CORE, geometry.BoundaryType.PERIODIC)\n        self.assertEqual(st.domain.label, \"Third\")\n        self.assertEqual(st.boundary.label, \"Periodic\")\n        st = geometry.SymmetryType(geometry.DomainType.QUARTER_CORE, geometry.BoundaryType.REFLECTIVE)\n        self.assertEqual(st.domain.label, \"Quarter\")\n        self.assertEqual(st.boundary.label, \"Reflective\")\n        st = geometry.SymmetryType(geometry.DomainType.EIGHTH_CORE, geometry.BoundaryType.REFLECTIVE)\n        self.assertEqual(st.domain.label, \"Eighth\")\n        st = geometry.SymmetryType(geometry.DomainType.SIXTEENTH_CORE, geometry.BoundaryType.REFLECTIVE)\n        self.assertEqual(st.domain.label, \"Sixteenth\")\n\n    def test_SymmetryFactor(self):\n        st = geometry.SymmetryType(geometry.DomainType.FULL_CORE, geometry.BoundaryType.NO_SYMMETRY)\n        self.assertEqual(st.symmetryFactor(), 1.0)\n        st = geometry.SymmetryType(geometry.DomainType.THIRD_CORE, geometry.BoundaryType.PERIODIC)\n        self.assertEqual(st.symmetryFactor(), 3.0)\n        st = geometry.SymmetryType(geometry.DomainType.QUARTER_CORE, geometry.BoundaryType.REFLECTIVE)\n        self.assertEqual(st.symmetryFactor(), 4.0)\n        st = geometry.SymmetryType(geometry.DomainType.EIGHTH_CORE, geometry.BoundaryType.REFLECTIVE)\n        self.assertEqual(st.symmetryFactor(), 8.0)\n        st = geometry.SymmetryType(geometry.DomainType.SIXTEENTH_CORE, geometry.BoundaryType.REFLECTIVE)\n        self.assertEqual(st.symmetryFactor(), 16.0)\n\n    def test_domainTypeNulls(self):\n        self.assertEqual(geometry.DomainType.NULL.label, \"\")\n        self.assertEqual(str(geometry.DomainType.NULL), \"\")\n        with self.assertRaises(ValueError):\n            
geometry.DomainType.NULL.symmetryFactor()\n\n    def test_checkValidGeomSymmetryCombo(self):\n        geomHex = geometry.GeomType.HEX\n        geomCart = geometry.GeomType.CARTESIAN\n        geomRZT = geometry.GeomType.RZT\n        geomRZ = geometry.GeomType.RZ\n        fullCore = geometry.SymmetryType(geometry.DomainType.FULL_CORE, geometry.BoundaryType.NO_SYMMETRY)\n        thirdPeriodic = geometry.SymmetryType(geometry.DomainType.THIRD_CORE, geometry.BoundaryType.PERIODIC)\n        quarterCartesian = geometry.SymmetryType(geometry.DomainType.QUARTER_CORE, geometry.BoundaryType.REFLECTIVE)\n\n        self.assertTrue(geometry.checkValidGeomSymmetryCombo(geomHex, thirdPeriodic))\n        self.assertTrue(geometry.checkValidGeomSymmetryCombo(geomHex, fullCore))\n        self.assertTrue(geometry.checkValidGeomSymmetryCombo(geomCart, quarterCartesian))\n        self.assertTrue(geometry.checkValidGeomSymmetryCombo(geomRZT, quarterCartesian))\n        self.assertTrue(geometry.checkValidGeomSymmetryCombo(geomRZ, fullCore))\n\n        with self.assertRaises(ValueError):\n            _ = geometry.SymmetryType(\n                geometry.DomainType.THIRD_CORE,\n                geometry.BoundaryType.REFLECTIVE,\n                False,\n            )\n        with self.assertRaises(ValueError):\n            geometry.checkValidGeomSymmetryCombo(geomHex, quarterCartesian)\n\n        with self.assertRaises(ValueError):\n            geometry.checkValidGeomSymmetryCombo(geomCart, thirdPeriodic)\n"
  },
  {
    "path": "armi/reactor/tests/test_hexBlockRotate.py",
    "content": "# Copyright 2024 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for the ability to rotate a hexagonal block.\"\"\"\n\nimport copy\nimport math\nimport unittest\n\nimport numpy as np\n\nfrom armi.reactor.blocks import HexBlock\nfrom armi.reactor.components import Component\nfrom armi.reactor.grids import (\n    CoordinateLocation,\n    HexGrid,\n    IndexLocation,\n    MultiIndexLocation,\n)\nfrom armi.reactor.tests.test_blocks import NUM_PINS_IN_TEST_BLOCK, loadTestBlock\nfrom armi.utils import iterables\n\n\nclass HexBlockRotateTests(unittest.TestCase):\n    \"\"\"Tests for various rotation aspects of a hex block.\"\"\"\n\n    BOUNDARY_PARAMS = [\n        \"cornerFastFlux\",\n        \"pointsCornerDpa\",\n        \"pointsCornerDpaRate\",\n        \"pointsCornerFastFluxFr\",\n        \"pointsEdgeDpa\",\n        \"pointsEdgeDpaRate\",\n        \"pointsEdgeFastFluxFr\",\n        \"THedgeTemp\",\n        \"THcornTemp\",\n    ]\n    BOUNDARY_DATA = np.arange(6, dtype=float) * 10\n\n    PIN_PARAMS = [\n        \"percentBuByPin\",\n        \"linPowByPin\",\n    ]\n\n    PIN_DATA = np.arange(NUM_PINS_IN_TEST_BLOCK, dtype=float)\n\n    def setUp(self):\n        self.baseBlock = loadTestBlock()\n        self._assignParamData(self.BOUNDARY_PARAMS, self.BOUNDARY_DATA)\n        self._assignParamData(self.PIN_PARAMS, self.PIN_DATA)\n\n    def _assignParamData(self, names: list[str], referenceData: np.ndarray):\n        
\"\"\"Assign initial rotatable pararameter data.\n\n        Make some arrays, some lists to make sure we have good coverage of usage.\n        \"\"\"\n        # Yes we're putting the variable type in the name but that's why this method exists\n        listData = referenceData.tolist()\n        for ix, name in enumerate(names):\n            self.baseBlock.p[name] = referenceData if (ix % 2) else listData\n\n    def test_orientationVector(self):\n        \"\"\"Test the z-value in the orientation vector matches rotation.\n\n        .. test:: Demonstrate that a HexBlock can be rotated in 60 degree increments, and the\n            resultant orientation parameter reflects the current rotation.\n            :id: T_ARMI_ROTATE_HEX_BLOCK\n            :tests: R_ARMI_ROTATE_HEX\n        \"\"\"\n        for nRotations in range(-10, 10):\n            rotationAmount = 60 * nRotations\n            fresh = copy.deepcopy(self.baseBlock)\n            self.assertEqual(fresh.p.orientation[2], 0.0, msg=nRotations)\n            fresh.rotate(math.radians(rotationAmount))\n            # Ensure rotation is bounded [0, 360)\n            postRotationOrientation = fresh.p.orientation[2]\n            self.assertTrue(0 <= postRotationOrientation < 360, msg=nRotations)\n            # Trim off any extra rotation if beyond 360 or negative\n            # What is the effective counter clockwise rotation?\n            expectedOrientation = rotationAmount % 360\n            self.assertEqual(postRotationOrientation, expectedOrientation, msg=nRotations)\n\n    def test_rotateBoundaryParameters(self):\n        \"\"\"Test that boundary parameters are correctly rotated.\n\n        .. 
test:: Rotating a hex block updates parameters on the boundary of the hexagon.\n            :id: T_ARMI_ROTATE_HEX_BOUNDARY\n            :tests: R_ARMI_ROTATE_HEX\n        \"\"\"\n        # No rotation == no changes to data\n        self._rotateAndCompareBoundaryParams(0, self.BOUNDARY_DATA)\n        for rotNum in range(1, 6):\n            expected = iterables.pivot(self.BOUNDARY_DATA, -rotNum)\n            self._rotateAndCompareBoundaryParams(rotNum * 60, expected)\n        # Six rotations of 60 degrees puts us back to the original layout\n        self._rotateAndCompareBoundaryParams(360, self.BOUNDARY_DATA)\n\n    def _rotateAndCompareBoundaryParams(self, degrees: float, expected: np.ndarray):\n        fresh = copy.deepcopy(self.baseBlock)\n        fresh.rotate(math.radians(degrees))\n        for name in self.BOUNDARY_PARAMS:\n            data = fresh.p[name]\n            msg = f\"{name=} :: {degrees=} :: {data=}\"\n            np.testing.assert_array_equal(data, expected, err_msg=msg)\n\n    def assertIndexLocationEquivalent(self, actual: IndexLocation, expected: IndexLocation):\n        \"\"\"More flexible equivalency check on index locations.\n\n        Specifically focused on locations on hex grids because this file\n        is testing things on hex blocks.\n\n        Checks that\n        1. ``i``, ``j``, and ``k`` are equal\n        2. Grids are both hex grid\n        3. Grids have same pitch and orientation.\n        \"\"\"\n        self.assertEqual(actual.i, expected.i)\n        self.assertEqual(actual.j, expected.j)\n        self.assertEqual(actual.k, expected.k)\n        self.assertIsInstance(actual.grid, HexGrid)\n        self.assertIsInstance(expected.grid, HexGrid)\n        self.assertEqual(actual.grid.cornersUp, expected.grid.cornersUp)\n        self.assertEqual(actual.grid.pitch, expected.grid.pitch)\n\n    def test_pinRotationLocations(self):\n        \"\"\"Test that pin locations are updated through rotation.\n\n        .. 
test:: HexBlock.getPinLocations is consistent with rotation.\n            :id: T_ARMI_ROTATE_HEX_PIN_LOCS\n            :tests: R_ARMI_ROTATE_HEX\n        \"\"\"\n        preRotation = self.baseBlock.getPinLocations()\n        for nRotations in range(-10, 10):\n            degrees = 60 * nRotations\n            fresh = copy.deepcopy(self.baseBlock)\n            g = fresh.spatialGrid\n            fresh.rotate(math.radians(degrees))\n            postRotation = fresh.getPinLocations()\n            self.assertEqual(len(preRotation), len(postRotation))\n            for pre, post in zip(preRotation, postRotation):\n                expected = g.rotateIndex(pre, nRotations)\n                self.assertIndexLocationEquivalent(post, expected)\n\n    def test_pinRotationCoordinates(self):\n        \"\"\"Test that pin coordinates are updated through rotation.\n\n        .. test:: HexBlock.getPinCoordinates is consistent through rotation.\n            :id: T_ARMI_ROTATE_HEX_PIN_COORDS\n            :tests: R_ARMI_ROTATE_HEX\n        \"\"\"\n        preRotation = self.baseBlock.getPinCoordinates()\n        # Over- and under-rotate to make sure we can handle clockwise and counter\n        # clockwise rotations, and cases that wrap around a full rotation\n        for degrees in range(-600, 600, 60):\n            fresh = copy.deepcopy(self.baseBlock)\n            rads = math.radians(degrees)\n            fresh.rotate(rads)\n            rotationMatrix = np.array(\n                [\n                    [math.cos(rads), -math.sin(rads)],\n                    [math.sin(rads), math.cos(rads)],\n                ]\n            )\n            postRotation = fresh.getPinCoordinates()\n            self.assertEqual(len(preRotation), len(postRotation))\n            for pre, post in zip(preRotation, postRotation):\n                start = pre[:2]\n                finish = post[:2]\n                if np.allclose(start, 0):\n                    np.testing.assert_equal(start, finish)\n             
       continue\n                expected = rotationMatrix.dot(start)\n                np.testing.assert_allclose(expected, finish, atol=1e-8)\n\n    def test_updateChildLocations(self):\n        \"\"\"Test that locations of all children are updated through rotation.\n\n        .. test:: Rotating a hex block updates the spatial coordinates on contained objects.\n            :id: T_ARMI_ROTATE_HEX_CHILD_LOCS\n            :tests: R_ARMI_ROTATE_HEX\n        \"\"\"\n        for nRotations in range(-10, 10):\n            fresh = copy.deepcopy(self.baseBlock)\n            degrees = 60 * nRotations\n            rads = math.radians(degrees)\n            fresh.rotate(rads)\n            for originalC, newC in zip(self.baseBlock, fresh):\n                self._compareComponentLocationsAfterRotation(originalC, newC, nRotations, rads)\n\n    def _compareComponentLocationsAfterRotation(\n        self, original: Component, updated: Component, nRotations: int, radians: float\n    ):\n        if isinstance(original.spatialLocator, MultiIndexLocation):\n            for originalLoc, newLoc in zip(original.spatialLocator, updated.spatialLocator):\n                expected = originalLoc.grid.rotateIndex(originalLoc, nRotations)\n                self.assertIndexLocationEquivalent(newLoc, expected)\n        elif isinstance(original.spatialLocator, CoordinateLocation):\n            ox, oy, oz = original.spatialLocator.getLocalCoordinates()\n            nx, ny, nz = updated.spatialLocator.getLocalCoordinates()\n            self.assertEqual(nz, oz, msg=f\"{original=} :: {radians=}\")\n            rotationMatrix = np.array(\n                [\n                    [math.cos(radians), -math.sin(radians)],\n                    [math.sin(radians), math.cos(radians)],\n                ]\n            )\n            expectedX, expectedY = rotationMatrix.dot((ox, oy))\n            np.testing.assert_allclose((nx, ny), (expectedX, expectedY), err_msg=f\"{original=} :: {radians=}\")\n\n    def 
test_pinParametersUnmodified(self):\n        \"\"\"Test that pin data are not modified through rotation.\n\n        Reinforces the idea that data like ``linPowByPin[i]`` are assigned to\n        pin ``i``, wherever it may be. Locations are defined instead by ``getPinCoordinates()[i]``.\n        \"\"\"\n        fresh = copy.deepcopy(self.baseBlock)\n        fresh.rotate(math.radians(60))\n        for paramName in self.PIN_PARAMS:\n            actual = fresh.p[paramName]\n            np.testing.assert_equal(actual, self.PIN_DATA, err_msg=paramName)\n\n\nclass EmptyBlockRotateTest(unittest.TestCase):\n    \"\"\"Rotation tests on an empty hexagonal block.\n\n    Useful for enforcing rotation works on blocks without pins.\n    \"\"\"\n\n    def setUp(self):\n        self.block = HexBlock(\"empty\")\n\n    def test_orientation(self):\n        \"\"\"Test the orientation parameter is updated on a rotated empty block.\"\"\"\n        rotDegrees = 60\n        preRotateOrientation = self.block.p.orientation[2]\n        self.block.rotate(math.radians(rotDegrees))\n        postRotationOrientation = self.block.p.orientation[2]\n        self.assertNotEqual(preRotateOrientation, postRotationOrientation)\n        self.assertEqual(postRotationOrientation, rotDegrees)\n"
  },
  {
    "path": "armi/reactor/tests/test_parameters.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for assorted Parameters tools.\"\"\"\n\nimport copy\nimport os\nimport unittest\nfrom glob import glob\nfrom shutil import copyfile\n\nfrom armi.reactor import parameters\nfrom armi.reactor.reactorParameters import makeParametersReadOnly\nfrom armi.testing import loadTestReactor\nfrom armi.tests import TEST_ROOT\nfrom armi.utils.directoryChangers import TemporaryDirectoryChanger\n\n\nclass MockComposite:\n    def __init__(self, name):\n        self.name = name\n        self.p = {}\n\n\nclass MockCompositeGrandParent(MockComposite):\n    pass\n\n\nclass MockCompositeParent(MockCompositeGrandParent):\n    pass\n\n\nclass MockCompositeChild(MockCompositeParent):\n    pass\n\n\nclass ParameterTests(unittest.TestCase):\n    @classmethod\n    def setUpClass(cls):\n        cls.defs = parameters.ALL_DEFINITIONS._paramDefs\n\n    @classmethod\n    def tearDownClass(cls):\n        parameters.ALL_DEFINITIONS._paramDefs = cls.defs\n\n    def setUp(self):\n        parameters.ALL_DEFINITIONS._paramDefs = []\n\n    def test_mutableDefaultsNotSupported(self):\n        class Mock(parameters.ParameterCollection):\n            pDefs = parameters.ParameterDefinitionCollection()\n            with pDefs.createBuilder() as pb:\n                with self.assertRaises(AssertionError):\n                    pb.defParam(\"units\", \"description\", \"location\", default=[])\n       
         with self.assertRaises(AssertionError):\n                    pb.defParam(\"units\", \"description\", \"location\", default={})\n\n            with self.assertRaises(AssertionError):\n                fail = pDefs.createBuilder(default=[])\n\n            with self.assertRaises(AssertionError):\n                fail = pDefs.createBuilder(default={})\n\n    def test_writeSomeParamsToDB(self):\n        \"\"\"\n        This tests the ability to specify which parameters should be\n        written to the database. It assumes that the list returned by\n        ParameterDefinitionCollection.toWriteToDB() is used to filter for which\n        parameters to include in the database.\n\n        .. test:: Restrict parameters from DB write.\n            :id: T_ARMI_PARAM_DB\n            :tests: R_ARMI_PARAM_DB\n\n        .. test:: Ensure that new parameters can be defined.\n            :id: T_ARMI_PARAM0\n            :tests: R_ARMI_PARAM\n        \"\"\"\n        pDefs = parameters.ParameterDefinitionCollection()\n        with pDefs.createBuilder() as pb:\n            pb.defParam(\"write_me\", \"units\", \"description\", \"location\", default=42)\n            pb.defParam(\"and_me\", \"units\", \"description\", \"location\", default=42)\n            pb.defParam(\n                \"dont_write_me\",\n                \"units\",\n                \"description\",\n                \"location\",\n                default=42,\n                saveToDB=False,\n            )\n        db_params = pDefs.toWriteToDB(32)\n        self.assertListEqual([\"write_me\", \"and_me\"], [p.name for p in db_params])\n\n    def test_serializer_pack_unpack(self):\n        \"\"\"\n        This tests the ability to add a serializer to a parameter instantiation line.\n        It assumes that if this parameter is not None, that the pack and unpack methods\n        will be called during storage to and reading from the database. 
See\n        database._writeParams for an example use of this functionality.\n\n        .. test:: Custom parameter serializer\n            :id: T_ARMI_PARAM_SERIALIZE\n            :tests: R_ARMI_PARAM_SERIALIZE\n        \"\"\"\n\n        class TestSerializer(parameters.Serializer):\n            @staticmethod\n            def pack(data):\n                array = [d + 1 for d in data]\n                return array\n\n            @staticmethod\n            def unpack(data):\n                array = [d - 1 for d in data]\n                return array\n\n        param = parameters.Parameter(\n            name=\"myparam\",\n            units=\"kg\",\n            description=\"a param\",\n            location=None,\n            saveToDB=True,\n            default=[1],\n            setter=None,\n            categories=None,\n            serializer=TestSerializer(),\n        )\n        param.assigned = [1]\n\n        packed = param.serializer.pack(param.assigned)\n        unpacked = param.serializer.unpack(packed)\n\n        self.assertEqual(packed, [2])\n        self.assertEqual(unpacked, [1])\n\n    def test_paramPropertyDoesNotConflict(self):\n        class Mock(parameters.ParameterCollection):\n            pDefs = parameters.ParameterDefinitionCollection()\n            with pDefs.createBuilder() as pb:\n                pb.defParam(\"doodle\", \"units\", \"description\", \"location\", default=42)\n\n            with pDefs.createBuilder(MockComposite, default=0.0) as pb:\n                pb.defParam(\"cheese\", \"kg\", \"pressed curds of milk\", \"avg\")\n                pb.defParam(\"fudge\", \"kg\", \"saturated chocolate\", \"avg\", default=19)\n                pb.defParam(\n                    \"noodles\",\n                    \"kg\",\n                    \"strip, ring, or tube of pasta\",\n                    \"avg\",\n                    default=None,\n                )\n\n        mock1 = Mock()\n        mock2 = Mock()\n        self.assertEqual(42, mock1.doodle)\n    
    self.assertEqual(42, mock2.doodle)\n        self.assertEqual(0.0, mock1.cheese)  # make sure factory default is applied\n        self.assertEqual(19, mock2.fudge)  # make sure we can override the factory default\n        self.assertEqual(None, mock2.noodles)  # make sure we can override the factory default\n        mock1.doodle = 17\n        self.assertEqual(17, mock1.doodle)\n        self.assertEqual(42, mock2.doodle)\n\n    def test_paramPropNoConflictNoneDefault(self):\n        \"\"\"Parameter property does not conflict with None default.\"\"\"\n\n        class Mock(parameters.ParameterCollection):\n            pDefs = parameters.ParameterDefinitionCollection()\n            with pDefs.createBuilder() as pb:\n                pb.defParam(\"noneDefault\", \"units\", \"description\", \"location\", default=None)\n\n        mock1 = Mock()\n        mock2 = Mock()\n        self.assertIsNone(mock1.noneDefault)\n        self.assertIsNone(mock2.noneDefault)\n        mock1.noneDefault = 1.234\n        self.assertEqual(1.234, mock1.noneDefault)\n        self.assertEqual(None, mock2.noneDefault)\n\n    def test_getNoDefaultRaisesError(self):\n        \"\"\"Get without default raises parameter error.\"\"\"\n\n        class Mock(parameters.ParameterCollection):\n            pDefs = parameters.ParameterDefinitionCollection()\n            with pDefs.createBuilder() as pb:\n                pb.defParam(\"noDefault\", \"units\", \"description\", \"location\")\n\n        mock = Mock()\n        with self.assertRaises(parameters.ParameterError):\n            print(mock.noDefault)\n\n    def test_setParamWithoutSetter(self):\n        \"\"\"Attempting to set paramter without setter fails.\"\"\"\n\n        class Mock(parameters.ParameterCollection):\n            pDefs = parameters.ParameterDefinitionCollection()\n            with pDefs.createBuilder() as pb:\n                pb.defParam(\n                    \"noSetter\",\n                    \"noSetter\",\n                    
\"units\",\n                    \"description\",\n                    \"location\",\n                    default=\"encapsulated\",\n                    setter=None,\n                )\n\n        mock = Mock()\n        self.assertEqual(\"encapsulated\", mock.noSetter)\n        with self.assertRaises(parameters.ParameterError):\n            mock.noSetter = False\n        self.assertEqual(\"encapsulated\", mock.noSetter)\n\n    def test_setter(self):\n        \"\"\"Test the Parameter setter() tooling, that signifies if a Parameter has been updated.\n\n        .. test:: Tooling that allows a Parameter to signal it needs to be updated across processes.\n            :id: T_ARMI_PARAM_PARALLEL0\n            :tests: R_ARMI_PARAM_PARALLEL\n        \"\"\"\n\n        class Mock(parameters.ParameterCollection):\n            pDefs = parameters.ParameterDefinitionCollection()\n            with pDefs.createBuilder() as pb:\n\n                def n(self, value):\n                    self._p_n = value\n                    self._p_nPlus1 = value + 1\n\n                pb.defParam(\"n\", \"units\", \"description\", \"location\", setter=n)\n\n                def nPlus1(self, value):\n                    self._p_nPlus1 = value\n                    self._p_n = value - 1\n\n                pb.defParam(\"nPlus1\", \"units\", \"description\", \"location\", setter=nPlus1)\n\n        mock = Mock()\n        self.assertTrue(all(pd.assigned == parameters.NEVER for pd in mock.paramDefs if pd.name != \"serialNum\"))\n        with self.assertRaises(parameters.ParameterError):\n            print(mock.n)\n        with self.assertRaises(parameters.ParameterError):\n            print(mock.nPlus1)\n\n        mock.n = 15\n        self.assertEqual(15, mock.n)\n        self.assertEqual(16, mock.nPlus1)\n\n        mock.nPlus1 = 22\n        self.assertEqual(21, mock.n)\n        self.assertEqual(22, mock.nPlus1)\n        self.assertTrue(all(pd.assigned != parameters.NEVER for pd in mock.paramDefs))\n\n    
def test_setterGetterBasics(self):\n        \"\"\"Test the Parameter setter/getter tooling, through the lifecycle of a Parameter being updated.\n\n        .. test:: Tooling that allows a Parameter to signal it needs to be updated across processes.\n            :id: T_ARMI_PARAM_PARALLEL1\n            :tests: R_ARMI_PARAM_PARALLEL\n        \"\"\"\n\n        class Mock(parameters.ParameterCollection):\n            pDefs = parameters.ParameterDefinitionCollection()\n            with pDefs.createBuilder() as pb:\n\n                def n(self, value):\n                    self._p_n = value\n                    self._p_nPlus1 = value + 1\n\n                pb.defParam(\"n\", \"units\", \"description\", \"location\", setter=n)\n\n                def nPlus1(self, value):\n                    self._p_nPlus1 = value\n                    self._p_n = value - 1\n\n                pb.defParam(\"nPlus1\", \"units\", \"description\", \"location\", setter=nPlus1)\n\n        mock = Mock()\n        mock.n = 15\n        mock.nPlus1 = 22\n\n        # basic tests of setters and getters\n        self.assertEqual(mock[\"n\"], 21)\n        self.assertEqual(mock[\"nPlus1\"], 22)\n        with self.assertRaises(parameters.exceptions.UnknownParameterError):\n            _ = mock[\"fake\"]\n        with self.assertRaises(KeyError):\n            _ = mock[123]\n\n        # basic test of __delitem__ method\n        del mock[\"n\"]\n        with self.assertRaises(parameters.exceptions.UnknownParameterError):\n            _ = mock[\"n\"]\n\n        # basic tests of __in__ method\n        self.assertNotIn(\"n\", mock)\n        self.assertIn(\"nPlus1\", mock)\n\n        # basic tests of __eq__ method\n        mock2 = copy.deepcopy(mock)\n        self.assertEqual(mock, mock)\n        self.assertNotEqual(mock, mock2)\n\n        # basic tests of get() method\n        self.assertEqual(mock.get(\"nPlus1\"), 22)\n        self.assertIsNone(mock.get(\"fake\"))\n        self.assertEqual(mock.get(\"fake\", 
default=333), 333)\n\n        # basic test of values() method\n        vals = mock.values()\n        self.assertEqual(len(vals), 2)\n        self.assertEqual(vals[0], 22)\n\n        # basic test of update() method\n        mock.update({\"nPlus1\": 100})\n        self.assertEqual(mock.get(\"nPlus1\"), 100)\n\n        # basic test of getSyncData() method\n        data = mock.getSyncData()\n        self.assertEqual(data[\"n\"], 99)\n        self.assertEqual(data[\"nPlus1\"], 100)\n\n    def test_cannotDefineParamWithSameName(self):\n        with self.assertRaises(parameters.ParameterDefinitionError):\n\n            class MockParamCollection(parameters.ParameterCollection):\n                pDefs = parameters.ParameterDefinitionCollection()\n                with pDefs.createBuilder() as pb:\n                    pb.defParam(\"sameName\", \"units\", \"description 1\", \"location\")\n                    pb.defParam(\"sameName\", \"units\", \"description 2\", \"location\")\n\n            _ = MockParamCollection()\n\n    def test_paramDefinitionsCompose(self):\n        class MockBaseParamCollection(parameters.ParameterCollection):\n            pDefs = parameters.ParameterDefinitionCollection()\n            with pDefs.createBuilder() as pb:\n                pb.defParam(\"base1\", \"units\", \"a param on the base collection\", \"avg\")\n                pb.defParam(\"base2\", \"units\", \"another param on the base collection\", \"avg\")\n\n        class MockDerivedACollection(MockBaseParamCollection):\n            pDefs = parameters.ParameterDefinitionCollection()\n            with pDefs.createBuilder() as pb:\n                pb.defParam(\"derAp1\", \"units\", \"derived a p 1\", \"centroid\")\n                pb.defParam(\"derAp2\", \"units\", \"derived a p 2\", \"centroid\")\n\n        class MockDerivedBCollection(MockDerivedACollection):\n            pDefs = parameters.ParameterDefinitionCollection()\n            with pDefs.createBuilder() as pb:\n                
pb.defParam(\"derBp\", \"units\", \"derived b param\", \"centroid\")\n\n        base = MockBaseParamCollection()\n        derA = MockDerivedACollection()\n        derB = MockDerivedBCollection()\n\n        self.assertTrue(set(base.paramDefs._paramDefs).issubset(set(derA.paramDefs._paramDefs)))\n        self.assertTrue(set(base.paramDefs._paramDefs).issubset(set(derB.paramDefs._paramDefs)))\n        self.assertTrue(set(derA.paramDefs._paramDefs).issubset(set(derB.paramDefs._paramDefs)))\n\n    def test_cannotDefineParamSameNameColSubclass(self):\n        class MockPCParent(parameters.ParameterCollection):\n            pDefs = parameters.ParameterDefinitionCollection()\n            with pDefs.createBuilder() as pb:\n                pb.defParam(\"sameName\", \"units\", \"description 3\", \"location\")\n\n        with self.assertRaises(parameters.ParameterDefinitionError):\n\n            class MockPCChild(MockPCParent):\n                pDefs = parameters.ParameterDefinitionCollection()\n                with pDefs.createBuilder() as pb:\n                    pb.defParam(\"sameName\", \"units\", \"description 4\", \"location\")\n\n            _ = MockPCChild()\n\n        # same name along a different branch from the base ParameterCollection should\n        # be fine\n        class MockPCUncle(parameters.ParameterCollection):\n            pDefs = parameters.ParameterDefinitionCollection()\n            with pDefs.createBuilder() as pb:\n                pb.defParam(\"sameName\", \"units\", \"description 5\", \"location\")\n\n    def test_cannotCreateAttrOnParamColSubclass(self):\n        class MockPC(parameters.ParameterCollection):\n            pDefs = parameters.ParameterDefinitionCollection()\n            with pDefs.createBuilder() as pb:\n                pb.defParam(\"someParam\", \"units\", \"description\", \"location\")\n\n        _ = MockPC()\n\n    def test_cannotCreateInstanceOf_NoDefault(self):\n        with self.assertRaises(NotImplementedError):\n            _ = 
parameters.NoDefault()\n\n    def test_cannotCreateInstanceOf_Undefined(self):\n        with self.assertRaises(NotImplementedError):\n            _ = parameters.parameterDefinitions._Undefined()\n\n    def test_defaultLocation(self):\n        class MockPC(parameters.ParameterCollection):\n            pDefs = parameters.ParameterDefinitionCollection()\n            with pDefs.createBuilder(location=parameters.ParamLocation.AVERAGE) as pb:\n                pb.defParam(\"p1\", \"units\", \"p1 description\")\n                pb.defParam(\"p2\", \"units\", \"p2 description\", parameters.ParamLocation.TOP)\n\n        pc = MockPC()\n        self.assertEqual(pc.paramDefs[\"p1\"].location, parameters.ParamLocation.AVERAGE)\n        self.assertEqual(pc.paramDefs[\"p2\"].location, parameters.ParamLocation.TOP)\n\n    def test_categories(self):\n        class MockPC0(parameters.ParameterCollection):\n            pDefs = parameters.ParameterDefinitionCollection()\n            with pDefs.createBuilder() as pb:\n                pb.defParam(\"p0\", \"units\", \"p0 description\", \"location\")\n\n        pc = MockPC0()\n        self.assertEqual(pc.paramDefs.categories, set())\n\n        class MockPC(parameters.ParameterCollection):\n            pDefs = parameters.ParameterDefinitionCollection()\n            with pDefs.createBuilder(categories=[\"awesome\", \"stuff\"]) as pb:\n                pb.defParam(\"p1\", \"units\", \"p1 description\", \"location\")\n                pb.defParam(\"p2\", \"units\", \"p2 description\", \"location\", categories=[\"bacon\"])\n\n            with pDefs.createBuilder() as pb:\n                pb.defParam(\"p3\", \"units\", \"p3 description\", \"location\", categories=[\"bacon\"])\n\n        pc = MockPC()\n        self.assertEqual(pc.paramDefs.categories, set([\"awesome\", \"stuff\", \"bacon\"]))\n\n        p1 = pc.paramDefs[\"p1\"]\n        p2 = pc.paramDefs[\"p2\"]\n        p3 = pc.paramDefs[\"p3\"]\n        self.assertEqual(p1.categories, 
set([\"awesome\", \"stuff\"]))\n        self.assertEqual(p2.categories, set([\"awesome\", \"stuff\", \"bacon\"]))\n        self.assertEqual(p3.categories, set([\"bacon\"]))\n\n        for p in [p1, p2, p3]:\n            self._testCategoryConsistency(p)\n\n        self.assertEqual(set(pc.paramDefs.inCategory(\"awesome\")), set([p1, p2]))\n        self.assertEqual(set(pc.paramDefs.inCategory(\"stuff\")), set([p1, p2]))\n        self.assertEqual(set(pc.paramDefs.inCategory(\"bacon\")), set([p2, p3]))\n\n    def _testCategoryConsistency(self, p: parameters.Parameter):\n        for category in p.categories:\n            self.assertTrue(p.hasCategory(category))\n        self.assertFalse(p.hasCategory(\"this_shouldnot_exist\"))\n\n    def test_paramColHaveSlots(self):\n        \"\"\"Tests we prevent accidental creation of attributes.\"\"\"\n        self.assertEqual(\n            set(\n                [\n                    \"_hist\",\n                    \"_backup\",\n                    \"assigned\",\n                    \"_p_serialNum\",\n                    \"serialNum\",\n                    \"readOnly\",\n                ]\n            ),\n            set(parameters.ParameterCollection._slots),\n        )\n\n        class MockPC(parameters.ParameterCollection):\n            pass\n\n        pc = MockPC()\n        with self.assertRaises(AssertionError):\n            pc.whatever = 22\n\n        # try again after using a ParameterBuilder\n        class MockPC(parameters.ParameterCollection):\n            pDefs = parameters.ParameterDefinitionCollection()\n            # use of the ParameterBuilder creates an empty __slots__\n            with pDefs.createBuilder() as pb:\n                pb.defParam(\"p0\", \"units\", \"p0 description\", \"location\")\n\n        pc = MockPC()\n\n        self.assertIn(\"_p_p0\", MockPC._slots)\n        # Make sure we aren't making any weird copies of anything\n        self.assertEqual(pc._slots, MockPC._slots)\n        with 
self.assertRaises(AssertionError):\n            pc.whatever = 33\n\n        self.assertEqual([\"serialNum\"], pc.keys())\n        pc.p0 = \"hi\"\n        self.assertEqual({\"p0\", \"serialNum\"}, set(pc.keys()))\n\n        # Also make sure that subclasses of ParameterCollection subclasses use __slots__\n        class MockPCChild(MockPC):\n            pDefs = parameters.ParameterDefinitionCollection()\n            with pDefs.createBuilder() as pb:\n                pb.defParam(\"p2\", \"foo\", \"bar\")\n\n        pcc = MockPCChild()\n        with self.assertRaises(AssertionError):\n            pcc.whatever = 33\n\n\nclass ParamCollectionWhere(unittest.TestCase):\n    \"\"\"Tests for ParameterCollection.where.\"\"\"\n\n    class ScopeParamCollection(parameters.ParameterCollection):\n        pDefs = parameters.ParameterDefinitionCollection()\n        with pDefs.createBuilder() as pb:\n            pb.defParam(\n                name=\"empty\",\n                description=\"Bare\",\n                location=None,\n                categories=None,\n                units=\"\",\n            )\n            pb.defParam(\n                name=\"keff\",\n                description=\"keff\",\n                location=parameters.ParamLocation.VOLUME_INTEGRATED,\n                categories=[parameters.Category.neutronics],\n                units=\"\",\n            )\n            pb.defParam(\n                name=\"cornerFlux\",\n                description=\"corner flux\",\n                location=parameters.ParamLocation.CORNERS,\n                categories=[\n                    parameters.Category.neutronics,\n                ],\n                units=\"\",\n            )\n            pb.defParam(\n                name=\"edgeTemperature\",\n                description=\"edge temperature\",\n                location=parameters.ParamLocation.EDGES,\n                categories=[parameters.Category.thermalHydraulics],\n                units=\"\",\n            )\n\n    
@classmethod\n    def setUpClass(cls) -> None:\n        \"\"\"Define a couple useful parameters with categories, locations, etc.\"\"\"\n        cls.pc = cls.ScopeParamCollection()\n\n    def test_onCategory(self):\n        \"\"\"Test the use of Parameter.hasCategory on filtering.\"\"\"\n        names = {\"keff\", \"cornerFlux\"}\n        for p in self.pc.where(lambda pd: pd.hasCategory(parameters.Category.neutronics)):\n            self.assertTrue(p.hasCategory(parameters.Category.neutronics), msg=p)\n            names.remove(p.name)\n        self.assertFalse(names, msg=f\"{names=} should be empty!\")\n\n    def test_onLocation(self):\n        \"\"\"Test the use of Parameter.atLocation in filtering.\"\"\"\n        names = {\"edgeTemperature\"}\n        for p in self.pc.where(lambda pd: pd.atLocation(parameters.ParamLocation.EDGES)):\n            self.assertTrue(p.atLocation(parameters.ParamLocation.EDGES), msg=p)\n            names.remove(p.name)\n        self.assertFalse(names, msg=f\"{names=} should be empty!\")\n\n    def test_complicated(self):\n        \"\"\"Test a multi-condition filter.\"\"\"\n        names = {\"cornerFlux\"}\n\n        def check(p: parameters.Parameter) -> bool:\n            return p.atLocation(parameters.ParamLocation.CORNERS) and p.hasCategory(parameters.Category.neutronics)\n\n        for p in self.pc.where(check):\n            self.assertTrue(check(p), msg=p)\n            names.remove(p.name)\n        self.assertFalse(names, msg=f\"{names=} should be empty\")\n\n\nclass TestMakeParametersReadOnly(unittest.TestCase):\n    def test_makeParametersReadOnly(self):\n        with TemporaryDirectoryChanger():\n            # copy test reactor to local\n            yamls = glob(os.path.join(TEST_ROOT, \"smallestTestReactor\", \"*.yaml\"))\n            for yamlFile in yamls:\n                copyfile(yamlFile, os.path.basename(yamlFile))\n\n            # load some random test reactor\n            _o, r = loadTestReactor(os.getcwd(), 
inputFileName=\"armiRunSmallest.yaml\")\n\n            # prove we can edit various params at will\n            r.core.p.keff = 1.01\n            b = r.core.getFirstBlock()\n            b.p.power = 123.4\n\n            makeParametersReadOnly(r)\n\n            # now show we can no longer edit those parameters\n            with self.assertRaises(RuntimeError):\n                r.core.p.keff = 0.99\n\n            with self.assertRaises(RuntimeError):\n                b.p.power = 432.1\n"
  },
  {
    "path": "armi/reactor/tests/test_reactors.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Testing for reactors.py.\"\"\"\n\nimport copy\nimport logging\nimport os\nimport pickle\nimport unittest\nfrom math import sqrt\nfrom unittest.mock import patch\n\nfrom numpy.testing import assert_allclose, assert_equal\n\nfrom armi import operators, runLog, settings, tests\nfrom armi.materials import uZr\nfrom armi.physics.neutronics.settings import CONF_XS_KERNEL\nfrom armi.reactor import assemblies, blocks, geometry, grids, reactors\nfrom armi.reactor.components import Hexagon, Rectangle\nfrom armi.reactor.composites import Composite\nfrom armi.reactor.converters import geometryConverters\nfrom armi.reactor.converters.axialExpansionChanger import AxialExpansionChanger\nfrom armi.reactor.flags import Flags\nfrom armi.reactor.grids.hexagonal import HexGrid\nfrom armi.reactor.spentFuelPool import SpentFuelPool\nfrom armi.settings.fwSettings.globalSettings import (\n    CONF_ASSEM_FLAGS_SKIP_AXIAL_EXP,\n    CONF_SORT_REACTOR,\n)\nfrom armi.testing import TESTING_ROOT, loadTestReactor, reduceTestReactorRings  # noqa: F401\nfrom armi.tests import TEST_ROOT, mockRunLogs\nfrom armi.utils import directoryChangers\n\n_THIS_DIR = os.path.dirname(__file__)\n\n\ndef buildOperatorOfEmptyHexBlocks(customSettings=None):\n    \"\"\"\n    Builds a operator w/ a reactor object with some hex assemblies and blocks, but all are empty.\n\n    Doesn't depend on inputs and loads 
quickly.\n\n    Parameters\n    ----------\n    customSettings : dict\n        Dictionary of off-default settings to update\n    \"\"\"\n    cs = settings.Settings()  # fetch new\n    if customSettings is None:\n        customSettings = {}\n\n    customSettings[\"db\"] = False  # stop use of database\n    cs = cs.modified(newSettings=customSettings)\n\n    r = tests.getEmptyHexReactor()\n    r.core.setOptionsFromCs(cs)\n    o = operators.Operator(cs)\n    o.initializeInterfaces(r)\n\n    a = assemblies.HexAssembly(\"fuel\")\n    a.spatialGrid = grids.AxialGrid.fromNCells(1)\n    b = blocks.HexBlock(\"TestBlock\")\n    b.setType(\"fuel\")\n    dims = {\"Tinput\": 600, \"Thot\": 600, \"op\": 16.0, \"ip\": 1, \"mult\": 1}\n    c = Hexagon(\"fuel\", uZr.UZr(), **dims)\n    b.add(c)\n    a.add(b)\n    a.spatialLocator = r.core.spatialGrid[1, 0, 0]\n    o.r.core.add(a)\n    o.r.sort()\n    return o\n\n\ndef buildOperatorOfEmptyCartesianBlocks(customSettings=None):\n    \"\"\"\n    Builds a operator w/ a reactor object with some Cartesian assemblies and blocks, but all are empty.\n\n    Doesn't depend on inputs and loads quickly.\n\n    Parameters\n    ----------\n    customSettings : dict\n        Off-default settings to update\n    \"\"\"\n    cs = settings.Settings()  # fetch new\n    if customSettings is None:\n        customSettings = {}\n\n    customSettings[\"db\"] = False  # stop use of database\n    cs = cs.modified(newSettings=customSettings)\n\n    r = tests.getEmptyCartesianReactor()\n    r.core.setOptionsFromCs(cs)\n    o = operators.Operator(cs)\n    o.initializeInterfaces(r)\n\n    a = assemblies.CartesianAssembly(\"fuel\")\n    a.spatialGrid = grids.AxialGrid.fromNCells(1)\n    b = blocks.CartesianBlock(\"TestBlock\")\n    b.setType(\"fuel\")\n    dims = {\n        \"Tinput\": 600,\n        \"Thot\": 600,\n        \"widthOuter\": 16.0,\n        \"lengthOuter\": 10.0,\n        \"widthInner\": 1,\n        \"lengthInner\": 1,\n        \"mult\": 1,\n    }\n    
c = Rectangle(\"fuel\", uZr.UZr(), **dims)\n    b.add(c)\n    a.add(b)\n    a.spatialLocator = r.core.spatialGrid[1, 0, 0]\n    o.r.core.add(a)\n    o.r.sort()\n    return o\n\n\nclass ReactorTests(unittest.TestCase):\n    @classmethod\n    def setUpClass(cls):\n        # Prepare the input files. This is important so the unit tests run from wherever they need to run from.\n        cls.td = directoryChangers.TemporaryDirectoryChanger()\n        cls.td.__enter__()\n\n    @classmethod\n    def tearDownClass(cls):\n        cls.td.__exit__(None, None, None)\n\n\nclass HexReactorTests(ReactorTests):\n    \"\"\"\n    This is meant to pair with the ``HexReactorReadOnlyTests`` unit test class.\n\n    The tests in this class all modify the Reactor object, so we need to create a new test reactor for each test.\n    \"\"\"\n\n    def setUp(self):\n        self.o, self.r = loadTestReactor(\n            inputFilePath=TESTING_ROOT,\n            inputFileName=\"reactors/thirdSmallHexReactor/thirdSmallHexReactor.yaml\",\n            customSettings={\"trackAssems\": True},\n        )\n        self.r.excore[\"sfp\"].spatialGrid = HexGrid(unitSteps=((2, 0, 0), (0, 3, 0), (0, 0, 0)))\n\n    def test_getAssembliesInCircularRing(self):\n        expectedAssemsInRing = [5, 2]\n        actualAssemsInRing = []\n        for ring in range(1, self.r.core.getNumRings()):\n            actualAssemsInRing.append(len(self.r.core.getAssembliesInCircularRing(ring)))\n        self.assertSequenceEqual(actualAssemsInRing, expectedAssemsInRing)\n\n    def test_getAssembliesInHexRing(self):\n        expectedAssemsInRing = [1, 2, 4]\n        actualAssemsInRing = []\n        for ring in range(1, self.r.core.getNumRings() + 1):\n            actualAssemsInRing.append(len(self.r.core.getAssembliesInSquareOrHexRing(ring)))\n        self.assertSequenceEqual(actualAssemsInRing, expectedAssemsInRing)\n\n    def test_factorySortSetting(self):\n        \"\"\"Create a core object from an input yaml.\"\"\"\n        # 
get a sorted Reactor (the default)\n        cs = settings.Settings(fName=os.path.join(TEST_ROOT, \"armiRun.yaml\"))\n        r0 = reactors.loadFromCs(cs)\n\n        # get an unsorted Reactor (for whatever reason)\n        customSettings = {CONF_SORT_REACTOR: False}\n        cs = cs.modified(newSettings=customSettings)\n        r1 = reactors.loadFromCs(cs)\n\n        # the reactor / core should be the same size\n        self.assertEqual(len(r0), len(r1))\n        self.assertEqual(len(r0.core), len(r1.core))\n\n        # the reactor / core should be in a different order\n        a0 = [a.name for a in r0.core]\n        a1 = [a.name for a in r1.core]\n        self.assertNotEqual(a0, a1)\n\n        # The reactor object is a Composite\n        self.assertTrue(isinstance(r0.core, Composite))\n\n    def test_getSetParameters(self):\n        \"\"\"\n        This test works through multiple levels of the data model hierarchy to test ability to modify parameters.\n\n        .. test:: Parameters are accessible throughout the armi tree.\n            :id: T_ARMI_PARAM1\n            :tests: R_ARMI_PARAM\n\n        .. 
test:: Ensure there is a setting for total core power.\n            :id: T_ARMI_SETTINGS_POWER0\n            :tests: R_ARMI_SETTINGS_POWER\n        \"\"\"\n        # Test at reactor level\n        self.assertEqual(self.r.p.cycle, 0)\n        self.assertEqual(self.r.p.availabilityFactor, 1.0)\n\n        # Test at core level\n        core = self.r.core\n        self.assertGreater(core.p.power, -1)\n\n        core.p.power = 123\n        self.assertEqual(core.p.power, 123)\n\n        # Test at assembly level\n        assembly = core.getFirstAssembly()\n        self.assertGreater(assembly.p.crRodLength, -1)\n\n        assembly.p.crRodLength = 234\n        self.assertEqual(assembly.p.crRodLength, 234)\n\n        # Test at block level\n        block = core.getFirstBlock()\n        self.assertIsNone(block.p.mgFlux)\n\n        block.p.mgFlux = 57\n        self.assertEqual(block.p.mgFlux, 57)\n\n        # Test at component level\n        component = block[0]\n        self.assertEqual(component.p.temperatureInC, 450.0)\n\n    def test_sortChildren(self):\n        self.assertEqual(next(self.r.core.__iter__()), self.r.core[0])\n        self.assertEqual(self.r.core._children, sorted(self.r.core._children))\n\n    def test_sortAssemByRing(self):\n        \"\"\"Demonstrate ring/pos sorting.\"\"\"\n        self.r.core.sortAssemsByRing()\n        self.assertEqual((1, 1), self.r.core[0].spatialLocator.getRingPos())\n        currentRing = -1\n        currentPos = -1\n        for a in self.r.core:\n            ring, pos = a.spatialLocator.getRingPos()\n            self.assertGreaterEqual(ring, currentRing)\n            if ring > currentRing:\n                ring = currentRing\n                currentPos = -1\n            self.assertGreater(pos, currentPos)\n            currentPos = pos\n\n    def test_growToFullCore(self):\n        nAssemThird = len(self.r.core)\n        self.assertEqual(self.r.core.powerMultiplier, 3.0)\n        self.assertFalse(self.r.core.isFullCore)\n        
self.r.core.growToFullCore(self.o.cs)\n        aNums = []\n        for a in self.r.core:\n            self.assertNotIn(a.getNum(), aNums)\n            aNums.append(a.getNum())\n\n        bNames = [b.getName() for b in self.r.core.iterBlocks()]\n        for bName in bNames:\n            self.assertEqual(bNames.count(bName), 1)\n        self.assertEqual(self.r.core.powerMultiplier, 1.0)\n        self.assertTrue(self.r.core.isFullCore)\n        nAssemFull = len(self.r.core)\n        self.assertEqual(nAssemFull, (nAssemThird - 1) * 3 + 1)\n\n    def test_genBlocksByLocName(self):\n        self.r.core.genBlocksByLocName()\n        self.assertGreater(len(self.r.core.blocksByLocName), 20)\n        self.assertIn(\"003-002-002\", self.r.core.blocksByLocName)\n\n    def test_setPitchUniform(self):\n        # 1. Original reactor state\n        originalPitch = 16.142\n        hmMassBefore = 0.0\n        solidMassBefore = 0.0\n        liquidMassBefore = 0.0\n        for b in self.r.core.iterBlocks():\n            self.assertEqual(b.getPitch(), originalPitch)\n            for c in b:\n                hmMassBefore += c.getHMMass()\n                for comp in c:\n                    if comp.containsSolidMaterial():\n                        solidMassBefore += comp.getMass()\n                    else:\n                        liquidMassBefore += comp.getMass()\n\n        # 2. 
decrease pitch size\n        hmMassAfter = 0.0\n        solidMassAfter = 0.0\n        liquidMassAfter = 0.0\n        self.r.core.setPitchUniform(4.0)\n\n        for b in self.r.core.iterBlocks():\n            # verify pitch has correctly reduced\n            self.assertEqual(b.getPitch(), 4.0)\n            for c in b:\n                hmMassAfter += c.getHMMass()\n                for comp in c:\n                    if comp.containsSolidMaterial():\n                        solidMassAfter += comp.getMass()\n                    else:\n                        liquidMassAfter += comp.getMass()\n\n        # verify HM mass has not changed\n        self.assertAlmostEqual(hmMassBefore, hmMassAfter, delta=1e-8)\n\n        # check that solid masses and liquid masses return to the normal state\n        self.assertAlmostEqual(solidMassBefore, solidMassAfter, delta=1e-8)\n        self.assertLessEqual(liquidMassAfter, liquidMassBefore)\n\n        # 3. increase pitch size back to original\n        hmMassFinal = 0.0\n        solidMassFinal = 0.0\n        liquidMassFinal = 0.0\n        self.r.core.setPitchUniform(originalPitch)\n\n        for b in self.r.core.iterBlocks():\n            # verify pitch has correctly reduced\n            self.assertEqual(b.getPitch(), originalPitch)\n            for c in b:\n                hmMassFinal += c.getHMMass()\n                for comp in c:\n                    if comp.containsSolidMaterial():\n                        solidMassFinal += comp.getMass()\n                    else:\n                        liquidMassFinal += comp.getMass()\n\n        # verify HM mass goes back to original\n        self.assertAlmostEqual(hmMassBefore, hmMassFinal, delta=1e-8)\n\n        # check that solid masses and liquid masses return to original\n        self.assertAlmostEqual(solidMassBefore, solidMassFinal, delta=1e-8)\n        self.assertAlmostEqual(liquidMassBefore, liquidMassFinal)\n\n    def test_normalizeNames(self):\n        # these are the correct, 
normalized names\n        numAssems = 7\n        a = self.r.core.getFirstAssembly()\n        correctNames = [a.makeNameFromAssemNum(n) for n in range(numAssems)]\n\n        # validate the reactor is what we think now\n        self.assertEqual(len(self.r.core), numAssems)\n        currentNames = [a.getName() for a in self.r.core]\n        self.assertNotEqual(correctNames, currentNames)\n\n        # validate that we can normalize the names correctly once\n        self.r.normalizeNames()\n        currentNames = [a.getName() for a in self.r.core]\n        self.assertEqual(correctNames, currentNames)\n\n        # validate that repeated applications of this method are stable\n        for _ in range(3):\n            self.r.normalizeNames()\n            currentNames = [a.getName() for a in self.r.core]\n            self.assertEqual(correctNames, currentNames)\n\n    def test_setB10VolOnCreation(self):\n        \"\"\"Test the setting of b.p.initialB10ComponentVol.\"\"\"\n        for controlBlock in self.r.core.iterBlocks(Flags.CONTROL):\n            controlComps = [c for c in controlBlock if c.getNumberDensity(\"B10\") > 0]\n            self.assertEqual(len(controlComps), 1)\n            controlComp = controlComps[0]\n\n            startingVol = controlBlock.p.initialB10ComponentVol\n            self.assertGreater(startingVol, 0)\n            self.assertAlmostEqual(controlComp.getArea(cold=True) * controlBlock.getHeight(), startingVol)\n\n            # input temp is same as hot temp, so change input temp to test that behavior\n            controlComp.inputTemperatureInC = 30\n\n            # somewhat non-sensical since its hot, not cold but we just want to check the ratio\n            controlBlock.setB10VolParam(True)\n\n            self.assertGreater(startingVol, controlBlock.p.initialB10ComponentVol)\n\n            self.assertAlmostEqual(\n                startingVol / controlComp.getThermalExpansionFactor(),\n                controlBlock.p.initialB10ComponentVol,\n       
     )\n\n    def test_getReactor(self):\n        \"\"\"The Core object can return its Reactor parent; test that getter.\"\"\"\n        self.assertTrue(isinstance(self.r.core.r, reactors.Reactor))\n\n        self.r.core.parent = None\n        self.assertIsNone(self.r.core.r)\n\n    def test_addMoreNodes(self):\n        originalMesh = self.r.core.p.axialMesh\n        bigMesh = list(originalMesh)\n        bigMesh[2] = 30.0\n        smallMesh = originalMesh[0:2] + [40.0, 47.0] + originalMesh[2:]\n        newMesh1, originalMeshGood = self.r.core.addMoreNodes(originalMesh)\n        newMesh2, bigMeshGood = self.r.core.addMoreNodes(bigMesh)\n        newMesh3, smallMeshGood = self.r.core.addMoreNodes(smallMesh)\n        expectedMesh = [0.0, 15.0, 25.16, 35.32, 59.2125, 83.105, 106.9975, 130.89, 154.7825, 178.675, 202.5675, 226.46]\n        expectedBigMesh = [\n            0.0,\n            15.0,\n            30.0,\n            35.32,\n            59.2125,\n            83.105,\n            106.9975,\n            130.89,\n            154.7825,\n            178.675,\n            202.5675,\n            226.46,\n        ]\n        expectedSmallMesh = [\n            0.0,\n            15.0,\n            25.16,\n            35.32,\n            40.0,\n            43.724,\n            47.0,\n            59.2125,\n            83.105,\n            106.9975,\n            130.89,\n            154.7825,\n            178.675,\n            202.5675,\n            226.46,\n        ]\n        self.assertListEqual(expectedMesh, newMesh1)\n        self.assertListEqual(expectedBigMesh, newMesh2)\n        for i in range(len(expectedSmallMesh)):\n            self.assertAlmostEqual(expectedSmallMesh[i], newMesh3[i], delta=1e-8)\n        self.assertTrue(originalMeshGood)\n        self.assertTrue(bigMeshGood)\n        self.assertFalse(smallMeshGood)\n\n    def test_restoreReactor(self):\n        \"\"\"Restore a reactor after growing it from third to full core.\n\n        .. 
test:: Convert a third-core to a full-core geometry and then restore it.\n            :id: T_ARMI_THIRD_TO_FULL_CORE1\n            :tests: R_ARMI_THIRD_TO_FULL_CORE\n        \"\"\"\n        numOfAssembliesOneThird = len(self.r.core)\n        self.assertFalse(self.r.core.isFullCore)\n        self.assertEqual(\n            self.r.core.symmetry,\n            geometry.SymmetryType(geometry.DomainType.THIRD_CORE, geometry.BoundaryType.PERIODIC),\n        )\n        # grow to full core\n        converter = self.r.core.growToFullCore(self.o.cs)\n        self.assertTrue(self.r.core.isFullCore)\n        self.assertGreater(len(self.r.core), numOfAssembliesOneThird)\n        self.assertEqual(self.r.core.symmetry.domain, geometry.DomainType.FULL_CORE)\n        # restore back to 1/3 core\n        converter.restorePreviousGeometry(self.r)\n        self.assertEqual(numOfAssembliesOneThird, len(self.r.core))\n        self.assertEqual(\n            self.r.core.symmetry,\n            geometry.SymmetryType(geometry.DomainType.THIRD_CORE, geometry.BoundaryType.PERIODIC),\n        )\n        self.assertFalse(self.r.core.isFullCore)\n        self.assertEqual(numOfAssembliesOneThird, len(self.r.core))\n        self.assertEqual(\n            self.r.core.symmetry,\n            geometry.SymmetryType(geometry.DomainType.THIRD_CORE, geometry.BoundaryType.PERIODIC),\n        )\n\n    def test_saveAllFlux(self):\n        # need a lightweight library to indicate number of groups.\n        class MockLib:\n            numGroups = 5\n\n        self.r.core.lib = MockLib()\n        for b in self.r.core.iterBlocks():\n            b.p.mgFlux = range(5)\n            b.p.adjMgFlux = range(5)\n\n        with directoryChangers.TemporaryDirectoryChanger(root=_THIS_DIR):\n            self.r.core.saveAllFlux()\n\n    def test_getFluxVector(self):\n        class MockLib:\n            numGroups = 5\n\n        self.r.core.lib = MockLib()\n        for b in self.r.core.iterBlocks():\n            b.p.mgFlux = 
range(5)\n            b.p.adjMgFlux = [i + 0.1 for i in range(5)]\n            b.p.extSrc = [i + 0.2 for i in range(5)]\n        mgFlux = self.r.core.getFluxVector(energyOrder=1)\n        adjFlux = self.r.core.getFluxVector(adjoint=True)\n        srcVec = self.r.core.getFluxVector(extSrc=True)\n        fluxVol = self.r.core.getFluxVector(volumeIntegrated=True)\n        blocks = self.r.core.getBlocks()\n        expFlux = [i for i in range(5) for _ in blocks]\n        expAdjFlux = [i + 0.1 for _ in blocks for i in range(5)]\n        expSrcVec = [i + 0.2 for _ in blocks for i in range(5)]\n        expFluxVol = list(range(5)) * len(blocks)\n        assert_allclose(expFlux, mgFlux)\n        assert_allclose(expAdjFlux, adjFlux)\n        assert_allclose(expSrcVec, srcVec)\n        assert_allclose(expFluxVol, fluxVol)\n\n    def test_getFuelBottomHeight(self):\n        for a in self.r.core.getAssemblies(Flags.FUEL):\n            if a[0].hasFlags(Flags.FUEL):\n                a[0].setType(\"mud\")\n            a[1].setType(\"fuel\")\n        fuelBottomHeightRef = self.r.core.getFirstAssembly(Flags.FUEL)[0].getHeight()\n        fuelBottomHeightInCm = self.r.core.getFuelBottomHeight()\n\n        self.assertEqual(fuelBottomHeightInCm, fuelBottomHeightRef)\n\n    def test_isPickleable(self):\n        loaded = pickle.loads(pickle.dumps(self.r))\n\n        # ensure we didn't break the current reactor\n        self.assertIs(self.r.core.spatialGrid.armiObject, self.r.core)\n\n        # make sure that the loaded reactor and grid are aligned\n        self.assertIs(loaded.core.spatialGrid.armiObject, loaded.core)\n        self.assertTrue(all(isinstance(key, grids.LocationBase) for key in loaded.core.childrenByLocator.keys()))\n        loc = loaded.core.spatialGrid[0, 0, 0]\n        loaded.core.sortAssemsByRing()\n        self.r.core.sortAssemsByRing()\n        self.assertIs(loc.grid, loaded.core.spatialGrid)\n        self.assertEqual(loaded.core.childrenByLocator[loc], 
loaded.core[0])\n\n        allIDs = set()\n\n        def checkAdd(comp):\n            self.assertNotIn(id(comp), allIDs)\n            self.assertNotIn(id(comp.p), allIDs)\n            allIDs.add(id(comp))\n            allIDs.add(id(comp.p))\n\n        # check a few locations to be equivalent\n        for a0, a1 in zip(self.r.core, loaded.core):\n            self.assertEqual(str(a0.getLocation()), str(a1.getLocation()))\n            self.assertIs(a0.spatialLocator.grid, self.r.core.spatialGrid)\n            self.assertIs(a1.spatialLocator.grid, loaded.core.spatialGrid)\n            checkAdd(a0)\n            checkAdd(a1)\n            for b0, b1 in zip(a0, a1):\n                self.assertIs(b0.spatialLocator.grid, a0.spatialGrid)\n                self.assertIs(b1.spatialLocator.grid, a1.spatialGrid)\n                self.assertEqual(str(b0.getLocation()), str(b1.getLocation()))\n                self.assertEqual(b0.getSymmetryFactor(), b1.getSymmetryFactor())\n                self.assertEqual(b0.getHMMoles(), b1.getHMMoles())\n                checkAdd(b0)\n                checkAdd(b1)\n\n    def test_removeAssemblyNoSfp(self):\n        with mockRunLogs.BufferLog() as mock:\n            # we should start with a clean slate\n            self.assertEqual(\"\", mock.getStdout())\n            runLog.LOG.startLog(\"test_removeAssemblyNoSfp\")\n            runLog.LOG.setVerbosity(logging.INFO)\n\n            a = self.r.core[-1]  # last assembly\n            aLoc = a.spatialLocator\n            self.assertIsNotNone(aLoc.grid)\n            self.r.excore[\"sfp\"] = None\n            del self.r.excore[\"sfp\"]\n            self.r.core.removeAssembly(a)\n\n            self.assertIn(\"No Spent Fuel Pool\", mock.getStdout())\n\n    def test_createAssemblyOfType(self):\n        \"\"\"Test creation of new assemblies.\"\"\"\n        # basic creation\n        aOld = self.r.core.getFirstAssembly(Flags.FUEL)\n        aNew = self.r.core.createAssemblyOfType(aOld.getType(), cs=self.o.cs)\n 
       self.assertAlmostEqual(aOld.getMass(), aNew.getMass())\n\n        # test axial mesh alignment\n        aNewMesh = aNew.getAxialMesh()\n        for i, meshValue in enumerate(aNewMesh):\n            self.assertAlmostEqual(meshValue, self.r.core.p.referenceBlockAxialMesh[i + 1])  # use i+1 to skip 0.0\n\n        # creation with modified enrichment\n        aNew2 = self.r.core.createAssemblyOfType(aOld.getType(), 0.195, self.o.cs)\n        fuelBlock = aNew2.getFirstBlock(Flags.FUEL)\n        self.assertAlmostEqual(fuelBlock.getUraniumMassEnrich(), 0.195)\n\n        # creation with modified enrichment on an expanded BOL assem.\n        fuelComp = fuelBlock.getComponent(Flags.FUEL)\n        bol = self.r.blueprints.assemblies[aOld.getType()]\n        changer = AxialExpansionChanger()\n        changer.performPrescribedAxialExpansion(bol, [fuelComp], [0.05])\n        aNew3 = self.r.core.createAssemblyOfType(aOld.getType(), 0.195, self.o.cs)\n        self.assertAlmostEqual(aNew3.getFirstBlock(Flags.FUEL).getUraniumMassEnrich(), 0.195)\n        self.assertAlmostEqual(aNew3.getMass(), bol.getMass())\n\n    def test_createAssemOfTypeExpandCore(self):\n        \"\"\"Test creation of new assemblies in an expanded core.\"\"\"\n        # change the mesh of inner blocks\n        mesh = self.r.core.p.referenceBlockAxialMesh[1:]\n        lastIndex = len(mesh) - 1\n        mesh = [val + 5 for val in mesh]\n        mesh[0] -= 5\n        mesh[lastIndex] -= 5\n\n        # expand the core\n        self.r.core.p.referenceBlockAxialMesh = [0] + mesh\n        for a in self.r.core:\n            a.setBlockMesh(mesh)\n        aType = self.r.core.getFirstAssembly(Flags.FUEL).getType()\n\n        # demonstrate we can still create assemblies\n        self.assertTrue(self.r.core.createAssemblyOfType(aType, cs=self.o.cs))\n\n    def test_getScalarEvolution(self):\n        self.r.core.scalarVals[\"fake\"] = 123\n        x = self.r.core.getScalarEvolution(\"fake\")\n        self.assertEqual(x, 
123)\n\n    def test_ifMissingSpatialGrid(self):\n        self.r.core.spatialGrid = None\n\n        with self.assertRaises(ValueError):\n            self.r.core.symmetry\n\n        with self.assertRaises(ValueError):\n            self.r.core.geomType\n\n    def test_pinCoordsAllBlocks(self):\n        \"\"\"Make sure all blocks can get pin coords.\"\"\"\n        for b in self.r.core.iterBlocks():\n            coords = b.getPinCoordinates()\n            self.assertGreater(len(coords), -1)\n\n    def test_updateBlockBOLHeights_DBLoad(self):\n        \"\"\"Test that blueprints assemblies are expanded in DB load.\"\"\"\n        originalAssems = sorted(a for a in self.r.blueprints.assemblies.values())\n        nonEqualParameters = [\"heightBOL\", \"molesHmBOL\", \"massHmBOL\"]\n        equalParameters = [\"smearDensity\", \"nHMAtBOL\", \"enrichmentBOL\"]\n\n        _o, coldHeightR = loadTestReactor(\n            inputFilePath=TESTING_ROOT,\n            inputFileName=\"reactors/thirdSmallHexReactor/thirdSmallHexReactor.yaml\",\n            customSettings={\"inputHeightsConsideredHot\": False},\n        )\n        coldHeightAssems = sorted(a for a in coldHeightR.blueprints.assemblies.values())\n        for a, coldHeightA in zip(originalAssems, coldHeightAssems):\n            if not a.hasFlags(Flags.CONTROL):\n                for b, coldHeightB in zip(a[1:], coldHeightA[1:]):\n                    for param in nonEqualParameters:\n                        p, coldHeightP = b.p[param], coldHeightB.p[param]\n                        if p and coldHeightP:\n                            self.assertNotEqual(p, coldHeightP)\n                        else:\n                            self.assertAlmostEqual(p, coldHeightP)\n                    for param in equalParameters:\n                        p, coldHeightP = b.p[param], coldHeightB.p[param]\n                        self.assertAlmostEqual(p, coldHeightP)\n\n    def test_buildManualZones(self):\n        # define some manual zones in 
the settings\n        newSettings = {}\n        newSettings[\"zoneDefinitions\"] = [\n            \"ring-1: 001-001\",\n            \"ring-2: 002-001, 002-002\",\n            \"ring-3: 003-001, 003-002, 003-003\",\n        ]\n        cs = self.o.cs.modified(newSettings=newSettings)\n        self.r.core.buildManualZones(cs)\n\n        zonez = self.r.core.zones\n        self.assertEqual(len(list(zonez)), 3)\n        self.assertIn(\"002-001\", zonez[\"ring-2\"])\n        self.assertIn(\"003-002\", zonez[\"ring-3\"])\n\n    def test_buildManualZonesEmpty(self):\n        # ensure there are no zone definitions in the settings\n        newSettings = {}\n        newSettings[\"zoneDefinitions\"] = []\n        cs = self.o.cs.modified(newSettings=newSettings)\n\n        # verify that buildZones behaves well when no zones are defined\n        self.r.core.buildManualZones(cs)\n        self.assertEqual(len(list(self.r.core.zones)), 0)\n\n    def test_setPowerIfNecessary(self):\n        self.assertAlmostEqual(self.r.core.p.power, 0)\n        self.assertAlmostEqual(self.r.core.p.powerDensity, 0)\n\n        # to start, this method shouldn't do anything\n        self.r.core.setPowerIfNecessary()\n        self.assertAlmostEqual(self.r.core.p.power, 0)\n\n        # take the powerDensity when needed\n        self.r.core.p.power = 0\n        self.r.core.p.powerDensity = 1e9\n        mass = self.r.core.getHMMass()\n        self.r.core.setPowerIfNecessary()\n        self.assertAlmostEqual(self.r.core.p.power, 1e9 * mass)\n\n        # don't take the powerDensity when not needed\n        self.r.core.p.power = 3e9\n        self.r.core.p.powerDensity = 2e9\n        self.r.core.setPowerIfNecessary()\n        self.assertAlmostEqual(self.r.core.p.power, 3e9)\n\n    def test_findAllMeshPoints(self):\n        \"\"\"Test findAllMeshPoints().\n\n        .. 
test:: Test that the reactor can calculate its core block mesh.\n            :id: T_ARMI_R_MESH\n            :tests: R_ARMI_R_MESH\n        \"\"\"\n        # lets do some basic sanity checking of the meshpoints\n        x, y, z = self.r.core.findAllMeshPoints()\n\n        # no two meshpoints should be the same, and they should all be monotonically increasing\n        for xx in range(1, len(x)):\n            self.assertGreater(x[xx], x[xx - 1], msg=f\"x={xx}\")\n\n        for yy in range(1, len(y)):\n            self.assertGreater(y[yy], y[yy - 1], msg=f\"y={yy}\")\n\n        for zz in range(1, len(z)):\n            self.assertGreater(z[zz], z[zz - 1], msg=f\"z={zz}\")\n\n        # the z-index should start at zero (the bottom)\n        self.assertEqual(z[0], 0)\n\n        # ensure the X and Y mesh spacing is correct (for a hex core)\n        pitch = self.r.core.spatialGrid.pitch\n\n        xPitch = pitch / 2\n        for xx in range(1, len(x)):\n            self.assertAlmostEqual(x[xx] - x[xx - 1], xPitch, delta=0.0001)\n\n        yPitch = sqrt(3) * pitch / 2\n        for yy in range(1, len(y)):\n            self.assertAlmostEqual(y[yy] - y[yy - 1], yPitch, delta=0.001)\n\n    def test_removeAssembliesInRing(self):\n        aLoc = [self.r.core.spatialGrid.getLocatorFromRingAndPos(3, i + 1) for i in range(12)]\n        assems = {\n            i: self.r.core.childrenByLocator[loc] for i, loc in enumerate(aLoc) if loc in self.r.core.childrenByLocator\n        }\n        self.r.core.removeAssembliesInRing(3, self.o.cs)\n        for i, a in assems.items():\n            self.assertNotEqual(aLoc[i], a.spatialLocator)\n            self.assertEqual(a.spatialLocator.grid, self.r.excore[\"sfp\"].spatialGrid)\n\n    def test_removeAssembly(self):\n        \"\"\"Test the removeAssembly method.\n\n        In particular, the Settings here set trackAssems to True, so when an Assembly is removed\n        from the Core, it shows up in the SFP.\n        \"\"\"\n        a = 
self.r.core[-1]  # last assembly\n        b = a[-1]  # use the last block in case we ever figure out stationary blocks\n        aLoc = a.spatialLocator\n        self.assertIsNotNone(aLoc.grid)\n        bLoc = b.spatialLocator\n        self.r.core.removeAssembly(a)\n        self.assertNotEqual(aLoc, a.spatialLocator)\n\n        # confirm the Assembly is now in the SFP\n        self.assertEqual(a.spatialLocator.grid, self.r.excore[\"sfp\"].spatialGrid)\n\n        # confirm only attached to removed assem\n        self.assertIs(bLoc, b.spatialLocator)  # block location does not change\n        self.assertIs(a, b.parent)\n        self.assertIs(a, b.spatialLocator.grid.armiObject)\n\n    def test_removeAssembliesInRingHex(self):\n        \"\"\"\n        Since the test reactor is hex, we need to use the overrideCircularRingMode option\n        to remove assemblies from it.\n        \"\"\"\n        self.assertEqual(self.r.core.getNumRings(), 3)\n        for ringNum in range(6, 10):\n            self.r.core.removeAssembliesInRing(ringNum, self.o.cs, overrideCircularRingMode=True)\n        self.assertEqual(self.r.core.getNumRings(), 3)\n\n\nclass HexReactorReadOnlyTests(unittest.TestCase):\n    \"\"\"\n    This is meant to pair with the ``HexReactorTests`` unit test class.\n\n    The tests in this class only READ, and not WRITE to the Reactor object, so we only have to create one test reactor.\n    \"\"\"\n\n    @classmethod\n    def setUpClass(cls):\n        cls.td = directoryChangers.TemporaryDirectoryChanger()\n        cls.td.__enter__()\n        cls.o, cls.r = loadTestReactor(\n            inputFilePath=TESTING_ROOT,\n            inputFileName=\"reactors/thirdSmallHexReactor/thirdSmallHexReactor.yaml\",\n            customSettings={\"trackAssems\": True},\n        )\n\n    @classmethod\n    def tearDownClass(cls):\n        cls.td.__exit__(None, None, None)\n\n    def test_coreSfp(self):\n        \"\"\"The reactor object includes a core and an SFP.\n\n        .. 
test:: The reactor object is a composite.\n            :id: T_ARMI_R\n            :tests: R_ARMI_R\n        \"\"\"\n        self.assertTrue(isinstance(self.r.core, reactors.Core))\n        self.assertTrue(isinstance(self.r.excore[\"sfp\"], SpentFuelPool))\n\n        self.assertTrue(isinstance(self.r, Composite))\n        self.assertTrue(isinstance(self.r.core, Composite))\n        self.assertTrue(isinstance(self.r.excore[\"sfp\"], Composite))\n\n    def test_getTotalParam(self):\n        # verify that the block params are being read.\n        val = self.r.core.getTotalBlockParam(\"power\")\n        val2 = self.r.core.getTotalBlockParam(\"power\", addSymmetricPositions=True)\n        self.assertEqual(val2 / self.r.core.powerMultiplier, val)\n\n        with self.assertRaises(ValueError):\n            self.r.core.getTotalBlockParam(generationNum=1)\n\n    def test_geomType(self):\n        self.assertEqual(self.r.core.geomType, geometry.GeomType.HEX)\n\n    def test_getBlocksByIndices(self):\n        indices = [(1, 1, 1)]\n        actualBlocks = self.r.core.getBlocksByIndices(indices)\n        actualNames = [b.getName() for b in actualBlocks]\n        expectedNames = [\"B0005-001\"]\n        self.assertListEqual(expectedNames, actualNames)\n\n    def test_getAllXsSuffixes(self):\n        actualSuffixes = self.r.core.getAllXsSuffixes()\n        expectedSuffixes = [\"AA\", \"BA\"]\n        self.assertListEqual(expectedSuffixes, actualSuffixes)\n\n    def test_countBlocksOfType(self):\n        numControlBlocks = self.r.core.countBlocksWithFlags([Flags.DUCT, Flags.CONTROL])\n        self.assertEqual(numControlBlocks, 0)\n\n        numControlBlocks = self.r.core.countBlocksWithFlags([Flags.DUCT, Flags.CONTROL, Flags.FUEL], Flags.CONTROL)\n        self.assertEqual(numControlBlocks, 0)\n\n    def test_countFuelAxialBlocks(self):\n        \"\"\"Tests that the users definition of fuel blocks is preserved.\"\"\"\n        numFuelBlocks = self.r.core.countFuelAxialBlocks()\n       
 self.assertEqual(numFuelBlocks, 1)\n\n    def test_getFirstFuelBlockAxialNode(self):\n        firstFuelBlock = self.r.core.getFirstFuelBlockAxialNode()\n        self.assertEqual(firstFuelBlock, 1)\n\n    def test_getMaxAssembliesInHexRing(self):\n        maxAssems = self.r.core.getMaxAssembliesInHexRing(3)\n        self.assertEqual(maxAssems, 4)\n\n    def test_getMaxNumPins(self):\n        numPins = self.r.core.getMaxNumPins()\n        self.assertEqual(271, numPins)\n\n    def test_findAxialMeshIndexOf(self):\n        numMeshPoints = len(self.r.core.p.axialMesh) - 2  # -1 for typical reason, -1 more because mesh includes 0\n        self.assertEqual(self.r.core.findAxialMeshIndexOf(0.0), 0)\n        self.assertEqual(self.r.core.findAxialMeshIndexOf(0.1), 0)\n        self.assertEqual(self.r.core.findAxialMeshIndexOf(self.r.core[0].getHeight()), numMeshPoints)\n        self.assertEqual(\n            self.r.core.findAxialMeshIndexOf(self.r.core[0].getHeight() - 0.1),\n            numMeshPoints,\n        )\n        self.assertEqual(self.r.core.findAxialMeshIndexOf(self.r.core[0][0].getHeight() + 0.1), 1)\n\n    def test_findAllAxialMeshPoints(self):\n        mesh = self.r.core.findAllAxialMeshPoints(applySubMesh=False)\n\n        self.assertEqual(mesh[0], 0)\n        self.assertAlmostEqual(mesh[-1], self.r.core[0].getHeight())\n\n        blockMesh = self.r.core.getFirstAssembly(Flags.FUEL).spatialGrid._bounds[2]\n        assert_allclose(blockMesh, mesh)\n\n    def test_findAxialMeshsSubmesh(self):\n        \"\"\"Find all axial mesh points with a provided sub-mesh.\"\"\"\n        referenceMesh = [\n            0.0,\n            15.0,\n            25.16,\n            35.32,\n            59.2125,\n            83.105,\n            106.9975,\n            130.89,\n            154.7825,\n            178.675,\n            202.5675,\n            226.46,\n        ]\n        mesh = self.r.core.findAllAxialMeshPoints(assems=[self.r.core.getFirstAssembly(Flags.FUEL)], 
applySubMesh=True)\n        self.assertListEqual(referenceMesh, mesh)\n\n    def test_findAllAziMeshPoints(self):\n        aziPoints = self.r.core.findAllAziMeshPoints()\n        expectedPoints = [-16.142, -8.071, 0.0, 8.071, 16.142, 24.213]\n        assert_allclose(expectedPoints, aziPoints)\n\n    def test_findAllRadMeshPoints(self):\n        radPoints = self.r.core.findAllRadMeshPoints()\n        expectedPoints = [-13.979382, 0.0, 13.979382, 27.958764, 41.938146]\n        assert_allclose(expectedPoints, radPoints)\n\n    def test_getAssemblyPitch(self):\n        self.assertEqual(self.r.core.getAssemblyPitch(), 16.142)\n\n    def test_getNumAssemsAllRingsFilled(self):\n        \"\"\"Basic test of getNumAssembliesWithAllRingsFilledOut.\"\"\"\n        nRings = self.r.core.getNumRings(indexBased=True)\n        nAssmWithBlanks = self.r.core.getNumAssembliesWithAllRingsFilledOut(nRings)\n        self.assertEqual(8, nAssmWithBlanks)\n\n    @patch(\"armi.reactor.reactors.Core.powerMultiplier\", 1)\n    def test_getNumAssemsWithAllRingsBipass(self):\n        \"\"\"Test edge case in getNumAssembliesWithAllRingsFilledOut by bypassing some of the logic.\"\"\"\n        nAssems = self.r.core.getNumAssembliesWithAllRingsFilledOut(3)\n        self.assertEqual(19, nAssems)\n\n    def test_getNumEnergyGroups(self):\n        # this Core doesn't have a loaded ISOTXS library, so this test is minimally useful\n        with self.assertRaises(AttributeError):\n            self.r.core.getNumEnergyGroups()\n\n    def test_getMinimumPercentFluxInFuel(self):\n        # there is no flux in the test reactor YET, so this test is minimally useful\n        with self.assertRaises(ZeroDivisionError):\n            _targetRing, _fluxFraction = self.r.core.getMinimumPercentFluxInFuel()\n\n    def test_getAssemblyWithLoc(self):\n        \"\"\"\n        Get assembly by location, in a couple different ways to ensure they all work.\n\n        .. 
test:: Get assembly by location.\n            :id: T_ARMI_R_GET_ASSEM0\n            :tests: R_ARMI_R_GET_ASSEM\n        \"\"\"\n        a0 = self.r.core.getAssemblyWithStringLocation(\"003-012\")\n        a1 = self.r.core.getAssemblyWithAssemNum(assemNum=1)\n        a2 = self.r.core.getAssembly(locationString=\"003-012\")\n\n        self.assertEqual(a0, a2)\n        self.assertEqual(a1, a2)\n        self.assertEqual(a1.getLocation(), \"003-012\")\n\n    def test_getAssemblyWithName(self):\n        \"\"\"Test getting an assembly by name.\n\n        .. test:: Get assembly by name.\n            :id: T_ARMI_R_GET_ASSEM1\n            :tests: R_ARMI_R_GET_ASSEM\n        \"\"\"\n        a1 = self.r.core.getAssemblyWithAssemNum(assemNum=1)\n        a2 = self.r.core.getAssembly(assemblyName=\"A0001\")\n\n        self.assertEqual(a1, a2)\n        self.assertEqual(a1.name, \"A0001\")\n\n    def test_getDominantMaterial(self):\n        dominantDuct = self.r.core.getDominantMaterial(Flags.DUCT)\n        dominantFuel = self.r.core.getDominantMaterial(Flags.FUEL)\n        dominantCool = self.r.core.getDominantMaterial(Flags.COOLANT)\n\n        self.assertEqual(dominantDuct.getName(), \"HT9\")\n        self.assertEqual(dominantFuel.getName(), \"UraniumOxide\")\n        self.assertEqual(dominantCool.getName(), \"Sodium\")\n\n    def test_getSymmetryFactor(self):\n        \"\"\"\n        Test getSymmetryFactor().\n\n        .. 
test:: Get the core symmetry.\n            :id: T_ARMI_R_SYMM\n            :tests: R_ARMI_R_SYMM\n        \"\"\"\n        for b in self.r.core.iterBlocks():\n            sym = b.getSymmetryFactor()\n            i, j, _ = b.spatialLocator.getCompleteIndices()\n            if i == 0 and j == 0:\n                self.assertEqual(sym, 3.0)\n            else:\n                self.assertEqual(sym, 1.0)\n\n    def test_getAssembliesOnSymmetryLine(self):\n        center = self.r.core.getAssembliesOnSymmetryLine(grids.BOUNDARY_CENTER)\n        self.assertEqual(len(center), 1)\n        upper = self.r.core.getAssembliesOnSymmetryLine(grids.BOUNDARY_120_DEGREES)\n        self.assertEqual(len(upper), 0)\n        lower = self.r.core.getAssembliesOnSymmetryLine(grids.BOUNDARY_0_DEGREES)\n        self.assertEqual(len(lower), 1)\n\n    def test_getGridBounds(self):\n        \"\"\"Test getGridBounds() works on different scales.\n\n        .. test:: Test that assembly grids nest inside core grids.\n            :id: T_ARMI_GRID_NEST\n            :tests: R_ARMI_GRID_NEST\n        \"\"\"\n        (minI, maxI), (minJ, maxJ), (_minK, _maxK) = self.r.core.getBoundingIndices()\n        self.assertEqual((minI, maxI), (0, 2))\n        self.assertEqual((minJ, maxJ), (-1, 2))\n\n        randomBlock = self.r.core.getFirstAssembly()\n        (minI, maxI), (minJ, maxJ), (_minK, _maxK) = randomBlock.getBoundingIndices()\n        self.assertEqual((minI, maxI), (2, 2))\n        self.assertEqual((minJ, maxJ), (-1, -1))\n\n    def test_locations(self):\n        loc = self.r.core.spatialGrid.getLocatorFromRingAndPos(3, 2)\n        a = self.r.core.childrenByLocator[loc]\n        assert_allclose(a.spatialLocator.indices, [1, 1, 0])\n        for bi, b in enumerate(a):\n            assert_allclose(b.spatialLocator.getCompleteIndices(), [1, 1, bi])\n        self.assertEqual(a.getLocation(), \"003-002\")\n        self.assertEqual(a[0].getLocation(), \"003-002-000\")\n\n    def test_getMass(self):\n        # 
If these are not in agreement check on block symmetry factor being applied to volumes\n        mass1 = self.r.core.getMass()\n        mass2 = sum([b.getMass() for b in self.r.core.iterBlocks()])\n        assert_allclose(mass1, mass2)\n\n    def test_getNumRings(self):\n        self.assertEqual(len(self.r.core.circularRingList), 0)\n        self.assertEqual(self.r.core.getNumRings(indexBased=True), 3)\n        self.assertEqual(self.r.core.getNumRings(indexBased=False), 3)\n\n        self.r.core.circularRingList = {1, 2, 3}\n        self.assertEqual(len(self.r.core.circularRingList), 3)\n        self.assertEqual(self.r.core.getNumRings(indexBased=True), 3)\n        self.assertEqual(self.r.core.getNumRings(indexBased=False), 3)\n\n    @patch(\"armi.reactor.reactors.Core.getAssemblies\")\n    def test_whenNoAssemblies(self, mockGetAssemblies):\n        \"\"\"Test various edge cases when there are no assemblies.\"\"\"\n        mockGetAssemblies.return_value = []\n\n        self.assertEqual(self.r.core.countBlocksWithFlags(Flags.FUEL), 0)\n        self.assertEqual(self.r.core.countFuelAxialBlocks(), 0)\n        self.assertGreater(self.r.core.getFirstFuelBlockAxialNode(), 9e9)\n\n    def test_addMultipleCores(self):\n        \"\"\"Test the catch that a reactor can only have one core.\"\"\"\n        with self.assertRaises(RuntimeError):\n            self.r.add(self.r.core)\n\n    def test_getNozzleTypes(self):\n        nozzleTypes = self.r.core.getNozzleTypes()\n        expectedTypes = [\"Default\"]\n        for nozzle in expectedTypes:\n            self.assertIn(nozzle, nozzleTypes)\n\n    def test_getAvgTemp(self):\n        t0 = self.r.core.getAvgTemp([Flags.CLAD, Flags.WIRE, Flags.DUCT])\n        self.assertAlmostEqual(t0, 450.0, delta=0.01)\n\n        t1 = self.r.core.getAvgTemp([Flags.CLAD, Flags.FUEL])\n        self.assertAlmostEqual(t1, 450.04232366477936, delta=0.01)\n\n        t2 = self.r.core.getAvgTemp([Flags.CLAD, Flags.WIRE, Flags.DUCT, Flags.FUEL])\n        
self.assertAlmostEqual(t2, 450.02442095419906, delta=0.01)\n\n    def test_getNuclideCategories(self):\n        \"\"\"Test that nuclides are categorized correctly.\"\"\"\n        self.r.core.getNuclideCategories()\n        self.assertIn(\"coolant\", self.r.core._nuclideCategories)\n        self.assertIn(\"structure\", self.r.core._nuclideCategories)\n        self.assertIn(\"fuel\", self.r.core._nuclideCategories)\n        self.assertEqual(self.r.core._nuclideCategories[\"coolant\"], set([\"NA23\"]))\n        self.assertIn(\"FE56\", self.r.core._nuclideCategories[\"structure\"])\n        self.assertIn(\"U235\", self.r.core._nuclideCategories[\"fuel\"])\n\n    def test_differentNuclideModels(self):\n        self.assertEqual(self.o.cs[CONF_XS_KERNEL], \"MC2v3\")\n        _o2, r2 = loadTestReactor(\n            inputFilePath=TESTING_ROOT,\n            inputFileName=\"reactors/thirdSmallHexReactor/thirdSmallHexReactor.yaml\",\n            customSettings={CONF_XS_KERNEL: \"MC2v2\"},\n        )\n\n        self.assertNotEqual(set(self.r.blueprints.elementsToExpand), set(r2.blueprints.elementsToExpand))\n\n        for b2, b3 in zip(r2.core.iterBlocks(), self.r.core.iterBlocks()):\n            for element in self.r.blueprints.elementsToExpand:\n                # nucspec allows elemental mass to be computed\n                mass2 = b2.getMass(element.symbol)\n                mass3 = b3.getMass(element.symbol)\n                assert_allclose(mass2, mass3)\n\n                constituentNucs = [nn.name for nn in element.nuclides if nn.a > 0]\n                nuclideLevelMass3 = b3.getMass(constituentNucs)\n                assert_allclose(mass3, nuclideLevelMass3)\n\n    def test_applyThermalExpanCoreConst(self):\n        \"\"\"Test that assemblies in core are correctly expanded.\n\n        Notes\n        -----\n        All assertions skip the first block as it has no 'Delta T' and does not expand.\n        \"\"\"\n        originalAssems = self.r.core.getAssemblies()\n        # 
stash original axial mesh info\n        oldRefBlockAxialMesh = self.r.core.p.referenceBlockAxialMesh\n        oldAxialMesh = self.r.core.p.axialMesh\n\n        nonEqualParameters = [\"heightBOL\", \"molesHmBOL\", \"massHmBOL\"]\n        equalParameters = [\"smearDensity\", \"nHMAtBOL\", \"enrichmentBOL\"]\n\n        o, coldHeightR = loadTestReactor(\n            inputFilePath=TESTING_ROOT,\n            inputFileName=\"reactors/thirdSmallHexReactor/thirdSmallHexReactor.yaml\",\n            customSettings={\n                \"inputHeightsConsideredHot\": False,\n                \"assemFlagsToSkipAxialExpansion\": [\"feed fuel\"],\n            },\n        )\n        aToSkip = list(Flags.fromStringIgnoreErrors(t) for t in o.cs[CONF_ASSEM_FLAGS_SKIP_AXIAL_EXP])\n\n        for i, val in enumerate(oldRefBlockAxialMesh[1:]):\n            self.assertNotEqual(val, coldHeightR.core.p.referenceBlockAxialMesh[i])\n        for i, val in enumerate(oldAxialMesh[1:]):\n            self.assertNotEqual(val, coldHeightR.core.p.axialMesh[i])\n\n        coldHeightAssems = coldHeightR.core.getAssemblies()\n        for a, coldHeightA in zip(originalAssems, coldHeightAssems):\n            if a.hasFlags(Flags.CONTROL) or any(a.hasFlags(aFlags) for aFlags in aToSkip):\n                continue\n            for b, coldHeightB in zip(a[1:], coldHeightA[1:]):\n                for param in nonEqualParameters:\n                    p, coldHeightP = b.p[param], coldHeightB.p[param]\n                    if p and coldHeightP:\n                        self.assertNotEqual(p, coldHeightP, f\"{param} {p} {coldHeightP}\")\n                    else:\n                        self.assertAlmostEqual(p, coldHeightP)\n                for param in equalParameters:\n                    p, coldHeightP = b.p[param], coldHeightB.p[param]\n                    self.assertAlmostEqual(p, coldHeightP)\n\n\nclass HexReactorSoloTests(ReactorTests):\n    \"\"\"\n    This is meant to pair with the ``HexReactorTests`` unit 
test class.\n\n    Each test here creates its own, slightly unique, test reactor.\n    \"\"\"\n\n    def test_nonUniformAssems(self):\n        o, r = loadTestReactor(customSettings={\"nonUniformAssemFlags\": [\"primary control\"]})\n        a = o.r.core.getFirstAssembly(Flags.FUEL)\n        self.assertTrue(all(b.p.topIndex != 0 for b in a[1:]))\n        a = o.r.core.getFirstAssembly(Flags.PRIMARY)\n        self.assertTrue(all(b.p.topIndex == 0 for b in a))\n        originalHeights = [b.p.height for b in a]\n        differntMesh = [val + 2 for val in r.core.p.referenceBlockAxialMesh]\n        # won't change because nonUniform assem doesn't conform to reference mesh\n        a.setBlockMesh(differntMesh)\n        heights = [b.p.height for b in a]\n        self.assertEqual(originalHeights, heights)\n\n\nclass BigHexReactorTests(ReactorTests):\n    \"\"\"\n    This is meant to pair with the ``HexReactorTests`` unit test class.\n\n    These tests all need a larger test reactor. Ideally, we will migrate these to smaller test reactors one day.\n    \"\"\"\n\n    def setUp(self):\n        self.o, self.r = loadTestReactor(inputFilePath=TEST_ROOT, customSettings={\"trackAssems\": True})\n\n    def test_genAssembliesAddedThisCycle(self):\n        allAssems = self.r.core.getAssemblies()\n        self.assertTrue(all(a1 is a2 for a1, a2 in zip(allAssems, self.r.core.genAssembliesAddedThisCycle())))\n        a = self.r.core.getFirstAssembly()\n        newA = copy.deepcopy(a)\n        newA.name = None\n        self.r.p.cycle = 1\n        self.assertEqual(len(list(self.r.core.genAssembliesAddedThisCycle())), 0)\n        self.r.core.removeAssembly(a)\n        self.r.core.add(newA)\n        self.assertEqual(next(self.r.core.genAssembliesAddedThisCycle()), newA)\n\n    def test_createFreshFeed(self):\n        # basic creation\n        aOld = self.r.core.getFirstAssembly(Flags.FEED)\n        aNew = self.r.core.createFreshFeed(cs=self.o.cs)\n        self.assertAlmostEqual(aOld.getMass(), 
aNew.getMass())\n\n    def test_getAssemblies(self):\n        \"\"\"Basic test of getAssemblies, with and without including the SFP.\n\n        .. test:: The spent fuel pool is a Composite structure.\n            :id: T_ARMI_SFP2\n            :tests: R_ARMI_SFP\n        \"\"\"\n        # where are we starting\n        numCoreStart = len(self.r.core)\n        numTotalStart = len(self.r.core.getAssemblies(includeSFP=True))\n\n        # remove one assembly and confirm behavior\n        for i in range(1, 5):\n            self.r.core.removeAssembly(self.r.core.getFirstAssembly())\n            self.assertEqual(len(self.r.core), numCoreStart - i)\n            self.assertEqual(len(self.r.core.getAssemblies(includeSFP=True)), numTotalStart)\n\n    def test_findNeighbors(self):\n        \"\"\"\n        Find neighbors of a given assembly.\n\n        .. test:: Retrieve neighboring assemblies of a given assembly.\n            :id: T_ARMI_R_FIND_NEIGHBORS\n            :tests: R_ARMI_R_FIND_NEIGHBORS\n        \"\"\"\n        loc = self.r.core.spatialGrid.getLocatorFromRingAndPos(1, 1)\n        a = self.r.core.childrenByLocator[loc]\n        neighbs = self.r.core.findNeighbors(a, duplicateAssembliesOnReflectiveBoundary=True)\n        locs = [a.spatialLocator.getRingPos() for a in neighbs]\n        self.assertEqual(len(neighbs), 6)\n        self.assertIn((2, 1), locs)\n        self.assertIn((2, 2), locs)\n        self.assertEqual(locs.count((2, 1)), 3)\n\n        loc = self.r.core.spatialGrid.getLocatorFromRingAndPos(1, 1)\n        a = self.r.core.childrenByLocator[loc]\n        neighbs = self.r.core.findNeighbors(a, duplicateAssembliesOnReflectiveBoundary=True)\n        locs = [a.spatialLocator.getRingPos() for a in neighbs]\n        self.assertEqual(locs, [(2, 1), (2, 2)] * 3, 6)\n\n        loc = self.r.core.spatialGrid.getLocatorFromRingAndPos(2, 2)\n        a = self.r.core.childrenByLocator[loc]\n\n        neighbs = self.r.core.findNeighbors(a, 
duplicateAssembliesOnReflectiveBoundary=True)\n        locs = [a.spatialLocator.getRingPos() for a in neighbs]\n        self.assertEqual(len(neighbs), 6)\n        self.assertEqual(locs, [(3, 2), (3, 3), (3, 12), (2, 1), (1, 1), (2, 1)])\n\n        # try with edge assemblies\n        # With edges, the neighbor is the one that's actually next to it.\n        converter = geometryConverters.EdgeAssemblyChanger()\n        converter.addEdgeAssemblies(self.r.core)\n        loc = self.r.core.spatialGrid.getLocatorFromRingAndPos(2, 2)\n        a = self.r.core.childrenByLocator[loc]\n        neighbs = self.r.core.findNeighbors(a, duplicateAssembliesOnReflectiveBoundary=True)\n        locs = [a.spatialLocator.getRingPos() for a in neighbs]\n        self.assertEqual(len(neighbs), 6)\n        # in this case no locations that aren't actually in the core should be returned\n        self.assertEqual(locs, [(3, 2), (3, 3), (3, 4), (2, 1), (1, 1), (2, 1)])\n        converter.removeEdgeAssemblies(self.r.core)\n\n        # try with full core\n        self.r.core.growToFullCore(self.o.cs)\n        loc = self.r.core.spatialGrid.getLocatorFromRingAndPos(3, 4)\n        a = self.r.core.childrenByLocator[loc]\n        neighbs = self.r.core.findNeighbors(a)\n        self.assertEqual(len(neighbs), 6)\n        locs = [a.spatialLocator.getRingPos() for a in neighbs]\n        for loc in [(2, 2), (2, 3), (3, 3), (3, 5), (4, 5), (4, 6)]:\n            self.assertIn(loc, locs)\n\n        loc = self.r.core.spatialGrid.getLocatorFromRingAndPos(2, 2)\n        a = self.r.core.childrenByLocator[loc]\n        neighbs = self.r.core.findNeighbors(a)\n        locs = [a.spatialLocator.getRingPos() for a in neighbs]\n        for loc in [(1, 1), (2, 1), (2, 3), (3, 2), (3, 3), (3, 4)]:\n            self.assertIn(loc, locs)\n\n        # Try the duplicate option in full core as well\n        loc = self.r.core.spatialGrid.getLocatorFromRingAndPos(2, 2)\n        a = self.r.core.childrenByLocator[loc]\n        
neighbs = self.r.core.findNeighbors(a, duplicateAssembliesOnReflectiveBoundary=True)\n        locs = [a.spatialLocator.getRingPos() for a in neighbs]\n        self.assertEqual(len(neighbs), 6)\n        self.assertEqual(locs, [(3, 2), (3, 3), (3, 4), (2, 3), (1, 1), (2, 1)])\n\n\nclass CartesianReactorTests(ReactorTests):\n    def setUp(self):\n        self.o = buildOperatorOfEmptyCartesianBlocks()\n        self.r = self.o.r\n\n    def test_add(self):\n        a = self.r.core.getFirstAssembly()\n        numA = len(a)\n        a.add(blocks.CartesianBlock(\"test cart block\"))\n        self.assertEqual(len(a), numA + 1)\n\n        with self.assertRaises(TypeError):\n            a.add(blocks.HexBlock(\"test hex block\"))\n\n    def test_getAssemblyPitch(self):\n        # Cartesian pitch should have 2 dims since it could be a rectangle that is not square.\n        assert_equal(self.r.core.getAssemblyPitch(), [10.0, 16.0])\n\n    def test_getAssembliesInSquareRing(self, exclusions=[2]):\n        expectedAssemsInRing = [1, 0]\n        actualAssemsInRing = []\n        for ring in range(1, self.r.core.getNumRings() + 1):\n            actualAssemsInRing.append(len(self.r.core.getAssembliesInSquareOrHexRing(ring)))\n        self.assertSequenceEqual(actualAssemsInRing, expectedAssemsInRing)\n\n    def test_getNuclideCategoriesLogging(self):\n        \"\"\"Simplest possible test of the getNuclideCategories method and its logging.\"\"\"\n        log = mockRunLogs.BufferLog()\n\n        # this strange namespace-stomping is used to the test to set the logger in reactors.Core\n        from armi.reactor import reactors\n\n        reactors.runLog = runLog\n        runLog.LOG = log\n\n        # run the actual method in question\n        self.r.core.getNuclideCategories()\n        messages = log.getStdout()\n\n        self.assertIn(\"Nuclide categorization\", messages)\n        self.assertIn(\"Structure\", messages)\n\n\nclass CartesianReactorNeighborTests(ReactorTests):\n    def 
setUp(self):\n        self.r = loadTestReactor(TEST_ROOT, inputFileName=\"zpprTest.yaml\")[1]\n\n    def test_findNeighborsCartesian(self):\n        \"\"\"Find neighbors of a given assembly in a Cartesian grid.\"\"\"\n        loc = self.r.core.spatialGrid[1, 1, 0]\n        a = self.r.core.childrenByLocator[loc]\n        neighbs = self.r.core.findNeighbors(a)\n        locs = [tuple(a.spatialLocator.indices[:2]) for a in neighbs]\n        self.assertEqual(len(neighbs), 4)\n        self.assertIn((2, 1), locs)\n        self.assertIn((1, 2), locs)\n        self.assertIn((0, 1), locs)\n        self.assertIn((1, 0), locs)\n\n        # try with edge assembly\n        loc = self.r.core.spatialGrid[0, 0, 0]\n        a = self.r.core.childrenByLocator[loc]\n        neighbs = self.r.core.findNeighbors(a, showBlanks=False)\n        locs = [tuple(a.spatialLocator.indices[:2]) for a in neighbs]\n        self.assertEqual(len(neighbs), 2)\n        # in this case no locations that aren't actually in the core should be returned\n        self.assertIn((1, 0), locs)\n        self.assertIn((0, 1), locs)\n"
  },
  {
    "path": "armi/reactor/tests/test_rz_reactors.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Test loading Theta-RZ reactor models.\"\"\"\n\nimport math\nimport os\nimport unittest\n\nfrom armi import settings\nfrom armi.reactor import reactors\nfrom armi.testing import TESTING_ROOT\n\n\nclass TestRZTReactorModern(unittest.TestCase):\n    @classmethod\n    def setUpClass(cls):\n        cs = settings.Settings(fName=os.path.join(TESTING_ROOT, \"reactors\", \"godiva\", \"godiva.armi.unittest.yaml\"))\n        cls.r = reactors.loadFromCs(cs)\n\n    def test_loadRZT_reactor(self):\n        \"\"\"\n        The Godiva benchmark model is a HEU sphere with a radius of 8.74 cm.\n\n        This tests loading and verifies the reactor is loaded correctly by comparing volumes against\n        expected volumes for full core (including void boundary conditions) and just the fuel.\n        \"\"\"\n        godivaRadius = 8.7407\n        reactorRadius = 9\n        reactorHeight = 17.5\n        refReactorVolume = math.pi * reactorRadius**2 * reactorHeight / 8\n        refFuelVolume = 4.0 / 3.0 * math.pi * (godivaRadius) ** 3 / 8\n\n        reactorVolumes = []\n        fuelVolumes = []\n        for b in self.r.core.iterBlocks():\n            reactorVolumes.append(b.getVolume())\n            for c in b:\n                if \"godiva\" in c.name:\n                    fuelVolumes.append(c.getVolume())\n\n        # verify the total reactor volume is as expected\n        
tolerance = 1e-3\n        error = math.fabs((refReactorVolume - sum(reactorVolumes)) / refReactorVolume)\n        self.assertLess(error, tolerance)\n\n        # verify the total fuel volume is as expected\n        error = math.fabs((refFuelVolume - sum(fuelVolumes)) / refFuelVolume)\n        self.assertLess(error, tolerance)\n\n    def test_loadRZT(self):\n        self.assertEqual(len(self.r.core), 3)\n        radMeshes = [a.p.RadMesh for a in self.r.core]\n        aziMeshes = [a.p.AziMesh for a in self.r.core]\n        print(f\"radMeshes: {radMeshes}\")\n        print(f\"aziMeshes: {aziMeshes}\")\n        self.assertTrue(all(radMesh == 2 for radMesh in radMeshes))\n        self.assertTrue(all(aziMesh == 7 for aziMesh in aziMeshes))\n\n    def test_findAllMeshPoints(self):\n        \"\"\"Test findAllMeshPoints().\"\"\"\n        i, _, _ = self.r.core.findAllMeshPoints()\n        self.assertLess(i[-1], 2 * math.pi)\n"
  },
  {
    "path": "armi/reactor/tests/test_zones.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Test for Zones.\"\"\"\n\nimport logging\nimport os\nimport unittest\n\nfrom armi import runLog\nfrom armi.reactor import (\n    assemblies,\n    blocks,\n    blueprints,\n    geometry,\n    grids,\n    reactors,\n    zones,\n)\nfrom armi.testing import TESTING_ROOT, loadTestReactor\nfrom armi.tests import mockRunLogs\n\nTHIS_DIR = os.path.dirname(__file__)\n\n\nclass TestZone(unittest.TestCase):\n    def setUp(self):\n        # set up a Reactor, for the spatialLocator\n        bp = blueprints.Blueprints()\n        r = reactors.Reactor(\"zonetest\", bp)\n        r.add(reactors.Core(\"Core\"))\n        r.core.spatialGrid = grids.HexGrid.fromPitch(1.0)\n        r.core.spatialGrid._bounds = (\n            [0, 1, 2, 3, 4],\n            [0, 10, 20, 30, 40],\n            [0, 20, 40, 60, 80],\n        )\n        r.core.spatialGrid.symmetry = geometry.SymmetryType(\n            geometry.DomainType.THIRD_CORE, geometry.BoundaryType.PERIODIC\n        )\n        r.core.spatialGrid.geomType = geometry.HEX\n\n        # some testing constants\n        self.numAssems = 5\n        self.numBlocks = 5\n\n        # build a list of Assemblies\n        self.aList = []\n        for ring in range(self.numAssems):\n            a = assemblies.HexAssembly(\"fuel\")\n            a.spatialGrid = r.core.spatialGrid\n            a.spatialLocator = r.core.spatialGrid[ring, 1, 0]\n         
   a.parent = r.core\n            self.aList.append(a)\n\n        # build a list of Blocks\n        self.bList = []\n        for _ in range(self.numBlocks):\n            b = blocks.HexBlock(\"TestHexBlock\")\n            b.setType(\"defaultType\")\n            b.p.nPins = 3\n            b.setHeight(3.0)\n            self.aList[0].add(b)\n            self.bList.append(b)\n\n    def test_addItem(self):\n        \"\"\"\n        Test adding an item.\n\n        .. test:: Add item to a zone.\n            :id: T_ARMI_ZONE0\n            :tests: R_ARMI_ZONE\n        \"\"\"\n        zone = zones.Zone(\"test_addItem\")\n        zone.addItem(self.aList[0])\n        self.assertIn(self.aList[0].getLocation(), zone)\n\n        self.assertRaises(AssertionError, zone.addItem, \"nope\")\n\n    def test_removeItem(self):\n        zone = zones.Zone(\"test_removeItem\", [a.getLocation() for a in self.aList])\n        zone.removeItem(self.aList[0])\n        self.assertNotIn(self.aList[0].getLocation(), zone)\n\n        self.assertRaises(AssertionError, zone.removeItem, \"also nope\")\n\n    def test_addItems(self):\n        \"\"\"\n        Test adding items.\n\n        .. test:: Add multiple items to a zone.\n            :id: T_ARMI_ZONE1\n            :tests: R_ARMI_ZONE\n        \"\"\"\n        zone = zones.Zone(\"test_addItems\")\n        zone.addItems(self.aList)\n        for a in self.aList:\n            self.assertIn(a.getLocation(), zone)\n\n    def test_removeItems(self):\n        zone = zones.Zone(\"test_removeItems\", [a.getLocation() for a in self.aList])\n        zone.removeItems(self.aList)\n        for a in self.aList:\n            self.assertNotIn(a.getLocation(), zone)\n\n    def test_addLoc(self):\n        \"\"\"\n        Test adding a location.\n\n        .. 
test:: Add location to a zone.\n            :id: T_ARMI_ZONE2\n            :tests: R_ARMI_ZONE\n        \"\"\"\n        zone = zones.Zone(\"test_addLoc\")\n        zone.addLoc(self.aList[0].getLocation())\n        self.assertIn(self.aList[0].getLocation(), zone)\n\n        self.assertRaises(AssertionError, zone.addLoc, 1234)\n\n    def test_removeLoc(self):\n        zone = zones.Zone(\"test_removeLoc\", [a.getLocation() for a in self.aList])\n        zone.removeLoc(self.aList[0].getLocation())\n        self.assertNotIn(self.aList[0].getLocation(), zone)\n\n        self.assertRaises(AssertionError, zone.removeLoc, 1234)\n\n    def test_addLocs(self):\n        \"\"\"\n        Test adding locations.\n\n        .. test:: Add multiple locations to a zone.\n            :id: T_ARMI_ZONE3\n            :tests: R_ARMI_ZONE\n        \"\"\"\n        zone = zones.Zone(\"test_addLocs\")\n        zone.addLocs([a.getLocation() for a in self.aList])\n        for a in self.aList:\n            self.assertIn(a.getLocation(), zone)\n\n    def test_removeLocs(self):\n        zone = zones.Zone(\"test_removeLocs\", [a.getLocation() for a in self.aList])\n        zone.removeLocs([a.getLocation() for a in self.aList])\n        for a in self.aList:\n            self.assertNotIn(a.getLocation(), zone)\n\n    def test_iteration(self):\n        locs = [a.getLocation() for a in self.aList]\n        zone = zones.Zone(\"test_iteration\")\n\n        # BONUS TEST: Zone.__len__()\n        self.assertEqual(len(zone), 0)\n        zone.addLocs(locs)\n        self.assertEqual(len(zone), self.numAssems)\n\n        # loop once to prove looping works\n        for aLoc in zone:\n            self.assertIn(aLoc, locs)\n            self.assertTrue(aLoc in zone)  # Tests Zone.__contains__()\n\n        # loop twice to make sure it iterates nicely.\n        for aLoc in zone:\n            self.assertIn(aLoc, locs)\n            self.assertTrue(aLoc in zone)  # Tests Zone.__contains__()\n\n    def test_repr(self):\n  
      zone = zones.Zone(\"test_repr\")\n        zone.addItems(self.aList)\n        zStr = \"Zone test_repr with 5 Assemblies\"\n        self.assertIn(zStr, str(zone))\n\n    def test_blocks(self):\n        zone = zones.Zone(\"test_blocks\", zoneType=blocks.Block)\n\n        # test the blocks were correctly added\n        self.assertEqual(len(zone), 0)\n        zone.addItems(self.bList)\n        self.assertEqual(len(zone), self.numBlocks)\n\n        # loop once to prove looping works\n        for aLoc in zone:\n            self.assertIn(aLoc, zone.locs)\n            self.assertTrue(aLoc in zone)  # test Zone.__contains__()\n\n\nclass TestZones(unittest.TestCase):\n    def setUp(self):\n        # spin up the test reactor\n        self.o, self.r = loadTestReactor(\n            inputFilePath=TESTING_ROOT, inputFileName=\"reactors/thirdSmallHexReactor/thirdSmallHexReactor.yaml\"\n        )\n\n        # build some generic test zones to get started with\n        newSettings = {}\n        newSettings[\"zoneDefinitions\"] = [\n            \"ring-1: 001-001\",\n            \"ring-2: 002-001, 002-002\",\n            \"ring-3: 003-001, 003-002, 003-003\",\n        ]\n        cs = self.o.cs.modified(newSettings=newSettings)\n        self.r.core.buildManualZones(cs)\n        self.zonez = self.r.core.zones\n\n    def test_dictionaryInterface(self):\n        \"\"\"\n        Test creating and interacting with the Zones object.\n\n        .. 
test:: Create collection of Zones.\n            :id: T_ARMI_ZONE4\n            :tests: R_ARMI_ZONE\n        \"\"\"\n        zs = zones.Zones()\n\n        # validate the addZone() and __len__() work\n        self.assertEqual(len(zs.names), 0)\n        zs.addZone(self.zonez[\"ring-2\"])\n        self.assertEqual(len(zs.names), 1)\n\n        # validate that __contains__() works\n        self.assertFalse(\"ring-1\" in zs)\n        self.assertTrue(\"ring-2\" in zs)\n        self.assertFalse(\"ring-3\" in zs)\n\n        # validate that __remove__() works\n        del zs[\"ring-2\"]\n        self.assertEqual(len(zs.names), 0)\n\n        # validate that addZones() works\n        zs.addZones(self.zonez)\n        self.assertEqual(len(zs.names), 3)\n        self.assertTrue(\"ring-1\" in zs)\n        self.assertTrue(\"ring-2\" in zs)\n        self.assertTrue(\"ring-3\" in zs)\n\n        # validate that get() works\n        ring3 = zs[\"ring-3\"]\n        self.assertEqual(len(ring3), 3)\n        self.assertIn(\"003-002\", ring3)\n\n        # validate that removeZones() works\n        zonesToRemove = [z.name for z in self.zonez]\n        zs.removeZones(zonesToRemove)\n        self.assertEqual(len(zs.names), 0)\n        self.assertFalse(\"ring-1\" in zs)\n        self.assertFalse(\"ring-2\" in zs)\n        self.assertFalse(\"ring-3\" in zs)\n\n    def test_findZoneItIsIn(self):\n        # customize settings for this test\n        newSettings = {}\n        newSettings[\"zoneDefinitions\"] = [\n            \"ring-1: 001-001\",\n            \"ring-2: 002-001, 002-002\",\n        ]\n        cs = self.o.cs.modified(newSettings=newSettings)\n\n        self.r.core.buildManualZones(cs)\n        daZones = self.r.core.zones\n        for zone in daZones:\n            a = self.r.core.getAssemblyWithStringLocation(sorted(zone.locs)[0])\n            aZone = daZones.findZoneItIsIn(a)\n            self.assertEqual(aZone, zone)\n\n        # get assem from first zone\n        a = 
self.r.core.getAssemblyWithStringLocation(sorted(daZones[daZones.names[0]].locs)[0])\n        # remove the zone\n        daZones.removeZone(daZones.names[0])\n\n        # ensure that we can no longer find the assembly in the zone\n        self.assertEqual(daZones.findZoneItIsIn(a), None)\n\n    def test_getZoneLocations(self):\n        # customize settings for this test\n        newSettings = {}\n        newSettings[\"zoneDefinitions\"] = [\n            \"ring-1: 001-001\",\n            \"ring-2: 002-001, 002-002\",\n        ]\n        cs = self.o.cs.modified(newSettings=newSettings)\n        self.r.core.buildManualZones(cs)\n\n        # test the retrieval of zone locations\n        self.assertEqual(set([\"002-001\", \"002-002\"]), self.r.core.zones.getZoneLocations(\"ring-2\"))\n\n    def test_getAllLocations(self):\n        # customize settings for this test\n        newSettings = {}\n        newSettings[\"zoneDefinitions\"] = [\n            \"ring-1: 001-001\",\n            \"ring-2: 002-001, 002-002\",\n        ]\n        cs = self.o.cs.modified(newSettings=newSettings)\n        self.r.core.buildManualZones(cs)\n\n        # test the retrieval of zone locations\n        self.assertEqual(set([\"001-001\", \"002-001\", \"002-002\"]), self.r.core.zones.getAllLocations())\n\n    def test_summary(self):\n        # make sure we have a couple of zones to test on\n        for name0 in [\"ring-1\", \"ring-2\", \"ring-3\"]:\n            self.assertIn(name0, self.zonez.names)\n\n        # test the summary (in the log)\n        with mockRunLogs.BufferLog() as mock:\n            runLog.LOG.startLog(\"test_summary\")\n            runLog.LOG.setVerbosity(logging.INFO)\n            self.assertEqual(\"\", mock.getStdout())\n\n            self.zonez.summary()\n\n            self.assertIn(\"zoneDefinitions:\", mock.getStdout())\n            self.assertIn(\"- ring-1: \", mock.getStdout())\n            self.assertIn(\"- ring-2: \", mock.getStdout())\n            self.assertIn(\"- 
ring-3: \", mock.getStdout())\n            self.assertIn(\"003-001, 003-002, 003-003\", mock.getStdout())\n\n    def test_sortZones(self):\n        # create some zones in non-alphabetical order\n        zs = zones.Zones()\n        zs.addZone(self.zonez[\"ring-3\"])\n        zs.addZone(self.zonez[\"ring-1\"])\n        zs.addZone(self.zonez[\"ring-2\"])\n\n        # check the initial order of the zones\n        self.assertEqual(list(zs._zones.keys())[0], \"ring-3\")\n        self.assertEqual(list(zs._zones.keys())[1], \"ring-1\")\n        self.assertEqual(list(zs._zones.keys())[2], \"ring-2\")\n\n        # sort the zones\n        zs.sortZones()\n\n        # check the final order of the zones\n        self.assertEqual(list(zs._zones.keys())[0], \"ring-1\")\n        self.assertEqual(list(zs._zones.keys())[1], \"ring-2\")\n        self.assertEqual(list(zs._zones.keys())[2], \"ring-3\")\n\n\nclass TestZonesFile(unittest.TestCase):\n    def setUp(self):\n        # spin up the test reactor\n        self.o, self.r = loadTestReactor()\n\n        # build zones based on a file\n        newSettings = {}\n        newSettings[\"zonesFile\"] = os.path.join(THIS_DIR, \"zonesFile.yaml\")\n        cs = self.o.cs.modified(newSettings=newSettings)\n        self.r.core.buildManualZones(cs)\n        self.zonez = self.r.core.zones\n\n    def test_zonesFile(self):\n        \"\"\"\n        Test creating and interacting with a zones file.\n\n        .. test:: Create collection of Zones based on a yaml file.\n            :id: T_ARMI_ZONE5\n            :tests: R_ARMI_ZONE\n        \"\"\"\n        self.assertEqual(set([\"001-001\"]), self.r.core.zones.getZoneLocations(\"a_zone\"))\n        self.assertEqual(set([\"002-001\"]), self.r.core.zones.getZoneLocations(\"a_different_zone\"))\n"
  },
  {
    "path": "armi/reactor/tests/zonesFile.yaml",
    "content": "customZonesMap:\n    001-001: a_zone\n    002-001: a_different_zone"
  },
  {
    "path": "armi/reactor/zones.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nA Zone object is a collection of locations in the Core.\nA Zones object is a collection of Zone objects.\nTogether, they are used to conceptually divide the Core for analysis.\n\"\"\"\n\nfrom typing import Iterator, List, Optional, Set, Union\n\nfrom armi import runLog\nfrom armi.reactor.assemblies import Assembly\nfrom armi.reactor.blocks import Block\n\n\nclass Zone:\n    \"\"\"\n    A group of locations in the Core, used to divide it up for analysis.\n    Each location represents an Assembly or a Block.\n\n    .. impl:: A user can define a collection of armi locations.\n        :id: I_ARMI_ZONE0\n        :implements: R_ARMI_ZONE\n\n        The Zone class facilitates the creation of a Zone object representing a collection of\n        locations in the Core. A Zone contains a group of locations in the Core, used to subdivide\n        it for analysis. Each location represents an Assembly or a Block, where a single Zone must\n        contain items of the same type (i.e., Assembly or Block). Methods are provided to add or\n        remove one or more locations to/from the Zone, and similarly, add or remove one or more\n        items with a Core location (i.e., Assemblies or Blocks) to/from the Zone. 
In addition,\n        several methods are provided to facilitate the retrieval of locations from a Zone by\n        performing functions to check if a location exists in the Zone, looping through the\n        locations in the Zone in alphabetical order, and returning the number of locations in the\n        Zone, etc.\n    \"\"\"\n\n    VALID_TYPES = (Assembly, Block)\n\n    def __init__(self, name: str, locations: Optional[List] = None, zoneType: type = Assembly):\n        self.name = name\n\n        # A single Zone must contain items of the same type\n        if zoneType not in Zone.VALID_TYPES:\n            raise TypeError(\"Invalid Type {0}; A Zone can only be of type {1}\".format(zoneType, Zone.VALID_TYPES))\n        self.zoneType = zoneType\n\n        # a Zone is mostly just a collection of locations in the Reactor\n        if locations is None:\n            self.locs = set()\n        else:\n            # NOTE: We are not validating the locations.\n            self.locs = set(locations)\n\n    def __contains__(self, loc: str) -> bool:\n        return loc in self.locs\n\n    def __iter__(self) -> Iterator[str]:\n        \"\"\"Loop through the locations, in alphabetical order.\"\"\"\n        for loc in sorted(self.locs):\n            yield loc\n\n    def __len__(self) -> int:\n        \"\"\"Return the number of locations.\"\"\"\n        return len(self.locs)\n\n    def __repr__(self) -> str:\n        zType = \"Assemblies\"\n        if self.zoneType == Block:\n            zType = \"Blocks\"\n\n        return \"<Zone {0} with {1} {2}>\".format(self.name, len(self), zType)\n\n    def addLoc(self, loc: str) -> None:\n        \"\"\"\n        Adds the location to this Zone.\n\n        Parameters\n        ----------\n        loc : str\n            Location within the Core.\n\n        Notes\n        -----\n        This method does not validate that the location given is somehow \"valid\". 
We are not doing\n        any reverse lookups in the Reactor to prove that the type or location is valid. Because this\n        would require heavier computation, and would add some chicken-and-the-egg problems into\n        instantiating a new Reactor.\n        \"\"\"\n        assert isinstance(loc, str), \"The location must be a str: {0}\".format(loc)\n        self.locs.add(loc)\n\n    def removeLoc(self, loc: str) -> None:\n        \"\"\"\n        Removes the location from this Zone.\n\n        Parameters\n        ----------\n        loc : str\n            Location within the Core.\n\n        Notes\n        -----\n        This method does not validate that the location given is somehow \"valid\".\n        We are not doing any reverse lookups in the Reactor to prove that the type\n        or location is valid. Because this would require heavier computation, and\n        would add some chicken-and-the-egg problems into instantiating a new Reactor.\n\n        Returns\n        -------\n        None\n        \"\"\"\n        assert isinstance(loc, str), \"The location must be a str: {0}\".format(loc)\n        self.locs.remove(loc)\n\n    def addLocs(self, locs: List) -> None:\n        \"\"\"\n        Adds the locations to this Zone.\n\n        Parameters\n        ----------\n        items : list\n            List of str objects\n        \"\"\"\n        for loc in locs:\n            self.addLoc(loc)\n\n    def removeLocs(self, locs: List) -> None:\n        \"\"\"\n        Removes the locations from this Zone.\n\n        Parameters\n        ----------\n        items : list\n            List of str objects\n        \"\"\"\n        for loc in locs:\n            self.removeLoc(loc)\n\n    def addItem(self, item: Union[Assembly, Block]) -> None:\n        \"\"\"\n        Adds the location of an Assembly or Block to a zone.\n\n        Parameters\n        ----------\n        item : Assembly or Block\n            A single item with Core location (Assembly or Block)\n        
\"\"\"\n        assert issubclass(type(item), self.zoneType), \"The item ({0}) but be have a type in: {1}\".format(\n            item, Zone.VALID_TYPES\n        )\n        self.addLoc(item.getLocation())\n\n    def removeItem(self, item: Union[Assembly, Block]) -> None:\n        \"\"\"\n        Removes the location of an Assembly or Block from a zone.\n\n        Parameters\n        ----------\n        item : Assembly or Block\n            A single item with Core location (Assembly or Block)\n        \"\"\"\n        assert issubclass(type(item), self.zoneType), \"The item ({0}) but be have a type in: {1}\".format(\n            item, Zone.VALID_TYPES\n        )\n        self.removeLoc(item.getLocation())\n\n    def addItems(self, items: List) -> None:\n        \"\"\"\n        Adds the locations of a list of Assemblies or Blocks to a zone.\n\n        Parameters\n        ----------\n        items : list\n            List of Assembly/Block objects\n        \"\"\"\n        for item in items:\n            self.addItem(item)\n\n    def removeItems(self, items: List) -> None:\n        \"\"\"\n        Removes the locations of a list of Assemblies or Blocks from a zone.\n\n        Parameters\n        ----------\n        items : list\n            List of Assembly/Block objects\n        \"\"\"\n        for item in items:\n            self.removeItem(item)\n\n\nclass Zones:\n    \"\"\"Collection of Zone objects.\n\n    .. impl:: A user can define a collection of armi zones.\n        :id: I_ARMI_ZONE1\n        :implements: R_ARMI_ZONE\n\n        The Zones class facilitates the creation of a Zones object representing a collection of Zone\n        objects. 
Methods are provided to add or remove one or more Zone to/from the Zones object.\n        Likewise, methods are provided to validate that the zones are mutually exclusive, obtain the\n        location labels of zones, return the Zone object where a particular Assembly or Block\n        resides, sort the Zone objects alphabetically, and summarize the zone definitions. In\n        addition, methods are provided to facilitate the retrieval of Zone objects by name, loop\n        through the Zones in order, and return the number of Zone objects.\n    \"\"\"\n\n    def __init__(self):\n        \"\"\"Build a Zones object.\"\"\"\n        self._zones = {}\n\n    @property\n    def names(self) -> List:\n        \"\"\"Ordered names of contained zones.\n\n        Returns\n        -------\n        list\n            Alphabetical collection of Zone names\n        \"\"\"\n        return sorted(self._zones.keys())\n\n    def __contains__(self, name: str) -> bool:\n        return name in self._zones\n\n    def __delitem__(self, name: str) -> None:\n        del self._zones[name]\n\n    def __getitem__(self, name: str) -> Zone:\n        \"\"\"Access a zone by name.\"\"\"\n        return self._zones[name]\n\n    def __iter__(self) -> Iterator[Zone]:\n        \"\"\"Loop through the zones in order.\"\"\"\n        for nm in sorted(self._zones.keys()):\n            yield self._zones[nm]\n\n    def __len__(self) -> int:\n        \"\"\"Return the number of Zone objects.\"\"\"\n        return len(self._zones)\n\n    def addZone(self, zone: Zone) -> None:\n        \"\"\"Add a zone to the collection.\n\n        Parameters\n        ----------\n        zone: Zone\n            A new Zone to add to this collection.\n        \"\"\"\n        if zone.name in self._zones:\n            raise ValueError(\"Cannot add {} because a zone of that name already exists.\".format(zone.name))\n        self._zones[zone.name] = zone\n\n    def addZones(self, zones: List) -> None:\n        \"\"\"\n        Add 
multiple zones to the collection, and validate the Zones collection still make sense.\n\n        Parameters\n        ----------\n        zones: List (or Zones)\n            A multiple new Zone objects to add to this collection.\n        \"\"\"\n        for zone in zones:\n            self.addZone(zone)\n\n        self.checkDuplicates()\n\n    def removeZone(self, name: str) -> None:\n        \"\"\"Delete a zone by name.\n\n        Parameters\n        ----------\n        name: str\n            Name of zone to remove\n        \"\"\"\n        del self[name]\n\n    def removeZones(self, names: List) -> None:\n        \"\"\"\n        Delete multiple zones by name.\n\n        Parameters\n        ----------\n        names: List (or names)\n            Multiple Zone names to remove from this collection.\n        \"\"\"\n        for name in names:\n            self.removeZone(name)\n\n    def checkDuplicates(self) -> None:\n        \"\"\"\n        Validate that the zones are mutually exclusive.\n\n        That is, make sure that no item appears in more than one Zone.\n        \"\"\"\n        allLocs = []\n        for zone in self:\n            allLocs.extend(list(zone.locs))\n\n        # use set lotic to test for duplicates\n        if len(allLocs) == len(set(allLocs)):\n            # no duplicates\n            return\n\n        # find duplicates by removing unique locs from the full list\n        for uniqueLoc in set(allLocs):\n            allLocs.remove(uniqueLoc)\n\n        # there are duplicates, so raise an error\n        locs = sorted(set(allLocs))\n        raise RuntimeError(\"Duplicate items found in Zones: {0}\".format(locs))\n\n    def getZoneLocations(self, zoneNames: List) -> Set:\n        \"\"\"\n        Get the location labels of a particular (or a few) zone(s).\n\n        Parameters\n        ----------\n        zoneNames : str, or list\n            the zone name or list of names\n\n        Returns\n        -------\n        zoneLocs : set\n            List of 
location labels of this/these zone(s)\n        \"\"\"\n        if not isinstance(zoneNames, list):\n            zoneNames = [zoneNames]\n\n        zoneLocs = set()\n        for zn in zoneNames:\n            try:\n                thisZoneLocs = set(self[zn])\n            except KeyError:\n                runLog.error(\"The zone {0} does not exist. Please define it.\".format(zn))\n                raise\n            zoneLocs.update(thisZoneLocs)\n\n        return zoneLocs\n\n    def getAllLocations(self) -> Set:\n        \"\"\"Return all locations across every Zone in this Zones object.\n\n        Returns\n        -------\n        set\n            A combination set of all locations, from every Zone\n        \"\"\"\n        locs = set()\n        for zone in self:\n            locs.update(self[zone.name])\n\n        return locs\n\n    def findZoneItIsIn(self, a: Union[Assembly, Block]) -> Optional[Zone]:\n        \"\"\"\n        Return the zone object that this Assembly/Block is in.\n\n        Parameters\n        ----------\n        a : Assembly or Block\n           The item to locate\n\n        Returns\n        -------\n        zone : Zone object that the input item resides in.\n        \"\"\"\n        aLoc = a.getLocation()\n        zoneFound = False\n        for zone in self:\n            if aLoc in zone.locs:\n                zoneFound = True\n                return zone\n\n        if not zoneFound:\n            runLog.debug(f\"Was not able to find which zone {a} is in\", single=True)\n\n        return None\n\n    def sortZones(self, reverse=False) -> None:\n        \"\"\"Sorts the Zone objects alphabetically.\n\n        Parameters\n        ----------\n        reverse : bool, optional\n            Whether to sort in reverse order, by default False\n        \"\"\"\n        self._zones = dict(sorted(self._zones.items(), reverse=reverse))\n\n    def summary(self) -> None:\n        \"\"\"\n        Summarize the zone definitions clearly, and in a way that can be 
copy/pasted\n        back into a settings file under \"zoneDefinitions\", if the user wants to\n        manually reuse these zones later.\n\n        Examples\n        --------\n            zoneDefinitions:\n            - ring-1: 001-001\n            - ring-2: 002-001, 002-002\n            - ring-3: 003-001, 003-002, 003-003\n        \"\"\"\n        # log a quick header\n        runLog.info(\"zoneDefinitions:\")\n\n        # log the zone definitions in a way that can be copy/pasted back into a settings file\n        for name in sorted(self._zones.keys()):\n            locs = sorted(self._zones[name].locs)\n            line = \"- {0}: \".format(name) + \", \".join(locs)\n            runLog.info(line)\n"
  },
  {
    "path": "armi/resources/burn-chain.yaml",
    "content": "AM241:\n  - transmutation:\n      branch: 1.0\n      products:\n        - PU240\n      type: n2n\n  - transmutation:\n      branch: 0.1384\n      products:\n        - PU242\n      type: nGamma\n  - transmutation:\n      branch: 0.6616\n      products:\n        - CM242\n        - DUMP2\n      type: nGamma\n  - transmutation:\n      branch: 0.2\n      products:\n        - AM242M\n        - DUMP2\n      type: nGamma\n  - transmutation:\n      branch: 1.0\n      products:\n        - LFP41\n      type: fission\n  # add tritium from ternary fission from JEFF3.1.1 on top of the LFPs (400 keV)\n  - transmutation:\n      branch: 1.6500e-04\n      products:\n        - H3\n        - DUMP1\n      type: fission\n  - decay:\n      branch: 1.0\n      products:\n        - NP237\n      type: ad\n  - decay:\n      branch: 4.120055e-12\n      products:\n        - LFP41\n      type: sf\nAM242G:\n  - transmutation:\n      branch: 1.0\n      products:\n        - AM241\n      type: n2n\n  - transmutation:\n      branch: 1.0\n      products:\n        - LFP41\n      type: fission\n  - transmutation:\n      branch: 1.0\n      products:\n        - AM243\n      type: nGamma\n  - decay:\n      branch: 0.173\n      products:\n        - PU242\n      type: ec\n  - decay:\n      branch: 0.827\n      products:\n        - CM242\n      type: bmd\nAM242M:\n  - transmutation:\n      branch: 1.0\n      products:\n        - AM241\n      type: n2n\n  - transmutation:\n      branch: 1.0\n      products:\n        - LFP41\n      type: fission\n  - transmutation:\n      branch: 1.0\n      products:\n        - AM243\n      type: nGamma\n  - decay:\n      branch: 0.822865\n      products:\n        - PU242\n      type: ec\n  - decay:\n      branch: 0.172135\n      products:\n        - CM242\n      type: bmd\n  - decay:\n      branch: 0.005\n      products:\n        - NP238\n      type: ad\nAM243:\n  - transmutation:\n      branch: 0.5\n      products:\n        - AM242M\n      type: n2n\n  - 
transmutation:\n      branch: 0.0865\n      products:\n        - CM242\n      type: n2n\n  - transmutation:\n      branch: 0.4135\n      products:\n        - PU242\n      type: n2n\n  - transmutation:\n      branch: 1.0\n      products:\n        - LFP41\n      type: fission\n  - transmutation:\n      branch: 1.0\n      products:\n        - CM244\n      type: nGamma\n  - decay:\n      branch: 1.0\n      products:\n        - NP239\n        - PU239\n      type: ad\nB10:\n  - transmutation:\n      branch: 1.0\n      products:\n        - B11\n      type: nGamma\n  - transmutation:\n      branch: 1.0\n      products:\n        - LI7\n        - DUMP1\n      type: nalph\n  - transmutation:\n      branch: 1.0\n      products:\n        - DUMP1\n      type: n2n\n  - transmutation:\n      branch: 1.0\n      products:\n        - BE9\n        - DUMP1\n      type: nd\n  - transmutation:\n      branch: 1.0\n      products:\n        - BE10\n        - DUMP1\n      type: np\nB11:\n  - transmutation:\n      branch: 1.0\n      products:\n        - DUMP1\n      type: nGamma\n  - transmutation:\n      # n-alphas to Li-8 -> Be-8 -> 2 alphas\n      branch: 1.0\n      products:\n        - HE4\n        - DUMP1\n      type: nalph\n      productParticle: HE4\n  - transmutation:\n      branch: 1.0\n      products:\n        - B10\n      type: n2n\n  - transmutation:\n      branch: 1.0\n      products:\n        - BE9\n        - DUMP1\n      type: nt\nBE9:\n  - transmutation:\n      branch: 1.0\n      products:\n        - LI6\n        - DUMP1\n      type: nalph\n  - transmutation:\n      branch: 1.0\n      products:\n        - LI7\n        - DUMP1\n      type: nt\nBK249:\n  - transmutation:\n      branch: 1.0\n      products:\n        - CM244\n      type: n2n\n  - transmutation:\n      branch: 1.0\n      products:\n        - LFP41\n      type: fission\n  - transmutation:\n      branch: 1.0\n      products:\n        - CF250\n      type: nGamma\n  - decay:\n      branch: 1.0\n      products:\n        
- CF249\n      type: bmd\n  - decay:\n      branch: 4.755215e-10\n      products:\n        - LFP41\n      type: sf\nCF249:\n  - transmutation:\n      branch: 1.0\n      products:\n        - CM244\n      type: n2n\n  - transmutation:\n      branch: 1.0\n      products:\n        - LFP41\n      type: fission\n  - transmutation:\n      branch: 1.0\n      products:\n        - CF250\n      type: nGamma\n  - decay:\n      branch: 1.0\n      products:\n        - CM245\n      type: ad\n  - decay:\n      branch: 5.00000e-09\n      products:\n        - LFP41\n      type: sf\nCF250:\n  - transmutation:\n      branch: 1.0\n      products:\n        - CF249\n      type: n2n\n  - transmutation:\n      branch: 1.0\n      products:\n        - LFP41\n      type: fission\n  - transmutation:\n      branch: 1.0\n      products:\n        - CF251\n      type: nGamma\n  - decay:\n      branch: 1.0\n      products:\n        - CM246\n      type: ad\n  - decay:\n      branch: 7.70000e-04\n      products:\n        - LFP41\n      type: sf\nCF251:\n  - transmutation:\n      branch: 1.0\n      products:\n        - CF250\n      type: n2n\n  - transmutation:\n      branch: 1.0\n      products:\n        - LFP41\n      type: fission\n  - transmutation:\n      branch: 1.0\n      products:\n        - CF252\n      type: nGamma\n  - decay:\n      branch: 1.0\n      products:\n        - CM247\n      type: ad\nCF252:\n  - transmutation:\n      branch: 1.0\n      products:\n        - CF251\n      type: n2n\n  - transmutation:\n      branch: 1.0\n      products:\n        - LFP41\n      type: fission\n  - transmutation:\n      branch: 1.0\n      products:\n        - DUMP2\n      type: nGamma\n  - decay:\n      branch: 9.69080e-01\n      products:\n        - CM248\n      type: ad\n  - decay:\n      branch: 3.093567e-02\n      products:\n        - LFP41\n      type: sf\nCM242:\n  - transmutation:\n      branch: 0.99\n      products:\n        - AM241\n      type: n2n\n  - transmutation:\n      branch: 0.01\n     
 products:\n        - NP237\n      type: n2n\n  - transmutation:\n      branch: 1.0\n      products:\n        - LFP41\n      type: fission\n  - transmutation:\n      branch: 1.0\n      products:\n        - CM243\n      type: nGamma\n  - decay:\n      branch: 1.0\n      products:\n        - PU238\n      type: ad\n  - decay:\n      branch: 6.794544e-08\n      products:\n        - LFP41\n      type: sf\nCM243:\n  - transmutation:\n      branch: 1.0\n      products:\n        - CM242\n      type: n2n\n  - transmutation:\n      branch: 1.0\n      products:\n        - CM244\n      type: nGamma\n  - transmutation:\n      branch: 1.0\n      products:\n        - LFP41\n      type: fission\n  - decay:\n      branch: 0.9971\n      products:\n        - PU239\n      type: ad\n  - decay:\n      branch: 0.0029\n      products:\n        - AM243\n      type: ec\n  - decay:\n      branch: 5.30000e-11\n      products:\n        - LFP41\n      type: sf\nCM244:\n  - transmutation:\n      branch: 1.0\n      products:\n        - CM243\n      type: n2n\n  - transmutation:\n      branch: 1.0\n      products:\n        - LFP41\n      type: fission\n  - transmutation:\n      branch: 1.0\n      products:\n        - CM245\n      type: nGamma\n  - decay:\n      branch: 1.0\n      products:\n        - PU240\n      type: ad\n  - decay:\n      branch: 1.340741e-06\n      products:\n        - LFP41\n      type: sf\nCM245:\n  - transmutation:\n      branch: 1.0\n      products:\n        - CM244\n      type: n2n\n  - transmutation:\n      branch: 1.0\n      products:\n        - LFP41\n      type: fission\n  - transmutation:\n      branch: 1.0\n      products:\n        - CM246\n      type: nGamma\n  - decay:\n      branch: 1.0\n      products:\n        - PU241\n      type: ad\n  - decay:\n      branch: 6.10000e-09\n      products:\n        - LFP41\n      type: sf\nCM246:\n  - transmutation:\n      branch: 1.0\n      products:\n        - CM245\n      type: n2n\n  - transmutation:\n      branch: 1.0\n      
products:\n        - LFP41\n      type: fission\n  - transmutation:\n      branch: 1.0\n      products:\n        - CM247\n      type: nGamma\n  - decay:\n      branch: 1.0\n      products:\n        - PU242\n      type: ad\n  - decay:\n      branch: 2.61500e-04\n      products:\n        - LFP41\n      type: sf\nCM247:\n  - transmutation:\n      branch: 1.0\n      products:\n        - CM246\n      type: n2n\n  - transmutation:\n      branch: 1.0\n      products:\n        - LFP41\n      type: fission\n  - transmutation:\n      branch: 1.0\n      products:\n        - CM248\n        - DUMP2\n      type: nGamma\n  - decay:\n      branch: 1.0\n      products:\n        - AM243\n      type: ad\nCM248:\n  - transmutation:\n      branch: 1.0\n      products:\n        - CM247\n      type: n2n\n  - transmutation:\n      branch: 1.0\n      products:\n        - LFP41\n      type: fission\n  - transmutation:\n      branch: 1.0\n      products:\n        - BK249\n      type: nGamma\n  - decay:\n      branch: 0.9161\n      products:\n        - DUMP2\n      type: ad\n  - decay:\n      branch: 8.39000e-02\n      products:\n        - LFP41\n      type: sf\nH3:\n  - decay:\n      branch: 1.0\n      products:\n        - HE3\n        - DUMP1\n      type: bmd\nHE3:\n  - transmutation:\n      branch: 1.0\n      products:\n        - HE4\n      type: nGamma\n  - transmutation:\n      branch: 1.0\n      products:\n        - H3\n      type: np\nHE4: []\nIN113:\n  - transmutation:\n      branch: 0.995\n      products:\n        - SN114\n        - DUMP1\n      type: nGamma\n  - transmutation:\n      branch: 0.005\n      products:\n        - CD114\n        - DUMP1\n      type: nGamma\nIN115:\n  - transmutation:\n      branch: 0.9997\n      products:\n        - SN116\n        - DUMP1\n      type: nGamma\n  - transmutation:\n      branch: 0.0003\n      products:\n        - CD116\n        - DUMP1\n      type: nGamma\nLI6:\n  - transmutation:\n      branch: 1.0\n      products:\n        - LI7\n      
type: nGamma\n  - transmutation:\n      branch: 1.0\n      products:\n        - HE4\n        - DUMP1\n      type: nt\nLI7:\n  # LI7 n,gammas to Be8 which splits into two alphas, so we model both here by setting the productParticle to HE4\n  - transmutation:\n      branch: 1.0\n      products:\n        - HE4\n        - DUMP1\n      type: nGamma\n      productParticle: HE4\n  - transmutation:\n      branch: 1.0\n      products:\n        - LI6\n      type: n2n\nNP237:\n  - transmutation:\n      branch: 1.0\n      products:\n        - LFP38\n      type: fission\n  # add tritium from ternary fission from JEFF3.1.1 on top of the LFPs (400 keV)\n  - transmutation:\n      branch: 1.2500e-04\n      products:\n        - H3\n        - DUMP1\n      type: fission\n  - transmutation:\n      branch: 1.0\n      products:\n        - NP238\n        - PU238\n      type: nGamma\n  - transmutation:\n      branch: 0.346\n      products:\n        - PU236\n      type: n2n\n  - transmutation:\n      branch: 0.374\n      products:\n        - U236\n      type: n2n\n  - transmutation:\n      branch: 0.28\n      products:\n        - DUMP2\n      type: n2n\n  - decay:\n      branch: 1.0\n      products:\n        - PA233\n        - DUMP2\n      type: ad\n  - decay:\n      branch: 2.139954e-12\n      products:\n        - DUMP1\n      type: sf\nNP238:\n  - transmutation:\n      branch: 1.0\n      products:\n        - LFP38\n      type: fission\n  - transmutation:\n      branch: 1.0\n      products:\n        - NP239\n        - PU239\n      type: nGamma\n  - transmutation:\n      branch: 1.0\n      products:\n        - NP237\n      type: n2n\n  - decay:\n      branch: 1.0\n      products:\n        - PU238\n      type: bmd\nPA231:\n  - transmutation:\n      branch: 1.0\n      products:\n        - U232\n      type: nGamma\n  - transmutation:\n      branch: 1.0\n      products:\n        - DUMP2\n      type: n2n\nPA233:\n  - transmutation:\n      branch: 1.0\n      products:\n        - U234\n      type: 
nGamma\n  - transmutation:\n      branch: 1.0\n      products:\n        - LFP35\n      type: fission\n  - transmutation:\n      branch: 1.0\n      products:\n        - DUMP2\n      type: n2n\n  - decay:\n      branch: 1.0\n      products:\n        - U233\n      type: bmd\nPU236:\n  - transmutation:\n      branch: 1.0\n      products:\n        - NP237\n      type: nGamma\n  - transmutation:\n      branch: 1.0\n      products:\n        - LFP35\n      type: fission\n  - transmutation:\n      branch: 1.0\n      products:\n        - DUMP2\n      type: n2n\n  - decay:\n      branch: 1.0\n      products:\n        - U232\n        - DUMP2\n      type: ad\n  - decay:\n      branch: 1.90000e-09\n      products:\n        - LFP38\n      type: sf\nPU238:\n  - transmutation:\n      branch: 1.0\n      products:\n        - LFP38\n      type: fission\n  - transmutation:\n      branch: 1.0\n      products:\n        - PU239\n      type: nGamma\n  - transmutation:\n      branch: 1.0\n      products:\n        - NP237\n      type: n2n\n  - decay:\n      branch: 1.0\n      products:\n        - U234\n      type: ad\n  - decay:\n      branch: 1.838574e-09\n      products:\n        - LFP38\n      type: sf\nPU239:\n  - transmutation:\n      branch: 1.0\n      products:\n        - PU238\n      type: n2n\n  - transmutation:\n      branch: 1.0\n      products:\n        - LFP39\n      type: fission\n  # add tritium from ternary fission from JEFF3.1.1 on top of the LFPs (400 keV)\n  - transmutation:\n      branch: 1.4200e-04\n      products:\n        - H3\n        - DUMP1\n      type: fission\n  - transmutation:\n      branch: 1.0\n      products:\n        - PU240\n      type: nGamma\n  - decay:\n      branch: 1.0\n      products:\n        - U235\n      type: ad\n  - decay:\n      branch: 4.399635e-12\n      products:\n        - LFP39\n      type: sf\nPU240:\n  - transmutation:\n      branch: 1.0\n      products:\n        - PU239\n      type: n2n\n  - transmutation:\n      branch: 1.0\n      
products:\n        - LFP40\n      type: fission\n  # add tritium from ternary fission from JEFF3.1.1 on top of the LFPs (400 keV)\n  - transmutation:\n      branch: 1.9179e-04\n      products:\n        - H3\n        - DUMP1\n      type: fission\n  - transmutation:\n      branch: 1.0\n      products:\n        - PU241\n      type: nGamma\n  - decay:\n      branch: 1.0\n      products:\n        - U236\n      type: ad\n  - decay:\n      branch: 5.656034e-08\n      products:\n        - LFP40\n      type: sf\nPU241:\n  - transmutation:\n      branch: 1.0\n      products:\n        - PU240\n      type: n2n\n  - transmutation:\n      branch: 1.0\n      products:\n        - LFP41\n      type: fission\n  # add tritium from ternary fission from JEFF3.1.1 on top of the LFPs (400 keV)\n  - transmutation:\n      branch: 1.4100e-04\n      products:\n        - H3\n        - DUMP1\n      type: fission\n  - transmutation:\n      branch: 1.0\n      products:\n        - PU242\n      type: nGamma\n  - decay:\n      branch: 1.0\n      products:\n        - AM241\n      type: bmd\n  - decay:\n      branch: 5.729878e-15\n      products:\n        - LFP41\n      type: sf\nPU242:\n  - transmutation:\n      branch: 1.0\n      products:\n        - PU241\n      type: n2n\n  - transmutation:\n      branch: 1.0\n      products:\n        - LFP41\n      type: fission\n  # add tritium from ternary fission from JEFF3.1.1 on top of the LFPs (400 keV)\n  - transmutation:\n      branch: 1.6348e-04\n      products:\n        - H3\n        - DUMP1\n      type: fission\n  - transmutation:\n      branch: 1.0\n      products:\n        - AM243\n      type: nGamma\n  - decay:\n      branch: 1.0\n      products:\n        - U238\n      type: ad\n  - decay:\n      branch: 5.482456e-06\n      products:\n        - LFP41\n      type: sf\nTH232:\n  - transmutation:\n      branch: 1.0\n      products:\n        - PA233\n      type: nGamma\n  - transmutation:\n      branch: 1.0\n      products:\n        - LFP35\n      
type: fission\n  - transmutation:\n      branch: 1.0\n      products:\n        - PA231\n      type: n2n\n  - decay:\n      branch: 1.0\n      products:\n        - DUMP2\n      type: ad\n  - decay:\n      branch: 1.410000e-11\n      products:\n        - LFP35\n      type: sf\nU232:\n  - transmutation:\n      branch: 1.0\n      products:\n        - U233\n      type: nGamma\n  - transmutation:\n      branch: 1.0\n      products:\n        - LFP35\n      type: fission\n  - transmutation:\n      branch: 1.0\n      products:\n        - PA231\n      type: n2n\n  - decay:\n      branch: 1.0\n      products:\n        - DUMP2\n      type: ad\n  - decay:\n      branch: 8.612316e-13\n      products:\n        - LFP35\n      type: sf\nU233:\n  - transmutation:\n      branch: 1.0\n      products:\n        - U234\n      type: nGamma\n  - transmutation:\n      branch: 1.0\n      products:\n        - LFP35\n      type: fission\n  # add tritium from ternary fission from JEFF3.1.1 on top of the LFPs (400 keV)\n  - transmutation:\n      branch: 1.1400e-04\n      products:\n        - H3\n        - DUMP1\n      type: fission\n  - transmutation:\n      branch: 1.0\n      products:\n        - U232\n      type: n2n\n  - decay:\n      branch: 1.0\n      products:\n        - DUMP2\n      type: ad\n  - decay:\n      branch: 1.326638e-12\n      products:\n        - LFP35\n      type: sf\nU234:\n  - transmutation:\n      branch: 1.0\n      products:\n        - U233\n        - DUMP2\n      type: n2n\n  - transmutation:\n      branch: 1.0\n      products:\n        - LFP35\n      type: fission\n  # add tritium from ternary fission from JEFF3.1.1 on top of the LFPs (400 keV)\n  - transmutation:\n      branch: 1.5925e-04\n      products:\n        - H3\n        - DUMP1\n      type: fission\n  - transmutation:\n      branch: 1.0\n      products:\n        - U235\n      type: nGamma\n  - decay:\n      branch: 1.0\n      products:\n        - DUMP2\n      type: ad\n  - decay:\n      branch: 1.169048e-11\n   
   products:\n        - LFP35\n      type: sf\nU235:\n  - transmutation:\n      branch: 1.0\n      products:\n        - U234\n        - DUMP2\n      type: n2n\n  - transmutation:\n      branch: 1.0\n      products:\n        - LFP35\n      type: fission\n  # add tritium from ternary fission from JEFF3.1.1 on top of the LFPs (400 keV)\n  - transmutation:\n      branch: 1.0800e-04\n      products:\n        - H3\n        - DUMP1\n      type: fission\n  - transmutation:\n      branch: 1.0\n      products:\n        - U236\n      type: nGamma\n  - decay:\n      branch: 1.0\n      products:\n        - DUMP2\n      type: ad\n  - decay:\n      branch: 2.011429e-09\n      products:\n        - LFP35\n      type: sf\nU236:\n  - transmutation:\n      branch: 1.0\n      products:\n        - U235\n      type: n2n\n  - transmutation:\n      branch: 1.0\n      products:\n        - NP237\n      type: nGamma\n  - transmutation:\n      branch: 1.0\n      products:\n        - LFP35\n      type: fission\n  - transmutation:\n      branch: 1.3094e-04\n      products:\n        - H3\n        - DUMP1\n      type: fission\n  - decay:\n      branch: 1.0\n      products:\n        - DUMP2\n      type: ad\n  - decay:\n      branch: 1.201026e-09\n      products:\n        - LFP35\n      type: sf\nU238:\n  - transmutation:\n      branch: 1.0\n      products:\n        - NP237\n      type: n2n\n  - transmutation:\n      branch: 1.0\n      products:\n        - LFP38\n      type: fission\n  # add tritium from ternary fission from JEFF3.1.1 on top of the LFPs (400 keV)\n  - transmutation:\n      branch: 1.0262e-04\n      products:\n        - H3\n        - DUMP1\n      type: fission\n  - transmutation:\n      branch: 1.0\n      products:\n        - NP239\n        - PU239\n      type: nGamma\n  - decay:\n      branch: 5.448780e-07\n      products:\n        - LFP38\n      type: sf\nNP239:\n  - transmutation:\n      branch: 1.0\n      products:\n        - NP238\n      type: n2n\n  - transmutation:\n      
branch: 1.0\n      products:\n        - LFP38\n      type: fission\n  - transmutation:\n      branch: 1.0\n      products:\n        - PU240\n      type: nGamma\n  - decay:\n      branch: 1.0\n      products:\n        - PU239\n      type: bmd\nY89:\n  - transmutation:\n      branch: 1.0\n      products:\n        - SR89\n      type: np\nSR89:\n  - transmutation:\n      branch: 1.0\n      products:\n        - SR90\n      type: nGamma\n  - decay:\n      branch: 1.0\n      products:\n        - Y89\n      type: bmd\nSR90:\n  - decay:\n      branch: 1.0\n      products:\n        - DUMP2\n      type: bmd\nS32:\n  - transmutation:\n      branch: 1.0\n      products:\n        - P32\n      type: np\n  - transmutation:\n      branch: 1.0\n      products:\n        - S33\n      type: nGamma\nP32:\n  - decay:\n      branch: 1.0\n      products:\n        - S32\n      type: bmd\nS33:\n  - transmutation:\n      branch: 1.0\n      products:\n        - S34\n      type: nGamma\nS34:\n  - transmutation:\n      branch: 1.0\n      products:\n        - S35\n      type: nGamma\nS35:\n  - decay:\n      branch: 1.0\n      products:\n        - DUMP2\n      type: bmd\nS36:\n  - transmutation:\n      branch: 1.0\n      products:\n        - S37\n      type: nGamma\nS37:\n  - decay:\n      branch: 1.0\n      products:\n        - DUMP2\n      type: bmd\nTI46:\n  - transmutation:\n      branch: 1.0\n      products:\n        - TI47\n      type: nGamma\nTI47:\n  - transmutation:\n      branch: 1.0\n      products:\n        - SC47\n      type: np\n  - transmutation:\n      branch: 1.0\n      products:\n        - TI48\n      type: nGamma\nSC47:\n  - decay:\n      branch: 1.0\n      products:\n        - TI47\n      type: bmd\nTI48:\n  - transmutation:\n      branch: 1.0\n      products:\n        - TI49\n      type: nGamma\nTI49:\n  - transmutation:\n      branch: 1.0\n      products:\n        - TI50\n      type: nGamma\nTI50:\n  - transmutation:\n      branch: 1.0\n      products:\n        - TI51\n      
type: nGamma\nTI51:\n  - decay:\n      branch: 1.0\n      products:\n        - DUMP2\n      type: bmd\n"
  },
  {
    "path": "armi/resources/mcc-nuclides.yaml",
    "content": "# This file contains the nuclides that are defined by the MC2-2 and MC2-3\n# codes. The MC2-2 code base uses ENDF/B-V.2 and the MC2-3 code base uses\n# ENDF/B-VII.0 or ENDF/B-VII.1. This file can be amended in the future for \n# MC2-3 as the code base changes, but the nuclides that MC2-3 models are \n# consistent with the data that is supplied by ENDF/B-VII.0. \n# See: Appendix B of ANL/NE-11/41 Rev.3 for V.2 and VII.0 isotopes\n# Public Link: https://publications.anl.gov/anlpubs/2018/10/147840.pdf.\nAC225:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: AC2257\n  ENDF/B-VII.1: AC2257\nAC226:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: AC2267\n  ENDF/B-VII.1: AC2267\nAC227:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: AC2277\n  ENDF/B-VII.1: AC2277\nAG107:\n  ENDF/B-V.2: AG1075\n  ENDF/B-VII.0: AG1077\n  ENDF/B-VII.1: AG1077\nAG109:\n  ENDF/B-V.2: AG1095\n  ENDF/B-VII.0: AG1097\n  ENDF/B-VII.1: AG1097\nAG110M:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: AG10M7\n  ENDF/B-VII.1: AG10M7\nAG111:\n  ENDF/B-V.2: AG1115\n  ENDF/B-VII.0: AG1117\n  ENDF/B-VII.1: AG1117\nAL27:\n  ENDF/B-V.2: AL27 5\n  ENDF/B-VII.0: AL27_7\n  ENDF/B-VII.1: AL27_7\nAM240:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: null\n  ENDF/B-VII.1: AM2407\nAM241:\n  ENDF/B-V.2: AM2415\n  ENDF/B-VII.0: AM2417\n  ENDF/B-VII.1: AM2417\nAM242G:\n  ENDF/B-V.2: AM2425\n  ENDF/B-VII.0: AM2427\n  ENDF/B-VII.1: AM2427\nAM242M:\n  ENDF/B-V.2: AM242M\n  ENDF/B-VII.0: AM42M7\n  ENDF/B-VII.1: AM42M7\nAM243:\n  ENDF/B-V.2: AM243V\n  ENDF/B-VII.0: AM2437\n  ENDF/B-VII.1: AM2437\nAM244:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: AM2447\n  ENDF/B-VII.1: AM2447\nAM244M:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: AM44M7\n  ENDF/B-VII.1: AM44M7\nAR36:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: AR36_7\n  ENDF/B-VII.1: AR36_7\nAR38:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: AR38_7\n  ENDF/B-VII.1: AR38_7\nAR40:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: AR40_7\n  ENDF/B-VII.1: AR40_7\nAS74:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: null\n  ENDF/B-VII.1: AS74_7\nAS75:\n  ENDF/B-V.2: 
AS75 5\n  ENDF/B-VII.0: AS75_7\n  ENDF/B-VII.1: AS75_7\nAU197:\n  ENDF/B-V.2: AU1975\n  ENDF/B-VII.0: AU1977\n  ENDF/B-VII.1: AU1977\nB10:\n  ENDF/B-V.2: B-10 5\n  ENDF/B-VII.0: B10__7\n  ENDF/B-VII.1: B10__7\nB11:\n  ENDF/B-V.2: B-11 5\n  ENDF/B-VII.0: B11__7\n  ENDF/B-VII.1: B11__7\nBA130:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: BA1307\n  ENDF/B-VII.1: BA1307\nBA132:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: BA1327\n  ENDF/B-VII.1: BA1327\nBA133:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: BA1337\n  ENDF/B-VII.1: BA1337\nBA134:\n  ENDF/B-V.2: BA1345\n  ENDF/B-VII.0: BA1347\n  ENDF/B-VII.1: BA1347\nBA135:\n  ENDF/B-V.2: BA1355\n  ENDF/B-VII.0: BA1357\n  ENDF/B-VII.1: BA1357\nBA136:\n  ENDF/B-V.2: BA1365\n  ENDF/B-VII.0: BA1367\n  ENDF/B-VII.1: BA1367\nBA137:\n  ENDF/B-V.2: BA1375\n  ENDF/B-VII.0: BA1377\n  ENDF/B-VII.1: BA1377\nBA138:\n  ENDF/B-V.2: BA1385\n  ENDF/B-VII.0: BA1387\n  ENDF/B-VII.1: BA1387\nBA140:\n  ENDF/B-V.2: BA1405\n  ENDF/B-VII.0: BA1407\n  ENDF/B-VII.1: BA1407\nBE7:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: BE7__7\n  ENDF/B-VII.1: BE7__7\nBE9:\n  ENDF/B-V.2: BE-9 3\n  ENDF/B-VII.0: BE9__7\n  ENDF/B-VII.1: BE9__7\nBI209:\n  ENDF/B-V.2: BI2095\n  ENDF/B-VII.0: BI2097\n  ENDF/B-VII.1: BI2097\nBK245:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: null\n  ENDF/B-VII.1: BK2457\nBK246:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: null\n  ENDF/B-VII.1: BK2467\nBK247:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: null\n  ENDF/B-VII.1: BK2477\nBK248:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: null\n  ENDF/B-VII.1: BK2487\nBK249:\n  ENDF/B-V.2: BK2495\n  ENDF/B-VII.0: BK2497\n  ENDF/B-VII.1: BK2497\nBK250:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: BK2507\n  ENDF/B-VII.1: BK2507\nBR79:\n  ENDF/B-V.2: BR79 5\n  ENDF/B-VII.0: BR79_7\n  ENDF/B-VII.1: BR79_7\nBR81:\n  ENDF/B-V.2: BR81 5\n  ENDF/B-VII.0: BR81_7\n  ENDF/B-VII.1: BR81_7\nC:\n  ENDF/B-V.2: C    5\n  ENDF/B-VII.0: C____7\n  ENDF/B-VII.1: C____7\nCA:\n  ENDF/B-V.2: CA   5\n  ENDF/B-VII.0: null\n  ENDF/B-VII.1: null\nCA40:\n  ENDF/B-V.2: null\n  
ENDF/B-VII.0: CA40_7\n  ENDF/B-VII.1: CA40_7\nCA42:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: CA42_7\n  ENDF/B-VII.1: CA42_7\nCA43:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: CA43_7\n  ENDF/B-VII.1: CA43_7\nCA44:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: CA44_7\n  ENDF/B-VII.1: CA44_7\nCA46:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: CA46_7\n  ENDF/B-VII.1: CA46_7\nCA48:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: CA48_7\n  ENDF/B-VII.1: CA48_7\nCD:\n  ENDF/B-V.2: CD   5\n  ENDF/B-VII.0: null\n  ENDF/B-VII.1: null\nCD106:\n  ENDF/B-V.2: CD1065\n  ENDF/B-VII.0: CD1067\n  ENDF/B-VII.1: CD1067\nCD108:\n  ENDF/B-V.2: CD1085\n  ENDF/B-VII.0: CD1087\n  ENDF/B-VII.1: CD1087\nCD110:\n  ENDF/B-V.2: CD1105\n  ENDF/B-VII.0: CD1107\n  ENDF/B-VII.1: CD1107\nCD111:\n  ENDF/B-V.2: CD1115\n  ENDF/B-VII.0: CD1117\n  ENDF/B-VII.1: CD1117\nCD112:\n  ENDF/B-V.2: CD1125\n  ENDF/B-VII.0: CD1127\n  ENDF/B-VII.1: CD1127\nCD113:\n  ENDF/B-V.2: CD1135\n  ENDF/B-VII.0: CD1137\n  ENDF/B-VII.1: CD1137\nCD114:\n  ENDF/B-V.2: CD1145\n  ENDF/B-VII.0: CD1147\n  ENDF/B-VII.1: CD1147\nCD115M:\n  ENDF/B-V.2: CD115M\n  ENDF/B-VII.0: CD15M7\n  ENDF/B-VII.1: CD15M7\nCD116:\n  ENDF/B-V.2: CD1165\n  ENDF/B-VII.0: CD1167\n  ENDF/B-VII.1: CD1167\nCE136:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: CE1367\n  ENDF/B-VII.1: CE1367\nCE138:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: CE1387\n  ENDF/B-VII.1: CE1387\nCE139:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: CE1397\n  ENDF/B-VII.1: CE1397\nCE140:\n  ENDF/B-V.2: CE1405\n  ENDF/B-VII.0: CE1407\n  ENDF/B-VII.1: CE1407\nCE141:\n  ENDF/B-V.2: CE1415\n  ENDF/B-VII.0: CE1417\n  ENDF/B-VII.1: CE1417\nCE142:\n  ENDF/B-V.2: CE1425\n  ENDF/B-VII.0: CE1427\n  ENDF/B-VII.1: CE1427\nCE143:\n  ENDF/B-V.2: CE1435\n  ENDF/B-VII.0: CE1437\n  ENDF/B-VII.1: CE1437\nCE144:\n  ENDF/B-V.2: CE1445\n  ENDF/B-VII.0: CE1447\n  ENDF/B-VII.1: CE1447\nCF246:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: null\n  ENDF/B-VII.1: CF2467\nCF248:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: null\n  ENDF/B-VII.1: CF2487\nCF249:\n  ENDF/B-V.2: CF2495\n  
ENDF/B-VII.0: CF2497\n  ENDF/B-VII.1: CF2497\nCF250:\n  ENDF/B-V.2: CF2505\n  ENDF/B-VII.0: CF2507\n  ENDF/B-VII.1: CF2507\nCF251:\n  ENDF/B-V.2: CF2515\n  ENDF/B-VII.0: CF2517\n  ENDF/B-VII.1: CF2517\nCF252:\n  ENDF/B-V.2: CF2525\n  ENDF/B-VII.0: CF2527\n  ENDF/B-VII.1: CF2527\nCF253:\n  ENDF/B-V.2: CF2535\n  ENDF/B-VII.0: CF2537\n  ENDF/B-VII.1: CF2537\nCF254:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: CF2547\n  ENDF/B-VII.1: CF2547\nCL:\n  ENDF/B-V.2: CL   5\n  ENDF/B-VII.0: null\n  ENDF/B-VII.1: null\nCL35:\n  ENDF/B-V.2: CL35_7\n  ENDF/B-VII.0: CL35_7\n  ENDF/B-VII.1: CL35_7\nCL37:\n  ENDF/B-V.2: CL37_7\n  ENDF/B-VII.0: CL37_7\n  ENDF/B-VII.1: CL37_7\nCM240:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: null\n  ENDF/B-VII.1: CM2407\nCM241:\n  ENDF/B-V.2: CM2415\n  ENDF/B-VII.0: CM2417\n  ENDF/B-VII.1: CM2417\nCM242:\n  ENDF/B-V.2: CM2425\n  ENDF/B-VII.0: CM2427\n  ENDF/B-VII.1: CM2427\nCM243:\n  ENDF/B-V.2: CM2435\n  ENDF/B-VII.0: CM2437\n  ENDF/B-VII.1: CM2437\nCM244:\n  ENDF/B-V.2: CM2445\n  ENDF/B-VII.0: CM2447\n  ENDF/B-VII.1: CM2447\nCM245:\n  ENDF/B-V.2: CM2455\n  ENDF/B-VII.0: CM2457\n  ENDF/B-VII.1: CM2457\nCM246:\n  ENDF/B-V.2: CM2465\n  ENDF/B-VII.0: CM2467\n  ENDF/B-VII.1: CM2467\nCM247:\n  ENDF/B-V.2: CM2475\n  ENDF/B-VII.0: CM2477\n  ENDF/B-VII.1: CM2477\nCM248:\n  ENDF/B-V.2: CM2485\n  ENDF/B-VII.0: CM2487\n  ENDF/B-VII.1: CM2487\nCM249:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: CM2497\n  ENDF/B-VII.1: CM2497\nCM250:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: CM2507\n  ENDF/B-VII.1: CM2507\nCO58:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: CO58_7\n  ENDF/B-VII.1: CO58_7\nCO58M:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: CO58M7\n  ENDF/B-VII.1: CO58M7\nCO59:\n  ENDF/B-V.2: CO59 5\n  ENDF/B-VII.0: CO59_7\n  ENDF/B-VII.1: CO59_7\nCR:\n  ENDF/B-V.2: CR   S\n  ENDF/B-VII.0: null\n  ENDF/B-VII.1: null\nCR50:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: CR50_7\n  ENDF/B-VII.1: CR50_7\nCR52:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: CR52_7\n  ENDF/B-VII.1: CR52_7\nCR53:\n  ENDF/B-V.2: null\n  
ENDF/B-VII.0: CR53_7\n  ENDF/B-VII.1: CR53_7\nCR54:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: CR54_7\n  ENDF/B-VII.1: CR54_7\nCS133:\n  ENDF/B-V.2: CS1335\n  ENDF/B-VII.0: CS1337\n  ENDF/B-VII.1: CS1337\nCS134:\n  ENDF/B-V.2: CS1345\n  ENDF/B-VII.0: CS1347\n  ENDF/B-VII.1: CS1347\nCS135:\n  ENDF/B-V.2: CS1355\n  ENDF/B-VII.0: CS1357\n  ENDF/B-VII.1: CS1357\nCS136:\n  ENDF/B-V.2: CS1365\n  ENDF/B-VII.0: CS1367\n  ENDF/B-VII.1: CS1367\nCS137:\n  ENDF/B-V.2: CS1375\n  ENDF/B-VII.0: CS1377\n  ENDF/B-VII.1: CS1377\nCU:\n  ENDF/B-V.2: CU   5\n  ENDF/B-VII.0: null\n  ENDF/B-VII.1: null\nCU63:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: CU63_7\n  ENDF/B-VII.1: CU63_7\nCU65:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: CU65_7\n  ENDF/B-VII.1: CU65_7\nDUMP1:\n  ENDF/B-V.2: DUMMY1\n  ENDF/B-VII.0: DUMMY\n  ENDF/B-VII.1: DUMMY\nDUMP2:\n  ENDF/B-V.2: DUMMY2\n  ENDF/B-VII.0: DUMMY\n  ENDF/B-VII.1: DUMMY\nDY156:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: DY1567\n  ENDF/B-VII.1: DY1567\nDY158:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: DY1587\n  ENDF/B-VII.1: DY1587\nDY160:\n  ENDF/B-V.2: DY1605\n  ENDF/B-VII.0: DY1607\n  ENDF/B-VII.1: DY1607\nDY161:\n  ENDF/B-V.2: DY1615\n  ENDF/B-VII.0: DY1617\n  ENDF/B-VII.1: DY1617\nDY162:\n  ENDF/B-V.2: DY1625\n  ENDF/B-VII.0: DY1627\n  ENDF/B-VII.1: DY1627\nDY163:\n  ENDF/B-V.2: DY1635\n  ENDF/B-VII.0: DY1637\n  ENDF/B-VII.1: DY1637\nDY164:\n  ENDF/B-V.2: DY1645\n  ENDF/B-VII.0: DY1647\n  ENDF/B-VII.1: DY1647\nER162:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: ER1627\n  ENDF/B-VII.1: ER1627\nER164:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: ER1647\n  ENDF/B-VII.1: ER1647\nER166:\n  ENDF/B-V.2: ER1665\n  ENDF/B-VII.0: ER1667\n  ENDF/B-VII.1: ER1667\nER167:\n  ENDF/B-V.2: ER1675\n  ENDF/B-VII.0: ER1677\n  ENDF/B-VII.1: ER1677\nER168:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: ER1687\n  ENDF/B-VII.1: ER1687\nER170:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: ER1707\n  ENDF/B-VII.1: ER1707\nES251:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: null\n  ENDF/B-VII.1: ES2517\nES252:\n  ENDF/B-V.2: null\n  
ENDF/B-VII.0: null\n  ENDF/B-VII.1: ES2527\nES253:\n  ENDF/B-V.2: ES2535\n  ENDF/B-VII.0: ES2537\n  ENDF/B-VII.1: ES2537\nES254:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: ES2547\n  ENDF/B-VII.1: ES2547\nES254M:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: null\n  ENDF/B-VII.1: ES54M7\nES255:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: ES2557\n  ENDF/B-VII.1: ES2557\nEU151:\n  ENDF/B-V.2: EU1515\n  ENDF/B-VII.0: EU1517\n  ENDF/B-VII.1: EU1517\nEU152:\n  ENDF/B-V.2: EU1525\n  ENDF/B-VII.0: EU1527\n  ENDF/B-VII.1: EU1527\nEU153:\n  ENDF/B-V.2: EU1535\n  ENDF/B-VII.0: EU1537\n  ENDF/B-VII.1: EU1537\nEU154:\n  ENDF/B-V.2: EU1545\n  ENDF/B-VII.0: EU1547\n  ENDF/B-VII.1: EU1547\nEU155:\n  ENDF/B-V.2: EU1555\n  ENDF/B-VII.0: EU1557\n  ENDF/B-VII.1: EU1557\nEU156:\n  ENDF/B-V.2: EU1565\n  ENDF/B-VII.0: EU1567\n  ENDF/B-VII.1: EU1567\nEU157:\n  ENDF/B-V.2: EU1575\n  ENDF/B-VII.0: EU1577\n  ENDF/B-VII.1: EU1577\nF19:\n  ENDF/B-V.2: F-19 5\n  ENDF/B-VII.0: F19__7\n  ENDF/B-VII.1: F19__7\nFE:\n  ENDF/B-V.2: FE  SV\n  ENDF/B-VII.0: null\n  ENDF/B-VII.1: null\nFE54:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: FE54_7\n  ENDF/B-VII.1: FE54_7\nFE56:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: FE56_7\n  ENDF/B-VII.1: FE56_7\nFE57:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: FE57_7\n  ENDF/B-VII.1: FE57_7\nFE58:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: FE58_7\n  ENDF/B-VII.1: FE58_7\nFM255:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: FM2557\n  ENDF/B-VII.1: FM2557\nGA:\n  ENDF/B-V.2: GA   5\n  ENDF/B-VII.0: null\n  ENDF/B-VII.1: null\nGA69:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: GA69_7\n  ENDF/B-VII.1: GA69_7\nGA71:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: GA71_7\n  ENDF/B-VII.1: GA71_7\nGD152:\n  ENDF/B-V.2: GD1525\n  ENDF/B-VII.0: GD1527\n  ENDF/B-VII.1: GD1527\nGD153:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: GD1537\n  ENDF/B-VII.1: GD1537\nGD154:\n  ENDF/B-V.2: GD1545\n  ENDF/B-VII.0: GD1547\n  ENDF/B-VII.1: GD1547\nGD155:\n  ENDF/B-V.2: GD1555\n  ENDF/B-VII.0: GD1557\n  ENDF/B-VII.1: GD1557\nGD156:\n  ENDF/B-V.2: GD1565\n  ENDF/B-VII.0: 
GD1567\n  ENDF/B-VII.1: GD1567\nGD157:\n  ENDF/B-V.2: GD1575\n  ENDF/B-VII.0: GD1577\n  ENDF/B-VII.1: GD1577\nGD158:\n  ENDF/B-V.2: GD1585\n  ENDF/B-VII.0: GD1587\n  ENDF/B-VII.1: GD1587\nGD160:\n  ENDF/B-V.2: GD1605\n  ENDF/B-VII.0: GD1607\n  ENDF/B-VII.1: GD1607\nGE70:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: GE70_7\n  ENDF/B-VII.1: GE70_7\nGE72:\n  ENDF/B-V.2: GE72 5\n  ENDF/B-VII.0: GE72_7\n  ENDF/B-VII.1: GE72_7\nGE73:\n  ENDF/B-V.2: GE73 5\n  ENDF/B-VII.0: GE73_7\n  ENDF/B-VII.1: GE73_7\nGE74:\n  ENDF/B-V.2: GE74 5\n  ENDF/B-VII.0: GE74_7\n  ENDF/B-VII.1: GE74_7\nGE76:\n  ENDF/B-V.2: GE76 5\n  ENDF/B-VII.0: GE76_7\n  ENDF/B-VII.1: GE76_7\nH1:\n  ENDF/B-V.2: HYDRGN\n  ENDF/B-VII.0: H1___7\n  ENDF/B-VII.1: H1___7\nH2:\n  ENDF/B-V.2: H-2  5\n  ENDF/B-VII.0: H2___7\n  ENDF/B-VII.1: H2___7\nH3:\n  ENDF/B-V.2: H-3  5\n  ENDF/B-VII.0: H3___7\n  ENDF/B-VII.1: H3___7\nHE3:\n  ENDF/B-V.2: HE3  5\n  ENDF/B-VII.0: HE3__7\n  ENDF/B-VII.1: HE3__7\nHE4:\n  ENDF/B-V.2: HE4  5\n  ENDF/B-VII.0: HE4__7\n  ENDF/B-VII.1: HE4__7\nHF:\n  ENDF/B-V.2: HF   5\n  ENDF/B-VII.0: null\n  ENDF/B-VII.1: null\nHF174:\n  ENDF/B-V.2: HF1745\n  ENDF/B-VII.0: HF1747\n  ENDF/B-VII.1: HF1747\nHF176:\n  ENDF/B-V.2: HF1765\n  ENDF/B-VII.0: HF1767\n  ENDF/B-VII.1: HF1767\nHF177:\n  ENDF/B-V.2: HF1775\n  ENDF/B-VII.0: HF1777\n  ENDF/B-VII.1: HF1777\nHF178:\n  ENDF/B-V.2: HF1785\n  ENDF/B-VII.0: HF1787\n  ENDF/B-VII.1: HF1787\nHF179:\n  ENDF/B-V.2: HF1795\n  ENDF/B-VII.0: HF1797\n  ENDF/B-VII.1: HF1797\nHF180:\n  ENDF/B-V.2: HF1805\n  ENDF/B-VII.0: HF1807\n  ENDF/B-VII.1: HF1807\nHG196:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: HG1967\n  ENDF/B-VII.1: HG1967\nHG198:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: HG1987\n  ENDF/B-VII.1: HG1987\nHG199:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: HG1997\n  ENDF/B-VII.1: HG1997\nHG200:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: HG2007\n  ENDF/B-VII.1: HG2007\nHG201:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: HG2017\n  ENDF/B-VII.1: HG2017\nHG202:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: HG2027\n  
ENDF/B-VII.1: HG2027\nHG204:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: HG2047\n  ENDF/B-VII.1: HG2047\nHO165:\n  ENDF/B-V.2: HO1655\n  ENDF/B-VII.0: HO1657\n  ENDF/B-VII.1: HO1657\nHO166M:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: HO66M7\n  ENDF/B-VII.1: HO66M7\nI127:\n  ENDF/B-V.2: I-1275\n  ENDF/B-VII.0: I127_7\n  ENDF/B-VII.1: I127_7\nI129:\n  ENDF/B-V.2: I-1295\n  ENDF/B-VII.0: I129_7\n  ENDF/B-VII.1: I129_7\nI130:\n  ENDF/B-V.2: I-1305\n  ENDF/B-VII.0: I130_7\n  ENDF/B-VII.1: I130_7\nI131:\n  ENDF/B-V.2: I-1315\n  ENDF/B-VII.0: I131_7\n  ENDF/B-VII.1: I131_7\nI135:\n  ENDF/B-V.2: I-1355\n  ENDF/B-VII.0: I135_7\n  ENDF/B-VII.1: I135_7\nIN113:\n  ENDF/B-V.2: IN1135\n  ENDF/B-VII.0: IN1137\n  ENDF/B-VII.1: IN1137\nIN115:\n  ENDF/B-V.2: IN1155\n  ENDF/B-VII.0: IN1157\n  ENDF/B-VII.1: IN1157\nIR191:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: IR1917\n  ENDF/B-VII.1: IR1917\nIR193:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: IR1937\n  ENDF/B-VII.1: IR1937\nK:\n  ENDF/B-V.2: K    5\n  ENDF/B-VII.0: null\n  ENDF/B-VII.1: null\nK39:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: K39__7\n  ENDF/B-VII.1: K39__7\nK40:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: K40__7\n  ENDF/B-VII.1: K40__7\nK41:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: K41__7\n  ENDF/B-VII.1: K41__7\nKR78:\n  ENDF/B-V.2: KR78 5\n  ENDF/B-VII.0: KR78_7\n  ENDF/B-VII.1: KR78_7\nKR80:\n  ENDF/B-V.2: KR80 5\n  ENDF/B-VII.0: KR80_7\n  ENDF/B-VII.1: KR80_7\nKR82:\n  ENDF/B-V.2: KR82 5\n  ENDF/B-VII.0: KR82_7\n  ENDF/B-VII.1: KR82_7\nKR83:\n  ENDF/B-V.2: KR83 5\n  ENDF/B-VII.0: KR83_7\n  ENDF/B-VII.1: KR83_7\nKR84:\n  ENDF/B-V.2: KR84 5\n  ENDF/B-VII.0: KR84_7\n  ENDF/B-VII.1: KR84_7\nKR85:\n  ENDF/B-V.2: KR85 5\n  ENDF/B-VII.0: KR85_7\n  ENDF/B-VII.1: KR85_7\nKR86:\n  ENDF/B-V.2: KR86 5\n  ENDF/B-VII.0: KR86_7\n  ENDF/B-VII.1: KR86_7\nLA138:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: LA1387\n  ENDF/B-VII.1: LA1387\nLA139:\n  ENDF/B-V.2: LA1395\n  ENDF/B-VII.0: LA1397\n  ENDF/B-VII.1: LA1397\nLA140:\n  ENDF/B-V.2: LA1405\n  ENDF/B-VII.0: LA1407\n  
ENDF/B-VII.1: LA1407\nLI6:\n  ENDF/B-V.2: LI-6 5\n  ENDF/B-VII.0: LI6__7\n  ENDF/B-VII.1: LI6__7\nLI7:\n  ENDF/B-V.2: LI-7 V\n  ENDF/B-VII.0: LI7__7\n  ENDF/B-VII.1: LI7__7\nLU175:\n  ENDF/B-V.2: LU1755\n  ENDF/B-VII.0: LU1757\n  ENDF/B-VII.1: LU1757\nLU176:\n  ENDF/B-V.2: LU1765\n  ENDF/B-VII.0: LU1767\n  ENDF/B-VII.1: LU1767\nMG:\n  ENDF/B-V.2: MG   5\n  ENDF/B-VII.0: null\n  ENDF/B-VII.1: null\nMG24:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: MG24_7\n  ENDF/B-VII.1: MG24_7\nMG25:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: MG25_7\n  ENDF/B-VII.1: MG25_7\nMG26:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: MG26_7\n  ENDF/B-VII.1: MG26_7\nMN55:\n  ENDF/B-V.2: MN55 S\n  ENDF/B-VII.0: MN55_7\n  ENDF/B-VII.1: MN55_7\nMO:\n  ENDF/B-V.2: MO   S\n  ENDF/B-VII.0: null\n  ENDF/B-VII.1: null\nMO100:\n  ENDF/B-V.2: MO1005\n  ENDF/B-VII.0: MO1007\n  ENDF/B-VII.1: MO1007\nMO92:\n  ENDF/B-V.2: MO92 5\n  ENDF/B-VII.0: MO92_7\n  ENDF/B-VII.1: MO92_7\nMO94:\n  ENDF/B-V.2: MO94 5\n  ENDF/B-VII.0: MO94_7\n  ENDF/B-VII.1: MO94_7\nMO95:\n  ENDF/B-V.2: MO95 5\n  ENDF/B-VII.0: MO95_7\n  ENDF/B-VII.1: MO95_7\nMO96:\n  ENDF/B-V.2: MO96 5\n  ENDF/B-VII.0: MO96_7\n  ENDF/B-VII.1: MO96_7\nMO97:\n  ENDF/B-V.2: MO97 5\n  ENDF/B-VII.0: MO97_7\n  ENDF/B-VII.1: MO97_7\nMO98:\n  ENDF/B-V.2: MO98 5\n  ENDF/B-VII.0: MO98_7\n  ENDF/B-VII.1: MO98_7\nMO99:\n  ENDF/B-V.2: MO99 5\n  ENDF/B-VII.0: MO99_7\n  ENDF/B-VII.1: MO99_7\nN14:\n  ENDF/B-V.2: N-14 5\n  ENDF/B-VII.0: N14__7\n  ENDF/B-VII.1: N14__7\nN15:\n  ENDF/B-V.2: N-15 5\n  ENDF/B-VII.0: N15__7\n  ENDF/B-VII.1: N15__7\nNA22:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: NA22_7\n  ENDF/B-VII.1: NA22_7\nNA23:\n  ENDF/B-V.2: NA23 S\n  ENDF/B-VII.0: NA23_7\n  ENDF/B-VII.1: NA23_7\nNB93:\n  ENDF/B-V.2: NB93 5\n  ENDF/B-VII.0: NB93_7\n  ENDF/B-VII.1: NB93_7\nNB94:\n  ENDF/B-V.2: NB94 5\n  ENDF/B-VII.0: NB94_7\n  ENDF/B-VII.1: NB94_7\nNB95:\n  ENDF/B-V.2: NB95 5\n  ENDF/B-VII.0: NB95_7\n  ENDF/B-VII.1: NB95_7\nND142:\n  ENDF/B-V.2: ND1425\n  ENDF/B-VII.0: ND1427\n  ENDF/B-VII.1: 
ND1427\nND143:\n  ENDF/B-V.2: ND1435\n  ENDF/B-VII.0: ND1437\n  ENDF/B-VII.1: ND1437\nND144:\n  ENDF/B-V.2: ND1445\n  ENDF/B-VII.0: ND1447\n  ENDF/B-VII.1: ND1447\nND145:\n  ENDF/B-V.2: ND1455\n  ENDF/B-VII.0: ND1457\n  ENDF/B-VII.1: ND1457\nND146:\n  ENDF/B-V.2: ND1465\n  ENDF/B-VII.0: ND1467\n  ENDF/B-VII.1: ND1467\nND147:\n  ENDF/B-V.2: ND1475\n  ENDF/B-VII.0: ND1477\n  ENDF/B-VII.1: ND1477\nND148:\n  ENDF/B-V.2: ND1485\n  ENDF/B-VII.0: ND1487\n  ENDF/B-VII.1: ND1487\nND150:\n  ENDF/B-V.2: ND1505\n  ENDF/B-VII.0: ND1507\n  ENDF/B-VII.1: ND1507\nNI:\n  ENDF/B-V.2: NI   S\n  ENDF/B-VII.0: null\n  ENDF/B-VII.1: null\nNI58:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: NI58_7\n  ENDF/B-VII.1: NI58_7\nNI59:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: NI59_7\n  ENDF/B-VII.1: NI59_7\nNI60:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: NI60_7\n  ENDF/B-VII.1: NI60_7\nNI61:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: NI61_7\n  ENDF/B-VII.1: NI61_7\nNI62:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: NI62_7\n  ENDF/B-VII.1: NI62_7\nNI64:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: NI64_7\n  ENDF/B-VII.1: NI64_7\nNP234:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: null\n  ENDF/B-VII.1: NP2347\nNP235:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: NP2357\n  ENDF/B-VII.1: NP2357\nNP236:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: NP2367\n  ENDF/B-VII.1: NP2367\nNP237:\n  ENDF/B-V.2: NP237V\n  ENDF/B-VII.0: NP2377\n  ENDF/B-VII.1: NP2377\nNP238:\n  ENDF/B-V.2: NP2385\n  ENDF/B-VII.0: NP2387\n  ENDF/B-VII.1: NP2387\nNP239:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: NP2397\n  ENDF/B-VII.1: NP2397\nO16:\n  ENDF/B-V.2: O-16 5\n  ENDF/B-VII.0: O16__7\n  ENDF/B-VII.1: O16__7\nO17:\n  ENDF/B-V.2: O-17 5\n  ENDF/B-VII.0: O17__7\n  ENDF/B-VII.1: O17__7\nP31:\n  ENDF/B-V.2: P-31 5\n  ENDF/B-VII.0: P31__7\n  ENDF/B-VII.1: P31__7\nPA229:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: null\n  ENDF/B-VII.1: PA2297\nPA230:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: null\n  ENDF/B-VII.1: PA2307\nPA231:\n  ENDF/B-V.2: PA2315\n  ENDF/B-VII.0: PA2317\n  ENDF/B-VII.1: PA2317\nPA232:\n  
ENDF/B-V.2: null\n  ENDF/B-VII.0: PA2327\n  ENDF/B-VII.1: PA2327\nPA233:\n  ENDF/B-V.2: PA2335\n  ENDF/B-VII.0: PA2337\n  ENDF/B-VII.1: PA2337\nPB:\n  ENDF/B-V.2: PB   5\n  ENDF/B-VII.0: null\n  ENDF/B-VII.1: null\nPB204:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: PB2047\n  ENDF/B-VII.1: PB2047\nPB206:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: PB2067\n  ENDF/B-VII.1: PB2067\nPB207:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: PB2077\n  ENDF/B-VII.1: PB2077\nPB208:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: PB2087\n  ENDF/B-VII.1: PB2087\nPD102:\n  ENDF/B-V.2: PD1025\n  ENDF/B-VII.0: PD1027\n  ENDF/B-VII.1: PD1027\nPD104:\n  ENDF/B-V.2: PD1045\n  ENDF/B-VII.0: PD1047\n  ENDF/B-VII.1: PD1047\nPD105:\n  ENDF/B-V.2: PD1055\n  ENDF/B-VII.0: PD1057\n  ENDF/B-VII.1: PD1057\nPD106:\n  ENDF/B-V.2: PD1065\n  ENDF/B-VII.0: PD1067\n  ENDF/B-VII.1: PD1067\nPD107:\n  ENDF/B-V.2: PD1075\n  ENDF/B-VII.0: PD1077\n  ENDF/B-VII.1: PD1077\nPD108:\n  ENDF/B-V.2: PD1085\n  ENDF/B-VII.0: PD1087\n  ENDF/B-VII.1: PD1087\nPD110:\n  ENDF/B-V.2: PD1105\n  ENDF/B-VII.0: PD1107\n  ENDF/B-VII.1: PD1107\nPM147:\n  ENDF/B-V.2: PM1475\n  ENDF/B-VII.0: PM1477\n  ENDF/B-VII.1: PM1477\nPM148:\n  ENDF/B-V.2: PM1485\n  ENDF/B-VII.0: PM1487\n  ENDF/B-VII.1: PM1487\nPM148M:\n  ENDF/B-V.2: PM148M\n  ENDF/B-VII.0: PM48M7\n  ENDF/B-VII.1: PM48M7\nPM149:\n  ENDF/B-V.2: PM1495\n  ENDF/B-VII.0: PM1497\n  ENDF/B-VII.1: PM1497\nPM151:\n  ENDF/B-V.2: PM1515\n  ENDF/B-VII.0: PM1517\n  ENDF/B-VII.1: PM1517\nPR141:\n  ENDF/B-V.2: PR1415\n  ENDF/B-VII.0: PR1417\n  ENDF/B-VII.1: PR1417\nPR142:\n  ENDF/B-V.2: PR1425\n  ENDF/B-VII.0: PR1427\n  ENDF/B-VII.1: PR1427\nPR143:\n  ENDF/B-V.2: PR1435\n  ENDF/B-VII.0: PR1437\n  ENDF/B-VII.1: PR1437\nPU236:\n  ENDF/B-V.2: PU2365\n  ENDF/B-VII.0: PU2367\n  ENDF/B-VII.1: PU2367\nPU237:\n  ENDF/B-V.2: PU2375\n  ENDF/B-VII.0: PU2377\n  ENDF/B-VII.1: PU2377\nPU238:\n  ENDF/B-V.2: PU2385\n  ENDF/B-VII.0: PU2387\n  ENDF/B-VII.1: PU2387\nPU239:\n  ENDF/B-V.2: PU239V\n  ENDF/B-VII.0: PU2397\n  ENDF/B-VII.1: 
PU2397\nPU240:\n  ENDF/B-V.2: PU2405\n  ENDF/B-VII.0: PU2407\n  ENDF/B-VII.1: PU2407\nPU241:\n  ENDF/B-V.2: PU2415\n  ENDF/B-VII.0: PU2417\n  ENDF/B-VII.1: PU2417\nPU242:\n  ENDF/B-V.2: PU2425\n  ENDF/B-VII.0: PU2427\n  ENDF/B-VII.1: PU2427\nPU243:\n  ENDF/B-V.2: PU2435\n  ENDF/B-VII.0: PU2437\n  ENDF/B-VII.1: PU2437\nPU244:\n  ENDF/B-V.2: PU2445\n  ENDF/B-VII.0: PU2447\n  ENDF/B-VII.1: PU2447\nPU246:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: PU2467\n  ENDF/B-VII.1: PU2467\nRA223:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: RA2237\n  ENDF/B-VII.1: RA2237\nRA224:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: RA2247\n  ENDF/B-VII.1: RA2247\nRA225:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: RA2257\n  ENDF/B-VII.1: RA2257\nRA226:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: RA2267\n  ENDF/B-VII.1: RA2267\nRB85:\n  ENDF/B-V.2: RB85 5\n  ENDF/B-VII.0: RB85_7\n  ENDF/B-VII.1: RB85_7\nRB86:\n  ENDF/B-V.2: RB86 5\n  ENDF/B-VII.0: RB86_7\n  ENDF/B-VII.1: RB86_7\nRB87:\n  ENDF/B-V.2: RB87 5\n  ENDF/B-VII.0: RB87_7\n  ENDF/B-VII.1: RB87_7\nRE185:\n  ENDF/B-V.2: RE1855\n  ENDF/B-VII.0: RE1857\n  ENDF/B-VII.1: RE1857\nRE187:\n  ENDF/B-V.2: RE1875\n  ENDF/B-VII.0: RE1877\n  ENDF/B-VII.1: RE1877\nRH103:\n  ENDF/B-V.2: RH1035\n  ENDF/B-VII.0: RH1037\n  ENDF/B-VII.1: RH1037\nRH105:\n  ENDF/B-V.2: RH1055\n  ENDF/B-VII.0: RH1057\n  ENDF/B-VII.1: RH1057\nRU100:\n  ENDF/B-V.2: RU1005\n  ENDF/B-VII.0: RU1007\n  ENDF/B-VII.1: RU1007\nRU101:\n  ENDF/B-V.2: RU1015\n  ENDF/B-VII.0: RU1017\n  ENDF/B-VII.1: RU1017\nRU102:\n  ENDF/B-V.2: RU1025\n  ENDF/B-VII.0: RU1027\n  ENDF/B-VII.1: RU1027\nRU103:\n  ENDF/B-V.2: RU1035\n  ENDF/B-VII.0: RU1037\n  ENDF/B-VII.1: RU1037\nRU104:\n  ENDF/B-V.2: RU1045\n  ENDF/B-VII.0: RU1047\n  ENDF/B-VII.1: RU1047\nRU105:\n  ENDF/B-V.2: RU1055\n  ENDF/B-VII.0: RU1057\n  ENDF/B-VII.1: RU1057\nRU106:\n  ENDF/B-V.2: RU1065\n  ENDF/B-VII.0: RU1067\n  ENDF/B-VII.1: RU1067\nRU96:\n  ENDF/B-V.2: RU96 5\n  ENDF/B-VII.0: RU96_7\n  ENDF/B-VII.1: RU96_7\nRU98:\n  ENDF/B-V.2: RU98 5\n  ENDF/B-VII.0: RU98_7\n  
ENDF/B-VII.1: RU98_7\nRU99:\n  ENDF/B-V.2: RU99 5\n  ENDF/B-VII.0: RU99_7\n  ENDF/B-VII.1: RU99_7\nS:\n  ENDF/B-V.2: S    5\n  ENDF/B-VII.0: null\n  ENDF/B-VII.1: null\nS32:\n  ENDF/B-V.2: S-32 5\n  ENDF/B-VII.0: S32__7\n  ENDF/B-VII.1: S32__7\nS33:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: S33__7\n  ENDF/B-VII.1: S33__7\nS34:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: S34__7\n  ENDF/B-VII.1: S34__7\nS36:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: S36__7\n  ENDF/B-VII.1: S36__7\nSB121:\n  ENDF/B-V.2: SB1215\n  ENDF/B-VII.0: SB1217\n  ENDF/B-VII.1: SB1217\nSB123:\n  ENDF/B-V.2: SB1235\n  ENDF/B-VII.0: SB1237\n  ENDF/B-VII.1: SB1237\nSB124:\n  ENDF/B-V.2: SB1245\n  ENDF/B-VII.0: SB1247\n  ENDF/B-VII.1: SB1247\nSB125:\n  ENDF/B-V.2: SB1255\n  ENDF/B-VII.0: SB1257\n  ENDF/B-VII.1: SB1257\nSB126:\n  ENDF/B-V.2: SB1265\n  ENDF/B-VII.0: SB1267\n  ENDF/B-VII.1: SB1267\nSC45:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: SC45_7\n  ENDF/B-VII.1: SC45_7\nSE74:\n  ENDF/B-V.2: SE74 5\n  ENDF/B-VII.0: SE74_7\n  ENDF/B-VII.1: SE74_7\nSE76:\n  ENDF/B-V.2: SE76 5\n  ENDF/B-VII.0: SE76_7\n  ENDF/B-VII.1: SE76_7\nSE77:\n  ENDF/B-V.2: SE77 5\n  ENDF/B-VII.0: SE77_7\n  ENDF/B-VII.1: SE77_7\nSE78:\n  ENDF/B-V.2: SE78 5\n  ENDF/B-VII.0: SE78_7\n  ENDF/B-VII.1: SE78_7\nSE79:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: SE79_7\n  ENDF/B-VII.1: SE79_7\nSE80:\n  ENDF/B-V.2: SE80 5\n  ENDF/B-VII.0: SE80_7\n  ENDF/B-VII.1: SE80_7\nSE82:\n  ENDF/B-V.2: SE82 5\n  ENDF/B-VII.0: SE82_7\n  ENDF/B-VII.1: SE82_7\nSI:\n  ENDF/B-V.2: SI   5\n  ENDF/B-VII.0: null\n  ENDF/B-VII.1: null\nSI28:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: SI28_7\n  ENDF/B-VII.1: SI28_7\nSI29:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: SI29_7\n  ENDF/B-VII.1: SI29_7\nSI30:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: SI30_7\n  ENDF/B-VII.1: SI30_7\nSM144:\n  ENDF/B-V.2: SM1445\n  ENDF/B-VII.0: SM1447\n  ENDF/B-VII.1: SM1447\nSM147:\n  ENDF/B-V.2: SM1475\n  ENDF/B-VII.0: SM1477\n  ENDF/B-VII.1: SM1477\nSM148:\n  ENDF/B-V.2: SM1485\n  ENDF/B-VII.0: SM1487\n  ENDF/B-VII.1: 
SM1487\nSM149:\n  ENDF/B-V.2: SM1495\n  ENDF/B-VII.0: SM1497\n  ENDF/B-VII.1: SM1497\nSM150:\n  ENDF/B-V.2: SM1505\n  ENDF/B-VII.0: SM1507\n  ENDF/B-VII.1: SM1507\nSM151:\n  ENDF/B-V.2: SM1515\n  ENDF/B-VII.0: SM1517\n  ENDF/B-VII.1: SM1517\nSM152:\n  ENDF/B-V.2: SM1525\n  ENDF/B-VII.0: SM1527\n  ENDF/B-VII.1: SM1527\nSM153:\n  ENDF/B-V.2: SM1535\n  ENDF/B-VII.0: SM1537\n  ENDF/B-VII.1: SM1537\nSM154:\n  ENDF/B-V.2: SM1545\n  ENDF/B-VII.0: SM1547\n  ENDF/B-VII.1: SM1547\nSN112:\n  ENDF/B-V.2: SN1125\n  ENDF/B-VII.0: SN1127\n  ENDF/B-VII.1: SN1127\nSN113:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: SN1137\n  ENDF/B-VII.1: SN1137\nSN114:\n  ENDF/B-V.2: SN1145\n  ENDF/B-VII.0: SN1147\n  ENDF/B-VII.1: SN1147\nSN115:\n  ENDF/B-V.2: SN1155\n  ENDF/B-VII.0: SN1157\n  ENDF/B-VII.1: SN1157\nSN116:\n  ENDF/B-V.2: SN1165\n  ENDF/B-VII.0: SN1167\n  ENDF/B-VII.1: SN1167\nSN117:\n  ENDF/B-V.2: SN1175\n  ENDF/B-VII.0: SN1177\n  ENDF/B-VII.1: SN1177\nSN118:\n  ENDF/B-V.2: SN1185\n  ENDF/B-VII.0: SN1187\n  ENDF/B-VII.1: SN1187\nSN119:\n  ENDF/B-V.2: SN1195\n  ENDF/B-VII.0: SN1197\n  ENDF/B-VII.1: SN1197\nSN120:\n  ENDF/B-V.2: SN1205\n  ENDF/B-VII.0: SN1207\n  ENDF/B-VII.1: SN1207\nSN122:\n  ENDF/B-V.2: SN1225\n  ENDF/B-VII.0: SN1227\n  ENDF/B-VII.1: SN1227\nSN123:\n  ENDF/B-V.2: SN1235\n  ENDF/B-VII.0: SN1237\n  ENDF/B-VII.1: SN1237\nSN124:\n  ENDF/B-V.2: SN1245\n  ENDF/B-VII.0: SN1247\n  ENDF/B-VII.1: SN1247\nSN125:\n  ENDF/B-V.2: SN1255\n  ENDF/B-VII.0: SN1257\n  ENDF/B-VII.1: SN1257\nSN126:\n  ENDF/B-V.2: SN1265\n  ENDF/B-VII.0: SN1267\n  ENDF/B-VII.1: SN1267\nSR84:\n  ENDF/B-V.2: SR84 5\n  ENDF/B-VII.0: SR84_7\n  ENDF/B-VII.1: SR84_7\nSR86:\n  ENDF/B-V.2: SR86 5\n  ENDF/B-VII.0: SR86_7\n  ENDF/B-VII.1: SR86_7\nSR87:\n  ENDF/B-V.2: SR87 5\n  ENDF/B-VII.0: SR87_7\n  ENDF/B-VII.1: SR87_7\nSR88:\n  ENDF/B-V.2: SR88 5\n  ENDF/B-VII.0: SR88_7\n  ENDF/B-VII.1: SR88_7\nSR89:\n  ENDF/B-V.2: SR89 5\n  ENDF/B-VII.0: SR89_7\n  ENDF/B-VII.1: SR89_7\nSR90:\n  ENDF/B-V.2: SR90 5\n  ENDF/B-VII.0: 
SR90_7\n  ENDF/B-VII.1: SR90_7\nTA180:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: null\n  ENDF/B-VII.1: TA1807\nTA181:\n  ENDF/B-V.2: TA1815\n  ENDF/B-VII.0: TA1817\n  ENDF/B-VII.1: TA1817\nTA182:\n  ENDF/B-V.2: TA1825\n  ENDF/B-VII.0: TA1827\n  ENDF/B-VII.1: TA1827\nTB159:\n  ENDF/B-V.2: TB1595\n  ENDF/B-VII.0: TB1597\n  ENDF/B-VII.1: TB1597\nTB160:\n  ENDF/B-V.2: TB1605\n  ENDF/B-VII.0: TB1607\n  ENDF/B-VII.1: TB1607\nTC99:\n  ENDF/B-V.2: TC99 5\n  ENDF/B-VII.0: TC99_7\n  ENDF/B-VII.1: TC99_7\nTE120:\n  ENDF/B-V.2: TE1205\n  ENDF/B-VII.0: TE1207\n  ENDF/B-VII.1: TE1207\nTE122:\n  ENDF/B-V.2: TE1225\n  ENDF/B-VII.0: TE1227\n  ENDF/B-VII.1: TE1227\nTE123:\n  ENDF/B-V.2: TE1235\n  ENDF/B-VII.0: TE1237\n  ENDF/B-VII.1: TE1237\nTE124:\n  ENDF/B-V.2: TE1245\n  ENDF/B-VII.0: TE1247\n  ENDF/B-VII.1: TE1247\nTE125:\n  ENDF/B-V.2: TE1255\n  ENDF/B-VII.0: TE1257\n  ENDF/B-VII.1: TE1257\nTE126:\n  ENDF/B-V.2: TE1265\n  ENDF/B-VII.0: TE1267\n  ENDF/B-VII.1: TE1267\nTE127M:\n  ENDF/B-V.2: TE127M\n  ENDF/B-VII.0: TE27M7\n  ENDF/B-VII.1: TE27M7\nTE128:\n  ENDF/B-V.2: TE1285\n  ENDF/B-VII.0: TE1287\n  ENDF/B-VII.1: TE1287\nTE129M:\n  ENDF/B-V.2: TE129M\n  ENDF/B-VII.0: TE29M7\n  ENDF/B-VII.1: TE29M7\nTE130:\n  ENDF/B-V.2: TE1305\n  ENDF/B-VII.0: TE1307\n  ENDF/B-VII.1: TE1307\nTE132:\n  ENDF/B-V.2: TE1325\n  ENDF/B-VII.0: TE1327\n  ENDF/B-VII.1: TE1327\nTH227:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: TH2277\n  ENDF/B-VII.1: TH2277\nTH228:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: TH2287\n  ENDF/B-VII.1: TH2287\nTH229:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: TH2297\n  ENDF/B-VII.1: TH2297\nTH230:\n  ENDF/B-V.2: TH2305\n  ENDF/B-VII.0: TH2307\n  ENDF/B-VII.1: TH2307\nTH231:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: null\n  ENDF/B-VII.1: TH2317\nTH232:\n  ENDF/B-V.2: TH2325\n  ENDF/B-VII.0: TH2327\n  ENDF/B-VII.1: TH2327\nTH233:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: TH2337\n  ENDF/B-VII.1: TH2337\nTH234:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: TH2347\n  ENDF/B-VII.1: TH2347\nTI:\n  ENDF/B-V.2: TI   5\n  
ENDF/B-VII.0: null\n  ENDF/B-VII.1: null\nTI46:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: TI46_7\n  ENDF/B-VII.1: TI46_7\nTI47:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: TI47_7\n  ENDF/B-VII.1: TI47_7\nTI48:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: TI48_7\n  ENDF/B-VII.1: TI48_7\nTI49:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: TI49_7\n  ENDF/B-VII.1: TI49_7\nTI50:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: TI50_7\n  ENDF/B-VII.1: TI50_7\nTM168:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: null\n  ENDF/B-VII.1: TM1687\nTM169:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: null\n  ENDF/B-VII.1: TM1697\nTM170:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: null\n  ENDF/B-VII.1: TM1707\nTL203:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: null\n  ENDF/B-VII.1: TL2037\nTL205:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: null\n  ENDF/B-VII.1: TL2057\nU230:\n  ENDF/B-V.2: null \n  ENDF/B-VII.0: null\n  ENDF/B-VII.1: U230_7\nU231:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: null\n  ENDF/B-VII.1: U231_7\nU232:\n  ENDF/B-V.2: U-2325\n  ENDF/B-VII.0: U232_7\n  ENDF/B-VII.1: U232_7\nU233:\n  ENDF/B-V.2: U-2335\n  ENDF/B-VII.0: U233_7\n  ENDF/B-VII.1: U233_7\nU234:\n  ENDF/B-V.2: U-2345\n  ENDF/B-VII.0: U234_7\n  ENDF/B-VII.1: U234_7\nU235:\n  ENDF/B-V.2: U-2355\n  ENDF/B-VII.0: U235_7\n  ENDF/B-VII.1: U235_7\nU236:\n  ENDF/B-V.2: U-2365\n  ENDF/B-VII.0: U236_7\n  ENDF/B-VII.1: U236_7\nU237:\n  ENDF/B-V.2: U-2375\n  ENDF/B-VII.0: U237_7\n  ENDF/B-VII.1: U237_7\nU238:\n  ENDF/B-V.2: U-2385\n  ENDF/B-VII.0: U238_7\n  ENDF/B-VII.1: U238_7\nU239:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: U239_7\n  ENDF/B-VII.1: U239_7\nU240:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: U240_7\n  ENDF/B-VII.1: U240_7\nU241:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: U241_7\n  ENDF/B-VII.1: U241_7\nV:\n  ENDF/B-V.2: V    5\n  ENDF/B-VII.0: V____7\n  ENDF/B-VII.1: null\nV50:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: null\n  ENDF/B-VII.1: V50__7\nV51:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: null\n  ENDF/B-VII.1: V51__7\nW180:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: null\n  ENDF/B-VII.1: W180_7\nW182:\n  
ENDF/B-V.2: W-182V\n  ENDF/B-VII.0: W182_7\n  ENDF/B-VII.1: W182_7\nW183:\n  ENDF/B-V.2: W-183V\n  ENDF/B-VII.0: W183_7\n  ENDF/B-VII.1: W183_7\nW184:\n  ENDF/B-V.2: W-184V\n  ENDF/B-VII.0: W184_7\n  ENDF/B-VII.1: W184_7\nW186:\n  ENDF/B-V.2: W-186V\n  ENDF/B-VII.0: W186_7\n  ENDF/B-VII.1: W186_7\nXE123:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: XE1237\n  ENDF/B-VII.1: XE1237\nXE124:\n  ENDF/B-V.2: XE1245\n  ENDF/B-VII.0: XE1247\n  ENDF/B-VII.1: XE1247\nXE126:\n  ENDF/B-V.2: XE1265\n  ENDF/B-VII.0: XE1267\n  ENDF/B-VII.1: XE1267\nXE128:\n  ENDF/B-V.2: XE1285\n  ENDF/B-VII.0: XE1287\n  ENDF/B-VII.1: XE1287\nXE129:\n  ENDF/B-V.2: XE1295\n  ENDF/B-VII.0: XE1297\n  ENDF/B-VII.1: XE1297\nXE130:\n  ENDF/B-V.2: XE1305\n  ENDF/B-VII.0: XE1307\n  ENDF/B-VII.1: XE1307\nXE131:\n  ENDF/B-V.2: XE1315\n  ENDF/B-VII.0: XE1317\n  ENDF/B-VII.1: XE1317\nXE132:\n  ENDF/B-V.2: XE1325\n  ENDF/B-VII.0: XE1327\n  ENDF/B-VII.1: XE1327\nXE133:\n  ENDF/B-V.2: XE1335\n  ENDF/B-VII.0: XE1337\n  ENDF/B-VII.1: XE1337\nXE134:\n  ENDF/B-V.2: XE1345\n  ENDF/B-VII.0: XE1347\n  ENDF/B-VII.1: XE1347\nXE135:\n  ENDF/B-V.2: XE1355\n  ENDF/B-VII.0: XE1357\n  ENDF/B-VII.1: XE1357\nXE136:\n  ENDF/B-V.2: XE1365\n  ENDF/B-VII.0: XE1367\n  ENDF/B-VII.1: XE1367\nY89:\n  ENDF/B-V.2: Y89  5\n  ENDF/B-VII.0: Y89__7\n  ENDF/B-VII.1: Y89__7\nY90:\n  ENDF/B-V.2: Y90  5\n  ENDF/B-VII.0: null\n  ENDF/B-VII.1: Y90__7\nY91:\n  ENDF/B-V.2: Y91  5\n  ENDF/B-VII.0: Y91__7\n  ENDF/B-VII.1: Y91__7\nZN:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: ZN___7\n  ENDF/B-VII.1: null\nZN64:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: null\n  ENDF/B-VII.1: ZN64_7\nZN65:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: null\n  ENDF/B-VII.1: ZN65_7\nZN66:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: null\n  ENDF/B-VII.1: ZN66_7\nZN67:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: null\n  ENDF/B-VII.1: ZN67_7\nZN68:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: null\n  ENDF/B-VII.1: ZN68_7\nZN70:\n  ENDF/B-V.2: null\n  ENDF/B-VII.0: null\n  ENDF/B-VII.1: ZN70_7\nZR:\n  ENDF/B-V.2: ZIRCSV\n  
ENDF/B-VII.0: null\n  ENDF/B-VII.1: null\nZR90:\n  ENDF/B-V.2: ZR90SV\n  ENDF/B-VII.0: ZR90_7\n  ENDF/B-VII.1: ZR90_7\nZR91:\n  ENDF/B-V.2: ZR91SV\n  ENDF/B-VII.0: ZR91_7\n  ENDF/B-VII.1: ZR91_7\nZR92:\n  ENDF/B-V.2: ZR92SV\n  ENDF/B-VII.0: ZR92_7\n  ENDF/B-VII.1: ZR92_7\nZR93:\n  ENDF/B-V.2: ZR93 5\n  ENDF/B-VII.0: ZR93_7\n  ENDF/B-VII.1: ZR93_7\nZR94:\n  ENDF/B-V.2: ZR94SV\n  ENDF/B-VII.0: ZR94_7\n  ENDF/B-VII.1: ZR94_7\nZR95:\n  ENDF/B-V.2: ZR95 5\n  ENDF/B-VII.0: ZR95_7\n  ENDF/B-VII.1: ZR95_7\nZR96:\n  ENDF/B-V.2: ZR96 5\n  ENDF/B-VII.0: ZR96_7\n  ENDF/B-VII.1: ZR96_7"
  },
  {
    "path": "armi/runLog.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis module handles logging of console during a simulation.\n\nThe default way of calling and the global armi logger is to just import it:\n\n.. code::\n\n    from armi import runLog\n\nYou may want a logger specific to a single module, say to provide debug logging for only one module.\nThat functionality is provided by a global override of logging imports:\n\n.. code::\n\n    import logging\n    runLog = logging.getLogger(__name__)\n\nIn either case, you can then log things the same way:\n\n.. code::\n\n    runLog.info('information here')\n    runLog.error('extra error info here')\n    raise SomeException  # runLog.error() implies that the code will crash!\n\nOr change the log level the same way:\n\n.. 
code::\n\n    runLog.setVerbosity('debug')\n\"\"\"\n\nimport collections\nimport logging\nimport operator\nimport os\nimport sys\nimport time\nfrom glob import glob\n\nfrom armi import context\n\n# global constants\n_ADD_LOG_METHOD_STR = \"\"\"def {0}(self, message, *args, **kws):\n    if self.isEnabledFor({1}):\n        self._log({1}, message, args, **kws)\nlogging.Logger.{0} = {0}\"\"\"\nOS_SECONDS_TIMEOUT = 2 * 60\nSEP = \"|\"\nSTDERR_LOGGER_NAME = \"ARMI_ERROR\"\nSTDOUT_LOGGER_NAME = \"ARMI\"\n\n\nclass _RunLog:\n    \"\"\"\n    Handles all the logging.\n\n    For the parent process, things are allowed to print to stdout and stderr,\n    but the stdout prints are formatted like log statements.\n    For the child processes, everything is piped to log files.\n    \"\"\"\n\n    STDERR_NAME = \"{0}.{1:04d}.stderr\"\n    STDOUT_NAME = \"{0}.{1:04d}.stdout\"\n\n    def __init__(self, mpiRank=0):\n        \"\"\"\n        Build a log object.\n\n        Parameters\n        ----------\n        mpiRank : int\n            If this is zero, we are in the parent process, otherwise child process. This should not\n            be adjusted after instantiation.\n        \"\"\"\n        self._mpiRank = mpiRank\n        self._verbosity = logging.INFO\n        self.initialErr = None\n        self.logLevels = None\n        self._logLevelNumbers = []\n        self.logger = None\n        self.stderrLogger = None\n\n        self.setNullLoggers()\n        self._setLogLevels()\n\n    def setNullLoggers(self):\n        \"\"\"Helper method to set both of our loggers to Null handlers.\"\"\"\n        self.logger = NullLogger(\"NULL\")\n        self.stderrLogger = NullLogger(\"NULL2\", isStderr=True)\n\n    @staticmethod\n    def getLogLevels(mpiRank):\n        \"\"\"Helper method to build an important data object this class needs.\n\n        Parameters\n        ----------\n        mpiRank : int\n            If this is zero, we are in the parent process, otherwise child process. 
This should not\n            be adjusted after instantiation.\n        \"\"\"\n        rank = \"\" if mpiRank == 0 else f\"-{mpiRank:>03d}\"\n\n        # NOTE: using ordereddict so we can get right order of options in GUI\n        return collections.OrderedDict(\n            [\n                (\"debug\", (logging.DEBUG, f\"[dbug{rank}] \")),\n                (\"extra\", (15, f\"[xtra{rank}] \")),\n                (\"info\", (logging.INFO, f\"[info{rank}] \")),\n                (\"important\", (25, f\"[impt{rank}] \")),\n                (\"prompt\", (27, f\"[prmt{rank}] \")),\n                (\"warning\", (logging.WARNING, f\"[warn{rank}] \")),\n                (\"error\", (logging.ERROR, f\"[err {rank}] \")),\n                (\"header\", (100, f\"{rank}\")),\n            ]\n        )\n\n    @staticmethod\n    def getWhiteSpace(mpiRank):\n        \"\"\"Helper method to build the white space used to left-adjust the log lines.\n\n        Parameters\n        ----------\n        mpiRank : int\n            If this is zero, we are in the parent process, otherwise child process. 
This should not\n            be adjusted after instantiation.\n        \"\"\"\n        logLevels = _RunLog.getLogLevels(mpiRank)\n        return \" \" * len(max([ll[1] for ll in logLevels.values()]))\n\n    def _setLogLevels(self):\n        \"\"\"Here we fill the logLevels dict with custom strings that depend on the MPI rank.\"\"\"\n        self.logLevels = self.getLogLevels(self._mpiRank)\n        self._logLevelNumbers = sorted([ll[0] for ll in self.logLevels.values()])\n\n        # modify the logging module strings for printing\n        for longLogString, (logValue, shortLogString) in self.logLevels.items():\n            # add the log string name (upper and lower) to logging module\n            logging.addLevelName(logValue, shortLogString.upper())\n            logging.addLevelName(logValue, shortLogString)\n\n            # ensure that we add any custom logging levels as constants to the module, e.g. logging.HEADER\n            try:\n                getattr(logging, longLogString.upper())\n            except AttributeError:\n                setattr(logging, longLogString.upper(), logValue)\n\n            # Add logging methods for our new custom levels: LOG.extra(\"message\")\n            try:\n                getattr(logging, longLogString)\n            except AttributeError:\n                exec(_ADD_LOG_METHOD_STR.format(longLogString, logValue))\n\n    def log(self, msgType, msg, single=False, label=None, **kwargs):\n        \"\"\"\n        This is a wrapper around logger.log() that does most of the work and is used by all message\n        passers (e.g. 
info, warning, etc.).\n\n        In this situation, we do the mangling needed to get the log level to the correct number.\n        And we do some custom string manipulation so we can handle de-duplicating warnings.\n        \"\"\"\n        # Determine the log level: users can optionally pass in custom strings (\"debug\")\n        msgLevel = msgType if isinstance(msgType, int) else self.logLevels[msgType][0]\n\n        # If this is a special \"don't duplicate me\" string, we need to add that info to the msg temporarily\n        msg = str(msg)\n\n        # Do the actual logging\n        self.logger.log(msgLevel, msg, single=single, label=label)\n\n    def getDuplicatesFilter(self):\n        \"\"\"If it exists, find the top-level ARMI logger 'should have a no duplicates' filter.\"\"\"\n        if not self.logger or not isinstance(self.logger, logging.Logger):\n            return None\n\n        return self.logger.getDuplicatesFilter()\n\n    def clearSingleLogs(self):\n        \"\"\"Reset the list of de-duplicated warnings, so users can see those warnings again.\"\"\"\n        dupsFilter = self.getDuplicatesFilter()\n        if dupsFilter:\n            dupsFilter.singleMessageLabels.clear()\n\n    def warningReport(self):\n        \"\"\"Summarize all warnings for the run.\"\"\"\n        self.logger.warningReport()\n\n    def getLogVerbosityRank(self, level):\n        \"\"\"Return integer verbosity rank given the string verbosity name.\"\"\"\n        try:\n            return self.logLevels[level][0]\n        except KeyError:\n            log_strs = list(self.logLevels.keys())\n            raise KeyError(f\"{level} is not a valid verbosity level: {log_strs}\")\n\n    def setVerbosity(self, level):\n        \"\"\"\n        Sets the minimum output verbosity for the logger.\n\n        Any message with a higher verbosity than this will be emitted.\n\n        Parameters\n        ----------\n        level : int or str\n            The level to set the log output verbosity 
to.\n            Valid numbers are 0-50 and valid strings are keys of logLevels\n\n        Examples\n        --------\n        >>> setVerbosity('debug') -> sets to 0\n        >>> setVerbosity(0) -> sets to 0\n\n        \"\"\"\n        # first, we have to get a valid integer from the input level\n        if isinstance(level, str):\n            self._verbosity = self.getLogVerbosityRank(level)\n        elif isinstance(level, int):\n            # The logging module does strange things if you set the log level to something other\n            # than DEBUG, INFO, etc. So, if someone tries, we HAVE to set the log level at a\n            # canonical value. Otherwise, nearly all log statements will be silently dropped.\n            if level in self._logLevelNumbers:\n                self._verbosity = level\n            elif level < self._logLevelNumbers[0]:\n                self._verbosity = self._logLevelNumbers[0]\n            else:\n                for i in range(len(self._logLevelNumbers) - 1, -1, -1):\n                    if level >= self._logLevelNumbers[i]:\n                        self._verbosity = self._logLevelNumbers[i]\n                        break\n        else:\n            raise TypeError(f\"Invalid verbosity rank {level}.\")\n\n        # Finally, set the log level\n        if self.logger is not None:\n            for handler in self.logger.handlers:\n                handler.setLevel(self._verbosity)\n            self.logger.setLevel(self._verbosity)\n\n    def getVerbosity(self):\n        \"\"\"Return the global runLog verbosity.\"\"\"\n        return self._verbosity\n\n    def restoreStandardStreams(self):\n        \"\"\"Set the system stderr back to its default (as it was when the run started).\"\"\"\n        if self.initialErr is not None and self._mpiRank > 0:\n            sys.stderr = self.initialErr\n\n    def startLog(self, name):\n        \"\"\"Initialize the streams when parallel processing.\"\"\"\n        # open the main logger\n        
self.logger = logging.getLogger(STDOUT_LOGGER_NAME + SEP + name + SEP + str(self._mpiRank))\n\n        # if there was a pre-existing _verbosity, use it now\n        if self._verbosity != logging.INFO:\n            self.setVerbosity(self._verbosity)\n\n        if self._mpiRank != 0:\n            # init stderr intercepting logging\n            filePath = os.path.join(getLogDir(), _RunLog.STDERR_NAME.format(name, self._mpiRank))\n            self.stderrLogger = logging.getLogger(STDERR_LOGGER_NAME)\n            h = logging.FileHandler(filePath, delay=True)\n            fmt = \"%(message)s\"\n            form = logging.Formatter(fmt)\n            h.setFormatter(form)\n            h.setLevel(logging.WARNING)\n            self.stderrLogger.handlers = [h]\n            self.stderrLogger.setLevel(logging.WARNING)\n\n            # force the error logger onto stderr\n            self.initialErr = sys.stderr\n            sys.stderr = self.stderrLogger\n\n\ndef getLogDir():\n    \"\"\"This returns a file path for the `logs` directory, first checking if the user set the ARMI_TEMP_ROOT_PATH\n    environment variable.\n    \"\"\"\n    if os.environ.get(\"ARMI_TEMP_ROOT_PATH\"):\n        return os.path.join(os.environ[\"ARMI_TEMP_ROOT_PATH\"], \"logs\")\n    else:\n        return os.path.join(os.getcwd(), \"logs\")\n\n\ndef close(mpiRank=None):\n    \"\"\"End use of the log. 
Concatenate if needed and restore defaults.\"\"\"\n    mpiRank = context.MPI_RANK if mpiRank is None else mpiRank\n\n    if mpiRank == 0:\n        try:\n            concatenateLogs()\n        except IOError as ee:\n            warning(\"Failed to concatenate logs due to IOError.\")\n            error(ee)\n    else:\n        if LOG.stderrLogger:\n            _ = [h.close() for h in LOG.stderrLogger.handlers]\n        if LOG.logger:\n            _ = [h.close() for h in LOG.logger.handlers]\n\n    LOG.setNullLoggers()\n    LOG.restoreStandardStreams()\n\n\ndef concatenateLogs(logDir=None):\n    \"\"\"\n    Concatenate the armi run logs and delete them.\n\n    Should only ever be called by parent.\n\n    .. impl:: Log files from different processes are combined.\n        :id: I_ARMI_LOG_MPI\n        :implements: R_ARMI_LOG_MPI\n\n        The log files are plain text files. Since ARMI is frequently run in parallel, the situation\n        arises where each ARMI process generates its own plain text log file. This function combines\n        the separate log files, per process, into one log file.\n\n        The files are written in numerical order, with the lead process stdout first then the lead\n        process stderr. Then each other process is written to the combined file, in order, stdout\n        then stderr. 
Finally, the original stdout and stderr files are deleted.\n    \"\"\"\n    if logDir is None:\n        logDir = getLogDir()\n\n    # find all the logging-module-based log files\n    stdoutFiles = sorted(glob(os.path.join(logDir, \"*.stdout\")))\n    if not len(stdoutFiles):\n        info(\"No log files found to concatenate.\")\n        return\n\n    info(f\"Concatenating {len(stdoutFiles)} log files\")\n\n    # default worker log name if none is found\n    caseTitle = \"armi-workers\"\n    for stdoutPath in stdoutFiles:\n        stdoutFile = os.path.normpath(stdoutPath).split(os.sep)[-1]\n        prefix = STDOUT_LOGGER_NAME + \".\"\n        if stdoutFile[0 : len(prefix)] == prefix:\n            candidate = stdoutFile.split(\".\")[-3]\n            if len(candidate) > 0:\n                caseTitle = candidate\n                break\n\n    combinedLogName = os.path.join(logDir, f\"{caseTitle}-mpi.log\")\n    with open(combinedLogName, \"w\") as workerLog:\n        workerLog.write(\"\\n{0} CONCATENATED WORKER LOG FILES {1}\\n\".format(\"-\" * 10, \"-\" * 10))\n\n        for stdoutName in stdoutFiles:\n            # NOTE: If the log file name format changes, this will need to change.\n            rank = int(stdoutName.split(\".\")[-2])\n            with open(stdoutName, \"r\") as logFile:\n                data = logFile.read()\n                # only write if there's something to write\n                if data:\n                    rankId = \"\\n{0} RANK {1:03d} STDOUT {2}\\n\".format(\"-\" * 10, rank, \"-\" * 60)\n                    if rank == 0:\n                        print(rankId, file=sys.stdout)\n                        print(data, file=sys.stdout)\n                    else:\n                        workerLog.write(rankId)\n                        workerLog.write(data)\n            try:\n                os.remove(stdoutName)\n            except OSError:\n                warning(f\"Could not delete {stdoutName}\")\n\n            # then print the stderr messages 
for that child process\n            stderrName = stdoutName[:-3] + \"err\"\n            if os.path.exists(stderrName):\n                with open(stderrName) as logFile:\n                    data = logFile.read()\n                    if data:\n                        # only write if there's something to write.\n                        rankId = \"\\n{0} RANK {1:03d} STDERR {2}\\n\".format(\"-\" * 10, rank, \"-\" * 60)\n                        print(rankId, file=sys.stderr)\n                        print(data, file=sys.stderr)\n                try:\n                    os.remove(stderrName)\n                except OSError:\n                    warning(f\"Could not delete {stderrName}\")\n\n\n# Here are all the module-level functions that should be used for most outputs. They use the Log\n# object behind the scenes.\ndef raw(msg):\n    \"\"\"Print raw text without any special functionality.\"\"\"\n    LOG.log(\"header\", msg, single=False)\n\n\ndef extra(msg, single=False, label=None):\n    LOG.log(\"extra\", msg, single=single, label=label)\n\n\ndef debug(msg, single=False, label=None):\n    LOG.log(\"debug\", msg, single=single, label=label)\n\n\ndef info(msg, single=False, label=None):\n    LOG.log(\"info\", msg, single=single, label=label)\n\n\ndef important(msg, single=False, label=None):\n    LOG.log(\"important\", msg, single=single, label=label)\n\n\ndef warning(msg, single=False, label=None):\n    LOG.log(\"warning\", msg, single=single, label=label)\n\n\ndef error(msg, single=False, label=None):\n    LOG.log(\"error\", msg, single=single, label=label)\n\n\ndef header(msg, single=False, label=None):\n    LOG.log(\"header\", msg, single=single, label=label)\n\n\ndef warningReport():\n    LOG.warningReport()\n\n\ndef setVerbosity(level):\n    LOG.setVerbosity(level)\n\n\ndef getVerbosity():\n    return LOG.getVerbosity()\n\n\nclass DeduplicationFilter(logging.Filter):\n    \"\"\"\n    Important logging filter.\n\n    * allow users to turn off duplicate 
warnings\n    * handles special indentation rules for our logs\n    \"\"\"\n\n    def __init__(self, *args, **kwargs):\n        logging.Filter.__init__(self, *args, **kwargs)\n        self.singleMessageLabels = set()\n        self.warningCounts = {}\n\n    def filter(self, record):\n        # determine if this is a \"do not duplicate\" message\n        msg = str(record.msg)\n        single = getattr(record, \"single\", False)\n\n        # grab the label if it exist, otherwise use the message itself as the label\n        label = getattr(record, \"label\", msg)\n        label = msg if label is None else label\n\n        # Track all warnings, for warning report\n        if record.levelno in (logging.WARNING, logging.CRITICAL):\n            if label not in self.warningCounts:\n                self.warningCounts[label] = 1\n            else:\n                self.warningCounts[label] += 1\n                if single:\n                    return False\n\n        # If the message is set to \"do not duplicate\" we may filter it out\n        if single:\n            # in sub-warning cases, hash the label, for faster lookup\n            label = hash(label)\n            if label not in self.singleMessageLabels:\n                self.singleMessageLabels.add(label)\n            else:\n                return False\n\n        # Handle some special string-mangling we want to do, for multi-line messages\n        whiteSpace = _RunLog.getWhiteSpace(context.MPI_RANK)\n        record.msg = msg.rstrip().replace(\"\\n\", \"\\n\" + whiteSpace)\n        return True\n\n\nclass RunLogger(logging.Logger):\n    \"\"\"Custom Logger to support our specific desires.\n\n    1. Giving users the option to de-duplicate warnings\n    2. Piping stderr to a log file\n\n    .. impl:: A simulation-wide log, with user-specified verbosity.\n        :id: I_ARMI_LOG\n        :implements: R_ARMI_LOG\n\n        Log statements are any text a user wants to record during a run. 
For instance, basic\n        notifications of what is happening in the run, simple warnings, or hard errors. Every log\n        message has an associated log level, controlled by the \"verbosity\" of the logging statement\n        in the code. In the ARMI codebase, you can see many examples of logging:\n\n        .. code-block:: python\n\n            runLog.error(\"This sort of error might usually terminate the run.\")\n            runLog.warning(\"Users probably want to know.\")\n            runLog.info(\"This is the usual verbosity.\")\n            runLog.debug(\"This is only logged during a debug run.\")\n\n        The full list of logging levels is defined in ``_RunLog.getLogLevels()``, and the developer\n        specifies the verbosity of a run via ``_RunLog.setVerbosity()``.\n\n        At the end of the ARMI-based simulation, the analyst will have a full record of potentially\n        interesting information they can use to understand their run.\n\n    .. impl:: Logging is done to the screen and to file.\n        :id: I_ARMI_LOG_IO\n        :implements: R_ARMI_LOG_IO\n\n        This logger makes it easy for users to add log statements to and ARMI application, and ARMI\n        will control the flow of those log statements. In particular, ARMI overrides the normal\n        Python logging tooling, to allow developers to pipe their log statements to both screen and\n        file. This works for stdout and stderr.\n\n        At any place in the ARMI application, developers can interject a plain text logging message,\n        and when that code is hit during an ARMI simulation, the text will be piped to screen and a\n        log file. By default, the ``logging`` module only logs to screen, but ARMI adds a\n        ``FileHandler`` in the ``RunLog`` constructor and in ``_RunLog.startLog``.\n    \"\"\"\n\n    FMT = \"%(levelname)s%(message)s\"\n    # This is being set as a class attribute so it only runs once, before the class is initialized. 
For some bespoke\n    # MPI use cases, calling the function when setting the `filePath` causes issues. This sidesteps the problem.\n    LOG_DIR = getLogDir()\n\n    def __init__(self, *args, **kwargs):\n        # optionally, the user can pass in the MPI_RANK by putting it in the logger name after a separator string\n        # args[0].split(SEP): 0 = \"ARMI\", 1 = caseTitle, 2 = MPI_RANK\n        if SEP in args[0]:\n            mpiRank = int(args[0].split(SEP)[-1].strip())\n            args = (\".\".join(args[0].split(SEP)[0:2]),)\n        else:\n            mpiRank = context.MPI_RANK\n\n        logging.Logger.__init__(self, *args, **kwargs)\n        self.allowStopDuplicates()\n\n        if mpiRank == 0:\n            handler = logging.StreamHandler(sys.stdout)\n            handler.setLevel(logging.INFO)\n            self.setLevel(logging.INFO)\n        else:\n            filePath = os.path.join(RunLogger.LOG_DIR, _RunLog.STDOUT_NAME.format(args[0], mpiRank))\n            handler = logging.FileHandler(filePath, delay=True)\n            handler.setLevel(logging.WARNING)\n            self.setLevel(logging.WARNING)\n\n        form = logging.Formatter(RunLogger.FMT)\n        handler.setFormatter(form)\n        self.addHandler(handler)\n\n    def log(self, msgType, msg, single=False, label=None, *args, **kwargs):\n        \"\"\"\n        This is a wrapper around logger.log() that does most of the work.\n\n        This is used by all message passers (e.g. info, warning, etc.). In this situation, we do the\n        mangling needed to get the log level to the correct number. 
And we do some custom string\n        manipulation so we can handle de-duplicating warnings.\n        \"\"\"\n        # Determine the log level: users can optionally pass in custom strings (\"debug\")\n        msgLevel = msgType if isinstance(msgType, int) else LOG.logLevels[msgType][0]\n\n        # Do the actual logging\n        logging.Logger.log(self, msgLevel, str(msg), extra={\"single\": single, \"label\": label})\n\n    def _log(self, *args, **kwargs):\n        \"\"\"\n        Wrapper around the standard library Logger._log() method.\n\n        The primary goal here is to allow us to support the deduplication of warnings.\n\n        Notes\n        -----\n        All of the ``*args`` and ``**kwargs`` logic here are mandatory, as the standard library\n        implementation of this method changed the number of kwargs between Python v3.4 and v3.9.\n        \"\"\"\n        # we need 'extra' as an output keyword, even if empty\n        if \"extra\" not in kwargs:\n            kwargs[\"extra\"] = {}\n\n        # make sure to populate the single/label data for de-duplication\n        if \"single\" not in kwargs[\"extra\"]:\n            msg = args[1]\n            single = kwargs.pop(\"single\", False)\n            label = kwargs.pop(\"label\", None)\n            label = msg if label is None else label\n\n            kwargs[\"extra\"][\"single\"] = single\n            kwargs[\"extra\"][\"label\"] = label\n\n        logging.Logger._log(self, *args, **kwargs)\n\n    def allowStopDuplicates(self):\n        \"\"\"Helper method to allow us to safely add the deduplication filter at any time.\"\"\"\n        for f in self.filters:\n            if isinstance(f, DeduplicationFilter):\n                return\n        self.addFilter(DeduplicationFilter())\n\n    def write(self, msg, **kwargs):\n        \"\"\"The redirect method that allows to do stderr piping.\"\"\"\n        self.error(msg)\n\n    def flush(self, *args, **kwargs):\n        \"\"\"Stub, purely to allow stderr 
piping.\"\"\"\n        pass\n\n    def close(self):\n        \"\"\"Helper method, to shutdown and delete a Logger.\"\"\"\n        self.handlers.clear()\n        del self\n\n    def getDuplicatesFilter(self):\n        \"\"\"This object should have a no-duplicates filter. If it exists, find it.\"\"\"\n        for f in self.filters:\n            if isinstance(f, DeduplicationFilter):\n                return f\n\n        return None\n\n    def warningReport(self):\n        \"\"\"Summarize all warnings for the run.\"\"\"\n        self.info(\"----- Final Warning Count --------\")\n        self.info(\"  {0:^10s}   {1:^25s}\".format(\"COUNT\", \"LABEL\"))\n\n        # grab the no-duplicates filter, and exit early if it doesn't exist\n        dupsFilter = self.getDuplicatesFilter()\n        if dupsFilter is None:\n            self.info(\"  {0:^10s}   {1:^25s}\".format(str(0), str(\"None Found\")))\n            self.info(\"------------------------------------\")\n            return\n\n        # sort by labcollections.defaultdict(lambda: 1)\n        total = 0\n        for label, count in sorted(dupsFilter.warningCounts.items(), key=operator.itemgetter(1), reverse=True):\n            self.info(f\"  {str(count):^10s}   {str(label):^25s}\")\n            total += count\n        self.info(\"------------------------------------\")\n\n        # add a totals line\n        self.info(f\"  {str(total):^10s}   Total Number of Warnings\")\n        self.info(\"------------------------------------\")\n\n    def setVerbosity(self, intLevel):\n        \"\"\"A helper method to try to partially support the local, historical method of the same name.\"\"\"\n        self.setLevel(intLevel)\n\n\nclass NullLogger(RunLogger):\n    \"\"\"This is really just a placeholder for logging before or after the span of a normal armi run.\n\n    It will forward all logging to stdout/stderr, as you'd normally expect.\n    But it will preserve the formatting and duplication tools of the armi library.\n    
\"\"\"\n\n    def __init__(self, name, isStderr=False):\n        RunLogger.__init__(self, name)\n        if isStderr:\n            self.handlers = [logging.StreamHandler(sys.stderr)]\n        else:\n            self.handlers = [logging.StreamHandler(sys.stdout)]\n\n    def addHandler(self, *args, **kwargs):\n        \"\"\"Ensure this STAYS a null logger.\"\"\"\n        pass\n\n\n# Setting the default logging class to be ours\nlogging.RunLogger = RunLogger\nlogging.setLoggerClass(RunLogger)\n\n\ndef createLogDir(logDir: str = None) -> None:\n    \"\"\"A helper method to create the log directory.\"\"\"\n    # the usual case is the user does not pass in a log dir path, so we use the global one\n    if logDir is None:\n        logDir = getLogDir()\n\n    # create the directory\n    if not os.path.exists(logDir):\n        try:\n            os.makedirs(logDir)\n        except FileExistsError:\n            # If we hit this race condition, we still win.\n            return\n\n    # potentially, wait for directory to be created\n    secondsWait = 0.5\n    loopCounter = 0\n    while not os.path.exists(logDir):\n        loopCounter += 1\n        if loopCounter > (OS_SECONDS_TIMEOUT / secondsWait):\n            raise OSError(f\"Was unable to create the log directory: {logDir}\")\n\n        time.sleep(secondsWait)\n\n\nif not os.path.exists(getLogDir()):\n    createLogDir(getLogDir())\n\n\ndef logFactory():\n    \"\"\"Create the default logging object.\"\"\"\n    return _RunLog(int(context.MPI_RANK))\n\n\nLOG = logFactory()\n"
  },
  {
    "path": "armi/settings/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nSettings are various key-value pairs that determine a bunch of modeling and simulation behaviors.\n\nThey are one of the key inputs to an ARMI run. They say which modules to run and which\nmodeling approximations to apply and how many cycles to run and at what power and\navailability fraction and things like that. The ARMI Framework itself has many settings\nof its own, and plugins typically register some of their own settings as well.\n\"\"\"\n\nimport fnmatch\nimport glob\nimport os\nfrom typing import List\n\nfrom ruamel import yaml\n\nfrom armi import runLog\nfrom armi.settings.caseSettings import Settings\nfrom armi.settings.setting import (\n    Default,  # noqa: F401\n    Option,  # noqa: F401\n    Setting,\n)\nfrom armi.utils.customExceptions import InvalidSettingsFileError\n\nNOT_ENABLED = \"\"  # An empty setting value implies that the feature\n\n\ndef isBoolSetting(setting: Setting) -> bool:\n    \"\"\"Return whether the passed setting represents a boolean value.\"\"\"\n    return isinstance(setting.default, bool)\n\n\ndef recursivelyLoadSettingsFiles(\n    rootDir,\n    patterns: List[str],\n    recursive=True,\n    ignorePatterns: List[str] = None,\n    handleInvalids=True,\n):\n    \"\"\"\n    Scans path for valid xml files and returns their paths.\n\n    Parameters\n    ----------\n    rootDir : str\n        The base path to scan for 
settings files\n    patterns : list\n        file patterns to match file names\n    recursive : bool (optional)\n        load files recursively\n    ignorePatterns : list (optional)\n        list of filename patterns to ignore\n    handleInvalids : bool\n        option to suppress errors generated when finding files that appear to be settings files but fail to load. This\n        may happen when old settings are present.\n\n    Returns\n    -------\n    csFiles : list\n        list of :py:class:`~armi.settings.caseSettings.Settings` objects.\n    \"\"\"\n    assert not isinstance(ignorePatterns, str), \"Bare string passed as ignorePatterns. Make sure to pass a list\"\n\n    assert not isinstance(patterns, str), \"Bare string passed as patterns. Make sure to pass a list\"\n\n    possibleSettings = []\n    runLog.info(\"Finding potential settings files matching {}.\".format(patterns))\n    if recursive:\n        for directory, _list, files in os.walk(rootDir):\n            matches = set()\n            for pattern in patterns:\n                matches |= set(fnmatch.filter(files, pattern))\n            if ignorePatterns is not None:\n                for ignorePattern in ignorePatterns:\n                    matches -= set(fnmatch.filter(files, ignorePattern))\n            possibleSettings.extend([os.path.join(directory, fname) for fname in matches])\n    else:\n        for pattern in patterns:\n            possibleSettings.extend(glob.glob(pattern))\n\n    csFiles = []\n    runLog.info(\"Checking for valid settings files.\")\n    for possibleSettingsFile in possibleSettings:\n        if os.path.getsize(possibleSettingsFile) > 1e6:\n            runLog.info(\"skipping {} -- looks too big\".format(possibleSettingsFile))\n            continue\n        try:\n            cs = Settings()\n            cs.loadFromInputFile(possibleSettingsFile, handleInvalids=handleInvalids)\n            csFiles.append(cs)\n            runLog.extra(\"loaded {}\".format(possibleSettingsFile))\n  
      except InvalidSettingsFileError as ee:\n            runLog.info(\"skipping {}\\n    {}\".format(possibleSettingsFile, ee))\n        except yaml.composer.ComposerError as ee:\n            runLog.info(\n                \"skipping {}; it appears to be an incomplete YAML snippet\\n    {}\".format(possibleSettingsFile, ee)\n            )\n        except Exception as ee:\n            runLog.error(\n                \"Failed to parse {}.\\nIt looked like a settings file but gave this exception:\\n{}: {}\".format(\n                    possibleSettingsFile, type(ee).__name__, ee\n                )\n            )\n            raise\n    csFiles.sort(key=lambda csFile: csFile.caseTitle)\n    return csFiles\n\n\ndef promptForSettingsFile(choice=None):\n    \"\"\"\n    Allows the user to select an ARMI input from the input files in the directory.\n\n    Parameters\n    ----------\n    choice : int, optional\n        The item in the list of valid YAML files to load\n    \"\"\"\n    runLog.info(\"Welcome to the ARMI Loader\")\n    runLog.info(\"Scanning for ARMI settings files...\")\n    files = sorted(glob.glob(\"*.yaml\"))\n    if not files:\n        runLog.info(\"No eligible settings files found. Creating settings without choice\")\n        return None\n\n    if choice is None:\n        for i, pathToFile in enumerate(files):\n            runLog.info(\"[{0}] - {1}\".format(i, os.path.split(pathToFile)[-1]))\n        choice = int(input(\"Enter choice: \"))\n\n    return files[choice]\n"
  },
  {
    "path": "armi/settings/caseSettings.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis defines a Settings object that acts mostly like a dictionary. It\nis meant so that each ARMI run has one-and-only-one Settings object. It records\nuser settings like the core power level, the input file names, the number of cycles to\nrun, the run type, the environment setup, and hundreds of other things.\n\nA Settings object can be saved as or loaded from an YAML file. The ARMI GUI is designed to\ncreate this settings file, which is then loaded by an ARMI process on the cluster.\n\"\"\"\n\nimport io\nimport logging\nimport os\nfrom copy import copy, deepcopy\n\nfrom ruamel.yaml import YAML\n\nfrom armi import context, runLog\nfrom armi.settings import settingsIO\nfrom armi.settings.setting import Setting\nfrom armi.utils import pathTools\nfrom armi.utils.customExceptions import NonexistentSetting\n\nSIMPLE_CYCLES_INPUTS = {\n    \"availabilityFactor\",\n    \"availabilityFactors\",\n    \"powerFractions\",\n    \"burnSteps\",\n    \"cycleLength\",\n    \"cycleLengths\",\n}\n\n\nclass Settings:\n    \"\"\"\n    A container for run settings, such as case title, power level, and many more.\n\n    .. impl:: Settings are used to define an ARMI run.\n        :id: I_ARMI_SETTING0\n        :implements: R_ARMI_SETTING\n\n        The Settings object is accessible to most ARMI objects through self.cs\n        (for 'case settings'). 
It acts largely as a dictionary, and setting values\n        are accessed by keys.\n\n        The Settings object has a 1-to-1 correspondence with the ARMI settings\n        input file. This file may be created by hand or by a GUI.\n\n    Notes\n    -----\n    While it is possible to modify case settings during the course of a run, this\n    is highly discouraged because there will be no record of this happening in your\n    results or in the database produced from your run. There is no guarantee that\n    doing so will not cause unexpected problems with your calculation.\n    \"\"\"\n\n    defaultCaseTitle = \"armi\"\n\n    def __init__(self, fName=None):\n        \"\"\"\n        Instantiate a Settings object.\n\n        Parameters\n        ----------\n        fName : str, optional\n            Path to a valid yaml settings file that will be loaded\n        \"\"\"\n        # if the \"loadingFile\" is not set, this better be True, or there are no blueprints at all\n        self.filelessBP = False\n\n        self._failOnLoad = False\n        \"\"\"This is state information.\n\n        The command line can take settings, which override a value in the current\n        settings file; however, if the settings file is listed after a setting value,\n        the setting from the settings file will be used rather than the one explicitly\n        provided by the user on the command line.  
Therefore, _failOnLoad is used to\n        prevent this from happening.\n        \"\"\"\n        from armi import getApp\n\n        self.path = \"\"\n\n        app = getApp()\n        assert app is not None\n        self.__settings = app.getSettings()\n\n        if fName:\n            self.loadFromInputFile(fName)\n\n    @property\n    def inputDirectory(self):\n        \"\"\"Getter for settings file path.\"\"\"\n        if self.path is None:\n            return os.getcwd()\n        else:\n            return os.path.dirname(self.path)\n\n    @property\n    def caseTitle(self):\n        \"\"\"Getter for settings case title.\n\n        .. impl:: Define a case title to go with the settings.\n            :id: I_ARMI_SETTINGS_META0\n            :implements: R_ARMI_SETTINGS_META\n\n            Every Settings object has a \"case title\"; a string for users to\n            help identify their run. This case title is used in log file\n            names, it is printed during a run, it is frequently used to\n            name the settings file. It is designed to be an easy-to-use\n            and easy-to-understand way to keep track of simulations. 
The\n            general idea here is that the average analyst that is using\n            ARMI will run many ARMI-based simulations, and there needs\n            to be an easy to identify them all.\n        \"\"\"\n        if not self.path:\n            return self.defaultCaseTitle\n        else:\n            return os.path.splitext(os.path.basename(self.path))[0]\n\n    @caseTitle.setter\n    def caseTitle(self, value):\n        \"\"\"Setter for the case title.\"\"\"\n        self.path = os.path.join(self.inputDirectory, value + \".yaml\")\n\n    @property\n    def environmentSettings(self):\n        \"\"\"Getter for environment settings.\"\"\"\n        return [setting.name for setting in self.__settings.values() if setting.isEnvironment]\n\n    def __contains__(self, key):\n        return key in self.__settings\n\n    def __repr__(self):\n        total = len(self.__settings.keys())\n        isAltered = lambda s: 1 if s.value != s.default else 0\n        altered = sum([isAltered(setting) for setting in self.__settings.values()])\n\n        return \"<{} name:{} total:{} altered:{}>\".format(self.__class__.__name__, self.caseTitle, total, altered)\n\n    def _directAccessOfSettingAllowed(self, key):\n        \"\"\"\n        A way to check if specific settings can be grabbed out of the case settings.\n\n        Could be updated with other specific instances as necessary.\n\n        Notes\n        -----\n        Checking the validity of grabbing specific settings at this point, as is done for the\n        SIMPLE_CYCLES_INPUT's, feels a bit intrusive and out of place. In particular, the fact that\n        the check is done every time that a setting is reached for, no matter if it is the setting\n        in question, is quite clunky. 
In the future, it would be desirable if the settings system\n        were more flexible to control this type of thing at a deeper level.\n        \"\"\"\n        if key not in self.__settings:\n            return False, NonexistentSetting(key)\n\n        if key in SIMPLE_CYCLES_INPUTS and self.__settings[\"cycles\"].value != []:\n            err = ValueError(\n                \"Cannot grab simple cycles information from the case settings when detailed cycles \"\n                \"information is also entered. In general cycles information should be pulled off \"\n                \"the operator or parsed using the appropriate getter in the utils.\"\n            )\n\n            return False, err\n\n        return True, None\n\n    def __getitem__(self, key):\n        settingIsOkayToGrab, err = self._directAccessOfSettingAllowed(key)\n        if settingIsOkayToGrab:\n            return self.__settings[key].value\n        else:\n            raise err\n\n    def getSetting(self, key, default=None):\n        \"\"\"\n        Return a copy of an actual Setting object, instead of just its value.\n\n        Notes\n        -----\n        This is used very rarely, try to organize your code to only need a Setting value.\n        \"\"\"\n        if key in self.__settings:\n            return copy(self.__settings[key])\n        elif default is not None:\n            return default\n        else:\n            raise NonexistentSetting(key)\n\n    def __setitem__(self, key, val):\n        \"\"\"\n        Notes\n        -----\n        This potentially allows for invisible settings mutations.\n        \"\"\"\n        if key in self.__settings:\n            self.__settings[key].setValue(val)\n        else:\n            raise NonexistentSetting(key)\n\n    def __setstate__(self, state):\n        \"\"\"\n        Rebuild schema upon unpickling since schema is unpickleable.\n\n        Pickling happens during mpi broadcasts and also during testing where the test reactor is\n        
cached.\n\n        See Also\n        --------\n        armi.settings.setting.Setting.__getstate__ : removes schema\n        \"\"\"\n        from armi import getApp\n\n        self.__settings = getApp().getSettings()\n\n        # restore non-setting instance attrs\n        for key, val in state.items():\n            if key != \"_Settings__settings\":\n                setattr(self, key, val)\n\n        # with schema restored, restore all setting values\n        for name, settingState in state[\"_Settings__settings\"].items():\n            if name in self.__settings:\n                self.__settings[name]._value = settingState.value\n            elif isinstance(settingState, Setting):\n                self.__settings[name] = copy(settingState)\n            else:\n                raise NonexistentSetting(name)\n\n    def keys(self):\n        return self.__settings.keys()\n\n    def values(self):\n        return self.__settings.values()\n\n    def items(self):\n        return self.__settings.items()\n\n    def duplicate(self):\n        \"\"\"Return a duplicate copy of this settings object.\"\"\"\n        cs = deepcopy(self)\n        cs._failOnLoad = False\n        # It's not really protected access since it is a new Settings object. 
_failOnLoad is set to\n        # false, because this new settings object should be independent of the command line\n        return cs\n\n    def revertToDefaults(self):\n        \"\"\"Sets every setting back to its default value.\"\"\"\n        for setting in self.__settings.values():\n            setting.revertToDefault()\n\n    def failOnLoad(self):\n        \"\"\"This method is used to force loading a file to fail.\n\n        After command line processing of settings has begun, the settings should be fully defined.\n        If the settings are loaded\n        \"\"\"\n        self._failOnLoad = True\n\n    def loadFromInputFile(self, fName, handleInvalids=True, setPath=True):\n        \"\"\"\n        Read in settings from an input YAML file.\n\n        Passes the reader back out in case you want to know something about how the reading went\n        like for knowing if a file contained deprecated settings, etc.\n        \"\"\"\n        reader, path = self._prepToRead(fName)\n        reader.readFromFile(fName, handleInvalids)\n        self._applyReadSettings(path if setPath else None)\n        self.registerUserPlugins()\n\n        return reader\n\n    def registerUserPlugins(self):\n        \"\"\"Add any ad-hoc 'user' plugins that are referenced in the settings file.\"\"\"\n        userPlugins = self[\"userPlugins\"]\n        if len(userPlugins):\n            from armi import getApp\n\n            app = getApp()\n            app.registerUserPlugins(userPlugins)\n\n    def _prepToRead(self, fName):\n        if self._failOnLoad:\n            raise RuntimeError(\n                \"Cannot load settings file after processing of command line options begins.\\nYou \"\n                \"may be able to fix this by reordering the command line arguments, and making sure \"\n                f\"the settings file `{fName}` comes before any modified settings.\"\n            )\n        path = pathTools.armiAbsPath(fName)\n        return settingsIO.SettingsReader(self), path\n\n    
def loadFromString(self, string, handleInvalids=True):\n        \"\"\"Read in settings from a YAML string.\n\n        Passes the reader back out in case you want to know something about how the reading went\n        like for knowing if a file contained deprecated settings, etc.\n        \"\"\"\n        if self._failOnLoad:\n            raise RuntimeError(\n                \"Cannot load settings after processing of command line options begins.\\nYou may be \"\n                \"able to fix this by reordering the command line arguments.\"\n            )\n\n        reader = settingsIO.SettingsReader(self)\n        reader.readFromStream(io.StringIO(string), handleInvalids=handleInvalids)\n\n        self.initLogVerbosity()\n\n        return reader\n\n    def _applyReadSettings(self, path=None):\n        self.initLogVerbosity()\n\n        if path:\n            self.path = path  # can't set this before a chance to fail occurs\n\n    def initLogVerbosity(self):\n        \"\"\"\n        Central location to init logging verbosity.\n\n        Notes\n        -----\n        This means that creating a Settings object sets the global logging level of the entire code\n        base.\n        \"\"\"\n        if context.MPI_RANK == 0:\n            runLog.setVerbosity(self[\"verbosity\"])\n        else:\n            runLog.setVerbosity(self[\"branchVerbosity\"])\n\n        self.setModuleVerbosities(force=True)\n\n    def writeToYamlFile(self, fName, style=\"short\", fromFile=None):\n        \"\"\"\n        Write settings to a yaml file.\n\n        Notes\n        -----\n        This resets the current CS's path to the newly written absolute path.\n\n        Parameters\n        ----------\n        fName : str\n            the file to write to\n        style : str (optional)\n            the method of output to be used when creating the file for the current state of settings\n            (short, medium, or full)\n        fromFile : str (optional)\n            if the source file and 
destination file are different (i.e. for cloning) and the style\n            argument is ``medium``, then this arg is used\n        \"\"\"\n        self.path = pathTools.armiAbsPath(fName)\n        if style == \"medium\":\n            getSettingsPath = self.path if fromFile is None else pathTools.armiAbsPath(fromFile)\n            settingsSetByUser = self.getSettingsSetByUser(getSettingsPath)\n        else:\n            settingsSetByUser = []\n        with open(self.path, \"w\") as stream:\n            writer = self.writeToYamlStream(stream, style, settingsSetByUser)\n\n        return writer\n\n    def getSettingsSetByUser(self, fPath):\n        \"\"\"\n        Grabs the list of settings in the user-defined input file so that the settings can be\n        tracked outside of a Settings object.\n\n        Parameters\n        ----------\n        fPath : str\n            The absolute file path of the settings file\n\n        Returns\n        -------\n        userSettingsNames : list\n            The settings names read in from a yaml settings file\n        \"\"\"\n        # We do not want to load these as settings, but just grab the dictionary straight from the\n        # settings file to know which settings are user-defined.\n        with open(fPath, \"r\") as stream:\n            yaml = YAML()\n            yaml.allow_duplicate_keys = False\n            tree = yaml.load(stream)\n            userSettings = tree[settingsIO.Roots.CUSTOM]\n\n        userSettingsNames = list(userSettings.keys())\n        return userSettingsNames\n\n    def writeToYamlStream(self, stream, style=\"short\", settingsSetByUser=[]):\n        \"\"\"\n        Write settings in yaml format to an arbitrary stream.\n\n        Parameters\n        ----------\n        stream : file object\n            Writable file stream\n        style : str (optional)\n            Writing style for settings file. 
Can be short, medium, or full.\n        settingsSetByUser : list\n            List of settings names in user-defined settings file\n\n        Returns\n        -------\n        writer : SettingsWriter\n        \"\"\"\n        writer = settingsIO.SettingsWriter(self, style=style, settingsSetByUser=settingsSetByUser)\n        writer.writeYaml(stream)\n        return writer\n\n    def updateEnvironmentSettingsFrom(self, otherCs):\n        \"\"\"Updates the environment settings in this object based on some other cs (from the GUI,\n        most likely).\n\n        Parameters\n        ----------\n        otherCs : Settings\n            A cs object that environment settings will be inherited from.\n\n        This enables users to run tests with their environment rather than the reference environment\n        \"\"\"\n        for replacement in self.environmentSettings:\n            self[replacement] = otherCs[replacement]\n\n    def modified(self, caseTitle=None, newSettings=None):\n        \"\"\"Return a new Settings object containing the provided modifications.\"\"\"\n        settings = self.duplicate()\n\n        if caseTitle:\n            settings.caseTitle = caseTitle\n\n        if newSettings:\n            for key, val in newSettings.items():\n                if isinstance(val, Setting):\n                    settings.__settings[key] = copy(val)\n                elif key in settings.__settings:\n                    settings.__settings[key].setValue(val)\n                else:\n                    settings.__settings[key] = Setting(key, val, description=\"Description from cs.modified()\")\n\n        return settings\n\n    def setModuleVerbosities(self, force=False):\n        \"\"\"Attempt to grab the module-level logger verbosities from the settings file,\n        and then set their log levels (verbosities).\n\n        Parameters\n        ----------\n        force : bool, optional\n            If force is False, don't overwrite the log verbosities if the logger already 
exists.\n            IF this needs to be used mid-run, force=False is safer.\n\n        Notes\n        -----\n        This method is only meant to be called once per run.\n        \"\"\"\n        # try to get the setting dict\n        verbs = self[\"moduleVerbosity\"]\n\n        # set, but don't use, the module-level loggers\n        for mName, mLvl in verbs.items():\n            # by default, we init module-level logging, not change it mid-run\n            if force or mName not in logging.Logger.manager.loggerDict:\n                # cast verbosity to integer\n                lvl = int(mLvl) if mLvl.isnumeric() else runLog.LOG.logLevels[mLvl][0]\n\n                log = logging.getLogger(mName)\n                log.setVerbosity(lvl)\n"
  },
  {
    "path": "armi/settings/fwSettings/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This package contains the settings that control the base/framework-level ARMI functionality.\"\"\"\n\nfrom typing import List\n\nfrom armi.settings import setting\nfrom armi.settings.fwSettings import databaseSettings, globalSettings, reportSettings\n\n\ndef getFrameworkSettings() -> List[setting.Setting]:\n    settings = []\n    for mod in (\n        globalSettings,\n        databaseSettings,\n        reportSettings,\n    ):\n        settings.extend(mod.defineSettings())\n    return settings\n"
  },
  {
    "path": "armi/settings/fwSettings/databaseSettings.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Settings related to the ARMI database.\"\"\"\n\nfrom armi.settings import setting\n\nCONF_DB = \"db\"\nCONF_RELOAD_DB_NAME = \"reloadDBName\"\nCONF_LOAD_FROM_DB_EVERY_NODE = \"loadFromDBEveryNode\"\nCONF_SYNC_AFTER_WRITE = \"syncDbAfterWrite\"\nCONF_FORCE_DB_PARAMS = \"forceDbParams\"\n\n\ndef defineSettings():\n    \"\"\"Define settings for the interface.\"\"\"\n    settings = [\n        setting.Setting(\n            CONF_DB,\n            default=True,\n            label=\"Activate Database\",\n            description=\"Write the state information to a database at every timestep\",\n        ),\n        setting.Setting(\n            CONF_RELOAD_DB_NAME,\n            default=\"\",\n            label=\"Database Input File\",\n            description=\"Name of the database file to load initial conditions from\",\n            oldNames=[(\"snapShotDB\", None)],\n        ),\n        setting.Setting(\n            CONF_LOAD_FROM_DB_EVERY_NODE,\n            default=False,\n            label=\"Load Database at EveryNode\",\n            description=\"Every node loaded from reference database\",\n        ),\n        setting.Setting(\n            CONF_SYNC_AFTER_WRITE,\n            default=True,\n            label=\"Sync Database After Write\",\n            description=(\n                \"Copy the output database from the fast scratch space to the shared network drive 
after each write.\"\n            ),\n        ),\n        setting.Setting(\n            CONF_FORCE_DB_PARAMS,\n            default=[],\n            label=\"Force Database Write of Parameters\",\n            description=(\n                \"A list of parameter names that should always be written to the \"\n                \"database, regardless of their Parameter Definition's typical saveToDB \"\n                \"status. This is only honored if the DatabaseInterface is used.\"\n            ),\n        ),\n    ]\n    return settings\n"
  },
  {
    "path": "armi/settings/fwSettings/globalSettings.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nFramework-wide settings definitions and constants.\n\nThis should contain Settings definitions for general-purpose \"framework\" settings. These should only include settings\nthat are not related to any particular physics or plugins.\n\"\"\"\n\nimport os\nfrom typing import List\n\nimport voluptuous as vol\n\nfrom armi import context\nfrom armi.settings import setting\nfrom armi.settings.fwSettings import tightCouplingSettings\nfrom armi.utils.mathematics import isMonotonic\n\nCONF_ACCEPTABLE_BLOCK_AREA_ERROR = \"acceptableBlockAreaError\"\nCONF_ASSEM_FLAGS_SKIP_AXIAL_EXP = \"assemFlagsToSkipAxialExpansion\"\nCONF_AVAILABILITY_FACTOR = \"availabilityFactor\"\nCONF_AVAILABILITY_FACTORS = \"availabilityFactors\"\nCONF_AXIAL_MESH_REFINEMENT_FACTOR = \"axialMeshRefinementFactor\"\nCONF_BETA = \"beta\"\nCONF_BRANCH_VERBOSITY = \"branchVerbosity\"\nCONF_BU_GROUPS = \"buGroups\"\nCONF_BURN_CHAIN_FILE_NAME = \"burnChainFileName\"\nCONF_BURN_STEPS = \"burnSteps\"\nCONF_BURNUP_PEAKING_FACTOR = \"burnupPeakingFactor\"\nCONF_CIRCULAR_RING_PITCH = \"circularRingPitch\"\nCONF_COMMENT = \"comment\"\nCONF_COPY_FILES_FROM = \"copyFilesFrom\"\nCONF_COPY_FILES_TO = \"copyFilesTo\"\nCONF_COVERAGE = \"coverage\"\nCONF_COVERAGE_CONFIG_FILE = \"coverageConfigFile\"\nCONF_CYCLE_LENGTH = \"cycleLength\"\nCONF_CYCLE_LENGTHS = \"cycleLengths\"\nCONF_CYCLES = 
\"cycles\"\nCONF_CYCLES_SKIP_TIGHT_COUPLING_INTERACTION = \"cyclesSkipTightCouplingInteraction\"\nCONF_DEBUG_MEM = \"debugMem\"\nCONF_DEBUG_MEM_SIZE = \"debugMemSize\"\nCONF_DECAY_CONSTANTS = \"decayConstants\"\nCONF_DEFAULT_SNAPSHOTS = \"defaultSnapshots\"\nCONF_DEFERRED_INTERFACE_NAMES = \"deferredInterfaceNames\"\nCONF_DEFERRED_INTERFACES_CYCLE = \"deferredInterfacesCycle\"\nCONF_DETAIL_ALL_ASSEMS = \"detailAllAssems\"\nCONF_DETAIL_ASSEM_LOCATIONS_BOL = \"detailAssemLocationsBOL\"\nCONF_DETAIL_ASSEM_NUMS = \"detailAssemNums\"\nCONF_DETAILED_AXIAL_EXPANSION = \"detailedAxialExpansion\"\nCONF_DUMP_SNAPSHOT = \"dumpSnapshot\"\nCONF_EQ_DIRECT = \"eqDirect\"  # fuelCycle/equilibrium coupling\nCONF_EXPLICIT_REPEAT_SHUFFLES = \"explicitRepeatShuffles\"\nCONF_FLUX_RECON = \"fluxRecon\"  # strange coupling in fuel handlers\nCONF_FRESH_FEED_TYPE = \"freshFeedType\"\nCONF_GROW_TO_FULL_CORE_AFTER_LOAD = \"growToFullCoreAfterLoad\"\nCONF_INDEPENDENT_VARIABLES = \"independentVariables\"\nCONF_INITIALIZE_BURN_CHAIN = \"initializeBurnChain\"\nCONF_INPUT_HEIGHTS_HOT = \"inputHeightsConsideredHot\"\nCONF_LOAD_STYLE = \"loadStyle\"\nCONF_LOADING_FILE = \"loadingFile\"\nCONF_MATERIAL_NAMESPACE_ORDER = \"materialNamespaceOrder\"\nCONF_MIN_MESH_SIZE_RATIO = \"minMeshSizeRatio\"\nCONF_MODULE_VERBOSITY = \"moduleVerbosity\"\nCONF_N_CYCLES = \"nCycles\"\nCONF_N_TASKS = \"nTasks\"\nCONF_NON_UNIFORM_ASSEM_FLAGS = \"nonUniformAssemFlags\"\nCONF_OUTPUT_CACHE_LOCATION = \"outputCacheLocation\"\nCONF_OUTPUT_FILE_EXTENSION = \"outputFileExtension\"\nCONF_PHYSICS_FILES = \"savePhysicsFiles\"\nCONF_PLOTS = \"plots\"\nCONF_POWER = \"power\"\nCONF_POWER_DENSITY = \"powerDensity\"\nCONF_POWER_FRACTIONS = \"powerFractions\"\nCONF_PROFILE = \"profile\"\nCONF_REMOVE_PER_CYCLE = \"removePerCycle\"\nCONF_RM_EXT_FILES_AT_BOC = \"rmExternalFilesAtBOC\"\nCONF_RUN_TYPE = \"runType\"\nCONF_SKIP_CYCLES = \"skipCycles\"\nCONF_SMALL_RUN = \"rmExternalFilesAtEOL\"\nCONF_SORT_REACTOR = 
\"sortReactor\"\nCONF_START_CYCLE = \"startCycle\"\nCONF_START_NODE = \"startNode\"\nCONF_STATIONARY_BLOCK_FLAGS = \"stationaryBlockFlags\"\nCONF_T_IN = \"Tin\"\nCONF_T_OUT = \"Tout\"\nCONF_TARGET_K = \"targetK\"  # lots of things use this\nCONF_TEMP_GROUPS = \"tempGroups\"\nCONF_TIGHT_COUPLING = \"tightCoupling\"\nCONF_TIGHT_COUPLING_MAX_ITERS = \"tightCouplingMaxNumIters\"\nCONF_TIGHT_COUPLING_SETTINGS = \"tightCouplingSettings\"\nCONF_TRACE = \"trace\"\nCONF_TRACK_ASSEMS = \"trackAssems\"\nCONF_UNIFORM_MESH_MINIMUM_SIZE = \"uniformMeshMinimumSize\"\nCONF_USER_PLUGINS = \"userPlugins\"\nCONF_VERBOSITY = \"verbosity\"\nCONF_VERSIONS = \"versions\"\nCONF_ZONE_DEFINITIONS = \"zoneDefinitions\"\nCONF_ZONES_FILE = \"zonesFile\"\n\n\ndef defineSettings() -> List[setting.Setting]:\n    \"\"\"\n    Return a list of global framework settings.\n\n    .. impl:: There is a setting for total core power.\n        :id: I_ARMI_SETTINGS_POWER\n        :implements: R_ARMI_SETTINGS_POWER\n\n        ARMI defines a collection of settings by default to be associated\n        with all runs, and one such setting is ``power``. This is the\n        total thermal power of the reactor. This is designed to be the\n        standard power of the reactor core, to be easily set by the user.\n        There is frequently the need to adjust the power of the reactor\n        at different cycles. That is done by setting the ``powerFractions``\n        setting to a list of fractions of this power.\n\n    .. impl:: Define a comment and a versions list to go with the settings.\n        :id: I_ARMI_SETTINGS_META1\n        :implements: R_ARMI_SETTINGS_META\n\n        Because nuclear analysts have a lot to keep track of when doing\n        various simulations of a reactor, ARMI provides a ``comment``\n        setting that takes an arbitrary string and stores it. 
This string\n        will be preserved in the settings file and thus in the database,\n        and can provide helpful notes for analysts in the future.\n\n        Likewise, it is helpful to know what versions of software were\n        used in an ARMI application. There is a dictionary-like setting\n        called ``versions`` that allows users to track the versions of:\n        ARMI, their ARMI application, and the versions of all the plugins\n        in their simulation. While it is always helpful to know what\n        versions of software you run, it is particularly needed in nuclear\n        engineering where demands will be made to track the exact\n        versions of code used in simulations.\n    \"\"\"\n    settings = [\n        setting.Setting(\n            CONF_N_TASKS,\n            default=1,\n            label=\"parallel tasks\",\n            description=\"Number of parallel tasks to request on the cluster\",\n            schema=vol.All(vol.Coerce(int), vol.Range(min=1)),\n            oldNames=[(\"numProcessors\", None)],\n        ),\n        setting.Setting(\n            CONF_INITIALIZE_BURN_CHAIN,\n            default=True,\n            label=\"Initialize Burn Chain\",\n            description=(\n                f\"This setting is paired with the `{CONF_BURN_CHAIN_FILE_NAME}` setting. \"\n                \"When enabled, this will initialize the burn-chain on initializing the case and \"\n                \"is required for running depletion calculations where the transmutations and decays \"\n                \"are controlled by the framework. 
If an external software, such as ORIGEN, contains \"\n                \"data for the burn-chain already embedded then this may be disabled.\"\n            ),\n        ),\n        setting.Setting(\n            CONF_BURN_CHAIN_FILE_NAME,\n            default=os.path.join(context.RES, \"burn-chain.yaml\"),\n            label=\"Burn Chain File\",\n            description=\"Path to YAML file that has the depletion chain defined in it\",\n        ),\n        setting.Setting(\n            CONF_AXIAL_MESH_REFINEMENT_FACTOR,\n            default=1,\n            label=\"Axial Mesh Refinement Factor\",\n            description=\"Multiplicative factor on the Global Flux number of mesh per \"\n            \"block. Used for axial mesh refinement.\",\n            schema=vol.All(vol.Coerce(int), vol.Range(min=0, min_included=False)),\n        ),\n        setting.Setting(\n            CONF_UNIFORM_MESH_MINIMUM_SIZE,\n            default=None,\n            label=\"Minimum axial mesh size in cm for uniform mesh\",\n            description=\"Minimum mesh size used when generating an axial mesh for the \"\n            \"uniform mesh converter. Providing a value for this setting allows fuel \"\n            \"and control material boundaries to be enforced better in uniform mesh.\",\n            schema=vol.All(vol.Coerce(float), vol.Range(min=0.0, min_included=False)),\n        ),\n        setting.Setting(\n            CONF_DETAILED_AXIAL_EXPANSION,\n            default=False,\n            label=\"Detailed Axial Expansion\",\n            description=(\n                \"Allow each assembly to expand independently of the others. Results in non-uniform \"\n                \"axial mesh. 
Neutronics kernel must be able to handle.\"\n            ),\n        ),\n        setting.Setting(\n            CONF_NON_UNIFORM_ASSEM_FLAGS,\n            default=[],\n            label=\"Non Uniform Assem Flags\",\n            description=(\n                \"Assemblies that match a flag group on this list will not have their \"\n                \"mesh changed with the reference mesh of the core for uniform mesh cases (non-\"\n                \"detailed axial expansion). Another plugin may need to make the mesh uniform if \"\n                \"necessary.\"\n            ),\n        ),\n        setting.Setting(\n            CONF_INPUT_HEIGHTS_HOT,\n            default=True,\n            label=\"Input Height Considered Hot\",\n            description=(\n                \"This is a flag to determine if block heights, as provided in blueprints, are at \"\n                \"hot dimensions. If false, block heights are at cold/as-built dimensions and will \"\n                \"be thermally expanded as appropriate.\"\n            ),\n        ),\n        setting.Setting(\n            CONF_TRACE,\n            default=False,\n            label=\"Use the Python Tracer\",\n            description=\"Activate Python trace module to print out each line as it's executed\",\n            isEnvironment=True,\n        ),\n        setting.Setting(\n            CONF_PROFILE,\n            default=False,\n            label=\"Turn On the Profiler\",\n            description=\"Turn on the profiler for the submitted case. 
The profiler \"\n            \"results will not include all import times.\",\n            isEnvironment=True,\n            oldNames=[\n                (\"turnOnProfiler\", None),\n            ],\n        ),\n        setting.Setting(\n            CONF_COVERAGE,\n            default=False,\n            label=\"Turn On Coverage Report Generation\",\n            description=\"Turn on coverage report generation which tracks all the lines \"\n            \"of code that execute during a run\",\n            isEnvironment=True,\n        ),\n        setting.Setting(\n            CONF_COVERAGE_CONFIG_FILE,\n            default=\"\",\n            label=\"File to Define Coverage Configuration\",\n            description=\"User-defined coverage configuration file\",\n        ),\n        setting.Setting(\n            CONF_MIN_MESH_SIZE_RATIO,\n            default=0.15,\n            label=\"Minimum Mesh Size Ratio\",\n            description=\"This is the minimum ratio of mesh sizes (dP1/(dP1 + dP2)) \"\n            \"allowable -- only active if automaticVariableMesh flag is set to True\",\n            schema=vol.All(vol.Coerce(float), vol.Range(min=0, min_included=False)),\n        ),\n        setting.Setting(\n            CONF_CYCLE_LENGTH,\n            default=365.242199,\n            label=\"Cycle Length\",\n            description=\"Duration of one single cycle in days. If `availabilityFactor` is below \"\n            \"1, the reactor will be at power less than this. 
If variable, use \"\n            \"`cycleLengths` setting.\",\n            oldNames=[\n                (\"burnTime\", None),\n            ],\n            schema=(\n                vol.Any(\n                    vol.All(vol.Coerce(float), vol.Range(min=0, min_included=False)),\n                    None,\n                )\n            ),\n        ),\n        setting.Setting(\n            CONF_CYCLE_LENGTHS,\n            default=[],\n            label=\"Cycle Durations\",\n            description=\"List of durations of each cycle in days. The at-power \"\n            \"duration will be affected by `availabilityFactor`. R is repeat. For \"\n            \"example [100, 150, '9R'] is 1 100 day cycle followed by 10 150 day \"\n            \"cycles. Empty list is constant duration set by `cycleLength`.\",\n            schema=vol.Any([vol.Coerce(str)], None),\n        ),\n        setting.Setting(\n            CONF_AVAILABILITY_FACTOR,\n            default=1.0,\n            label=\"Plant Availability Factor\",\n            description=\"Availability factor of the plant. This is the fraction of the \"\n            \"time that the plant is operating. If variable, use `availabilityFactors` setting.\",\n            oldNames=[\n                (\"capacityFactor\", None),\n            ],\n            schema=(vol.Any(vol.All(vol.Coerce(float), vol.Range(min=0)), None)),\n        ),\n        setting.Setting(\n            CONF_AVAILABILITY_FACTORS,\n            default=[],\n            label=\"Availability Factors\",\n            description=\"List of availability factor of each cycle as a fraction \"\n            \"(fraction of time plant is not in an outage). R is repeat. For example \"\n            \"[0.5, 1.0, '9R'] is 1 50% followed by 10 100%. 
Empty list is \"\n            \"constant duration set by `availabilityFactor`.\",\n            schema=vol.Any([vol.Coerce(str)], None),\n        ),\n        setting.Setting(\n            CONF_POWER_FRACTIONS,\n            default=[],\n            label=\"Power Fractions\",\n            description=\"List of power fractions at each cycle (fraction of rated \"\n            \"thermal power the plant achieves). R is repeat. For example [0.5, 1.0, \"\n            \"'9R'] is 1 50% followed by 10 100%. Specify zeros to indicate \"\n            \"decay-only cycles (i.e. for decay heat analysis). None implies \"\n            \"always full rated power.\",\n            schema=vol.Any([vol.Coerce(str)], None),\n        ),\n        setting.Setting(\n            CONF_BURN_STEPS,\n            default=4,\n            label=\"Burnup Steps per Cycle\",\n            description=\"Number of depletion substeps, n, in one cycle. Note: There \"\n            \"will be n+1 time nodes and the burnup step time will be computed as cycle \"\n            \"length/n when the simple cycles input format is used.\",\n            schema=(vol.Any(vol.All(vol.Coerce(int), vol.Range(min=0)), None)),\n        ),\n        setting.Setting(\n            CONF_BETA,\n            default=None,\n            label=\"Delayed Neutron Fraction\",\n            description=\"Individual precursor group delayed neutron fractions\",\n            schema=vol.Any(\n                [\n                    vol.All(\n                        vol.Coerce(float),\n                        vol.Range(min=0, min_included=True, max=1, max_included=True),\n                    )\n                ],\n                None,\n                vol.All(\n                    vol.Coerce(float),\n                    vol.Range(min=0, min_included=True, max=1, max_included=True),\n                ),\n                msg=\"Expected NoneType, float, or list of floats.\",\n            ),\n            oldNames=[\n                (\"betaComponents\", 
None),\n            ],\n        ),\n        setting.Setting(\n            CONF_DECAY_CONSTANTS,\n            default=None,\n            label=\"Decay Constants\",\n            description=\"Individual precursor group delayed neutron decay constants\",\n            schema=vol.Any(\n                [vol.All(vol.Coerce(float), vol.Range(min=0, min_included=True))],\n                None,\n                vol.All(vol.Coerce(float), vol.Range(min=0, min_included=True)),\n                msg=\"Expected NoneType, float, or list of floats.\",\n            ),\n        ),\n        setting.Setting(\n            CONF_BRANCH_VERBOSITY,\n            default=\"error\",\n            label=\"Worker Log Verbosity\",\n            description=\"Verbosity of the non-primary MPI nodes\",\n            options=[\n                \"debug\",\n                \"extra\",\n                \"info\",\n                \"important\",\n                \"prompt\",\n                \"warning\",\n                \"error\",\n            ],\n            isEnvironment=True,\n        ),\n        setting.Setting(\n            CONF_MODULE_VERBOSITY,\n            default={},\n            label=\"Module-Level Verbosity\",\n            description=\"Verbosity of any module-specific loggers that are set\",\n            isEnvironment=True,\n        ),\n        setting.Setting(\n            CONF_VERSIONS,\n            default={},\n            label=\"Versions of Code Used\",\n            description=\"Versions of ARMI, and any Apps or Plugins that register a version here.\",\n        ),\n        setting.Setting(\n            CONF_BU_GROUPS,\n            default=[10, 20, 30],\n            label=\"Burnup XS Groups\",\n            description=\"The range of burnups where cross-sections will be the same \"\n            \"for a given cross section type (units of %FIMA)\",\n            schema=vol.Schema(\n                [\n                    vol.All(\n                        vol.Coerce(int),\n                        
vol.Range(\n                            min=0,\n                            min_included=False,\n                        ),\n                    )\n                ]\n            ),\n        ),\n        setting.Setting(\n            CONF_TEMP_GROUPS,\n            default=[],\n            label=\"Temperature XS Groups\",\n            description=\"The range of fuel temperatures where cross-sections will be the same \"\n            \"for a given cross section type (units of degrees C)\",\n            schema=vol.Schema([vol.All(vol.Coerce(int), vol.Range(min=0, min_included=False))]),\n        ),\n        setting.Setting(\n            CONF_BURNUP_PEAKING_FACTOR,\n            default=0.0,\n            label=\"Burn-up Peaking Factor\",\n            description=\"The peak/avg factor for burnup and DPA. If it is not set the current flux \"\n            \"peaking is used (this is typically conservatively high).\",\n            schema=vol.All(vol.Coerce(float), vol.Range(min=0)),\n        ),\n        setting.Setting(\n            CONF_CIRCULAR_RING_PITCH,\n            default=1.0,\n            label=\"Circular Ring Relative Pitch\",\n            description=\"The relative pitch to be used to define a single circular ring in circular shuffling\",\n        ),\n        setting.Setting(\n            CONF_COMMENT,\n            default=\"\",\n            label=\"Case Comments\",\n            description=\"A comment describing this case\",\n        ),\n        setting.Setting(\n            CONF_COPY_FILES_FROM,\n            default=[],\n            label=\"Copy These Files\",\n            description=\"A list of files that need to be copied at the start of a run.\",\n        ),\n        setting.Setting(\n            CONF_COPY_FILES_TO,\n            default=[],\n            label=\"Copy to These Directories\",\n            description=\"A list of directories to copy provided files into at the start of a run.\"\n            \"This list can be of length zero (copy to working dir), 1 
(copy all files to the same \"\n            f\"place), or it must be the same length as {CONF_COPY_FILES_FROM}\",\n        ),\n        setting.Setting(\n            CONF_DEBUG_MEM,\n            default=False,\n            label=\"Debug Memory\",\n            description=\"Turn on memory debugging options to help find problems with the code\",\n        ),\n        setting.Setting(\n            CONF_DEBUG_MEM_SIZE,\n            default=False,\n            label=\"Debug Memory Size\",\n            description=\"Show size of objects during memory debugging\",\n        ),\n        setting.Setting(\n            CONF_DEFAULT_SNAPSHOTS,\n            default=False,\n            label=\"Basic Reactor Snapshots\",\n            description=\"Generate snapshots at BOL, MOL, and EOL.\",\n        ),\n        setting.Setting(\n            CONF_DETAIL_ALL_ASSEMS,\n            default=False,\n            label=\"Detailed Assems - All\",\n            description=\"All assemblies will have 'detailed' treatment. Note: This \"\n            \"option is interpreted differently by different modules.\",\n        ),\n        setting.Setting(\n            CONF_DETAIL_ASSEM_LOCATIONS_BOL,\n            default=[],\n            label=\"Detailed Assems - BOL Location\",\n            description=\"Assembly locations for assemblies that will have 'detailed' \"\n            \"treatment. This option will track assemblies in the core at BOL. Note: \"\n            \"This option is interpreted differently by different modules.\",\n        ),\n        setting.Setting(\n            CONF_DETAIL_ASSEM_NUMS,\n            default=[],\n            label=\"Detailed Assems - ID\",\n            description=\"Assembly numbers(IDs) for assemblies that will have \"\n            \"'detailed' treatment. This option will track assemblies that not in the \"\n            \"core at BOL. 
Note: This option is interpreted differently by different modules.\",\n            schema=vol.Schema([int]),\n        ),\n        setting.Setting(\n            CONF_DUMP_SNAPSHOT,\n            default=[],\n            label=\"Detailed Reactor Snapshots\",\n            description=\"List of snapshots to perform detailed reactor analysis, \"\n            \"such as reactivity coefficient generation.\",\n        ),\n        setting.Setting(\n            CONF_PHYSICS_FILES,\n            default=[],\n            label=\"Dump Snapshot Files\",\n            description=\"List of snapshots to dump reactor physics kernel input and \"\n            \"output files. Can be used to perform follow-on analysis.\",\n        ),\n        setting.Setting(\n            CONF_EQ_DIRECT,\n            default=False,\n            label=\"Direct Eq Shuffling\",\n            description=\"Does the equilibrium search with repetitive shuffing but with \"\n            \"direct shuffling rather than the fast way\",\n        ),\n        setting.Setting(\n            CONF_FLUX_RECON,\n            default=False,\n            label=\"Flux/Power Reconstruction\",\n            description=\"Perform detailed flux and power reconstruction\",\n        ),\n        setting.Setting(\n            CONF_FRESH_FEED_TYPE,\n            default=\"feed fuel\",\n            label=\"Fresh Feed Type\",\n            description=\"The type of fresh fuel added to the core, used in certain pre-defined \"\n            \"fuel shuffling logic sequences.\",\n            options=[\"feed fuel\", \"igniter fuel\", \"inner driver fuel\"],\n        ),\n        setting.Setting(\n            CONF_GROW_TO_FULL_CORE_AFTER_LOAD,\n            default=False,\n            label=\"Expand to Full Core on Snapshot Load\",\n            description=\"Grows from 1/3 to full core after loading a 1/3 \"\n            \"symmetric snapshot. 
Note: This is needed when a full core model is needed \"\n            \"and the database was produced using a third core model.\",\n        ),\n        setting.Setting(\n            CONF_START_CYCLE,\n            default=0,\n            label=\"Start Cycle\",\n            description=\"Cycle number to continue calculation from. Database will \"\n            \"load from the time step just before. For snapshots use `dumpSnapshot`.\",\n            oldNames=[\n                (\"loadCycle\", None),\n            ],\n            schema=vol.All(vol.Coerce(int), vol.Range(min=0)),\n        ),\n        setting.Setting(\n            CONF_LOADING_FILE,\n            default=\"\",\n            label=\"Blueprints File\",\n            description=\"The blueprints/loading input file path containing component dimensions, materials, etc.\",\n        ),\n        setting.Setting(\n            CONF_START_NODE,\n            default=0,\n            label=\"Start Node\",\n            description=\"Timenode number (0 for BOC, etc.) to continue calculation from. \"\n            \"Database will load from the time step just before.\",\n            oldNames=[\n                (\"loadNode\", None),\n            ],\n            schema=vol.All(vol.Coerce(int), vol.Range(min=0)),\n        ),\n        setting.Setting(\n            CONF_LOAD_STYLE,\n            default=\"fromInput\",\n            label=\"Load Style\",\n            description=\"Description of how the ARMI case will be initialized\",\n            options=[\"fromInput\", \"fromDB\"],\n        ),\n        setting.Setting(\n            CONF_N_CYCLES,\n            default=1,\n            label=\"Number of Cycles\",\n            description=\"Number of cycles that will be simulated. Fuel management \"\n            \"happens at the beginning of each cycle. Can include active (full-power) \"\n            \"cycles as well as post-shutdown decay-heat steps. 
For restart cases, \"\n            \"this value should include both cycles from the restart plus any additional \"\n            \"cycles to be run after `startCycle`.\",\n            schema=vol.All(vol.Coerce(int), vol.Range(min=1)),\n        ),\n        setting.Setting(\n            CONF_TIGHT_COUPLING,\n            default=False,\n            label=\"Tight Coupling\",\n            description=\"Boolean to turn on/off tight coupling\",\n        ),\n        setting.Setting(\n            CONF_TIGHT_COUPLING_MAX_ITERS,\n            default=4,\n            label=\"Maximum number of iterations for tight coupling.\",\n            description=\"Maximum number of iterations for tight coupling.\",\n        ),\n        setting.Setting(\n            CONF_CYCLES_SKIP_TIGHT_COUPLING_INTERACTION,\n            default=[],\n            label=\"Cycles to skip the tight coupling interaction.\",\n            description=\"List of cycle numbers skip tight coupling interaction for. \"\n            \"Will still update component temps, etc during these cycles, will just \"\n            \"not iterate a second (or more) time.\",\n        ),\n        tightCouplingSettings.TightCouplingSettingDef(\n            CONF_TIGHT_COUPLING_SETTINGS,\n        ),\n        setting.Setting(\n            CONF_OUTPUT_FILE_EXTENSION,\n            default=\"jpg\",\n            label=\"Plot File Extension\",\n            description=\"The default extension for plots\",\n            options=[\"jpg\", \"png\", \"svg\", \"pdf\"],\n        ),\n        setting.Setting(\n            CONF_PLOTS,\n            default=False,\n            label=\"Plot Results\",\n            description=\"Generate additional plots throughout the ARMI analysis\",\n        ),\n        setting.Setting(\n            CONF_POWER,\n            default=0.0,\n            label=\"Reactor Thermal Power (W)\",\n            description=\"Nameplate thermal power of the reactor. 
Can be varied by setting the powerFractions setting.\",\n            schema=vol.All(vol.Coerce(float), vol.Range(min=0)),\n        ),\n        setting.Setting(\n            CONF_POWER_DENSITY,\n            default=0.0,\n            label=\"Reactor Thermal Power Density (W/HMM)\",\n            description=\"Thermal power of the Reactor, per gram of Heavy metal \"\n            \"mass. Ignore this setting if the `power` setting is non-zero.\",\n            schema=vol.All(vol.Coerce(float), vol.Range(min=0)),\n        ),\n        setting.Setting(\n            CONF_REMOVE_PER_CYCLE,\n            default=3,\n            label=\"Remove per Cycle\",\n            description=\"The number of fuel assemblies removed per cycle at equilibrium.\",\n        ),\n        setting.Setting(\n            CONF_RUN_TYPE,\n            default=\"Standard\",\n            label=\"Run Type\",\n            description=\"Type of run that this is, e.g. a normal run through all \"\n            \"cycles, a snapshot-loaded reactivity coefficient run, etc.\",\n            options=[\"Standard\", \"Equilibrium\", \"Snapshots\"],\n        ),\n        setting.Setting(\n            CONF_EXPLICIT_REPEAT_SHUFFLES,\n            default=\"\",\n            label=\"Explicit Shuffles File\",\n            description=\"Path to file that contains a detailed shuffling history that is to be repeated exactly.\",\n            oldNames=[(\"movesFile\", None), (\"shuffleFileName\", None)],\n        ),\n        setting.Setting(\n            CONF_SKIP_CYCLES,\n            default=0,\n            label=\"Number of Cycles to Skip\",\n            description=\"Number of cycles to be skipped during the calculation. 
Note: \"\n            \"This is typically used when repeating only a portion of a calculation or \"\n            \"repeating a run.\",\n            schema=vol.All(vol.Coerce(int), vol.Range(min=0)),\n        ),\n        setting.Setting(\n            CONF_SMALL_RUN,\n            default=False,\n            label=\"Clean Up Files at EOL\",\n            description=\"Clean up intermediate files after the run completes (EOL)\",\n        ),\n        setting.Setting(\n            CONF_SORT_REACTOR,\n            default=True,\n            label=\"Do we want to automatically sort the Reactor?\",\n            description=\"If unsorted, ArmiObject IDs will be by the order they were added to the Reactor.\",\n        ),\n        setting.Setting(\n            CONF_RM_EXT_FILES_AT_BOC,\n            default=False,\n            label=\"Clean Up Files at BOC\",\n            description=\"Clean up files at the beginning of each cycle (BOC)\",\n        ),\n        setting.Setting(\n            CONF_STATIONARY_BLOCK_FLAGS,\n            default=[\"GRID_PLATE\"],\n            label=\"stationary Block Flags\",\n            description=\"Blocks with these flags will not move in moves. Used for fuel management.\",\n        ),\n        setting.Setting(\n            CONF_TARGET_K,\n            default=1.005,\n            label=\"Criticality Search Target (k-effective)\",\n            description=\"Target criticality (k-effective) for cycle length, branch, and equilibrium search\",\n            schema=vol.All(vol.Coerce(float), vol.Range(min=0)),\n        ),\n        setting.Setting(\n            CONF_TRACK_ASSEMS,\n            default=False,\n            label=\"Save Discharged Assemblies\",\n            description=\"Retain discharged assemblies in a spent fuel pool so their histories \"\n            \"can be analyzed or the assemblies reused. Turning this off removes \"\n            \"discharged assemblies to minimize memory and database size. 
\"\n            \"Assemblies explicitly discharged to the spent fuel pool remain \"\n            \"regardless, allowing selective tracking.\",\n        ),\n        setting.Setting(\n            CONF_VERBOSITY,\n            default=\"info\",\n            label=\"Primary Log Verbosity\",\n            description=\"How verbose the output will be\",\n            options=[\n                \"debug\",\n                \"extra\",\n                \"info\",\n                \"important\",\n                \"prompt\",\n                \"warning\",\n                \"error\",\n            ],\n            isEnvironment=True,\n        ),\n        setting.Setting(\n            CONF_ZONE_DEFINITIONS,\n            default=[],\n            label=\"Zone Definitions\",\n            description=\"Manual definitions of zones as lists of assembly locations \"\n            '(e.g. \"zoneName: loc1, loc2, loc3\") . Zones are groups of assemblies used '\n            f\"by various summary and calculation routines. See also {CONF_ZONES_FILE} \"\n            \"for an alternative method of specifying zones.\",\n        ),\n        setting.Setting(\n            CONF_ZONES_FILE,\n            default=\"\",\n            label=\"Zones definition file\",\n            description=\"Input file containing the definition of Zones to be applied to the reactor. 
\"\n            f\"See also {CONF_ZONE_DEFINITIONS} for an alternative method of specifying zones.\",\n        ),\n        setting.Setting(\n            CONF_ACCEPTABLE_BLOCK_AREA_ERROR,\n            default=1e-05,\n            label=\"Acceptable Block Area Error\",\n            description=\"The limit of error between a block's cross-\"\n            \"sectional area and the reference block used during the assembly area \"\n            \"consistency check\",\n            schema=vol.All(vol.Coerce(float), vol.Range(min=0, min_included=False)),\n        ),\n        setting.Setting(\n            CONF_INDEPENDENT_VARIABLES,\n            default=[],\n            label=\"Independent Variables\",\n            description=\"List of (independentVarName, value) tuples to inform optimization post-processing\",\n        ),\n        setting.Setting(\n            CONF_T_IN,\n            default=360.0,\n            label=\"Inlet Temperature\",\n            description=\"The inlet temperature of the reactor in C\",\n            schema=vol.All(vol.Coerce(float), vol.Range(min=-273.15)),\n        ),\n        setting.Setting(\n            CONF_T_OUT,\n            default=510.0,\n            label=\"Outlet Temperature\",\n            description=\"The outlet temperature of the reactor in C\",\n            schema=vol.All(vol.Coerce(float), vol.Range(min=-273.15)),\n        ),\n        setting.Setting(\n            CONF_DEFERRED_INTERFACES_CYCLE,\n            default=0,\n            label=\"Deferred Interface Start Cycle\",\n            description=\"The supplied list of interface names in deferredInterfaceNames\"\n            \" will begin normal operations on this cycle number\",\n        ),\n        setting.Setting(\n            CONF_DEFERRED_INTERFACE_NAMES,\n            default=[],\n            label=\"Deferred Interface Names\",\n            description=\"Interfaces to delay the normal operations of for special circumstance problem avoidance\",\n        ),\n        
setting.Setting(\n            CONF_OUTPUT_CACHE_LOCATION,\n            default=\"\",\n            label=\"Location of Output Cache\",\n            description=\"Location where cached calculations are stored and \"\n            \"retrieved if exactly the same as the calculation requested. Empty \"\n            \"string will not cache.\",\n            isEnvironment=True,\n        ),\n        setting.Setting(\n            CONF_MATERIAL_NAMESPACE_ORDER,\n            default=[],\n            label=\"Material Namespace Order\",\n            description=(\n                \"Ordered list of Python namespaces for finding materials by class name. \"\n                \"This allows users to choose between different implementations of reactor \"\n                \"materials. For example, the framework comes with a basic UZr material, \"\n                \"but power users will want to override it with their own UZr subclass. \"\n                \"This allows users to specify to get materials out of a plugin rather \"\n                \"than from the framework.\"\n            ),\n        ),\n        setting.Setting(\n            CONF_CYCLES,\n            default=[],\n            label=\"Cycle information\",\n            description=\"YAML dict defining the cycle history of the case. Options at each cycle \"\n            \"include: `name`, `cumulative days`, `step days`, `availability factor`, \"\n            \"`cycle length`, `burn steps`, and `power fractions`. If specified, do not use any of \"\n            \"the case settings `cycleLength(s)`, `availabilityFactor(s)`, `powerFractions`, or \"\n            \"`burnSteps`. 
Must also specify `nCycles` and `power`.\",\n            schema=vol.Schema(\n                [\n                    vol.All(\n                        {\n                            \"name\": str,\n                            \"cumulative days\": vol.All([vol.Any(float, int)], _isMonotonicIncreasing),\n                            \"step days\": [vol.Coerce(str)],\n                            \"power fractions\": [vol.Coerce(str)],\n                            \"availability factor\": vol.All(vol.Coerce(float), vol.Range(min=0, max=1)),\n                            \"cycle length\": vol.All(vol.Coerce(float), vol.Range(min=0)),\n                            \"burn steps\": vol.All(vol.Coerce(int), vol.Range(min=0)),\n                        },\n                        _mutuallyExclusiveCyclesInputs,\n                    )\n                ]\n            ),\n        ),\n        setting.Setting(\n            CONF_USER_PLUGINS,\n            default=[],\n            label=CONF_USER_PLUGINS,\n            description=\"YAML list defining the locations of UserPlugin subclasses. 
You can enter \"\n            \"the full ARMI import path: armi.test.test_what.MyPlugin, or you can enter the full \"\n            \"file path: /path/to/my/pluginz.py:MyPlugin \",\n            schema=vol.Any([vol.Coerce(str)], None),\n        ),\n        setting.Setting(\n            CONF_ASSEM_FLAGS_SKIP_AXIAL_EXP,\n            default=[],\n            label=\"Assembly Flags to Skip Axial Expansion\",\n            description=(\"Assemblies that match a flag on this list will not be axially expanded.\"),\n        ),\n    ]\n    return settings\n\n\ndef _isMonotonicIncreasing(inputList):\n    if isMonotonic(inputList, \"<\"):\n        return inputList\n    else:\n        raise vol.error.Invalid(f\"List must be monotonicically increasing: {inputList}\")\n\n\ndef _mutuallyExclusiveCyclesInputs(cycle):\n    \"\"\"Helper for `cycles` setting.\n\n    There are multiple different ways to define the time nodes of the simulation, but they are\n    exclusive, and you have to pick one. Here we verify it was done correcty.\n    \"\"\"\n    cycleKeys = cycle.keys()\n    if (\n        sum(\n            [\n                \"cumulative days\" in cycleKeys,\n                \"step days\" in cycleKeys,\n                \"cycle length\" in cycleKeys or \"burn steps\" in cycleKeys,\n            ]\n        )\n        != 1\n    ):\n        baseErrMsg = (\n            \"Must have exactly one of either 'cumulative days', 'step days', or 'cycle length' + \"\n            \"'burn steps' in each cycle definition.\"\n        )\n\n        raise vol.Invalid((baseErrMsg + f\" Check cycle {cycle['name']}.\") if \"name\" in cycleKeys else baseErrMsg)\n\n    return cycle\n"
  },
  {
    "path": "armi/settings/fwSettings/reportSettings.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Settings related to the report generation.\"\"\"\n\nfrom armi.settings import setting\n\nCONF_GEN_REPORTS = \"genReports\"\nCONF_SUMMARIZE_ASSEM_DESIGN = \"summarizeAssemDesign\"\n\n\ndef defineSettings():\n    \"\"\"Define settings for the interface.\"\"\"\n    settings = [\n        setting.Setting(\n            CONF_GEN_REPORTS,\n            default=True,\n            label=\"Enable Reports\",\n            description=\"Employ the use of the reporting utility for ARMI, generating \"\n            \"HTML and ASCII summaries of the run\",\n            oldNames=[(\"summarizer\", None)],\n        ),\n        setting.Setting(\n            CONF_SUMMARIZE_ASSEM_DESIGN,\n            default=True,\n            label=\"Summarize Assembly Design\",\n            description=\"Print a summary of the assembly design details at BOL\",\n        ),\n    ]\n    return settings\n"
  },
  {
    "path": "armi/settings/fwSettings/tests/__init__.py",
    "content": "# Copyright 2026 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"
  },
  {
    "path": "armi/settings/fwSettings/tests/test_fwSettings.py",
    "content": "# Copyright 2022 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for the framework settings.\"\"\"\n\nimport unittest\n\nimport voluptuous as vol\n\nfrom armi.settings import caseSettings\n\n\nclass TestSchema(unittest.TestCase):\n    \"\"\"Test that the implemented schema are doing what we think they are.\"\"\"\n\n    def setUp(self):\n        self.cs = caseSettings.Settings()\n        self.settings = {\n            \"nTasks\": {\n                \"valid\": 1,\n                \"invalid\": -1,\n                \"error\": vol.error.MultipleInvalid,\n            },\n            \"axialMeshRefinementFactor\": {\n                \"valid\": 1,\n                \"invalid\": 0,\n                \"error\": vol.error.MultipleInvalid,\n            },\n            \"minMeshSizeRatio\": {\n                \"valid\": 1,\n                \"invalid\": 0,\n                \"error\": vol.error.MultipleInvalid,\n            },\n            \"cycleLength\": {\n                \"valid\": 1,\n                \"invalid\": -1,\n                \"error\": vol.error.MultipleInvalid,\n            },\n            \"availabilityFactor\": {\n                \"valid\": 0,\n                \"invalid\": -1,\n                \"error\": vol.error.MultipleInvalid,\n            },\n            \"burnSteps\": {\n                \"valid\": 0,\n                \"invalid\": -1,\n                \"error\": vol.error.MultipleInvalid,\n            },\n     
       \"beta\": {\n                \"valid\": [0.5, 0.5],\n                \"invalid\": [0.5, 2],\n                \"error\": vol.error.AnyInvalid,\n            },\n            \"decayConstants\": {\n                \"valid\": [1, 1],\n                \"invalid\": [-1, 1],\n                \"error\": vol.error.AnyInvalid,\n            },\n            \"buGroups\": {\n                \"valid\": [1, 5],\n                \"invalid\": [-1, 200],\n                \"error\": vol.error.MultipleInvalid,\n            },\n            \"burnupPeakingFactor\": {\n                \"valid\": 0,\n                \"invalid\": -1,\n                \"error\": vol.error.MultipleInvalid,\n            },\n            \"startCycle\": {\n                \"valid\": 1,\n                \"invalid\": -1,\n                \"error\": vol.error.MultipleInvalid,\n            },\n            \"startNode\": {\n                \"valid\": 0,\n                \"invalid\": -1,\n                \"error\": vol.error.MultipleInvalid,\n            },\n            \"nCycles\": {\"valid\": 1, \"invalid\": -1, \"error\": vol.error.MultipleInvalid},\n            \"power\": {\"valid\": 0, \"invalid\": -1, \"error\": vol.error.MultipleInvalid},\n            \"skipCycles\": {\n                \"valid\": 0,\n                \"invalid\": -1,\n                \"error\": vol.error.MultipleInvalid,\n            },\n            \"targetK\": {\"valid\": 1, \"invalid\": -1, \"error\": vol.error.MultipleInvalid},\n            \"acceptableBlockAreaError\": {\n                \"valid\": 1,\n                \"invalid\": 0,\n                \"error\": vol.error.MultipleInvalid,\n            },\n            \"Tin\": {\"valid\": -272, \"invalid\": -274, \"error\": vol.error.MultipleInvalid},\n            \"Tout\": {\n                \"valid\": -272,\n                \"invalid\": -274,\n                \"error\": vol.error.MultipleInvalid,\n            },\n        }\n\n    def test_schema(self):\n        # first test that a 
valid case goes through without error\n        for settingName, settingVal in self.settings.items():\n            validOption = settingVal[\"valid\"]\n            self.cs = self.cs.modified(newSettings={settingName: validOption})\n\n            invalidOption = settingVal[\"invalid\"]\n            expectedError = settingVal[\"error\"]\n            with self.assertRaises(expectedError):\n                self.cs = self.cs.modified(newSettings={settingName: invalidOption})\n"
  },
  {
    "path": "armi/settings/fwSettings/tests/test_tightCouplingSettings.py",
    "content": "# Copyright 2023 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nUnit testing for tight coupling settings.\n\n- The settings example below shows the intended use for these settings in\n  an ARMI yaml input file.\n- Note, for these to be recognized, they need to be prefixed with \"tightCouplingSettings:\".\n\"\"\"\n\nimport io\nimport unittest\n\nimport voluptuous as vol\nfrom ruamel.yaml import YAML\n\nfrom armi.settings.fwSettings.tightCouplingSettings import (\n    TightCouplingSettingDef,\n    tightCouplingSettingsValidator,\n)\n\nTIGHT_COUPLING_SETTINGS_EXAMPLE = \"\"\"\n    globalFlux:\n        parameter: keff\n        convergence: 1e-05\n    fuelPerformance:\n        parameter: peakFuelTemperature\n        convergence: 1e-02\n    \"\"\"\n\n\nclass TestTightCouplingSettings(unittest.TestCase):\n    def test_validAssignments(self):\n        \"\"\"Tests that the tight coupling settings dictionary can be added to.\"\"\"\n        tc = {}\n        tc[\"globalFlux\"] = {\"parameter\": \"keff\", \"convergence\": 1e-05}\n        tc[\"thermalHydraulics\"] = {\n            \"parameter\": \"peakCladdingTemperature\",\n            \"convergence\": 1e-02,\n        }\n        tc = tightCouplingSettingsValidator(tc)\n        self.assertEqual(tc[\"globalFlux\"][\"parameter\"], \"keff\")\n        self.assertEqual(tc[\"globalFlux\"][\"convergence\"], 1e-05)\n        self.assertEqual(tc[\"thermalHydraulics\"][\"parameter\"], 
\"peakCladdingTemperature\")\n        self.assertEqual(tc[\"thermalHydraulics\"][\"convergence\"], 1e-02)\n\n    def test_incompleteAssignment(self):\n        \"\"\"Tests that the tight coupling settings is rendered empty if a complete dictionary is not provided.\"\"\"\n        tc = {}\n        tc[\"globalFlux\"] = None\n        tc = tightCouplingSettingsValidator(tc)\n        self.assertNotIn(\"globalFlux\", tc.keys())\n\n        tc = {}\n        tc[\"globalFlux\"] = {}\n        tc = tightCouplingSettingsValidator(tc)\n        self.assertNotIn(\"globalFlux\", tc.keys())\n\n    def test_missingAssignments(self):\n        \"\"\"Tests failure if not all keys/value pairs are provided on initialization.\"\"\"\n        # Fails because `convergence` is not assigned at the same\n        # time as the `parameter` assignment.\n        with self.assertRaises(vol.MultipleInvalid):\n            tc = {}\n            tc[\"globalFlux\"] = {\"parameter\": \"keff\"}\n            tc = tightCouplingSettingsValidator(tc)\n\n        # Fails because `parameter` is not assigned at the same\n        # time as the `convergence` assignment.\n        with self.assertRaises(vol.MultipleInvalid):\n            tc = {}\n            tc[\"globalFlux\"] = {\"convergence\": 1e-08}\n            tc = tightCouplingSettingsValidator(tc)\n\n    def test_invalidArgumentTypes(self):\n        \"\"\"Tests failure when the values of the parameters do not match the expected schema.\"\"\"\n        # Fails because `parameter` value is required to be a string\n        with self.assertRaises(vol.MultipleInvalid):\n            tc = {}\n            tc[\"globalFlux\"] = {\"parameter\": 1.0}\n            tc = tightCouplingSettingsValidator(tc)\n\n        # Fails because `convergence` value is required to be something can be coerced into a float\n        with self.assertRaises(vol.MultipleInvalid):\n            tc = {}\n            tc[\"globalFlux\"] = {\"convergence\": \"keff\"}\n            tc = 
tightCouplingSettingsValidator(tc)\n\n    def test_extraAssignments(self):\n        \"\"\"\n        Tests failure if additional keys are supplied that do not match the expected schema or\n        if there are any typos in the expected keys.\n        \"\"\"\n        # Fails because the `parameter` key is misspelled.\n        with self.assertRaises(vol.MultipleInvalid):\n            tc = {}\n            tc[\"globalFlux\"] = {\"parameters\": \"keff\", \"convergence\": 1e-05}\n            tc = tightCouplingSettingsValidator(tc)\n\n        # Fails because of the `extra` key.\n        with self.assertRaises(vol.MultipleInvalid):\n            tc = {}\n            tc[\"globalFlux\"] = {\n                \"parameter\": \"keff\",\n                \"convergence\": 1e-05,\n                \"extra\": \"fails\",\n            }\n            tc = tightCouplingSettingsValidator(tc)\n\n    def test_serializeSettingsException(self):\n        \"\"\"Ensure the TypeError in serializeTightCouplingSettings can be reached.\"\"\"\n        tc = [\"globalFlux\"]\n        with self.assertRaises(TypeError) as cm:\n            tc = tightCouplingSettingsValidator(tc)\n            the_exception = cm.exception\n            self.assertEqual(the_exception.error_code, 3)\n\n    def test_yamlIO(self):\n        \"\"\"Ensure we can read/write this custom setting object to yaml.\"\"\"\n        yaml = YAML()\n        inp = yaml.load(io.StringIO(TIGHT_COUPLING_SETTINGS_EXAMPLE))\n        tcd = TightCouplingSettingDef(\"TestSetting\")\n        tcd.setValue(inp)\n        self.assertEqual(tcd.value[\"globalFlux\"][\"parameter\"], \"keff\")\n        outBuf = io.StringIO()\n        output = tcd.dump()\n        yaml.dump(output, outBuf)\n        outBuf.seek(0)\n        inp2 = yaml.load(outBuf)\n        self.assertEqual(inp.keys(), inp2.keys())\n"
  },
  {
    "path": "armi/settings/fwSettings/tightCouplingSettings.py",
    "content": "# Copyright 2023 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThe data structures and schema of the tight coupling settings.\n\nThese are advanced/compound settings that are carried along in the normal cs\nobject but aren't simple key/value pairs.\n\"\"\"\n\nfrom typing import Dict, Union\n\nimport voluptuous as vol\n\nfrom armi.settings import Setting\n\n_SCHEMA = vol.Schema(\n    {\n        str: vol.Schema(\n            {\n                vol.Required(\"parameter\"): str,\n                vol.Required(\"convergence\"): vol.Coerce(float),\n            }\n        )\n    }\n)\n\n\nclass TightCouplingSettings(dict):\n    \"\"\"\n    Dictionary with keys of Interface functions and a dictionary value.\n\n    Notes\n    -----\n    The dictionary value for each Interface function is required to contain a ``parameter``\n    and a ``convergence`` key with string and float values, respectively. 
No other\n    keys are allowed.\n\n    Examples\n    --------\n        couplingSettings = TightCouplingSettings({'globalFlux': {'parameter': 'keff', 'convergence': 1e-05}})\n    \"\"\"\n\n    def __repr__(self):\n        return f\"<{self.__class__.__name__} with Interface functions {self.keys()}>\"\n\n\ndef serializeTightCouplingSettings(tightCouplingSettingsDict: Union[TightCouplingSettings, Dict]) -> Dict[str, Dict]:\n    \"\"\"\n    Return a serialized form of the ``TightCouplingSettings`` as a dictionary.\n\n    Notes\n    -----\n    Attributes that are not set (i.e., set to None) will be skipped.\n    \"\"\"\n    if not isinstance(tightCouplingSettingsDict, dict):\n        raise TypeError(f\"Expected a dictionary for {tightCouplingSettingsDict}\")\n\n    output = {}\n    for interfaceFunction, options in tightCouplingSettingsDict.items():\n        # Setting the value to an empty dictionary\n        # if it is set to a None or an empty\n        # dictionary.\n        if not options:\n            continue\n\n        output[str(interfaceFunction)] = options\n    return output\n\n\nclass TightCouplingSettingDef(Setting):\n    \"\"\"\n    Custom setting object to manage the tight coupling settings for each interface.\n\n    Notes\n    -----\n    This uses the ``tightCouplingSettingsValidator`` schema to validate the inputs\n    and will automatically coerce the value into a ``TightCouplingSettings`` dictionary.\n    \"\"\"\n\n    def __init__(self, name):\n        description = (\n            \"Data structure defining the tight coupling parameters and convergence criteria for each interface.\"\n        )\n        label = \"Interface Tight Coupling Control\"\n        default = TightCouplingSettings()\n        options = None\n        schema = tightCouplingSettingsValidator\n        enforcedOptions = False\n        subLabels = None\n        isEnvironment = False\n        oldNames = None\n        Setting.__init__(\n            self,\n            name,\n            
default,\n            description,\n            label,\n            options,\n            schema,\n            enforcedOptions,\n            subLabels,\n            isEnvironment,\n            oldNames,\n        )\n\n    def dump(self):\n        \"\"\"Return a serialized version of the ``TightCouplingSettings`` object.\"\"\"\n        return serializeTightCouplingSettings(self._value)\n\n\ndef tightCouplingSettingsValidator(tightCouplingSettingsDict: Dict[str, Dict]) -> TightCouplingSettings:\n    \"\"\"Returns a ``TightCouplingSettings`` object if validation is successful.\"\"\"\n    tightCouplingSettingsDict = serializeTightCouplingSettings(tightCouplingSettingsDict)\n    tightCouplingSettingsDict = _SCHEMA(tightCouplingSettingsDict)\n    vals = TightCouplingSettings()\n    for interfaceFunction, inputParams in tightCouplingSettingsDict.items():\n        vals[interfaceFunction] = inputParams\n    return vals\n"
  },
  {
    "path": "armi/settings/setting.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nSystem to handle basic configuration settings.\n\nNotes\n-----\nThe type of each Setting is derived from the type of the default value. When users set values to their settings, ARMI\nenforces these types with schema validation. This also allows for more complex schema validation for settings that are\nmore complex dictionaries (e.g. XS, rx coeffs).\n\"\"\"\n\nimport copy\nimport datetime\nfrom collections import namedtuple\nfrom typing import List, Optional, Tuple\n\nimport voluptuous as vol\n\nfrom armi import runLog\nfrom armi.reactor.flags import Flags\n\n# Options are used to imbue existing settings with new Options. This allows a setting like `neutronicsKernel` to\n# strictly enforce options, even though the plugin that defines it does not know all possible options, which may be\n# provided from other plugins.\nOption = namedtuple(\"Option\", [\"option\", \"settingName\"])\nDefault = namedtuple(\"Default\", [\"value\", \"settingName\"])\n\n\nclass Setting:\n    \"\"\"\n    A particular setting.\n\n    .. impl:: The setting default is mandatory.\n        :id: I_ARMI_SETTINGS_DEFAULTS\n        :implements: R_ARMI_SETTINGS_DEFAULTS\n\n        Setting objects hold all associated information of a setting in ARMI and should typically be accessed through\n        the Settings methods rather than directly. 
Settings require a mandatory default value.\n\n        Setting subclasses can implement custom ``load`` and ``dump`` methods that can enable serialization (to/from\n        dicts) of custom objects. When you set a setting's value, the value will be unserialized into the custom object\n        and when you call ``dump``, it will be serialized. Just accessing the value will return the actual object in\n        this case.\n    \"\"\"\n\n    def __init__(\n        self,\n        name,\n        default,\n        description,\n        label=None,\n        options=None,\n        schema=None,\n        enforcedOptions=False,\n        subLabels=None,\n        isEnvironment=False,\n        oldNames: Optional[List[Tuple[str, Optional[datetime.date]]]] = None,\n    ):\n        \"\"\"\n        Initialize a Setting object.\n\n        Parameters\n        ----------\n        name : str\n            the setting's name\n        default : object\n            The setting's default value\n        description : str\n            The description of the setting\n        label : str, optional\n            the shorter description used for the ARMI GUI\n        options : list, optional\n            Legal values (useful in GUI drop-downs)\n        schema : callable, optional\n            A function that gets called with the configuration VALUES that build this setting. The callable will either\n            raise an exception, safely modify/update, or leave unchanged the value. If left blank, a type check will be\n            performed against the default.\n        enforcedOptions : bool, optional\n            Require that the value be one of the valid options.\n        subLabels : tuple, optional\n            The names of the fields in each tuple for a setting that accepts a list of tuples. For example, if a setting\n            is a list of (assembly name, file name) tuples, the sublabels would be (\"assembly name\", \"file name\"). 
This\n            is needed for building GUI widgets to input such data.\n        isEnvironment : bool, optional\n            Whether this should be considered an \"environment\" setting. These can be used by the Case system to\n            propagate environment options through command-line flags.\n        oldNames : list of tuple, optional\n            List of previous names that this setting used to have, along with optional expiration dates. These can aid\n            in automatic migration of old inputs. When provided, if it is appears that the expiration date has passed,\n            old names will result in errors, requiring to user to update their input by hand to use more current names.\n        \"\"\"\n        assert description, f\"Setting {name} defined without description.\"\n        assert description != \"None\", f\"Setting {name} defined without description.\"\n\n        self.name = name\n        self.description = description or name\n        self.label = label or name\n        self.options = options\n        self.enforcedOptions = enforcedOptions\n        self.subLabels = subLabels\n        self.isEnvironment = isEnvironment\n        self.oldNames: List[Tuple[str, Optional[datetime.date]]] = oldNames or []\n        self._default = default\n        self._value = copy.deepcopy(default)  # break link from _default\n        # Retain the passed schema so that we don't accidentally stomp on it in addOptions(), et.al.\n        self._customSchema = schema\n        self._setSchema()\n\n    @property\n    def underlyingType(self):\n        \"\"\"Useful in categorizing settings, e.g. 
for GUI.\"\"\"\n        return type(self._default)\n\n    @property\n    def containedType(self):\n        \"\"\"The subtype for lists.\"\"\"\n        # assume schema set to [int] or [str] or something similar\n        try:\n            containedSchema = self.schema.schema[0]\n            if isinstance(containedSchema, vol.Coerce):\n                # special case for Coerce objects, which store their underlying type as ``.type``.\n                return containedSchema.type\n            return containedSchema\n        except TypeError:\n            # cannot infer. fall back to str\n            return str\n\n    def _setSchema(self):\n        \"\"\"Apply or auto-derive schema of the value.\"\"\"\n        schema = self._customSchema\n        if schema:\n            self.schema = schema\n        elif self.options and self.enforcedOptions:\n            self.schema = vol.Schema(vol.In(self.options))\n        else:\n            # Coercion is needed in some GUI instances where lists are getting set as strings.\n            if isinstance(self.default, list) and self.default:\n                # Non-empty default: assume the default has the desired contained type Coerce all values to the first\n                # entry in the default so mixed floats and ints work. Note that this will not work for settings that\n                # allow mixed types in their lists (e.g. 
[0, '10R']), so those all need custom schemas.\n                self.schema = vol.Schema([vol.Coerce(type(self.default[0]))])\n            else:\n                self.schema = vol.Schema(vol.Coerce(type(self.default)))\n\n    @property\n    def default(self):\n        return self._default\n\n    @property\n    def value(self):\n        return self._value\n\n    @value.setter\n    def value(self, val):\n        \"\"\"\n        Set the value directly.\n\n        Notes\n        -----\n        Can't just decorate ``setValue`` with ``@value.setter`` because some callers use setting.value=val and others\n        use setting.setValue(val) and the latter fails with ``TypeError: 'XSSettings' object is not callable``.\n        \"\"\"\n        return self.setValue(val)\n\n    def setValue(self, val):\n        \"\"\"\n        Set value of a setting.\n\n        This validates it against its value schema on the way in.\n\n        Some setting values are custom serializable objects. Rather than writing them directly to YAML using YAML's\n        Python object-writing features, we prefer to use our own custom serializers on subclasses.\n        \"\"\"\n        try:\n            val = self.schema(val)\n        except vol.error.Invalid:\n            runLog.error(f\"Error in setting {self.name}, val: {val}.\")\n            raise\n\n        self._value = self._load(val)\n\n    def addOptions(self, options: List[Option]):\n        \"\"\"Extend this Setting's options with extra options.\"\"\"\n        try:\n            self.options.extend([o.option for o in options])\n        except AttributeError:\n            if self.options is None:\n                msg = (\n                    f\"The Setting {self.name} has no default options, it looks like you want to add that to the \"\n                    + \"definition.\"\n                )\n                runLog.error(msg)\n                raise AttributeError(msg)\n            else:\n                raise\n\n        self._setSchema()\n\n    
def addOption(self, option: Option):\n        \"\"\"Extend this Setting's options with an extra option.\"\"\"\n        self.addOptions([option])\n\n    def changeDefault(self, newDefault: Default):\n        \"\"\"Change the default of a setting, and also the current value.\"\"\"\n        self._default = newDefault.value\n        self.value = newDefault.value\n\n    @staticmethod\n    def _load(inputVal):\n        \"\"\"\n        Create setting value from input value.\n\n        In some custom settings, this can return a custom object rather than just the input value.\n        \"\"\"\n        return inputVal\n\n    def dump(self):\n        \"\"\"\n        Return a serializable version of this setting's value.\n\n        Override to define custom deserializers for custom/compound settings.\n        \"\"\"\n        return self._value\n\n    def __repr__(self):\n        return f\"<{self.__class__.__name__} {self.name} value:{self.value} default:{self.default}>\"\n\n    def __getstate__(self):\n        \"\"\"\n        Remove schema during pickling because it is often unpickleable.\n\n        Notes\n        -----\n        Errors are often with\n        ``AttributeError: Can't pickle local object '_compile_scalar.<locals>.validate_instance'``\n\n        See Also\n        --------\n        armi.settings.caseSettings.Settings.__setstate__ : regenerates the schema upon load.\n            Note that we don't do it at the individual setting level because it'd be too O(N^2).\n        \"\"\"\n        state = copy.deepcopy(self.__dict__)\n        for trouble in (\"schema\", \"_customSchema\"):\n            if trouble in state:\n                del state[trouble]\n\n        return state\n\n    def revertToDefault(self):\n        \"\"\"\n        Revert a setting back to its default.\n\n        Notes\n        -----\n        Skips the property setter because default val should already be validated.\n        \"\"\"\n        self._value = copy.deepcopy(self.default)\n\n    def 
isDefault(self):\n        \"\"\"\n        Returns a boolean based on whether or not the setting equals its default value.\n\n        It is possible for a setting to change and not be reported as such when it is changed back to its default. That\n        behavior seems acceptable.\n        \"\"\"\n        return self.value == self.default\n\n    @property\n    def offDefault(self):\n        \"\"\"Return True if the setting is not the default value for that setting.\"\"\"\n        return not self.isDefault()\n\n    def getCustomAttributes(self):\n        \"\"\"Hack to work with settings writing system until old one is gone.\"\"\"\n        return {\"value\": self.value}\n\n    def getDefaultAttributes(self):\n        \"\"\"\n        Additional hack, residual from when settings system could write settings definitions.\n\n        This is only needed here due to the unit tests in test_settings.\n        \"\"\"\n        return {\n            \"value\": self.value,\n            \"type\": type(self.default),\n            \"default\": self.default,\n        }\n\n    def __copy__(self):\n        setting = Setting(\n            str(self.name),\n            copy.copy(self._default),\n            description=None if self.description is None else str(self.description),\n            label=None if self.label is None else str(self.label),\n            options=copy.copy(self.options),\n            schema=copy.copy(self.schema) if hasattr(self, \"schema\") else None,\n            enforcedOptions=bool(self.enforcedOptions),\n            subLabels=copy.copy(self.subLabels),\n            isEnvironment=bool(self.isEnvironment),\n            oldNames=None if self.oldNames is None else list(self.oldNames),\n        )\n        setting._value = copy.deepcopy(self._value)\n        return setting\n\n\nclass FlagListSetting(Setting):\n    \"\"\"Subclass of :py:class:`Setting <armi.settings.Setting>` convert settings between flags and strings.\"\"\"\n\n    def __init__(\n        self,\n        
name,\n        default,\n        description=None,\n        label=None,\n        oldNames: Optional[List[Tuple[str, Optional[datetime.date]]]] = None,\n    ):\n        Setting.__init__(\n            self,\n            name=name,\n            default=default,\n            description=description,\n            label=label,\n            options=None,\n            schema=self.schema,\n            enforcedOptions=None,\n            subLabels=None,\n            isEnvironment=False,\n            oldNames=oldNames,\n        )\n\n    @staticmethod\n    def schema(val) -> List[Flags]:\n        \"\"\"\n        Return a list of :py:class:`Flags <armi.reactor.flags.Flags`.\n\n        Raises\n        ------\n        TypeError\n            When ``val`` is not a list.\n        ValueError\n            When ``val`` is not an instance of str or Flags.\n        \"\"\"\n        if not isinstance(val, list):\n            raise TypeError(f\"Expected `{val}` to be a list.\")\n\n        flagVals = []\n        for v in val:\n            if isinstance(v, str):\n                flagVals.append(Flags.fromString(v))\n            elif isinstance(v, Flags):\n                flagVals.append(v)\n            else:\n                raise ValueError(f\"Invalid flag input `{v}` in `FlagListSetting`\")\n\n        return flagVals\n\n    def dump(self) -> List[str]:\n        \"\"\"Return a list of strings converted from the flag values.\"\"\"\n        return [Flags.toString(v) for v in self.value]\n"
  },
  {
    "path": "armi/settings/settingsIO.py",
    "content": "# Copyright 2020 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This module contains classes and methods for reading and writing\n:py:class:`~armi.settings.caseSettings.Settings`, and the contained\n:py:class:`~armi.settings.setting.Setting`.\n\"\"\"\n\nimport collections\nimport datetime\nimport os\nimport sys\nfrom typing import Dict, Set, Tuple\n\nimport ruamel.yaml.comments\nfrom ruamel.yaml import YAML\n\nfrom armi import context, runLog\nfrom armi.meta import __version__ as version\nfrom armi.settings.setting import Setting\nfrom armi.utils.customExceptions import (\n    InvalidSettingsFileError,\n    InvalidSettingsStopProcess,\n    SettingException,\n)\n\n# Constants defining valid output styles\nWRITE_SHORT = \"short\"\nWRITE_MEDIUM = \"medium\"\nWRITE_FULL = \"full\"\n\n\nclass Roots:\n    \"\"\"XML tree root node common strings.\"\"\"\n\n    CUSTOM = \"settings\"\n    VERSION = \"version\"\n\n\nclass SettingRenamer:\n    \"\"\"\n    Utility class to help with setting rename migrations.\n\n    This class stores a cache of renaming maps, derived from the ``Setting.oldNames`` values of the\n    passed ``settings``. Expired renames are retained, so that meaningful warning messages can be\n    generated if one attempts to use one of them. 
The renaming logic follows the rules described in\n    :py:meth:`renameSetting`.\n    \"\"\"\n\n    def __init__(self, settings: Dict[str, Setting]):\n        self._currentNames: Set[str] = set()\n        self._activeRenames: Dict[str, str] = dict()\n        self._expiredRenames: Set[Tuple[str, str, datetime.date]] = set()\n\n        today = datetime.date.today()\n\n        for name, s in settings.items():\n            self._currentNames.add(name)\n            for oldName, expiry in s.oldNames:\n                if expiry is not None:\n                    expired = expiry <= today\n                else:\n                    expired = False\n                if expired:\n                    self._expiredRenames.add((oldName, name, expiry))\n                else:\n                    if oldName in self._activeRenames:\n                        raise SettingException(\n                            \"The setting rename from {0}->{1} collides with another rename {0}->{2}\".format(\n                                oldName, name, self._activeRenames[oldName]\n                            )\n                        )\n                    self._activeRenames[oldName] = name\n\n    def renameSetting(self, name) -> Tuple[str, bool]:\n        \"\"\"\n        Attempt to rename a candidate setting.\n\n        Renaming follows these rules:\n         - If the ``name`` corresponds to a current setting name, do not attempt to rename it.\n         - If the ``name`` does not correspond to a current setting name, but is one of the active\n           renames, return the corresponding active rename.\n         - If the ``name`` does not correspond to a current setting name, but is one of the expired\n           renames, produce a warning and do not rename it.\n\n        Parameters\n        ----------\n        name : str\n            The candidate setting name to potentially rename.\n\n        Returns\n        -------\n        name : str\n            The potentially-renamed setting\n        
renamed : bool\n            Whether the setting was actually renamed\n        \"\"\"\n        if name in self._currentNames:\n            return name, False\n\n        activeRename = self._activeRenames.get(name, None)\n        if activeRename is not None:\n            runLog.extra(f\"Invalid setting {name} found. Renaming to {activeRename}.\", single=True)\n            return activeRename, True\n\n        return name, False\n\n\nclass SettingsReader:\n    \"\"\"Abstract class for processing settings files.\n\n    .. impl:: The setting use a human-readable, plain text file as input.\n        :id: I_ARMI_SETTINGS_IO_TXT\n        :implements: R_ARMI_SETTINGS_IO_TXT\n\n        ARMI uses the YAML standard for settings files. ARMI uses industry-standard ``ruamel.yaml``\n        Python library to read these files. ARMI does not bend or change the YAML file format\n        standard in any way.\n\n    Parameters\n    ----------\n    cs : Settings\n        The settings object to read into\n    \"\"\"\n\n    def __init__(self, cs):\n        self.cs = cs\n        self.inputPath = \"<stream>\"\n        self.invalidSettings = set()\n        self.settingsAlreadyRead = set()\n        self._renamer = SettingRenamer(dict(self.cs.items()))\n\n        # The input version will be overwritten if explicitly stated in input file. 
Otherwise, it's\n        # assumed to precede the version inclusion change and should be treated as alright.\n        self.inputVersion = version\n        self.liveVersion = version\n\n    def __getitem__(self, key):\n        return self.cs[key]\n\n    def __getattr__(self, attr):\n        return getattr(self.cs, attr)\n\n    def __repr__(self):\n        return f\"<{self.__class__.__name__} {self.inputPath}>\"\n\n    def readFromFile(self, path, handleInvalids=True):\n        \"\"\"Load file and read it.\"\"\"\n        with open(path, \"r\") as f:\n            ext = os.path.splitext(path)[1].lower()\n            assert ext.lower() in (\".yaml\", \".yml\"), f\"{ext} is the wrong extension\"\n            self.inputPath = path\n            try:\n                self.readFromStream(f, handleInvalids)\n            except Exception as ee:\n                raise InvalidSettingsFileError(path, str(ee))\n\n    def readFromStream(self, stream, handleInvalids=True):\n        \"\"\"Read from a file-like stream.\"\"\"\n        self._readYaml(stream)\n        if handleInvalids:\n            self._checkInvalidSettings()\n\n    def _readYaml(self, stream):\n        \"\"\"Read settings from a YAML stream.\"\"\"\n        from armi.physics.thermalHydraulics import const  # avoid circular import\n        from armi.settings.fwSettings.globalSettings import CONF_VERSIONS\n\n        yaml = YAML(typ=\"rt\")\n        yaml.allow_duplicate_keys = False\n        tree = yaml.load(stream)\n        if \"settings\" not in tree:\n            raise InvalidSettingsFileError(\n                self.inputPath,\n                \"Missing the `settings:` header required in YAML settings\",\n            )\n\n        if const.ORIFICE_SETTING_ZONE_MAP in tree:\n            raise InvalidSettingsFileError(self.inputPath, \"Appears to be an orifice_settings file\")\n\n        caseSettings = tree[Roots.CUSTOM]\n        setts = tree[\"settings\"]\n        if CONF_VERSIONS in setts and \"armi\" in 
setts[CONF_VERSIONS]:\n            self.inputVersion = setts[CONF_VERSIONS][\"armi\"]\n        else:\n            # Versions setting section not found; continuing with uncontrolled versions.\n            self.inputVersion = \"uncontrolled\"\n\n        for settingName, settingVal in caseSettings.items():\n            self._applySettings(settingName, settingVal)\n\n    def _checkInvalidSettings(self):\n        if not self.invalidSettings:\n            return\n        try:\n            invalidNames = \"\\n\\t\".join(self.invalidSettings)\n            proceed = prompt(\n                \"Found {} invalid settings in {}.\\n\\n {} \\n\\t\".format(\n                    len(self.invalidSettings), self.inputPath, invalidNames\n                ),\n                \"Invalid settings will be ignored. Continue running the case?\",\n                \"YES_NO\",\n            )\n        except RunLogPromptUnresolvable:\n            # proceed with invalid settings (they'll be ignored).\n            proceed = True\n        if not proceed:\n            raise InvalidSettingsStopProcess(self)\n        else:\n            runLog.info(f\"Ignoring invalid settings: {invalidNames}\")\n\n    def _applySettings(self, name, val):\n        \"\"\"Add a setting, if it is valid. 
Capture invalid settings.\"\"\"\n        _nameToSet, _wasRenamed = self._renamer.renameSetting(name)\n\n        if name not in self.cs:\n            self.invalidSettings.add(name)\n        else:\n            # apply validations\n            _settingObj = self.cs.getSetting(name)\n\n            # The val is automatically coerced into the expected type when set using either the default or user-defined\n            # schema\n            self.cs[name] = val\n\n\nclass SettingsWriter:\n    \"\"\"Writes settings out to files.\n\n    This can write in three styles:\n\n    short\n        setting values that are not their defaults only\n    medium\n        preserves all settings originally in file even if they match the default value\n    full\n        all setting values regardless of default status\n    \"\"\"\n\n    def __init__(self, settings_instance, style=\"short\", settingsSetByUser=[]):\n        self.cs = settings_instance\n        self.style = style\n        if style not in {WRITE_SHORT, WRITE_MEDIUM, WRITE_FULL}:\n            raise ValueError(f\"Invalid supplied setting writing style {style}\")\n        # The writer should know about the old settings it is overwriting, but only sometimes (when the style is medium)\n        self.settingsSetByUser = settingsSetByUser\n\n    @staticmethod\n    def _getTag():\n        tag, _attrib = Roots.CUSTOM, {Roots.VERSION: version}\n        return tag\n\n    def writeYaml(self, stream):\n        \"\"\"Write settings to YAML file.\"\"\"\n        settingData = self._getSettingDataToWrite()\n        settingData = self._preprocessYaml(settingData)\n        yaml = YAML()\n        yaml.default_flow_style = False\n        yaml.indent(mapping=2, sequence=4, offset=2)\n        yaml.dump(settingData, stream)\n\n    def _preprocessYaml(self, settingData):\n        \"\"\"\n        Clean up the dict before dumping to YAML.\n\n        If it has just a value attrib it flattens it for brevity.\n        \"\"\"\n        from 
armi.settings.fwSettings.globalSettings import CONF_VERSIONS\n\n        yamlData = {}\n        cleanedData = collections.OrderedDict()\n        for settingObj, settingDatum in settingData.items():\n            if \"value\" in settingDatum and len(settingDatum) == 1:\n                # ok to flatten\n                cleanedData[settingObj.name] = settingObj.dump()\n            else:\n                cleanedData[settingObj.name] = settingDatum\n\n        # add ARMI version to the settings YAML\n        if CONF_VERSIONS not in cleanedData:\n            cleanedData[CONF_VERSIONS] = {}\n        cleanedData[CONF_VERSIONS][\"armi\"] = version\n\n        # this gets rid of a !!omap associated with ordered dicts\n        tag = self._getTag()\n        yamlData.update({tag: ruamel.yaml.comments.CommentedMap(cleanedData)})\n        return yamlData\n\n    def _getSettingDataToWrite(self):\n        \"\"\"\n        Make an ordered dict with all settings slated for being written.\n\n        This is general so it can be dumped to whatever file format.\n        \"\"\"\n        settingData = collections.OrderedDict()\n        for settingName, settingObject in iter(sorted(self.cs.items(), key=lambda name: name[0].lower())):\n            if self.style == WRITE_SHORT and not settingObject.offDefault:\n                continue\n\n            if (\n                self.style == WRITE_MEDIUM\n                and not settingObject.offDefault\n                and settingName not in self.settingsSetByUser\n            ):\n                continue\n\n            attribs = settingObject.getCustomAttributes().items()\n            settingDatum = {}\n            for attribName, attribValue in attribs:\n                if isinstance(attribValue, type):\n                    attribValue = attribValue.__name__\n                settingDatum[attribName] = attribValue\n            settingData[settingObject] = settingDatum\n\n        return settingData\n\n\ndef prompt(statement, question, *options):\n    
\"\"\"Prompt the user for some information.\"\"\"\n    if context.CURRENT_MODE == context.Mode.GUI:\n        # avoid hard dependency on wx\n        import wx\n\n        msg = statement + \"\\n\\n\\n\" + question\n        style = wx.CENTER\n        for opt in options:\n            style |= getattr(wx, opt)\n        dlg = wx.MessageDialog(None, msg, style=style)\n\n        response = dlg.ShowModal()\n        dlg.Destroy()\n        if response == wx.ID_CANCEL:\n            raise RunLogPromptCancel(\"Manual cancellation of GUI prompt\")\n        return response in [wx.ID_OK, wx.ID_YES]\n\n    elif context.CURRENT_MODE == context.Mode.INTERACTIVE:\n        response = \"\"\n        responses = [opt for opt in options if opt in [\"YES_NO\", \"YES\", \"NO\", \"CANCEL\", \"OK\"]]\n\n        if \"YES_NO\" in responses:\n            index = responses.index(\"YES_NO\")\n            responses[index] = \"NO\"\n            responses.insert(index, \"YES\")\n\n        if not any(responses):\n            raise RuntimeError(f\"No suitable responses in {responses}\")\n\n        # highly requested shorthand responses\n        if \"YES\" in responses:\n            responses.append(\"Y\")\n        if \"NO\" in responses:\n            responses.append(\"N\")\n\n        # Use the logger tools to handle user prompts (runLog supports this).\n        while response not in responses:\n            runLog.LOG.log(\"prompt\", statement)\n            runLog.LOG.log(\"prompt\", \"{} ({}): \".format(question, \", \".join(responses)))\n            response = sys.stdin.readline().strip().upper()\n\n        if response == \"CANCEL\":\n            raise RunLogPromptCancel(\"Manual cancellation of interactive prompt\")\n\n        return response in [\"YES\", \"Y\", \"OK\"]\n\n    else:\n        raise RunLogPromptUnresolvable(f\"Incorrect CURRENT_MODE for prompting user: {context.CURRENT_MODE}\")\n\n\nclass RunLogPromptCancel(Exception):\n    \"\"\"An error that occurs when the user submits a cancel on a 
runLog prompt which allows for cancellation.\"\"\"\n\n    pass\n\n\nclass RunLogPromptUnresolvable(Exception):\n    \"\"\"\n    An error that occurs when the current mode enum in armi.__init__ suggests the user cannot be\n    communicated with from the current process.\n    \"\"\"\n\n    pass\n"
  },
  {
    "path": "armi/settings/settingsValidation.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nA system to check user settings for validity and provide users with meaningful suggestions to fix.\n\nThis allows developers to define a rich set of rules and suggestions for user settings. These then pop up during\ninitialization of a run, either on the command line or as dialogues in the GUI. They say things like: \"Your ___ setting\nhas the value ___, which is impossible. Would you like to switch to ___?\"\n\"\"\"\n\nimport itertools\nimport os\nimport re\n\nfrom armi import context, getPluginManagerOrFail, runLog\nfrom armi.settings.settingsIO import (\n    RunLogPromptCancel,\n    RunLogPromptUnresolvable,\n    prompt,\n)\nfrom armi.utils import pathTools, safeCopy\nfrom armi.utils.mathematics import expandRepeatedFloats\n\n\nclass Query:\n    \"\"\"\n    An individual setting validator.\n\n    .. impl:: Rules to validate and customize a setting's behavior.\n        :id: I_ARMI_SETTINGS_RULES\n        :implements: R_ARMI_SETTINGS_RULES\n\n        This class is meant to represent a generic validation test against a setting. The goal is: developers create new\n        settings and they want to make sure those settings are used correctly. As an implementation, users pass in a\n        ``condition`` function to this class that returns ``True`` or ``False`` based on the setting name and value. 
And\n        then this class has a ``resolve`` method which tests if the condition is met. Optionally, this class also\n        contains a ``correction`` function that allows users to automatically correct a bad setting, if the developers\n        can find a clear path forward.\n    \"\"\"\n\n    def __init__(self, condition, statement, question, correction):\n        \"\"\"\n        Construct a query.\n\n        Parameters\n        ----------\n        condition : callable\n            A callable that returns True or False. If True, then the query activates its question and potential\n            correction.\n        statement : str\n            A statement of the problem indicated by a True condition\n        question : str\n            A question asking the user for confirmation of the proposed fix.\n        correction : callable\n            A callable that when called fixes the situation. See :py:meth:`Inspector.NO_ACTION` for no-ops.\n        \"\"\"\n        self.condition = condition\n        self.statement = statement\n        self.question = question\n        self.correction = correction\n        # True if the query is `passed` and does not result in an immediate failure\n        self.corrected = False\n        self._passed = False\n        self.autoResolved = True\n\n    def __repr__(self):\n        # Add representation so that it's possible to identify which one is being referred to when there are errors.\n        return f\"<Query: {self.statement}>\"\n\n    def __bool__(self):\n        try:\n            return bool(self.condition())\n        except TypeError:\n            runLog.error(f\"Invalid setting validation query. 
Update validator for: {self})\")\n            raise\n\n    def isCorrective(self):\n        return self.correction is not Inspector.NO_ACTION\n\n    def resolve(self):\n        \"\"\"Standard i/o prompt for resolution of an individual query.\"\"\"\n        if context.MPI_RANK != 0:\n            return\n\n        if self.condition():\n            try:\n                if self.isCorrective():\n                    try:\n                        makeCorrection = prompt(\n                            \"INSPECTOR: \" + self.statement,\n                            self.question,\n                            \"YES_NO\",\n                            \"NO_DEFAULT\",\n                            \"CANCEL\",\n                        )\n                        if makeCorrection:\n                            self.correction()\n                            self.corrected = True\n                        self._passed = True\n                    except RunLogPromptCancel:\n                        raise SystemExit(\"You have cancelled the submission.\")\n                else:\n                    try:\n                        continueSubmission = prompt(\n                            \"INSPECTOR: \" + self.statement,\n                            \"Continue?\",\n                            \"YES_NO\",\n                            \"NO_DEFAULT\",\n                        )\n                        if not continueSubmission:\n                            raise SystemExit(\"You have cancelled the submission.\")\n                    except RunLogPromptCancel:\n                        raise SystemExit(\"You have cancelled the submission.\")\n            except RunLogPromptUnresolvable:\n                self.autoResolved = False\n                self._passed = True\n\n\nclass Inspector:\n    \"\"\"\n    This manages queries which assert certain states of the data model, generally presenting themselves to the user,\n    offering information on the potential problem, a question and the action to 
take on an affirmative and negative\n    answer from the user.\n\n    In practice very useful for making sure setting values are as intended and without bad interplay with one another.\n\n    One Inspector will contain multiple Queries and be associated directly with an\n    :py:class:`~armi.operators.operator.Operator`.\n    \"\"\"\n\n    @staticmethod\n    def NO_ACTION():\n        \"\"\"Convenience callable used to generate Queries that can't be easily auto-resolved.\"\"\"\n        return None\n\n    def __init__(self, cs):\n        \"\"\"\n        Construct an inspector.\n\n        Parameters\n        ----------\n        cs : Settings\n        \"\"\"\n        self.queries = []\n        self.cs = cs\n        self.geomType = None\n        self.coreSymmetry = None\n        self._inspectBlueprints()\n        self._inspectSettings()\n\n        # Gather and attach validators from all plugins. This runs on all registered plugins, not just active ones.\n        pluginQueries = getPluginManagerOrFail().hook.defineSettingsValidators(inspector=self)\n        for queries in pluginQueries:\n            self.queries.extend(queries)\n\n    def run(self, cs=None):\n        \"\"\"\n        Run through each query and deal with it if possible.\n\n        Returns\n        -------\n        correctionsMade : bool\n            Whether or not anything was updated.\n\n        Raises\n        ------\n        RuntimeError\n            When a programming error causes queries to loop.\n        \"\"\"\n        if context.MPI_RANK != 0:\n            return False\n\n        # the following attribute changes will alter what the queries investigate when resolved\n        correctionsMade = False\n        self.cs = cs or self.cs\n        runLog.debug(\"{} executing queries.\".format(self.__class__.__name__))\n        if not any(self.queries):\n            runLog.debug(\"{} found no problems with the current state.\".format(self.__class__.__name__))\n        else:\n            for query in 
self.queries:\n                query.resolve()\n                if query.corrected:\n                    correctionsMade = True\n            issues = [query for query in self.queries if query and (query.isCorrective() and not query._passed)]\n            if any(issues):\n                # something isn't resolved or was unresolved by changes\n                raise RuntimeError(\n                    \"The input inspection did not resolve all queries, \"\n                    \"some issues are creating cyclic resolutions: {}\".format(issues)\n                )\n            runLog.debug(\"{} has finished querying.\".format(self.__class__.__name__))\n\n        if correctionsMade:\n            # find unused file path to store original settings as to avoid overwrite\n            strSkeleton = \"{}_old\".format(self.cs.path.split(\".yaml\")[0])\n            for num in itertools.count():\n                if num == 0:\n                    renamePath = f\"{strSkeleton}.yaml\"\n                else:\n                    renamePath = f\"{strSkeleton}{num}.yaml\"\n                if not self._csRelativePathExists(renamePath):\n                    break\n            # preserve old file before saving settings file\n            runLog.important(f\"Preserving original settings file by renaming `{renamePath}`\")\n            safeCopy(self.cs.path, renamePath)\n            # save settings file\n            self.cs.writeToYamlFile(self.cs.path)\n\n        return correctionsMade\n\n    def addQuery(self, condition, statement, question, correction):\n        \"\"\"Convenience method, query must be resolved, else run fails.\"\"\"\n        if not callable(correction):\n            raise ValueError('Query for \"{}\" malformed. 
Expecting callable.'.format(statement))\n        self.queries.append(Query(condition, statement, question, correction))\n\n    def addQueryBadLocationWillLikelyFail(self, settingName):\n        \"\"\"Add a query indicating the current path for ``settingName`` does not exist and will likely fail.\"\"\"\n        self.addQuery(\n            lambda: not os.path.exists(pathTools.armiAbsPath(self.cs[settingName])),\n            \"Setting {} points to nonexistent location\\n{}\\nFailure extremely likely\".format(\n                settingName, self.cs[settingName]\n            ),\n            \"\",\n            self.NO_ACTION,\n        )\n\n    def addQueryCurrentSettingMayNotSupportFeatures(self, settingName):\n        \"\"\"Add a query that the current value for ``settingName`` may not support certain features.\"\"\"\n        self.addQuery(\n            lambda: self.cs[settingName] != self.cs.getSetting(settingName).default,\n            \"{} set as:\\n{}\\nUsing this location instead of the default location\\n{}\\n\"\n            \"may not support certain functions.\".format(\n                settingName,\n                self.cs[settingName],\n                self.cs.getSetting(settingName).default,\n            ),\n            \"Revert to default location?\",\n            lambda: self._assignCS(settingName, self.cs.getSetting(settingName).default),\n        )\n\n    def _assignCS(self, key, value):\n        \"\"\"Lambda assignment workaround.\"\"\"\n        # this type of assignment works, but be mindful of scoping when trying different methods\n        runLog.extra(f\"Updating setting `{key}` to `{value}`\")\n        self.cs[key] = value\n\n    def _raise(self):\n        raise KeyboardInterrupt(\"Input inspection has been interrupted.\")\n\n    def _inspectBlueprints(self):\n        \"\"\"Blueprints early error detection and old format conversions.\"\"\"\n        from armi.physics.neutronics.settings import CONF_LOADING_FILE\n\n        # if there is a blueprints 
object, we don't need to check for a file\n        if self.cs.filelessBP:\n            return\n\n        self.addQuery(\n            lambda: not self.cs[CONF_LOADING_FILE],\n            \"No blueprints file loaded. Run will probably fail.\",\n            \"\",\n            self.NO_ACTION,\n        )\n\n        self.addQuery(\n            lambda: not self._csRelativePathExists(self.cs[CONF_LOADING_FILE]),\n            \"Blueprints file {} not found. Run will fail.\".format(self.cs[CONF_LOADING_FILE]),\n            \"\",\n            self.NO_ACTION,\n        )\n\n    def _csRelativePathExists(self, filename):\n        csRelativePath = self._csRelativePath(filename)\n        return os.path.exists(csRelativePath) and os.path.isfile(csRelativePath)\n\n    def _csRelativePath(self, filename):\n        return os.path.join(self.cs.inputDirectory, filename)\n\n    def _correctCyclesToZeroBurnup(self):\n        self._assignCS(\"nCycles\", 1)\n        self._assignCS(\"burnSteps\", 0)\n        self._assignCS(\"cycleLength\", None)\n        self._assignCS(\"cycleLengths\", None)\n        self._assignCS(\"availabilityFactor\", None)\n        self._assignCS(\"availabilityFactors\", None)\n        self._assignCS(\"cycles\", [])\n\n    def _checkForBothSimpleAndDetailedCyclesInputs(self):\n        \"\"\"\n        Because the only way to check if a setting has been \"entered\" is to check against the\n        default, if the user specifies all the simple cycle settings exactly as the defaults, this\n        won't be caught. But, it would be very coincidental for the user to _specify_ all the\n        default values when performing any real analysis.\n\n        Also, we must bypass the `Settings` getter and reach directly into the underlying\n        `__settings` dict to avoid triggering an error at this stage in the run. Otherwise an error\n        will inherently be raised if the detailed cycles input is used because the simple cycles\n        inputs have defaults. 
We don't care that those defaults are there, we only have a problem\n        with those defaults being _used_, which will be caught later on.\n        \"\"\"\n        bothCyclesInputTypesPresent = (\n            self.cs._Settings__settings[\"cycleLength\"].value != self.cs._Settings__settings[\"cycleLength\"].default\n            or self.cs._Settings__settings[\"cycleLengths\"].value != self.cs._Settings__settings[\"cycleLengths\"].default\n            or self.cs._Settings__settings[\"burnSteps\"].value != self.cs._Settings__settings[\"burnSteps\"].default\n            or self.cs._Settings__settings[\"availabilityFactor\"].value\n            != self.cs._Settings__settings[\"availabilityFactor\"].default\n            or self.cs._Settings__settings[\"availabilityFactors\"].value\n            != self.cs._Settings__settings[\"availabilityFactors\"].default\n            or self.cs._Settings__settings[\"powerFractions\"].value\n            != self.cs._Settings__settings[\"powerFractions\"].default\n        ) and self.cs[\"cycles\"] != []\n\n        return bothCyclesInputTypesPresent\n\n    def _inspectSettings(self):\n        \"\"\"Check settings for inconsistencies.\"\"\"\n        from armi import operators\n        from armi.physics.fuelCycle.settings import (\n            CONF_SHUFFLE_LOGIC,\n            CONF_SHUFFLE_SEQUENCE_FILE,\n        )\n        from armi.settings.fwSettings.globalSettings import (\n            CONF_EXPLICIT_REPEAT_SHUFFLES,\n            CONF_ZONE_DEFINITIONS,\n            CONF_ZONES_FILE,\n        )\n\n        self.addQuery(\n            lambda: self.cs[\"outputFileExtension\"] == \"pdf\" and self.cs[\"genReports\"],\n            \"Output files of '.pdf' format are not supported by the reporting HTML generator. 
'.pdf' \"\n            \"images will not be included.\",\n            \"Switch to '.png'?\",\n            lambda: self._assignCS(\"outputFileExtension\", \"png\"),\n        )\n\n        (\n            self.addQuery(\n                lambda: (\n                    (self.cs[\"beta\"] and isinstance(self.cs[\"beta\"], list) and not self.cs[\"decayConstants\"])\n                    or (self.cs[\"decayConstants\"] and not self.cs[\"beta\"])\n                ),\n                \"Both beta components and decay constants should be provided if either are being supplied.\",\n                \"\",\n                self.NO_ACTION,\n            ),\n        )\n\n        self.addQuery(\n            lambda: self.cs[\"skipCycles\"] > 0 and not self.cs[\"reloadDBName\"],\n            \"You have chosen to do a restart case without specifying a database to load from. \"\n            \"Run will load from output files, if they exist but burnup, etc. will not be updated.\",\n            \"\",\n            self.NO_ACTION,\n        )\n\n        self.addQuery(\n            lambda: self.cs[\"runType\"] != operators.RunTypes.SNAPSHOTS\n            and self.cs[\"loadStyle\"] == \"fromDB\"\n            and self.cs[\"startCycle\"] == 0\n            and self.cs[\"startNode\"] == 0,\n            \"Starting from cycle 0, and time node 0 was chosen. Restart runs load from \"\n            \"the time node just before the restart. There is no time node to load from \"\n            \"before cycle 0 node 0. 
Either switch to the snapshot operator, start from \"\n            \"a different time step or load from inputs rather than database as \"\n            \"`loadStyle`.\",\n            \"\",\n            self.NO_ACTION,\n        )\n\n        self.addQuery(\n            lambda: self.cs[\"runType\"] == operators.RunTypes.SNAPSHOTS\n            and not (self.cs[\"dumpSnapshot\"] or self.cs[\"defaultSnapshots\"]),\n            \"The Snapshots operator was specified, but no dump snapshots were chosen.\"\n            \"Please specify snapshot steps with the `dumpSnapshot` setting.\",\n            \"\",\n            self.NO_ACTION,\n        )\n\n        self.addQuery(\n            lambda: self.cs.caseTitle.lower() == os.path.splitext(os.path.basename(self.cs[\"reloadDBName\"].lower()))[0],\n            \"Snapshot DB ({0}) and main DB ({1}) cannot have the same name.\"\n            \"Change name of settings file and resubmit.\".format(self.cs[\"reloadDBName\"], self.cs.caseTitle),\n            \"\",\n            self.NO_ACTION,\n        )\n\n        self.addQuery(\n            lambda: self.cs[\"reloadDBName\"] != \"\" and not os.path.exists(self.cs[\"reloadDBName\"]),\n            \"Reload database {} does not exist. \\nPlease point to an existing DB, \"\n            \"or set to empty and load from input.\".format(self.cs[\"reloadDBName\"]),\n            \"\",\n            self.NO_ACTION,\n        )\n\n        def _willBeCopiedFrom(fName):\n            return any(fName == os.path.split(copyFile)[1] for copyFile in self.cs[\"copyFilesFrom\"])\n\n        self.addQuery(\n            lambda: self.cs[CONF_EXPLICIT_REPEAT_SHUFFLES]\n            and not self._csRelativePathExists(self.cs[CONF_EXPLICIT_REPEAT_SHUFFLES])\n            and not _willBeCopiedFrom(self.cs[CONF_EXPLICIT_REPEAT_SHUFFLES]),\n            \"The specified repeat shuffle file `{0}` does not exist, and won't be copied. 
Run will crash.\".format(\n                self.cs[CONF_EXPLICIT_REPEAT_SHUFFLES]\n            ),\n            \"\",\n            self.NO_ACTION,\n        )\n\n        self.addQuery(\n            lambda: self.cs[CONF_SHUFFLE_SEQUENCE_FILE]\n            and not self._csRelativePathExists(self.cs[CONF_SHUFFLE_SEQUENCE_FILE])\n            and not _willBeCopiedFrom(self.cs[CONF_SHUFFLE_SEQUENCE_FILE]),\n            \"The specified shuffle sequence file `{0}` does not exist. Run will crash.\".format(\n                self.cs[CONF_SHUFFLE_SEQUENCE_FILE]\n            ),\n            \"\",\n            self.NO_ACTION,\n        )\n\n        self.addQuery(\n            lambda: (\n                bool(self.cs[CONF_EXPLICIT_REPEAT_SHUFFLES])\n                and (bool(self.cs[CONF_SHUFFLE_SEQUENCE_FILE]) or bool(self.cs[CONF_SHUFFLE_LOGIC]))\n            ),\n            \"explicitRepeatShuffles cannot be used together with shuffleSequenceFile or shuffleLogic. \"\n            \"Please specify either explicitRepeatShuffles alone, or some combination of shuffleSequenceFile\"\n            \"and shuffleLogic.\",\n            \"\",\n            self.NO_ACTION,\n        )\n\n        self.addQuery(\n            lambda: not self.cs[\"power\"] and not self.cs[\"powerDensity\"],\n            \"No power or powerDensity set. You must always start by importing a base settings file.\",\n            \"\",\n            self.NO_ACTION,\n        )\n\n        self.addQuery(\n            lambda: self.cs[\"power\"] > 0 and self.cs[\"powerDensity\"] > 0,\n            \"The power and powerDensity are both set, please note the power will be used as the truth.\",\n            \"\",\n            self.NO_ACTION,\n        )\n\n        self.addQuery(\n            lambda: self.cs[\"outputCacheLocation\"] and not os.path.exists(self.cs[\"outputCacheLocation\"]),\n            \"`outputCacheLocation` path {} does not exist. 
Please specify a location that exists.\".format(\n                self.cs[\"outputCacheLocation\"]\n            ),\n            \"\",\n            self.NO_ACTION,\n        )\n\n        self.addQuery(\n            lambda: (not self.cs[\"tightCoupling\"] and self.cs[\"tightCouplingMaxNumIters\"] != 4),\n            \"You've requested a non default number of tight coupling iterations but left tightCoupling: False.\"\n            \"Do you want to set tightCoupling to True?\",\n            \"\",\n            lambda: self._assignCS(\"tightCoupling\", True),\n        )\n\n        self.addQuery(\n            lambda: (not self.cs[\"tightCoupling\"] and self.cs[\"tightCouplingSettings\"]),\n            \"You've requested non default tight coupling settings but tightCoupling: False.\"\n            \"Do you want to set tightCoupling to True?\",\n            \"\",\n            lambda: self._assignCS(\"tightCoupling\", True),\n        )\n\n        self.addQuery(\n            lambda: self.cs[\"startCycle\"] and self.cs[\"nCycles\"] < self.cs[\"startCycle\"],\n            \"nCycles must be greater than or equal to startCycle in restart cases. nCycles\"\n            \" is the _total_ number of cycles in the completed run (i.e. restarted +\"\n            \" continued cycles). Please update the case settings.\",\n            \"\",\n            self.NO_ACTION,\n        )\n\n        self.addQuery(\n            lambda: self.cs[\"nCycles\"] in [0, None],\n            \"Cannot run 0 cycles. 
Set burnSteps to 0 to activate a single time-independent case.\",\n            \"Set 1 cycle and 0 burnSteps for single time-independent case?\",\n            self._correctCyclesToZeroBurnup,\n        )\n\n        self.addQuery(\n            self._checkForBothSimpleAndDetailedCyclesInputs,\n            \"If specifying detailed cycle history with `cycles`, you may not\"\n            \" also use any of the simple cycle history inputs `cycleLength(s)`,\"\n            \" `burnSteps`, `availabilityFactor(s)`, or `powerFractions`.\"\n            \" Using the detailed cycle history.\",\n            \"\",\n            self.NO_ACTION,\n        )\n\n        def _factorsAreValid(factors, maxVal=1.0):\n            try:\n                expandedList = expandRepeatedFloats(factors)\n            except (ValueError, IndexError):\n                return False\n            return all(0.0 <= val <= maxVal for val in expandedList) and len(expandedList) == self.cs[\"nCycles\"]\n\n        if self.cs[\"cycles\"] == []:\n            self.addQuery(\n                lambda: (self.cs[\"availabilityFactors\"] and not _factorsAreValid(self.cs[\"availabilityFactors\"])),\n                \"`availabilityFactors` was not set to a list compatible with the number of cycles. \"\n                \"Please update input or use constant duration.\",\n                \"Use constant availability factor specified in `availabilityFactor` setting?\",\n                lambda: self._assignCS(\"availabilityFactors\", []),\n            )\n\n            self.addQuery(\n                lambda: (self.cs[\"powerFractions\"] and not _factorsAreValid(self.cs[\"powerFractions\"])),\n                \"`powerFractions` was not set to a compatible list. 
\"\n                \"Please update input or use full power at all cycles.\",\n                \"Use full power for all cycles?\",\n                lambda: self._assignCS(\"powerFractions\", []),\n            )\n\n            self.addQuery(\n                lambda: (self.cs[\"cycleLengths\"] and not _factorsAreValid(self.cs[\"cycleLengths\"], maxVal=1e10)),\n                \"`cycleLengths` was not set to a list compatible with the number of cycles.\"\n                \" Please update input or use constant duration.\",\n                \"Use constant cycle length specified in `cycleLength` setting?\",\n                lambda: self._assignCS(\"cycleLengths\", []),\n            )\n\n            self.addQuery(\n                lambda: (\n                    self.cs[\"runType\"] == operators.RunTypes.STANDARD\n                    and self.cs[\"burnSteps\"] == 0\n                    and (\n                        (len(self.cs[\"cycleLengths\"]) > 1 if self.cs[\"cycleLengths\"] is not None else False)\n                        or self.cs[\"nCycles\"] > 1\n                    )\n                ),\n                \"Cannot run multi-cycle standard cases with 0 burnSteps per cycle. Please update settings.\",\n                \"\",\n                self.NO_ACTION,\n            )\n\n            def decayCyclesHaveInputThatWillBeIgnored():\n                \"\"\"Check if there is any decay-related input that will be ignored.\"\"\"\n                try:\n                    powerFracs = expandRepeatedFloats(self.cs[\"powerFractions\"])\n                    availabilities = expandRepeatedFloats(self.cs[\"availabilityFactors\"]) or (\n                        [self.cs[\"availabilityFactor\"]] * self.cs[\"nCycles\"]\n                    )\n                except Exception:\n                    return True\n\n                # This will be a full decay step and any power fraction will be ignored. 
May be ok.\n                return any(pf > 0.0 and af == 0.0 for pf, af in zip(powerFracs, availabilities))\n\n            self.addQuery(\n                lambda: (\n                    self.cs[\"cycleLengths\"]\n                    and self.cs[\"powerFractions\"]\n                    and decayCyclesHaveInputThatWillBeIgnored()\n                    and not self.cs[\"cycles\"]\n                ),\n                \"At least one cycle has a non-zero power fraction but an availability of zero. Please \"\n                \"update the input.\",\n                \"\",\n                self.NO_ACTION,\n            )\n\n        self.addQuery(\n            lambda: self.cs[\"skipCycles\"] > 0 and not os.path.exists(self.cs.caseTitle + \".restart.dat\"),\n            \"This is a restart case, but the required restart file {0}.restart.dat is not found\".format(\n                self.cs.caseTitle\n            ),\n            \"\",\n            self.NO_ACTION,\n        )\n\n        self.addQuery(\n            lambda: self.cs[\"deferredInterfacesCycle\"] > self.cs[\"nCycles\"],\n            \"The deferred interface activation cycle exceeds set cycle occurrence. \"\n            \"Interfaces will not be activated in this run!\",\n            \"\",\n            self.NO_ACTION,\n        )\n\n        self.addQuery(\n            lambda: (self.cs[CONF_ZONE_DEFINITIONS] and self.cs[CONF_ZONES_FILE]),\n            f\"Cannot specify both {CONF_ZONE_DEFINITIONS} and {CONF_ZONES_FILE}. 
Please remove one and resubmit.\",\n            \"\",\n            self.NO_ACTION,\n        )\n\n\ndef createQueryRevertBadPathToDefault(inspector, settingName, initialLambda=None):\n    \"\"\"\n    Return a query to revert a bad path to its default.\n\n    Parameters\n    ----------\n    inspector: Inspector\n        the inspector who's settings are being queried\n    settingName: str\n        name of the setting to inspect\n    initialLambda: None or callable function\n        If ``None``, the callable argument for :py:meth:`addQuery` is does the setting's path exist.\n        If more complicated callable arguments are needed, they can be passed in as the\n        ``initialLambda`` setting.\n    \"\"\"\n    if initialLambda is None:\n        initialLambda = lambda: (\n            not os.path.exists(pathTools.armiAbsPath(inspector.cs[settingName]))\n            and inspector.cs.getSetting(settingName).offDefault\n        )  # solution is to revert to default\n\n    query = Query(\n        initialLambda,\n        \"Setting {} points to a nonexistent location:\\n{}\".format(settingName, inspector.cs[settingName]),\n        \"Revert to default location?\",\n        inspector.cs.getSetting(settingName).revertToDefault,\n    )\n    return query\n\n\ndef validateVersion(versionThis: str, versionRequired: str) -> bool:\n    \"\"\"Helper function to allow users to verify that their version matches the settings file.\n\n    Parameters\n    ----------\n    versionThis: str\n        The version of this ARMI, App, or Plugin.\n        This MUST be in the form: 1.2.3.\n    versionRequired: str\n        The version to compare against, say in a Settings file.\n        This must be in one of the forms: 1.2.3, 1.2, or 1.\n\n    Returns\n    -------\n    bool\n        Does this version match the version in the Settings file/object?\n    \"\"\"\n    fullV = r\"\\d+\\.\\d+\\.\\d+\"\n    medV = r\"\\d+\\.\\d+\"\n    minV = r\"\\d+\"\n\n    if versionRequired == \"uncontrolled\":\n      
  # This default flag means we don't want to check the version.\n        return True\n    elif re.search(fullV, versionThis) is None:\n        raise ValueError(\"The input version ({0}) does not match the required format: {1}\".format(versionThis, fullV))\n    elif re.search(fullV, versionRequired) is not None:\n        return versionThis == versionRequired\n    elif re.search(medV, versionRequired) is not None:\n        return \".\".join(versionThis.split(\".\")[:2]) == versionRequired\n    elif re.search(minV, versionRequired) is not None:\n        return versionThis.split(\".\")[0] == versionRequired\n    else:\n        raise ValueError(\"The required version is not a valid format: {}\".format(versionRequired))\n"
  },
  {
    "path": "armi/settings/tests/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"
  },
  {
    "path": "armi/settings/tests/test_inspectors.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for settings validation system.\"\"\"\n\nimport os\nimport unittest\n\nfrom armi import context, operators, settings\nfrom armi.settings import settingsValidation\nfrom armi.settings.settingsValidation import createQueryRevertBadPathToDefault\nfrom armi.utils import directoryChangers\n\n\nclass TestInspector(unittest.TestCase):\n    def setUp(self):\n        self.td = directoryChangers.TemporaryDirectoryChanger()\n        self.td.__enter__()\n        self.init_mode = context.CURRENT_MODE\n        self.cs = settings.Settings()\n        self.inspector = operators.getOperatorClassFromSettings(self.cs).inspector(self.cs)\n        self.inspector.queries = []  # clear out the auto-generated ones\n        self.filepathYaml = os.path.join(os.getcwd(), self._testMethodName + \"_test_setting_io.yaml\")\n\n    def tearDown(self):\n        context.Mode.setMode(self.init_mode)\n        self.td.__exit__(None, None, None)\n\n    def test_query(self):\n        buh = {1: 2, 3: 4}\n\n        def defdef(x, y, z):\n            x[y] = z\n\n        self.inspector.addQuery(\n            lambda: buh[1] == 2,\n            \"beepbopboopbeep\",\n            \"bonkbonk\",\n            lambda: defdef(buh, 1, 10),\n        )\n        query = self.inspector.queries[0]\n        if query:\n            query.correction()\n        self.assertEqual(buh[1], 10)\n        
self.assertFalse(query)\n\n        self.assertEqual(str(query), \"<Query: beepbopboopbeep>\")\n\n    def test_overwriteSettingsCorrectiveQuery(self):\n        \"\"\"\n        Tests the case where a corrective query is resolved.\n        Checks to make sure the settings file is overwritten with the resolved setting.\n\n        .. test:: Settings have validation and correction tools.\n            :id: T_ARMI_SETTINGS_RULES0\n            :tests: R_ARMI_SETTINGS_RULES\n        \"\"\"\n        # load settings from test settings file\n        self.cs[\"cycleLength\"] = 300.0\n        self.cs.writeToYamlFile(self.filepathYaml)\n        self.cs.loadFromInputFile(self.filepathYaml)\n        self.assertEqual(self.cs[\"cycleLength\"], 300.0)\n\n        # define corrective query\n        def csChange(x, y, z):\n            x[y] = z\n\n        self.inspector.addQuery(\n            lambda: self.inspector.cs[\"cycleLength\"] == 300.0,\n            \"Changing `cycleLength` from 300.0 to 666\",\n            \":D\",\n            lambda: csChange(self.cs, \"cycleLength\", 666),\n        )\n\n        # redefine prompt function in order to circumvent need for user input\n        def fakePrompt(*inputs):\n            return True\n\n        nominalPromptFunction = settingsValidation.prompt\n        settingsValidation.prompt = fakePrompt\n\n        try:\n            # run inspector\n            self.inspector.run()\n\n            # check to see if file was overwritten correctly\n            self.cs.loadFromInputFile(self.filepathYaml)\n            self.assertEqual(self.cs[\"cycleLength\"], 666)\n\n            # check to see if original settings were saved in \"_old.yaml\" file\n            oldFilePath = \"{}_old.yaml\".format(self.filepathYaml.split(\".yaml\")[0])\n            self.assertTrue(os.path.exists(oldFilePath) and os.path.isfile(oldFilePath))\n            self.csOriginal = settings.Settings()\n            self.csOriginal.loadFromInputFile(oldFilePath)\n            
self.assertEqual(self.csOriginal[\"cycleLength\"], 300.0)\n\n        finally:\n            # reset prompt function to nominal\n            settingsValidation.prompt = nominalPromptFunction\n\n    def test_changeOfCS(self):\n        self.inspector.addQuery(\n            lambda: self.inspector.cs[\"runType\"] == \"banane\",\n            \"babababa\",\n            \"\",\n            self.inspector.NO_ACTION,\n        )\n        query = self.inspector.queries[0]\n        self.assertFalse(query)\n\n        newCS = settings.Settings().duplicate()\n        newSettings = {\"runType\": \"banane\"}\n        newCS = newCS.modified(newSettings=newSettings)\n\n        self.inspector.cs = newCS\n        self.assertTrue(query)\n        self.assertIsNone(self.inspector.NO_ACTION())\n\n    def test_nonCorrectiveQuery(self):\n        self.inspector.addQuery(lambda: True, \"babababa\", \"\", self.inspector.NO_ACTION)\n        self.inspector.run()\n\n    def test_callableCorrectionCheck(self):\n        successes = [lambda: True, lambda: False, self.inspector.NO_ACTION]\n        failures = [1, \"\", None]\n\n        for correction in successes:\n            self.inspector.addQuery(lambda: True, \"\", \"\", correction)\n\n        for correction in failures:\n            with self.assertRaises(ValueError):\n                self.inspector.addQuery(lambda: True, \"\", \"\", correction)\n\n    def test_assignCS(self):\n        keys = sorted(self.inspector.cs.keys())\n        self.assertIn(\"nCycles\", keys)\n\n    def test_createQueryRevertBadPathToDefault(self):\n        query = createQueryRevertBadPathToDefault(self.inspector, \"nTasks\")\n        self.assertEqual(\n            str(query),\n            \"<Query: Setting nTasks points to a nonexistent location:\\n1>\",\n        )\n\n    def test_correctCyclesToZeroBurnup(self):\n        self.inspector._assignCS(\"nCycles\", 666)\n        self.inspector._assignCS(\"burnSteps\", 666)\n\n        
self.assertEqual(self.inspector.cs[\"nCycles\"], 666)\n        self.assertEqual(self.inspector.cs[\"burnSteps\"], 666)\n\n        self.inspector._correctCyclesToZeroBurnup()\n\n        self.assertEqual(self.inspector.cs[\"nCycles\"], 1)\n        self.assertEqual(self.inspector.cs[\"burnSteps\"], 0)\n\n    def test_checkForSimpleAndDetailedCycInps(self):\n        self.inspector._assignCS(\n            \"cycles\",\n            [\n                {\"cumulative days\": [1, 2, 3]},\n                {\"cycle length\": 1},\n                {\"step days\": [3, 3, 3]},\n            ],\n        )\n        self.assertFalse(self.inspector._checkForBothSimpleAndDetailedCyclesInputs())\n\n        self.inspector._assignCS(\n            \"cycles\",\n            [\n                {\"cumulative days\": [1, 2, 3]},\n                {\"cycle length\": 1},\n                {\"step days\": [3, 3, 3]},\n            ],\n        )\n        self.inspector._assignCS(\"cycleLength\", 666)\n        self.assertTrue(self.inspector._checkForBothSimpleAndDetailedCyclesInputs())\n"
  },
  {
    "path": "armi/settings/tests/test_settings.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for new settings system with plugin import.\"\"\"\n\nimport copy\nimport io\nimport logging\nimport os\nimport unittest\n\nimport voluptuous as vol\nfrom ruamel.yaml import YAML\n\nfrom armi import configure, getApp, getPluginManagerOrFail, plugins, settings\nfrom armi.physics.fuelCycle import FuelHandlerPlugin\nfrom armi.physics.fuelCycle.settings import CONF_SHUFFLE_LOGIC\nfrom armi.physics.neutronics.settings import CONF_NEUTRONICS_KERNEL\nfrom armi.reactor.flags import Flags\nfrom armi.settings import caseSettings, setting\nfrom armi.settings.settingsValidation import Inspector, validateVersion\nfrom armi.tests import ARMI_RUN_PATH, TEST_ROOT, mockRunLogs\nfrom armi.utils import directoryChangers\nfrom armi.utils.customExceptions import NonexistentSetting\n\nTHIS_DIR = os.path.dirname(__file__)\n\n\nclass DummySettingPlugin1(plugins.ArmiPlugin):\n    @staticmethod\n    @plugins.HOOKIMPL\n    def defineSettings():\n        \"\"\"Define settings for the plugin.\"\"\"\n        return [\n            setting.Setting(\n                \"extendableOption\",\n                default=\"DEFAULT\",\n                label=\"Neutronics Kernel\",\n                description=\"The neutronics / depletion solver for global flux solve.\",\n                enforcedOptions=True,\n                options=[\"DEFAULT\", \"OTHER\"],\n            ),\n            
setting.Setting(\n                \"avocado\",\n                default=0,\n                label=\"Avocados\",\n                description=\"Avocados are delicious.\",\n            ),\n        ]\n\n\nclass DummySettingPlugin2(plugins.ArmiPlugin):\n    @staticmethod\n    @plugins.HOOKIMPL\n    def defineSettings():\n        \"\"\"Define settings for the plugin.\"\"\"\n        return [\n            setting.Option(\"PLUGIN\", \"extendableOption\"),\n            setting.Default(\"PLUGIN\", \"extendableOption\"),\n        ]\n\n\nclass PluginAddsOptions(plugins.ArmiPlugin):\n    @staticmethod\n    @plugins.HOOKIMPL\n    def defineSettings():\n        \"\"\"Define settings for the plugin.\"\"\"\n        return [\n            setting.Option(\"MCNP\", CONF_NEUTRONICS_KERNEL),\n            setting.Option(\"MCNP_Slab\", CONF_NEUTRONICS_KERNEL),\n        ]\n\n\nclass TestSettings(unittest.TestCase):\n    def setUp(self):\n        self.cs = caseSettings.Settings()\n\n    def test_updateEnvironmentSettingsFrom(self):\n        envSettings = [\n            \"trace\",\n            \"profile\",\n            \"coverage\",\n            \"branchVerbosity\",\n            \"moduleVerbosity\",\n            \"verbosity\",\n            \"outputCacheLocation\",\n        ]\n        self.assertEqual(self.cs.environmentSettings, envSettings)\n\n        newEnv = {es: 9 for es in envSettings}\n        newEnv[\"moduleVerbosity\"] = {}\n        self.cs.updateEnvironmentSettingsFrom(newEnv)\n        self.assertEqual(self.cs[\"verbosity\"], \"9\")\n\n    def test_metaData(self):\n        \"\"\"Test we can get and set the important settings metadata.\n\n        .. 
test:: Test getting and setting import settings metadata.\n            :id: T_ARMI_SETTINGS_META\n            :tests: R_ARMI_SETTINGS_META\n        \"\"\"\n        # test get/set on caseTitle\n        self.assertEqual(self.cs.caseTitle, \"armi\")\n        testTitle = \"test_metaData\"\n        self.cs.caseTitle = testTitle\n        self.assertEqual(self.cs.caseTitle, testTitle)\n\n        # test get/set on comment\n        self.assertEqual(self.cs[\"comment\"], \"\")\n        testComment = \"Comment: test_metaData\"\n        self.cs = self.cs.modified(newSettings={\"comment\": testComment})\n        self.assertEqual(self.cs[\"comment\"], testComment)\n\n        # test get/set on version\n        self.assertEqual(len(self.cs[\"versions\"]), 0)\n        self.cs = self.cs.modified(newSettings={\"versions\": {\"something\": 1.234}})\n\n        d = self.cs[\"versions\"]\n        self.assertEqual(len(d), 1)\n        self.assertEqual(d[\"something\"], 1.234)\n\n\nclass TestAddingOptions(unittest.TestCase):\n    def setUp(self):\n        self.dc = directoryChangers.TemporaryDirectoryChanger()\n        self.dc.__enter__()\n        # load in the plugin with extra, added options\n        self.pm = getPluginManagerOrFail()\n        self.pm.register(PluginAddsOptions)\n\n    def tearDown(self):\n        self.dc.__exit__(None, None, None)\n        self.pm.unregister(PluginAddsOptions)\n\n    def test_addingOptions(self):\n        # modify the default/text settings YAML file to include neutronicsKernel\n        fin = os.path.join(TEST_ROOT, \"armiRun.yaml\")\n        txt = open(fin, \"r\").read()\n        txt = txt.replace(\"\\n  nCycles:\", \"\\n  neutronicsKernel: MCNP\\n  nCycles:\")\n        fout = \"test_addingOptions.yaml\"\n        open(fout, \"w\").write(txt)\n\n        # this settings file should load fine, and test some basics\n        cs = settings.Settings(fout)\n        self.assertEqual(cs[\"burnSteps\"], 2)\n        self.assertEqual(cs[CONF_NEUTRONICS_KERNEL], 
\"MCNP\")\n\n    def test_illDefinedOptions(self):\n        \"\"\"Test an edge case where the Setting was ill-defined.\"\"\"\n        s = setting.Setting(\n            \"illDefinedOptions\",\n            default=\"DEFAULT\",\n            label=\"stuff\",\n            description=\"Whatever\",\n            enforcedOptions=True,\n        )\n\n        self.assertIsNone(s.options)\n\n        with mockRunLogs.BufferLog() as mock:\n            self.assertIs(mock.getStdout(), \"\")\n            with self.assertRaises(AttributeError):\n                s.addOptions([1, 2])\n            self.assertIn(\"has no default options\", mock.getStdout())\n\n        with mockRunLogs.BufferLog() as mock:\n            self.assertIs(mock.getStdout(), \"\")\n            with self.assertRaises(AttributeError):\n                s.addOption(3)\n            self.assertIn(\"has no default options\", mock.getStdout())\n\n\nclass TestSettings2(unittest.TestCase):\n    def setUp(self):\n        # We are going to be messing with the plugin manager, which is global ARMI\n        # state, so we back it up and restore the original when we are done.\n        self._backupApp = copy.copy(getApp())\n\n    def tearDown(self):\n        configure(self._backupApp, permissive=True)\n\n    def test_schemaChecksType(self):\n        newSettings = FuelHandlerPlugin.defineSettings()\n\n        good_input = io.StringIO(\n            \"\"\"\nassemblyRotationAlgorithm: buReducingAssemblyRotation\nshuffleLogic: {}\n\"\"\".format(__file__)\n        )\n\n        bad_input = io.StringIO(\n            \"\"\"\nassemblyRotationAlgorithm: buReducingAssemblyRotatoin\n\"\"\"\n        )\n\n        yaml = YAML(typ=\"rt\")\n\n        inp = yaml.load(good_input)\n        for inputSetting, inputVal in inp.items():\n            settin = [s for s in newSettings if s.name == inputSetting][0]\n            settin.schema(inputVal)\n\n        inp = yaml.load(bad_input)\n        for inputSetting, inputVal in inp.items():\n            with 
self.assertRaises(vol.error.MultipleInvalid):\n                settin = [s for s in newSettings if s.name == inputSetting][0]\n                settin.schema(inputVal)\n\n    def test_listsMutable(self):\n        listSetting = setting.Setting(\"aList\", default=[], label=\"Dummy list\", description=\"whatever\")\n\n        listSetting.value = [1, 2, 3]\n        self.assertEqual([1, 2, 3], listSetting.value)\n\n        listSetting.value[-1] = 4\n        self.assertEqual([1, 2, 4], listSetting.value)\n\n    def test_listCoercion(self):\n        \"\"\"Make sure list setting values get coerced right.\"\"\"\n        listSetting = setting.Setting(\"aList\", default=[0.2, 5], label=\"Dummy list\", description=\"whatever\")\n        listSetting.value = [1, 2, 3]\n        self.assertEqual(listSetting.value, [1.0, 2.0, 3.0])\n        self.assertTrue(isinstance(listSetting.value[0], float))\n\n    def test_typeDetection(self):\n        \"\"\"Ensure some of the type inference operations work.\"\"\"\n        listSetting = setting.Setting(\n            \"aList\",\n            default=[],\n            label=\"label\",\n            description=\"desc\",\n            schema=vol.Schema([float]),\n        )\n        self.assertEqual(listSetting.containedType, float)\n        listSetting = setting.Setting(\n            \"aList\",\n            default=[],\n            label=\"label\",\n            description=\"desc\",\n            schema=vol.Schema([vol.Coerce(float)]),\n        )\n        self.assertEqual(listSetting.containedType, float)\n\n    def test_csWorks(self):\n        \"\"\"Ensure plugin settings become available and have defaults.\"\"\"\n        cs = settings.Settings()\n        self.assertEqual(cs[\"nTasks\"], 1)\n\n    def test_pluginValidatorsAreDiscovered(self):\n        cs = caseSettings.Settings()\n        cs = cs.modified(\n            caseTitle=\"test_pluginValidatorsAreDiscovered\",\n            newSettings={\n                CONF_SHUFFLE_LOGIC: \"nothere\",\n      
          \"cycleLengths\": [3, 4, 5, 6, 9],\n                \"powerFractions\": [0.2, 0.2, 0.2, 0.2, 0.2],\n            },\n        )\n\n        inspector = Inspector(cs)\n        self.assertTrue(any([\"Shuffling will not occur\" in query.statement for query in inspector.queries]))\n\n    def test_pluginSettings(self):\n        \"\"\"Test settings change depending on what plugins are registered.\n\n        .. test:: Registering a plugin can change what settings exist.\n            :id: T_ARMI_PLUGIN_SETTINGS\n            :tests: R_ARMI_PLUGIN_SETTINGS\n        \"\"\"\n        pm = getPluginManagerOrFail()\n        pm.register(DummySettingPlugin1)\n        # We have a setting; this should be fine\n        cs = caseSettings.Settings()\n\n        self.assertEqual(cs[\"extendableOption\"], \"DEFAULT\")\n        self.assertEqual(cs[\"avocado\"], 0)\n        # We shouldn't have any settings from the other plugin, so this should be an error.\n        with self.assertRaises(vol.error.MultipleInvalid):\n            newSettings = {\"extendableOption\": \"PLUGIN\"}\n            cs = cs.modified(newSettings=newSettings)\n\n        pm.register(DummySettingPlugin2)\n        cs = caseSettings.Settings()\n        self.assertEqual(cs[\"extendableOption\"], \"PLUGIN\")\n        # Now we should have the option from plugin 2; make sure that works\n        cs = cs.modified(newSettings=newSettings)\n        cs[\"extendableOption\"] = \"PLUGIN\"\n        self.assertIn(\"extendableOption\", cs.keys())\n        pm.unregister(DummySettingPlugin2)\n        pm.unregister(DummySettingPlugin1)\n\n        # Now try the same, but adding the plugins in a different order. 
This is to make\n        # sure that it doesn't matter if the Setting or its Options come first\n        pm.register(DummySettingPlugin2)\n        pm.register(DummySettingPlugin1)\n        cs = caseSettings.Settings()\n        self.assertEqual(cs[\"extendableOption\"], \"PLUGIN\")\n        self.assertEqual(cs[\"avocado\"], 0)\n\n    def test_default(self):\n        \"\"\"\n        Make sure default updating mechanism works.\n\n        .. test:: The setting default is mandatory.\n            :id: T_ARMI_SETTINGS_DEFAULTS\n            :tests: R_ARMI_SETTINGS_DEFAULTS\n        \"\"\"\n        a = setting.Setting(\"testsetting\", 0, description=\"whatever\")\n        newDefault = setting.Default(5, \"testsetting\")\n        a.changeDefault(newDefault)\n        self.assertEqual(a.value, 5)\n\n    def test_getSettingsSetByUser(self):\n        cs = caseSettings.Settings()\n        settingsList = cs.getSettingsSetByUser(ARMI_RUN_PATH)\n        # This test is dependent on the current setup of armiRun.yaml, which includes\n        # some default settings values\n        for sett in [\"availabilityFactor\", \"db\"]:\n            self.assertIn(sett, settingsList)\n        self.assertNotIn(\"nTasks\", settingsList)\n\n    def test_setModuleVerbosities(self):\n        # init settings and use them to set module-level logging levels\n        cs = caseSettings.Settings()\n        newSettings = {\"moduleVerbosity\": {\"test_setModuleVerbosities\": \"debug\"}}\n        cs = cs.modified(newSettings=newSettings)\n\n        # set the logger once, and check it is was set\n        cs.setModuleVerbosities()\n        logger = logging.getLogger(\"test_setModuleVerbosities\")\n        self.assertEqual(logger.level, 10)\n\n        # try to set the logger again, without forcing it\n        newSettings = {\"moduleVerbosity\": {\"test_setModuleVerbosities\": \"error\"}}\n        cs = cs.modified(newSettings=newSettings)\n        cs.setModuleVerbosities()\n        self.assertEqual(logger.level, 
10)\n\n        # try to set the logger again, with force=True\n        cs.setModuleVerbosities(force=True)\n        self.assertEqual(logger.level, 40)\n\n    def test_getFailures(self):\n        \"\"\"Make sure the correct error is thrown when getting a nonexistent setting.\"\"\"\n        cs = caseSettings.Settings()\n\n        with self.assertRaises(NonexistentSetting):\n            cs.getSetting(\"missingFake\")\n\n        with self.assertRaises(NonexistentSetting):\n            _ = cs[\"missingFake\"]\n\n    def test_settingIsOkayToGrab(self):\n        cs = caseSettings.Settings()\n        newSettings = {\"cycles\": [{\"cumulative days\": [1]}]}\n        cs = cs.modified(newSettings=newSettings)\n\n        with self.assertRaises(ValueError):\n            _ = cs[\"cycleLength\"]\n\n    def test_modified(self):\n        \"\"\"Prove that using the modified() method does not mutate the original object.\"\"\"\n        # init settings\n        cs = caseSettings.Settings()\n\n        # prove this setting doesn't exist\n        with self.assertRaises(NonexistentSetting):\n            cs.getSetting(\"extendableOption\")\n\n        # ensure that defaults in getSetting works\n        val = cs.getSetting(\"extendableOption\", 789)\n        self.assertEqual(val, 789)\n\n        # prove the new settings object has the new setting\n        cs2 = cs.modified(newSettings={\"extendableOption\": \"PLUGIN\"})\n        self.assertEqual(cs2[\"extendableOption\"], \"PLUGIN\")\n\n        # prove modified() didn't alter the original object\n        with self.assertRaises(NonexistentSetting):\n            cs.getSetting(\"extendableOption\")\n\n        # prove that successive applications of \"modified\" don't fail\n        cs3 = cs2.modified(newSettings={\"numberofGenericParams\": 7})\n        _cs4 = cs3.modified(newSettings={\"somethingElse\": 123})\n\n    def test_copySetting(self):\n        \"\"\"Ensure that when we copy a Setting() object, the result is sound.\n\n        Notes\n      
  -----\n        In particular, self.schema and self._customSchema on a Setting object are\n        removed by Setting.__getstate__, and that has been a problem in the past.\n        \"\"\"\n        # get a baseline: show how the Setting object looks to start\n        s1 = setting.Setting(\"testCopy\", 765, description=\"whatever\")\n        self.assertEqual(s1.name, \"testCopy\")\n        self.assertEqual(s1._value, 765)\n        self.assertTrue(hasattr(s1, \"schema\"))\n        self.assertTrue(hasattr(s1, \"_customSchema\"))\n\n        # show that copy(Setting) is working correctly\n        s2 = copy.copy(s1)\n        self.assertEqual(s2._value, 765)\n        self.assertEqual(s2.name, \"testCopy\")\n        self.assertTrue(hasattr(s2, \"schema\"))\n        self.assertTrue(hasattr(s2, \"_customSchema\"))\n\n    def test_copySettingNotDefault(self):\n        \"\"\"Ensure that when we copy a Setting() object, the result is sound\n        when the Setting value is set to a non-default value.\n        \"\"\"\n        # get a baseline: show how the Setting object looks to start\n        s1 = setting.Setting(\"testCopy\", 765, description=\"whatever\")\n        s1.value = 999\n        self.assertEqual(s1.name, \"testCopy\")\n        self.assertEqual(s1._value, 999)\n        self.assertTrue(hasattr(s1, \"schema\"))\n        self.assertTrue(hasattr(s1, \"_customSchema\"))\n\n        # show that copy(Setting) is working correctly\n        s2 = copy.copy(s1)\n        self.assertEqual(s2._value, 999)\n        self.assertEqual(s2.name, \"testCopy\")\n        self.assertTrue(hasattr(s2, \"schema\"))\n        self.assertTrue(hasattr(s2, \"_customSchema\"))\n\n    def test_empty(self):\n        cs = caseSettings.Settings()\n        cs = cs.modified(newSettings={\"buGroups\": []})\n        self.assertEqual(cs[\"buGroups\"], [])\n\n\nclass TestSettingsUtils(unittest.TestCase):\n    \"\"\"Tests for utility functions.\"\"\"\n\n    def setUp(self):\n        self.dc = 
directoryChangers.TemporaryDirectoryChanger()\n        self.dc.__enter__()\n\n        # Create a little case suite on the fly. Whipping it up from defaults should be\n        # more evergreen than committing settings files as a test resource\n        cs = caseSettings.Settings()\n        cs.writeToYamlFile(\"settings1.yaml\")\n        cs.writeToYamlFile(\"settings2.yaml\")\n        with open(\"notSettings.yaml\", \"w\") as f:\n            f.write(\"some: other\\nyaml: file\\n\")\n        os.mkdir(\"subdir\")\n        cs.writeToYamlFile(\"subdir/settings3.yaml\")\n        cs.writeToYamlFile(\"subdir/skipSettings.yaml\")\n\n    def tearDown(self):\n        self.dc.__exit__(None, None, None)\n\n    def test_recursiveScan(self):\n        loadedSettings = settings.recursivelyLoadSettingsFiles(\".\", [\"*.yaml\"], ignorePatterns=[\"skip*\"])\n        names = {cs.caseTitle for cs in loadedSettings}\n        self.assertIn(\"settings1\", names)\n        self.assertIn(\"settings2\", names)\n        self.assertIn(\"settings3\", names)\n        self.assertNotIn(\"skipSettings\", names)\n\n        loadedSettings = settings.recursivelyLoadSettingsFiles(\n            \".\", [\"*.yaml\"], recursive=False, ignorePatterns=[\"skip*\"]\n        )\n        names = {cs.caseTitle for cs in loadedSettings}\n        self.assertIn(\"settings1\", names)\n        self.assertIn(\"settings2\", names)\n        self.assertNotIn(\"settings3\", names)\n\n    def test_prompt(self):\n        selection = settings.promptForSettingsFile(1)\n        self.assertEqual(selection, \"settings1.yaml\")\n\n\nclass TestFlagListSetting(unittest.TestCase):\n    def test_flagListSetting(self):\n        \"\"\"Test that a list of strings can be converted to a list of flags and back.\"\"\"\n        flagsAsStringList = [\"DUCT\", \"FUEL\", \"CLAD\"]\n        flagsAsFlagList = [Flags.DUCT, Flags.FUEL, Flags.CLAD]\n\n        fs = setting.FlagListSetting(name=\"testFlagSetting\", default=[], description=\"whatever\")\n    
    # Set the value as a list of strings first\n        fs.value = flagsAsStringList\n        self.assertEqual(fs.value, flagsAsFlagList)\n        self.assertEqual(fs.dump(), flagsAsStringList)\n\n        # Set the value as a list of flags\n        fs.value = flagsAsFlagList\n        self.assertEqual(fs.value, flagsAsFlagList)\n        self.assertEqual(fs.dump(), flagsAsStringList)\n\n    def test_invalidFlagListTypeError(self):\n        \"\"\"Test raising a TypeError when a list is not provided.\"\"\"\n        fs = setting.FlagListSetting(name=\"testFlagSetting\", default=[], description=\"whatever\")\n        with self.assertRaises(TypeError):\n            fs.value = \"DUCT\"\n\n\nclass TestSettingsValidationUtils(unittest.TestCase):\n    def test_validateVersion(self):\n        # controlled version, and true\n        self.assertTrue(validateVersion(\"1.22.3\", \"1.22.3\"))\n        self.assertTrue(validateVersion(\"1.3.102\", \"1.3.102\"))\n        self.assertTrue(validateVersion(\"1.2.3\", \"1.2\"))\n        self.assertTrue(validateVersion(\"1.2.37\", \"1.2\"))\n        self.assertTrue(validateVersion(\"13.7.3\", \"13.7\"))\n        self.assertTrue(validateVersion(\"1.22.310\", \"1\"))\n\n        # uncontrolled version is always true\n        self.assertTrue(validateVersion(\"4.2.0\", \"uncontrolled\"))\n\n        # controlled versions and false\n        self.assertFalse(validateVersion(\"11.2.3\", \"11.2.4\"))\n        self.assertFalse(validateVersion(\"1.2.3\", \"3.2.1\"))\n        self.assertFalse(validateVersion(\"11.2.3\", \"2.2\"))\n\n        # examples of various errors\n        with self.assertRaises(ValueError):\n            validateVersion(\"1.2.a\", \"1.20.3\")\n\n        with self.assertRaises(ValueError):\n            validateVersion(\"nope\", \"7\")\n\n        with self.assertRaises(ValueError):\n            validateVersion(\"1.2.3\", \"zzz\")\n"
  },
  {
    "path": "armi/settings/tests/test_settingsIO.py",
    "content": "# Copyright 2020 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License0.\n\"\"\"Testing the settingsIO.\"\"\"\n\nimport datetime\nimport io\nimport os\nimport unittest\n\nfrom armi import context, settings\nfrom armi.cli import entryPoint\nfrom armi.settings import setting, settingsIO\nfrom armi.tests import TEST_ROOT\nfrom armi.utils import directoryChangers\nfrom armi.utils.customExceptions import (\n    InvalidSettingsFileError,\n    NonexistentSetting,\n    SettingException,\n)\n\n\nclass SettingsFailureTests(unittest.TestCase):\n    def test_settingsObjSetting(self):\n        sets = settings.Settings()\n        with self.assertRaises(NonexistentSetting):\n            sets[\"idontexist\"] = \"this test should fail because no setting named idontexist should exist.\"\n\n    def test_loadFromYamlFailsOnBadNames(self):\n        ss = settings.Settings()\n        with self.assertRaises(TypeError):\n            ss.loadFromInputFile(None)\n        with self.assertRaises(IOError):\n            ss.loadFromInputFile(\"this-settings-file-does-not-exist.yaml\")\n\n    def test_invalidFile(self):\n        with self.assertRaises(InvalidSettingsFileError):\n            cs = settings.caseSettings.Settings()\n            reader = settingsIO.SettingsReader(cs)\n            reader.readFromStream(io.StringIO(\"useless:\\n    should_fail\"))\n\n\nclass SettingsReaderTests(unittest.TestCase):\n    def setUp(self):\n        self.cs = 
settings.caseSettings.Settings()\n\n    def test_basicSettingsReader(self):\n        reader = settingsIO.SettingsReader(self.cs)\n\n        self.assertEqual(reader[\"nTasks\"], 1)\n        self.assertEqual(reader[\"nCycles\"], 1)\n\n        self.assertFalse(getattr(reader, \"filelessBP\"))\n        self.assertEqual(getattr(reader, \"path\"), \"\")\n\n    def test_readFromFile(self):\n        \"\"\"Read settings from a (human-readable) YAML file.\n\n        .. test:: Settings can be input from a human-readable text file.\n            :id: T_ARMI_SETTINGS_IO_TXT0\n            :tests: R_ARMI_SETTINGS_IO_TXT\n        \"\"\"\n        with directoryChangers.TemporaryDirectoryChanger():\n            inPath = os.path.join(TEST_ROOT, \"armiRun.yaml\")\n            outPath = \"test_readFromFile.yaml\"\n\n            txt = open(inPath, \"r\").read()\n            verb = \"branchVerbosity:\"\n            txt0, txt1 = txt.split(verb)\n            newTxt = f\"{txt0}{verb} fake\\n  {verb}{txt1}\"\n            open(outPath, \"w\").write(newTxt)\n\n            with self.assertRaises(InvalidSettingsFileError):\n                settings.caseSettings.Settings(outPath)\n\n\nclass SettingsRenameTests(unittest.TestCase):\n    testSettings = [\n        setting.Setting(\n            \"testSetting1\",\n            default=None,\n            oldNames=[(\"oSetting1\", None), (\"osetting1\", datetime.date.today())],\n            description=\"Just a unit test setting.\",\n        ),\n        setting.Setting(\n            \"testSetting2\",\n            default=None,\n            oldNames=[(\"oSetting2\", None)],\n            description=\"Just a unit test setting.\",\n        ),\n        setting.Setting(\n            \"testSetting3\",\n            default=None,\n            description=\"Just a unit test setting.\",\n        ),\n    ]\n\n    def test_rename(self):\n        renamer = settingsIO.SettingRenamer({setting.name: setting for setting in self.testSettings})\n\n        
self.assertEqual(renamer.renameSetting(\"testSetting1\"), (\"testSetting1\", False))\n        self.assertEqual(renamer.renameSetting(\"oSetting1\"), (\"testSetting1\", True))\n        # this one is expired\n        self.assertEqual(renamer.renameSetting(\"osetting1\"), (\"osetting1\", False))\n        self.assertEqual(renamer.renameSetting(\"oSetting2\"), (\"testSetting2\", True))\n        self.assertEqual(renamer.renameSetting(\"testSetting2\"), (\"testSetting2\", False))\n        self.assertEqual(renamer.renameSetting(\"testSetting3\"), (\"testSetting3\", False))\n\n        # No rename; let it through\n        self.assertEqual(renamer.renameSetting(\"boo!\"), (\"boo!\", False))\n\n    def test_collidingRenames(self):\n        settings = {\n            setting.name: setting\n            for setting in self.testSettings\n            + [\n                setting.Setting(\n                    \"someOtherSetting\",\n                    default=None,\n                    oldNames=[(\"oSetting1\", None)],\n                    description=\"Just a unit test setting.\",\n                )\n            ]\n        }\n        with self.assertRaises(SettingException):\n            _ = settingsIO.SettingRenamer(settings)\n\n\nclass SettingsWriterTests(unittest.TestCase):\n    def setUp(self):\n        self.td = directoryChangers.TemporaryDirectoryChanger()\n        self.td.__enter__()\n        self.init_mode = context.CURRENT_MODE\n        self.filepathYaml = os.path.join(os.getcwd(), self._testMethodName + \"test_setting_io.yaml\")\n        self.cs = settings.Settings()\n        self.cs = self.cs.modified(newSettings={\"nCycles\": 55})\n\n    def tearDown(self):\n        context.Mode.setMode(self.init_mode)\n        self.td.__exit__(None, None, None)\n\n    def test_writeShort(self):\n        \"\"\"Setting output as a sparse file.\"\"\"\n        self.cs.writeToYamlFile(self.filepathYaml, style=\"short\")\n        self.cs.loadFromInputFile(self.filepathYaml)\n        txt = 
open(self.filepathYaml, \"r\").read()\n        self.assertIn(\"nCycles: 55\", txt)\n        self.assertNotIn(\"nTasks\", txt)\n\n    def test_writeMedium(self):\n        \"\"\"Setting output as a sparse file that only includes defaults if they are\n        user-specified.\n        \"\"\"\n        with open(self.filepathYaml, \"w\") as stream:\n            # Specify a setting that is also a default\n            self.cs.writeToYamlStream(stream, \"medium\", [\"nTasks\"])\n        txt = open(self.filepathYaml, \"r\").read()\n        self.assertIn(\"nCycles: 55\", txt)\n        self.assertIn(\"nTasks: 1\", txt)\n\n    def test_writeFull(self):\n        \"\"\"Setting output as a full, all defaults included file.\n\n        .. test:: Settings can be output to a human-readable text file.\n            :id: T_ARMI_SETTINGS_IO_TXT1\n            :tests: R_ARMI_SETTINGS_IO_TXT\n        \"\"\"\n        self.cs.writeToYamlFile(self.filepathYaml, style=\"full\")\n        txt = open(self.filepathYaml, \"r\").read()\n        self.assertIn(\"nCycles: 55\", txt)\n        # check a default setting\n        self.assertIn(\"nTasks: 1\", txt)\n\n    def test_writeYaml(self):\n        self.cs.writeToYamlFile(self.filepathYaml)\n        self.cs.loadFromInputFile(self.filepathYaml)\n        self.assertEqual(self.cs[\"nCycles\"], 55)\n\n    def test_errorSettingsWriter(self):\n        with self.assertRaises(ValueError):\n            _ = settingsIO.SettingsWriter(self.cs, \"wrong\")\n\n\nclass MockEntryPoint(entryPoint.EntryPoint):\n    name = \"dummy\"\n\n\nclass SettingArgsTests(unittest.TestCase):\n    def setUp(self):\n        self.cs = None\n\n    def test_commandLineSetting(self):\n        ep = MockEntryPoint()\n        self.cs = cs = ep.cs\n\n        self.assertEqual(cs[\"nCycles\"], 1)\n        ep.createOptionFromSetting(\"nCycles\")\n        ep.parse_args([\"--nCycles\", \"5\"])\n        self.assertEqual(cs[\"nCycles\"], 5)\n\n    def test_cannotLoadSettingsAfterParsingCLI(self):\n   
     self.test_commandLineSetting()\n\n        with self.assertRaises(RuntimeError):\n            self.cs.loadFromInputFile(\"somefile.yaml\")\n"
  },
  {
    "path": "armi/testing/__init__.py",
    "content": "# Copyright 2024 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nImportable testing utilities.\n\nThis is a very limited set of ARMI testing tools, meant to be importable as part of the ARMI API. The goal is to provide\na small set of high quality tools to help downstream ARMI developers write tests.\n\nNotes\n-----\nThis will not be a catch-all for random unit test functions. Be very sparing here.\n\"\"\"\n\nimport os\nimport pickle\n\nfrom armi import runLog\nfrom armi.reactor import geometry, grids, reactors\n\nTEST_ROOT = os.path.realpath(os.path.join(os.path.dirname(os.path.abspath(__file__)), \"..\", \"tests\"))\nTESTING_ROOT = os.path.dirname(os.path.abspath(__file__))\nARMI_RUN_PATH = os.path.join(TEST_ROOT, \"armiRun.yaml\")\nCOMPXS_PATH = os.path.join(TESTING_ROOT, \"resources\", \"COMPXS.ascii\")\nISOAA_PATH = os.path.join(TEST_ROOT, \"ISOAA\")\n_TEST_REACTORS = {}  # dictionary of pickled string of test reactors (for fast caching)\n\n\ndef loadTestReactor(inputFilePath=TEST_ROOT, customSettings=None, inputFileName=\"armiRun.yaml\", useCache=True):\n    \"\"\"\n    Loads a test reactor. 
Can be used in other test modules.\n\n    Parameters\n    ----------\n    inputFilePath : str, default=TEST_ROOT\n        Path to the directory of the input file.\n    customSettings : dict with str keys and values of any type, default=None\n        For each key in customSettings, the cs which is loaded from the armiRun.yaml will be overwritten to the value\n        given in customSettings for that key.\n    inputFileName : str, default=\"armiRun.yaml\"\n        Name of the input file to run.\n    useCache : bool, default=True\n        Look for a copy of this Reactor in the cache, if not in the cache, put it there. (Set to False when you are\n        sure there will only be one test using this test reactor.)\n\n    Notes\n    -----\n    If the armiRun.yaml test reactor 3 rings instead of 9, most unit tests that use it go ~4 times faster. The problem\n    is it would breat a LOT of downstream tests that import this method. It is still worth it though.\n\n    Returns\n    -------\n    o : Operator\n    r : Reactor\n    \"\"\"\n    from armi import operators, settings\n\n    global _TEST_REACTORS\n    fName = os.path.abspath(os.path.join(inputFilePath, inputFileName))\n    customSettings = customSettings or {}\n    reactorHash = hash(fName + str(customSettings))\n\n    if useCache and reactorHash in _TEST_REACTORS:\n        # return test reactor from cache\n        o, r = pickle.loads(_TEST_REACTORS[reactorHash])\n        o.reattach(r, o.cs)\n        return o, r\n\n    # Overwrite settings if desired\n    cs = settings.Settings(fName=fName)\n    if customSettings:\n        cs = cs.modified(newSettings=customSettings)\n\n    if \"verbosity\" not in customSettings:\n        runLog.setVerbosity(\"error\")\n\n    o = operators.factory(cs)\n    r = reactors.loadFromCs(cs)\n\n    o.initializeInterfaces(r)\n    o.r.core.regenAssemblyLists()\n\n    if useCache:\n        # cache it for fast load for other future tests protocol=2 allows for classes with __slots__ but not\n      
  # __getstate__ to be pickled\n        _TEST_REACTORS[reactorHash] = pickle.dumps((o, o.r), protocol=2)\n\n    return o, o.r\n\n\ndef reduceTestReactorRings(r, cs, maxNumRings):\n    \"\"\"Helper method for the test reactor above.\n\n    The goal is to reduce the size of the reactor for tests that don't need such a large reactor, and would run much\n    faster with a smaller one.\n    \"\"\"\n    maxRings = r.core.getNumRings()\n    if maxNumRings > maxRings:\n        runLog.info(f\"The test reactor has a maximum of {maxRings} rings.\")\n        return\n    elif maxNumRings <= 1:\n        raise ValueError(\"The test reactor must have multiple rings.\")\n\n    # reducing the size of the test reactor, by removing the outer rings\n    for ring in range(maxRings, maxNumRings, -1):\n        r.core.removeAssembliesInRing(ring, cs)\n\n\ndef getEmptyHexReactor():\n    \"\"\"Make an empty hex reactor for use in tests.\"\"\"\n    from armi.reactor import blueprints\n\n    bp = blueprints.Blueprints()\n    reactor = reactors.Reactor(\"Reactor\", bp)\n    reactor.add(reactors.Core(\"Core\"))\n    reactor.core.spatialGrid = grids.HexGrid.fromPitch(1.0)\n    reactor.core.spatialGrid.symmetry = geometry.SymmetryType(\n        geometry.DomainType.THIRD_CORE, geometry.BoundaryType.PERIODIC\n    )\n    reactor.core.spatialGrid.geomType = geometry.HEX\n    reactor.core.spatialGrid.armiObject = reactor.core\n\n    return reactor\n\n\ndef getEmptyCartesianReactor(pitch=(10.0, 16.0), throughCenterAssembly=True):\n    \"\"\"Return an empty Cartesian reactor for use in tests.\"\"\"\n    from armi.reactor import blueprints\n\n    bp = blueprints.Blueprints()\n    reactor = reactors.Reactor(\"Reactor\", bp)\n    reactor.add(reactors.Core(\"Core\"))\n    reactor.core.spatialGrid = grids.CartesianGrid.fromRectangle(*pitch)\n    reactor.core.spatialGrid.symmetry = geometry.SymmetryType(\n        geometry.DomainType.QUARTER_CORE,\n        geometry.BoundaryType.REFLECTIVE,\n        
throughCenterAssembly=throughCenterAssembly,\n    )\n    reactor.core.spatialGrid.geomType = geometry.CARTESIAN\n    reactor.core.spatialGrid.armiObject = reactor.core\n\n    return reactor\n"
  },
  {
    "path": "armi/testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml",
    "content": "# The comments in this file are important, as they are used\n# to bring in sections of this file into the tutorial in the docs.\n# start-block-clad\nblocks:\n    fuel: &block_fuel\n        clad:\n            shape: Circle\n            material: HT9\n            Tinput: 25.0\n            Thot: 450.0\n            id: 0.6962\n            od: 0.808\n            mult: 271\n# end-block-clad\n        wire:\n            shape: Helix\n            material: HT9\n            Tinput: 25.0\n            Thot: 450.0\n            axialPitch: 30.0\n            helixDiameter: 0.8888\n            id: 0.0\n            od: 0.0808\n            mult: 271\n# end-block-wire\n        fuel:\n            shape: Circle\n            material: UZr\n            Tinput: 25.0\n            Thot: 500.0\n            id: 0.0\n            mult: 271\n            od: 0.6029\n# end-block-fuel\n        bond:\n            shape: Circle\n            material: Sodium\n            Tinput: 447.0\n            Thot: 447.0\n            id: fuel.od\n            mult: fuel.mult\n            od: clad.id\n# end-block-bond\n        duct:\n            shape: Hexagon\n            material: HT9\n            Tinput: 25.0\n            Thot: 450.0\n            ip: 14.922\n            op: 15.710\n            mult: 1.0\n# end-block-duct\n        intercoolant:\n            shape: Hexagon\n            material: Sodium\n            Tinput: 450.0\n            Thot: 450.0\n            ip: duct.op\n            op: 16.142\n            mult: 1.0\n# end-block-intercoolant\n        coolant:\n            shape: DerivedShape\n            material: Sodium\n            Tinput: 450.0\n            Thot: 450.0\n# end-block-coolant\n    radial shield: &block_shield\n        control:\n            shape: Circle\n            material: B4C\n            Tinput: 597.0\n            Thot: 597.0\n            id: 0.0\n            od: 0.6962\n            mult: 271\n        duct:\n            shape: Hexagon\n            material: HT9\n       
     Tinput: 25.0\n            Thot: 450.0\n            ip: 14.922\n            op: 15.710\n            mult: 1.0\n        intercoolant:\n            shape: Hexagon\n            material: Sodium\n            Tinput: 447.0\n            Thot: 447.0\n            ip: duct.op\n            mult: 1.0\n            op: 16.142\n        coolant:\n            shape: DerivedShape\n            material: Sodium\n            Tinput: 450.0\n            Thot: 450.0\n# end-block-radialshield\n    reflector: &block_reflector\n        reflector:\n            shape: Circle\n            material: HT9\n            Tinput: 450.0\n            Thot: 450.0\n            id: 0.0\n            od: 0.6962\n            mult: 271\n        wire:\n            shape: Helix\n            material: HT9\n            Tinput: 25.0\n            Thot: 450.0\n            axialPitch: 30.0\n            helixDiameter: 0.777\n            id: 0.0\n            od: 0.0808\n            mult: 271\n        duct:\n            shape: Hexagon\n            material: HT9\n            Tinput: 25.0\n            Thot: 450.0\n            ip: 14.922\n            op: 15.710\n            mult: 1.0\n        intercoolant:\n            shape: Hexagon\n            material: Sodium\n            Tinput: 447.0\n            Thot: 447.0\n            ip: duct.op\n            mult: 1.0\n            op: 16.142\n        coolant:\n            shape: DerivedShape\n            material: Sodium\n            Tinput: 450.0\n            Thot: 450.0\n# end-block-reflector\n    control: &block_control\n        duct:\n            shape: Hexagon\n            material: HT9\n            Tinput: 25.0\n            Thot: 450.0\n            ip: 14.922\n            op: 15.710\n            mult: 1.0\n        intercoolant:\n            shape: Hexagon\n            material: Sodium\n            Tinput: 447.0\n            Thot: 447.0\n            ip: duct.op\n            op: 16.142\n            mult: 1.0\n        coolant:\n            shape: DerivedShape\n            
material: Sodium\n            Tinput: 450.0\n            Thot: 450.0\n# end-block-control\n    plenum: &block_plenum\n        clad:\n            shape: Circle\n            material: HT9\n            Tinput: 25.0\n            Thot: 450.0\n            id: 0.6962\n            od: 0.808\n            mult: 271\n        wire:\n            shape: Helix\n            material: HT9\n            Tinput: 25.0\n            Thot: 450.0\n            axialPitch: 30.0\n            helixDiameter: 0.88888\n            id: 0.0\n            od: 0.0808\n            mult: 271\n        gap:\n            shape: Circle\n            material: Void\n            Tinput: 450.0\n            Thot: 450.0\n            id: 0.0\n            od: clad.id\n            mult: 271\n        duct:\n            shape: Hexagon\n            material: HT9\n            Tinput: 25.0\n            Thot: 450.0\n            ip: 14.922\n            op: 15.710\n            mult: 1.0\n        intercoolant:\n            shape: Hexagon\n            material: Sodium\n            Tinput: 447.0\n            Thot: 447.0\n            ip: duct.op\n            mult: 1.0\n            op: 16.142\n        coolant:\n            shape: DerivedShape\n            material: Sodium\n            Tinput: 450.0\n            Thot: 450.0\n# end-block-plenum\nassemblies:\n    heights: &heights\n        - 15.0\n        - 20.32\n        - 20.32\n        - 20.32\n        - 20.32\n        - 20.32\n        - 191.14\n    axial mesh points: &mesh\n        - 1\n        - 2\n        - 2\n        - 2\n        - 2\n        - 2\n        - 8\n# end-assemblies-common\n    inner fuel:\n        specifier: IC\n        blocks: &fuel_blocks\n            - *block_reflector\n            - *block_fuel\n            - *block_fuel\n            - *block_fuel\n            - *block_fuel\n            - *block_fuel\n            - *block_plenum\n        height: *heights\n        axial mesh points: *mesh\n        material modifications:\n            U235_wt_frac:\n            
    - ''\n                - 0.127\n                - 0.127\n                - 0.127\n                - 0.127\n                - 0.127\n                - ''\n        xs types: &IC_xs\n            - A\n            - A\n            - A\n            - A\n            - A\n            - A\n            - A\n# end-assemblies-ic\n    middle core fuel:\n        specifier: MC\n        blocks: *fuel_blocks\n        height: *heights\n        axial mesh points: *mesh\n        material modifications:\n            U235_wt_frac:\n                - ''\n                - 0.153\n                - 0.153\n                - 0.153\n                - 0.153\n                - 0.153\n                - ''\n        xs types:\n            - B\n            - B\n            - B\n            - B\n            - B\n            - B\n            - B\n# end-assemblies-mc\n    outer core fuel:\n        specifier: OC\n        blocks: *fuel_blocks\n        height: *heights\n        axial mesh points: *mesh\n        material modifications:\n            U235_wt_frac:\n                - ''\n                - 0.180\n                - 0.180\n                - 0.180\n                - 0.180\n                - 0.180\n                - ''\n        xs types:\n            - C\n            - C\n            - C\n            - C\n            - C\n            - C\n            - C\n# end-assemblies-oc\n    radial reflector:\n        specifier: RR\n        blocks: [*block_reflector]\n        height: [307.74]\n        axial mesh points: [1]\n        xs types: [A]\n# end-assemblies-rr\n    radial shield:\n        specifier: SH\n        blocks: [*block_shield]\n        height: [307.74]\n        axial mesh points: [1]\n        xs types: [A]\n# end-assemblies-sh\n    control:\n        specifier: PC\n        blocks: [*block_control]\n        height: [307.74]\n        axial mesh points: [1]\n        xs types: [A]\n    ultimate shutdown:\n        specifier: US\n        blocks: [*block_control]\n        height: [307.74]\n        
axial mesh points: [1]\n        xs types: [A]\n# end-assemblies-section\nsystems:\n    core:\n        grid name: core\n        origin:\n            x: 0.0\n            y: 0.0\n            z: 0.0\ngrids:\n    core:\n        !include anl-afci-177-coreMap.yaml\n\n# end-systems-section\n"
  },
  {
    "path": "armi/testing/reactors/anl-afci-177/anl-afci-177-coreMap.yaml",
    "content": "geom: hex\nsymmetry: third periodic\nlattice map: |\n  -     SH   SH   SH\n  -  SH   SH   SH   SH\n   SH   RR   RR   RR   SH\n     RR   RR   RR   RR   SH\n   RR   RR   RR   RR   RR   SH\n     RR   OC   OC   RR   RR   SH\n       OC   OC   OC   RR   RR   SH\n     OC   OC   OC   OC   RR   RR\n       OC   MC   OC   OC   RR   SH\n         MC   MC   PC   OC   RR   SH\n       MC   MC   MC   OC   OC   RR\n         MC   MC   MC   OC   RR   SH\n           PC   MC   MC   OC   RR   SH\n         MC   MC   MC   MC   OC   RR\n           IC   MC   MC   OC   RR   SH\n             IC   US   MC   OC   RR\n           IC   IC   MC   OC   RR   SH\n             IC   MC   MC   OC   RR\n           IC   IC   MC   PC   RR   SH\n"
  },
  {
    "path": "armi/testing/reactors/anl-afci-177/anl-afci-177-fuelManagement.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom armi.physics.fuelCycle.fuelHandlers import FuelHandler\nfrom armi.utils import units\n\n\nclass SampleShuffler(FuelHandler):\n    def chooseSwaps(self, shuffleParameters):\n        cycleSeconds = self.r.p.cycleLength * self.r.p.availabilityFactor * units.SECONDS_PER_DAY\n        for a in self.r.core:\n            peakFlux = a.getMaxParam(\"fastFlux\")\n            if peakFlux * cycleSeconds > 4.0e23:\n                newAssem = self.r.core.createAssemblyOfType(a.getType())\n                self.dischargeSwap(newAssem, a)\n\n    def getFactorList(self, cycle, cs=None):\n        \"\"\"Parameters here can be used to adjust shuffling philosophy vs. cycle.\"\"\"\n        return {}, []\n"
  },
  {
    "path": "armi/testing/reactors/anl-afci-177/anl-afci-177.yaml",
    "content": "# This file is part of the walthrough_inputs tutorial in ARMI, which\n# uses .. literalinclude to bring in sections of this file. Thus,\n# the comments and order are important. These will get wiped out\n# if you load and re-write a settings file via the ARMI gui, unfortunately.\n# begin-settings\nsettings:\n  availabilityFactor: 0.9\n  power: 1000000000.0\n  cycleLength: 411.11\n# end-section-1\n  loadingFile: anl-afci-177-blueprints.yaml\n  shuffleLogic: anl-afci-177-fuelManagement.py\n  fuelHandlerName: SampleShuffler\n# end-section-2\n  nCycles: 10\n  burnSteps: 2\n# end-section-3\n  buGroups:\n    - 100\n  comment: ANL-AFCI-177 CR 1.0 metal core but with HALEU instead of TRU\n  genXS: Neutron\n  nTasks: 1\n  versions:\n    armi: uncontrolled\n"
  },
  {
    "path": "armi/testing/reactors/c5g7/c5g7-blueprints.yaml",
    "content": "# Simple description of the C5G7 benchmark problem\n# General description from: https://www.oecd-nea.org/upload/docs/application/pdf/2019-12/nsc-doc2003-16.pdf\n# Composition/dimensions description from: https://www.oecd-nea.org/upload/docs/application/pdf/2020-01/nsc-doc96-02-rev2.pdf\n# start-custom-isotopics\ncustom isotopics:\n    # NEA/NSC/DOC(96)2 Table 2 - Isotopic Distributions for each medium\n    mox low: # 4.3%\n        input format: number densities\n        U235: 5.00E-5\n        U238: 2.21E-2\n        PU238: 1.50E-5\n        PU239: 5.80E-4\n        PU240: 2.40E-4\n        PU241: 9.80E-5\n        PU242: 5.40E-5\n        AM241: 1.30E-5\n        O: 4.63E-2\n    mox medium: # 7.0%\n        input format: number densities\n        U235: 5.00E-5\n        U238: 2.21E-2\n        PU238: 2.40E-5\n        PU239: 9.30E-4\n        PU240: 3.90E-4\n        PU241: 1.52E-4\n        PU242: 8.40E-5\n        AM241: 2.00E-5\n        O: 4.63E-2\n    mox high: # 8.7%\n        input format: number densities\n        U235: 5.00E-5\n        U238: 2.21E-2\n        PU238: 3.00E-5\n        PU239: 1.16E-3\n        PU240: 4.90E-4\n        PU241: 1.90E-4\n        PU242: 1.05E-4\n        AM241: 2.50E-5\n        O: 4.63E-2\n    UO2:\n        input format: number densities\n        U235: 8.65e-4\n        U238: 2.225E-2\n        O: 4.622E-2\n    moderator:\n        input format: number densities\n        H: 6.70e-2\n        O: 3.35E-2\n        B: 2.78E-5\n    Zr clad:\n        input format: number densities\n        ZR: 4.30E-2\n    Al clad:\n        input format: number densities\n        AL27: 6.00e-2\n    fission chamber:\n        # NEA/NSC/DOC(96)2 Documents:\n        # \"Central guide tube contains: moderator (as defined in Table 2)\n        # and 1.0E-8 at/(b cm) of U-235\"\n        input format: number densities\n        H: 6.70e-2\n        O: 3.35E-2\n        B: 2.78E-5\n        U235: 1.0e-8\n# end-custom-isotopics\nblocks:\n    uo2: &block_uo2\n        # 
NEA/NSC/DOC(96)2 Table 1 - Cell geometries\n        grid name: UO2 grid\n        fuel:\n            shape: Circle\n            material: UO2\n            isotopics: UO2\n            Tinput: 20.0\n            Thot: 20.0\n            od: .8190\n            latticeIDs: [U]\n        gap 1: &fuel_gap_1\n            shape: Circle\n            material: Void\n            Tinput: 20.0\n            Thot: 20.0\n            id: fuel.od\n            od: zirconium clad.id\n            latticeIDs: [U]\n        zirconium clad: &clad_Zr\n            shape: Circle\n            material: Custom\n            isotopics: Zr clad\n            Tinput: 20.0\n            Thot: 20.0\n            id: .8360\n            od: .9500\n            latticeIDs: [U]\n        gap 2: &fuel_gap_2\n            shape: Circle\n            material: Void\n            Tinput: 20.0\n            Thot: 20.0\n            id: zirconium clad.od\n            od: aluminum clad.id\n            latticeIDs: [U]\n        aluminum clad: &clad_Al\n            # NEA/NSC/DOC(96)2 Documents:\n            # \"This clad is used to simulate hot conditions at room temperature\n            # (decrease the moderation ratio)\"\n            shape: Circle\n            material: Custom\n            isotopics: Al clad\n            Tinput: 20.0\n            Thot: 20.0\n            id: .9700\n            od: 1.0800\n            latticeIDs: [U]\n        moderator: &moderator\n            shape: DerivedShape\n            material: SaturatedWater\n            isotopics: moderator\n            Tinput: 450.0\n            Thot: 450.0\n        # Moderator within the guide tube\n        inner moderator guide tube: &guide_tube_moderator\n            shape: Circle\n            material: SaturatedWater\n            isotopics: moderator\n            Tinput: 20.0\n            Thot: 20.0\n            od: guide tube.id\n            latticeIDs: [GT]\n        guide tube: &guide_tube\n            shape: Circle\n            material: Custom\n            
isotopics: Al clad\n            Tinput: 20.0\n            Thot: 20.0\n            id: .6800\n            od: 1.0800\n            latticeIDs: [GT]\n        fission chamber guide tube: &fission_chamber_guide_tube\n            <<: *guide_tube\n            # Avoid giving this the same flag as \"guide tube\" by implementing\n            # a custom flag. This is done to distinguish the \"fission chamber guide tube\"\n            # from the regular \"guide tube\". This demonstrates the use of setting\n            # flags directly rather than relying on them to be implied based on the\n            # name.\n            flags: fission chamber structure\n            latticeIDs: [FC]\n        fission chamber: &fission_chamber\n            shape: Circle\n            material: Custom\n            isotopics: fission chamber\n            Tinput: 20.0\n            Thot: 20.0\n            od: .8190 # No documentation fission chamber dims of composition\n            latticeIDs: [FC]\n        inner moderator FC: &fission_chamber_mod\n            # No documentation of this either, but assuming fission chamber\n            # has same od as fuel, so there needs to be something in the gap.\n            shape: Circle\n            material: Void\n            Tinput: 20.0\n            Thot: 20.0\n            id: fission chamber.od\n            od: guide tube.id\n            latticeIDs: [FC]\n        pitch: &pitch\n        # dummy component for assembly sizing\n            shape: Square\n            material: Void\n            Tinput: 20.0\n            Thot: 20.0\n            widthInner: 21.42\n            widthOuter: 21.42\n            mult: 1.0\n# end-block-uo2\n    mox: &block_mox\n        grid name: MOX grid\n        mox low fuel:\n            shape: Circle\n            material: UO2\n            isotopics: mox low\n            Tinput: 20.0\n            Thot: 20.0\n            od: .8190\n            latticeIDs: [ML]\n        mox medium fuel:\n            shape: Circle\n            
material: UO2\n            isotopics: mox medium\n            Tinput: 20.0\n            Thot: 20.0\n            od: .8190\n            latticeIDs: [MM]\n        mox high fuel:\n            shape: Circle\n            material: UO2\n            isotopics: mox high\n            Tinput: 20.0\n            Thot: 20.0\n            od: .8190\n            latticeIDs: [MH]\n        void 1:\n            <<: *fuel_gap_1\n            id: mox low fuel.od\n            latticeIDs: [ML, MM, MH]\n        zirconium clad:\n            <<: *clad_Zr\n            latticeIDs: [ML, MM, MH]\n        void 2:\n            <<: *fuel_gap_2\n            latticeIDs: [ML, MM, MH]\n        aluminum clad:\n             # See Aluminum Clad note above about why there are 2 clads.\n            <<: *clad_Al\n            latticeIDs: [ML, MM, MH]\n        moderator: *moderator\n        inner moderator GT: *guide_tube_moderator\n        guide tube: *guide_tube\n        fission chamber guide tube: *fission_chamber_guide_tube\n        fission chamber: *fission_chamber\n        moderator fission chamber: *fission_chamber_mod\n        pitch: *pitch\n# end-block-mox\n    moderator: &block_mod\n        moderator:\n            shape: Square\n            material: SaturatedWater\n            isotopics: moderator\n            Tinput: 20.0\n            Thot: 20.0\n            widthOuter: 21.42\n            mult: 1.0\n# end-block-mod\nassemblies:\n    heights: &heights\n        - 64.26\n        - 64.26\n        - 64.26\n        - 21.42\n    axial mesh points: &mesh\n        - 3\n        - 3\n        - 3\n        - 2\n\n    UO2:\n        flags: fuel\n        specifier: UO2\n        blocks:\n            - *block_uo2\n            - *block_uo2\n            - *block_uo2\n            - *block_mod\n        height: *heights\n        axial mesh points: *mesh\n        xs types: [A, A, A, A]\n    mox:\n        flags: fuel\n        specifier: MOX\n        blocks:\n            - *block_mox\n            - *block_mox\n            - 
*block_mox\n            - *block_mod\n        height: *heights\n        axial mesh points: *mesh\n        xs types: [A, A, A, A]\n    mod:\n        specifier: MOD\n        blocks:\n            - *block_mod\n            - *block_mod\n            - *block_mod\n            - *block_mod\n        height: *heights\n        axial mesh points: *mesh\n        xs types: [A, A, A, A]\n# end-assemblies\nsystems:\n    core:\n        grid name: core\n\n        origin:\n            x: 0.0\n            y: 0.0\n            z: 0.0\n# end-systems\ngrids:\n    core:\n        symmetry: quarter reflective\n        geom: cartesian\n        lattice pitch:\n            x: 21.42\n            y: 21.42\n        lattice map: |\n         MOD MOD MOD\n         MOX UO2 MOD\n         UO2 MOX MOD\n# end-grid-core\n    UO2 grid:\n        symmetry: full\n        geom: cartesian\n        lattice pitch:\n            x: 1.26\n            y: 1.26\n        lattice map: |\n            U  U  U  U  U  U  U  U  U  U  U  U  U  U  U  U  U\n            U  U  U  U  U  U  U  U  U  U  U  U  U  U  U  U  U\n            U  U  U  U  U  GT U  U  GT U  U  GT U  U  U  U  U\n            U  U  U  GT U  U  U  U  U  U  U  U  U  GT U  U  U\n            U  U  U  U  U  U  U  U  U  U  U  U  U  U  U  U  U\n            U  U  GT U  U  GT U  U  GT U  U  GT U  U  GT U  U\n            U  U  U  U  U  U  U  U  U  U  U  U  U  U  U  U  U\n            U  U  U  U  U  U  U  U  U  U  U  U  U  U  U  U  U\n            U  U  GT U  U  GT U  U  FC U  U  GT U  U  GT U  U\n            U  U  U  U  U  U  U  U  U  U  U  U  U  U  U  U  U\n            U  U  U  U  U  U  U  U  U  U  U  U  U  U  U  U  U\n            U  U  GT U  U  GT U  U  GT U  U  GT U  U  GT U  U\n            U  U  U  U  U  U  U  U  U  U  U  U  U  U  U  U  U\n            U  U  U  GT U  U  U  U  U  U  U  U  U  GT U  U  U\n            U  U  U  U  U  GT U  U  GT U  U  GT U  U  U  U  U\n            U  U  U  U  U  U  U  U  U  U  U  U  U  U  U  U  U\n            U  U  U  U  U  U  U  U  U  U  U  
U  U  U  U  U  U\n# end-grid-UO2\n    MOX grid:\n        symmetry: full\n        geom: cartesian\n        lattice pitch:\n            x: 1.26\n            y: 1.26\n        lattice map: |\n            ML ML ML ML ML ML ML ML ML ML ML ML ML ML ML ML ML\n            ML MM MM MM MM MM MM MM MM MM MM MM MM MM MM MM ML\n            ML MM MM MM MM GT MM MM GT MM MM GT MM MM MM MM ML\n            ML MM MM GT MM MH MH MH MH MH MH MH MM GT MM MM ML\n            ML MM MM MM MH MH MH MH MH MH MH MH MH MM MM MM ML\n            ML MM GT MH MH GT MH MH GT MH MH GT MH MH GT MM ML\n            ML MM MM MH MH MH MH MH MH MH MH MH MH MH MM MM ML\n            ML MM MM MH MH MH MH MH MH MH MH MH MH MH MM MM ML\n            ML MM GT MH MH GT MH MH FC MH MH GT MH MH GT MM ML\n            ML MM MM MH MH MH MH MH MH MH MH MH MH MH MM MM ML\n            ML MM MM MH MH MH MH MH MH MH MH MH MH MH MM MM ML\n            ML MM GT MH MH GT MH MH GT MH MH GT MH MH GT MM ML\n            ML MM MM MM MH MH MH MH MH MH MH MH MH MM MM MM ML\n            ML MM MM GT MM MH MH MH MH MH MH MH MM GT MM MM ML\n            ML MM MM MM MM GT MM MM GT MM MM GT MM MM MM MM ML\n            ML MM MM MM MM MM MM MM MM MM MM MM MM MM MM MM ML\n            ML ML ML ML ML ML ML ML ML ML ML ML ML ML ML ML ML\n# end-grid-MOX\nnuclide flags:\n    H: {burn: false, xs: true}\n    O:\n        burn: false\n        xs: true\n        expandTo: [\"O16\", \"O17\"] # O18 is not in many nuclear data sets.\n    B: {burn: false, xs: true}\n    AL: {burn: false, xs: true}\n    ZR: {burn: false, xs: true}\n    U235: {burn: false, xs: true}\n    U238: {burn: false, xs: true}\n    PU238: {burn: false, xs: true}\n    PU239: {burn: false, xs: true}\n    PU240: {burn: false, xs: true}\n    PU241: {burn: false, xs: true}\n    PU242: {burn: false, xs: true}\n    AM241: {burn: false, xs: true}\n# end-nucflags\n"
  },
  {
    "path": "armi/testing/reactors/c5g7/c5g7-settings.yaml",
    "content": "settings:\n# global\n  availabilityFactor: 0.9\n  buGroups:\n    - 100\n  burnSteps: 2\n  comment: C5G7 LWR Benchmark inputs\n  cycleLength: 411.11\n  loadingFile: c5g7-blueprints.yaml\n  nCycles: 10\n  nTasks: 1\n  power: 1000000000.0\n  versions:\n    armi: uncontrolled\n\n# database\n  db: true\n\n# neutronics\n  genXS: Neutron\n\n# report\n  genReports: false\n"
  },
  {
    "path": "armi/testing/reactors/godiva/godiva-blueprints.yaml",
    "content": "nuclide flags:\n  PU237: {burn: false, xs: true, expandTo: []}\n  PU240: {burn: false, xs: true, expandTo: []}\n  PU241: {burn: false, xs: true, expandTo: []}\n  AR: {burn: false, xs: true, expandTo: []}\n  PA233: {burn: false, xs: true, expandTo: []}\n  NP238: {burn: false, xs: true, expandTo: []}\n  AR36: {burn: false, xs: true, expandTo: []}\n  TH230: {burn: false, xs: true, expandTo: []}\n  AR38: {burn: false, xs: true, expandTo: []}\n  U238: {burn: false, xs: true, expandTo: []}\n  U239: {burn: false, xs: true, expandTo: []}\n  C: {burn: false, xs: true, expandTo: []}\n  LFP35: {burn: false, xs: true, expandTo: []}\n  U233: {burn: false, xs: true, expandTo: []}\n  U234: {burn: false, xs: true, expandTo: []}\n  U235: {burn: false, xs: true, expandTo: []}\n  U236: {burn: false, xs: true, expandTo: []}\n  U237: {burn: false, xs: true, expandTo: []}\n  PU239: {burn: false, xs: true, expandTo: []}\n  PU238: {burn: false, xs: true, expandTo: []}\n  TH234: {burn: false, xs: true, expandTo: []}\n  TH232: {burn: false, xs: true, expandTo: []}\n  AR40: {burn: false, xs: true, expandTo: []}\n  LFP39: {burn: false, xs: true, expandTo: []}\n  DUMP2: {burn: false, xs: true, expandTo: []}\n  LFP41: {burn: false, xs: true, expandTo: []}\n  LFP40: {burn: false, xs: true, expandTo: []}\n  PU242: {burn: false, xs: true, expandTo: []}\n  PU236: {burn: false, xs: true, expandTo: []}\n  U232: {burn: false, xs: true, expandTo: []}\n  DUMP1: {burn: false, xs: true, expandTo: []}\n  LFP38: {burn: false, xs: true, expandTo: []}\n  AM243: {burn: false, xs: true, expandTo: []}\n  PA231: {burn: false, xs: true, expandTo: []}\n  CM244: {burn: false, xs: true, expandTo: []}\n  CM242: {burn: false, xs: true, expandTo: []}\n  AM242: {burn: false, xs: true, expandTo: []}\n  CM245: {burn: false, xs: true, expandTo: []}\n  CM243: {burn: false, xs: true, expandTo: []}\n  CM246: {burn: false, xs: true, expandTo: []}\n  CM247: {burn: false, xs: true, expandTo: []}\n  O: {burn: 
false, xs: true, expandTo: [O16]}\n  N: {burn: false, xs: true, expandTo: [N14]}\n  ZR: {burn: false, xs: true, expandTo: []}\ncustom isotopics: {}\nblocks: {}\nassemblies:\n  heights:\n    - 3.5\n    - 3.5\n    - 3.5\n    - 3.5\n    - 3.5\n  axial mesh points:\n    - 5\n    - 5\n    - 5\n    - 5\n    - 5\n  assembly1_1:\n    specifier: assembly1_1\n    blocks:\n      - name: block1_1_1\n        godiva:\n          shape: RadialSegment\n          material: UZr\n          Tinput: 26.85\n          Thot: 26.85\n          outer_theta: 0.7853981633974483\n          height: 3.5\n          inner_theta: 0.0\n          outer_radius: 3.001\n          inner_radius: 0.0\n          mult: 0.9226919412612915\n        compliment:\n          shape: RadialSegment\n          material: Air\n          Tinput: 0.0\n          Thot: 0.0\n          outer_theta: 0.7853981633974483\n          height: 3.5\n          inner_theta: 0.0\n          outer_radius: 3.001\n          inner_radius: 0.0\n          mult: 0.0773080587387085\n      - name: block1_1_2\n        godiva:\n          shape: RadialSegment\n          material: UZr\n          Tinput: 26.85\n          Thot: 26.85\n          outer_theta: 0.7853981633974483\n          height: 3.5\n          inner_theta: 0.0\n          outer_radius: 3.001\n          inner_radius: 0.0\n          mult: 1.0\n      - name: block1_1_3\n        godiva:\n          shape: RadialSegment\n          material: UZr\n          Tinput: 26.85\n          Thot: 26.85\n          outer_theta: 0.7853981633974483\n          height: 3.5\n          inner_theta: 0.0\n          outer_radius: 3.001\n          inner_radius: 0.0\n          mult: 1.0\n      - name: block1_1_4\n        godiva:\n          shape: RadialSegment\n          material: UZr\n          Tinput: 26.85\n          Thot: 26.85\n          outer_theta: 0.7853981633974483\n          height: 3.5\n          inner_theta: 0.0\n          outer_radius: 3.001\n          inner_radius: 0.0\n          mult: 1.0\n      - name: 
block1_1_5\n        godiva:\n          shape: RadialSegment\n          material: UZr\n          Tinput: 26.85\n          Thot: 26.85\n          outer_theta: 0.7853981633974483\n          height: 3.5\n          inner_theta: 0.0\n          outer_radius: 3.001\n          inner_radius: 0.0\n          mult: 0.9271114468574524\n        compliment:\n          shape: RadialSegment\n          material: Air\n          Tinput: 0.0\n          Thot: 0.0\n          outer_theta: 0.7853981633974483\n          height: 3.5\n          inner_theta: 0.0\n          outer_radius: 3.001\n          inner_radius: 0.0\n          mult: 0.07288855314254761\n    height:\n      - 3.5\n      - 3.5\n      - 3.5\n      - 3.5\n      - 3.5\n    axial mesh points:\n      - 5\n      - 5\n      - 5\n      - 5\n      - 5\n    radial mesh points: 2\n    azimuthal mesh points: 7\n    material modifications:\n      U235_wt_frac:\n        - 0.9371\n        - 0.9371\n        - 0.9371\n        - 0.9371\n        - 0.9371\n      ZR_wt_frac:\n        - 0.0\n        - 0.0\n        - 0.0\n        - 0.0\n        - 0.0\n    xs types:\n      - A\n      - A\n      - A\n      - A\n      - A\n  assembly2_1:\n    specifier: assembly2_1\n    blocks:\n      - name: block2_1_1\n        godiva:\n          shape: RadialSegment\n          material: UZr\n          Tinput: 26.85\n          Thot: 26.85\n          outer_theta: 0.7853981633974483\n          height: 3.5\n          inner_theta: 0.0\n          outer_radius: 6.002\n          inner_radius: 3.001\n          mult: 0.5954532027244568\n        compliment:\n          shape: RadialSegment\n          material: Air\n          Tinput: 0.0\n          Thot: 0.0\n          outer_theta: 0.7853981633974483\n          height: 3.5\n          inner_theta: 0.0\n          outer_radius: 6.002\n          inner_radius: 3.001\n          mult: 0.4045467972755432\n      - name: block2_1_2\n        godiva:\n          shape: RadialSegment\n          material: UZr\n          Tinput: 26.85\n         
 Thot: 26.85\n          outer_theta: 0.7853981633974483\n          height: 3.5\n          inner_theta: 0.0\n          outer_radius: 6.002\n          inner_radius: 3.001\n          mult: 1.0\n      - name: block2_1_3\n        godiva:\n          shape: RadialSegment\n          material: UZr\n          Tinput: 26.85\n          Thot: 26.85\n          outer_theta: 0.7853981633974483\n          height: 3.5\n          inner_theta: 0.0\n          outer_radius: 6.002\n          inner_radius: 3.001\n          mult: 1.0\n      - name: block2_1_4\n        godiva:\n          shape: RadialSegment\n          material: UZr\n          Tinput: 26.85\n          Thot: 26.85\n          outer_theta: 0.7853981633974483\n          height: 3.5\n          inner_theta: 0.0\n          outer_radius: 6.002\n          inner_radius: 3.001\n          mult: 1.0\n      - name: block2_1_5\n        godiva:\n          shape: RadialSegment\n          material: UZr\n          Tinput: 26.85\n          Thot: 26.85\n          outer_theta: 0.7853981633974483\n          height: 3.5\n          inner_theta: 0.0\n          outer_radius: 6.002\n          inner_radius: 3.001\n          mult: 0.5924441814422607\n        compliment:\n          shape: RadialSegment\n          material: Air\n          Tinput: 0.0\n          Thot: 0.0\n          outer_theta: 0.7853981633974483\n          height: 3.5\n          inner_theta: 0.0\n          outer_radius: 6.002\n          inner_radius: 3.001\n          mult: 0.40755581855773926\n    height:\n      - 3.5\n      - 3.5\n      - 3.5\n      - 3.5\n      - 3.5\n    axial mesh points:\n      - 5\n      - 5\n      - 5\n      - 5\n      - 5\n    radial mesh points: 2\n    azimuthal mesh points: 7\n    material modifications:\n      U235_wt_frac:\n        - 0.9371\n        - 0.9371\n        - 0.9371\n        - 0.9371\n        - 0.9371\n      ZR_wt_frac:\n        - 0.0\n        - 0.0\n        - 0.0\n        - 0.0\n        - 0.0\n    xs types:\n      - A\n      - A\n      - A\n      - 
A\n      - A\n  assembly3_1:\n    specifier: assembly3_1\n    blocks:\n      - name: block3_1_1\n        godiva:\n          shape: RadialSegment\n          material: UZr\n          Tinput: 26.85\n          Thot: 26.85\n          outer_theta: 0.7853981633974483\n          height: 3.5\n          inner_theta: 0.0\n          outer_radius: 9.0\n          inner_radius: 6.002\n          mult: 0.046154800802469254\n        compliment:\n          shape: RadialSegment\n          material: Air\n          Tinput: 0.0\n          Thot: 0.0\n          outer_theta: 0.7853981633974483\n          height: 3.5\n          inner_theta: 0.0\n          outer_radius: 9.0\n          inner_radius: 6.002\n          mult: 0.9538451991975307\n      - name: block3_1_2\n        godiva:\n          shape: RadialSegment\n          material: UZr\n          Tinput: 26.85\n          Thot: 26.85\n          outer_theta: 0.7853981633974483\n          height: 3.5\n          inner_theta: 0.0\n          outer_radius: 9.0\n          inner_radius: 6.002\n          mult: 0.6035306453704834\n        compliment:\n          shape: RadialSegment\n          material: Air\n          Tinput: 0.0\n          Thot: 0.0\n          outer_theta: 0.7853981633974483\n          height: 3.5\n          inner_theta: 0.0\n          outer_radius: 9.0\n          inner_radius: 6.002\n          mult: 0.3964693546295166\n      - name: block3_1_3\n        godiva:\n          shape: RadialSegment\n          material: UZr\n          Tinput: 26.85\n          Thot: 26.85\n          outer_theta: 0.7853981633974483\n          height: 3.5\n          inner_theta: 0.0\n          outer_radius: 9.0\n          inner_radius: 6.002\n          mult: 0.8756284713745117\n        compliment:\n          shape: RadialSegment\n          material: Air\n          Tinput: 0.0\n          Thot: 0.0\n          outer_theta: 0.7853981633974483\n          height: 3.5\n          inner_theta: 0.0\n          outer_radius: 9.0\n          inner_radius: 6.002\n          
mult: 0.12437152862548828\n      - name: block3_1_4\n        godiva:\n          shape: RadialSegment\n          material: UZr\n          Tinput: 26.85\n          Thot: 26.85\n          outer_theta: 0.7853981633974483\n          height: 3.5\n          inner_theta: 0.0\n          outer_radius: 9.0\n          inner_radius: 6.002\n          mult: 0.5993080139160156\n        compliment:\n          shape: RadialSegment\n          material: Air\n          Tinput: 0.0\n          Thot: 0.0\n          outer_theta: 0.7853981633974483\n          height: 3.5\n          inner_theta: 0.0\n          outer_radius: 9.0\n          inner_radius: 6.002\n          mult: 0.4006919860839844\n      - name: block3_1_5\n        godiva:\n          shape: RadialSegment\n          material: UZr\n          Tinput: 26.85\n          Thot: 26.85\n          outer_theta: 0.7853981633974483\n          height: 3.5\n          inner_theta: 0.0\n          outer_radius: 9.0\n          inner_radius: 6.002\n          mult: 0.04680449143052101\n        compliment:\n          shape: RadialSegment\n          material: Air\n          Tinput: 0.0\n          Thot: 0.0\n          outer_theta: 0.7853981633974483\n          height: 3.5\n          inner_theta: 0.0\n          outer_radius: 9.0\n          inner_radius: 6.002\n          mult: 0.953195508569479\n    height:\n      - 3.5\n      - 3.5\n      - 3.5\n      - 3.5\n      - 3.5\n    axial mesh points:\n      - 5\n      - 5\n      - 5\n      - 5\n      - 5\n    radial mesh points: 2\n    azimuthal mesh points: 7\n    material modifications:\n      U235_wt_frac:\n        - 0.9371\n        - 0.9371\n        - 0.9371\n        - 0.9371\n        - 0.9371\n      ZR_wt_frac:\n        - 0.0\n        - 0.0\n        - 0.0\n        - 0.0\n        - 0.0\n    xs types:\n      - A\n      - A\n      - A\n      - A\n      - A\nsystems:\n  core:\n    grid name: core\n    origin:\n      x: 0.0\n      y: 0.0\n      z: 0.0\ngrids:\n  core:\n    geom: thetarz\n    lattice map:\n    
grid bounds:\n      r:\n        - 0.0\n        - 3.001\n        - 6.002\n        - 9.0\n      theta:\n        - 0.0\n        - 0.7853981633974483\n      z:\n        - -8.75\n        - -5.25\n        - -1.7500000000000002\n        - 1.7500000000000002\n        - 5.25\n        - 8.75\n    symmetry: eighth periodic\n    grid contents:\n      ? - 0\n        - 0\n      : assembly1_1\n      ? - 0\n        - 1\n      : assembly2_1\n      ? - 0\n        - 2\n      : assembly3_1\n"
  },
  {
    "path": "armi/testing/reactors/godiva/godiva.armi.unittest.yaml",
    "content": "settings:\n  acceptableBlockAreaError: 0.0001\n  burnSteps: 0\n  comment: Bare, Highly Enriched Uranium Sphere\n  crossSectionControl:\n    AA:\n      geometry: 0D\n      validBlockTypes:\n        - fuel\n      blockRepresentation: FluxWeightedAverage\n      criticalBuckling: true\n  genReports: false\n  genXS: Neutron\n  groupStructure: ARMI45\n  loadingFile: godiva-blueprints.yaml\n  neutronicsKernel: DIF3D-FD\n  neutronicsOutputsToSave: All\n  neutronicsType: both\n  nTasks: 36\n  outers: 200\n  power: 0.001\n  verbosity: debug\n  versions:\n    armi: uncontrolled\n"
  },
  {
    "path": "armi/testing/reactors/smallHexReactor/smallHexReactor-bp.yaml",
    "content": "# A small, hex-based, full-core reactor\nnuclide flags:\n  U234:\n    burn: true\n    xs: true\n    expandTo:\n  U235:\n    burn: true\n    xs: true\n    expandTo:\n  U236:\n    burn: true\n    xs: true\n    expandTo:\n  U238:\n    burn: true\n    xs: true\n    expandTo:\n  NP237:\n    burn: true\n    xs: true\n    expandTo:\n  NP238:\n    burn: true\n    xs: true\n    expandTo:\n  PU236:\n    burn: true\n    xs: true\n    expandTo:\n  PU238:\n    burn: true\n    xs: true\n    expandTo:\n  PU239:\n    burn: true\n    xs: true\n    expandTo:\n  PU240:\n    burn: true\n    xs: true\n    expandTo:\n  PU241:\n    burn: true\n    xs: true\n    expandTo:\n  PU242:\n    burn: true\n    xs: true\n    expandTo:\n  AM241:\n    burn: true\n    xs: true\n    expandTo:\n  AM242:\n    burn: true\n    xs: true\n    expandTo:\n  AM243:\n    burn: true\n    xs: true\n    expandTo:\n  CM242:\n    burn: true\n    xs: true\n    expandTo:\n  CM243:\n    burn: true\n    xs: true\n    expandTo:\n  CM244:\n    burn: true\n    xs: true\n    expandTo:\n  CM245:\n    burn: true\n    xs: true\n    expandTo:\n  CM246:\n    burn: true\n    xs: true\n    expandTo:\n  CM247:\n    burn: true\n    xs: true\n    expandTo:\n  LFP35:\n    burn: true\n    xs: true\n    expandTo:\n  LFP38:\n    burn: true\n    xs: true\n    expandTo:\n  LFP39:\n    burn: true\n    xs: true\n    expandTo:\n  LFP40:\n    burn: true\n    xs: true\n    expandTo:\n  LFP41:\n    burn: true\n    xs: true\n    expandTo:\n  DUMP1:\n    burn: true\n    xs: true\n    expandTo:\n  DUMP2:\n    burn: true\n    xs: true\n    expandTo:\n  B10:\n    burn: false\n    xs: true\n    expandTo:\n  B11:\n    burn: false\n    xs: true\n    expandTo:\n  ZR:\n    burn: false\n    xs: true\n    expandTo:\n  C:\n    burn: false\n    xs: true\n    expandTo:\n  SI:\n    burn: false\n    xs: true\n    expandTo:\n  V:\n    burn: false\n    xs: true\n    expandTo:\n  CR:\n    burn: false\n    xs: true\n    expandTo:\n  MN:\n    burn: 
false\n    xs: true\n    expandTo:\n  FE:\n    burn: false\n    xs: true\n    expandTo:\n  NI:\n    burn: false\n    xs: true\n    expandTo:\n  MO:\n    burn: false\n    xs: true\n    expandTo:\n  W:\n    burn: false\n    xs: true\n    expandTo:\n  NA:\n    burn: false\n    xs: true\n    expandTo:\n  HE:\n    burn: false\n    xs: true\n    expandTo:\n  N:\n    burn: false\n    xs: true\n    expandTo:\n    - N14\n    - N15\n  S:\n    burn: false\n    xs: true\n    expandTo:\n    - S32\n    - S33\n    - S34\n    - S36\n  P:\n    burn: false\n    xs: true\n    expandTo:\n    - P31\n  NB:\n    burn: false\n    xs: true\n    expandTo:\n    - NB93\n  CO:\n    burn: false\n    xs: true\n    expandTo:\n    - CO59\n  CU:\n    burn: false\n    xs: true\n    expandTo:\n    - CU63\n    - CU65\n  SN:\n    burn: false\n    xs: true\n    expandTo:\n    - SN112\n    - SN114\n    - SN115\n    - SN116\n    - SN117\n    - SN118\n    - SN119\n    - SN120\n    - SN122\n    - SN124\n    - SN126\n  BI:\n    burn: false\n    xs: true\n    expandTo:\n    - BI209\n  AL:\n    burn: false\n    xs: true\n    expandTo:\n    - AL27\n  PB:\n    burn: false\n    xs: true\n    expandTo:\n    - PB204\n    - PB206\n    - PB207\n    - PB208\n  O:\n    burn: false\n    xs: true\n    expandTo:\n    - O16\n  AS:\n    burn: false\n    xs: true\n    expandTo:\n    - AS75\n  TA:\n    burn: false\n    xs: true\n    expandTo: []\n  TI:\n    burn: false\n    xs: true\n    expandTo:\n    - TI46\n    - TI47\n    - TI48\n    - TI49\n    - TI50\n  BE:\n    burn: false\n    xs: true\n    expandTo:\n    - BE9\n  SB:\n    burn: false\n    xs: true\n    expandTo:\n    - SB121\n    - SB123\n  Y:\n    burn: false\n    xs: true\n    expandTo:\n  RU:\n    burn: false\n    xs: true\n    expandTo:\n    - RU96\n    - RU98\n    - RU99\n    - RU100\n    - RU101\n    - RU102\n    - RU104\n  PD:\n    burn: false\n    xs: true\n    expandTo:\n    - PD102\n    - PD104\n    - PD105\n    - PD106\n    - PD108\n    - PD110\n  RH:\n    
burn: false\n    xs: true\n    expandTo:\n    - RH103\n  B:\n    burn: true\n    xs: true\n    expandTo:\n    - B10\n    - B11\n\n\nblocks:\n    fuel: &block_fuel\n        clad1:\n            shape: Circle\n            material: HT9\n            Tinput: 25.0\n            Thot: 450.0\n            id: 0.6962\n            od: 0.808\n            latticeIDs:\n            - 1\n# end-block-clad\n        wire1:\n            shape: Helix\n            material: HT9\n            Tinput: 25.0\n            Thot: 450.0\n            axialPitch: 30.0\n            helixDiameter: 0.8888\n            id: 0.0\n            od: 0.0808\n            latticeIDs:\n            - 1\n# end-block-wire\n        fuel1:\n            shape: Circle\n            material: UO2\n            Tinput: 25.0\n            Thot: 500.0\n            id: 0.0\n            od: 0.6029\n            latticeIDs:\n            - 1\n# end-block-fuel\n        bond1:\n            shape: Circle\n            material: Sodium\n            Tinput: 447.0\n            Thot: 447.0\n            id: fuel1.od\n            od: clad1.id\n            latticeIDs:\n            - 1\n\n# end-block-bond\n        duct:\n            shape: Hexagon\n            material: HT9\n            Tinput: 25.0\n            Thot: 450.0\n            ip: 14.922\n            op: 15.710\n            mult: 1.0\n# end-block-duct\n        intercoolant:\n            shape: Hexagon\n            material: Sodium\n            Tinput: 450.0\n            Thot: 450.0\n            ip: duct.op\n            op: 16.142\n            mult: 1.0\n# end-block-intercoolant\n        coolant:\n            shape: DerivedShape\n            material: Sodium\n            Tinput: 200.0\n            Thot: 450.0\n# end-block-coolant\n    radial shield: &block_shield\n        control:\n            shape: Circle\n            material: B4C\n            Tinput: 597.0\n            Thot: 597.0\n            id: 0.0\n            od: 0.6962\n            mult: 271\n        duct:\n            
shape: Hexagon\n            material: HT9\n            Tinput: 25.0\n            Thot: 450.0\n            ip: 14.922\n            op: 15.710\n            mult: 1.0\n        intercoolant:\n            shape: Hexagon\n            material: Sodium\n            Tinput: 447.0\n            Thot: 447.0\n            ip: duct.op\n            mult: 1.0\n            op: 16.142\n        coolant:\n            shape: DerivedShape\n            material: Sodium\n            Tinput: 450.0\n            Thot: 450.0\n# end-block-radialshield\n    reflector: &block_reflector\n        reflector:\n            shape: Circle\n            material: HT9\n            Tinput: 450.0\n            Thot: 450.0\n            id: 0.0\n            od: 0.6962\n            mult: 271\n        wire:\n            shape: Helix\n            material: HT9\n            Tinput: 25.0\n            Thot: 450.0\n            axialPitch: 30.0\n            helixDiameter: 0.777\n            id: 0.0\n            od: 0.0808\n            mult: 271\n        duct:\n            shape: Hexagon\n            material: HT9\n            Tinput: 25.0\n            Thot: 450.0\n            ip: 14.922\n            op: 15.710\n            mult: 1.0\n        intercoolant:\n            shape: Hexagon\n            material: Sodium\n            Tinput: 447.0\n            Thot: 447.0\n            ip: duct.op\n            mult: 1.0\n            op: 16.142\n        coolant:\n            shape: DerivedShape\n            material: Sodium\n            Tinput: 450.0\n            Thot: 450.0\n    plenum: &block_plenum\n        clad:\n            shape: Circle\n            material: HT9\n            Tinput: 25.0\n            Thot: 450.0\n            id: 0.6962\n            od: 0.808\n            mult: 271\n        wire:\n            shape: Helix\n            material: HT9\n            Tinput: 25.0\n            Thot: 450.0\n            axialPitch: 30.0\n            helixDiameter: 0.88888\n            id: 0.0\n            od: 0.0808\n            
mult: 271\n        gap:\n            shape: Circle\n            material: Void\n            Tinput: 450.0\n            Thot: 450.0\n            id: 0.0\n            od: clad.id\n            mult: 271\n        duct:\n            shape: Hexagon\n            material: HT9\n            Tinput: 25.0\n            Thot: 450.0\n            ip: 14.922\n            op: 15.710\n            mult: 1.0\n        intercoolant:\n            shape: Hexagon\n            material: Sodium\n            Tinput: 447.0\n            Thot: 447.0\n            ip: duct.op\n            mult: 1.0\n            op: 16.142\n        coolant:\n            shape: DerivedShape\n            material: Sodium\n            Tinput: 450.0\n            Thot: 450.0\n# end-block-plenum\nassemblies:\n    heights: &heights\n        - 15.0\n        - 20.32\n        - 20.32\n        - 20.32\n        - 20.32\n        - 20.32\n        - 191.14\n    axial mesh points: &mesh\n        - 1\n        - 2\n        - 2\n        - 2\n        - 2\n        - 2\n        - 8\n# end-assemblies-common\n    inner fuel:\n        specifier: IC\n        blocks: &fuel_blocks\n            - *block_reflector\n            - *block_fuel\n            - *block_fuel\n            - *block_fuel\n            - *block_fuel\n            - *block_fuel\n            - *block_plenum\n        height: *heights\n        axial mesh points: *mesh\n        xs types: &IC_xs\n            - A\n            - A\n            - A\n            - A\n            - A\n            - A\n            - A\n# end-assemblies-ic\n    middle core fuel:\n        specifier: MC\n        blocks: *fuel_blocks\n        height: *heights\n        axial mesh points: *mesh\n        material modifications:\n            TD_frac:\n                - ''\n                - 0.153\n                - 0.153\n                - 0.153\n                - 0.153\n                - 0.153\n                - ''\n        xs types:\n            - B\n            - B\n            - B\n            - B\n         
   - B\n            - B\n            - B\n# end-assemblies-mc\n# end-assemblies-section\nsystems:\n    core:\n        grid name: core\n        origin:\n            x: 0.0\n            y: 0.0\n            z: 0.0\ngrids:\n    core:\n        geom: hex_corners_up\n        symmetry: full\n        lattice map: |\n          -     MC   MC   MC\n          -  MC   IC   IC   IC\n           IC   IC   IC   IC   IC\n             IC   IC   IC   IC\n                IC   IC   IC\n# end-systems-section\n"
  },
  {
    "path": "armi/testing/reactors/smallHexReactor/smallHexReactor.yaml",
    "content": "# A simple test reactor, not physically functional\n#\n# * pin-type reactor with hex assemblies\n# * sodium-cooled, fast\n# * full core symmetry\n# * The core grid is corners up\n# * The symmetric core positions have different assembly types\n# * The different assembly types have different amounts of molesHmBOL because they had different TD_frac material modifications\n# * There are 3 rings (necessary for checking problems with symmetry because the assemblies in the 2nd ring don't actually fall along the symmetry line that is checked)\nsettings:\n# global\n  availabilityFactor: 0.9\n  buGroups:\n    - 100\n  burnSteps: 2\n  comment: Small, full core test reactor.\n  cycleLength: 400.0\n  loadingFile: smallHexReactor-bp.yaml\n  nCycles: 1\n  nTasks: 1\n  power: 1000000000.0\n  verbosity: warning\n  versions:\n    armi: uncontrolled\n# neutronics\n  genXS: Neutron"
  },
  {
    "path": "armi/testing/reactors/thirdSmallHexReactor/thirdSmallHexReactor-bp.yaml",
    "content": "# A small, hex-based, full-core reactor\nnuclide flags:\n  U234:\n    burn: true\n    xs: true\n    expandTo:\n  U235:\n    burn: true\n    xs: true\n    expandTo:\n  U236:\n    burn: true\n    xs: true\n    expandTo:\n  U238:\n    burn: true\n    xs: true\n    expandTo:\n  NP237:\n    burn: true\n    xs: true\n    expandTo:\n  NP238:\n    burn: true\n    xs: true\n    expandTo:\n  PU236:\n    burn: true\n    xs: true\n    expandTo:\n  PU238:\n    burn: true\n    xs: true\n    expandTo:\n  PU239:\n    burn: true\n    xs: true\n    expandTo:\n  PU240:\n    burn: true\n    xs: true\n    expandTo:\n  PU241:\n    burn: true\n    xs: true\n    expandTo:\n  PU242:\n    burn: true\n    xs: true\n    expandTo:\n  AM241:\n    burn: true\n    xs: true\n    expandTo:\n  AM242:\n    burn: true\n    xs: true\n    expandTo:\n  AM243:\n    burn: true\n    xs: true\n    expandTo:\n  CM242:\n    burn: true\n    xs: true\n    expandTo:\n  CM243:\n    burn: true\n    xs: true\n    expandTo:\n  CM244:\n    burn: true\n    xs: true\n    expandTo:\n  CM245:\n    burn: true\n    xs: true\n    expandTo:\n  CM246:\n    burn: true\n    xs: true\n    expandTo:\n  CM247:\n    burn: true\n    xs: true\n    expandTo:\n  LFP35:\n    burn: true\n    xs: true\n    expandTo:\n  LFP38:\n    burn: true\n    xs: true\n    expandTo:\n  LFP39:\n    burn: true\n    xs: true\n    expandTo:\n  LFP40:\n    burn: true\n    xs: true\n    expandTo:\n  LFP41:\n    burn: true\n    xs: true\n    expandTo:\n  DUMP1:\n    burn: true\n    xs: true\n    expandTo:\n  DUMP2:\n    burn: true\n    xs: true\n    expandTo:\n  B10:\n    burn: false\n    xs: true\n    expandTo:\n  B11:\n    burn: false\n    xs: true\n    expandTo:\n  ZR:\n    burn: false\n    xs: true\n    expandTo:\n  C:\n    burn: false\n    xs: true\n    expandTo:\n  SI:\n    burn: false\n    xs: true\n    expandTo:\n  V:\n    burn: false\n    xs: true\n    expandTo:\n  CR:\n    burn: false\n    xs: true\n    expandTo:\n  MN:\n    burn: 
false\n    xs: true\n    expandTo:\n  FE:\n    burn: false\n    xs: true\n    expandTo:\n  NI:\n    burn: false\n    xs: true\n    expandTo:\n  MO:\n    burn: false\n    xs: true\n    expandTo:\n  W:\n    burn: false\n    xs: true\n    expandTo:\n  NA:\n    burn: false\n    xs: true\n    expandTo:\n  HE:\n    burn: false\n    xs: true\n    expandTo:\n  N:\n    burn: false\n    xs: true\n    expandTo:\n    - N14\n    - N15\n  S:\n    burn: false\n    xs: true\n    expandTo:\n    - S32\n    - S33\n    - S34\n    - S36\n  P:\n    burn: false\n    xs: true\n    expandTo:\n    - P31\n  NB:\n    burn: false\n    xs: true\n    expandTo:\n    - NB93\n  CO:\n    burn: false\n    xs: true\n    expandTo:\n    - CO59\n  CU:\n    burn: false\n    xs: true\n    expandTo:\n    - CU63\n    - CU65\n  SN:\n    burn: false\n    xs: true\n    expandTo:\n    - SN112\n    - SN114\n    - SN115\n    - SN116\n    - SN117\n    - SN118\n    - SN119\n    - SN120\n    - SN122\n    - SN124\n    - SN126\n  BI:\n    burn: false\n    xs: true\n    expandTo:\n    - BI209\n  AL:\n    burn: false\n    xs: true\n    expandTo:\n    - AL27\n  PB:\n    burn: false\n    xs: true\n    expandTo:\n    - PB204\n    - PB206\n    - PB207\n    - PB208\n  O:\n    burn: false\n    xs: true\n    expandTo:\n    - O16\n  AS:\n    burn: false\n    xs: true\n    expandTo:\n    - AS75\n  TA:\n    burn: false\n    xs: true\n    expandTo: []\n  TI:\n    burn: false\n    xs: true\n    expandTo:\n    - TI46\n    - TI47\n    - TI48\n    - TI49\n    - TI50\n  BE:\n    burn: false\n    xs: true\n    expandTo:\n    - BE9\n  SB:\n    burn: false\n    xs: true\n    expandTo:\n    - SB121\n    - SB123\n  Y:\n    burn: false\n    xs: true\n    expandTo:\n  RU:\n    burn: false\n    xs: true\n    expandTo:\n    - RU96\n    - RU98\n    - RU99\n    - RU100\n    - RU101\n    - RU102\n    - RU104\n  PD:\n    burn: false\n    xs: true\n    expandTo:\n    - PD102\n    - PD104\n    - PD105\n    - PD106\n    - PD108\n    - PD110\n  RH:\n    
burn: false\n    xs: true\n    expandTo:\n    - RH103\n  B:\n    burn: true\n    xs: true\n    expandTo:\n    - B10\n    - B11\n\n\nblocks:\n    fuel: &block_fuel\n        clad1:\n            shape: Circle\n            material: HT9\n            Tinput: 25.0\n            Thot: 450.0\n            id: 0.6962\n            od: 0.808\n            latticeIDs:\n            - 1\n# end-block-clad\n        wire1:\n            shape: Helix\n            material: HT9\n            Tinput: 25.0\n            Thot: 450.0\n            axialPitch: 30.0\n            helixDiameter: 0.8888\n            id: 0.0\n            od: 0.0808\n            latticeIDs:\n            - 1\n# end-block-wire\n        fuel1:\n            shape: Circle\n            material: UO2\n            Tinput: 25.0\n            Thot: 500.0\n            id: 0.0\n            od: 0.6029\n            latticeIDs:\n            - 1\n# end-block-fuel\n        bond1:\n            shape: Circle\n            material: Sodium\n            Tinput: 447.0\n            Thot: 447.0\n            id: fuel1.od\n            od: clad1.id\n            latticeIDs:\n            - 1\n\n# end-block-bond\n        duct:\n            shape: Hexagon\n            material: HT9\n            Tinput: 25.0\n            Thot: 450.0\n            ip: 14.922\n            op: 15.710\n            mult: 1.0\n# end-block-duct\n        intercoolant:\n            shape: Hexagon\n            material: Sodium\n            Tinput: 450.0\n            Thot: 450.0\n            ip: duct.op\n            op: 16.142\n            mult: 1.0\n# end-block-intercoolant\n        coolant:\n            shape: DerivedShape\n            material: Sodium\n            Tinput: 200.0\n            Thot: 450.0\n# end-block-coolant\n    radial shield: &block_shield\n        control:\n            shape: Circle\n            material: B4C\n            Tinput: 597.0\n            Thot: 597.0\n            id: 0.0\n            od: 0.6962\n            mult: 271\n        duct:\n            
shape: Hexagon\n            material: HT9\n            Tinput: 25.0\n            Thot: 450.0\n            ip: 14.922\n            op: 15.710\n            mult: 1.0\n        intercoolant:\n            shape: Hexagon\n            material: Sodium\n            Tinput: 447.0\n            Thot: 447.0\n            ip: duct.op\n            mult: 1.0\n            op: 16.142\n        coolant:\n            shape: DerivedShape\n            material: Sodium\n            Tinput: 450.0\n            Thot: 450.0\n# end-block-radialshield\n    reflector: &block_reflector\n        reflector:\n            shape: Circle\n            material: HT9\n            Tinput: 450.0\n            Thot: 450.0\n            id: 0.0\n            od: 0.6962\n            mult: 271\n        wire:\n            shape: Helix\n            material: HT9\n            Tinput: 25.0\n            Thot: 450.0\n            axialPitch: 30.0\n            helixDiameter: 0.777\n            id: 0.0\n            od: 0.0808\n            mult: 271\n        duct:\n            shape: Hexagon\n            material: HT9\n            Tinput: 25.0\n            Thot: 450.0\n            ip: 14.922\n            op: 15.710\n            mult: 1.0\n        intercoolant:\n            shape: Hexagon\n            material: Sodium\n            Tinput: 447.0\n            Thot: 447.0\n            ip: duct.op\n            mult: 1.0\n            op: 16.142\n        coolant:\n            shape: DerivedShape\n            material: Sodium\n            Tinput: 450.0\n            Thot: 450.0\n    plenum: &block_plenum\n        clad:\n            shape: Circle\n            material: HT9\n            Tinput: 25.0\n            Thot: 450.0\n            id: 0.6962\n            od: 0.808\n            mult: 271\n        wire:\n            shape: Helix\n            material: HT9\n            Tinput: 25.0\n            Thot: 450.0\n            axialPitch: 30.0\n            helixDiameter: 0.88888\n            id: 0.0\n            od: 0.0808\n            
mult: 271\n        gap:\n            shape: Circle\n            material: Void\n            Tinput: 450.0\n            Thot: 450.0\n            id: 0.0\n            od: clad.id\n            mult: 271\n        duct:\n            shape: Hexagon\n            material: HT9\n            Tinput: 25.0\n            Thot: 450.0\n            ip: 14.922\n            op: 15.710\n            mult: 1.0\n        intercoolant:\n            shape: Hexagon\n            material: Sodium\n            Tinput: 447.0\n            Thot: 447.0\n            ip: duct.op\n            mult: 1.0\n            op: 16.142\n        coolant:\n            shape: DerivedShape\n            material: Sodium\n            Tinput: 450.0\n            Thot: 450.0\n# end-block-plenum\nassemblies:\n    heights: &heights\n        - 15.0\n        - 20.32\n        - 191.14\n    axial mesh points: &mesh\n        - 1\n        - 2\n        - 8\n# end-assemblies-common\n    inner fuel:\n        specifier: IC\n        blocks: &fuel_blocks\n            - *block_reflector\n            - *block_fuel\n            - *block_plenum\n        height: *heights\n        axial mesh points: *mesh\n        xs types: &IC_xs\n            - A\n            - A\n            - A\n# end-assemblies-ic\n    middle core fuel:\n        specifier: MC\n        blocks: *fuel_blocks\n        height: *heights\n        axial mesh points: *mesh\n        material modifications:\n            TD_frac:\n                - ''\n                - 0.153\n                - ''\n        xs types:\n            - B\n            - B\n            - B\n# end-assemblies-mc\n# end-assemblies-section\nsystems:\n    core:\n        grid name: core\n        origin:\n            x: 0.0\n            y: 0.0\n            z: 0.0\ngrids:\n    core:\n        geom: hex_corners_up\n        symmetry: third periodic\n        lattice map: |\n          MC\n           MC\n          IC IC\n           IC\n          IC IC\n# end-systems-section\n"
  },
  {
    "path": "armi/testing/reactors/thirdSmallHexReactor/thirdSmallHexReactor.yaml",
    "content": "# A simple test reactor, not physically functional\n#\n# * pin-type reactor with hex assemblies\n# * sodium-cooled, fast\n# * third-core symmetry\n# * The core grid is corners up\n# * The symmetric core positions have different assembly types\n# * The different assembly types have different amounts of molesHmBOL because they had different TD_frac material modifications\n# * There are 3 rings (necessary for checking problems with symmetry because the assemblies in the 2nd ring don't actually fall along the symmetry line that is checked)\nsettings:\n# global\n  availabilityFactor: 0.9\n  buGroups:\n    - 100\n  burnSteps: 2\n  comment: Small, third-core test reactor.\n  cycleLength: 400.0\n  loadingFile: thirdSmallHexReactor-bp.yaml\n  nCycles: 1\n  nTasks: 1\n  power: 1000000000.0\n  startCycle: 1\n  startNode: 2\n  verbosity: error\n  versions:\n    armi: uncontrolled\n\n# neutronics\n  genXS: Neutron\n  genReports: false\n  summarizeAssemDesign: false\n"
  },
  {
    "path": "armi/testing/resources/armiRun-SHUFFLES.yaml",
    "content": "sequence:\n  1: &cycle_1\n    - &shuffle_1_9_45\n      cascade: [\"igniter fuel\", \"009-045\", \"008-004\", \"007-001\", \"006-005\"]\n      fuelEnrichment: [0, 0.12, 0.14, 0.15, 0]\n    - extraRotations: {\"009-045\": 60}\n    - &shuffle_1_4_4\n      cascade: [\"middle fuel\", \"004-004\", \"005-005\", \"006-006\", \"Delete\"]\n      fuelEnrichment: [0, 0.12, 0.14, 0.15, 0]\n  2:\n    - *shuffle_1_9_45\n    - *shuffle_1_4_4\n    - extraRotations: {\"009-045\": 60}\n    - cascade: [\"SFP\", \"005-003\", \"SFP\"]\n      ringPosCycle: [6, 5, 0]\n  3:\n    - *shuffle_1_9_45\n    - swap: [\"009-045\", \"008-004\"]\n    - swap: [\"007-001\", \"006-005\"]\n    - cascade: [\"SFP\", \"002-002\", \"SFP\"]\n      ringPosCycle: [5, 3, 1]\n"
  },
  {
    "path": "armi/testing/singleMixedAssembly.py",
    "content": "# Copyright 2025 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport io\n\nfrom armi.reactor.blueprints import Blueprints\nfrom armi.settings import Settings\n\nBLOCK_DEFINITIONS_2PIN = \"\"\"\nblocks:\n    grid plate: &block_grid_plate\n        grid:\n            shape: Hexagon\n            material: HT9\n            Tinput: 25.0\n            Thot: 450.0\n            ip: 15.277\n            mult: 1.0\n            op: 16.577\n        coolant: &component_coolant\n            shape: DerivedShape\n            material: Sodium\n            Tinput: 25.0\n            Thot: 450.0\n        intercoolant:\n            shape: Hexagon\n            material: Sodium\n            Tinput: 25.0\n            Thot: 450.0\n            ip: grid.op\n            mult: 1.0\n            op: 19.0\n\n    duct: &block_duct\n        coolant: *component_coolant\n        duct: &component_duct\n            shape: Hexagon\n            material: HT9\n            Tinput: 25.0\n            Thot: 450.0\n            ip: 18.0\n            mult: 1.0\n            op: 18.5\n        intercoolant: &component_intercoolant\n            shape: Hexagon\n            material: Sodium\n            Tinput: 25.0\n            Thot: 450.0\n            ip: duct.op\n            mult: 1.0\n            op: 19.0\n\n    axial shield twoPin: &block_fuel_multiPin_axial_shield\n        grid name: twoPin\n        shield: &component_shield_shield1\n            shape: Circle\n            
material: HT9\n            Tinput: 25.0\n            Thot: 600.0\n            id: 0.0\n            od: 0.86602\n            latticeIDs: [1]\n        bond: &component_shield_bond1\n            shape: Circle\n            material: Sodium\n            Tinput: 25.0\n            Thot: 470.0\n            id: shield.od\n            od: clad.id\n            latticeIDs: [1]\n        clad: &component_shield_clad1\n            shape: Circle\n            material: HT9\n            Tinput: 25.0\n            Thot: 470.0\n            id: 1.0\n            od: 1.09\n            latticeIDs: [1]\n        wire: &component_shield_wire1\n            shape: Helix\n            material: HT9\n            Tinput: 25.0\n            Thot: 470.0\n            axialPitch: 30.15\n            helixDiameter: 1.19056\n            id: 0.0\n            od: 0.10056\n            latticeIDs: [1]\n        shield test:\n            <<: *component_shield_shield1\n            latticeIDs: [2]\n        bond test:\n            <<: *component_shield_bond1\n            id: shield test.od\n            od: clad test.id\n            latticeIDs: [2]\n        clad test:\n            <<: *component_shield_clad1\n            latticeIDs: [2]\n        coolant: *component_coolant\n        duct: *component_duct\n        intercoolant: *component_intercoolant\n        axial expansion target component: shield\n\n    fuel twoPin: &block_fuel_multiPin\n        grid name: twoPin\n        fuel: &component_fuelmultiPin\n            shape: Circle\n            material: UZr\n            Tinput: 25.0\n            Thot: 600.0\n            id: 0.0\n            od: 0.86602\n            latticeIDs: [1]\n        bond: &component_fuelmultiPin_bond\n            shape: Circle\n            material: Sodium\n            Tinput: 25.0\n            Thot: 470.0\n            id: fuel.od\n            od: clad.id\n            latticeIDs: [1]\n        clad: &component_fuelmultiPin_clad1\n            shape: Circle\n            material: HT9\n            
Tinput: 25.0\n            Thot: 470.0\n            id: 1.0\n            od: 1.09\n            latticeIDs: [1]\n        wire: &component_fuelmultiPin_wire1\n            shape: Helix\n            material: HT9\n            Tinput: 25.0\n            Thot: 470.0\n            axialPitch: 30.15\n            helixDiameter: 1.19056\n            id: 0.0\n            od: 0.10056\n            latticeIDs: [1]\n        fuel test: &component_fuelmultiPin_fuel2\n            <<: *component_fuelmultiPin\n            latticeIDs: [2]\n        bond test: &component_fuelmultiPin_bond2\n            <<: *component_fuelmultiPin_bond\n            id: fuel test.od\n            od: clad test.id\n            latticeIDs: [2]\n        clad test: &component_fuelmultiPin_clad2\n            <<: *component_fuelmultiPin_clad1\n            latticeIDs: [2]\n        coolant: *component_coolant\n        duct: *component_duct\n        intercoolant: *component_intercoolant\n        axial expansion target component: fuel\n\n    plenum 2pin: &block_plenum_multiPin\n        grid name: twoPin\n        gap: &component_plenummultiPin_gap1\n            shape: Circle\n            material: Void\n            Tinput: 25.0\n            Thot: 600.0\n            id: 0.0\n            od: clad.id\n            latticeIDs: [1]\n        clad: *component_fuelmultiPin_clad1\n        wire: *component_fuelmultiPin_wire1\n        gap test:\n            <<: *component_plenummultiPin_gap1\n            od: clad test.id\n            latticeIDs: [2]\n        clad test: *component_fuelmultiPin_clad2\n        coolant: *component_coolant\n        duct: *component_duct\n        intercoolant: *component_intercoolant\n        axial expansion target component: clad test\n\n    mixed fuel plenum 2pin: &block_mixed_multiPin\n        grid name: twoPin\n        gap: *component_plenummultiPin_gap1\n        clad: *component_fuelmultiPin_clad1\n        wire: *component_fuelmultiPin_wire1\n        fuel test: *component_fuelmultiPin_fuel2\n        
bond test: *component_fuelmultiPin_bond2\n        clad test: *component_fuelmultiPin_clad2\n        coolant: *component_coolant\n        duct: *component_duct\n        intercoolant: *component_intercoolant\n        axial expansion target component: fuel test\n\n    aclp plenum 2pin: &block_aclp_multiPin\n        <<: *block_plenum_multiPin\n\n    SodiumBlock: &block_dummy\n        flags: dummy\n        coolant:\n            shape: Hexagon\n            material: Sodium\n            Tinput: 25.0\n            Thot: 450.0\n            ip: 0.0\n            mult: 1.0\n            op: 19.0\n\"\"\"\n\nBLOCK_DEFINITIONS_3PIN = \"\"\"\nblocks:\n    grid plate: &block_grid_plate\n        grid:\n            shape: Hexagon\n            material: HT9\n            Tinput: 25.0\n            Thot: 450.0\n            ip: 15.277\n            mult: 1.0\n            op: 16.577\n        coolant: &component_coolant\n            shape: DerivedShape\n            material: Sodium\n            Tinput: 25.0\n            Thot: 450.0\n        intercoolant:\n            shape: Hexagon\n            material: Sodium\n            Tinput: 25.0\n            Thot: 450.0\n            ip: grid.op\n            mult: 1.0\n            op: 19.0\n\n    duct: &block_duct\n        coolant: *component_coolant\n        duct: &component_duct\n            shape: Hexagon\n            material: HT9\n            Tinput: 25.0\n            Thot: 450.0\n            ip: 18.0\n            mult: 1.0\n            op: 18.5\n        intercoolant: &component_intercoolant\n            shape: Hexagon\n            material: Sodium\n            Tinput: 25.0\n            Thot: 450.0\n            ip: duct.op\n            mult: 1.0\n            op: 19.0\n\n    axial shield threePin: &block_fuel_multiPin_axial_shield\n        grid name: threePin\n        shield: &component_shield_shield1\n            shape: Circle\n            material: HT9\n            Tinput: 25.0\n            Thot: 600.0\n            id: 0.0\n            od: 
0.86602\n            latticeIDs: [1]\n        bond: &component_shield_bond1\n            shape: Circle\n            material: Sodium\n            Tinput: 25.0\n            Thot: 470.0\n            id: shield.od\n            od: clad.id\n            latticeIDs: [1]\n        clad: &component_shield_clad1\n            shape: Circle\n            material: HT9\n            Tinput: 25.0\n            Thot: 470.0\n            id: 1.0\n            od: 1.09\n            latticeIDs: [1]\n        wire: &component_shield_wire1\n            shape: Helix\n            material: HT9\n            Tinput: 25.0\n            Thot: 470.0\n            axialPitch: 30.15\n            helixDiameter: 1.19056\n            id: 0.0\n            od: 0.10056\n            latticeIDs: [1]\n        shield test:\n            <<: *component_shield_shield1\n            latticeIDs: [2]\n        bond test:\n            <<: *component_shield_bond1\n            id: shield test.od\n            od: clad test.id\n            latticeIDs: [2]\n        clad test:\n            <<: *component_shield_clad1\n            latticeIDs: [2]\n        annular void: &shield_annular_void\n            shape: Circle\n            material: Void\n            Tinput: 25.0\n            Thot: 600.0\n            id: 0.0\n            od: annular shield test.id\n            latticeIDs: [3]\n        annular shield test:\n            shape: Circle\n            material: HT9\n            Tinput: 25.0\n            Thot: 600.0\n            id: 0.600\n            od: 0.950\n            latticeIDs: [3]\n        gap1:\n            shape: Circle\n            material: Void\n            Tinput: 25.0\n            Thot: 600.0\n            id: annular shield test.od\n            od: liner.id\n            latticeIDs: [3]\n        liner:\n            shape: Circle\n            material: Zr\n            Tinput: 25.0\n            Thot: 600.0\n            id: 0.950\n            od: 1.000\n            latticeIDs: [3]\n        gap2:\n            shape: 
Circle\n            material: Void\n            Tinput: 25.0\n            Thot: 600.0\n            id: liner.od\n            od: annular clad test.id\n            latticeIDs: [3]\n        annular clad test:\n            shape: Circle\n            material: HT9\n            Tinput: 25.0\n            Thot: 600.0\n            id: 1.000\n            od: 1.090\n            latticeIDs: [3]\n        coolant: *component_coolant\n        duct: *component_duct\n        intercoolant: *component_intercoolant\n        axial expansion target component: shield\n\n    fuel threePin: &block_fuel_multiPin\n        grid name: threePin\n        fuel: &component_fuelmultiPin\n            shape: Circle\n            material: UZr\n            Tinput: 25.0\n            Thot: 600.0\n            id: 0.0\n            od: 0.86602\n            latticeIDs: [1]\n        bond: &component_fuelmultiPin_bond\n            shape: Circle\n            material: Sodium\n            Tinput: 25.0\n            Thot: 470.0\n            id: fuel.od\n            od: clad.id\n            latticeIDs: [1]\n        clad: &component_fuelmultiPin_clad1\n            shape: Circle\n            material: HT9\n            Tinput: 25.0\n            Thot: 470.0\n            id: 1.0\n            od: 1.09\n            latticeIDs: [1]\n        wire: &component_fuelmultiPin_wire1\n            shape: Helix\n            material: HT9\n            Tinput: 25.0\n            Thot: 470.0\n            axialPitch: 30.15\n            helixDiameter: 1.19056\n            id: 0.0\n            od: 0.10056\n            latticeIDs: [1]\n        fuel test: &component_fuelmultiPin_fuel2\n            <<: *component_fuelmultiPin\n            latticeIDs: [2]\n        bond test: &component_fuelmultiPin_bond2\n            <<: *component_fuelmultiPin_bond\n            id: fuel test.od\n            od: clad test.id\n            latticeIDs: [2]\n        clad test: &component_fuelmultiPin_clad2\n            <<: *component_fuelmultiPin_clad1\n          
  latticeIDs: [2]\n        annular void: &fuel_annular_void\n            <<: *shield_annular_void\n            od: annular fuel test.id\n        annular fuel test: &fuel_annular_test\n            shape: Circle\n            material: UZr\n            Tinput: 25.0\n            Thot: 600.0\n            id: 0.600\n            od: 0.950\n            latticeIDs: [3]\n        gap1: &annular_test_gap1\n            shape: Circle\n            material: Void\n            Tinput: 25.0\n            Thot: 600.0\n            id: annular fuel test.od\n            od: liner.id\n            latticeIDs: [3]\n        liner: &liner\n            shape: Circle\n            material: Zr\n            Tinput: 25.0\n            Thot: 600.0\n            id: 0.950\n            od: 1.000\n            latticeIDs: [3]\n        gap2: &annular_test_gap2\n            shape: Circle\n            material: Void\n            Tinput: 25.0\n            Thot: 600.0\n            id: liner.od\n            od: annular clad test.id\n            latticeIDs: [3]\n        annular clad test: &annular_clad_test\n            shape: Circle\n            material: HT9\n            Tinput: 25.0\n            Thot: 600.0\n            id: 1.000\n            od: 1.090\n            latticeIDs: [3]\n        coolant: *component_coolant\n        duct: *component_duct\n        intercoolant: *component_intercoolant\n        axial expansion target component: fuel\n\n    plenum 3pin: &block_plenum_multiPin\n        grid name: threePin\n        gap: &component_plenummultiPin_gap1\n            shape: Circle\n            material: Void\n            Tinput: 25.0\n            Thot: 600.0\n            id: 0.0\n            od: clad.id\n            latticeIDs: [1]\n        clad: *component_fuelmultiPin_clad1\n        wire: *component_fuelmultiPin_wire1\n        gap test:\n            <<: *component_plenummultiPin_gap1\n            od: clad test.id\n            latticeIDs: [2]\n        clad test: *component_fuelmultiPin_clad2\n        
annular void: \n            <<: *fuel_annular_void\n            od: liner.id\n            latticeIDs: [3]\n        liner: *liner\n        gap2: *annular_test_gap2\n        annular clad test: *annular_clad_test\n        coolant: *component_coolant\n        duct: *component_duct\n        intercoolant: *component_intercoolant\n        axial expansion target component: clad test\n\n    mixed fuel plenum 3pin: &block_mixed_multiPin\n        grid name: threePin\n        gap: *component_plenummultiPin_gap1\n        clad: *component_fuelmultiPin_clad1\n        wire: *component_fuelmultiPin_wire1\n        fuel test: *component_fuelmultiPin_fuel2\n        bond test: *component_fuelmultiPin_bond2\n        clad test: *component_fuelmultiPin_clad2\n        annular void: *fuel_annular_void\n        annular fuel test: *fuel_annular_test\n        gap1: *annular_test_gap1\n        liner: *liner\n        gap2: *annular_test_gap2\n        annular clad test: *annular_clad_test\n        coolant: *component_coolant\n        duct: *component_duct\n        intercoolant: *component_intercoolant\n        axial expansion target component: fuel test\n\n    aclp plenum 3pin: &block_aclp_multiPin\n        <<: *block_plenum_multiPin\n\n    SodiumBlock: &block_dummy\n        flags: dummy\n        coolant:\n            shape: Hexagon\n            material: Sodium\n            Tinput: 25.0\n            Thot: 450.0\n            ip: 0.0\n            mult: 1.0\n            op: 19.0\n\"\"\"\n\nREGULAR_ASSEMBLY_DEF = \"\"\"\nassemblies:\n    multi pin fuel:\n        specifier: LA\n        blocks: [*block_grid_plate, *block_fuel_multiPin_axial_shield, *block_fuel_multiPin, *block_fuel_multiPin, *block_fuel_multiPin, *block_mixed_multiPin, *block_aclp_multiPin, *block_plenum_multiPin, *block_duct, *block_dummy]\n        height: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n        axial mesh points: [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n        material modifications:\n            U235_wt_frac: ['', '', 
0.2, 0.2, 0.2, 0.2, '', '', '', '']\n            ZR_wt_frac: ['', '', 0.07, 0.07, 0.07, 0.07, '', '', '', '']\n        xs types: [A, A, B, C, C, D, A, A, A, A]\n\"\"\"  # noqa: E501\n\nGRID_DEFINITION = \"\"\"\ngrids:\n    core:\n        geom: hex\n        symmetry: third periodic\n        lattice map: LA\n    twoPin:\n        geom: hex_corners_up\n        symmetry: full\n        lattice map: |\n            -  2 1\n              2 1 2\n               1 2\n    threePin:\n        geom: hex_corners_up\n        symmetry: full\n        lattice map: |\n            -  2 1\n              3 1 3\n               1 2\n\"\"\"\n\n\ndef buildMixedPinAssembly(\n    blockDefs: str = BLOCK_DEFINITIONS_2PIN,\n    assemDef: str = REGULAR_ASSEMBLY_DEF,\n    gridDef: str = GRID_DEFINITION,\n):\n    \"\"\"Builds a hex-shaped mixed-pin assembly for a sodium fast reactor. This assembly consists of 2 pin types\n    arranged as specified in the lattice map.\n    \"\"\"\n    completeBlueprints = blockDefs + assemDef + gridDef\n    cs = Settings()\n    with io.StringIO(completeBlueprints) as stream:\n        blueprints = Blueprints.load(stream)\n        blueprints._prepConstruction(cs)\n\n    return list(blueprints.assemblies.values())[0]\n\n\ndef buildMixedThreePinAssembly(\n    blockDefs: str = BLOCK_DEFINITIONS_3PIN,\n    assemDef: str = REGULAR_ASSEMBLY_DEF,\n    gridDef: str = GRID_DEFINITION,\n):\n    \"\"\"Builds a hex-shaped mixed-pin assembly for a sodium fast reactor. This assembly consists of 3 pin types\n    arranged as specified in the lattice map.\n    \"\"\"\n    completeBlueprints = blockDefs + assemDef + gridDef\n    cs = Settings()\n    with io.StringIO(completeBlueprints) as stream:\n        blueprints = Blueprints.load(stream)\n        blueprints._prepConstruction(cs)\n\n    return list(blueprints.assemblies.values())[0]\n"
  },
  {
    "path": "armi/testing/symmetryTesting.py",
    "content": "# Copyright 2025 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nTesting utilities for symmetry.\n\nSymmetry factor usage can be difficult to verify across multiple plugins, and plugins may write one-off fixes for\nsituations involving the symmetry factor. The utilities provided here are an attempt to catch symmetry factor issues\nat the unit test level, rather than during integration tests.\n\nThe goal of this utility is to test symmetry intent, not functionality. This means individual implementations of\nsymmetry-aware operations are still responsible for testing the implementation. 
This module serves as a check that the\nparameters that are expected to change with symmetry do indeed change.\n\nThis might be obvious, but this test CANNOT detect errors where the parameter is not either:\n\n    1) Labeled as a symmetry-aware parameter in the parameter definition.\n    2) Labeled as a symmetry-aware parameter in the test.\n\nFailing to do at least one of the above will result in passing symmetry tests.\n\nThe tests here use the `growToFullCore` since that should be one of the most mature symmetry-aware operations.\n\nThis module provides the `BasicArmiSymmetryTestHelper` which is meant to be inherited into a downstream unit test.\nThe test helper uses the `SymmetryFactorTester` to handle the bookkeeping tasks associated with testing symmetry.\n\"\"\"\n\nimport unittest\nfrom contextlib import contextmanager\nfrom typing import TYPE_CHECKING, Any, Iterable, Union\n\nfrom armi.testing import loadTestReactor\n\nif TYPE_CHECKING:\n    from armi.reactor import Core, parameters\n    from armi.reactor.assemblies import Assembly\n    from armi.reactor.blocks import Block\n\n\nclass BasicArmiSymmetryTestHelper(unittest.TestCase):\n    \"\"\"\n    Customizable test runner for symmetry-intent audit.\n\n    This class is meant to be customized in a plugin to check the plugin-specific symmetry-aware parameters.\n\n    To use the test fixture, make a subclass test and assign the `*ParamsToTest` and `expectedSymmetric*` attributes in\n    the `setUp` method of the subclass. 
The subclass must have `super.setUp()` in it's `setUp` method at some point\n    after the necessary plugin attributes are assigned.\n\n    It should generally not be necessary for the plugin to implement any further unit tests, the parent class contains\n    a test method that should adequately verify the the expected symmetric parameters are indeed expanded.\n\n    Attributes\n    ----------\n    coreParamsToTest : Iterable[str] | armi.reactor.parameters.parameterDefinitionCollection, optional\n        Core parameters that should be initialized and tested.\n    assemblyParamsToTest : Iterable[str] | armi.reactor.parameters.parameterDefinitionCollection, optional\n        Assembly parameters that should be initialized and tested.\n    blockParamsToTest : Iterable[str] | armi.reactor.parameters.parameterDefinitionCollection, optional\n        Block parameters that should be initialized and tested.\n    expectedSymmetricCoreParams : Iterable[str], optional\n        Core parameters that are expected to change with symmetry.\n    expectedSymmetricAssemblyParams : Iterable[str], optional\n        Assembly parameters that are expected to change with symmetry.\n    expectedSymmetricBlockParams : Iterable[str], optional\n        Block Parameters that are expected to change with symmetry.\n    parameterOverrides : dict[str: Any], optional\n        Dictionary of specific values to assign to a particular parameter. Useful for parameters that have validators.\n    paramsToIgnore : Iterable[str], optional\n        Parameter names to ignore the comparison results for.\n    customSettings : dict[str: Any]\n        Dictionary of custom settings that is passed to the test reactor builder. Useful for disabling features that\n        require additional input and are not useful for the symmetry audit.\n\n    Example\n    -------\n    class MySymmetryTest(symmetryTesting.BasicArmiSymmetryTestHelper):\n        def setUp():\n            # Tests are configured using attributes. 
Attributes must be set prior to calling super.setUp()\n            # Note that it is not required to set any attributes, all have empty defaults\n\n            # Repeat for self.coreParamsToTest and self.assemblyParamsToTest as necessary:\n            self.blockParamsToTest = [p if isinstance(p, str) else p.name for p in getPluginBlockParameterDefinitions()]\n\n            # Repeat for self.expectedSymmetricCoreParams and self.expectedSymmetricAssemblyParams as necessary:\n            self.expectedSymmetricBlockParams = [\"mySymmetricBlockParam1\", \"mySymmetricBlockParam2\"]\n\n            # Set specific parameter overrides if the parameters need a specific value (usually due to input validators)\n            self.parameterOverrides = {\"parameterName1\": value1, \"parameterName2\": value2}\n\n            # Set specific parameters to ignore in comparison.\n            self.paramsToIgnore = [\"myIgnoredParameter\"]\n\n            # Finish setting up the tests by calling the parent's `setUp` method.\n            super.setUp()\n    \"\"\"\n\n    def __init__(self, methodName=\"runTest\"):\n        self.coreParamsToTest = []\n        self.assemblyParamsToTest = []\n        self.blockParamsToTest = []\n        self.expectedSymmetricCoreParams = []\n        self.expectedSymmetricAssemblyParams = []\n        self.expectedSymmetricBlockParams = []\n        self.parameterOverrides = {}\n        self.paramsToIgnore = []\n        self.customSettings = {}\n        super().__init__(methodName)\n\n    def setUp(self):\n        self._preprocessPluginParams()\n        self.symTester = SymmetryFactorTester(self)\n\n    def _preprocessPluginParams(self):\n        \"\"\"Parameters can be provided as string names or whole parameter objects, need to convert to string name.\"\"\"\n        self.coreParamsToTest = [p if isinstance(p, str) else p.name for p in self.coreParamsToTest]\n        self.assemblyParamsToTest = [p if isinstance(p, str) else p.name for p in 
self.assemblyParamsToTest]\n        self.blockParamsToTest = [p if isinstance(p, str) else p.name for p in self.blockParamsToTest]\n        self.expectedSymmetricCoreParams = [\n            p if isinstance(p, str) else p.name for p in self.expectedSymmetricCoreParams\n        ]\n        self.expectedSymmetricAssemblyParams = [\n            p if isinstance(p, str) else p.name for p in self.expectedSymmetricAssemblyParams\n        ]\n        self.expectedSymmetricBlockParams = [\n            p if isinstance(p, str) else p.name for p in self.expectedSymmetricBlockParams\n        ]\n\n    def test_defaultSymmetry(self):\n        self.symTester.runSymmetryFactorTests(\n            expectedCoreParams=self.expectedSymmetricCoreParams,\n            expectedAssemblyParams=self.expectedSymmetricAssemblyParams,\n            expectedBlockParams=self.expectedSymmetricBlockParams,\n        )\n\n\nclass SymmetryFactorTester:\n    \"\"\"\n    A test runner for symmetry factors.\n\n    This class does the actual symmetry testing, but there is a lot of bookkeeping that isn't important to expose in the\n    test helper class so putting it here helps keep the BasicArmiSymmetryTestHelper clean.\n    \"\"\"\n\n    def __init__(self, armiSymmetryTester: BasicArmiSymmetryTestHelper):\n        self.o, self.r = loadTestReactor(customSettings=armiSymmetryTester.customSettings)\n        self.core = self.r.core\n        # there is exactly one assembly with 3-symmetry in the test core\n        self.partialAssembly = [a for a in self.r.core.getAssemblies() if a.getSymmetryFactor() == 3][0]\n        self.partialBlock = self.partialAssembly.getBlocks()[0]\n        # expectedSymmetry describes the ratio of (post-expansion / pre-expansion) values\n        self.expectedSymmetryRatio = 3\n        self.defaultParameterValue = 2\n        # some parameters have validation on their inputs and need specific settings\n        self.parameterOverrides = armiSymmetryTester.parameterOverrides\n\n        
self.testObject = armiSymmetryTester\n        self.coreParamsToTest = armiSymmetryTester.coreParamsToTest\n        self.assemblyParamsToTest = armiSymmetryTester.assemblyParamsToTest\n        self.blockParamsToTest = armiSymmetryTester.blockParamsToTest\n        self._initializeCore()\n        self._initializeAssembly()\n        self._initializeBlock()\n\n        # Some parameters change because of symmetry but are not \"volume integrated\"\n        # so this marks them for skipping in the compare.\n        # Also allows plugins the flexibility to skip some parameters if needed.\n        self.paramsToIgnore = armiSymmetryTester.paramsToIgnore\n\n    @staticmethod\n    def _getParameters(obj: object, paramList: Iterable[str]):\n        return {param: obj.p[param] for param in paramList}\n\n    @staticmethod\n    def _getParamNamesFromDefs(pdefs: \"parameters.ParameterDefinitionCollection\"):\n        return set([p.name for p in pdefs])\n\n    def _initializeCore(self):\n        self._initializeParameters(self.coreParamsToTest, self.core)\n\n    def _initializeAssembly(self):\n        self._initializeParameters(self.assemblyParamsToTest, self.partialAssembly)\n\n    def _initializeBlock(self):\n        self._initializeParameters(self.blockParamsToTest, self.partialBlock)\n\n    def _initializeParameters(self, parameterNames, obj: Union[\"Core\", \"Assembly\", \"Block\"]):\n        \"\"\"\n        Load values into each parameter.\n\n        The values generally do not need to be the correct types (see Notes) because this test fixture is for auditing\n        intent, not capability. 
The capability of the expansion functions to expand different types correctly should be\n        part of the tests for those functions.\n\n        Parameters\n        ----------\n        parameterNames : Iterable[str]\n            Iterable of string parameter names to initialize on the object.\n        obj : armi.reactor.Core | armi.reactor.assemblies.Assembly | armi.reactor.blocks.Block\n            The object on which to initialize parameter values.\n\n        Notes\n        -----\n        Some parameters are specifically adjusted here because inspecting their types does not yield usable results\n        for setting the values. Current specific settings are:\n        xsType: must be an iterable of strings.\n        xsTypeNum: must be an integer corresponding to an ASCII character in the range of what is acceptable for xsType.\n        notes: must be a string with length less than 1000 characters.\n        \"\"\"\n        for p in parameterNames:\n            name = str(p)\n            if name in self.parameterOverrides.keys():\n                obj.p[name] = self.parameterOverrides[name]\n            else:\n                obj.p[name] = self.defaultParameterValue\n\n    def _compareParameters(\n        self,\n        referenceParameters: dict[str:Any],\n        perturbedParameters: dict[str:Any],\n        expectedParameters: Iterable[str],\n        scopeName: str,\n    ):\n        \"\"\"\n        Run the comparison of reference parameters vs the perturbed parameters.\n\n        Tests:\n            1. Parameters that change after core expansion are in the list of parameters expected to change.\n            2. 
All parameters in the list of parameters expected to change do indeed change by the expected ratio.\n        \"\"\"\n        for paramName, perturbedValue in perturbedParameters.items():\n            referenceValue = referenceParameters[paramName]\n            if referenceValue != perturbedValue and paramName not in self.paramsToIgnore:\n                self.testObject.assertIn(\n                    paramName,\n                    expectedParameters,\n                    f\"The value of {paramName} on the {scopeName} changed from {referenceValue} to {perturbedValue} but\"\n                    \" is not specified in the parameters expected to change.\",\n                )\n            if paramName in expectedParameters:\n                ratio = perturbedParameters[paramName] / referenceParameters[paramName]\n                self.testObject.assertEqual(\n                    ratio,\n                    self.expectedSymmetryRatio,\n                    f\"The after-to-before expansion ratio of parameter '{paramName}' was expected to be \"\n                    f\"{self.expectedSymmetryRatio} but was instead {ratio} for the {scopeName}.\",\n                )\n\n    @contextmanager\n    def _checkCore(self, expectedParams: Iterable[str]):\n        coreReferenceParameters = self._getParameters(self.core, self.coreParamsToTest)\n        yield  # yield to allow the core to be expanded\n        corePerturbedParameters = self._getParameters(self.core, self.coreParamsToTest)\n        self._compareParameters(coreReferenceParameters, corePerturbedParameters, expectedParams, \"core\")\n\n    @contextmanager\n    def _checkAssembly(self, expectedParams: Iterable[str]):\n        assemblyReferenceParameters = self._getParameters(self.partialAssembly, self.assemblyParamsToTest)\n        yield  # yield to allow the core to be expanded\n        assemblyPerturbedParameters = self._getParameters(self.partialAssembly, self.assemblyParamsToTest)\n        
self._compareParameters(assemblyReferenceParameters, assemblyPerturbedParameters, expectedParams, \"assembly\")\n\n    @contextmanager\n    def _checkBlock(self, expectedParams: Iterable[str]):\n        blockReferenceParameters = self._getParameters(self.partialBlock, self.blockParamsToTest)\n        yield  # yield to allow the core to be expanded\n        blockPerturbedParameters = self._getParameters(self.partialBlock, self.blockParamsToTest)\n        self._compareParameters(blockReferenceParameters, blockPerturbedParameters, expectedParams, \"block\")\n\n    def runSymmetryFactorTests(\n        self,\n        expectedCoreParams: Iterable[str] = [],\n        expectedAssemblyParams: Iterable[str] = [],\n        expectedBlockParams: Iterable[str] = [],\n    ):\n        \"\"\"\n        Runs tests on how symmetry factors apply to parameters during partial-to-full core coversions and vice-versa.\n\n        This method provides a convenient way for plugins to test that symmetry factors are applied correctly to flagged\n        parameters when the core is converted.\n\n        Parameters\n        ----------\n        testObject : unittest.TestCase\n            The TestCase object is injected to give this fixture the ability to do unittest asserts without causing\n            the fixture itself to be run as a unit test.\n        coreParams : Iterable[str], optional\n            Dictionary of core parameters that the user expects to be symmetry aware.\n        assemblyParams : Iterable[str], optional\n            Dictionary of assembly parameters that the user expects to be symmetry aware.\n        blockParams : Iterable[str], optional\n            Dictionary of block parameters that the user expects to be symmetry aware.\n        \"\"\"\n        with (\n            self._checkCore(expectedCoreParams),\n            self._checkAssembly(expectedAssemblyParams),\n            self._checkBlock(expectedBlockParams),\n        ):\n            converter = 
self.r.core.growToFullCore(self.o.cs)\n        self.expectedSymmetryRatio = 1 / 3\n        with (\n            self._checkCore(expectedCoreParams),\n            self._checkAssembly(expectedAssemblyParams),\n            self._checkBlock(expectedBlockParams),\n        ):\n            converter.restorePreviousGeometry()\n"
  },
  {
    "path": "armi/testing/tests/__init__.py",
    "content": "# Copyright 2026 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"
  },
  {
    "path": "armi/testing/tests/test_symmetryTesting.py",
    "content": "# Copyright 2025 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for the symmetry testing fixture.\"\"\"\n\nfrom armi.testing import symmetryTesting\n\n\nclass SymmetryTestFixtureTester(symmetryTesting.BasicArmiSymmetryTestHelper):\n    \"\"\"Run the basic symmetry test helper with some input known to raise errors.\"\"\"\n\n    def setUp(self):\n        self.blockParamsToTest = [\"zbottom\", \"massHmBOL\"]\n        self.expectedSymmetricBlockParams = [\"massHmBOL\"]\n        self.parameterOverrides = {\"xsType\": [\"A\"], \"xsTypeNum\": 65, \"notes\": \"\"}\n        return super().setUp()\n\n    def test_errorWhenExpandedButNotRequested(self):\n        if (\n            len(\n                self.expectedSymmetricCoreParams\n                + self.expectedSymmetricAssemblyParams\n                + self.expectedSymmetricBlockParams\n            )\n            > 0\n        ):\n            with self.assertRaises(AssertionError) as err:\n                self.symTester.runSymmetryFactorTests()\n                self.assertIn(f\"The value of {self.expectedSymmetricBlockParams} on the\", err.msg)\n\n    def test_errorWhenRequestedButNotExpanded(self):\n        with self.assertRaises(AssertionError) as err:\n            targetParam = self.blockParamsToTest[0]\n            self.symTester.runSymmetryFactorTests(expectedBlockParams=targetParam)\n            self.assertIn(f\"The after-to-before expansion ratio of parameter 
'{targetParam}'\", err.msg)\n"
  },
  {
    "path": "armi/tests/1DslabXSByCompTest.yaml",
    "content": "nuclide flags:\n  NA23: {burn: false, xs: true}\n  FE: {burn: false, xs: true}\n  U235: {burn: false, xs: true}\n  U238: {burn: false, xs: true}\n  PU239: {burn: false, xs: true}\n  PU240: {burn: false, xs: true}\n  PU241: {burn: false, xs: true}\ncustom isotopics:\n  eUranium:\n    input format: number densities\n    U235: 0.025\n    U238: 0.02\n  PuUranium:\n    input format: number densities\n    PU239: 0.02\n    PU240: 0.0075\n    PU241: 0.0025\n    U238: 0.015\n  depletedUranium:\n    input format: number densities\n    U238: 0.045\n  sodium:\n    input format: number densities\n    NA23: 0.02\n  structuralSteel:\n    input format: number densities\n    FE: 0.07\n  eUraniumHalf:\n    input format: number densities\n    U235: 0.0125\n    U238: 0.01\nblocks:\n  eu fuel block: &block_eufuelblock\n    depleted_uranium: &component_eufuelblock_depleted_uranium\n      shape: SolidRectangle\n      material: Custom\n      Tinput: 20.0\n      Thot: 20.0\n      isotopics: depletedUranium\n      lengthOuter: 1.0\n      mult: 1.0\n      widthOuter: 1.0\n    enriched_uranium fuel:\n      shape: SolidRectangle\n      material: Custom\n      Tinput: 20.0\n      Thot: 20.0\n      isotopics: eUranium\n      lengthOuter: 1.0\n      mult: 1.0\n      widthOuter: 2.0\n    sodium: &component_eufuelblock_sodium\n      shape: SolidRectangle\n      material: Custom\n      Tinput: 20.0\n      Thot: 20.0\n      isotopics: sodium\n      lengthOuter: 1.0\n      mult: 1.0\n      widthOuter: 1.0\n    iron: &component_eufuelblock_iron\n      shape: SolidRectangle\n      material: Custom\n      Tinput: 20.0\n      Thot: 20.0\n      isotopics: structuralSteel\n      lengthOuter: 1.0\n      mult: 1.0\n      widthOuter: 4.0\n    latticeboundarycell: &component_eufuelblock_latticeboundarycell\n      shape: Rectangle\n      material: Void\n      Tinput: 20.0\n      Thot: 20.0\n      lengthInner: 1.0\n      lengthOuter: 1.0\n      mult: 1.0\n      widthInner: 8.0\n      widthOuter: 
8.0\n  reversedeu fuel block: &block_reversedeufuelblock\n    iron: *component_eufuelblock_iron\n    sodium: *component_eufuelblock_sodium\n    enriched_uranium fuel:\n      shape: SolidRectangle\n      material: Custom\n      Tinput: 20.0\n      Thot: 20.0\n      isotopics: eUraniumHalf\n      lengthOuter: 1.0\n      mult: 1.0\n      widthOuter: 2.0\n    depleted_uranium: *component_eufuelblock_depleted_uranium\n    latticeboundarycell: *component_eufuelblock_latticeboundarycell\n  inheritseublocks: &block_inheritseublocks\n    sodium:\n      shape: SolidRectangle\n      material: Custom\n      Tinput: 20.0\n      Thot: 20.0\n      isotopics: sodium\n      lengthOuter: 1.0\n      mult: 1.0\n      widthOuter: 0.5\n    pu(fuel):\n      shape: SolidRectangle\n      material: Custom\n      Tinput: 20.0\n      Thot: 20.0\n      isotopics: PuUranium\n      lengthOuter: 1.0\n      mult: 1.0\n      widthOuter: 3.0\n    iron:\n      shape: SolidRectangle\n      material: Custom\n      Tinput: 20.0\n      Thot: 20.0\n      isotopics: structuralSteel\n      lengthOuter: 1.0\n      mult: 1.0\n      widthOuter: 1.0\n    pu(fuel)2:\n      shape: SolidRectangle\n      material: Custom\n      Tinput: 20.0\n      Thot: 20.0\n      isotopics: PuUranium\n      lengthOuter: 1.0\n      mult: 1.0\n      widthOuter: 0.5\n    iron2:\n      shape: SolidRectangle\n      material: Custom\n      Tinput: 20.0\n      Thot: 20.0\n      isotopics: structuralSteel\n      lengthOuter: 1.0\n      mult: 1.0\n      widthOuter: 3.0\n    latticeboundarycell: *component_eufuelblock_latticeboundarycell\n  blanket fuel block: &block_blanketfuelblock\n    depleted_uranium fuel 1: *component_eufuelblock_depleted_uranium\n    sodium:\n      shape: SolidRectangle\n      material: Custom\n      Tinput: 20.0\n      Thot: 20.0\n      isotopics: sodium\n      lengthOuter: 1.0\n      mult: 1.0\n      widthOuter: 6.0\n    depleted_uranium(fuel)2: *component_eufuelblock_depleted_uranium\n    latticeboundarycell: 
*component_eufuelblock_latticeboundarycell\n  reflectorblockinheritsblanket: &block_reflectorblockinheritsblanket\n    iron:\n      shape: SolidRectangle\n      material: Custom\n      Tinput: 20.0\n      Thot: 20.0\n      isotopics: structuralSteel\n      lengthOuter: 1.0\n      mult: 1.0\n      widthOuter: 8.0\n    latticeboundarycell: *component_eufuelblock_latticeboundarycell\nassemblies:\n  heights: &standard_heights [10.0, 30.0, 30.0, 15.0, 15.0, 30.0, 30.0, 10.0]\n  axial mesh points: &standard_axial_mesh_points [1, 2, 2, 1, 1, 2, 2, 1]\n  feed fuel:\n    specifier: D1\n    blocks:\n      [\n        *block_reflectorblockinheritsblanket,\n        *block_blanketfuelblock,\n        *block_eufuelblock,\n        *block_inheritseublocks,\n        *block_reversedeufuelblock,\n        *block_eufuelblock,\n        *block_blanketfuelblock,\n        *block_reflectorblockinheritsblanket,\n      ]\n    height: *standard_heights\n    axial mesh points: *standard_axial_mesh_points\n    xs types: &feed_fuel_xs_types [AZ, AZ, AA, AA, AA, AA, AZ, AZ]\n  drawerset2:\n    specifier: D2\n    blocks:\n      [\n        *block_reflectorblockinheritsblanket,\n        *block_blanketfuelblock,\n        *block_inheritseublocks,\n        *block_eufuelblock,\n        *block_reversedeufuelblock,\n        *block_reversedeufuelblock,\n        *block_blanketfuelblock,\n        *block_reflectorblockinheritsblanket,\n      ]\n    height: *standard_heights\n    axial mesh points: *standard_axial_mesh_points\n    xs types: *feed_fuel_xs_types\n  NotInCoreButGetBlocks:\n    specifier: NotInCore\n    blocks:\n      [\n        *block_reflectorblockinheritsblanket,\n        *block_blanketfuelblock,\n        *block_eufuelblock,\n        *block_inheritseublocks,\n        *block_reversedeufuelblock,\n        *block_eufuelblock,\n        *block_blanketfuelblock,\n        *block_reflectorblockinheritsblanket,\n      ]\n    height: *standard_heights\n    axial mesh points: *standard_axial_mesh_points\n    
xs types: [AZ, AZ, AC, AC, AC, AC, AZ, AZ]\n"
  },
  {
    "path": "armi/tests/__init__.py",
    "content": "# Copyright 2021 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nGeneral framework-wide testing functions and files.\n\nThis package contains some input files that can be used across a wide variety of unit tests in other lower-level\nsubpackages.\n\"\"\"\n\nimport datetime\nimport itertools\nimport os\nimport unittest\nfrom typing import Optional\n\nfrom armi import runLog\nfrom armi.testing import (  # noqa: F401\n    ARMI_RUN_PATH,\n    COMPXS_PATH,\n    ISOAA_PATH,\n    getEmptyCartesianReactor,\n    getEmptyHexReactor,\n)\n\nTEST_ROOT = os.path.dirname(os.path.abspath(__file__))\nTESTING_ROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"..\", \"testing\")\n\n\nclass Fixture:\n    \"\"\"\n    Fixture for presenting a consistent data source for testing.\n\n    A Fixture is a class that wraps a function which generates resources needed by one or more tests that does not need\n    to be updated every time tests are run.\n\n    Do not use this class directly, instead use the :code:`@fixture` and :code:`@requires_fixture` decorators.\n    \"\"\"\n\n    def __init__(self, refDirectory, targets, dependencies, function):\n        def resolvePath(relativePath):\n            absolutePath = os.path.abspath(relativePath)\n            if absolutePath != relativePath:\n                absolutePath = os.path.join(refDirectory, relativePath)\n            return absolutePath\n\n        self.targets = [resolvePath(t) 
for t in targets]\n        self.dependencies = [resolvePath(d) for d in dependencies]\n        self._function = function\n        self._isUpToDate = None\n        self.__name__ = function.__name__\n        self.__doc__ = function.__doc__\n        self._error = None\n        self._success = False\n        self.status = None\n\n    def __repr__(self):\n        return f\"{self._function.__module__}.{self.__name__}\"\n\n    def __call__(self):\n        if self._error is not None:\n            raise self._error\n        elif not self._success:\n            missingDependencies = [d for d in self.dependencies if not os.path.exists(d)]\n            if any(missingDependencies):\n                self._error = EnvironmentError(\n                    \"Missing dependencies:\\n    {}\".format(\"\\n    \".join(missingDependencies))\n                )\n                raise self._error\n\n            # at this point we need to update because\n            # 1) there are missing targets that need to be generated, or\n            # 2) targets are older than the dependencies.\n            missingTargets = [t for t in self.targets if not os.path.exists(t)]\n            needToUpdate = any(missingTargets)\n            if any(missingTargets):\n                runLog.important(\"Fixture is missing targets {}\\n    {}\".format(self, \"\\n    \".join(missingTargets)))\n            if not needToUpdate:\n                # this doesn't need to run if there are any missing targets.\n                oldestTarget = sorted((os.path.getmtime(t), t) for t in self.targets)[0]\n                newestDependency = sorted((os.path.getmtime(d), d) for d in self.dependencies)[-1]\n                needToUpdate = newestDependency[0] > oldestTarget[0]\n                if needToUpdate:\n                    targetTime = datetime.datetime.fromtimestamp(oldestTarget[0])\n                    dependencyTime = datetime.datetime.fromtimestamp(newestDependency[0])\n                    runLog.important(\n                
        \"Fixture is out of date {}\\noldest target:     {} {}\\nnewest dependency: {} {}\".format(\n                            self,\n                            targetTime,\n                            oldestTarget[1],\n                            dependencyTime,\n                            newestDependency[1],\n                        )\n                    )\n            if needToUpdate:\n                runLog.important(f\"Running test fixture: {self}\")\n                try:\n                    self._function()\n                except Exception as ee:\n                    self._error = ee\n                    raise\n            else:\n                runLog.important(f\"Skipping test fixture: {self}\")\n\n        runLog.important(f\"Fixture is up to date: {self}\")\n        self._success = True\n\n\ndef fixture(refDirectory=None, targets=None, dependencies=None):\n    \"\"\"\n    Decorator to run function based on targets and dependencies similar to GNU Make.\n\n    Parameters\n    ----------\n    refDirectory : str\n        String reference directory for all targets/dependencies. 
This makes it possible to simplify file paths.\n        If ``os.path.abspath(<path>) == <path>``, then refDirectory is not used.\n    targets : iterable(str)\n        List of targets that the function generates.\n    dependencies : iterable(str)\n        List of dependencies that the ``targets`` require.\n    \"\"\"\n\n    def _decorator(makeFunction):\n        return Fixture(refDirectory, targets, dependencies, makeFunction)\n\n    return _decorator\n\n\ndef requires_fixture(fixtureFunction):\n    \"\"\"\n    Decorator to require a fixture to have been completed.\n\n    Parameters\n    ----------\n    fixtureFunction : function without any parameters\n        Fixture function is a function that has been decorated with fixture and is called prior to running the decorated\n        function.\n\n    Notes\n    -----\n    This cannot be used on classes.\n    \"\"\"\n\n    def _decorator(func):\n        def _callWrapper(*args, **kwargs):\n            fixtureFunction()\n            func(*args, **kwargs)\n\n        return _callWrapper\n\n    return _decorator\n\n\nclass ArmiTestHelper(unittest.TestCase):\n    \"\"\"Class containing common testing methods shared by many tests.\"\"\"\n\n    def compareFilesLineByLine(self, expectedFilePath, actualFilePath, falseNegList=None, eps=None):\n        \"\"\"\n        Compare the contents of two files line by line.\n\n        .. warning:: The file located at actualFilePath will be deleted if they do match.\n\n        Some tests write text files that should be compared line-by-line with reference files. This method performs the\n        comparison.\n\n        This class of test is not ideal but does cover a lot of functionality quickly. 
To assist in the maintenance\n        burden, the following standards are expected and enforced:\n\n        * The reference file compared against will be called either ``[name]-ref.[ext]`` or ``[name].expected``.\n        * The file that the test creates will be called ``[name]-test.[ext]`` or ``[name]``.\n\n        Parameters\n        ----------\n        expectedFilePath: str\n            Path to the reference or expected file\n        actualFilePath: str\n            Path to the file that will be compared to ``expectedFilePath``\n        falseNegList: None or Iterable\n            Optional argument. If two lines are not equal, then check if any values from ``falseNegList`` are in this\n            line. If so, do not fail the test.\n        eps: float, optional\n            If provided, try to determine if the only difference between compared lines is in the value of something\n            that can be parsed into a float, and the relative difference between the two floats is below the passed eps.\n        \"\"\"\n        if falseNegList is None:\n            falseNegList = []\n        elif isinstance(falseNegList, str):\n            falseNegList = [falseNegList]\n\n        with open(expectedFilePath, \"r\") as expected, open(actualFilePath, \"r\") as actual:\n            for lineIndex, (expectedLine, actualLine) in enumerate(itertools.zip_longest(expected, actual)):\n                if expectedLine is None:\n                    raise AssertionError(\"The test-generated file is longer than expected file\")\n                if actualLine is None:\n                    raise AssertionError(\"The test-generated file is shorter than expected file\")\n\n                if not self.compareLines(actualLine, expectedLine, eps):\n                    if any(falseNeg in line for falseNeg in falseNegList for line in (actualLine, expectedLine)):\n                        pass\n                    else:\n                        raise AssertionError(\n                            
\"Error on line {}:\\nE>{}\\nA<{}\".format(\n                                lineIndex, expectedLine.rstrip(), actualLine.rstrip()\n                            )\n                        )\n\n        os.remove(actualFilePath)\n\n    @staticmethod\n    def compareLines(actual: str, expected: str, eps: Optional[float] = None):\n        \"\"\"\n        Impl of line comparison for compareFilesLineByLine.\n\n        If rstripped lines are equal -> Good. Otherwise, split on whitespace and try to parse element pairs as floats.\n        If they are both parsable, compare with relative eps, if provided. A side effect of the epsilon comparison is\n        that differing whitespace between words is treated as irrelevant.\n        \"\"\"\n        actual = actual.rstrip()\n        expected = expected.rstrip()\n\n        if actual == expected:\n            return True\n\n        if eps is None:\n            # no more in-depth comparison is allowed\n            return False\n\n        actualWords = actual.split()\n        expectedWords = expected.split()\n\n        if len(actualWords) != len(expectedWords):\n            # different number of words can't possibly be the same enough\n            return False\n\n        for actualWord, expectedWord in zip(actualWords, expectedWords):\n            actualVal = ArmiTestHelper._tryFloat(actualWord)\n            expectedVal = ArmiTestHelper._tryFloat(expectedWord)\n\n            if (actualVal is None) ^ (expectedVal is None):\n                # could not coerce both words into a float, so they cannot possibly match\n                return False\n\n            if actualVal is not None:\n                # we have two floats and can compare them\n                if actualVal == expectedVal == 0:\n                    continue\n                elif abs(actualVal - expectedVal) / expectedVal > eps:\n                    return False\n            else:\n                # strings, compare directly\n                if actualWord != expectedWord:\n 
                   return False\n\n        # The lines should match.\n        return True\n\n    @staticmethod\n    def _tryFloat(val: str) -> Optional[float]:\n        try:\n            return float(val)\n\n        except ValueError:\n            return None\n"
  },
  {
    "path": "armi/tests/armiRun.yaml",
    "content": "settings:\n# global\n  availabilityFactor: 1\n  beta: 0.003454\n  branchVerbosity: debug\n  buGroups:\n    - 100\n  burnSteps: 2\n  comment: Simple test input.\n  cycleLength: 2000.0\n  detailAssemLocationsBOL:\n    - 002-001\n  freshFeedType: igniter fuel\n  loadingFile: refSmallReactor.yaml\n  moduleVerbosity:\n    armi.reactor.reactors: info\n  nCycles: 6\n  outputFileExtension: png\n  power: 100000000.0\n  rmExternalFilesAtEOL: true\n  startCycle: 1\n  startNode: 2\n  targetK: 1.002\n  verbosity: extra\n  versions:\n    armi: uncontrolled\n\n# cross section\n  crossSectionControl:\n    DA:\n      geometry: 0D\n      blockRepresentation: Median\n      criticalBuckling: true\n      externalDriver: true\n      useHomogenizedBlockComposition: false\n      numInternalRings: 1\n      numExternalRings: 1\n    UA:\n      geometry: 1D cylinder\n      blockRepresentation: ComponentAverage1DCylinder\n      validBlockTypes:\n        - fuel\n      externalDriver: false\n      mergeIntoClad:\n        - gap2\n        - inner liner\n        - gap3\n        - outer liner\n        - gap4\n      mergeIntoFuel:\n        - gap1\n      numInternalRings: 1\n      numExternalRings: 1\n    XA:\n      xsFileLocation:\n        - ISOXA\n    YA:\n      geometry: 0D\n      fluxFileLocation: rzmflxYA\n    ZA:\n      geometry: 1D cylinder\n      blockRepresentation: ComponentAverage1DCylinder\n      validBlockTypes:\n        - fuel\n      externalDriver: false\n      mergeIntoClad:\n        - gap\n      numInternalRings: 1\n      numExternalRings: 1\n\n# database\n  db: false\n\n# fuel cycle\n  fuelHandlerName: EquilibriumShuffler\n  jumpRingNum: 9\n  shuffleLogic: refSmallReactorShuffleLogic.py\n\n# neutronics\n  epsFSAvg: 1e-06\n  epsFSPoint: 1e-06\n  loadPadElevation: 200.0\n\n# report\n  genReports: false\n  summarizeAssemDesign: false"
  },
  {
    "path": "armi/tests/detailedAxialExpansion/armiRun.yaml",
    "content": "settings:\n# global\n  beta: 0.003454\n  branchVerbosity: debug\n  buGroups:\n    - 100\n  burnSteps: 2\n  comment: Simple test input with detailed axial expansion.\n  cycleLength: 2000.0\n  detailAssemLocationsBOL:\n    - 002-001\n  detailedAxialExpansion: true\n  freshFeedType: igniter fuel\n  loadingFile: refSmallReactor.yaml\n  moduleVerbosity:\n    armi.reactor.reactors: info\n  nCycles: 6\n  outputFileExtension: png\n  power: 100000000.0\n  startNode: 1\n  targetK: 1.002\n  verbosity: extra\n  versions:\n    armi: uncontrolled\n\n# cross section\n  crossSectionControl:\n    DA:\n      geometry: 0D\n      blockRepresentation: Median\n      criticalBuckling: true\n      externalDriver: true\n      useHomogenizedBlockComposition: false\n      numInternalRings: 1\n      numExternalRings: 1\n    XA:\n      xsFileLocation:\n        - ISOXA\n\n# database\n  db: false\n\n# fuel cycle\n  fuelHandlerName: EquilibriumShuffler\n  jumpRingNum: 9\n\n# fuel performance\n  axialExpansion: true\n\n# neutronics\n  epsFSAvg: 1e-06\n  epsFSPoint: 1e-06\n  loadPadElevation: 162.5\n\n# report\n  genReports: false\n  summarizeAssemDesign: false\n"
  },
  {
    "path": "armi/tests/detailedAxialExpansion/refSmallCoreGrid.yaml",
    "content": "core:\n  geom: hex\n  symmetry: third periodic\n  lattice map: |\n    SH\n      AF\n    MC  SH\n      LA\n    PC  IC\n\ntwoPin:\n  geom: hex_corners_up\n  symmetry: full\n  lattice map: |\n    - - -  1 1 1 1\n      - - 1 1 2 1 1\n       - 1 1 2 2 1 1\n        1 1 2 2 2 1 1\n         1 1 2 2 1 1\n          1 1 2 1 1\n           1 1 1 1\n"
  },
  {
    "path": "armi/tests/detailedAxialExpansion/refSmallReactor.yaml",
    "content": "!include refSmallReactorBase.yaml\nsystems:\n    core:\n        grid name: core\n        origin:\n            x: 0.0\n            y: 0.0\n            z: 0.0\n    sfp:\n        type: sfp\n        grid name: sfp\n        origin:\n            x: 5000.0\n            y: 5000.0\n            z: 6000.0\ngrids:\n    !include refSmallCoreGrid.yaml\n    sfp:\n        symmetry: full\n        geom: cartesian\n        lattice pitch:\n            x: 50.0\n            y: 50.0\n        grid contents:\n            [0,0]: MC\n            [1,0]: MC\n            [0,1]: MC\n            [1,1]: MC"
  },
  {
    "path": "armi/tests/detailedAxialExpansion/refSmallReactorBase.yaml",
    "content": "custom isotopics:\n    MOX:\n        input format: number densities\n        AM241: 2.3606e-05\n        PU238: 3.7387e-06\n        PU239: 0.00286038\n        PU240: 0.000712945\n        PU241: 9.82312e-05\n        PU242: 2.02221e-05\n        U235: 0.00405533\n        U238: 0.0134125\n    PuUZr:\n        input format: mass fractions\n        density: 9.491820414019937\n        PU239: 0.1\n        U235: 0.15\n        U238: 0.65\n        ZR: 0.1\nblocks:\n    ## ------------------------------------------------------------------------------------\n    ## universal blocks\n    grid plate: &block_grid_plate\n        grid:\n            shape: Hexagon\n            material: HT9\n            Tinput: 25.0\n            Thot: 450.0\n            ip: 15.277\n            mult: 1.0\n            op: 16.577\n        coolant: &component_coolant\n            shape: DerivedShape\n            material: Sodium\n            Tinput: 25.0\n            Thot: 450.0\n        intercoolant:\n            shape: Hexagon\n            material: Sodium\n            Tinput: 25.0\n            Thot: 450.0\n            ip: grid.op\n            mult: 1.0\n            op: 19.0\n\n    duct: &block_duct\n        coolant: *component_coolant\n        duct: &component_duct\n            shape: Hexagon\n            material: HT9\n            Tinput: 25.0\n            Thot: 450.0\n            ip: 18.0\n            mult: 1.0\n            op: 18.5\n        intercoolant: &component_intercoolant\n            shape: Hexagon\n            material: Sodium\n            Tinput: 25.0\n            Thot: 450.0\n            ip: duct.op\n            mult: 1.0\n            op: 19.0\n\n    SodiumBlock: &block_dummy\n        flags: dummy\n        coolant:\n            shape: Hexagon\n            material: Sodium\n            Tinput: 25.0\n            Thot: 450.0\n            ip: 0.0\n            mult: 1.0\n            op: 19.0\n\n    ## 
------------------------------------------------------------------------------------\n    ## fuel blocks\n    axial shield: &block_fuel_axial_shield\n        shield:\n            shape: Circle\n            material: HT9\n            Tinput: 25.0\n            Thot: 600.0\n            id: 0.0\n            mult: 169.0\n            od: 0.86602\n        bond:\n            shape: Circle\n            material: Sodium\n            Tinput: 25.0\n            Thot: 470.0\n            id: shield.od\n            mult: shield.mult\n            od: clad.id\n        clad:\n            shape: Circle\n            material: HT9\n            Tinput: 25.0\n            Thot: 470.0\n            id: 1.0\n            mult: shield.mult\n            od: 1.09\n        wire:\n            shape: Helix\n            material: HT9\n            Tinput: 25.0\n            Thot: 470.0\n            axialPitch: 30.15\n            helixDiameter: 1.19056\n            id: 0.0\n            mult: shield.mult\n            od: 0.10056\n        coolant: *component_coolant\n        duct: *component_duct\n        intercoolant: *component_intercoolant\n\n    axial shield twoPin: &block_fuel_multiPin_axial_shield\n        grid name: twoPin\n        shield: &component_shield_shield1\n            shape: Circle\n            material: HT9\n            Tinput: 25.0\n            Thot: 600.0\n            id: 0.0\n            od: 0.86602\n            latticeIDs: [1]\n        bond: &component_shield_bond1\n            shape: Circle\n            material: Sodium\n            Tinput: 25.0\n            Thot: 470.0\n            id: shield.od\n            od: clad.id\n            latticeIDs: [1]\n        clad: &component_shield_clad1\n            shape: Circle\n            material: HT9\n            Tinput: 25.0\n            Thot: 470.0\n            id: 1.0\n            od: 1.09\n            latticeIDs: [1]\n        wire: &component_shield_wire1\n            shape: Helix\n            material: HT9\n            Tinput: 25.0\n     
       Thot: 470.0\n            axialPitch: 30.15\n            helixDiameter: 1.19056\n            id: 0.0\n            od: 0.10056\n            latticeIDs: [1]\n        shield test:\n            <<: *component_shield_shield1\n            latticeIDs: [2]\n        bond test:\n            <<: *component_shield_bond1\n            id: shield test.od\n            od: clad test.id\n            latticeIDs: [2]\n        clad test:\n            <<: *component_shield_clad1\n            latticeIDs: [2]\n        wire test:\n            <<: *component_shield_wire1\n            latticeIDs: [2]\n        coolant: *component_coolant\n        duct: *component_duct\n        intercoolant: *component_intercoolant\n        axial expansion target component: shield\n\n    fuel: &block_fuelPin\n        fuel: &component_fuel_fuel\n            shape: Circle\n            material: UZr\n            Tinput: 25.0\n            Thot: 600.0\n            id: 0.0\n            mult: 169.0\n            od: 0.86602\n        bond:\n            shape: Circle\n            material: Sodium\n            Tinput: 25.0\n            Thot: 470.0\n            id: fuel.od\n            mult: fuel.mult\n            od: clad.id\n        clad: &component_fuel_clad\n            shape: Circle\n            material: HT9\n            Tinput: 25.0\n            Thot: 470.0\n            id: 1.0\n            mult: fuel.mult\n            od: 1.09\n        wire: &component_fuel_wire\n            shape: Helix\n            material: HT9\n            Tinput: 25.0\n            Thot: 470.0\n            axialPitch: 30.15\n            helixDiameter: 1.19056\n            id: 0.0\n            mult: clad.mult\n            od: 0.10056\n        coolant: *component_coolant\n        duct: *component_duct\n        intercoolant: *component_intercoolant\n\n    fuel lined clad: &block_fuelPin_linedClad\n        fuel:\n            <<: *component_fuel_fuel\n            material: Custom\n            isotopics: MOX\n        bond:\n            shape: 
Circle\n            material: Sodium\n            Tinput: 25.0\n            Thot: 470.0\n            id: fuel.od\n            mult: fuel.mult\n            od: liner.id\n        liner: &component_fuel_liner\n            shape: Circle\n            material: HT9\n            Tinput: 25.0\n            Thot: 470.0\n            id: 0.90\n            mergeWith: clad\n            mult: 169.0\n            od: clad.id\n        clad: *component_fuel_clad\n        wire: *component_fuel_wire\n        coolant: *component_coolant\n        duct: *component_duct\n        intercoolant: *component_intercoolant\n\n    annular fuel lined clad: &block_fuelAnnular_linedClad\n        gap1:\n            shape: Circle\n            material: Void\n            Tinput: 25.0\n            Thot: 600.0\n            id: 0.0\n            mult: fuel.mult\n            od: fuel.id\n        fuel:\n            shape: Circle\n            material: UZr\n            Tinput: 25.0\n            Thot: 600.0\n            id: 0.600\n            mult: 169.0\n            od: 0.86602\n            flags: annular fuel depletable\n        gap:\n            shape: Circle\n            material: Void\n            Tinput: 25.0\n            Thot: 600.0\n            id: fuel.od\n            mult: fuel.mult\n            od: liner.id\n        liner: *component_fuel_liner\n        clad: *component_fuel_clad\n        wire: *component_fuel_wire\n        coolant: *component_coolant\n        duct: *component_duct\n        intercoolant: *component_intercoolant\n\n    fuel twoPin: &block_fuel_multiPin\n        grid name: twoPin\n        fuel: &component_fuelmultiPin\n            shape: Circle\n            material: UZr\n            Tinput: 25.0\n            Thot: 600.0\n            id: 0.0\n            od: 0.86602\n            latticeIDs: [1]\n        bond: &component_fuelmultiPin_bond\n            shape: Circle\n            material: Sodium\n            Tinput: 25.0\n            Thot: 470.0\n            id: fuel.od\n            od: 
clad.id\n            latticeIDs: [1]\n        clad: &component_fuelmultiPin_clad1\n            shape: Circle\n            material: HT9\n            Tinput: 25.0\n            Thot: 470.0\n            id: 1.0\n            od: 1.09\n            latticeIDs: [1]\n        wire: &component_fuelmultiPin_wire1\n            shape: Helix\n            material: HT9\n            Tinput: 25.0\n            Thot: 470.0\n            axialPitch: 30.15\n            helixDiameter: 1.19056\n            id: 0.0\n            od: 0.10056\n            latticeIDs: [1]\n        fuel test: &component_fuelmultiPin_fuel2\n            <<: *component_fuelmultiPin\n            latticeIDs: [2]\n        bond test: &component_fuelmultiPin_bond2\n            <<: *component_fuelmultiPin_bond\n            id: fuel test.od\n            od: clad test.id\n            latticeIDs: [2]\n        clad test: &component_fuelmultiPin_clad2\n            <<: *component_fuelmultiPin_clad1\n            latticeIDs: [2]\n        wire test: &component_fuelmultiPin_wire2\n            <<: *component_fuelmultiPin_wire1\n            latticeIDs: [2]\n        coolant: *component_coolant\n        duct: *component_duct\n        intercoolant: *component_intercoolant\n        axial expansion target component: fuel\n\n    plenum: &block_plenum\n        gap:\n            shape: Circle\n            material: Void\n            Tinput: 25.0\n            Thot: 600.0\n            id: 0.0\n            mult: clad.mult\n            od: clad.id\n        clad:\n            shape: Circle\n            material: HT9\n            Tinput: 25.0\n            Thot: 470.0\n            id: 1.0\n            mult: 169.0\n            od: 1.09\n        wire: *component_fuel_wire\n        coolant: *component_coolant\n        duct: *component_duct\n        intercoolant: *component_intercoolant\n        axial expansion target component: clad\n\n    aclp plenum: &block_aclp\n        <<: *block_plenum\n\n    plenum 2pin: &block_plenum_multiPin\n        grid 
name: twoPin\n        gap: &component_plenummultiPin_gap1\n            shape: Circle\n            material: Void\n            Tinput: 25.0\n            Thot: 600.0\n            id: 0.0\n            od: clad.id\n            latticeIDs: [1]\n        clad: *component_fuelmultiPin_clad1\n        wire: *component_fuelmultiPin_wire1\n        gap test:\n            <<: *component_plenummultiPin_gap1\n            od: clad test.id\n            latticeIDs: [2]\n        clad test: *component_fuelmultiPin_clad2\n        wire test: *component_fuelmultiPin_wire2\n        coolant: *component_coolant\n        duct: *component_duct\n        intercoolant: *component_intercoolant\n        axial expansion target component: clad test\n\n    mixed fuel plenum 2pin: &block_mixed_multiPin\n        grid name: twoPin\n        gap: *component_plenummultiPin_gap1\n        clad: *component_fuelmultiPin_clad1\n        wire: *component_fuelmultiPin_wire1\n        fuel test: *component_fuelmultiPin_fuel2\n        bond test: *component_fuelmultiPin_bond2\n        clad test: *component_fuelmultiPin_clad2\n        wire test: *component_fuelmultiPin_wire2\n        coolant: *component_coolant\n        duct: *component_duct\n        intercoolant: *component_intercoolant\n        axial expansion target component: fuel test\n\n    aclp plenum 2pin: &block_aclp_multiPin\n        <<: *block_plenum_multiPin\n\n    ## ------------------------------------------------------------------------------------\n    ## control\n    moveable duct: &block_ctrl_duct\n        coolant: *component_coolant\n        duct: &component_control_duct\n            shape: Hexagon\n            material: HT9\n            Tinput: 25.0\n            Thot: 450.0\n            ip: 15.277\n            mult: 1.0\n            op: 16.28228\n        intercoolant: &component_control_intercoolant\n            shape: Hexagon\n            material: Sodium\n            Tinput: 25.0\n            Thot: 450.0\n            ip: duct.op\n            mult: 
1.0\n            op: 19.0\n\n    moveable control: &block_control\n        control:\n            shape: Circle\n            material: B4C\n            Tinput: 25.0\n            Thot: 600.0\n            id: 0.0\n            mult: clad.mult\n            od: gap.id\n        gap: &component_control_gap\n            shape: Circle\n            material: Void\n            Tinput: 25.0\n            Thot: 450.0\n            id: 1.286\n            mult: clad.mult\n            od: clad.id\n        clad: &component_control_clad\n            shape: Circle\n            material: HT9\n            Tinput: 25.0\n            Thot: 450.0\n            id: 1.358\n            mult: 61.0\n            od: 1.686\n        wire: &component_control_wire\n            shape: Helix\n            material: HT9\n            Tinput: 25.0\n            Thot: 450.0\n            axialPitch: 50.0\n            helixDiameter: 1.771\n            id: 0.0\n            mult: clad.mult\n            od: 0.085\n        innerDuct: &component_control_innerDuct\n            shape: Hexagon\n            material: HT9\n            Tinput: 25.0\n            Thot: 450.0\n            ip: 14.268\n            mult: 1.0\n            op: 14.582\n        duct: *component_control_duct\n        coolant: *component_coolant\n        intercoolant: *component_control_intercoolant\n\n    moveable plenum: &block_control_plenum\n        gap: *component_control_gap\n        clad: *component_control_clad\n        wire: *component_control_wire\n        coolant: *component_coolant\n        innderDuct: *component_control_innerDuct\n        duct: *component_control_duct\n        intercoolant: *component_control_intercoolant\n\n    ## ------------------------------------------------------------------------------------\n    ## radial shield\n    radial shield: &block_radial_shield\n        shield:\n            shape: Circle\n            material: HT9\n            Tinput: 25.0\n            Thot: 600.0\n            id: 0.0\n            mult: 
169.0\n            od: 0.90362\n        gap: &component_radial_shield_gap\n            shape: Circle\n            material: Void\n            Tinput: 25.0\n            Thot: 450.0\n            id: 0.90362\n            mult: 169.0\n            od: clad.id\n        clad: &component_radial_shield_clad\n            shape: Circle\n            material: HT9\n            Tinput: 25.0\n            Thot: 450.0\n            id: 0.90562\n            mult: 169.0\n            od: 1.05036\n        wire: &component_radial_shield_wire\n            shape: Helix\n            material: HT9\n            Tinput: 25.0\n            Thot: 450.0\n            axialPitch: 30.15\n            helixDiameter: 16.85056\n            id: 0.0\n            mult: 169.0\n            od: 0.10056\n        duct: *component_duct\n        intercoolant: *component_intercoolant\n        coolant: *component_coolant\n\n    radial shield plenum: &block_shield_plenum\n        gap: *component_radial_shield_gap\n        clad: *component_radial_shield_clad\n        wire: *component_radial_shield_wire\n        coolant: *component_coolant\n        duct: *component_duct\n        intercoolant: *component_intercoolant\n\n    radial shield aclp: &block_shield_aclp\n        <<: *block_shield_plenum\n        axial expansion target component: clad # not necessary, but useful for testing coverage\n\nassemblies:\n    heights: &highOffset_height [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n    axial mesh points: &standard_axial_mesh_points [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]\n    igniter fuel:\n        specifier: IC\n        blocks: [*block_grid_plate, *block_fuel_axial_shield, *block_fuelPin, *block_fuelPin, *block_fuelPin, *block_plenum, *block_aclp, *block_plenum, *block_duct, *block_dummy]\n        height: *highOffset_height\n        axial mesh points: *standard_axial_mesh_points\n        material modifications:\n            U235_wt_frac: ['', '', 0.11, 0.11, 0.11, '', '', '', '', '']\n            ZR_wt_frac: ['', '', 
0.06, 0.06, 0.06, '', '', '', '', '']\n        xs types: &igniter_fuel_xs_types [A, A, B, C, C, D, A, A, A, A]\n    middle fuel:\n        specifier: MC\n        blocks: [*block_grid_plate, *block_fuel_axial_shield, *block_fuelPin_linedClad, *block_fuelPin_linedClad, *block_fuelPin_linedClad, *block_plenum, *block_aclp, *block_plenum, *block_duct, *block_dummy]\n        height: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n        axial mesh points: *standard_axial_mesh_points\n        xs types: *igniter_fuel_xs_types\n    annular fuel:\n        specifier: AF\n        blocks: [*block_grid_plate, *block_fuel_axial_shield, *block_fuelAnnular_linedClad, *block_fuelAnnular_linedClad, *block_fuelAnnular_linedClad, *block_plenum, *block_aclp, *block_plenum, *block_duct, *block_dummy]\n        height: *highOffset_height\n        axial mesh points: *standard_axial_mesh_points\n        xs types: *igniter_fuel_xs_types\n    multi pin fuel:\n        specifier: LA\n        blocks: [*block_grid_plate, *block_fuel_multiPin_axial_shield, *block_fuel_multiPin, *block_fuel_multiPin, *block_fuel_multiPin, *block_mixed_multiPin, *block_aclp_multiPin, *block_plenum_multiPin, *block_duct, *block_dummy]\n        height: *highOffset_height\n        axial mesh points: *standard_axial_mesh_points\n        material modifications:\n            U235_wt_frac: ['', '', 0.2, 0.2, 0.2, 0.2, '', '', '', '']\n            ZR_wt_frac: ['', '', 0.07, 0.07, 0.07, 0.07, '', '', '', '']\n        xs types: *igniter_fuel_xs_types\n    control:\n        specifier: PC\n        blocks: [*block_grid_plate, *block_ctrl_duct, *block_ctrl_duct, *block_control, *block_control, *block_control, *block_control_plenum, *block_ctrl_duct, *block_ctrl_duct, *block_dummy]\n        height: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n        axial mesh points: *standard_axial_mesh_points\n        xs types: *igniter_fuel_xs_types\n    radial shield:\n        specifier: SH\n        blocks: [*block_grid_plate, 
*block_radial_shield, *block_radial_shield, *block_radial_shield, *block_radial_shield, *block_shield_plenum, *block_shield_aclp, *block_shield_plenum, *block_duct, *block_dummy]\n        height: [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]\n        axial mesh points: *standard_axial_mesh_points\n        xs types: *igniter_fuel_xs_types\n"
  },
  {
    "path": "armi/tests/mockRunLogs.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nThis module contains subclasses of the armi.runLog._RunLog class that can be used to determine\nwhether or not one of the specific methods were called. These should only be used in testing.\n\"\"\"\n\nimport io\nimport sys\nfrom logging import LogRecord\n\nfrom armi import runLog\n\n\nclass BufferLog(runLog._RunLog):\n    \"\"\"Log which captures the output in attributes instead of emitting them.\n\n    Used mostly in testing to ensure certain things get output, or to prevent any output from\n    showing.\n    \"\"\"\n\n    def __init__(self, *args, **kwargs):\n        super(BufferLog, self).__init__(*args, **kwargs)\n        self.originalLog = None\n        self._outputStream = \"\"\n        self._errStream = io.StringIO()\n        self._deduplication = runLog.DeduplicationFilter()\n        sys.stderr = self._errStream\n        self.setVerbosity(0)\n\n    def __enter__(self):\n        self.originalLog = runLog.LOG\n        runLog.LOG = self\n        return self\n\n    def __exit__(self, exception_type, exception_value, traceback):\n        runLog.LOG = self.originalLog\n\n    def log(self, msgType, msg, single=False, label=None):\n        \"\"\"\n        Add formatting to a message and handle its singleness, if applicable.\n\n        This is a wrapper around logger.log() that does most of the work and is\n        used by all message passers (e.g. 
info, warning, etc.).\n        \"\"\"\n        # the message label is only used to determine unique for single-print warnings\n        if label is None:\n            label = msg\n\n        # Skip writing the message if it is below the set verbosity\n        msgVerbosity = self.logLevels[msgType][0]\n        if msgVerbosity < self._verbosity:\n            return\n\n        # Skip writing the message if it is single-print warning\n        record = LogRecord(\"BufferLog\", msgVerbosity, \"pathname\", 1, msg, {}, ())\n        record.label = label\n        record.single = single\n        if single and not self._deduplication.filter(record):\n            return\n\n        # Do the actual logging, but add that custom indenting first\n        msg = self.logLevels[msgType][1] + str(msg) + \"\\n\"\n        self._outputStream += msg\n\n    def clearSingleLogs(self):\n        \"\"\"Reset the single warned list so we get messages again.\"\"\"\n        self._deduplication.singleMessageLabels.clear()\n\n    def getStdout(self):\n        return self._outputStream\n\n    def emptyStdout(self):\n        self._outputStream = \"\"\n\n    def getStderrValue(self):\n        return self._errStream.getvalue()\n\n\nclass LogCounter(BufferLog):\n    \"\"\"This mock log is used to count the number of times a method was called.\n\n    It can be used in testing to make sure a warning was issued, without checking the content of the message.\n    \"\"\"\n\n    def __init__(self, *args, **kwargs):\n        BufferLog.__init__(self)\n        self.messageCounts = {msgType: 0 for msgType in self.logLevels.keys()}\n\n    def log(self, msgType, *args, **kwargs):\n        self.messageCounts[msgType] += 1\n"
  },
  {
    "path": "armi/tests/refSmallCartesian.yaml",
    "content": "custom isotopics:\n  MOX:\n    input format: number densities\n    AM241: 2.3606e-05\n    PU238: 3.7387e-06\n    PU239: 0.00286038\n    PU240: 0.000712945\n    PU241: 9.82312e-05\n    PU242: 2.02221e-05\n    U235: 0.00405533\n    U238: 0.0134125\nblocks:\n  fuel: &block_fuel\n    fuel:\n      shape: Circle\n      material: UZr\n      Tinput: 25.0\n      Thot: 600.0\n      id: 0.0\n      mult: 64.0\n      od: 0.7\n    clad: &component_fuel_clad\n      shape: Circle\n      material: HT9\n      Tinput: 25.0\n      Thot: 470.0\n      id: 1.0\n      mult: fuel.mult\n      od: 1.15\n    bond: &component_fuel_bond\n      shape: Circle\n      material: Sodium\n      Tinput: 450.0\n      Thot: 450.0\n      id: fuel.od\n      mult: fuel.mult\n      od: clad.id\n    wire: &component_fuel_wire\n      shape: Helix\n      material: HT9\n      Tinput: 25.0\n      Thot: 450.0\n      axialPitch: 30.15\n      helixDiameter: 1.2\n      id: 0.0\n      mult: fuel.mult\n      od: 0.100\n    coolant: &component_fuel_coolant\n      shape: DerivedShape\n      material: Sodium\n      Tinput: 450.0\n      Thot: 450.0\n    duct: &component_fuel_duct\n      shape: Rectangle\n      material: HT9\n      Tinput: 25.0\n      Thot: 450.0\n      lengthInner: 9.0\n      lengthOuter: 9.5\n      mult: 1.0\n      widthInner: 9.0\n      widthOuter: 9.5\n    intercoolant: &component_fuel_intercoolant\n      shape: Rectangle\n      material: Sodium\n      Tinput: 450.0\n      Thot: 450.0\n      lengthInner: 9.5\n      lengthOuter: 10.0\n      mult: 1.0\n      widthInner: 9.5\n      widthOuter: 10.0\n  control: &block_control\n    control:\n      shape: Circle\n      material: B4C\n      Tinput: 600.0\n      Thot: 600.0\n      id: 0.0\n      mult: 25.0\n      od: 1.3\n    innerduct:\n      shape: Rectangle\n      material: HT9\n      Tinput: 450.0\n      Thot: 450.0\n      lengthInner: 8.0\n      lengthOuter: 8.5\n      mult: 1.0\n      widthInner: 8.0\n      widthOuter: 8.5\n    duct:\n     
 shape: Rectangle\n      material: HT9\n      Tinput: 450.0\n      Thot: 450.0\n      lengthInner: 8.7\n      lengthOuter: 9.0\n      mult: 1.0\n      widthInner: 8.7\n      widthOuter: 9.0\n    clad:\n      shape: Circle\n      material: HT9\n      Tinput: 450.0\n      Thot: 450.0\n      id: 1.35\n      mult: control.mult\n      od: 1.7\n    wire:\n      shape: Helix\n      material: HT9\n      Tinput: 450.0\n      Thot: 450.0\n      axialPitch: 50.0\n      helixDiameter: 1.7\n      id: 0.0\n      mult: control.mult\n      od: 0.085\n    intercoolant: *component_fuel_intercoolant\n    gap:\n      shape: Circle\n      material: Void\n      Tinput: 450.0\n      Thot: 450.0\n      id: control.od\n      mult: control.mult\n      od: clad.id\n    coolant: *component_fuel_coolant\n  duct: &block_duct\n    duct: &component_duct_duct\n      shape: Rectangle\n      material: HT9\n      Tinput: 450.0\n      Thot: 450.0\n      lengthInner: 9.0\n      lengthOuter: 9.5\n      mult: 1.0\n      widthInner: 9.0\n      widthOuter: 9.5\n    coolant: *component_fuel_coolant\n    intercoolant: *component_fuel_intercoolant\n  grid plate: &block_grid_plate\n    grid:\n      shape: Rectangle\n      material: HT9\n      Tinput: 450.0\n      Thot: 450.0\n      lengthInner: 0.0\n      lengthOuter: 9.5\n      mult: 1.0\n      widthInner: 0.0\n      widthOuter: 9.5\n    coolant: *component_fuel_coolant\n    intercoolant: *component_fuel_intercoolant\n  axial shield: &block_axial_shield\n    shield:\n      shape: Circle\n      material: HT9\n      Tinput: 600.0\n      Thot: 600.0\n      id: 0.0\n      mult: 64.0\n      od: 0.90\n    clad:\n      shape: Circle\n      material: HT9\n      Tinput: 450.0\n      Thot: 450.0\n      id: 0.905\n      mult: shield.mult\n      od: 1.050\n    gap:\n      shape: Circle\n      material: Void\n      Tinput: 450.0\n      Thot: 450.0\n      id: shield.od\n      mult: shield.mult\n      od: clad.id\n    duct: *component_duct_duct\n    intercoolant: 
*component_fuel_intercoolant\n    coolant: *component_fuel_coolant\n    wire:\n      shape: Helix\n      material: HT9\n      Tinput: 450.0\n      Thot: 450.0\n      axialPitch: 30.15\n      helixDiameter: 10.10\n      id: 0.0\n      mult: shield.mult\n      od: 0.100\n  plenum: &block_plenum\n    clad:\n      shape: Circle\n      material: HT9\n      Tinput: 25.0\n      Thot: 470.0\n      id: 1.0\n      mult: 64.0\n      od: 1.09\n    gap:\n      shape: Circle\n      material: Void\n      Tinput: 25.0\n      Thot: 600.0\n      id: 0.0\n      mult: clad.mult\n      od: clad.id\n    wire:\n      shape: Helix\n      material: HT9\n      Tinput: 25.0\n      Thot: 450.0\n      axialPitch: 30.\n      helixDiameter: 1.2\n      id: 0.0\n      mult: clad.mult\n      od: 0.1\n    coolant: *component_fuel_coolant\n    duct: *component_fuel_duct\n    intercoolant: *component_fuel_intercoolant\n  fuel2: &block_fuel2\n    fuel:\n      shape: Circle\n      material: Custom\n      Tinput: 25.0\n      Thot: 600.0\n      id: 0.0\n      isotopics: MOX\n      mult: 64.0\n      od: 0.87\n    clad: *component_fuel_clad\n    bond: *component_fuel_bond\n    wire: *component_fuel_wire\n    coolant: *component_fuel_coolant\n    duct: *component_fuel_duct\n    intercoolant: *component_fuel_intercoolant\nassemblies:\n  heights: &standard_heights [25.0, 25.0, 25.0, 25.0, 75.0]\n  axial mesh points: &standard_axial_mesh_points [1, 1, 1, 1, 4]\n  igniter fuel:\n    specifier: IC\n    blocks:\n      &igniter_fuel_blocks [\n        *block_grid_plate,\n        *block_fuel,\n        *block_fuel,\n        *block_fuel,\n        *block_plenum,\n      ]\n    height: *standard_heights\n    axial mesh points: *standard_axial_mesh_points\n    hotChannelFactors: TWRPclad\n    material modifications:\n      U235_wt_frac: &igniter_fuel_u235_wt_frac [\"\", 0.11, 0.11, 0.11, \"\"]\n      ZR_wt_frac: &igniter_fuel_zr_wt_frac [\"\", 0.06, 0.06, 0.06, \"\"]\n    xs types: &igniter_fuel_xs_types [A, A, A, A, A]\n  
middle fuel:\n    specifier: MC\n    blocks:\n      [\n        *block_grid_plate,\n        *block_fuel2,\n        *block_fuel2,\n        *block_fuel2,\n        *block_plenum,\n      ]\n    height: *standard_heights\n    axial mesh points: *standard_axial_mesh_points\n    xs types: *igniter_fuel_xs_types\n  feed fuel:\n    specifier: OC\n    blocks: *igniter_fuel_blocks\n    height: *standard_heights\n    axial mesh points: *standard_axial_mesh_points\n    material modifications:\n      U235_wt_frac: *igniter_fuel_u235_wt_frac\n      ZR_wt_frac: *igniter_fuel_zr_wt_frac\n    xs types: *igniter_fuel_xs_types\n  primary control:\n    specifier: PC\n    blocks:\n      [\n        *block_grid_plate,\n        *block_duct,\n        *block_duct,\n        *block_control,\n        *block_plenum,\n      ]\n    height: *standard_heights\n    axial mesh points: *standard_axial_mesh_points\n    xs types: *igniter_fuel_xs_types\n  radial shield:\n    specifier: SH\n    blocks:\n      [\n        *block_grid_plate,\n        *block_axial_shield,\n        *block_axial_shield,\n        *block_axial_shield,\n        *block_plenum,\n      ]\n    height: *standard_heights\n    axial mesh points: *standard_axial_mesh_points\n    xs types: *igniter_fuel_xs_types\n\nsystems:\n  core:\n    grid name: core\n    origin:\n      x: 0.0\n      y: 0.0\n      z: 0.0\n\ngrids:\n  core:\n    geom: cartesian\n    symmetry: full\n    lattice pitch:\n      x: 10.0\n      y: 10.0\n    lattice map: |\n      SH SH SH SH SH SH SH SH SH SH SH SH SH\n      SH OC OC OC OC OC OC OC OC OC OC OC SH\n      SH OC MC MC MC MC MC MC MC MC MC OC SH\n      SH OC MC IC IC IC IC IC IC IC MC OC SH\n      SH OC MC IC IC IC IC IC IC IC MC OC SH\n      SH OC MC IC IC IC IC IC IC IC MC OC SH\n      SH OC MC IC IC IC IC IC IC IC MC OC SH\n      SH OC MC IC IC IC IC IC IC IC MC OC SH\n      SH OC MC IC IC IC IC IC IC IC MC OC SH\n      SH OC MC IC IC IC IC IC IC IC MC OC SH\n      SH OC MC MC MC MC MC MC MC MC MC OC SH\n      SH 
OC OC OC OC OC OC OC OC OC OC OC SH\n      SH SH SH SH SH SH SH SH SH SH SH SH SH\n"
  },
  {
    "path": "armi/tests/refSmallCoreGrid.yaml",
    "content": "core:\n  geom: hex\n  lattice map: |\n    -   -   SH\n      -   SH  SH\n    -   SH  OC  SH\n      SH  OC  OC  SH\n        OC  IC  OC  SH\n      OC  IC  IC  OC  SH\n        IC  IC  IC  OC  SH\n          IC  IC  PC  OC  SH\n        IC  PC  IC  IC  OC  SH\n          LA  IC  IC  IC  OC\n            IC  IC  IC  IC  SH\n          IC  LB  IC  IC  OC\n            IC  IC  PC  IC  SH\n              LA  IC  IC  OC\n            IC  IC  IC  IC  SH\n              IC  IC  IC  OC\n            IC  IC  IC  PC  SH\n  symmetry: third periodic\n"
  },
  {
    "path": "armi/tests/refSmallReactor.yaml",
    "content": "!include refSmallReactorBase.yaml\nsystems:\n    core:\n        grid name: core\n        origin:\n            x: 0.0\n            y: 0.0\n            z: 0.0\n    Spent Fuel Pool:\n        type: sfp\n        grid name: sfp\n        origin:\n            x: 5000.0\n            y: 5000.0\n            z: 6000.0\ngrids:\n    !include refSmallCoreGrid.yaml\n    !include refSmallSfpGrid.yaml"
  },
  {
    "path": "armi/tests/refSmallReactorBase.yaml",
    "content": "custom isotopics:\n  MOX:\n    input format: number densities\n    AM241: 2.3606e-05\n    PU238: 3.7387e-06\n    PU239: 0.00286038\n    PU240: 0.000712945\n    PU241: 9.82312e-05\n    PU242: 2.02221e-05\n    U235: 0.00405533\n    U238: 0.0134125\n  PuUZr:\n    input format: mass fractions\n    PU239: 0.1\n    U235: 0.15\n    U238: 0.65\n    ZR: 0.1\nblocks:\n  fuel: &block_fuel\n    fuel: &component_fuel_fuel\n      shape: Circle\n      material: UZr\n      Tinput: 25.0\n      Thot: 600.0\n      id: 0.0\n      mult: 169.0\n      od: 0.86602\n    clad: &component_fuel_clad\n      shape: Circle\n      material: HT9\n      Tinput: 25.0\n      Thot: 470.0\n      id: 1.0\n      mult: fuel.mult\n      od: 1.09\n    bond: &component_fuel_bond\n      shape: Circle\n      material: Sodium\n      Tinput: 450.0\n      Thot: 450.0\n      id: fuel.od\n      mult: fuel.mult\n      od: clad.id\n    wire: &component_fuel_wire\n      shape: Helix\n      material: HT9\n      Tinput: 25.0\n      Thot: 450.0\n      axialPitch: 30.15\n      helixDiameter: 1.19056\n      id: 0.0\n      mult: fuel.mult\n      od: 0.10056\n    coolant: &component_fuel_coolant\n      shape: DerivedShape\n      material: Sodium\n      Tinput: 450.0\n      Thot: 450.0\n    duct: &component_fuel_duct\n      shape: Hexagon\n      material: HT9\n      Tinput: 25.0\n      Thot: 450.0\n      ip: 16.0\n      mult: 1.0\n      op: 16.6\n    intercoolant: &component_fuel_intercoolant\n      shape: Hexagon\n      material: Sodium\n      Tinput: 450.0\n      Thot: 450.0\n      ip: duct.op\n      mult: 1.0\n      op: 16.75\n  moveable control: &block_control\n    control:\n      shape: Circle\n      material: B4C\n      Tinput: 600.0\n      Thot: 600.0\n      id: 0.0\n      mult: 61.0\n      od: 1.286\n    innerDuct:\n      shape: Hexagon\n      material: HT9\n      Tinput: 450.0\n      Thot: 450.0\n      ip: 14.268\n      mult: 1.0\n      op: 14.582\n    duct: &component_control_duct\n      shape: 
Hexagon\n      material: HT9\n      Tinput: 450.0\n      Thot: 450.0\n      ip: 15.277\n      mult: 1.0\n      op: 16.28228\n    clad:\n      shape: Circle\n      material: HT9\n      Tinput: 450.0\n      Thot: 450.0\n      id: 1.358\n      mult: control.mult\n      od: 1.686\n    wire:\n      shape: Helix\n      material: HT9\n      Tinput: 450.0\n      Thot: 450.0\n      axialPitch: 50.0\n      helixDiameter: 1.771\n      id: 0.0\n      mult: control.mult\n      od: 0.085\n    intercoolant: *component_fuel_intercoolant\n    gap:\n      shape: Circle\n      material: Void\n      Tinput: 450.0\n      Thot: 450.0\n      id: control.od\n      mult: control.mult\n      od: clad.id\n    coolant: *component_fuel_coolant\n  duct: &block_duct\n    duct: *component_control_duct\n    coolant: *component_fuel_coolant\n    intercoolant: *component_fuel_intercoolant\n  grid plate: &block_grid_plate\n    grid: &component_grid_plate_grid\n      shape: Hexagon\n      material: HT9\n      Tinput: 450.0\n      Thot: 450.0\n      ip: 15.277\n      mult: 1.0\n      op: 16.577\n    coolant: *component_fuel_coolant\n    intercoolant:\n      shape: Hexagon\n      material: Sodium\n      Tinput: 450.0\n      Thot: 450.0\n      ip: grid.op\n      mult: 1.0\n      op: 16.75\n  grid plate broken:\n    grid: *component_grid_plate_grid\n    coolant: *component_fuel_coolant\n    intercoolant:\n      shape: Hexagon\n      material: Sodium\n      Tinput: 450.0\n      Thot: 450.0\n      ip: grid.op\n      mult: 1.0\n      op: 0.0\n  axial shield: &block_axial_shield\n    shield:\n      shape: Circle\n      material: HT9\n      Tinput: 600.0\n      Thot: 600.0\n      id: 0.0\n      mult: 169.0\n      od: 0.90362\n    clad:\n      shape: Circle\n      material: HT9\n      Tinput: 450.0\n      Thot: 450.0\n      id: 0.90562\n      mult: shield.mult\n      od: 1.05036\n    gap:\n      shape: Circle\n      material: Void\n      Tinput: 450.0\n      Thot: 450.0\n      id: shield.od\n      mult: 
shield.mult\n      od: clad.id\n    duct: *component_control_duct\n    intercoolant: *component_fuel_intercoolant\n    coolant: *component_fuel_coolant\n    wire:\n      shape: Helix\n      material: HT9\n      Tinput: 450.0\n      Thot: 450.0\n      axialPitch: 30.15\n      helixDiameter: 16.85056\n      id: 0.0\n      mult: shield.mult\n      od: 0.10056\n  moveable plenum: &block_plenum\n    clad:\n      shape: Circle\n      material: HT9\n      Tinput: 25.0\n      Thot: 470.0\n      id: 1.0\n      mult: 169.0\n      od: 1.09\n    gap:\n      shape: Circle\n      material: Void\n      Tinput: 25.0\n      Thot: 600.0\n      id: 0.0\n      mult: clad.mult\n      od: clad.id\n    wire:\n      shape: Helix\n      material: HT9\n      Tinput: 25.0\n      Thot: 450.0\n      axialPitch: 30.15\n      helixDiameter: 1.19056\n      id: 0.0\n      mult: clad.mult\n      od: 0.10056\n    coolant: *component_fuel_coolant\n    duct: *component_fuel_duct\n    intercoolant: *component_fuel_intercoolant\n  fuel2: &block_fuel2\n    fuel:\n      shape: Circle\n      material: Custom\n      Tinput: 25.0\n      Thot: 600.0\n      id: 0.0\n      isotopics: MOX\n      mult: 169.0\n      od: 0.86602\n    bond: &component_fuel_bond2\n      shape: Circle\n      material: Sodium\n      Tinput: 450.0\n      Thot: 450.0\n      id: fuel.od\n      mult: fuel.mult\n      od: liner1.id\n    clad: *component_fuel_clad\n    liner1: &component_fuel2_liner1\n      shape: Circle\n      material: HT9\n      Tinput: 25.0\n      Thot: 600.0\n      id: 0.99\n      mergeWith: clad\n      mult: 169.0\n      od: 1.0\n    liner2: &component_fuel2_liner2\n      shape: Circle\n      material: HT9\n      Tinput: 25.0\n      Thot: 600.0\n      id: 0.98\n      mergeWith: clad\n      mult: 169.0\n      od: 0.99\n    wire: *component_fuel_wire\n    coolant: *component_fuel_coolant\n    duct: *component_fuel_duct\n    intercoolant: *component_fuel_intercoolant\n  lta1 fuel: &block_lta1_fuel\n    fuel: 
*component_fuel_fuel\n    clad: *component_fuel_clad\n    liner1: *component_fuel2_liner1\n    liner2: *component_fuel2_liner2\n    bond: *component_fuel_bond\n    wire: *component_fuel_wire\n    coolant: *component_fuel_coolant\n    duct: *component_fuel_duct\n    intercoolant: *component_fuel_intercoolant\n  lta2 fuel: &block_lta2_fuel\n    fuel:\n      shape: Circle\n      material: UZr\n      Tinput: 25.0\n      Thot: 600.0\n      id: 0.0\n      isotopics: PuUZr\n      mult: 169.0\n      od: 0.86602\n    clad: *component_fuel_clad\n    liner1: *component_fuel2_liner1\n    liner2: *component_fuel2_liner2\n    bond: *component_fuel_bond\n    wire: *component_fuel_wire\n    coolant: *component_fuel_coolant\n    duct: *component_fuel_duct\n    intercoolant: *component_fuel_intercoolant\n  annular fuel gap: &block_fuel3\n    gap1:\n      shape: Circle\n      material: Void\n      Tinput: 20.0\n      Thot: 430.0\n      id: 0.0\n      mult: fuel.mult\n      od: fuel.id\n    fuel:\n      shape: Circle\n      material: UZr\n      Tinput: 20.0\n      Thot: 600.0\n      id: 0.600\n      mult: 169.0\n      od: 0.878\n      flags: annular fuel depletable\n    gap2:\n      shape: Circle\n      material: Void\n      Tinput: 20.0\n      Thot: 430.0\n      id: fuel.od\n      mult: fuel.mult\n      od: inner liner.id\n    inner liner:\n      shape: Circle\n      material: HT9\n      Tinput: 20.0\n      Thot: 430.0\n      id: 0.878\n      mult: fuel.mult\n      od: 0.898\n    gap3:\n      shape: Circle\n      material: Void\n      Tinput: 20.0\n      Thot: 430.0\n      id: inner liner.od\n      mult: fuel.mult\n      od: outer liner.id\n    outer liner:\n      shape: Circle\n      material: Zr\n      Tinput: 20.0\n      Thot: 430.0\n      id: 0.898\n      mult: fuel.mult\n      od: 0.900\n    gap4:\n      shape: Circle\n      material: Void\n      Tinput: 20.0\n      Thot: 430.0\n      id: outer liner.od\n      mult: fuel.mult\n      od: clad.id\n    clad:\n      shape: Circle\n  
    material: HT9\n      Tinput: 20.0\n      Thot: 430.0\n      id: 0.900\n      mult: fuel.mult\n      od: 1.000\n    wire: *component_fuel_wire\n    coolant: *component_fuel_coolant\n    duct: *component_fuel_duct\n    intercoolant: *component_fuel_intercoolant\nassemblies:\n  heights: &standard_heights [25.0, 25.0, 25.0, 25.0, 75.0]\n  axial mesh points: &standard_axial_mesh_points [1, 1, 1, 1, 4]\n  igniter fuel:\n    specifier: IC\n    blocks:\n      &igniter_fuel_blocks [\n        *block_grid_plate,\n        *block_fuel,\n        *block_fuel,\n        *block_fuel,\n        *block_plenum,\n      ]\n    height: *standard_heights\n    axial mesh points: *standard_axial_mesh_points\n    material modifications:\n      U235_wt_frac: &igniter_fuel_u235_wt_frac [\"\", 0.11, 0.11, 0.11, \"\"]\n      ZR_wt_frac: &igniter_fuel_zr_wt_frac [\"\", 0.06, 0.06, 0.06, \"\"]\n    xs types: &igniter_fuel_xs_types [A, A, A, A, A]\n    nozzleType: Inner\n  middle fuel:\n    specifier: MC\n    blocks:\n      [\n        *block_grid_plate,\n        *block_fuel2,\n        *block_fuel2,\n        *block_fuel2,\n        *block_plenum,\n      ]\n    height: *standard_heights\n    axial mesh points: *standard_axial_mesh_points\n    xs types: &middle_fuel_xs_types [Z, Z, Z, Z, Z]\n  annular fuel:\n    specifier: AF\n    blocks:\n      [\n        *block_grid_plate,\n        *block_fuel3,\n        *block_fuel3,\n        *block_fuel3,\n        *block_plenum,\n      ]\n    height: *standard_heights\n    axial mesh points: *standard_axial_mesh_points\n    xs types: &annular_fuel_xs_types [U, U, U, U, U]\n  lta fuel:\n    specifier: LA\n    blocks:\n      [\n        *block_grid_plate,\n        *block_lta1_fuel,\n        *block_lta1_fuel,\n        *block_lta1_fuel,\n        *block_plenum,\n      ]\n    height: *standard_heights\n    axial mesh points: *standard_axial_mesh_points\n    material modifications:\n      U235_wt_frac: &lta_fuel_u235_wt_frac [\"\", 0.2, 0.2, 0.2, \"\"]\n      ZR_wt_frac: 
&lta_fuel_zr_wt_frac [\"\", 0.07, 0.07, 0.06, \"\"]\n    xs types: *igniter_fuel_xs_types\n    nozzleType: lta\n  lta fuel b:\n    specifier: LB\n    blocks:\n      [\n        *block_grid_plate,\n        *block_lta2_fuel,\n        *block_lta2_fuel,\n        *block_lta2_fuel,\n        *block_plenum,\n      ]\n    height: *standard_heights\n    axial mesh points: *standard_axial_mesh_points\n    material modifications:\n      U235_wt_frac: *lta_fuel_u235_wt_frac\n      ZR_wt_frac: *lta_fuel_zr_wt_frac\n    xs types: *igniter_fuel_xs_types\n    nozzleType: lta\n  feed fuel:\n    specifier: OC\n    blocks: *igniter_fuel_blocks\n    height: *standard_heights\n    axial mesh points: *standard_axial_mesh_points\n    material modifications:\n      U235_wt_frac: *igniter_fuel_u235_wt_frac\n      ZR_wt_frac: *igniter_fuel_zr_wt_frac\n    xs types: *igniter_fuel_xs_types\n    nozzleType: Outer\n  primary control:\n    specifier: PC\n    blocks:\n      [\n        *block_grid_plate,\n        *block_duct,\n        *block_control,\n        *block_plenum,\n        *block_duct,\n      ]\n    height: *standard_heights\n    axial mesh points: *standard_axial_mesh_points\n    xs types: *igniter_fuel_xs_types\n  radial shield:\n    specifier: SH\n    blocks:\n      [\n        *block_grid_plate,\n        *block_axial_shield,\n        *block_axial_shield,\n        *block_axial_shield,\n        *block_plenum,\n      ]\n    height: *standard_heights\n    axial mesh points: *standard_axial_mesh_points\n    xs types: *igniter_fuel_xs_types\n"
  },
  {
    "path": "armi/tests/refSmallReactorShuffleLogic.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom armi.physics.fuelCycle.fuelHandlers import FuelHandler\n\n\nclass EquilibriumShuffler(FuelHandler):\n    \"\"\"Convergent divergent equilibrium shuffler.\"\"\"\n\n    def chooseSwaps(self, factorList):\n        if self.cycle == 0:\n            # no fuel shuffling at cycle 0\n            return\n\n        cycleMoves = [\n            [(2, 1), (3, 3), (4, 2), (5, 1), (6, 7)],\n            [(2, 2), (3, 2), (4, 1), (5, 4), (6, 4)],\n            [(2, 1), (3, 1), (4, 3), (5, 2), (6, 7)],\n        ]\n        cascade = []\n        for ring, pos in cycleMoves[self.cycle - 1]:\n            loc = self.r.core.spatialGrid.getLocatorFromRingAndPos(ring, pos)\n            a = self.r.core.childrenByLocator[loc]\n            if not a:\n                raise RuntimeError(\"No assembly in {0} {1}\".format(ring, pos))\n            cascade.append(a)\n        self.swapCascade(cascade)\n        fresh = self.r.blueprints.constructAssem(self.cs, name=\"igniter fuel\")\n        self.dischargeSwap(fresh, cascade[0])\n\n        if self.cycle > 1:\n            # do a swap where the assembly comes from the sfp\n            if self.r.excore.get(\"sfp\") is None:\n                raise RuntimeError(\"No SFP found.\")\n\n            incoming = self.r.excore[\"sfp\"].getChildren().pop()\n            if not incoming:\n                raise RuntimeError(f\"No assembly in SFP 
{self.r.excore['sfp'].getChildren()}\")\n            outLoc = self.r.core.spatialGrid.getLocatorFromRingAndPos(5, 1 + self.cycle)\n            self.dischargeSwap(incoming, self.r.core.childrenByLocator[outLoc])\n\n\ndef getFactorList(cycle, cs=None, fallBack=False):\n    # prefer to keep these 0 through 1 since this is what the branch search can do.\n    defaultFactorList = {}\n    factorSearchFlags = []\n    defaultFactorList[\"divergentConvergent\"] = 1\n\n    return defaultFactorList, factorSearchFlags\n"
  },
  {
    "path": "armi/tests/refSmallSfpGrid.yaml",
    "content": "sfp:\n  symmetry: full\n  geom: cartesian\n  lattice pitch:\n    x: 50.0\n    y: 50.0\n  grid contents:\n    [0, 0]: MC\n    [1, 0]: MC\n    [0, 1]: MC\n    [1, 1]: MC\n"
  },
  {
    "path": "armi/tests/refTestCartesian.yaml",
    "content": "settings:\n# global\n  beta: 0.003454\n  buGroups:\n    - 100\n  burnSteps: 0\n  comment: Full-core Cartesian input file with a 10x10 cm square pitch.\n  cycleLength: 2000.0\n  freshFeedType: igniter fuel\n  loadingFile: refSmallCartesian.yaml\n  outputFileExtension: png\n  power: 400000000.0\n  startNode: 1\n  targetK: 1.002\n  versions:\n    armi: uncontrolled\n\n# fuel cycle\n  jumpRingNum: 9\n\n# neutronics\n  epsFSAvg: 1e-06\n  epsFSPoint: 1e-06\n  loadPadElevation: 200.0\n\n# report\n  summarizeAssemDesign: false\n"
  },
  {
    "path": "armi/tests/smallestTestReactor/armiRunSmallest.yaml",
    "content": "# This is a non-physical test reactor.\n# This is designed to speed up testing of code that only technically needs a full reactor object.\n# This is a single-hex-assembly reactor, with only one block.\n\nsettings:\n# global\n  availabilityFactor: 1\n  beta: 0.003454\n  branchVerbosity: debug\n  buGroups:\n    - 100\n  burnSteps: 2\n  comment: Simple test input.\n  cycleLength: 2000.0\n  detailAssemLocationsBOL:\n    - 002-001\n  freshFeedType: igniter fuel\n  loadingFile: refSmallestReactor.yaml\n  moduleVerbosity:\n    armi.reactor.reactors: info\n  nCycles: 2\n  outputFileExtension: png\n  power: 1000000.0\n  rmExternalFilesAtEOL: true\n  startCycle: 1\n  startNode: 2\n  targetK: 1.002\n  verbosity: extra\n  versions:\n    armi: uncontrolled\n\n# cross section\n  crossSectionControl:\n    DA:\n      geometry: 0D\n      blockRepresentation: Median\n      criticalBuckling: true\n      externalDriver: true\n      useHomogenizedBlockComposition: false\n      numInternalRings: 1\n      numExternalRings: 1\n    XA:\n      xsFileLocation:\n        - ISOXA\n    YA:\n      geometry: 0D\n      fluxFileLocation: rzmflxYA\n    ZA:\n      geometry: 1D cylinder\n      blockRepresentation: ComponentAverage1DCylinder\n      validBlockTypes:\n        - fuel\n      externalDriver: false\n      mergeIntoClad:\n        - gap\n      numInternalRings: 1\n      numExternalRings: 1\n\n# database\n  db: false\n\n# fuel cycle\n  fuelHandlerName: EquilibriumShuffler\n  jumpRingNum: 9\n\n# neutronics\n  epsFSAvg: 1e-06\n  epsFSPoint: 1e-06\n  loadPadElevation: 200.0\n\n# report\n  genReports: false\n  summarizeAssemDesign: false\n"
  },
  {
    "path": "armi/tests/smallestTestReactor/refOneBlockReactor.yaml",
    "content": "blocks:\n    fuel: &block_fuel\n        fuel: &component_fuel_fuel\n            shape: Circle\n            material: UZr\n            Tinput: 25.0\n            Thot: 600.0\n            id: 0.0\n            mult: 169.0\n            od: 0.86\n        clad: &component_fuel_clad\n            shape: Circle\n            material: HT9\n            Tinput: 25.0\n            Thot: 470.0\n            id: 1.0\n            mult: fuel.mult\n            od: 1.09\n        bond: &component_fuel_bond\n            shape: Circle\n            material: Sodium\n            Tinput: 450.0\n            Thot: 450.0\n            id: fuel.od\n            mult: fuel.mult\n            od: clad.id\n        wire: &component_fuel_wire\n            shape: Helix\n            material: HT9\n            Tinput: 25.0\n            Thot: 450.0\n            axialPitch: 30\n            helixDiameter: 1.20\n            id: 0.0\n            mult: fuel.mult\n            od: 0.10056\n        coolant: &component_fuel_coolant\n            shape: DerivedShape\n            material: Sodium\n            Tinput: 450.0\n            Thot: 450.0\n        duct: &component_fuel_duct\n            shape: Hexagon\n            material: HT9\n            Tinput: 25.0\n            Thot: 450.0\n            ip: 16.0\n            mult: 1.0\n            op: 16.7\n        intercoolant: &component_fuel_intercoolant\n            shape: Hexagon\n            material: Sodium\n            Tinput: 450.0\n            Thot: 450.0\n            ip: duct.op\n            mult: 1.0\n            op: 16.8\nassemblies:\n    heights: &standard_heights [25.0]\n    axial mesh points: &standard_axial_mesh_points [1]\n    igniter fuel:\n        specifier: IC\n        blocks: &igniter_fuel_blocks [*block_fuel]\n        height: *standard_heights\n        axial mesh points: *standard_axial_mesh_points\n        material modifications:\n            U235_wt_frac: &igniter_fuel_u235_wt_frac [0.11]\n            ZR_wt_frac: 
&igniter_fuel_zr_wt_frac [0.06]\n        xs types: &igniter_fuel_xs_types [A]\n"
  },
  {
    "path": "armi/tests/smallestTestReactor/refSmallestReactor.yaml",
    "content": "!include refOneBlockReactor.yaml\nsystems:\n    core:\n        grid name: core\n        origin:\n            x: 0.0\n            y: 0.0\n            z: 0.0\n    Spent Fuel Pool:\n        type: sfp\n        grid name: sfp\n        origin:\n            x: 5000.0\n            y: 5000.0\n            z: 6000.0\ngrids:\n    core:\n      geom: hex_corners_up\n      lattice map: |\n        IC\n      symmetry: full\n    sfp:\n      geom: cartesian\n      symmetry: full\n      lattice pitch:\n        x: 32.0\n        y: 32.0\n"
  },
  {
    "path": "armi/tests/test_apps.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for the App class.\"\"\"\n\nimport copy\nimport unittest\n\nfrom armi import (\n    configure,\n    context,\n    getApp,\n    getDefaultPluginManager,\n    isStableReleaseVersion,\n    meta,\n    plugins,\n)\nfrom armi.__main__ import main\nfrom armi.reactor.flags import Flags\n\n\nclass TestPlugin1(plugins.ArmiPlugin):\n    \"\"\"This should be fine on its own.\"\"\"\n\n    @staticmethod\n    @plugins.HOOKIMPL\n    def defineParameterRenames():\n        return {\"oldType\": \"type\"}\n\n\nclass TestPlugin2(plugins.ArmiPlugin):\n    \"\"\"This should lead to an error if it coexists with Plugin1.\"\"\"\n\n    @staticmethod\n    @plugins.HOOKIMPL\n    def defineParameterRenames():\n        return {\"oldType\": \"type\"}\n\n\nclass TestPlugin3(plugins.ArmiPlugin):\n    \"\"\"This should lead to errors, since it collides with the framework `type` param.\"\"\"\n\n    @staticmethod\n    @plugins.HOOKIMPL\n    def defineParameterRenames():\n        return {\"type\": \"newType\"}\n\n\nclass TestPlugin4(plugins.ArmiPlugin):\n    \"\"\"This should be fine on its own, and safe to merge with TestPlugin1.\"\"\"\n\n    @staticmethod\n    @plugins.HOOKIMPL\n    def defineParameterRenames():\n        return {\"arealPD\": \"arealPowerDensity\"}\n\n\nclass TestApps(unittest.TestCase):\n    \"\"\"Test the base apps.App interfaces.\"\"\"\n\n    def setUp(self):\n        
\"\"\"\n        Manipulate the standard App. We can't just configure our own, since the\n        pytest environment bleeds between tests.\n        \"\"\"\n        self._backupApp = copy.deepcopy(getApp())\n\n    def tearDown(self):\n        \"\"\"Restore the App to its original state.\"\"\"\n        import armi\n\n        armi._app = self._backupApp\n        context.APP_NAME = \"armi\"\n\n    def test_getParamRenames(self):\n        # a basic test of the method\n        app = getApp()\n        app.pluginManager.register(TestPlugin1)\n        app.pluginManager.register(TestPlugin4)\n        app._paramRenames = None  # need to implement better cache invalidation rules\n\n        renames = app.getParamRenames()\n\n        self.assertIn(\"oldType\", renames)\n        self.assertEqual(renames[\"oldType\"], \"type\")\n        self.assertIn(\"arealPD\", renames)\n        self.assertEqual(renames[\"arealPD\"], \"arealPowerDensity\")\n\n        # test an invalid param manager situation\n        app._paramRenames[0][1] = -3\n        renames = app.getParamRenames()\n\n        self.assertIn(\"oldType\", renames)\n        self.assertEqual(renames[\"oldType\"], \"type\")\n        self.assertIn(\"arealPD\", renames)\n        self.assertEqual(renames[\"arealPD\"], \"arealPowerDensity\")\n\n        # test the exceptions that get raised\n        app.pluginManager.register(TestPlugin2)\n        app._paramRenames = None  # need to implement better cache invalidation rules\n        with self.assertRaisesRegex(\n            plugins.PluginError,\n            \".*parameter renames are already defined by another plugin.*\",\n        ):\n            app.getParamRenames()\n        app.pluginManager.unregister(TestPlugin2)\n\n        app.pluginManager.register(TestPlugin3)\n        with self.assertRaisesRegex(plugins.PluginError, \".*currently-defined parameters.*\"):\n            app.getParamRenames()\n\n    def test_registerPluginFlags(self):\n        # set up the app, pm, and register some 
plugins\n        app = getApp()\n\n        # validate our flags have been registered\n        self.assertEqual(Flags.fromString(\"FUEL\"), Flags.FUEL)\n        self.assertEqual(Flags.fromString(\"PRIMARY\"), Flags.PRIMARY)\n\n        # validate we can only register the flags once\n        for _ in range(3):\n            with self.assertRaises(RuntimeError):\n                app.registerPluginFlags()\n\n    def test_getParamRenamesInvalids(self):\n        # a basic test of the method\n        app = getApp()\n        app.pluginManager.register(TestPlugin1)\n        app.pluginManager.register(TestPlugin4)\n        app._paramRenames = None  # need to implement better cache invalidation rules\n\n        renames = app.getParamRenames()\n\n        self.assertIn(\"oldType\", renames)\n        self.assertEqual(renames[\"oldType\"], \"type\")\n        self.assertIn(\"arealPD\", renames)\n        self.assertEqual(renames[\"arealPD\"], \"arealPowerDensity\")\n\n        # test the strange, invalid case\n        self.assertIsNotNone(app._paramRenames)\n        app._pm._counter = -1\n        renames = app.getParamRenames()\n        self.assertIn(\"oldType\", renames)\n        self.assertEqual(renames[\"oldType\"], \"type\")\n        self.assertIn(\"arealPD\", renames)\n        self.assertEqual(renames[\"arealPD\"], \"arealPowerDensity\")\n\n    def test_version(self):\n        app = getApp()\n        ver = app.version\n        self.assertEqual(ver, meta.__version__)\n\n    def test_getSettings(self):\n        app = getApp()\n        settings = app.getSettings()\n\n        self.assertGreater(len(settings), 100)\n        self.assertEqual(settings[\"nTasks\"].value, 1)\n        self.assertEqual(settings[\"nCycles\"].value, 1)\n\n    def test_splashText(self):\n        app = getApp()\n        splash = app.splashText\n        self.assertIn(\"========\", splash)\n        self.assertIn(\"Advanced\", splash)\n        self.assertIn(\"version\", splash)\n        
self.assertIn(meta.__version__, splash)\n\n    def test_splashTextDifferentApp(self):\n        import armi\n\n        app = getApp()\n        name = \"DifferentApp\"\n        app.name = name\n        armi._app = app\n        context.APP_NAME = name\n\n        splash = app.splashText\n        self.assertIn(\"========\", splash)\n        self.assertIn(\"Advanced\", splash)\n        self.assertIn(\"version\", splash)\n        self.assertIn(meta.__version__, splash)\n        self.assertIn(\"DifferentApp\", splash)\n\n    def test_isStableReleaseVersion(self):\n        self.assertTrue(isStableReleaseVersion(None))\n        self.assertTrue(isStableReleaseVersion(\"0.1.2\"))\n        self.assertFalse(isStableReleaseVersion(\"1.2.3-asda132a\"))\n\n    def test_disableFutureConfigures(self):\n        import armi\n\n        # save off, in in case of poorly parallelized tests\n        old = armi._ignoreConfigures\n\n        # test it works (should be False to start)\n        armi.disableFutureConfigures()\n        self.assertTrue(armi._ignoreConfigures)\n\n        # reset, in case of poorly parallelized tests\n        armi._ignoreConfigures = old\n\n\nclass TestArmiHighLevel(unittest.TestCase):\n    \"\"\"Tests for functions in the ARMI __init__ module.\"\"\"\n\n    def test_getDefaultPluginManager(self):\n        \"\"\"Test the default plugin manager.\n\n        .. 
test:: The default application consists of a list of default plugins.\n            :id: T_ARMI_APP_PLUGINS\n            :tests: R_ARMI_APP_PLUGINS\n        \"\"\"\n        pm = getDefaultPluginManager()\n        pm2 = getDefaultPluginManager()\n\n        self.assertNotEqual(pm, pm2)\n        pluginsList = \"\".join([str(p) for p in pm.get_plugins()])\n\n        self.assertIn(\"BookkeepingPlugin\", pluginsList)\n        self.assertIn(\"EntryPointsPlugin\", pluginsList)\n        self.assertIn(\"NeutronicsPlugin\", pluginsList)\n        self.assertIn(\"ReactorPlugin\", pluginsList)\n\n    def test_overConfigured(self):\n        with self.assertRaises(RuntimeError):\n            configure()\n\n    def test_main(self):\n        with self.assertRaises(SystemExit):\n            main()\n"
  },
  {
    "path": "armi/tests/test_armiTestHelper.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests to demonstrate the test helper is functional.\"\"\"\n\nimport os\n\nfrom armi.tests import ArmiTestHelper\n\nTHIS_DIR = os.path.dirname(__file__)\n\n\nclass TestArmiTestHelper(ArmiTestHelper):\n    def setUp(self):\n        self.goodFilePath = os.path.join(THIS_DIR, \"goodFile\" + self._testMethodName)\n        self.badFilePath = os.path.join(THIS_DIR, \"badFile\" + self._testMethodName)\n        self.BLOCK_TEXT = (\n            \"TerraPower aims to develop a sustainable and economic nuclear energy technology using:\\n\"\n            \"Next-generation safe, affordable, clean and secure technologies\\n\"\n            \"Advanced materials for more durable metallic fuels\\n\"\n            \"World-class leadership for dynamic reactor engineering and innovation\\n\"\n            \"Supercomputing for reliable and comprehensive modeling\\n\"\n        )\n        self.BAD_TEXT = self.BLOCK_TEXT.replace(\"class\", \"NEGATIVE\")\n        for path, text in zip([self.goodFilePath, self.badFilePath], (self.BLOCK_TEXT, self.BAD_TEXT)):\n            with open(path, \"w\") as fileObj:\n                fileObj.write(text)\n\n    def tearDown(self):\n        for path in [self.goodFilePath, self.badFilePath]:\n            if os.path.exists(path):\n                os.remove(path)\n\n    def test_compareFilesSucess(self):\n        
self.compareFilesLineByLine(self.goodFilePath, self.goodFilePath)\n\n    def test_compareFilesFail(self):\n        self.assertRaises(\n            AssertionError,\n            self.compareFilesLineByLine,\n            self.goodFilePath,\n            self.badFilePath,\n        )\n\n    def test_compareFilesSucceedFalseNegative(self):\n        self.compareFilesLineByLine(self.goodFilePath, self.badFilePath, falseNegList=[\"NEGATIVE\"])\n"
  },
  {
    "path": "armi/tests/test_cartesian.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for Cartesian reactors.\"\"\"\n\nimport unittest\n\nfrom armi.reactor import geometry\nfrom armi.reactor.flags import Flags\nfrom armi.reactor.tests import test_reactors\nfrom armi.tests import TEST_ROOT\nfrom armi.utils import directoryChangers\n\n\nclass CartesianReactorTests(unittest.TestCase):\n    @classmethod\n    def setUpClass(cls):\n        # prepare the input files. This is important so the unit tests run from wherever\n        # they need to run from.\n        cls.directoryChanger = directoryChangers.DirectoryChanger(TEST_ROOT)\n        cls.directoryChanger.open()\n\n    @classmethod\n    def tearDownClass(cls):\n        cls.directoryChanger.close()\n\n    def setUp(self):\n        \"\"\"Use the related setup in the testFuelHandlers module.\"\"\"\n        self.o, self.r = test_reactors.loadTestReactor(\n            self.directoryChanger.destination, inputFileName=\"refTestCartesian.yaml\"\n        )\n\n    def test_custom(self):\n        \"\"\"Test Custom material with custom density.\"\"\"\n        fuel = self.r.core.getFirstAssembly(Flags.MIDDLE | Flags.FUEL).getFirstBlock(Flags.FUEL)\n        custom = fuel.getComponent(Flags.FUEL)\n        self.assertEqual(self.r.core.geomType, geometry.GeomType.CARTESIAN)\n        # from blueprints input file\n        self.assertAlmostEqual(custom.getNumberDensity(\"U238\"), 0.0134125)\n"
  },
  {
    "path": "armi/tests/test_context.py",
    "content": "# Copyright 2024 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Serial tests for the Context module.\"\"\"\n\nimport unittest\n\nfrom armi import context\n\n\nclass TestContextSerial(unittest.TestCase):\n    \"\"\"Serial tests for the Context module.\"\"\"\n\n    @unittest.skipIf(context.MPI_SIZE > 1, \"Serial test only\")\n    def test_rank(self):\n        self.assertEqual(context.MPI_RANK, 0)\n        self.assertEqual(context.MPI_SIZE, 1)\n\n    @unittest.skipIf(context.MPI_SIZE > 1, \"Serial test only\")\n    def test_nonNoneData(self):\n        self.assertGreater(len(context.APP_DATA), 0)\n        self.assertGreater(len(context.DOC), 0)\n        self.assertGreater(len(context.getFastPath()), 0)\n        self.assertGreater(len(context.PROJECT_ROOT), 0)\n        self.assertGreater(len(context.RES), 0)\n        self.assertGreater(len(context.ROOT), 0)\n        self.assertGreater(len(context.USER), 0)\n"
  },
  {
    "path": "armi/tests/test_interfaces.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests the Interface.\"\"\"\n\nimport unittest\n\nfrom armi import interfaces, settings\n\n\nclass DummyInterface(interfaces.Interface):\n    name = \"Dummy\"\n    purpose = \"dummyAction\"\n\n\nclass TestCodeInterface(unittest.TestCase):\n    \"\"\"Test Code interface.\"\"\"\n\n    def setUp(self):\n        self.cs = settings.Settings()\n\n    def test_isRequestedDetailPoint(self):\n        \"\"\"Tests notification of detail points.\"\"\"\n        newSettings = {\"dumpSnapshot\": [\"000001\", \"995190\"]}\n        cs = self.cs.modified(newSettings=newSettings)\n\n        i = DummyInterface(None, cs)\n\n        self.assertEqual(i.isRequestedDetailPoint(0, 1), True)\n        self.assertEqual(i.isRequestedDetailPoint(995, 190), True)\n        self.assertEqual(i.isRequestedDetailPoint(5, 10), False)\n\n    def test_enabled(self):\n        \"\"\"Test turning interfaces on and off.\"\"\"\n        i = DummyInterface(None, self.cs)\n\n        self.assertEqual(i.enabled(), True)\n        i.enabled(False)\n        self.assertEqual(i.enabled(), False)\n        i.enabled(True)\n        self.assertEqual(i.enabled(), True)\n\n    def test_nameContains(self):\n        i = DummyInterface(None, self.cs)\n        self.assertFalse(i.nameContains(\"nope\"))\n        self.assertTrue(i.nameContains(\"Dum\"))\n\n    def test_distributable(self):\n        i = DummyInterface(None, 
self.cs)\n        self.assertEqual(i.distributable(), 1)\n\n    def test_preDistributeState(self):\n        i = DummyInterface(None, self.cs)\n        self.assertEqual(i.preDistributeState(), {})\n\n    def test_duplicate(self):\n        i = DummyInterface(None, self.cs)\n        iDup = i.duplicate()\n\n        self.assertEqual(type(i), type(iDup))\n        self.assertEqual(i.enabled(), iDup.enabled())\n\n\nclass TestTightCoupler(unittest.TestCase):\n    \"\"\"Test the tight coupler class.\"\"\"\n\n    def setUp(self):\n        cs = settings.Settings()\n        cs[\"tightCoupling\"] = True\n        cs[\"tightCouplingSettings\"] = {\"dummyAction\": {\"parameter\": \"nothing\", \"convergence\": 1.0e-5}}\n        self.interface = DummyInterface(None, cs)\n\n    def test_couplerActive(self):\n        self.assertIsNotNone(self.interface.coupler)\n\n    def test_storePreviousIterationValue(self):\n        self.interface.coupler.storePreviousIterationValue(1.0)\n        self.assertEqual(self.interface.coupler._previousIterationValue, 1.0)\n\n    def test_storePreviousIterationValueException(self):\n        with self.assertRaises(TypeError) as cm:\n            self.interface.coupler.storePreviousIterationValue({5.0})\n            the_exception = cm.exception\n            self.assertEqual(the_exception.error_code, 3)\n\n    def test_isConvergedValueError(self):\n        with self.assertRaises(ValueError) as cm:\n            self.interface.coupler.isConverged(1.0)\n            the_exception = cm.exception\n            self.assertEqual(the_exception.error_code, 3)\n\n    def test_isConverged(self):\n        \"\"\"Ensure TightCoupler.isConverged() works with float, 1D list, and ragged 2D list.\n\n        .. 
test:: The tight coupling logic is based around a convergence criteria.\n            :id: T_ARMI_OPERATOR_PHYSICS1\n            :tests: R_ARMI_OPERATOR_PHYSICS\n\n        Notes\n        -----\n        2D lists can end up being ragged as assemblies can have different number of blocks.\n        Ragged lists are easier to manage with lists as opposed to numpy.arrays,\n        namely, their dimension is preserved.\n        \"\"\"\n        # show a situation where it doesn't converge\n        previousValues = {\n            \"float\": 1.0,\n            \"list1D\": [1.0, 2.0],\n            \"list2D\": [[1, 2, 3], [1, 2]],\n        }\n        updatedValues = {\n            \"float\": 5.0,\n            \"list1D\": [5.0, 6.0],\n            \"list2D\": [[5, 6, 7], [5, 6]],\n        }\n        for previous, current in zip(previousValues.values(), updatedValues.values()):\n            self.interface.coupler.storePreviousIterationValue(previous)\n            self.assertFalse(self.interface.coupler.isConverged(current))\n\n        # show a situation where it DOES converge\n        previousValues = updatedValues\n        for previous, current in zip(previousValues.values(), updatedValues.values()):\n            self.interface.coupler.storePreviousIterationValue(previous)\n            self.assertTrue(self.interface.coupler.isConverged(current))\n\n    def test_isConvergedRuntimeError(self):\n        \"\"\"Test to ensure 3D arrays do not work.\"\"\"\n        previous = [[[1, 2, 3]], [[1, 2, 3]], [[1, 2, 3]]]\n        updatedValues = [[[5, 6, 7]], [[5, 6, 7]], [[5, 6, 7]]]\n        self.interface.coupler.storePreviousIterationValue(previous)\n        with self.assertRaises(RuntimeError) as cm:\n            self.interface.coupler.isConverged(updatedValues)\n            the_exception = cm.exception\n            self.assertEqual(the_exception.error_code, 3)\n\n    def test_getListDimension(self):\n        a = [1, 2, 3]\n        
self.assertEqual(interfaces.TightCoupler.getListDimension(a), 1)\n        a = [[1, 2, 3]]\n        self.assertEqual(interfaces.TightCoupler.getListDimension(a), 2)\n        a = [[[1, 2, 3]]]\n        self.assertEqual(interfaces.TightCoupler.getListDimension(a), 3)\n"
  },
  {
    "path": "armi/tests/test_lwrInputs.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for C5G7 input files.\"\"\"\n\nimport os\nimport unittest\nfrom logging import WARNING\n\nfrom armi import runLog\nfrom armi.reactor.flags import Flags\nfrom armi.reactor.tests import test_reactors\nfrom armi.testing import TESTING_ROOT\nfrom armi.tests import mockRunLogs\nfrom armi.utils import directoryChangers\n\nTEST_INPUT_TITLE = \"c5g7-settings.yaml\"\n\n\nclass C5G7ReactorTests(unittest.TestCase):\n    def setUp(self):\n        self.td = directoryChangers.TemporaryDirectoryChanger()\n        self.td.__enter__()\n\n    def tearDown(self):\n        self.td.__exit__(None, None, None)\n\n    def test_loadC5G7(self):\n        \"\"\"\n        Load the C5G7 case from input and check basic counts.\n        (Also, check that we are getting warnings when reading the YAML).\n        \"\"\"\n        with mockRunLogs.BufferLog() as mock:\n            # we should start with a clean slate\n            self.assertEqual(\"\", mock.getStdout())\n            runLog.LOG.startLog(\"test_loadC5G7\")\n            runLog.LOG.setVerbosity(WARNING)\n\n            # load the reactor\n            _o, r = test_reactors.loadTestReactor(\n                os.path.join(TESTING_ROOT, \"reactors\", \"c5g7\"),\n                inputFileName=TEST_INPUT_TITLE,\n            )\n\n            # test warnings are being logged for malformed isotopics info in the settings file\n            
streamVal = mock.getStdout()\n            self.assertIn(\"Case Information\", streamVal, msg=streamVal)\n            self.assertIn(\"Input File\", streamVal, msg=streamVal)\n\n            # test that there are 100 of each high, medium, and low MOX pins\n            b = r.core.getFirstBlock(Flags.MOX)\n            fuelPinsHigh = b.getComponent(Flags.HIGH | Flags.MOX)\n            self.assertEqual(fuelPinsHigh.getDimension(\"mult\"), 100)\n\n            # test the Guide Tube dimensions\n            gt = b.getComponent(Flags.GUIDE_TUBE)\n            self.assertEqual(gt.getDimension(\"mult\"), 24)\n"
  },
  {
    "path": "armi/tests/test_mpiActions.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for MPI actions.\"\"\"\n\nimport unittest\nfrom collections import defaultdict\nfrom unittest.mock import patch\n\nfrom armi import context\nfrom armi.mpiActions import (\n    DistributeStateAction,\n    DistributionAction,\n    MpiAction,\n    _disableForExclusiveTasks,\n    _makeQueue,\n    runActions,\n    runBatchedActions,\n)\nfrom armi.reactor.tests import test_reactors\nfrom armi.tests import mockRunLogs\nfrom armi.utils import iterables\n\n\nclass MockMpiComm:\n    \"\"\"Mock MPI Communication library.\"\"\"\n\n    def allgather(self, name):\n        return [\"1\", \"2\", \"3\", \"4\"]\n\n    def bcast(self, data, root=0):\n        return defaultdict(int)\n\n    def Get_rank(self):\n        return 1\n\n    def Get_size(self):\n        return 4\n\n    def scatter(self, actions, root=0):\n        return None\n\n    def Split(self, num):\n        return self\n\n\nclass MockMpiAction(MpiAction):\n    \"\"\"Mock MPI Action, to simplify tests.\"\"\"\n\n    runActionExclusive = False\n\n    def __init__(self, broadcastResult: int = 3, invokeResult: int = 7):\n        self.broadcastResult = broadcastResult\n        self.invokeResult = invokeResult\n\n    def broadcast(self, obj=None):\n        return self.broadcastResult\n\n    def invoke(self, o, r, cs):\n        return self.invokeResult\n\n\n@unittest.skipUnless(context.MPI_RANK == 0, \"test only on 
root node\")\nclass MpiIterTests(unittest.TestCase):\n    def setUp(self):\n        \"\"\"Save MPI size on entry.\"\"\"\n        self._mpiSize = context.MPI_SIZE\n        self.action = MpiAction()\n\n    def tearDown(self):\n        \"\"\"Restore MPI rank and size on exit.\"\"\"\n        context.MPI_SIZE = self._mpiSize\n        context.MPI_RANK = 0\n\n    def test_parallel(self):\n        self.action.serial = False\n        self.assertTrue(self.action.parallel)\n\n        self.action.serial = True\n        self.assertFalse(self.action.parallel)\n\n    def test_serialGather(self):\n        self.action.serial = True\n        self.assertEqual(len(self.action.gather()), 1)\n\n    def test_mpiIter(self):\n        allObjs = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]\n        distObjs = [[0, 1, 2], [3, 4, 5], [6, 7], [8, 9], [10, 11]]\n\n        context.MPI_SIZE = 5\n        for rank in range(context.MPI_SIZE):\n            context.MPI_RANK = rank\n            myObjs = list(self.action.mpiIter(allObjs))\n            self.assertEqual(myObjs, distObjs[rank])\n\n    def _distributeObjects(self, allObjs, numProcs):\n        context.MPI_SIZE = numProcs\n        objs = []\n        for context.MPI_RANK in range(context.MPI_SIZE):\n            objs.append(list(self.action.mpiIter(allObjs)))\n        return objs\n\n    def test_perfectBalancing(self):\n        \"\"\"Test load balancing when numProcs divides numObjects.\n\n        In this case, all processes should get the same number of objects.\n        \"\"\"\n        numObjs, numProcs = 25, 5\n        allObjs = list(range(numObjs))\n        objs = self._distributeObjects(allObjs, numProcs)\n        counts = [len(o) for o in objs]\n        imbalance = max(counts) - min(counts)\n\n        # ensure we haven't missed any objects\n        self.assertEqual(iterables.flatten(objs), allObjs)\n\n        # check imbalance\n        self.assertEqual(imbalance, 0)\n\n    def test_excessProcesses(self):\n        \"\"\"Test load balancing when 
numProcs exceeds numObjects.\n\n        In this case, some processes should receive a single object and the rest should receive no objects.\n        \"\"\"\n        numObjs, numProcs = 5, 25\n        allObjs = list(range(numObjs))\n        objs = self._distributeObjects(allObjs, numProcs)\n        counts = [len(o) for o in objs]\n        imbalance = max(counts) - min(counts)\n\n        # ensure we haven't missed any objects\n        self.assertEqual(iterables.flatten(objs), allObjs)\n\n        # check imbalance\n        self.assertLessEqual(imbalance, 1)\n\n    def test_typicalBalancing(self):\n        \"\"\"Test load balancing for typical case (numProcs < numObjs).\n\n        In this case, the total imbalance should be 1 (except for the perfectly balanced case).\n        \"\"\"\n        numObjs, numProcs = 25, 6\n        allObjs = list(range(numObjs))\n        objs = self._distributeObjects(allObjs, numProcs)\n\n        # typical case (more objects than processes)\n        counts = [len(o) for o in objs]\n        imbalance = max(counts) - min(counts)\n        self.assertLessEqual(imbalance, 1)\n        self.assertEqual(iterables.flatten(objs), allObjs)\n\n    @patch(\"armi.context.MPI_COMM\", MockMpiComm())\n    @patch(\"armi.context.MPI_SIZE\", 4)\n    def test_runActionsDistributionAction(self):\n        o, r = test_reactors.loadTestReactor(inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\")\n\n        act = DistributionAction([self.action])\n        results = runActions(o, r, o.cs, [act])\n        self.assertEqual(len(results), 1)\n        self.assertIsNone(results[0])\n\n        o.cs[\"verbosity\"] = \"debug\"\n        res = act.invokeHook()\n        self.assertIsNone(res)\n\n    @patch(\"armi.context.MPI_COMM\", MockMpiComm())\n    @patch(\"armi.context.MPI_SIZE\", 4)\n    @patch(\"armi.context.MPI_NODENAMES\", [\"node0\", \"node0\", \"node1\", \"node1\"])\n    @patch(\"armi.context.MPI_DISTRIBUTABLE\", True)\n    def test_runBatchedActions(self):\n   
     o, r = test_reactors.loadTestReactor(inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\")\n\n        actionsByNode = {\n            \"node0\": [MockMpiAction(invokeResult=1)],\n            \"node1\": [MockMpiAction(invokeResult=5), MockMpiAction(invokeResult=11)],\n        }\n\n        # run in serial\n        with mockRunLogs.BufferLog() as mock:\n            results = runBatchedActions(o, r, o.cs, actionsByNode, serial=True)\n            self.assertIn(\"Running 3 MPI actions in serial\", mock.getStdout())\n        self.assertEqual(len(results), 3)\n        self.assertListEqual(results, [1, 5, 11])\n\n        # run in parallel\n        with mockRunLogs.BufferLog() as mock:\n            results = runBatchedActions(o, r, o.cs, actionsByNode)\n            self.assertIn(\"Running 3 MPI actions in parallel over 2 nodes.\", mock.getStdout())\n        self.assertEqual(len(results), 1)\n        self.assertIsNone(results[0])\n\n    @patch(\"armi.context.MPI_COMM\", MockMpiComm())\n    @patch(\"armi.context.MPI_SIZE\", 4)\n    @patch(\"armi.context.MPI_NODENAMES\", [\"node0\", \"node0\", \"node1\", \"node1\"])\n    @patch(\"armi.context.MPI_DISTRIBUTABLE\", True)\n    def test_runBatchedActionsOverload(self):\n        \"\"\"Test that an error is thrown if the number of tasks exceeds number of ranks.\"\"\"\n        o, r = test_reactors.loadTestReactor(inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\")\n\n        actionsByNode = {\n            \"node0\": [MockMpiAction()],\n            \"node1\": [MockMpiAction(), MockMpiAction(), MockMpiAction()],\n        }\n\n        # run in parallel\n        with mockRunLogs.BufferLog() as mock:\n            with self.assertRaises(ValueError):\n                runBatchedActions(o, r, o.cs, actionsByNode)\n            self.assertIn(\"There are more actions (3) than ranks available (2) on node1!\", mock.getStdout())\n\n    @patch(\"armi.context.MPI_COMM\", MockMpiComm())\n    @patch(\"armi.context.MPI_SIZE\", 4)\n    
def test_runActionsDistributeStateAction(self):\n        o, r = test_reactors.loadTestReactor(inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\")\n\n        act = DistributeStateAction([self.action])\n        results = runActions(o, r, o.cs, [act])\n        self.assertEqual(len(results), 1)\n        self.assertIsNone(results[0])\n\n    @patch(\"armi.context.MPI_COMM\", MockMpiComm())\n    @patch(\"armi.context.MPI_SIZE\", 4)\n    @patch(\"armi.context.MPI_DISTRIBUTABLE\", True)\n    def test_runActionsDistStateActionParallel(self):\n        o, r = test_reactors.loadTestReactor(inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\")\n\n        act = DistributeStateAction([self.action])\n        results = runActions(o, r, o.cs, [act])\n        self.assertEqual(len(results), 1)\n        self.assertIsNone(results[0])\n\n    def test_invokeAsMaster(self):\n        \"\"\"Verify that calling invokeAsMaster calls invoke.\"\"\"\n        self.assertEqual(7, MockMpiAction.invokeAsMaster(1, 2, 3))\n\n\nclass QueueActionsTests(unittest.TestCase):\n    def test_disableForExclusiveTasks(self):\n        num = 5\n        actionsThisRound = [MpiAction() for _ in range(num - 1)]\n        actionsThisRound.append(None)\n        useForComputation = [True] * num\n        exclusiveIndices = [1, 3]\n        for i in exclusiveIndices:\n            actionsThisRound[i].runActionExclusive = True\n\n        useForComputation = _disableForExclusiveTasks(actionsThisRound, useForComputation)\n        for i in range(num):\n            if i in exclusiveIndices:\n                # won't be used for computation in future round\n                self.assertFalse(useForComputation[i])\n            else:\n                self.assertTrue(useForComputation[i])\n\n    def test_makeQueue(self):\n        num = 5\n        actions = [MpiAction() for _ in range(num)]\n        for i, action in enumerate(actions):\n            action.runActionExclusive = True\n            action.priority = 10 - i  # 
make it reverse so it actually has to sort\n        useForComputation = [True] * (num - 1)\n        queue, numBatches = _makeQueue(actions, useForComputation)\n        self.assertEqual(numBatches, 2)\n        self.assertEqual(len(queue), len(actions))\n\n        lastPriority = -999\n        for action in queue:\n            # check that when more exclusive than cpus they go to non-exclusive\n            self.assertFalse(action.runActionExclusive)\n            self.assertGreaterEqual(action.priority, lastPriority)\n            lastPriority = action.priority\n\n        exclusiveIndices = [1, 3]\n        for i in exclusiveIndices:\n            actions[i].runActionExclusive = True\n        useForComputation = [True] * (num - 2)\n        queue, numBatches = _makeQueue(actions, useForComputation)\n        # 3 batches since 2 are exclusive and 3 left over tasks\n        self.assertEqual(numBatches, 3)\n        # check that they remain exclusive\n        for i in exclusiveIndices:\n            self.assertTrue(actions[i].runActionExclusive)\n\n        lastPriority = -999\n        foundFirstNonExclusive = False\n        for action in queue:\n            if not action.runActionExclusive:\n                foundFirstNonExclusive = True\n                # priority order resets for non-exclusive\n                lastPriority = -999\n\n            if foundFirstNonExclusive:\n                # all after the first nonExclusive should be non-exclusive\n                self.assertFalse(action.runActionExclusive)\n            self.assertGreaterEqual(action.priority, lastPriority)\n            lastPriority = action.priority\n"
  },
  {
    "path": "armi/tests/test_mpiFeatures.py",
    "content": "# Copyright 2021 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nTests for featurest that need MPI, and thus require special testing.\n\nThese tests will be generally ignored by pytest if you are trying to run\nthem in an environment without MPI installed.\n\nTo run these tests from the commandline, install MPI, mpi4py, and do:\n\nmpiexec -n 2 python -m pytest armi/tests/test_mpiFeatures.py\nor\nmpiexec.exe -n 2 python -m pytest armi/tests/test_mpiFeatures.py\n\"\"\"\n\nimport os\nimport shutil\nimport unittest\nfrom unittest.mock import patch\n\nfrom armi import context, mpiActions, settings\nfrom armi.interfaces import Interface\nfrom armi.mpiActions import DistributeStateAction\nfrom armi.operators import OperatorMPI\nfrom armi.physics.neutronics.const import CONF_CROSS_SECTION\nfrom armi.reactor import blueprints, reactors\nfrom armi.reactor.parameters import parameterDefinitions\nfrom armi.reactor.tests import test_reactors\nfrom armi.tests import ARMI_RUN_PATH, TEST_ROOT, mockRunLogs\nfrom armi.utils import pathTools\nfrom armi.utils.directoryChangers import TemporaryDirectoryChanger\n\n# determine if this is a parallel run, and MPI is installed\nMPI_EXE = None\nif shutil.which(\"mpiexec.exe\") is not None:\n    MPI_EXE = \"mpiexec.exe\"\nelif shutil.which(\"mpiexec\") is not None:\n    MPI_EXE = \"mpiexec\"\n\nMPI_COMM = context.MPI_COMM\n\n\nclass FailingInterface1(Interface):\n    \"\"\"utility classes to make 
sure the logging system fails properly.\"\"\"\n\n    name = \"failer\"\n\n    def interactEveryNode(self, cycle, node):\n        raise RuntimeError(\"Failing interface failure\")\n\n\nclass FailingInterface2(Interface):\n    \"\"\"utility class to make sure the logging system fails properly.\"\"\"\n\n    name = \"failer\"\n\n    def interactEveryNode(self, cycle, node):\n        raise RuntimeError(\"Failing interface critical failure\")\n\n\nclass FailingInterface3(Interface):\n    \"\"\"fails on worker operate.\"\"\"\n\n    name = \"failer\"\n\n    def fail(self):\n        raise RuntimeError(\"Failing interface critical worker failure\")\n\n    def interactEveryNode(self, c, n):\n        context.MPI_COMM.bcast(\"fail\", root=0)\n\n    def workerOperate(self, cmd):\n        if cmd == \"fail\":\n            self.fail()\n            return True\n        return False\n\n\nclass MockInterface(Interface):\n    name = \"mockInterface\"\n\n    def interactInit(self):\n        pass\n\n\nclass MpiOperatorTests(unittest.TestCase):\n    \"\"\"Testing the MPI parallelization operator.\"\"\"\n\n    def setUp(self):\n        self.old_op, self.r = test_reactors.loadTestReactor(\n            TEST_ROOT, inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\"\n        )\n        self.o = OperatorMPI(cs=self.old_op.cs)\n        self.o.r = self.r\n\n    @patch(\"armi.operators.Operator.operate\")\n    @unittest.skipIf(context.MPI_SIZE <= 1 or MPI_EXE is None, \"Parallel test only\")\n    def test_basicOperatorMPI(self, mockOpMpi):\n        \"\"\"Test we can drive a parallel operator.\n\n        .. 
test:: Run a parallel operator.\n            :id: T_ARMI_OPERATOR_MPI0\n            :tests: R_ARMI_OPERATOR_MPI\n        \"\"\"\n        with mockRunLogs.BufferLog() as mock:\n            self.o.operate()\n            self.assertIn(\"OperatorMPI.operate\", mock.getStdout())\n\n    @unittest.skipIf(context.MPI_SIZE <= 1 or MPI_EXE is None, \"Parallel test only\")\n    def test_primaryException(self):\n        \"\"\"Test a custom interface that only fails on the main process.\n\n        .. test:: Run a parallel operator that fails online on the main process.\n            :id: T_ARMI_OPERATOR_MPI1\n            :tests: R_ARMI_OPERATOR_MPI\n        \"\"\"\n        self.o.removeAllInterfaces()\n        failer = FailingInterface1(self.o.r, self.o.cs)\n        self.o.addInterface(failer)\n\n        if context.MPI_RANK == 0:\n            self.assertRaises(RuntimeError, self.o.operate)\n        else:\n            self.o.operate()\n\n    @unittest.skipIf(context.MPI_SIZE <= 1 or MPI_EXE is None, \"Parallel test only\")\n    def test_primaryCritical(self):\n        self.o.removeAllInterfaces()\n        failer = FailingInterface2(self.o.r, self.o.cs)\n        self.o.addInterface(failer)\n\n        if context.MPI_RANK == 0:\n            self.assertRaises(Exception, self.o.operate)\n        else:\n            self.o.operate()\n\n    @unittest.skipIf(context.MPI_SIZE <= 1 or MPI_EXE is None, \"Parallel test only\")\n    def test_finalizeInteract(self):\n        \"\"\"Test to make sure workers are reset after interface interactions.\"\"\"\n        # Add a random number of interfaces\n        interface = MockInterface(self.o.r, self.o.cs)\n        self.o.addInterface(interface)\n\n        with mockRunLogs.BufferLog() as mock:\n            if context.MPI_RANK == 0:\n                self.o.interactAllInit()\n                context.MPI_COMM.bcast(\"quit\", root=0)\n                context.MPI_COMM.bcast(\"finished\", root=0)\n            else:\n                
self.o.workerOperate()\n\n            logMessage = \"Workers have been reset.\" if context.MPI_RANK == 0 else \"Workers are being reset.\"\n            numCalls = len([line for line in mock.getStdout().splitlines() if logMessage in line])\n            self.assertGreaterEqual(numCalls, 1)\n\n\n# these two must be defined up here so that they can be pickled\nclass BcastAction1(mpiActions.MpiAction):\n    def invokeHook(self):\n        nItems = 50\n        results = [None] * nItems\n        for objIndex in range(nItems):\n            if objIndex % context.MPI_SIZE == context.MPI_RANK:\n                results[objIndex] = objIndex\n\n        allResults = self.gather(results)\n\n        if allResults:\n            return [allResults[ai % context.MPI_SIZE][ai] for ai in range(nItems)]\n        else:\n            return []\n\n\nclass BcastAction2(mpiActions.MpiAction):\n    def invokeHook(self):\n        results = []\n        for num in self.mpiIter(range(50)):\n            results.append(num)\n\n        allResults = self.gather(results)\n        if allResults:\n            return self.mpiFlatten(allResults)\n        else:\n            return []\n\n\nclass MpiDistributeStateTests(unittest.TestCase):\n    def setUp(self):\n        self.cs = settings.Settings(fName=ARMI_RUN_PATH)\n        bp = blueprints.loadFromCs(self.cs)\n\n        self.o = OperatorMPI(self.cs)\n        self.o.r = reactors.factory(self.cs, bp)\n        self.action = DistributeStateAction()\n        self.action.o = self.o\n        self.action.r = self.o.r\n\n    @unittest.skipIf(context.MPI_SIZE <= 1 or MPI_EXE is None, \"Parallel test only\")\n    def test_distributeSettings(self):\n        \"\"\"Under normal circumstances, we would not test \"private\" methods;\n        however, distributeState is quite complicated.\n        \"\"\"\n        self.action._distributeSettings()\n        if context.MPI_RANK == 0:\n            self.assertEqual(self.cs, self.action.o.cs)\n        else:\n            
self.assertNotEqual(self.cs, self.action.o.cs)\n            original = {ss.name: ss.value for ss in self.cs.values()}\n            current = {ss.name: ss.value for ss in self.action.o.cs.values()}\n            # remove values that are *expected to be* different...\n            # CONF_CROSS_SECTION is removed because unittest is being mean about\n            # comparing dicts...\n            for key in [\"stationaryBlockFlags\", \"verbosity\", CONF_CROSS_SECTION]:\n                if key in original:\n                    del original[key]\n                if key in current:\n                    del current[key]\n\n            for key in original.keys():\n                self.assertEqual(original[key], current[key])\n\n    @unittest.skipIf(context.MPI_SIZE <= 1 or MPI_EXE is None, \"Parallel test only\")\n    def test_distributeReactor(self):\n        \"\"\"Under normal circumstances, we would not test \"private\" methods;\n        however, distributeState is quite complicated.\n        \"\"\"\n        original_reactor = self.action.r\n        self.action._distributeReactor(self.cs)\n        if context.MPI_RANK == 0:\n            self.assertEqual(original_reactor, self.action.r)\n        else:\n            self.assertNotEqual(original_reactor, self.action.r)\n        self.assertIsNone(self.action.r.core.lib)\n\n    @unittest.skipIf(context.MPI_SIZE <= 1 or MPI_EXE is None, \"Parallel test only\")\n    def test_distributeInterfaces(self):\n        \"\"\"Under normal circumstances, we would not test \"private\" methods;\n        however, distributeState is quite complicated.\n        \"\"\"\n        original_interfaces = self.o.interfaces\n        self.action._distributeInterfaces()\n        if context.MPI_RANK == 0:\n            self.assertEqual(original_interfaces, self.o.interfaces)\n        else:\n            self.assertEqual(original_interfaces, self.o.interfaces)\n\n    @unittest.skipIf(context.MPI_SIZE <= 1 or MPI_EXE is None, \"Parallel test only\")\n    def 
test_distributeState(self):\n        original_reactor = self.o.r\n        original_lib = self.o.r.core.lib\n        original_interfaces = self.o.interfaces\n        original_bolassems = self.o.r.blueprints.assemblies\n        self.action.invokeHook()\n\n        if context.MPI_RANK == 0:\n            self.assertEqual(self.cs, self.o.cs)\n            self.assertEqual(original_reactor, self.o.r)\n            self.assertEqual(original_interfaces, self.o.interfaces)\n            self.assertDictEqual(original_bolassems, self.o.r.blueprints.assemblies)\n            self.assertEqual(original_lib, self.o.r.core.lib)\n        else:\n            self.assertNotEqual(self.cs, self.o.cs)\n            self.assertNotEqual(original_reactor, self.o.r)\n            self.assertNotEqual(original_bolassems, self.o.r.blueprints.assemblies)\n            self.assertEqual(original_interfaces, self.o.interfaces)\n            self.assertEqual(original_lib, self.o.r.core.lib)\n\n        for pDef in parameterDefinitions.ALL_DEFINITIONS:\n            self.assertFalse(pDef.assigned & parameterDefinitions.SINCE_LAST_DISTRIBUTE_STATE)\n\n    @unittest.skipIf(context.MPI_SIZE <= 1 or MPI_EXE is None, \"Parallel test only\")\n    def test_compileResults(self):\n        action1 = BcastAction1()\n        context.MPI_COMM.bcast(action1)\n        results1 = action1.invoke(None, None, None)\n\n        action2 = BcastAction2()\n        context.MPI_COMM.bcast(action2)\n        results2 = action2.invoke(None, None, None)\n        self.assertEqual(results1, results2)\n\n\nclass MpiPathToolsTests(unittest.TestCase):\n    @unittest.skipIf(context.MPI_SIZE <= 1 or MPI_EXE is None, \"Parallel test only\")\n    def test_cleanPathMpi(self):\n        \"\"\"Simple tests of cleanPath(), in the MPI scenario.\"\"\"\n        with TemporaryDirectoryChanger():\n            # TEST 0: File is not safe to delete, due to not being a temp dir or under FAST_PATH\n            filePath0 = \"test0_cleanPathNoMpi\"\n            
open(filePath0, \"w\").write(\"something\")\n            self.assertTrue(os.path.exists(filePath0))\n            with self.assertRaises(Exception):\n                pathTools.cleanPath(filePath0, mpiRank=context.MPI_RANK)\n            MPI_COMM.barrier()\n\n            # TEST 1: Delete a single file under FAST_PATH\n            filePath1 = os.path.join(context.getFastPath(), \"test1_cleanPathNoMpi\")\n            open(filePath1, \"w\").write(\"something\")\n            self.assertTrue(os.path.exists(filePath1))\n            pathTools.cleanPath(filePath1, mpiRank=context.MPI_RANK)\n            MPI_COMM.barrier()\n            self.assertFalse(os.path.exists(filePath1))\n\n            # TEST 2: Delete an empty directory under FAST_PATH\n            dir2 = os.path.join(context.getFastPath(), \"gimmeonereason\")\n            os.mkdir(dir2)\n            self.assertTrue(os.path.exists(dir2))\n            pathTools.cleanPath(dir2, mpiRank=context.MPI_RANK)\n            MPI_COMM.barrier()\n            self.assertFalse(os.path.exists(dir2))\n\n            # TEST 3: Delete an empty directory with forceClean=True\n            dir3 = \"tostayhere\"\n            os.mkdir(dir3)\n            self.assertTrue(os.path.exists(dir3))\n            pathTools.cleanPath(dir3, mpiRank=context.MPI_RANK, forceClean=True)\n            MPI_COMM.barrier()\n            self.assertFalse(os.path.exists(dir3))\n\n            # TEST 4: Delete a directory with two files inside with forceClean=True\n            dir4 = \"andilldirrightbackaround\"\n            os.mkdir(dir4)\n            open(os.path.join(dir4, \"file1.txt\"), \"w\").write(\"something1\")\n            open(os.path.join(dir4, \"file2.txt\"), \"w\").write(\"something2\")\n            self.assertTrue(os.path.exists(dir4))\n            self.assertTrue(os.path.exists(os.path.join(dir4, \"file1.txt\")))\n            self.assertTrue(os.path.exists(os.path.join(dir4, \"file2.txt\")))\n            pathTools.cleanPath(dir4, 
mpiRank=context.MPI_RANK, forceClean=True)\n            MPI_COMM.barrier()\n            self.assertFalse(os.path.exists(dir4))\n\n\nclass TestContextMpi(unittest.TestCase):\n    \"\"\"Parallel tests for the Context module.\"\"\"\n\n    @unittest.skipIf(context.MPI_SIZE <= 1 or MPI_EXE is None, \"Parallel test only\")\n    def test_rank(self):\n        self.assertGreater(context.MPI_RANK, -1)\n\n    @unittest.skipIf(context.MPI_SIZE <= 1 or MPI_EXE is None, \"Parallel test only\")\n    def test_nonNoneData(self):\n        self.assertGreater(len(context.APP_DATA), 0)\n        self.assertGreater(len(context.DOC), 0)\n        self.assertGreater(len(context.getFastPath()), 0)\n        self.assertGreater(len(context.PROJECT_ROOT), 0)\n        self.assertGreater(len(context.RES), 0)\n        self.assertGreater(len(context.ROOT), 0)\n        self.assertGreater(len(context.USER), 0)\n"
  },
  {
    "path": "armi/tests/test_mpiParameters.py",
    "content": "# Copyright 2023 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests of the MPI portion of the Parameters class.\"\"\"\n\nimport shutil\nimport unittest\n\nfrom armi import context\nfrom armi.reactor import composites, parameters\n\n# determine if this is a parallel run, and MPI is installed\nMPI_EXE = None\nif shutil.which(\"mpiexec.exe\") is not None:\n    MPI_EXE = \"mpiexec.exe\"\nelif shutil.which(\"mpiexec\") is not None:\n    MPI_EXE = \"mpiexec\"\n\n\nclass MockSyncPC(parameters.ParameterCollection):\n    pDefs = parameters.ParameterDefinitionCollection()\n    with pDefs.createBuilder(default=0.0, location=parameters.ParamLocation.AVERAGE) as pb:\n        pb.defParam(\"param1\", \"units\", \"p1 description\", categories=[\"cat1\"])\n        pb.defParam(\"param2\", \"units\", \"p2 description\", categories=[\"cat2\"])\n        pb.defParam(\"param3\", \"units\", \"p3 description\", categories=[\"cat3\"])\n\n\ndef makeComp(name):\n    \"\"\"Helper method for MPI sync tests: mock up a Composite with a minimal param collections.\"\"\"\n    c = composites.Composite(name)\n    c.p = MockSyncPC()\n    return c\n\n\nclass SynchronizationTests(unittest.TestCase):\n    \"\"\"Some tests that must be run with mpirun instead of the standard unittest system.\"\"\"\n\n    def setUp(self):\n        self.r = makeComp(\"reactor\")\n        self.r.core = makeComp(\"core\")\n        self.r.add(self.r.core)\n        for ai in 
range(context.MPI_SIZE * 3):\n            a = makeComp(\"assembly{}\".format(ai))\n            self.r.core.add(a)\n            for bi in range(3):\n                a.add(makeComp(\"block{}-{}\".format(ai, bi)))\n\n        self.comps = [self.r.core] + self.r.core.getChildren(deep=True)\n\n    @unittest.skipIf(context.MPI_SIZE <= 1 or MPI_EXE is None, \"Parallel test only\")\n    def test_noConflicts(self):\n        \"\"\"Make sure sync works across processes.\n\n        .. test:: Synchronize a reactor's state across processes.\n            :id: T_ARMI_CMP_MPI0\n            :tests: R_ARMI_CMP_MPI\n        \"\"\"\n        _syncCount = self.r.syncMpiState()\n\n        for ci, comp in enumerate(self.comps):\n            if ci % context.MPI_SIZE == context.MPI_RANK:\n                comp.p.param1 = (context.MPI_RANK + 1) * 30.0\n            else:\n                self.assertNotEqual((context.MPI_RANK + 1) * 30.0, comp.p.param1)\n\n        syncCount = self.r.syncMpiState()\n        self.assertEqual(len(self.comps), syncCount)\n\n        for ci, comp in enumerate(self.comps):\n            self.assertEqual((ci % context.MPI_SIZE + 1) * 30.0, comp.p.param1)\n\n    @unittest.skipIf(context.MPI_SIZE <= 1 or MPI_EXE is None, \"Parallel test only\")\n    def test_withConflicts(self):\n        \"\"\"Test conflicts arise correctly if we force a conflict.\n\n        .. test:: Raise errors when there are conflicts across processes.\n            :id: T_ARMI_CMP_MPI1\n            :tests: R_ARMI_CMP_MPI\n        \"\"\"\n        self.r.core.p.param1 = (context.MPI_RANK + 1) * 99.0\n        with self.assertRaises(ValueError):\n            self.r.syncMpiState()\n\n    @unittest.skipIf(context.MPI_SIZE <= 1 or MPI_EXE is None, \"Parallel test only\")\n    def test_withConflictsButSameValue(self):\n        \"\"\"Test that conflicts are ignored if the values are the same.\n\n        .. 
test:: Don't raise errors when multiple processes make the same changes.\n            :id: T_ARMI_CMP_MPI2\n            :tests: R_ARMI_CMP_MPI\n        \"\"\"\n        self.r.core.p.param1 = (context.MPI_SIZE + 1) * 99.0\n        self.r.syncMpiState()\n        self.assertEqual((context.MPI_SIZE + 1) * 99.0, self.r.core.p.param1)\n\n    @unittest.skipIf(context.MPI_SIZE <= 1 or MPI_EXE is None, \"Parallel test only\")\n    def test_conflictsMaintainWithStateRetainer(self):\n        \"\"\"Test that the state retainer fails correctly when it should.\"\"\"\n        with self.r.retainState(parameters.inCategory(\"cat2\")):\n            for _, comp in enumerate(self.comps):\n                comp.p.param2 = 99 * context.MPI_RANK\n\n        with self.assertRaises(ValueError):\n            self.r.syncMpiState()\n"
  },
  {
    "path": "armi/tests/test_plugins.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Provides functionality for testing implementations of plugins.\"\"\"\n\nimport unittest\nfrom copy import deepcopy\nfrom typing import Optional\n\nimport yamlize\n\nfrom armi import (\n    context,\n    getApp,\n    getPluginManagerOrFail,\n    interfaces,\n    plugins,\n    settings,\n    utils,\n)\nfrom armi.bookkeeping.db import loadOperator\nfrom armi.bookkeeping.db.databaseInterface import DatabaseInterface\nfrom armi.physics.neutronics import NeutronicsPlugin\nfrom armi.reactor.blocks import Block\nfrom armi.reactor.converters.axialExpansionChanger import AxialExpansionChanger\nfrom armi.reactor.flags import Flags\nfrom armi.testing import loadTestReactor\nfrom armi.tests import TEST_ROOT\nfrom armi.utils.directoryChangers import TemporaryDirectoryChanger\n\n\nclass PluginFlags1(plugins.ArmiPlugin):\n    \"\"\"Simple Plugin that defines a single, new flag.\"\"\"\n\n    @staticmethod\n    @plugins.HOOKIMPL\n    def defineFlags():\n        \"\"\"Function to provide new Flags definitions.\"\"\"\n        return {\"SUPER_FLAG\": utils.flags.auto()}\n\n\nclass SillyAxialExpansionChanger(AxialExpansionChanger):\n    \"\"\"Fake, test-specific axial expansion changer that a plugin will register.\"\"\"\n\n\nclass SillyAxialPlugin(plugins.ArmiPlugin):\n    \"\"\"Trivial plugin that implements the axial expansion hook.\"\"\"\n\n    @staticmethod\n    
@plugins.HOOKIMPL\n    def getAxialExpansionChanger() -> type[SillyAxialExpansionChanger]:\n        return SillyAxialExpansionChanger\n\n\nclass BeforeReactorPlugin(plugins.ArmiPlugin):\n    \"\"\"Trivial plugin that implements the before reactor construction hook.\"\"\"\n\n    @staticmethod\n    @plugins.HOOKIMPL\n    def beforeReactorConstruction(cs) -> None:\n        cs.beforeReactorConstructionFlag = True\n\n\nclass TestPluginRegistration(unittest.TestCase):\n    def setUp(self):\n        \"\"\"\n        Manipulate the standard App. We can't just configure our own, since the\n        pytest environment bleeds between tests.\n        \"\"\"\n        self.app = getApp()\n        self._backupApp = deepcopy(self.app)\n\n    def tearDown(self):\n        \"\"\"Restore the App to its original state.\"\"\"\n        import armi\n\n        armi._app = self._backupApp\n        context.APP_NAME = \"armi\"\n\n    def test_defineFlags(self):\n        \"\"\"Define a new flag using the plugin defineFlags() method.\n\n        .. test:: Define a new, unique flag through the plugin pathway.\n            :id: T_ARMI_FLAG_EXTEND1\n            :tests: R_ARMI_FLAG_EXTEND\n\n        .. 
test:: Load a plugin into an app and show it is loaded.\n            :id: T_ARMI_PLUGIN_REGISTER\n            :tests: R_ARMI_PLUGIN\n        \"\"\"\n        app = getApp()\n\n        # show the new plugin isn't loaded yet\n        pluginNames = [p[0] for p in app.pluginManager.list_name_plugin()]\n        self.assertNotIn(\"PluginFlags1\", pluginNames)\n\n        # show the flag doesn't exist yet\n        with self.assertRaises(AttributeError):\n            Flags.SUPER_FLAG\n\n        # load the plugin\n        app.pluginManager.register(PluginFlags1)\n\n        # show the new plugin is loaded now\n        pluginNames = [p[0] for p in app.pluginManager.list_name_plugin()]\n        self.assertIn(\"PluginFlags1\", pluginNames)\n\n        # force-register new flags from the new plugin\n        app._pluginFlagsRegistered = False\n        app.registerPluginFlags()\n\n        # show the flag exists now\n        self.assertEqual(type(Flags.SUPER_FLAG._value), int)\n\n    def test_axialExpansionHook(self):\n        \"\"\"Test that plugins can override the axial expansion of assemblies via a hook.\"\"\"\n        pm = self.app.pluginManager\n        first = pm.hook.getAxialExpansionChanger()\n        # By default, make sure we get the armi-shipped expansion class\n        self.assertIs(first, AxialExpansionChanger)\n        pm.register(SillyAxialPlugin)\n        try:\n            second = pm.hook.getAxialExpansionChanger()\n            # Registering a plugin that implements the hook means we get that plugin's axial expander\n            self.assertIs(second, SillyAxialExpansionChanger)\n        finally:\n            pm.unregister(SillyAxialPlugin)\n\n    def test_beforeReactorConstructionHook(self):\n        \"\"\"Test that plugin hook successfully injects code before reactor initialization.\"\"\"\n        pm = getPluginManagerOrFail()\n        pm.register(BeforeReactorPlugin)\n        try:\n            o, r = loadTestReactor(TEST_ROOT, 
inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\", useCache=False)\n            self.assertTrue(o.cs.beforeReactorConstructionFlag)\n\n            # Check that hook is called for database loading\n            with TemporaryDirectoryChanger():\n                dbi = DatabaseInterface(r, o.cs)\n                dbi.initDB(fName=self._testMethodName + \".h5\")\n                db = dbi.database\n                db.writeToDB(r)\n                db.close()\n                o = loadOperator(self._testMethodName + \".h5\", 0, 0, callReactorConstructionHook=True)\n            self.assertTrue(o.cs.beforeReactorConstructionFlag)\n        finally:\n            pm.unregister(BeforeReactorPlugin)\n\n\nclass TestPluginBasics(unittest.TestCase):\n    def test_defineParameters(self):\n        \"\"\"Test that the default ARMI plugins are correctly defining parameters.\n\n        .. test:: ARMI plugins define parameters, which appear on a new Block.\n            :id: T_ARMI_PLUGIN_PARAMS\n            :tests: R_ARMI_PLUGIN_PARAMS\n        \"\"\"\n        # create a block\n        b = Block(\"fuel\", height=10.0)\n\n        # unless a plugin has registered a param, it doesn't exist\n        with self.assertRaises(AttributeError):\n            b.p.fakeParam\n\n        # Check the default values of parameters defined by the neutronics plugin\n        self.assertIsNone(b.p.axMesh)\n        self.assertEqual(b.p.flux, 0)\n        self.assertEqual(b.p.power, 0)\n        self.assertEqual(b.p.pdens, 0)\n\n        # Check the default values of parameters defined by the fuel performance plugin\n        self.assertEqual(b.p.gasPorosity, 0)\n        self.assertEqual(b.p.liquidPorosity, 0)\n\n    def test_exposeInterfaces(self):\n        \"\"\"Make sure that the exposeInterfaces hook is properly implemented.\n\n        .. 
test:: Plugins can add interfaces to the interface stack.\n            :id: T_ARMI_PLUGIN_INTERFACES0\n            :tests: R_ARMI_PLUGIN_INTERFACES\n        \"\"\"\n        plugin = NeutronicsPlugin()\n\n        cs = settings.Settings()\n        results = plugin.exposeInterfaces(cs)\n\n        # each plugin should return a list\n        self.assertIsInstance(results, list)\n        self.assertGreater(len(results), 0)\n        for result in results:\n            # Make sure all elements in the list satisfy the constraints of the hookspec\n            self.assertIsInstance(result, tuple)\n            self.assertEqual(len(result), 3)\n\n            order, interface, kwargs = result\n\n            self.assertIsInstance(order, (int, float))\n            self.assertTrue(issubclass(interface, interfaces.Interface))\n            self.assertIsInstance(kwargs, dict)\n\n    def test_pluginsExposeInterfaces(self):\n        \"\"\"Make sure that plugins properly expose their interfaces, by checking some\n        known examples.\n\n        .. 
test:: Check that some known plugins correctly add interfaces to the stack.\n            :id: T_ARMI_PLUGIN_INTERFACES1\n            :tests: R_ARMI_PLUGIN_INTERFACES\n        \"\"\"\n        # generate a test operator, with a full set of interfaces from plugins\n        o = loadTestReactor(TEST_ROOT, inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\")[0]\n        pm = getPluginManagerOrFail()\n\n        # test the plugins were generated\n        plugins = pm.get_plugins()\n        self.assertGreater(len(plugins), 0)\n\n        # test interfaces were generated from those plugins\n        ints = o.interfaces\n        self.assertGreater(len(ints), 0)\n\n        # test that certain plugins exist and correctly registered their interfaces\n        pluginStrings = \" \".join([str(p) for p in plugins])\n        interfaceStrings = \" \".join([str(i) for i in ints])\n\n        # Test that the BookkeepingPlugin registered the DatabaseInterface\n        self.assertIn(\"BookkeepingPlugin\", pluginStrings)\n        self.assertIn(\"DatabaseInterface\", interfaceStrings)\n\n        # Test that the BookkeepingPlugin registered the history interface\n        self.assertIn(\"BookkeepingPlugin\", pluginStrings)\n        self.assertIn(\"history\", interfaceStrings)\n\n        # Test that the EntryPointsPlugin registered the main interface\n        self.assertIn(\"EntryPointsPlugin\", pluginStrings)\n        self.assertIn(\"main\", interfaceStrings)\n\n        # Test that the FuelHandlerPlugin registered the fuelHandler interface\n        self.assertIn(\"FuelHandlerPlugin\", pluginStrings)\n        self.assertIn(\"fuelHandler\", interfaceStrings)\n\n\nclass TestPlugin(unittest.TestCase):\n    \"\"\"This contains some sanity tests that can be used by implementing plugins.\"\"\"\n\n    plugin: Optional[plugins.ArmiPlugin] = None\n\n    def test_defineBlueprintsSections(self):\n        \"\"\"Make sure that the defineBlueprintsSections hook is properly implemented.\"\"\"\n        if 
self.plugin is None:\n            return\n        if not hasattr(self.plugin, \"defineBlueprintsSections\"):\n            return\n\n        results = self.plugin.defineBlueprintsSections()\n        if results is None:\n            return\n\n        # each plugin should return a list\n        self.assertIsInstance(results, (list, type(None)))\n\n        for result in results:\n            self.assertIsInstance(result, tuple)\n            self.assertEqual(len(result), 3)\n            self.assertIsInstance(result[0], str)\n            self.assertIsInstance(result[1], yamlize.Attribute)\n            self.assertTrue(callable(result[2]))\n\n    def test_exposeInterfaces(self):\n        \"\"\"Make sure that the exposeInterfaces hook is properly implemented.\"\"\"\n        if self.plugin is None:\n            return\n\n        cs = settings.Settings()\n        results = self.plugin.exposeInterfaces(cs)\n        if results is None or not results:\n            return\n\n        # each plugin should return a list\n        self.assertIsInstance(results, list)\n        for result in results:\n            # Make sure all elements in the list satisfy the constraints of the hookspec\n            self.assertIsInstance(result, tuple)\n            self.assertEqual(len(result), 3)\n\n            order, interface, kwargs = result\n\n            self.assertIsInstance(order, (int, float))\n            self.assertTrue(issubclass(interface, interfaces.Interface))\n            self.assertIsInstance(kwargs, dict)\n"
  },
  {
    "path": "armi/tests/test_runLog.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests of the runLog tooling.\"\"\"\n\nimport logging\nimport os\nimport unittest\nfrom io import StringIO\nfrom pathlib import Path\nfrom shutil import rmtree\n\nfrom armi import runLog\nfrom armi.tests import mockRunLogs\nfrom armi.utils.directoryChangers import TemporaryDirectoryChanger\n\n\nclass TestRunLog(unittest.TestCase):\n    def test_setVerbosityFromInteger(self):\n        \"\"\"Test that the log verbosity can be set with an integer.\n\n        .. test:: The run log verbosity can be configured with an integer.\n            :id: T_ARMI_LOG0\n            :tests: R_ARMI_LOG\n        \"\"\"\n        log = runLog._RunLog(1)\n        expectedStrVerbosity = \"debug\"\n        verbosityRank = log.getLogVerbosityRank(expectedStrVerbosity)\n        runLog.setVerbosity(verbosityRank)\n        self.assertEqual(verbosityRank, runLog.getVerbosity())\n        self.assertEqual(verbosityRank, logging.DEBUG)\n\n    def test_setVerbosityFromString(self):\n        \"\"\"\n        Test that the log verbosity can be set with a string.\n\n        .. 
test:: The run log verbosity can be configured with a string.\n            :id: T_ARMI_LOG1\n            :tests: R_ARMI_LOG\n        \"\"\"\n        log = runLog._RunLog(1)\n        expectedStrVerbosity = \"error\"\n        verbosityRank = log.getLogVerbosityRank(expectedStrVerbosity)\n        runLog.setVerbosity(expectedStrVerbosity)\n        self.assertEqual(verbosityRank, runLog.getVerbosity())\n        self.assertEqual(verbosityRank, logging.ERROR)\n\n    def test_verbosityOutOfRange(self):\n        \"\"\"Test that the log verbosity setting resets to a canonical value when it is out of range.\"\"\"\n        runLog.setVerbosity(-50)\n        self.assertEqual(runLog.LOG.logger.level, min([v[0] for v in runLog.LOG.logLevels.values()]))\n\n        runLog.setVerbosity(5000)\n        self.assertEqual(runLog.LOG.logger.level, max([v[0] for v in runLog.LOG.logLevels.values()]))\n\n    def test_invalidSetVerbosityByString(self):\n        \"\"\"Test that the log verbosity setting fails if the integer is invalid.\"\"\"\n        with self.assertRaises(KeyError):\n            runLog.setVerbosity(\"taco\")\n\n        with self.assertRaises(TypeError):\n            runLog.setVerbosity([\"debug\"])\n\n    def test_parentRunLogging(self):\n        \"\"\"A basic test of the logging of the parent runLog.\"\"\"\n        # init the _RunLog object\n        log = runLog.LOG = runLog._RunLog(0)\n        log.startLog(\"test_parentRunLogging\")\n        runLog.createLogDir(0)\n        log.setVerbosity(logging.INFO)\n\n        # divert the logging to a stream, to make testing easier\n        stream = StringIO()\n        handler = logging.StreamHandler(stream)\n        log.logger.handlers = [handler]\n\n        # log some things\n        log.log(\"debug\", \"You shouldn't see this.\", single=False, label=None)\n        log.log(\"warning\", \"Hello, \", single=False, label=None)\n        log.log(\"error\", \"world!\", single=False, label=None)\n\n        log.logger.flush()\n        
log.logger.close()\n        runLog.close(99)\n\n        # test what was logged\n        streamVal = stream.getvalue()\n        self.assertIn(\"Hello\", streamVal, msg=streamVal)\n        self.assertIn(\"world\", streamVal, msg=streamVal)\n\n    def test_getWhiteSpace(self):\n        log = runLog._RunLog(0)\n        space0 = len(log.getWhiteSpace(0))\n        space1 = len(log.getWhiteSpace(1))\n        space9 = len(log.getWhiteSpace(9))\n\n        self.assertGreater(space1, space0)\n        self.assertEqual(space1, space9)\n\n    def test_warningReport(self):\n        \"\"\"A simple test of the warning tracking and reporting logic.\n\n        .. test:: Generate a warning report after a simulation is complete.\n            :id: T_ARMI_LOG2\n            :tests: R_ARMI_LOG\n        \"\"\"\n        # create the logger and do some logging\n        log = runLog.LOG = runLog._RunLog(321)\n        log.startLog(\"test_warningReport\")\n        runLog.createLogDir(0)\n\n        # divert the logging to a stream, to make testing easier\n        stream = StringIO()\n        handler = logging.StreamHandler(stream)\n        log.logger.handlers = [handler]\n\n        # log some things\n        log.setVerbosity(logging.INFO)\n        log.log(\"warning\", \"test_warningReport\", single=True, label=None)\n        log.log(\"debug\", \"invisible due to log level\", single=False, label=None)\n        log.log(\"warning\", \"test_warningReport\", single=True, label=None)\n        log.log(\"warning\", \"simple_warning\", single=False, label=None)\n        log.log(\"error\", \"high level something\", single=False, label=None)\n\n        # test that the logging found some duplicate outputs\n        dupsFilter = log.getDuplicatesFilter()\n        self.assertIsNotNone(dupsFilter)\n        warnings = dupsFilter.warningCounts\n        self.assertGreater(len(warnings), 0)\n\n        # run the warning report\n        log.warningReport()\n        runLog.close(1)\n        runLog.close(0)\n\n        # 
test what was logged\n        streamVal = stream.getvalue()\n        self.assertIn(\"Final Warning Count\", streamVal, msg=streamVal)\n        self.assertIn(\"simple_warning\", streamVal, msg=streamVal)\n        self.assertIn(\"test_warningReport\", streamVal, msg=streamVal)\n        self.assertIn(\"Total Number of Warnings\", streamVal, msg=streamVal)\n        self.assertNotIn(\"invisible\", streamVal, msg=streamVal)\n        self.assertEqual(streamVal.count(\"test_warningReport\"), 2, msg=streamVal)\n\n        # bonus check: edge case in duplicates filter\n        backupLog, log.logger = log.logger, None\n        self.assertIsNone(log.getDuplicatesFilter())\n        log.logger = backupLog\n\n    def test_warningReportInvalid(self):\n        \"\"\"A test of warningReport in an invalid situation.\n\n        .. test:: Test an important edge case for a warning report.\n            :id: T_ARMI_LOG3\n            :tests: R_ARMI_LOG\n        \"\"\"\n        # create the logger and do some logging\n        testName = \"test_warningReportInvalid\"\n        log = runLog.LOG = runLog._RunLog(323)\n        log.startLog(testName)\n        runLog.createLogDir(0)\n\n        # divert the logging to a stream, to make testing easier\n        stream = StringIO()\n        handler = logging.StreamHandler(stream)\n        log.logger.handlers = [handler]\n\n        # log some things\n        log.setVerbosity(logging.INFO)\n        log.log(\"warning\", testName, single=True, label=None)\n        log.log(\"debug\", \"invisible due to log level\", single=False, label=None)\n        log.log(\"warning\", testName, single=True, label=None)\n        log.log(\"error\", \"high level something\", single=False, label=None)\n\n        # test that the logging found some duplicate outputs\n        def returnNone(*args, **kwargs):\n            return None\n\n        log.logger.getDuplicatesFilter = returnNone\n        self.assertIsNone(log.logger.getDuplicatesFilter())\n\n        # run the warning 
report\n        log.warningReport()\n        runLog.close(1)\n        runLog.close(0)\n\n        # test what was logged\n        streamVal = stream.getvalue()\n        self.assertIn(testName, streamVal, msg=streamVal)\n        self.assertIn(\"None Found\", streamVal, msg=streamVal)\n        self.assertNotIn(\"invisible\", streamVal, msg=streamVal)\n        self.assertEqual(streamVal.count(testName), 1, msg=streamVal)\n\n    def test_closeLogging(self):\n        \"\"\"A basic test of the close() functionality.\"\"\"\n\n        def validate_loggers(log):\n            \"\"\"Little test helper, to make sure our loggers still look right.\"\"\"\n            handlers = [str(h) for h in log.logger.handlers]\n            self.assertEqual(len(handlers), 1, msg=\",\".join(handlers))\n\n            stderrHandlers = [str(h) for h in log.stderrLogger.handlers]\n            self.assertEqual(len(stderrHandlers), 1, msg=\",\".join(stderrHandlers))\n\n        # init logger\n        log = runLog.LOG = runLog._RunLog(777)\n        validate_loggers(log)\n\n        # start the logging for real\n        log.startLog(\"test_closeLogging\")\n        runLog.createLogDir()\n        validate_loggers(log)\n\n        # close() and test that we have correctly nullified our loggers\n        runLog.close(1)\n        validate_loggers(log)\n\n        # in a real run, the parent process would close() after all the children\n        runLog.close(0)\n\n    def test_setVerbosity(self):\n        \"\"\"Let's test the setVerbosity() method carefully.\n\n        .. test:: The run log has configurable verbosity.\n            :id: T_ARMI_LOG4\n            :tests: R_ARMI_LOG\n\n        .. 
test:: The run log can log to stream.\n            :id: T_ARMI_LOG_IO0\n            :tests: R_ARMI_LOG_IO\n        \"\"\"\n        with mockRunLogs.BufferLog() as mock:\n            # we should start with a clean slate\n            self.assertEqual(\"\", mock.getStdout())\n            runLog.LOG.startLog(\"test_setVerbosity\")\n            runLog.LOG.setVerbosity(logging.INFO)\n\n            # we should start at info level, and that should be working correctly\n            self.assertEqual(runLog.LOG.getVerbosity(), logging.INFO)\n            runLog.info(\"hi\")\n            self.assertIn(\"hi\", mock.getStdout())\n            mock.emptyStdout()\n\n            runLog.debug(\"invisible\")\n            self.assertEqual(\"\", mock.getStdout())\n\n            # setVerbosity() to WARNING, and verify it is working\n            runLog.LOG.setVerbosity(logging.WARNING)\n            runLog.info(\"still invisible\")\n            self.assertEqual(\"\", mock.getStdout())\n            runLog.warning(\"visible\")\n            self.assertIn(\"visible\", mock.getStdout())\n            mock.emptyStdout()\n\n            # setVerbosity() to DEBUG, and verify it is working\n            runLog.LOG.setVerbosity(logging.DEBUG)\n            runLog.debug(\"Visible\")\n            self.assertIn(\"Visible\", mock.getStdout())\n            mock.emptyStdout()\n\n            # setVerbosity() to ERROR, and verify it is working\n            runLog.LOG.setVerbosity(logging.ERROR)\n            runLog.warning(\"Still Invisible\")\n            self.assertEqual(\"\", mock.getStdout())\n            runLog.error(\"Visible!\")\n            self.assertIn(\"Visible!\", mock.getStdout())\n\n            # we shouldn't be able to setVerbosity() to a non-canonical value (logging module defense)\n            self.assertEqual(runLog.LOG.getVerbosity(), logging.ERROR)\n            runLog.LOG.setVerbosity(logging.WARNING + 1)\n            self.assertEqual(runLog.LOG.getVerbosity(), logging.WARNING)\n\n    def 
test_setVerbosityBeforeStartLog(self):\n        \"\"\"The user/dev may accidentally call ``setVerbosity()`` before ``startLog()``,\n        this should be mostly supportable. This is just an edge case.\n\n        .. test:: Test that we support the user setting log verbosity BEFORE the logging starts.\n            :id: T_ARMI_LOG5\n            :tests: R_ARMI_LOG\n        \"\"\"\n        with mockRunLogs.BufferLog() as mock:\n            # we should start with a clean slate, before debug logging\n            self.assertEqual(\"\", mock.getStdout())\n            runLog.LOG.setVerbosity(logging.DEBUG)\n            runLog.LOG.startLog(\"test_setVerbosityBeforeStartLog\")\n\n            # we should start at info level, and that should be working correctly\n            self.assertEqual(runLog.LOG.getVerbosity(), logging.DEBUG)\n            runLog.debug(\"hi\")\n            self.assertIn(\"hi\", mock.getStdout())\n            mock.emptyStdout()\n\n            # we should start with a clean slate, before info logging\n            self.assertEqual(\"\", mock.getStdout())\n            runLog.LOG.setVerbosity(logging.INFO)\n            runLog.LOG.startLog(\"test_setVerbosityBeforeStartLog2\")\n\n            # we should start at info level, and that should be working correctly\n            self.assertEqual(runLog.LOG.getVerbosity(), logging.INFO)\n            runLog.debug(\"nope\")\n            runLog.info(\"hi\")\n            self.assertIn(\"hi\", mock.getStdout())\n            self.assertNotIn(\"nope\", mock.getStdout())\n            mock.emptyStdout()\n\n    def test_callingStartLogMultipleTimes(self):\n        \"\"\"Calling startLog() multiple times will lead to multiple output files, but logging should still work.\"\"\"\n        with mockRunLogs.BufferLog() as mock:\n            # we should start with a clean slate\n            self.assertEqual(\"\", mock.getStdout())\n            runLog.LOG.startLog(\"test_callingStartLogMultipleTimes1\")\n            
runLog.LOG.setVerbosity(logging.INFO)\n\n            # we should start at info level, and that should be working correctly\n            self.assertEqual(runLog.LOG.getVerbosity(), logging.INFO)\n            runLog.info(\"hi1\")\n            self.assertIn(\"hi1\", mock.getStdout())\n            mock.emptyStdout()\n\n            # call startLog() again\n            runLog.LOG.startLog(\"test_callingStartLogMultipleTimes2\")\n            runLog.LOG.setVerbosity(logging.INFO)\n\n            # we should start at info level, and that should be working correctly\n            self.assertEqual(runLog.LOG.getVerbosity(), logging.INFO)\n            runLog.info(\"hi2\")\n            self.assertIn(\"hi2\", mock.getStdout())\n            mock.emptyStdout()\n\n            # call startLog() again\n            runLog.LOG.startLog(\"test_callingStartLogMultipleTimes3\")\n            runLog.LOG.setVerbosity(logging.INFO)\n\n            # we should start at info level, and that should be working correctly\n            self.assertEqual(runLog.LOG.getVerbosity(), logging.INFO)\n            runLog.info(\"hi3\")\n            self.assertIn(\"hi3\", mock.getStdout())\n            mock.emptyStdout()\n\n            # call startLog() again, with a duplicate logger name\n            runLog.LOG.startLog(\"test_callingStartLogMultipleTimes3\")\n            runLog.LOG.setVerbosity(logging.INFO)\n\n            # we should start at info level, and that should be working correctly\n            self.assertEqual(runLog.LOG.getVerbosity(), logging.INFO)\n            runLog.info(\"hi333\")\n            self.assertIn(\"hi333\", mock.getStdout())\n            mock.emptyStdout()\n\n    def test_deduplicationFilter(self):\n        \"\"\"Test that the logic to only print a log message once works correctly.\"\"\"\n        with mockRunLogs.BufferLog() as mock:\n            # we should start with a clean slate\n            self.assertEqual(\"\", mock.getStdout())\n            
runLog.LOG.startLog(\"test_deduplicationFilter\")\n            runLog.LOG.setVerbosity(logging.INFO)\n\n            msgInfo = \"singleInfoMessage\"\n            for i in range(4):\n                runLog.info(f\"{msgInfo}: {i}\", single=True, label=msgInfo)\n\n            msgWarn = \"singleWarnMessage\"\n            for j in range(4):\n                runLog.warning(f\"{msgWarn}: {j}\", single=True, label=msgWarn)\n\n            logs = mock.getStdout()\n            self.assertEqual(logs.count(msgInfo), 1)\n            self.assertEqual(logs.count(msgWarn), 1)\n\n    def test_concatenateLogs(self):\n        \"\"\"\n        Simple test of the concat logs function.\n\n        .. test:: The run log combines logs from different processes.\n            :id: T_ARMI_LOG_MPI\n            :tests: R_ARMI_LOG_MPI\n\n        .. test:: The run log can log to file.\n            :id: T_ARMI_LOG_IO1\n            :tests: R_ARMI_LOG_IO\n        \"\"\"\n        with TemporaryDirectoryChanger():\n            # create the log dir\n            logDir = \"test_concatenateLogs\"\n            if os.path.exists(logDir):\n                rmtree(logDir)\n            runLog.createLogDir(logDir)\n\n            # create as stdout file\n            stdoutFile1 = os.path.join(logDir, \"{}.runLogTest.0000.stdout\".format(runLog.STDOUT_LOGGER_NAME))\n            with open(stdoutFile1, \"w\") as f:\n                f.write(\"hello world\\n\")\n\n            stdoutFile2 = os.path.join(logDir, \"{}.runLogTest.0001.stdout\".format(runLog.STDOUT_LOGGER_NAME))\n            with open(stdoutFile2, \"w\") as f:\n                f.write(\"hello other world\\n\")\n\n            # verify behavior for a corner case\n            stdoutFile3 = os.path.join(logDir, \"{}..0000.stdout\".format(runLog.STDOUT_LOGGER_NAME))\n            with open(stdoutFile3, \"w\") as f:\n                f.write(\"hello world again\\n\")\n\n            self.assertTrue(os.path.exists(stdoutFile1))\n            
self.assertTrue(os.path.exists(stdoutFile2))\n            self.assertTrue(os.path.exists(stdoutFile3))\n\n            # create a stderr file\n            stderrFile = os.path.join(logDir, \"{}.runLogTest.0000.stderr\".format(runLog.STDOUT_LOGGER_NAME))\n            with open(stderrFile, \"w\") as f:\n                f.write(\"goodbye cruel world\\n\")\n\n            self.assertTrue(os.path.exists(stderrFile))\n\n            # concat logs\n            runLog.concatenateLogs(logDir=logDir)\n\n            # verify output\n            combinedLogFile = os.path.join(logDir, \"runLogTest-mpi.log\")\n            self.assertTrue(os.path.exists(combinedLogFile))\n            self.assertFalse(os.path.exists(stdoutFile1))\n            self.assertFalse(os.path.exists(stdoutFile2))\n            self.assertFalse(os.path.exists(stdoutFile3))\n            self.assertFalse(os.path.exists(stderrFile))\n\n            # verify behavior for a corner case\n            stdoutFile3 = os.path.join(logDir, \"{}..0000.stdout\".format(runLog.STDOUT_LOGGER_NAME))\n            with open(stdoutFile3, \"w\") as f:\n                f.write(\"hello world again\\n\")\n            # concat logs\n            runLog.concatenateLogs(logDir=logDir)\n            # verify output\n            combinedLogFile = os.path.join(logDir, \"armi-workers-mpi.log\")\n            self.assertTrue(os.path.exists(combinedLogFile))\n            self.assertFalse(os.path.exists(stdoutFile3))\n\n    def test_createLogDir(self):\n        \"\"\"Test the createLogDir() method.\n\n        .. 
test:: Test that log directories can be created for logging output files.\n            :id: T_ARMI_LOG6\n            :tests: R_ARMI_LOG\n        \"\"\"\n        with TemporaryDirectoryChanger():\n            logDir = \"test_createLogDir\"\n            self.assertFalse(os.path.exists(logDir))\n            for _ in range(10):\n                runLog.createLogDir(logDir)\n                self.assertTrue(os.path.exists(logDir))\n\n\nclass TestRunLogEnvEdits(unittest.TestCase):\n    \"\"\"Tests that will use monkeypatch to alter an environment variable.\"\"\"\n\n    def setUp(self):\n        # We cannot import pytest at the top of the file right now. The ARMI unit tests are currently imported at\n        # runtime, and until that is changed, we don't want pytest to be a runtime dependency. For now, hide the import\n        # down here. Once the testing module is complete and ARMI's unit tests aren't all imported, the pytest import\n        # can move up to where it belongs.\n        import pytest\n\n        self.monkeypatch = pytest.MonkeyPatch()\n\n    def tearDown(self):\n        self.monkeypatch.undo()\n\n    def test_createLogDirNonDefault(self):\n        \"\"\"Test the scenario where a user sets the environment variable that edits the log dir location.\"\"\"\n        with TemporaryDirectoryChanger() as td:\n            self.monkeypatch.setenv(\"ARMI_TEMP_ROOT_PATH\", str(Path(td.destination) / \"logzGoHere\"))\n            runLog.createLogDir()\n            # assert the env variable-edits logs path exists\n            p = Path(td.destination) / \"logzGoHere\" / \"logs\"\n            self.assertTrue(p.exists())\n            # assert the default logs path doesn't exist\n            p = Path(os.getcwd()) / \"logs\"\n            self.assertFalse(p.exists())\n\n    def test_getLogDir(self):\n        \"\"\"Test getLogDir with and without an environment variable edit.\"\"\"\n        default = Path(runLog.getLogDir())\n        self.assertEqual(default, Path(os.getcwd()) / 
\"logs\")\n        root = Path(\"somewhere\") / \"else\"\n        self.monkeypatch.setenv(\"ARMI_TEMP_ROOT_PATH\", str(root))\n        altered = Path(runLog.getLogDir())\n        self.assertEqual(altered, root / \"logs\")\n\n\nclass TestRunLogger(unittest.TestCase):\n    def setUp(self):\n        self.rl = runLog.RunLogger(\"ARMI|things_and_stuff|0\")\n\n    def test_getDuplicatesFilter(self):\n        df = self.rl.getDuplicatesFilter()\n        self.assertEqual(type(df), runLog.DeduplicationFilter)\n\n        self.rl.filters = []\n        self.assertIsNone(self.rl.getDuplicatesFilter())\n\n    def test_allowStopDuplicates(self):\n        # the usual case, where the DeduplicateFilter already exists\n        self.assertEqual(len(self.rl.filters), 1)\n        self.rl.allowStopDuplicates()\n        self.assertEqual(len(self.rl.filters), 1)\n\n        # the unusual case, where the DeduplicateFilter isn't there\n        self.rl.filters = []\n        self.assertEqual(len(self.rl.filters), 0)\n        self.rl.allowStopDuplicates()\n        self.assertEqual(len(self.rl.filters), 1)\n\n    def test_write(self):\n        \"\"\"Test that we can write text to the logger output stream.\n\n        .. test:: Write logging text to the logging stream and/or file.\n            :id: T_ARMI_LOG7\n            :tests: R_ARMI_LOG\n        \"\"\"\n        # divert the logging to a stream, to make testing easier\n        stream = StringIO()\n        handler = logging.StreamHandler(stream)\n        self.rl.handlers = [handler]\n\n        # log some things\n        testName = \"test_write\"\n        self.rl.write(testName)\n\n        # test what was logged\n        streamVal = stream.getvalue()\n        self.assertIn(testName, streamVal, msg=streamVal)\n"
  },
  {
    "path": "armi/tests/test_symmetry.py",
    "content": "# Copyright 2025 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nAudit symmetry-aware parameters in baseline ARMI.\n\nSee Also\n--------\n    armi.testing.symmetryTesting\n\"\"\"\n\nfrom armi.reactor.assemblyParameters import getAssemblyParameterDefinitions\nfrom armi.reactor.blockParameters import getBlockParameterDefinitions\nfrom armi.reactor.reactorParameters import defineCoreParameters\nfrom armi.testing import symmetryTesting\n\n\nclass ArmiSymmetryTest(symmetryTesting.BasicArmiSymmetryTestHelper):\n    \"\"\"Run symmetry intentionality tests for ARMI.\"\"\"\n\n    def setUp(self):\n        self.coreParamsToTest = defineCoreParameters()\n        self.assemblyParamsToTest = getAssemblyParameterDefinitions()\n        self.blockParamsToTest = getBlockParameterDefinitions()\n        self.expectedSymmetricBlockParams = [\n            \"molesHmNow\",\n            \"molesHmBOL\",\n            \"massHmBOL\",\n            \"initialB10ComponentVol\",\n            \"kgFis\",\n            \"kgHM\",\n        ]\n        self.expectedSymmetricAssemblyParams = [\"THmassFlowRate\"]\n        self.parameterOverrides = {\"xsType\": [\"A\"], \"xsTypeNum\": 65, \"notes\": \"\"}\n        self.paramsToIgnore = [\"maxAssemNum\"]\n\n        super().setUp()\n"
  },
  {
    "path": "armi/tests/test_tests.py",
    "content": "# Copyright 2021 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Unit tests for the test helpers.\"\"\"\n\nimport unittest\n\nfrom armi import tests\n\n\nclass TestCompareFiles(unittest.TestCase):\n    def test_compareFileLine(self):\n        expected = \"oh look, a number! 3.14 and some text and another number 1.5 and another 0.0\"\n\n        # any line compared with itself should pass\n        self.assertTrue(tests.ArmiTestHelper.compareLines(expected, expected))\n        self.assertTrue(tests.ArmiTestHelper.compareLines(expected, expected, eps=0.01))\n\n        # if we vary the numbers a tiny bit, the epsilon parameter should correctly control the comparison\n        actual = \"oh look, a number! 3.15 and some text and another number 1.6 and another 0.0  \"\n        self.assertFalse(tests.ArmiTestHelper.compareLines(expected, actual, eps=0.04))\n        self.assertTrue(tests.ArmiTestHelper.compareLines(expected, actual, eps=0.07))\n\n        # if we add an extra, non-number word, the comparison should fail\n        actual = \"oh look, a number! 3.15 and some text and another number 1.6 extra and another 0.0\"\n        self.assertFalse(tests.ArmiTestHelper.compareLines(expected, actual, eps=0.04))\n\n        # if we replace a number with not a number, the comparison should fail\n        actual = \"oh look, a number! 
notANumber and some text and another number 1.5 and another 0.0\"\n        self.assertFalse(tests.ArmiTestHelper.compareLines(expected, actual, eps=0.04))\n\n    def test_onlySomeMatch(self):\n        # only the first number in the line matches, so the line should fail\n        expected = \"oh look, a number! 3.14 and some text and another number 1.5 and another 0.0\"\n        actual = \"oh look, a number! 3.14 and some text and another number 2.2 and another 9.9\"\n        self.assertFalse(tests.ArmiTestHelper.compareLines(expected, actual, eps=0.01))\n\n        # only the second number in the line matches, so the line should fail\n        expected = \"oh look, a number! 3.14 and some text and another number 1.5 and another 0.0\"\n        actual = \"oh look, a number! 7.7 and some text and another number 1.5 and another 9.9\"\n        self.assertFalse(tests.ArmiTestHelper.compareLines(expected, actual, eps=0.01))\n\n        # only the last number in the line matches, so the line should fail\n        expected = \"oh look, a number! 3.14 and some text and another number 1.5 and another 0.0\"\n        actual = \"oh look, a number! 
7.7 and some text and another number 8.5 and another 0.0\"\n        self.assertFalse(tests.ArmiTestHelper.compareLines(expected, actual, eps=0.01))\n\n    def test_strangeCases(self):\n        # comparing the same string should return True, even if there are no numbers\n        expected = \"There are no numbers\"\n        self.assertTrue(tests.ArmiTestHelper.compareLines(expected, expected))\n\n        # comparing different strings should return False, even if there are no numbers\n        actual = \"There are SOME numbers\"\n        self.assertFalse(tests.ArmiTestHelper.compareLines(expected, actual))\n\n        # comparing empty strings should return True\n        self.assertTrue(tests.ArmiTestHelper.compareLines(\"\", \"\"))\n\n        # comparing equal strings of whitespace should return True\n        whiteSpace3 = \"   \"\n        self.assertTrue(tests.ArmiTestHelper.compareLines(whiteSpace3, str(whiteSpace3)))\n"
  },
  {
    "path": "armi/tests/test_user_plugins.py",
    "content": "# Copyright 2022 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for the UserPlugin class.\"\"\"\n\nimport copy\nimport os\nimport unittest\n\nfrom armi import context, getApp, interfaces, plugins, utils\nfrom armi.reactor.flags import Flags\nfrom armi.reactor.tests import test_reactors\nfrom armi.settings import caseSettings\nfrom armi.tests import TEST_ROOT\nfrom armi.utils import directoryChangers\n\n\nclass UserPluginFlags(plugins.UserPlugin):\n    \"\"\"Simple UserPlugin that defines a single, new flag.\"\"\"\n\n    @staticmethod\n    @plugins.HOOKIMPL\n    def defineFlags():\n        \"\"\"Function to provide new Flags definitions.\"\"\"\n        return {\"SPECIAL\": utils.flags.auto()}\n\n\nclass UserPluginFlags2(plugins.UserPlugin):\n    \"\"\"Simple UserPlugin that defines a single, new flag.\"\"\"\n\n    @staticmethod\n    @plugins.HOOKIMPL\n    def defineFlags():\n        \"\"\"Function to provide new Flags definitions.\"\"\"\n        return {\"FLAG2\": utils.flags.auto()}\n\n\nclass UserPluginFlags3(plugins.UserPlugin):\n    \"\"\"Simple UserPlugin that defines a single, new flag.\"\"\"\n\n    @staticmethod\n    @plugins.HOOKIMPL\n    def defineFlags():\n        \"\"\"Function to provide new Flags definitions.\"\"\"\n        return {\"FLAG3\": utils.flags.auto()}\n\n\n# text-file version of a stand-alone Python file for a simple User Plugin\nupFlags4 = \"\"\"\nfrom armi import plugins\nfrom armi import 
utils\n\nclass UserPluginFlags4(plugins.UserPlugin):\n    @staticmethod\n    @plugins.HOOKIMPL\n    def defineFlags():\n        return {\"FLAG4\": utils.flags.auto()}\n\"\"\"\n\n\nclass UserPluginBadDefinesSettings(plugins.UserPlugin):\n    \"\"\"This is invalid/bad because it implements defineSettings().\"\"\"\n\n    @staticmethod\n    @plugins.HOOKIMPL\n    def defineSettings():\n        \"\"\"Define settings for the plugin.\"\"\"\n        return [1, 2, 3]\n\n\nclass UserPluginBadDefineParameterRenames(plugins.UserPlugin):\n    \"\"\"This is invalid/bad because it implements defineParameterRenames().\"\"\"\n\n    @staticmethod\n    @plugins.HOOKIMPL\n    def defineParameterRenames():\n        \"\"\"Return a mapping from old parameter names to new parameter names.\"\"\"\n        return {\"oldType\": \"type\"}\n\n\nclass UserPluginOnProcessCoreLoading(plugins.UserPlugin):\n    \"\"\"\n    This plugin flex-tests the onProcessCoreLoading() hook,\n    and arbitrarily adds \"1\" to the height of every block,\n    after the DB is loaded.\n    \"\"\"\n\n    @staticmethod\n    @plugins.HOOKIMPL\n    def onProcessCoreLoading(core, cs, dbLoad):\n        \"\"\"Function to call whenever a Core object is newly built.\"\"\"\n        blocks = core.getBlocks(Flags.FUEL)\n        for b in blocks:\n            b.p.height += 1.0\n\n\nclass UpInterface(interfaces.Interface):\n    \"\"\"\n    A mostly meaningless little test interface, just to prove that we can affect\n    the reactor state from an interface inside a UserPlugin.\n    \"\"\"\n\n    name = \"UpInterface\"\n\n    def interactEveryNode(self, cycle, node):\n        \"\"\"Logic to be carried out at every time node in the simulation.\"\"\"\n        self.r.core.p.power += 100\n\n\nclass UserPluginWithInterface(plugins.UserPlugin):\n    \"\"\"A little test UserPlugin, just to show how to add an Interface through a UserPlugin.\"\"\"\n\n    @staticmethod\n    @plugins.HOOKIMPL\n    def exposeInterfaces(cs):\n        
\"\"\"Function for exposing interface(s) to other code.\"\"\"\n        return [interfaces.InterfaceInfo(interfaces.STACK_ORDER.PREPROCESSING, UpInterface, {\"enabled\": True})]\n\n\nclass TestUserPlugins(unittest.TestCase):\n    def setUp(self):\n        \"\"\"\n        Manipulate the standard App. We can't just configure our own, since the\n        pytest environment bleeds between tests.\n        \"\"\"\n        self._backupApp = copy.deepcopy(getApp())\n\n    def tearDown(self):\n        \"\"\"Restore the App to its original state.\"\"\"\n        import armi\n\n        armi._app = self._backupApp\n        context.APP_NAME = \"armi\"\n\n    def test_userPluginsFlags(self):\n        # a basic test that a UserPlugin is loaded\n        app = getApp()\n\n        pluginNames = [p[0] for p in app.pluginManager.list_name_plugin()]\n        self.assertNotIn(\"UserPluginFlags\", pluginNames)\n\n        app.pluginManager.register(UserPluginFlags)\n\n        pluginNames = [p[0] for p in app.pluginManager.list_name_plugin()]\n        self.assertIn(\"UserPluginFlags\", pluginNames)\n\n        # we shouldn't be able to register the same plugin twice\n        with self.assertRaises(ValueError):\n            app.pluginManager.register(UserPluginFlags)\n\n    def test_validateUserPluginLimitations(self):\n        # this should NOT raise any errors\n        _up = UserPluginFlags()\n\n        # this should raise an error because it has a defineSettings() method\n        with self.assertRaises(AssertionError):\n            _bad0 = UserPluginBadDefinesSettings()\n\n    def test_registerUserPlugins(self):\n        app = getApp()\n\n        pluginNames = [p[0] for p in app.pluginManager.list_name_plugin()]\n        self.assertNotIn(\"UserPluginFlags2\", pluginNames)\n\n        plugins = [\"armi.tests.test_user_plugins.UserPluginFlags2\"]\n        app.registerUserPlugins(plugins)\n\n        pluginNames = [p[0] for p in app.pluginManager.list_name_plugin()]\n        
self.assertIn(\"UserPluginFlags2\", pluginNames)\n        self.assertIn(\"FLAG2\", dir(Flags))\n\n    def test_registerUserPluginsAbsPath(self):\n        app = getApp()\n\n        with directoryChangers.TemporaryDirectoryChanger():\n            # write a simple UserPlugin to a simple Python file\n            with open(\"plugin4.py\", \"w\") as f:\n                f.write(upFlags4)\n\n            # register that plugin using an absolute path\n            cwd = os.getcwd()\n            plugins = [os.path.join(cwd, \"plugin4.py\") + \":UserPluginFlags4\"]\n            app.registerUserPlugins(plugins)\n\n        pluginNames = [p[0] for p in app.pluginManager.list_name_plugin()]\n        self.assertIn(\"UserPluginFlags4\", pluginNames)\n        self.assertIn(\"FLAG4\", dir(Flags))\n\n    def test_registerUserPluginsFromSettings(self):\n        app = getApp()\n        cs = caseSettings.Settings().modified(\n            caseTitle=\"test_registerUserPluginsFromSettings\",\n            newSettings={\n                \"userPlugins\": [\"armi.tests.test_user_plugins.UserPluginFlags3\"],\n            },\n        )\n\n        pNames = [p[0] for p in app.pluginManager.list_name_plugin()]\n        self.assertNotIn(\"UserPluginFlags3\", pNames)\n\n        cs.registerUserPlugins()\n\n        pluginNames = [p[0] for p in app.pluginManager.list_name_plugin()]\n        self.assertIn(\"UserPluginFlags3\", pluginNames)\n        self.assertIn(\"FLAG3\", dir(Flags))\n\n    def test_userPluginOnProcessCoreLoading(self):\n        \"\"\"\n        Test that a UserPlugin can affect the Reactor state,\n        by implementing onProcessCoreLoading() to arbitrarily increase the\n        height of all the blocks by 1.0.\n        \"\"\"\n        # register the plugin\n        app = getApp()\n        name = \"UserPluginOnProcessCoreLoading\"\n\n        pluginNames = [p[0] for p in app.pluginManager.list_name_plugin()]\n        self.assertNotIn(name, pluginNames)\n        
app.pluginManager.register(UserPluginOnProcessCoreLoading)\n\n        # validate the plugins was registered\n        pluginz = app.pluginManager.list_name_plugin()\n        pluginNames = [p[0] for p in pluginz]\n        self.assertIn(name, pluginNames)\n\n        # grab the loaded plugin\n        plug0 = [p[1] for p in pluginz if p[0] == name][0]\n\n        # load a reactor and grab the fuel assemblies\n        o, r = test_reactors.loadTestReactor(TEST_ROOT, inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\")\n        fuels = r.core.getBlocks(Flags.FUEL)\n\n        # prove that our plugin affects the core in the desired way\n        heights = [float(f.p.height) for f in fuels]\n        plug0.onProcessCoreLoading(core=r.core, cs=o.cs, dbLoad=False)\n        for i, height in enumerate(heights):\n            self.assertEqual(fuels[i].p.height, height + 1.0)\n\n    def test_userPluginWithInterfaces(self):\n        \"\"\"Test that UserPlugins can correctly inject an interface into the stack.\"\"\"\n        # register the plugin\n        app = getApp()\n\n        pNames = [p[0] for p in app.pluginManager.list_name_plugin()]\n        self.assertNotIn(\"UserPluginWithInterface\", pNames)\n\n        # register custom UserPlugin, that has an\n        plugins = [\"armi.tests.test_user_plugins.UserPluginWithInterface\"]\n        app.registerUserPlugins(plugins)\n\n        pluginNames = [p[0] for p in app.pluginManager.list_name_plugin()]\n        self.assertIn(\"UserPluginWithInterface\", pluginNames)\n\n        # load a reactor and grab the fuel assemblieapps\n        o, r = test_reactors.loadTestReactor(TEST_ROOT, inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\")\n        _fuels = r.core.getAssemblies(Flags.FUEL)\n\n        # This is here because we have multiple tests altering the App()\n        o.interfaces = []\n        o.initializeInterfaces(r)\n\n        app.pluginManager.hook.exposeInterfaces(cs=o.cs)\n\n        # This test is not set up for a full 
run through all the interfaces, for\n        # instance, there is not database prepped. So let's skip some interfaces.\n        for skipIt in [\"fuelhandler\", \"history\"]:\n            for i, interf in enumerate(o.interfaces):\n                if skipIt in str(interf).lower():\n                    o.interfaces = o.interfaces[:i] + o.interfaces[i + 1 :]\n                    break\n\n        # test that the core power goes up\n        power0 = float(r.core.p.power)\n        o.cs[\"nCycles\"] = 2\n        o.operate()\n        self.assertGreater(r.core.p.power, power0)\n\n    def test_registerRepeatedUserPlugins(self):\n        app = getApp()\n\n        # Test plugin registration with two userPlugins with the same name\n        with directoryChangers.TemporaryDirectoryChanger():\n            # write a simple UserPlugin to a simple Python file\n            with open(\"plugin4.py\", \"w\") as f:\n                f.write(upFlags4)\n\n            # register that plugin using an absolute path\n            cwd = os.getcwd()\n            plugins = [os.path.join(cwd, \"plugin4.py\") + \":UserPluginFlags4\"] * 2\n            app.registerUserPlugins(plugins)\n        pluginNames = [p[0] for p in app.pluginManager.list_name_plugin()]\n        self.assertEqual(pluginNames.count(\"UserPluginFlags4\"), 1)\n\n        # Repeat test for other type of path\n        cs = caseSettings.Settings().modified(\n            caseTitle=\"test_registerUserPluginsFromSettings\",\n            newSettings={\n                \"userPlugins\": [\n                    \"armi.tests.test_user_plugins.UserPluginFlags3\",\n                    \"armi.tests.test_user_plugins.UserPluginFlags3\",\n                ],\n            },\n        )\n        cs.registerUserPlugins()\n        pluginNames = [p[0] for p in app.pluginManager.list_name_plugin()]\n        self.assertEqual(pluginNames.count(\"UserPluginFlags3\"), 1)\n"
  },
  {
    "path": "armi/tests/tutorials/data_model.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"# Hands-on at the ARMI Terminal\\n\",\n    \"\\n\",\n    \"This tutorial will walk you through some exploration with ARMI on the command\\n\",\n    \"line with the goal of exposing you to some of the capabilities\\n\",\n    \"and organization of information in the ARMI system.\\n\",\n    \"\\n\",\n    \"## Initializing and Exploring the ARMI Model\\n\",\n    \"First we need to get some inputs. We built some from scratch in\\n\",\n    \"[Building input files for a fast reactor](walkthrough_inputs.html)\\n\",\n    \"and we pick those up and use them `here <https://github.com/terrapower/armi/tree/main/armi/testing/reactors/anl-afci-177>`_ as well:\\n\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"You can load these inputs using armi's ``init`` function. This will build an **Operator**, a **Reactor**, and an **Interface Stack** full of various interfaces.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"# you can only configure an app once\\n\",\n    \"import armi\\n\",\n    \"\\n\",\n    \"if not armi.isConfigured():\\n\",\n    \"    armi.configure(armi.apps.App())\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"import os\\n\",\n    \"\\n\",\n    \"# Depending on which test runs this, the test reactor will be in a different place.\\n\",\n    \"filePath1 = \\\"../../testing/reactors/anl-afci-177/anl-afci-177.yaml\\\"\\n\",\n    \"filePath2 = \\\"../anl-afci-177/anl-afci-177.yaml\\\"\\n\",\n    \"\\n\",\n    \"if os.path.exists(filePath1):\\n\",\n    \"    filePath = filePath1\\n\",\n    \"else:\\n\",\n    \"    filePath = filePath2\\n\",\n    \"\\n\",\n    \"o = armi.init(fName=filePath)\\n\",\n    
\"o.r.core.sortAssemsByRing()  # makes innermost assemblies appear first\"\n   ]\n  },\n  {\n   \"cell_type\": \"raw\",\n   \"metadata\": {\n    \"raw_mimetype\": \"text/restructuredtext\"\n   },\n   \"source\": [\n    \"You have now created an ``operator`` object, which contains a ``Reactor`` object (called ``o.r``) that\\n\",\n    \"represents the beginning-of-life (BOL) state of the nuclear reactor defined in the inputs. The reactor looks\\n\",\n    \"like this:\\n\",\n    \"\\n\",\n    \".. figure:: /.static/armi_reactor_objects.png\\n\",\n    \"    :align: center\\n\",\n    \"\\n\",\n    \"    **Figure 1.** The primary data containers in ARMI\\n\"\n   ]\n  },\n  {\n   \"cell_type\": \"raw\",\n   \"metadata\": {\n    \"raw_mimetype\": \"text/restructuredtext\"\n   },\n   \"source\": [\n    \"The data hierarchy in an ARMI model often is made up of:\\n\",\n    \"\\n\",\n    \"* :py:mod:`Reactors <armi.reactor.reactors>`, often named ``reactor`` or ``r`` contain a Core and possibly other equipment. They represent collections of assemblies. \\n\",\n    \"* :py:mod:`Assemblies <armi.reactor.assemblies>`, often named ``assembly`` or ``a``, are individual pieces that collect into a System.\\n\",\n    \"* :py:mod:`Blocks <armi.reactor.blocks>`, often called ``block`` or ``b`` are divisions of the assemblies into sections one on top of the other.\\n\",\n    \"* :py:mod:`Components <armi.reactor.components>` The geometrically defined objects (Circles, Hexagons, Helices, Dodecagons) and their dimensions.\\n\",\n    \"* :py:mod:`Materials <armi.materials>` are objects which have material properties like linear expansion coefficients, thermal conductivities, isotopic mass fractions, and densities.\\n\",\n    \"\\n\",\n    \"Each of these objects house more than the listed objects, they also are responsible for a variety of state information\\n\",\n    \"like the reactor's overall keff, flux, height, temperature, etc. 
In this section, we will explore these objects,\\n\",\n    \"see how to access them, and how to view their contained information.\\n\",\n    \"\\n\",\n    \"Exploring it a little, we can list all the assemblies in the reactor with:\\n\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"core = o.r.core\\n\",\n    \"core.getAssemblies()[:25]  # only print the first 25\"\n   ]\n  },\n  {\n   \"cell_type\": \"raw\",\n   \"metadata\": {\n    \"raw_mimetype\": \"text/restructuredtext\"\n   },\n   \"source\": [\n    \".. admonition:: Quiz Question 1 \\n\",\n    \"    \\n\",\n    \"    How many assemblies does the model have? (see answers at bottom)\\n\",\n    \"    \\n\",\n    \"    \\n\",\n    \".. tip::\\n\",\n    \"    A reactor is made up of assemblies, which are made up of blocks, and so on. Each composite ARMI\\n\",\n    \"    object has a ``getChildren`` method that will retrieve a list of its contents. For clarity,\\n\",\n    \"    reactors have a ``getAssemblies()`` method and assemblies have a ``getBlocks()`` method,\\n\",\n    \"    but these do exactly the same thing as ``getChildren()`` in both cases.\\n\",\n    \"\\n\",\n    \"    Reactor, assembly, blocks, etc. 
objects act like lists as well, so you can get the fifth\\n\",\n    \"    assembly out of a reactor just like you'd get the fifth item out of any other list\\n\",\n    \"    (don't forget that Python uses `zero-based numbering <http://en.wikipedia.org/wiki/Zero-based_numbering>`_)::\\n\",\n    \"\\n\",\n    \"        >>> fifthAssem = core[4]\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"You can drill down the hierarchy for a particular assembly:\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"core = o.r[0]\\n\",\n    \"print(core)\\n\",\n    \"assem = core[1]\\n\",\n    \"print(assem)\\n\",\n    \"block = assem[5]\\n\",\n    \"print(block)\\n\",\n    \"print(f\\\"Block's parent is: {block.parent}\\\")\\n\",\n    \"components = block.getChildren()\\n\",\n    \"print(components)\\n\",\n    \"material = components[0].material\\n\",\n    \"print(material)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {\n    \"raw_mimetype\": \"text/restructuredtext\"\n   },\n   \"source\": [\n    \"## Exploring the *state* of the reactor\\n\",\n    \"State can be explored using a variety of framework methods, as well as looking at state *parameters*. Let's first try out some methods to find out how much U-235 is in the model and what the average uranium enrichment is:\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"u235 = core.getMass(\\\"U235\\\")\\n\",\n    \"u238 = core.getMass(\\\"U238\\\")\\n\",\n    \"print(f\\\"The core contains {u235} grams of U-235\\\")\\n\",\n    \"print(f\\\"The average fissile enrichment is {u235 / (u235 + u238)}\\\")\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"That's how much U-235 is in the 1/3 core. 
If we want the total mass (including all nuclides), we can just leave the argument out:\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"core.getMass() / 1.0e6\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"core.getMass?\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"Furthermore, you can get a list of available methods by pressing the tab key. Try `core.` followed by `[Tab]`. Try out some options!\"\n   ]\n  },\n  {\n   \"cell_type\": \"raw\",\n   \"metadata\": {\n    \"raw_mimetype\": \"text/restructuredtext\"\n   },\n   \"source\": [\n    \"Use tab completion to explore other methods of ARMI reactors assemblies and blocks. You can\\n\",\n    \"view a summary of the methods of any object in the :ref:`API documentation <modindex>`.\\n\",\n    \"For a good example, see :py:class:`the API docs for a block <armi.reactor.blocks.Block>`.\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"Next, lets find out what the number density of U235 is in a particular fuel block. 
We'll use the *FLAGS* system to select a particular type of block (in this case, a fuel block):\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"from armi.reactor.flags import Flags\\n\",\n    \"\\n\",\n    \"b = core.getFirstBlock(Flags.FUEL)\\n\",\n    \"print(f\\\"U-235 ndens: {b.getNumberDensity('U235'):.4e} (atoms/bn-cm)\\\")\\n\",\n    \"print(f\\\"Block name: {b.getName()}\\\")\\n\",\n    \"print(f\\\"Block type: {b.getType()}\\\")\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"You can find lots of other details about this block with:\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"b.printContents(includeNuclides=False)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Modifying the state of the reactor\\n\",\n    \"Each object in the Reactor model has a bunch of *state parameters* contained in its special `.p` attribute, called its *Parameter Collection*. The state parameters are defined both by the ARMI framework and the collection of plugins. For instance, you can look at the core's keff parameters or each individual block's power and multi-group flux parameters like this:\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"print(b.p.power)\\n\",\n    \"print(core.p.keff)\\n\",\n    \"print(b.p.mgFlux)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"As you might expect, the values are zero because we have not performed any physics calculations yet. We could run a physics plugin at this point to add physics state, but for this tutorial, we'll just apply dummy data. 
Here's a fake physics kernel that just sets a power distribution based on spatial location of each block (e.g. a spherical distribution):\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"import numpy as np\\n\",\n    \"\\n\",\n    \"midplane = core[0].getHeight() / 2.0\\n\",\n    \"center = np.array([0, 0, midplane])\\n\",\n    \"peakPower = 1e6\\n\",\n    \"mgFluxBase = np.arange(5)\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"def setFakePower(core):\\n\",\n    \"    for a in core:\\n\",\n    \"        sf = a.getSymmetryFactor()\\n\",\n    \"        for b in a:\\n\",\n    \"            vol = b.getVolume()\\n\",\n    \"            coords = b.spatialLocator.getGlobalCoordinates()\\n\",\n    \"            r = np.linalg.norm(abs(coords - center))\\n\",\n    \"            fuelFlag = 10 if b.isFuel() else 1.0\\n\",\n    \"            # Use the symmetry factor to account for the central assembly being split\\n\",\n    \"            b.p.power = peakPower / r**2 * fuelFlag / sf\\n\",\n    \"            b.p.pdens = b.p.power / vol\\n\",\n    \"            b.p.mgFlux = mgFluxBase * b.p.pdens\\n\",\n    \"            if b.isFuel():\\n\",\n    \"                print(b.p.power, b.getLocation())\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"setFakePower(core)\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"print(b.p.power)\\n\",\n    \"print(b.p.pdens)\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"import matplotlib.pyplot as plt\\n\",\n    \"\\n\",\n    \"a = b.parent\\n\",\n    \"z = [b.spatialLocator.getGlobalCoordinates()[2] for b in a]\\n\",\n    \"power = a.getChildParamValues(\\\"power\\\")\\n\",\n    \"plt.plot(z, power, \\\".-\\\")\\n\",\n    \"plt.title(\\\"Fake power distribution on 
reactor\\\")\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"We can take a look at the spatial distribution as well:\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"from armi.utils import plotting\\n\",\n    \"\\n\",\n    \"# Note, if you were plotting outside jupyter, you could click\\n\",\n    \"# on different depths at the bottom to view different axial planes.\\n\",\n    \"plotting.plotBlockDepthMap(core, \\\"power\\\", depthIndex=5)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Modifying number densities\\n\",\n    \"Analysts frequently want to modify number densities. For example, if you needed to compute a coolant density coefficient, you could simply reduce the amount of coolant in the core. \"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"sodiumBefore = core.getMass(\\\"NA\\\")\\n\",\n    \"print(f\\\"Before: {sodiumBefore / 1e6:.2f} MT Sodium\\\")\\n\",\n    \"for b in core.getBlocks():  # loop through all blocks\\n\",\n    \"    refDens = b.getNumberDensity(\\\"NA23\\\")\\n\",\n    \"    b.setNumberDensity(\\\"NA23\\\", refDens * 0.98)  # reduce Na density by 2%\\n\",\n    \"sodiumAfter = core.getMass(\\\"NA\\\")\\n\",\n    \"print(f\\\"After:  {sodiumAfter / 1e6:.2f} MT Sodium\\\")\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"If you analyze the keff with a physics plugin before and after, the change in the `core.p.keff` param would determine your density coefficient of reactivity. \\n\",\n    \"\\n\",\n    \"## Saving state to disk\\n\",\n    \"During analysis, it's often useful to save the reactor state to disk in a database. 
The ARMI database package handles this, and writes it out to an [HDF-formatted](https://en.wikipedia.org/wiki/Hierarchical_Data_Format) file. This is typically done automatically at each point in time in a normal simulation, and can also be done manually, like this:\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"dbi = o.getInterface(\\\"database\\\")\\n\",\n    \"dbi.initDB()\\n\",\n    \"dbi.database.writeToDB(o.r)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Fuel management\\n\",\n    \"One plugin that comes with the framework is the Fuel Handler. It attaches the Fuel Handler interface, which we can grab now to move fuel around. In a typical ARMI run, the detailed fuel management choices are specified by the user-input custom shuffle logic file. In this particular example, we will simply swap the 10 highest-power fuel assemblies with the 10 lowest-power ones. 
\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"from armi.physics.fuelCycle import fuelHandlers\\n\",\n    \"\\n\",\n    \"fh = fuelHandlers.fuelHandlerFactory(o)\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"moved = []\\n\",\n    \"for n in range(10):\\n\",\n    \"    high = fh.findAssembly(param=\\\"power\\\", compareTo=1.0e6, blockLevelMax=True, exclusions=moved)\\n\",\n    \"    low = fh.findAssembly(param=\\\"power\\\", compareTo=0.0, blockLevelMax=True, exclusions=moved)\\n\",\n    \"    fh.swapAssemblies(high, low)\\n\",\n    \"    moved.extend([high, low])\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"plotting.plotBlockDepthMap(core, \\\"power\\\", depthIndex=5)\\n\",\n    \"# You can also plot total assembly params, which are the sum of block params\\n\",\n    \"plotting.plotFaceMap(core, \\\"power\\\", vals=\\\"sum\\\")\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"We can write this new state to DB as well, since we've shuffled the fuel\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"o.r.p.timeNode += 1\\n\",\n    \"dbi.database.writeToDB(o.r)\\n\",\n    \"dbi.database.close()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Loading from the database\\n\",\n    \"Once you have a database, you can use it to load a Reactor object from any of the states that were written to it. 
First, create a Database object, then open it and call its `load()` method.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"from armi.bookkeeping import db\\n\",\n    \"\\n\",\n    \"databaseLocation = \\\"../tutorials/anl-afci-177.h5\\\"\\n\",\n    \"cycle, timeNode = 0, 1\\n\",\n    \"dbo = db.databaseFactory(databaseLocation, \\\"r\\\")\\n\",\n    \"with dbo:\\n\",\n    \"    # Load a new reactor object from the requested cycle and time node\\n\",\n    \"    r = dbo.load(cycle, timeNode)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"We can see that the time node is what we expect (node 1), and there is some fission product mass since we loaded from a cycle after a depletion step.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"print(r.p.timeNode)\\n\",\n    \"print(o.r.getFissileMass())\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"Having a Reactor object by itself can be very useful for all sorts of post-processing tasks. However, sometimes we may wish initialize more ARMI components to do more advanced tasks and interactive follow-on analysis.  Lucky for us, the database stores the settings that were used to run the case in the first place. We can get them like this:\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"with dbo:\\n\",\n    \"    cs = dbo.loadCS()\\n\",\n    \"    print(cs[\\\"neutronicsKernel\\\"])\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"With this `Settings` object, we could create a brand new `Case` and `Operator` and do all sorts of magic. 
This way of interacting with ARMI is rather advanced, and beyond the scope of this tutorial.\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"That's just a brief exploration of the data model. Hopefully it helped orient you to the underlying ARMI structure.\"\n   ]\n  }\n ],\n \"metadata\": {\n  \"celltoolbar\": \"Raw Cell Format\",\n  \"kernelspec\": {\n   \"display_name\": \"Python 3\",\n   \"language\": \"python\",\n   \"name\": \"python3\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 3\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython3\",\n   \"version\": \"3.7.2\"\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 2\n}\n"
  },
  {
    "path": "armi/tests/tutorials/param_sweep.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"# Parameter sweeps\\n\",\n    \"Parameter sweeps allow you to quickly and easily build a series of related cases that all change one or more aspects of the input model or modeling approximations. Because ARMI automates full-scope engineering analysis, ARMI-driven parameter sweeps are extremely useful for design exploration, sensitivity studies, and statistical analysis. \\n\",\n    \"\\n\",\n    \"To get started with a parameter sweep, you first need some inputs. \"\n   ]\n  },\n  {\n   \"cell_type\": \"raw\",\n   \"metadata\": {\n    \"raw_mimetype\": \"text/restructuredtext\"\n   },\n   \"source\": [\n    \"* :download:`Blueprints <anl-afci-177-blueprints.yaml>`\\n\",\n    \"* :download:`Settings <anl-afci-177.yaml>`\\n\",\n    \"* :download:`Core map <anl-afci-177-coreMap.yaml>`\\n\",\n    \"* :download:`Fuel management <anl-afci-177-fuelManagement.py>`\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"Next, you need an app and a `Case` object as the starting point. 
\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"# you can only configure an app once\\n\",\n    \"import armi\\n\",\n    \"\\n\",\n    \"if not armi.isConfigured():\\n\",\n    \"    armi.configure(armi.apps.App())\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"import os\\n\",\n    \"\\n\",\n    \"from armi import cases, settings\\n\",\n    \"from armi.cases import suiteBuilder\\n\",\n    \"from armi.cases.inputModifiers import inputModifiers\\n\",\n    \"\\n\",\n    \"fPath = \\\"../../armi/testing/reactors/anl-afci-177/anl-afci-177.yaml\\\"\\n\",\n    \"if not os.path.exists(fPath):\\n\",\n    \"    fPath = \\\"../../testing/reactors/anl-afci-177/anl-afci-177.yaml\\\"\\n\",\n    \"\\n\",\n    \"cs = settings.Settings(fPath)\\n\",\n    \"case = cases.Case(cs)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"Next, you make a SuiteBuilder, which is the thing that will perturb the input files to generate a suite of related cases from the base case. There are two basic choices, the `FullFactorialSuiteBuilder` which will expand each degree of freedom in every combination (a full multi-dimensional matrix), and the `SeparateEffectsSuiteBuilder` builder, which varies each degree of freedom in isolation. We'll make a FullFactorial case for this demo.\\n\",\n    \"\\n\",\n    \"Once you have a `SuiteBuilder`, you start adding one or more degrees of freedom, each of which will adjust one aspect of the input definitions (modeling options, reactor design, etc.).\\n\"\n   ]\n  },\n  {\n   \"cell_type\": \"raw\",\n   \"metadata\": {\n    \"raw_mimetype\": \"text/restructuredtext\"\n   },\n   \"source\": [\n    \".. note:: You may also find the :py:mod:`more detailed API documentation useful<armi.cases.suiteBuilder>`. 
\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## A simple one-dimensional parameter sweep\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"builder = suiteBuilder.SeparateEffectsSuiteBuilder(case)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"Each degree of freedom is defined by an `InputModifier` and a range of values. ARMI contains a few basic `InputModifier` for simple things (like changing settings), and for design-specific param sweeps you can make your own design-specific modifiers. \\n\",\n    \"\\n\",\n    \"The simplest form of parameter sweep just adjusts settings. For example, we could adjust the reactor power from 10 MW to 100 MW in a few steps. \"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"import numpy as np\\n\",\n    \"\\n\",\n    \"powers = np.linspace(10, 100, 4)\\n\",\n    \"print(f\\\"Building power modifiers with powers: {powers}\\\")\\n\",\n    \"powerModifications = [inputModifiers.SettingsModifier(\\\"power\\\", mw * 1e6) for mw in powers]\\n\",\n    \"builder.addDegreeOfFreedom(powerModifications)\\n\",\n    \"print(f\\\"There are {len(builder.modifierSets)} cases in this suite so far.\\\")\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"Now we can build the suite. The `Suite` object itself can write input files or just run on the local computer with `suite.run`.\\n\",\n    \"\\n\",\n    \"The suite will generate copies of the base case with the power modified across the defined range. 
\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"suite = builder.buildSuite()\\n\",\n    \"suite.echoConfiguration()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"On the other hand, if you want to write inputs and then submit them all to a high-performance computer, you can do that too with `suite.writeInputs()`\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"suite.writeInputs()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"You can now see that perturbed input files have been produced in the `case-suite` folder.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"!grep -R \\\"power:\\\" case-suite/*\"\n   ]\n  },\n  {\n   \"cell_type\": \"raw\",\n   \"metadata\": {\n    \"raw_mimetype\": \"text/restructuredtext\"\n   },\n   \"source\": [\n    \"To submit this suite to a computer cluster, one would run a series of ``python -m armi run`` commands from the ``case-suite`` folder. On a HPC, one would submit these commands to the HPC using the queuing system. \\n\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Modifying the reactor design\\n\",\n    \"Modifying settings is one thing, but the real power of parameter sweeps comes from programmatically perturbing the reactor component designs themselves. We accomplish this by modifying ARMI Blueprint objects as derived from the base input. 
\\n\",\n    \"\\n\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"class CladThicknessModifier(inputModifiers.InputModifier):\\n\",\n    \"    \\\"\\\"\\\"Modifier that adjusts the cladding outer diameter.\\\"\\\"\\\"\\n\",\n    \"\\n\",\n    \"    def __call__(self, cs, bp):\\n\",\n    \"        for blockDesign in bp.blockDesigns:\\n\",\n    \"            for componentDesign in blockDesign:\\n\",\n    \"                if componentDesign.name == \\\"clad\\\":\\n\",\n    \"                    # by default, values passed to a modifier end up in the\\n\",\n    \"                    # independentVariable dict\\n\",\n    \"                    componentDesign.od = self.independentVariable[\\\"cladThickness\\\"]\\n\",\n    \"        return cs, bp\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"cladThicknesses = np.linspace(0.8, 0.9, 5)\\n\",\n    \"builder = suiteBuilder.SeparateEffectsSuiteBuilder(case)\\n\",\n    \"cladModifications = [CladThicknessModifier({\\\"cladThickness\\\": float(od)}) for od in cladThicknesses]\\n\",\n    \"builder.addDegreeOfFreedom(cladModifications)\\n\",\n    \"suite = builder.buildSuite()\\n\",\n    \"suite.echoConfiguration()\\n\",\n    \"suite.writeInputs()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"Now we can inspect the input files and see that the cladding outer diameter definition has indeed been modified\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"!grep -R \\\"clad:\\\" -A6 case-suite/* | grep \\\"od:\\\"\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## A full factorial parameter sweep\\n\",\n    \"Of course, one can use factorial sweeps as well. Below we add two degrees of freedom, one of length 5 and another of length 20. 
This suite has 100 cases total with all combinations of each setting.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"builder = suiteBuilder.FullFactorialSuiteBuilder(case)\\n\",\n    \"powers = np.linspace(10, 100, 5)\\n\",\n    \"powerModifications = [inputModifiers.SettingsModifier(\\\"power\\\", mw * 1e6) for mw in powers]\\n\",\n    \"builder.addDegreeOfFreedom(powerModifications)\\n\",\n    \"\\n\",\n    \"cycleLengths = np.linspace(200, 1000, 20)\\n\",\n    \"cycleLengthMods = [inputModifiers.SettingsModifier(\\\"cycleLength\\\", cL) for cL in cycleLengths]\\n\",\n    \"builder.addDegreeOfFreedom(cycleLengthMods)\\n\",\n    \"print(f\\\"There are {len(builder.modifierSets)} cases in this suite.\\\")\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Post-processing the results of the sweep\\n\",\n    \"After all the runs have completed in a parameter sweep, you will want to post-process them to come to some kind of useful conclusion. Because post-processing is very design-specific, you need to make a simple post-processing script. The ARMI framework has useful functions that will assist you in this task. 
\\n\",\n    \"\\n\",\n    \"First, we assume you're in a new shell and we discover all the cases that ran:\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"def loadSuite():\\n\",\n    \"    print(\\\"Loading suite results...\\\")\\n\",\n    \"    import os\\n\\n\",\n    \"    fPath = \\\"../../armi/testing/reactors/anl-afci-177/anl-afci-177.yaml\\\"\\n\",\n    \"    if not os.path.exists(fPath):\\n\",\n    \"        fPath = \\\"../../testing/reactors/anl-afci-177/anl-afci-177.yaml\\\"\\n\",\n    \"    cs = settings.Settings(fPath)\\n\",\n    \"    suite = cases.CaseSuite(cs)\\n\",\n    \"    suite.discover(patterns=[\\\"anl-afci-177-????.yaml\\\"])\\n\",\n    \"    suite = sorted(suite, key=lambda c: c.cs.inputDirectory)\\n\",\n    \"    return suite\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"suite = loadSuite()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"At this point, you have two options based on your needs:\\n\\n\",\n    \"- Read the ARMI HDF5 output databases directly (useful if you just need to pull certain scalar parameters directly out of the database)\\n\",\n    \"- Have ARMI load HDF5 output databases into full ARMI reactor objects and use the ARMI API to extract data (useful if you want to loop over certain parts of the plant to sum things up)\\n\",\n    \"\\n\",\n    \"Directly reading the database will be inherently less stable (e.g. in case the underlying DB format changes), but can be very fast. Loading ARMI reactors for each case is slower, but should also be more powerful and more stable.\\n\",\n    \"\\n\",\n    \"After you extract the data, you can plot it or make tables or anything else you need. 
We often pass it to non-parametric regression systems like the [Alternating Conditional Expectation](https://github.com/partofthething/ace) (ACE) and then on to a multi-objective optimization system (like [Physical Programming](https://github.com/partofthething/physprog)). \"\n   ]\n  }\n ],\n \"metadata\": {\n  \"celltoolbar\": \"Raw Cell Format\",\n  \"kernelspec\": {\n   \"display_name\": \"Python 3\",\n   \"language\": \"python\",\n   \"name\": \"python3\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 3\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython3\",\n   \"version\": \"3.7.4\"\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 2\n}\n"
  },
  {
    "path": "armi/tests/tutorials/pin-rotations.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"403d57f2\",\n   \"metadata\": {},\n   \"source\": [\n    \"# Pin data, selection, and rotation\\n\",\n    \"\\n\",\n    \"This tutorial is here to help make sense of how ARMI stores data on a `Block` for things that exist within the `Block`. For example, the parameter `Block.p.linPowByPin` is a `(N, )` vector of linear pin powers with one entry per pin. You may be wondering\\n\",\n    \"\\n\",\n    \"1. Where do those powers exist in the block?\\n\",\n    \"2. What component produces those powers? `linPowByPin` is a `Block` parameter, not a `Component` parameter.\\n\",\n    \"3. What happens when the block is rotated?\\n\",\n    \"\\n\",\n    \"By the end of this tutorial, these questions should be answered.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 1,\n   \"id\": \"8feec552\",\n   \"metadata\": {},\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"\\n\",\n      \"       +===================================================+\\n\",\n      \"       |            _      ____     __  __    ___          |\\n\",\n      \"       |           / \\\\    |  _ \\\\   |  \\\\/  |  |_ _|         |\\n\",\n      \"       |          / _ \\\\   | |_) |  | |\\\\/| |   | |          |\\n\",\n      \"       |         / ___ \\\\  |  _ <   | |  | |   | |          |\\n\",\n      \"       |        /_/   \\\\_\\\\ |_| \\\\_\\\\  |_|  |_|  |___|         |\\n\",\n      \"       |        Advanced  Reactor  Modeling Interface      |\\n\",\n      \"       |                                                   |\\n\",\n      \"       |                    version 0.5.1                  |\\n\",\n      \"       |                                                   |\\n\",\n      \"       +===================================================+\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"import armi\\n\",\n    \"\\n\",\n    
\"if not armi.isConfigured():\\n\",\n    \"    armi.configure()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"e5806ba5\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Single pin demonstration\\n\",\n    \"This tutorial uses the same `anl-afci-177/anl-afci-177.yaml` inputs that exist for the fast reactor example. We'll start by initializing the reactor and grabbing a fuel block, it doens't really matter what one. This reactor has a single fuel pin type which means we won't immediately see interesting behavior, but it makes for easier discussion on the fundamentals. Towards the end, we'll look at demonstrative assembly with multiple fuel pin types per block, a more realistic scenario.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 2,\n   \"id\": \"bba88e02\",\n   \"metadata\": {},\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"=========== Settings Validation Checks ===========\\n\",\n      \"=========== Case Information ===========\\n\",\n      \"[info] ---------------------  --------------------------------------------------------------------------------\\n\",\n      \"       Case Title:            anl-afci-177\\n\",\n      \"       Case Description:      ANL-AFCI-177 CR 1.0 metal core but with HALEU instead of TRU\\n\",\n      \"       Run Type:              Standard - Operator\\n\",\n      \"       Current User:          aeinstein\\n\",\n      \"       ARMI Location:         C:\\\\Users\\\\aeinstein\\\\codes\\\\armi\\\\armi\\n\",\n      \"       Working Directory:     c:\\\\Users\\\\aeinstein\\\\codes\\\\armi\\\\armi\\\\testing\\\\reactors\\\\anl-afci-177\\n\",\n      \"       Python Interpreter:    3.13.3 (tags/v3.13.3:6280bb5, Apr  8 2025, 14:47:33) [MSC v.1943 64 bit (AMD64)]\\n\",\n      \"       Python Executable:     c:\\\\Users\\\\aeinstein\\\\codes\\\\armi\\\\.venv\\\\armi\\\\Scripts\\\\python.exe\\n\",\n      \"       Master Machine:     
   TP011870\\n\",\n      \"       Number of Processors:  1\\n\",\n      \"       Date and Time:         Tue Aug 12 15:18:24 2025\\n\",\n      \"       ---------------------  --------------------------------------------------------------------------------\\n\",\n      \"=========== Input File Information ===========\\n\",\n      \"[info] --------------------------------------------------------------------  ------------------------------  ------------\\n\",\n      \"       Input Type                                                            Path                            SHA-1 Hash\\n\",\n      \"       --------------------------------------------------------------------  ------------------------------  ------------\\n\",\n      \"       Case Settings                                                         anl-afci-177.yaml               93a5105368\\n\",\n      \"       Blueprints                                                            anl-afci-177-blueprints.yaml    b7b2c74028\\n\",\n      \"       Included blueprints                                                   anl-afci-177-coreMap.yaml       35ab97dadc\\n\",\n      \"       <Setting shuffleLogic value:anl-afci-177-fuelManagement.py default:>  anl-afci-177-fuelManagement.py  baedb35785\\n\",\n      \"       --------------------------------------------------------------------  ------------------------------  ------------\\n\",\n      \"=========== Machine Information ===========\\n\",\n      \"[info] ---------  ----------------------  -------\\n\",\n      \"       Machine      Number of Processors    Ranks\\n\",\n      \"       ---------  ----------------------  -------\\n\",\n      \"       local                           1        0\\n\",\n      \"       ---------  ----------------------  -------\\n\",\n      \"=========== System Information ===========\\n\",\n      \"[info] OS Name:                   Microsoft Windows 10 Enterprise\\n\",\n      \"       OS Version:                10.0.19045 N/A Build 
19045\\n\",\n      \"       Processor(s):              1 Processor(s) Installed.\\n\",\n      \"                                  [01]: Intel64 Family 6 Model 186 Stepping 2 GenuineIntel ~1425 Mhz\\n\",\n      \"=========== Reactor Cycle Information ===========\\n\",\n      \"[info] ---------------------------  -----------------------------------------------------------------\\n\",\n      \"       Reactor Thermal Power (MW):  1000.0\\n\",\n      \"       Number of Cycles:            10\\n\",\n      \"       Cycle Lengths:               411.11, 411.11, 411.11, 411.11, 411.11, 411.11, 411.11, 411.11,\\n\",\n      \"                                    411.11, 411.11\\n\",\n      \"       Availability Factors:        0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9, 0.9\\n\",\n      \"       Power Fractions:             [1.0, 1.0], [1.0, 1.0], [1.0, 1.0], [1.0, 1.0], [1.0, 1.0], [1.0,\\n\",\n      \"                                    1.0], [1.0, 1.0], [1.0, 1.0], [1.0, 1.0], [1.0, 1.0]\\n\",\n      \"       Step Lengths (days):         [184.9995, 184.9995], [184.9995, 184.9995], [184.9995, 184.9995],\\n\",\n      \"                                    [184.9995, 184.9995], [184.9995, 184.9995], [184.9995, 184.9995],\\n\",\n      \"                                    [184.9995, 184.9995], [184.9995, 184.9995], [184.9995, 184.9995],\\n\",\n      \"                                    [184.9995, 184.9995]\\n\",\n      \"       ---------------------------  -----------------------------------------------------------------\\n\",\n      \"=========== Constructing Reactor and Verifying Inputs ===========\\n\",\n      \"[info] Constructing the `core`\\n\",\n      \"=========== Adding Composites to <Core: core id:1902501464784> ===========\\n\",\n      \"[info] Will expand HE, NA, AL, SI, V, CR, MN, FE, CO, NI, ZR, NB, MO, W elementals to have natural isotopics\\n\",\n      \"[info] Constructing assembly `inner fuel`\\n\",\n      \"[warn] Some component was missing in <reflector 
block-bol-000 at ExCore XS: A ENV GP: A> so pin-to-duct gap not calculated\\n\",\n      \"[warn] The gap between wire wrap and clad in block <plenum block-bol-006 at ExCore XS: A ENV GP: A> was 3.999999999998449e-05 cm. Expected 0.0.\\n\",\n      \"[info] Constructing assembly `middle core fuel`\\n\",\n      \"[warn] Some component was missing in <reflector block-bol-000 at ExCore XS: B ENV GP: A> so pin-to-duct gap not calculated\\n\",\n      \"[warn] The gap between wire wrap and clad in block <plenum block-bol-006 at ExCore XS: B ENV GP: A> was 3.999999999998449e-05 cm. Expected 0.0.\\n\",\n      \"[info] Constructing assembly `outer core fuel`\\n\",\n      \"[warn] Some component was missing in <reflector block-bol-000 at ExCore XS: C ENV GP: A> so pin-to-duct gap not calculated\\n\",\n      \"[warn] The gap between wire wrap and clad in block <plenum block-bol-006 at ExCore XS: C ENV GP: A> was 3.999999999998449e-05 cm. Expected 0.0.\\n\",\n      \"[info] Constructing assembly `radial reflector`\\n\",\n      \"[warn] Some component was missing in <reflector block-bol-000 at ExCore XS: A ENV GP: A> so pin-to-duct gap not calculated\\n\",\n      \"[info] Constructing assembly `radial shield`\\n\",\n      \"[warn] Temperature 597.0 out of range (25 to 500) for B4C linear expansion percent\\n\",\n      \"[info] Constructing assembly `control`\\n\",\n      \"[info] Constructing assembly `ultimate shutdown`\\n\",\n      \"=========== Verifying Assembly Configurations ===========\\n\",\n      \"=========== Applying Geometry Modifications ===========\\n\",\n      \"[info] Resetting the state of the converted reactor core model in <EdgeAssemblyChanger>\\n\",\n      \"[info] Updating spatial grid pitch data for hex geometry\\n\",\n      \"=========== Summarizing Source of Material Data for <Core: core id:1902501464784> ===========\\n\",\n      \"[info] ---------------  -----------------\\n\",\n      \"       Material Name    Source Location\\n\",\n      \"       
---------------  -----------------\\n\",\n      \"       B4C              ARMI\\n\",\n      \"       HT9              ARMI\\n\",\n      \"       Sodium           ARMI\\n\",\n      \"       UZr              ARMI\\n\",\n      \"       Void             ARMI\\n\",\n      \"       ---------------  -----------------\\n\",\n      \"=========== Initializing Mesh, Assembly Zones, and Nuclide Categories ===========\\n\",\n      \"[info] Nuclide categorization for cross section temperature assignments:\\n\",\n      \"       ------------------  -----------------------------------------------------\\n\",\n      \"       Nuclide Category    Nuclides\\n\",\n      \"       ------------------  -----------------------------------------------------\\n\",\n      \"       Fuel                PU236, LFP40, HE4, LFP41, LFP38, DUMP1, AM241, NP237,\\n\",\n      \"                           AM243, U236, PU242, U234, NB93, DUMP2, CM245, LFP39,\\n\",\n      \"                           NP238, U238, ZR90, PU240, U235, ZR91, PU239,\\n\",\n      \"                           CO59, PU238, CM243, CM244, PU241, ZR92, AM242M,\\n\",\n      \"                           ZR96, CM242, CM246, LFP35, ZR94, CM247, AL27\\n\",\n      \"       Coolant             NA23\\n\",\n      \"       Structure           SI29, NI64, MO98, CR54, MO97, FE58, W182, B11, CR53,\\n\",\n      \"                           NI61, MO92, MO94, FE54, SI28, NI62, CR52, MN55,\\n\",\n      \"                           MO96, CR50, MO100, V51, MO95, SI30, NI60, V50,\\n\",\n      \"                           NI58, FE56, C, W183, B10, W184, FE57, W186\\n\",\n      \"       ------------------  -----------------------------------------------------\\n\",\n      \"[info] Constructing the `Spent Fuel Pool`\\n\",\n      \"[warn] Changing the name of the Spent Fuel Pool to 'sfp'.\\n\",\n      \"=========== Creating Interfaces ===========\\n\",\n      \"=========== Interface Stack Summary  ===========\\n\",\n      \"[info] -------  
------------------------  ---------------  ----------  ---------  -----------  ------------\\n\",\n      \"         Index  Type                      Name             Function    Enabled    EOL order    BOL forced\\n\",\n      \"       -------  ------------------------  ---------------  ----------  ---------  -----------  ------------\\n\",\n      \"            01  Main                      main                         Yes        Reversed     No\\n\",\n      \"            02  FissionProductModel       fissionProducts              Yes        Normal       No\\n\",\n      \"            03  FuelHandler               fuelHandler                  Yes        Normal       No\\n\",\n      \"            04  CrossSectionGroupManager  xsGroups                     Yes        Normal       No\\n\",\n      \"            05  HistoryTracker            history                      Yes        Normal       No\\n\",\n      \"            06  Report                    report                       Yes        Normal       No\\n\",\n      \"            07  Database                  database                     Yes        Normal       No\\n\",\n      \"            08  MemoryProfiler            memoryProfiler               Yes        Normal       No\\n\",\n      \"            09  Snapshot                  snapshot                     Yes        Normal       No\\n\",\n      \"       -------  ------------------------  ---------------  ----------  ---------  -----------  ------------\\n\",\n      \"===========  Triggering Init Event ===========\\n\",\n      \"=========== 01 - main                           Init            ===========\\n\",\n      \"=========== 02 - fissionProducts                Init            ===========\\n\",\n      \"=========== 03 - fuelHandler                    Init            ===========\\n\",\n      \"=========== 04 - xsGroups                       Init            ===========\\n\",\n      \"=========== 05 - history                        Init            
===========\\n\",\n      \"=========== 06 - report                         Init            ===========\\n\",\n      \"=========== 07 - database                       Init            ===========\\n\",\n      \"=========== 08 - memoryProfiler                 Init            ===========\\n\",\n      \"=========== 09 - snapshot                       Init            ===========\\n\",\n      \"===========  Completed Init Event ===========\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"o = armi.init(fName=\\\"../../testing/reactors/anl-afci-177/anl-afci-177.yaml\\\")\\n\",\n    \"o.r.core.sortAssemsByRing()\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 3,\n   \"id\": \"af28a55b\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"from armi.reactor.blocks import HexBlock\\n\",\n    \"from armi.reactor.flags import Flags\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 4,\n   \"id\": \"76832152\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"import numpy as np\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 5,\n   \"id\": \"15633a30\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"fuelBlock = o.r.core.getFirstBlock(Flags.FUEL)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"82fbb27c\",\n   \"metadata\": {},\n   \"source\": [\n    \"Next, assign _some_ power profile to the block. We'll pick a 2D function `p(x, y) = x + y` for each pin centered at `(x, y)`. This way, the rotation of the block be visible.\\n\",\n    \"\\n\",\n    \"This introduces the first big point: pin-related data assigned as a block parameter **must** be ordered according to `Block.getPinLocations()`. 
That is the key connection between how data are ordered, where data exist in space, and what components are associated with those data.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 6,\n   \"id\": \"e3795419\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"def setPinPow(b: HexBlock):\\n\",\n    \"    \\\"\\\"\\\"Fake a pin power p(x, y) = x + y.\\\"\\\"\\\"\\n\",\n    \"    pinPow = np.empty(b.getNumPins(), dtype=float)\\n\",\n    \"    for ix, loc in enumerate(b.getPinLocations()):\\n\",\n    \"        x, y, _z = loc.getLocalCoordinates()\\n\",\n    \"        pinPow[ix] = x + y\\n\",\n    \"    b.p.linPowByPin = pinPow\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 7,\n   \"id\": \"e99ee9c3\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"setPinPow(fuelBlock)\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 8,\n   \"id\": \"dd71cf83\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"from matplotlib import pyplot\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"6bcb5620\",\n   \"metadata\": {},\n   \"source\": [\n    \"To demonstrate this, we'll make a plot of the block-level pin powers by iterating jointly over the locations in `Block.getPinLocations` and scalar pin values in `Block.p.linPowByPin`. It's not immediately useful because the function\\n\",\n    \"above already set that for us. 
But this will be helpful to show off rotation too.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 9,\n   \"id\": \"4d0d56e7\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"def plotPinPow(b: HexBlock, **kwargs):\\n\",\n    \"    pinPows = b.p.linPowByPin\\n\",\n    \"    xs: list[float] = []\\n\",\n    \"    ys: list[float] = []\\n\",\n    \"    ps: list[float] = []\\n\",\n    \"    for ix, loc in enumerate(b.getPinLocations()):\\n\",\n    \"        x, y, _z = loc.getLocalCoordinates()\\n\",\n    \"        xs.append(x)\\n\",\n    \"        ys.append(y)\\n\",\n    \"        ps.append(pinPows[ix])\\n\",\n    \"    # finely tuned scatter plot size to make nice images here\\n\",\n    \"    kwargs.setdefault(\\\"s\\\", 150)\\n\",\n    \"    return pyplot.scatter(xs, ys, c=ps, **kwargs)\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 10,\n   \"id\": \"a3936a90\",\n   \"metadata\": {},\n   \"outputs\": [\n    {\n     \"data\": {\n      \"text/plain\": [\n       \"<matplotlib.collections.PathCollection at 0x1baf72d38c0>\"\n      ]\n     },\n     \"execution_count\": 10,\n     \"metadata\": {},\n     \"output_type\": \"execute_result\"\n    },\n    {\n     \"data\": {\n      \"image/png\": 
\"iVBORw0KGgoAAAANSUhEUgAAAiIAAAGdCAYAAAAvwBgXAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjAsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvlHJYcgAAAAlwSFlzAAAPYQAAD2EBqD+naQABAABJREFUeJzsnWecHFeVt59b1XFy1ARJMxrlnJMl5yDnnLMNmLywsOwC+7KkhTVpWWBhwSY454izLQdJtmRbOecwGk3OM51D1X0/tEbI0lR1ddfYyKYe/wbsuafvv7unu+6pc889R0gpJQ4ODg4ODg4OfweUv/cTcHBwcHBwcPjHxXFEHBwcHBwcHP5uOI6Ig4ODg4ODw98NxxFxcHBwcHBw+LvhOCIODg4ODg4OfzccR8TBwcHBwcHh74bjiDg4ODg4ODj83XAcEQcHBwcHB4e/G66/9xMwQ9d1mpubyc/PRwjx9346Dg4ODg4ODhaQUhIIBKiurkZRzGMeJ7Qj0tzczMiRI//eT8PBwcHBwcEhCw4dOsSIESNMbU5oRyQ/Px9IvZCCgoK/87NxcHBwcHBwsEJ/fz8jR448so6bcUI7IgPbMQUFBY4j4uDg4ODg8DHDSlqFk6zq4ODg4ODg8HfDcUQcHBwcHBwc/m44joiDg4ODg4PD3w3HEXFwcHBwcHD4u3FCJ6s6OBzLoVA3TzdsoCHYTSgZI9flpSavhCtrZjMit3hINFojvTzftIaGcAehZAy/6mF4TikXVc+hJrd8SDS6Yr0sbXuPQ+EWQskoPtXDMG8pZ1UsoDa3akg0+hJ9vNP5DofChwhrYbyKlxJPCYvLFlOTUzMkGqFkP+t63qIpcoCoFsKteClwFTOr+DRG5owdEo2oFmBX32u0RXcS04KowkOuq4TxBWdR6Z8yJDWGEnqQhsBLdEc3Edf7UYUHr1rCiLxzKfPNGRINTQ/THf4rgehqknofQrhwq6UU51xIgXcRQti/L5QyQiT8PLHYSnS9ByFUFKUUn/98vN4zhkgjDtFXkbF3QO8FoYBSjPCeCd7TEcJZVhwyQ0gp5d/7SRjR399PYWEhfX19zqmZf3Debd/HPXtXsapjH4oQSAk6EgWBEKBLyeJhY7l97CIWlo/OSmND934eql/Bqs5dDCw7OhKBQBECTerMLRnDDbWnclL5hKw0dvYf4JnGN3i3a/PxGgg0dKYUjOHS4WdwUtmMrDTqQ/W80voKa7rXIEl9veVhDYFAR2d07miWVCxhfsn8rBbZ1mgDy9ufZVPvSiQ6II78v4KCjka1r47F5Rcyq+hUlCwWwJ5YAxu6H2dX/1I0mUQcpSFQkGiUeOqYUXIFkwrPQxFqxhqhRCO7e++nIfAcmowPqpHnHsXYwusZVXA5inBnrBFLNtMW+CMdwcfQZRhQAQ0Qh/89iVetYVj+bQzLvxFFeDPW0LQ2gsG7CIceQsrAoBqKWk1e7qfIyb0NRcnJWEPq3cjQPRB+FGTfURr87d+VckTOTZBzM0LJy1jD4ZNDJuu344g4nNBIKbl799v8ZuebqEKgmXxcB8b/edJZfGbcyRktsI8efIdf73oBVShoUje0UxDoSG4bfSafHXNORhqvtqzkd3sfO+IMpNO4pPp0Pj368owW8Xe73uVP+/8EYKqRWnAlp5adyi2jbkHNYBHf1reahxt+iZS6JY0ZhYu5euSXcSnWF/GDwTW83PRdNJlEHlnsBlcBSV3eIpZUfwe34rOs0RlZx6qWr6DJmAUNGOZfwILKX+BWci1rBGMb2d1xK5oeBAsaeZ65jBv2J1xKoWWNRGIbXZ03oOvdaTQAFNzuqZSUPoiqllnWkMl9yO7bQe+wpIFrNKL4Lwi10rKGwyeLTNZvJ0fE4YTmj3tSTghg6oQcPf6rHW/w570rLWs80bCSX+964fAcxgsrpKIXAPfuf5O7975mWeP11vf47d5HkUjTxftojeeal/Gn/U9b1ljTvYa799+NjrmDAByJlL
zd+Tb31t+L1fuRnf3refDgz9Fk0rLG5r5VPHboN+hp3tsBGkMbeaHx30nKeBoHIaUCUB98j1eavo8u09mn6I5u4Z3mL5CUUYsako7I6sOOS9ySRji+g13t16PpAdIv3imNYHw9u9pvQtMjljSSiX10dlxh0QkB0A87Lleh6wFLGlJrQnbdYNEJSWmQPIDsvhGp91jScPjHxnFEHE5Y3uvYz693vJnVY/9n++us6axPa7ett4Ff7nw+K417D7zFyo6dae3qQ838756Hs9J4vnk5K9rXpbVrj7Zz1/67Mp5fInmn8x1WdK5Ia9uf6ObBg78g0xCqRLKl711Wdr6Y1jaq9fNi03cAHTJQkugcDK1hbdeDaW2TeoRVLf+EjnZYx7pGV3QD27t+m9ZWlzF2d9yGLuMZaYBGOL6Nhp4fpn8+UqOr62akDGPNQfibRjK5j97ef7WgIZE9nwfZn7EGWjPSgoaDg+OIOJyw3Lf3XdQskwRVoXDv3lVp7R5teAc1ywQ+BcFD9cvT2r3QvALI7nUIBE83vpHW7s32Ny1HNQbj5ZaX0z5+dffraDJJJg7C0azoeC5txGJH7ysk9MiRaEpmSDZ1P42mm0csDgVfIq73kZmD8DeN/f1PkNTDplY94VdIaG1ktngPoNMVeoKkZh5NiEXfRNPqs9TQiEZeQNOazc0SayC5K2sN4iuQyQNZPNbhHwnHEXE4IWkK9/B2+5602zFGaFJnedtumsO9hjbdsQBvtW1Nux1jhI5kQ88B6oPthjahZIQ3295Pu41hhESyL3SIPYEGQ5u4Hmd5x/KsNQDaYm3sCuwyHNdkknc7Xz2cyJkdgWQPOwMbDMel1Nnc8wzZOjoAMT3A3sDbJhqSvb0Pk61jCKDJKA2Bl0xt2gL3YefyKtHoCD1hahMK/YVUkmj2hEIPmT+P0IM2NVRk+BEbj3f4R8BxRBxOSJ5p2Ihi88ikEIJnGzYajr/cssFWFAFSkZfnmtYYjq/oWEdSJu1poPBaq3F0Z13POqJ61JaGgsKyjmWG47sCGwlpfbY0BAqru4zzaprCmwgk22xrbO19znC8N7adQGI/dpwdEBzoN3YSIol9hOLryS7iMoCkPfCA4aimNROLrSC7SMUAOuHQ/cbPQO+F2Gs2NTSIPIG0+R1w+GTjOCIOJyQNoW7snucSwKGwcXi7Mdxluz6EJnWaI92G4y2RzqyOlX5AA53mSIfheHu0HdXmnbGOTmu01XC8K9aCsHm5kOh0xIy3AvoSabYJLGr0xZsMx0PJRtsaIAkljOeJJY2jV5kQ15oMHeVksgF7zlQKXe9CSoPEWK0Ze87UYWQIdHtOrMMnG8cRcTghCSfjR06PZIsuJeFkzHA8osVsR0QAgknjaERUM9bPhFDS+BRFTI/Z2Wk4QkQz1ojrUcQQiMRMIjcJPWLb2RmYx4h0uR1WSUrj16HroSHRAB0pB//8SDlUGibPdwg1hnQuh08cjiPicEKS6/Kg2Fz4FKGQ4zIuDpWjeu1v/wB5LuPaFX6Xdyh8BHJdfsMxr5J5AazB8KvGGh7FZys/ZACfYqzhVvxDouE20XBlUchr0HmEsYaSQZ0Rc1SEQXEzIYauWJhiVHhMDNXrAJziZg4mOI6IwwnJqLwy23f5UkpG5ZYajtfklmedDDuAIhRqcowLQw33V5C0WNvCUAOFkTkVhuOVvkq0IdAY7h9uOF7uHZ7lSZYPagzzjTAcL/aMtDU/pHJEir3G5evz3KNsa4Agz22s4XPVDYmG11VjuHXoco1iKMJgijIMIQwcaXU4Q9IFROSDsF6gzeEfD8cRcTghubxm5lBsgafmMeC8qllZHw8eQJM6F4+YZzh+ctksvIrHloaOzrmViwzHZxfPJke1d6evo3N6+emG4+PzZ5DvstfLR0dnQekSw/Eq/zQK3NXYWWAlOtOKLjEcL/JOoNAzAXuXPsnowmsMR33uUeR5F2D3RMuwvJsNx1S1Aq/vLJsaCrm5txmOCq
UQfOfb1FAh5zqEzTwph082jiPicEJS6S/ktMrxtuqInFU1kWF+49LCRZ5czq6ckX0dEaEwr2QsI00iIjkuH2dVLEDJ8qumIBifX0tdnnEkwa24Ob389Kw1BIIqXxVj84yb1ClC5aTS82zliRS5yxiXZ9w/RwjBjOIrsp4fwKcWUpd/sqnNmMLrsZOE6RI5jMg719SmIv8W7Jw2Ebgpy7vS1CY393ZbGiDIyb3B3CLnBpsaOsJ/rY3HO/wj4DgiDicst49dhJ7l1okudW4de1Jau+tqT846YVWXOjeOOjWt3UXVp5Jt4EVHcuWIs9PanTHsjIz6xRyNRHJB1QVpTxDNKzkLt+LJ2hk5tfzStH1zJhYuwavkZp20OrPkatQ03V9H5p2LVy3NUkMwuvA6XCZ5KABF/iV41OFkF00QlOddn7bfjNd7Gi7XuCw1FPz+K1DVYeZm7tngmp61Bt6zEa6h6fTs8MnFcUQcTljmlNbyb1PN7zyN+Pa085lVkv4COKFgON+cnN1d+OfGnsuCsvFp7UbmVPL18bdkpXHViHNYVDYzrV2Zt4wvjv3ikQ67mXBm+ZksLl2c1i7fXcQto76VhYZgdtFpnFR6XlpLr5rHRSPvRAglQ0dBMDrvZOaUXJfWUlV8LK76HYrwkNklUGGYfwGTSz6f3lK4GT/sPhThJ7NFXCHPM4eRxf+e1lIIhZLSBxFKQYYaKm73FAqL7rSgIRDFvwOlJGMN1FGIwp9k8BiHf1QcR8ThhOaWMSfxrannpZqZp7mbVkXqnM2/TzufG0cvsKxxyYh5fGvyFSgISxoAXxx3HrfWnW5Z49Rhc/iXCbeiHP7HjIHxa0eexy2jLrasMbNoJl8e+2VUoVrWWFKxhBtrb7RcT2Vs3jRuq/t3XMKdVmPAkZhXfCZXjvyiZY0q/xQuHflzXIo3rTMyMD4u/wzOrf4OwuI2W5F3AqdU/xG3kodIu8Cm5qzKOZWFlb9EEda6CPvdY5lU8QQupZj0i3jqvSnwLWb8sHtRDE7LHIvLNZKysmdR1ArSX84FIHB75lBa9hiKxRNEQq1AlDx6OHnVmgauiYiShxBKviUNh39shByKQgofEpm0EXb4ZLOhu4H7977L6607Qabu1HQpUYRIba0IOKdqEreMOYmZJdmdvtjR18ijB9/hjbbNR+Y+WkMCp5RP4trak5ldMjorjQPBRv7atIzlHWvRpH5EYyDCoKMzt3gylww/g1nFE7PSaIo0sbRtKas6V5GQCVShokv9AxqTCyazpGIJM4qMczbM6Iy1sLLzRdZ2v0VCxlBQkXxQoy53EovLLmRKwYKsCsf1x1vZ1PMU23tfIiEjhx0GnYHFUKJR6ZvM9JIrGJd/RlYakWQ7+/oe4UD/kyT0IALXB16HRKPIM5kxRddRk3dBVkmXCa2DtsB9tAcfRNN7SZ1E0TmyaJPE755IRf5tlOZeadnRORpN6yIcupdQ6D50vXNQDZdrLLm5nyIn93rDY8FmSL0fwg8jww+C3n6UBqT+JklQaxA5N0POtcancRz+Ichk/XYcEYcTgpiWRCDwqOYX+o5ogGcbNnIo1E0wGSPP5aUmr5RLR86g3Gd+9xXXUmWmPap5DkF3LMjLLes5GGonlIyR4/Iy3F/CBdVzGOYz37dP6El0KfGq5otJfyLEW+2raQi3EE5G8alehnmLObNiARU+4yPHAEldQ5c6njQa4WSYVV2raIw0Ek6G8apeSjwlnFR6EpW+yjQaSXR03MJtusDHtAgbe9+mKXKAqBbCpXgodJcwq+hU06O6AJrU0GQSt/CYaiT0CHv636ItuouYFkAVHnJdJYwvOIsy3xhTDV1qaDKBS3hNNTQ9RlNoKV3RjST0AApuvK4yRuQtodg72YJGHJfwmWroMk5P+BUCsffR9D4EHlxqCSU5F5DrmWX6WCk1dBlDEf40dgmi0deIxd5JlWjHhaKU4PNfgMczP81jdaSMItJqaBBbjoy/fbhiqgJKEc
J7FngWptVARiGNhsPHH8cRcTjhkVKyqvUg9+9ex/KmfcT0VGa+T3Vxzohx3DxhDnPLR9i6WEkpWdvVwEP71vBW626ihx0Rr+Li1Mqx3Dh6HgvLR9nW2Np7iMcb3uPNtu1EtQQAbkVlYelYrqk9iYVlY9ImaaZjd+AQzzW9w4qOjUS0VHdZl1CZWTSWS4efwrzSSVmf/hngYKiRpW3LWNW15kiVVVWoTMgfw7mVZzKneEbWCbEDtEWbWNm5lLU9bxPRUtU2FVRqc8dyStm5TCucj0uxV7uiO9bExp6X2Nq3lKgWAECgUuEbw5ySS5hQcAoum0eqg4lmdvc9y77+54ke7pKbqmEylgmFVzMq/xxcir2IQCzZSnPgMVqDjxPXOkmdZ1fJcY9meMHNVORejGqzeFpSa6Mv9DB9oQfRtNbDGgpu12iK8m4lP+dqVMXetVdqXSQjj6OFH0JqzYc1BEKtQc25CVfOVQilyJaGw4mH44g4nNAsb97Hd1e/RkOwF1WI44qKqUJBkzpjC0v58YLzmT8s862W9zvq+f6GF9kf7Doy3wc1Uro1uSV8f+b5LK4wv7MejC09Dfxo27PsDbQZaKR+V+Ur4l8nX8RpFZMy1tgbaOKXux5lT7BxUA0FBR2dMk8hnxt7KacPm5WxRlOkhbv23cee4P4j8w2mUegu4PqaKzit3LimiRGdsVYebbiLfaEdg2oIBBJJrprH+VXXsLjMuN6IEf2JDl5p/hX1ofUIlOOqtA5oeJVcFpXfwNySyzN2QiPJLt5ru5PG8DuDaqS2KHRcIoepJTcztfhWy3krAyS0HnZ3fZfO8GuktlWO1RCARBF+RhTcxqiir2S8ZaTp/XT0/DuByF9JOQaDaYDAQ2HebZQV/jsiwy0jqYdJ9P8ALfI0qSPAxy41A++9CzXnOtwF/y+rLSOHExPHEXE4YXli32a+9e5LSAt1OhUEihD86uRLuLDW+iL+UuM2vrHmGaSUafvVpHbQBf815xIur7WeL7GibQf/tuERNKlb7onzzckXc03tQssa63t2890tf0pt91jUuGP0xVxTc6ZljV2Bvfxkx2+I6/HjnAMjrhh+EVePNC4adiyHwvv5/b4fE9MiljVOK7+AS6tvtuwodEYP8ujBbxHR+i2XiZ9edB7nVv2TZUehP36IpU1fIpLsQlqsrTEqbwmLK7+LkuZI8QDRZDObWm8mmmzGav2OUv+ZTB72m8OngNKT1Npp7LiaRHK/RQ2B33sy1WX3Hj4FlB6p9xLrugmZ3IG1mi0Kwj0Lb8m9CKcc/CeCTNZv59SMw0fG0kN7+Oa7L6JbLBauI0lKna++81dWtdZb0ljVvp9/WfO0ZQdBHtb59rq/8lbLbksam3oO8q8bHiYhtYwa8/10+/O81rLZku3eQCP/seWPxDNwQgD+uP95Xml535JtU6SFn+z4DTE9ZtlBAHi66QVebnnDkm1XrJ0/7Psx0QycEIDlHS/xetuzlmwDiU4eO/jtjJwQgM29r7Ci/V5LttFkD683fTkjJwSgPriUNR2/tFSrJqn1s7n1dmIZOCEAXZG32NX575Y0dD1MU+eNGTghAJJIbCWtXV9M5Yeks5YxYt2fRiZ3Yr1wnI5MbCDe8wWkTFh8jMMnBccRcfhIiGlJvvHuC1k9Vkr4l5XPo+nmF7WkrvOva54l2xjft9b+9UhCq/FzkXx385NZFVoTwA+3PGPaEXhA4xc7HyGpa1n1d/n17ifoT6Tvdvqn/Q8Q1+NZaTxw8HG64z1p7Z5uvIeoFsmqmd1LrY/REWtJa7es7U+Etb6sNN7veoK2yN60dhu77iac7MzICUkh2d33NO3RTWktD/b9nkiyISuN9tBzdEeWp7XsCf6BeGIHmVdL1QlFXyUYSf8d1kIPIhMbstLQ4++gRZ7K8HEOH3ccR8ThI+HFgzvoj0ezah+jI2mNBFnWvM/UblnrbjpjwawWVgn0JiK81rzT1G5N134aw90ZRSmO1ohocV
5pNl+UdgUa2BdqzkoDUv1vXm1dbWrTGG5mZ2BvRlGKY3mz7W3T8e5YO9sDG7LWUFBY1fm6qU0o2cvO/rez7torUNnQ86KpTVwLsj/wYhYOwt80dvU+aWqj6VFaAo+RfTl1lebAg6YWUiboC95L9uXtFXqD96TR0EmG781yfgBBMnRv1tWOHT6eOI6Iw0fCfbvWodjoU6IKwf271pnaPLBvta0mdgqCB/aZL+CPHXzP1ukUATxy8F3TC+1zTe/Y0pBInm18G10aLzhL25Zn3ZtmQGNp2zKSunEE6d2uN7Iu1Q6pWiTvdr1JXDeOIG3pfRU73RElGtv63iCqBQ1t9gdeRrOxXSDRaAi+RSTZZWjTEX4JTRo/h/RodEfeJpI4ZGgRii5F0zttaOhE46uJJXYZW8RXIrVGsv+bSGRyFzKxMcvHO3wccRwRhw+d1nCAzV0tWd/hA2hSsqLlAMHE4ItSfzzKex31x53AyQQdycbuRtojgUHHE3qSFe07jju5kgkS2B9s51B48EVJSsnyjo22NADaYz3sCzYZjq/qWm0rGgLQnwyyO2gcpVrXszLrSMUAMT3CnsBWw/HtfcuyioAdjSYT7A+uNRyvDyzFbitoicahkHEEqSP0MvYvx+LwSZvBCYSfx25HYFAJhp83HNUiL5EqdGYHF1rUPErl8MnCcUQcPnS6ounzFazSE4sM+vvu+NBpdMfDg/6+LxGx5Ux9UGPw5xvV48RNogyZ0Bsf/A5blzqh5OCvMVP6E4M7bQAhzXgsE4LJfsOxcLLX9vwCYTqPWSTDuoZKTDPOqYlp7djpCJzSUEho3YbjmtaOvU66KRVNN34/pN4F2P38SqRu/DocPnl86I5IU1MTN910E6Wlpfj9fqZNm8batcZ3Hw6fPJJpkkwzIaEPfiFNl8iaCcm/o4bZdkrGGiYnHOxGEQbQTDSG6rWYatheWAEEujRePLPNDclIw2QsE6SJE2A2NlQa9p2QlAJD9H44fDywG0Mzpaenh8WLF3PGGWfw8ssvU15ezp49eyguLv4wZR1OMAo8Q9dzotAzeB2DAoPfD6mGe+g0jObyq94jhbfsku8avKmZIhS8ioeYHretkasaN07zqX6CSftHMf2qcfVQn5J3pHpqtkh0fKpxewCPUkCIVpsaGh6TCqVuxf41USJxKcYtCFSlhIFiaHZQhbGGUApJbf/Ycd4UMHkdDp88PtSIyE9/+lNGjhzJPffcw/z586mrq2PJkiWMGZN5FUuHjy8j84oo99krRS2AuvwSSryDL+Bl3lxqcottpMOmqPDlU50z+EXQ7/IwPr/SVtItpJyQUbnlg44pQmFqYZ1tDZ/iYUzecMPxyQUTbCWrQqrE/Ji8OsPxcXlTbWsIBHW5EwzHa/NmWeiem54ROVMNx6py5tlKuk0hqfDPNBwt8i/E/uVYo9A3z3DU7z3J5vwASdN5FM8C7G//JA/P4/CPwofqiDz33HPMnTuXq6++mmHDhjFr1iz++Mc/GtrHYjH6+/s/8OPw8celKNw8YY7txfW2iXMNK20KIbhpzHxb8ysIbhozz/TEynW1J9nKE1EQXFUz37Tx3qXDT7GloaJwXtUC/C7jctlLKs+wlayqoLCodD75buMqmCeXnWtbY2rhXIo8JYY2s4ovtLV1IlCozZ1JidfYaRtfeLnNpFuFUu9kSnzGDlVV3tU25gcQ+N2jKfTONbQoyL0GQeadfY/GpVaT4zvDcFz1XQLCOEpmCVGM6su8xL/Dx5cP1RHZv38/v//97xk3bhyvvvoqX/jCF/jKV77CfffdN6j9nXfeSWFh4ZGfkSOza+fucOJx3dgZ2Gm26VFdXF5nfNcKcHnNDNxK9nfHihBcWWveq2VJ9XRy1OwbpkngipHGd60Ai8umUejOPoKkoXNR9WJTm+mFkyk1WeDToaOzpNJ4QQKoyx1PhXcEZOmA6uicUnauqc0w32iq/BMRWWpIdGaXmJerz/eMoMo/30ZURGdi0T
WmFl5XBWU555D9qRbJiPxbTUviq0oReTlX2NBQKMr7lGlJfKHkoPqvtaXhyr0JYbFcvcMngw/VEdF1ndmzZ/Nf//VfzJo1i89+9rPccccd/OEPfxjU/tvf/jZ9fX1Hfg4dMj4T7/Dxotyfx2cnW++zcixfnXYy+R7zhlgFHh9fmnhq1hqfGb+I0jRbSH7Vw5fGZ3e3JoBraxdS5TfPB3ApKp8ZfXGWGoIllfOoza0wtVOEwk21V2WtMa94FmPyRpnbCcGlw2/MykUQKEzIn87YvClpbU8bdjvZODsCheH+yYzJSx9Jm1n2ucOOSGY6ApVi73hq89L3/6kt+vLhnjSZa/hddVTkXZrWtqTgnxDCT+aXfhWXWk1B7o1pLV15nwGRn5UGSimunFsyfJzDx50P1RGpqqpi8uTJH/jdpEmTaGhoGNTe6/VSUFDwgR+HTw7fmHkaF2XQvG6A68fO5PNTrDkxn5twMlePyrwD7cUjpvLVyeZ3+ANcW7uQG0eZRxyORQCnDpvE1yaeb8n+vKoF3FSbmcMjEMwsGss/jze/+x5gYelcbqzJzBkRCMbm1fHlcZ+2ZD+pYBZXjrg9Qw2Fan8Nt436mqWmdzW507mg+usMtDC0qlHsGc4VNd9HsdC5tsw3hZMrf3g48mLtsilQyXEN46zq/0FV0t/h53kmMKX8d4dzXqxruNVSplfeg6qk3xLxuOqoLrvv8BaN1aiFiqoUMrzsUVQLSaSKWo235B4Qvow0EDl4S+5HqGUWH+PwSeFDdUQWL17Mrl0frMK3e/duamtrP0xZhxMURQh+tfgSPj0pdQdqVgVVFalL/lenncyPF5xnuQurEIL/nHURX5x4CsKCBsCnx53Ez+ZdjpKBxtcmns+Xxy9JdQg2WfwG8k2uqlnAz2ZdjyuDraNb687nS2MvR0GYbj2oh7/GZ1fM5cfTP4tbsX4Y7qLqJXx29C2oQjXVGEg6nV8yi+9M/joeCwvrAIvLlnBz7VdwCZcljYn50/mnsd/Hp1o/pTSl6CwuH/ld3GlC+gOJrSNzpnFT3S/xm5yWOZba/DM5s/p/cCsDz8sgX+mwRql3IheM/At+V6lljZKcU5leeR8uZeB5GV2iUxo57nHMrnoKn6vaskaO9yRGDHsG9chJHXMNt6uOkcNexuMebVlD8czEW/oUKANJ2eYaQq3GW/YsinuiZQ2HTw5CfohF/desWcOiRYv4wQ9+wDXXXMPq1au54447uPvuu7nxxvQhvkzaCDt8vDjQ383Dezbw6N6NBBMfPEZa6PFx4/jZXD92JiPysj/G1xjq5bED63j0wDr6E9EPjOW5vFxTN5vr6+ZQk5d9rkRbpI+nD63hyYb36U18sEiYX/Vw2Yi5XFkzn7q8wU/JWKE71s/LLe/x1+Z36Il/8KiqV3GzpHI+F1cvpi6vKmuN/kSAZe0rebXtreOa2bmEi1PKFnJO5WnU5WZ/ExFKBlnTvZy3O1+hO97xgTFVqMwqWszJZUuoyRlj2fE8lpgWYlvfG6zr/is98eYPjAkUJhScwuySixjun5K1RkIPcyDwKjt7H6cvfuCYUYWa3FOZUHQ1Ff7ZWWtoeoT20Is09d9PKHFs/yNBqf8Mqgtuoti3yDRnwwxdRgmGX6Q3+Gdix5VUF+R4T6Uw71Pk+s5EWIgaDYaUcbToa6n+MYnjWzQonpNw5d6K4j0LIT7UahIOHzGZrN8fqiMC8MILL/Dtb3+bPXv2UFdXx9e//nXuuOMOS491HJGPF1JKtnW209DfSyiRIN/tYUxxCeNKjEOt0WSCjV0t9MUiCCEo8vqZUVqF1+RUyY7uduoDPQQTcfLcHkblFzOpZJihfVxLsrmnid54BCmhyOtnWnE1PtX4BMHuvg72BzoJJeLkuDzU5BUzuajCcGFJ6Brb+hrpjYfQpaTQncPkwuH4XcZ36PXBDvYG2gklY/hVD9U5RUwpHG6ooekauwKH6E0E0aROvsvPuPyR5LqM67Q0hj
vZH2whlIziVT1U+IqYXFBjqKFLnf3BevqSAZJ6klxXLnW5NeQa1CQBaI92cSDUSEiL4BFuSryFTMwfjWKwQOpSpzFygECil6RM4ldzGO4fRa7LODrRE+/mYPgAES2MS7gochczJm+8oYaUkrboPkLJbpIyhlfJo9xXR66ryFAjkOimKbKHqBZCFS7yXEXU5E5GNVggpZT0xPcSSXaQ1KN41DwKPaPJcRl/3iPJHtqj24nrARRc+FxFVPpnoArjz2IovptoshldRlCVAnLdY/C6Kg3t41ovPbGNJLQ+hHDhUYsp8c5BVYxzrOKJPSSSh9BlCEXJx+Mai9s1wtBe0/sIxdaR1HsQKKhKMXneBSiKcRRLT+5HJg+BDIKSh1DrUFw1hvZSD5KIr0bqKcdYKMW4PfMRivFJLYcThxPKEbGD44h8PAgl4vx19w7u3bKe3d3Hl3+eVVHFbdNmc96YcaYOhhnRZILnDuzgvp3r2dbddtz41JIKbps0h4tGTcTnyu6IYkxL8nLjDh7Ys4ZN3c3HjY8vKOeWcfO4pHYqOSYOhhkJPcmbrTt45MB7bOg5PldqVG4ZN9Qt5MLhM8hzZ1cILqlrrOrczlONK9nQc3wvmGp/KVeOWMx5VXPJz7JImyZ1NvRs56WW5Wzo3X7ceJmnmAuqTuOsipMoMDnia4YudXYGtrGsfSlb+jYeV+St0F3E6eXnsLjsNArc2UXOpJTUh7awuvsldva/f9wx3Vy1kHmlFzC7+BwK3Na3WI7VaItuYXvvMxwIvHXccWOvUsCkosuYWHgJeW7zJGMzjb74Vg72PUJz6KXjqp+6lHxq8q+iJv8actzZn0YMx7fSFbifntDTSD7Y90kRuZTkXUdp3s343NnXikomdhINPUAs/BjwwUgmwo/Xfw2+3JtxOds4JzSOI+LwkbGupYlPvfQMfbGoYc1GRQh0KanKy+eBi65ibElmF/QtXa3c/voTdEbDKIhBa2wM/L7Ml8t951zNlJLMLui7+zq4fcXDtEUChhoDr6/I4+ePp1zLrFLjO8bBaAh18YX376Mx3GOqAZDj8vKruTcwv8z6vjxAW7SHf9nwJxrC7Sgog9bxGNDwKm5+OO0WFpZldkHvjvfxo+3/x4FQo6FGSkfgEipfG38bJ5VllkAcTAb43d5fciC0N62GIhRurv0MC0tPzkgjqoV4tOFO6kNb0mikoi4XVn+OuSXnZaSR0CO82fJ9DoVWIVANa54IFCSSBeVfZGrRtRlt6Wh6jE0d/05r+NU0GioSnXFFX2Js0ecy0tBlnMbub9ETegLzyqmpsfKCL1JV+M2Mto2k1Aj1fY9Y+F5LGt6c28gt/EHW20YOHy6OI+LwkbCqsYFbX3gSTUp0Cx8jVQh8LjdPXn49k8qs5Uysa2/ihtceJaFrljXcisoj517PrHJrCXzbe1q57q37iWkJS917FQSqonDPqdezcNgoSxoHgh3cvPJuwsm4pc66CgIhBL+eeyOnVIy3pNEc6eYLa/6X/mTYkoY4/L/fn3ojZ1TMsKTRHevl3zb/nJ54v6ViZQPO2z+NvYkzK6xV9gwk+vnZrh/QFevMqCDadSNv4fRh51iyjWhB/rL/23TGGjMqVnZ2xS2cXH6lJduEHuHFQ1+hK7Y7I42ZJbcwt8za9rUm46xuuYOe2AYyaZpXm38jk0u/ZckZkTLBgY5PEYguJ5Py8MW51zCy5BcWNTSCPV8kHn0pAw2Bx3cBecW/zzpPxuHDI5P12/nrOWTF/t5uPvPSM5adEABNSqLJBDc//wSd4fTdchuDfdz++hOWnZABjbiucdvrT9AcSl+ZtzMa5LYVD1t2QgB0JJqu89l3Hqc+kL5LaH88wufeu9eyEzKgoUudf1n3CHv60/c5CSdj/MuGu+lPWHNCYOByL/nPbY+wvW/wI/VHk9AT/GD77yw7IX/TgN/tfYitfbvT2mtS43d7/ztjJwTg0UP3s6VvY/rnJCWPNfwkYycE4PW2+9
na944ljbdafpCxEwKwsft+dve9aMl2S8d36YmtJ9POvQcDD3Gw/yFLtk0938vYCQHoCT1Oe///WrINB35KPPpihhqSePRFwoGfZPS8HE48HEfEISv+sH41MS1p2UEYQJOS7miEB7dtSmv75+1rCCXjGWvoUhJMxLhne/ouzw/sWUtvPGLZCTmigSSmJfjTrnfT2j7VsJb2aMCygzCABJJS5097V6S1fa11HU2RLrQMFyRJKhfj3gNL09qu7NxAQ7g5q7LtEnj44Atp7bb0baQ+vD8rDYHg2abHSRfkPRDaQn1oS9Zl219vvT9tZ+GO6A4aQiuz1ljTeZdpt16AQHwfzaEXyLaJ3e6e36LpUVObePIQXcEHs9Zo6/8Nmm7elFDXOokG78pqfoBo8G50rTPrxzv8/XEcEYeM6YtGeWb39owX7wF0KXlgywYSmnGPkHAizmN7NmetoUnJI3s2ETHp/hrXNB7aty5jR+dojafrN9MfN76Ya1Lnkfr3su6mq0mdpS3b6IoFDW2klDx56J2sO/noSN7r2klLxDy681LLMhul1CU7AvtoCB+fBHw0b7W/lnWjPImkKXKI+vDxCbpHs7rrBVvN+HoTbRwIbTa12dH7jK1mfBGth4bgKlObhsBjtjSSMkhL6FVTm67gQ9hZJqSM0xN6ytQmFn4Uex2BtcNzOHxccRwRh4x5YtdWkrqdJmDQFY2wtH6v4fhzB3YQttlCPpiI8/yBHYbjS5t20huP2NJI6BrP1BsvSqva99IWtde8UUrJMw3H12AYYHPvARrCHbYu5QqC55reMxw/EDzEnuDBrB2qlIbCKy1vG463RVvZFdhuu1HesvbXDcf7E13sCqy2pSFQWN31kuF4VOtjX+B12834tvUaL+BJPUxj4GlbGqCYbs/oMn44GmKvm25n4B7DKJWUGpHQPWS6tXTMLERC9yKl3a6/Dn8vHEfEIWPWtTTZnsOlKKxtMb47XtveaFoV1QqqUFjXYfxc13U24rKZ5CYQrOtqNBzf2HPQtoaOZH33QcPxLX31tu7wBzQ2DnLUd4AdgX02eyenmtht7dtjOL4/ZDyWicbuwLEFwP5GY3i3LWcKUo3yDoa3GY53RnejY76tYkWjLbrFcDwQ34MmzbdV0qPTF99uuAUUTxxA0/tsakhiyf3ocnBnXNdakfrxx/EzVtFb0YdgHoe/D44j4pAxPdGozUs5SInplkZ/PJb1tswAutTNNRJR24uSjqQvZhxVCSTsLhYpeuNhw7FAMmK5PL0Z/Unj1xFKRgyLh2VCMGn8OsLJcNZbP0cT0Yw1onr6JGkrxE004rrxNlom6DKBpscHHUumybvIBKO5NAPnITuNwR0aKe06OkfNpfcO2VwOHy2OI+KQMR7V/rl9IcBt0nfFrSi2FyVx+CivES4l826qg2H2fmTSW8ZUw+y9Eir29tiPnmdwXMLFUBz0N3s/XIpq2zGEVLl447GhKSOumMwzVBqAYY2MoSyHLgyqugqGTkPBqMhgdsUHB0OQXZFBh78/jiPikDHlObm2t02klJT5jcuGl/ntaygISn0mGt5cW/NDavun1Gc8T6k3N+tk2AEUBOU+4/LnxZ5829EjgaDUa3zWv9CTbyuvYoBij3EF1AJX9n2FPjCPSZXVPJMS75mQ4zJ+r/xq9r2Ljsaj5Bt2Bvaq2VV5PRYFDy4x+OfXpWbfH+lYFVUtHnxkCDvtiiF6Txw+ehxHxCFjLho7wfbCp0nJRWMnGGuMmkQyw+Oux5KUOheNmmQ4fsHIyRkfqT0WTepcOHKy4fg5VVMHraCaCTqSc6unGY6fNmyq7biORHJ2pXH103nF03DZrGApgNPK5xqOTyqYhsekH4o1DcGCkkWG47U5U/Cr9nqVCBSmF55uOF7mm0iOzUVcoDK2wLg4W557LLmuWuxE9AQqVbnGna09rhH4PdOxt0yoFPjPQRGDtytQlGJcnkVg4/QPqLg8i1GUwZ0dhxMfxxFxyJjTau
qoyrPePv1YFCGYW1nNhFLji/XcYcMZW1ia9WVWABOKypltUl11cnElM0qqUbJUEcCI3CIWVxiXYa/JLWVh2RhbORwlnlxOrzAuw17mLeTk8qmoNr7OuaqPM4cZV1fNd+dyavk8W0mxbsXNaeULDMd9qo/FZafZ0hAIFpWdZjjuUtzMLTn/SNn2bJDozC0513BcESpTiq/AjpMg0ZhUeJnhuBCC2sKbsp5/QKO28HpTm7K827F3okWjLP82Uwtf7u3YO5mj4c+93cbjHf7eOI6IQ8YoQnD7tNnZ162QklunzTa1EUJw+yTju+d0SOD2SXPSlpe+ddx8WxGLW8bOS+tkXF+3MOvtGQXBtaMWpM01uXLk4oyLmR2tcfHwBXhNuhEDnF95atbbMwoKp5cvINdl3mTvtPKzbGnMKV6QtgHenOJzyTanRqAwLm8uRR7jbs8A4wsuRMnyLl+gUOmfTrG3ztRueN7FKMJLdg6PQr5nAoWeqaZWRbkXoYrCrDU8rlryvItNrTy+cxDKMLJbjhSEMgy37+wsHutwouA4Ig5Zcev0WcytGp5xHociBBeMGc+FJtsyA1w7bjonV4/KOJqgCMHpw0dz1Vjj7YwBLqqZwpLhEzKOiqhCMLe8hhvHzklre9qwCVw8YmbGl3JVCCYWVnHrGPMLOcDMotFcPnxRFhoKNbnDuLUu/YV8bH4tVwxfkqFCykEo95ZwY+3FaW0rfdVcWn11VhoF7kKuGnlDWtsiTznnVX0mYw2Bgl/N48Lqz6W19buKObniG1lpuJUcTqn4Zlpbt5LHjPIfk7lTpaAKLzPK/yuto64IHzVlvyFzR0QgcFFb+r9pNYRwkV/828MamegIQCG/+LdDmrzr8NHjOCIOWeFVXfzp/MuZUlZh2VEQwGkjR/HLsy6w9BiXovD70y9jbvlwy5cngWB+xUh+d9qlh0/FmKMIwS8XXMbJlaMtaygIphZXcdfia/Cq6S+AQgi+N/1Szqg0zlcZTGNMXgW/m38LfjX9aQAhBF+ZcCnnVJpHmj6ooTDcX8p/z7yDXNfge/jHcmPtxZxbcUpGGmXeYn4w9Z8ocFvLzTiv8mKWVFyYkUa+u5B/Hv9tCt1Flh6zoPQizhiW3mkZYMAJuWXUD9NGQwYYX3ghC8q/nJGGW8nhvOH/TaGnxtJjqnLPZWrp97C6iAtUVOFjbsX/UeBJfzMAUOA/k5rS/yG1XFj5ligI4WFU+Z/I8Vrruuz2LiKv+A+AC2vLkgK4yCv+PW6vcU6Qw8cDp/uugy2iyQT/tWo5j+3YQvxwyfajP1AD7e5z3R4+NX02X523yJKDcDQxLcnP16/gwV0biGnJ4zQGOrz6VRc3T5zNN2admvER46Su8+tty7lvz2rCycSROY/WgNSR4+tGz+bfpp+Jz5XZ0UNd6vxxz3Lu37+SYDJ25L05GoHAJRQuHjGTf51yPjmuzJI3pZQ82rCcB+vfTNUXMdBQhODsipl8Zfxl5LvNt0sG03i5dTmPNbxMfzJoqCGAk8pmccfoayh0Z55TtLJzGc81P0VfohcF5bgtm4Hj3TOKZnPdyFsp8mSerLi5dzlvtN1PX6ITgXJcb5iB343Lm8OF1Z+37IQczYHAclZ3/h+BRDMC9bhqqAO/q/bPYXHFv1DoGZmxRnt4BTu6fkYoWW+qUeydw9Sy75DvGZexRiC6kuae7xFN7CKVXHpsXkfqdzme2Qwv+RE5nvQRyWNJxNcS6vsPtMQWUw3VPY3cwv/E7cl++9bhwyWT9dtxRByGhL5YlKd3beehbRtp7O8npiXxudyMLirmlqmzuGTcRPxuezUDgokYz+zbzoO7NtAQ6CGqJfGpLmoLirlpwiwuGz2ZPLe9UxfhZJznG7bx0N61HAh0E9USeFUXI3KLuGHMHC6vnUa+x1r0wIioluC15q08Vv8++4IdRLU4HsVFpb+QK2vmcdnIWRR6jI8dWyGuJ1nRvoWnG1eyL9hKVIvhVlyUeQu4qH
oBF1bPp9hj7/RIUtdY3b2Jl1pWsC/YQFSP4RYuijwFnDXsJM6pXEyJyXFdK2hSY2vfJt5qX0p9aC8xPYYqVPJdBZxUegonl59BicfesU1d6uwLbmB114s0hHcQ1yMoQiVHLWB60enMLTmPYk+FLQ0pJc3hdWzvfZqWyHoSegSBglctYEzBOUwqvDQrB+RYjZ7oOur7H6Yz+i5JPXQ4ylJAVd551ORfS75njG2NcHw9nYH7CETeRJOp4m2qUkhRzgWU5t2M32N8iswqyfgmIqH7SERfQ8pUwTUh8nH7luDPvRWXxzix2uHEwHFEHGwTSyZ5aeduVh1soC8SQ1EExX4/54wbw2mjR6GmiWpIKdPuDcc1jVf37WH5wXp6oxGEEBT7fJxVN4az6sakjZxY0UjoGksP7uXNQ/vpOVwBtdjr47QRdZxbOz5t5MSKhqbrLGvex9Km3XRHw0gkhR4/J1fWcX7NxLTbN1Y0dKmzsn0fS5t30BMPk9Q1Cj1+5pfVcf7wKfhd5ts3VjSklKzr3s8brZvpjodI6kny3X5mFI/i3KqZaaMzVjW29e9nefs6euL9JGSSPFcOE/NHcWbFvLTJrFY19oX2827ne/QmeknoCXJcOdTl1nFy2SJyXeb1Y6xoADSG97O+Zzl9iS7iehy/mkO1v465JaeTl6YmilWNzuh+tve9RiDZTlKP4lFyKfXWMaXoPHJd5vVKBi7t6XT64weoDzxHKNFMUo/gVnMpcNcxquBSclzmDphVjWjiAB3BJ4kmD6LpIVQlD7+7jvK8a/C6RgyJRjJ5iFD4ERLJ/Ug9gFDycKm15OZej9tlnvjrMPQ4johD1nSGQvxlzXoe2biFQCyGKsSRmiGqoqDpOpX5edw8eyY3z55JjifzKEdPJMKfN67joS2b6I1FP6ghFDSpU5aTw83TZnL7zNnkezKPcvTHY9yzbS33bd9AVzR8ZN6URkqvxOvnpkkz+fTUuRR5M9ueAAgl4ty3ey0P7F5LWyQ4qEaB28cN42bxqYnzKTMpfGZEVEvwyP41PLT/fZojfahCQZepjZABjVyXl6tqZ3P72JMY5s/8e5LQkzxzaDWPHVxJU6T7A69jYNvFp3q4ePgcbhh1ClX+zLdANKnxasu7/LVpOY2Rtg+8joFtF4/i5qyK+Vwx4kyq/ZnX4dClzjudq3i1dSmNkUYUFOThfwa2cVShclLpQi6oOpdqv/HRbiOklGzofYd3Ol6gMbIfBfXwltHfNAQKM4oWcfqwS6ny12alsTewgvXdT9Ea3X54W2VAQyG1aSgYm38Kc0quocJvLdfjWJpDK9jV+wCd0fUf0OBw6rYEqnNOZULxLZT5sotA9EaW09L/J/qj75DaVpGkjgMP5JvoFPpOo7rwcxT4FmalEY2tIhD4P6KxNw/Pe6yGhtd7Kvl5n8fvOyMrDYfMcRwRh6zY3dHJbY89TVc4nLZgmSIEE8rL+MvVl1OeZ32BPdDbw83PPklrMGBJo66omPsvvZLqfOt//8ZAHze98jgHA71pj80qQjA8r4AHz7uGUQXWF9j2SJBb33qEPb2daY//qkJQ5svl/jOvZ1yh9QW2Jxbi8+8+zNbeprTnIlQhKHT7uXvRzUwuqrKsEUhE+LcND7Cx5wBgfv5CFQp+1cP/zLmNaUXWF9iIFuPO7X9hXc+O43JvBtNwCzffnXIHM4rHW9aI6wn+sO9u1vWsRyBMS8UrKKhC5Z/GfZEZRdMtayT1BE82/oH1PSssaQghuKHmn5lWZH2B1aXGsrbfsqX3+UFzVo5GHF7Yz6n6BpMKrZ9mklJnc9dv2N33AKnF2lxDojO77FuMKbwqAw1JU9+vaer7NYPnehxNarym6NtUFtxhKVI0oBEI/oG+/h9a1ijI/wYF+V+3rOGQPZms386pGQcADvb0cv3Dj1tyQiBVC2R3Ryc3PvIE/VFrjd1aggGueepRS07IgEZ9bw/XPPUYXRHjJmNH0xkJcfWLj9BgwQkZ0G
gO9nPVCw/TGrLWSKwvHuG61x9gb196JwRSVWQ7oyGuXfoAh4K9ljRCyRi3r7yP7X3Nlg5nalLSm4hw6zv3sC/QYUkjqiX42rp72NRTjyT9IVBN6oSTMb685s/s6jfunHw0SV3jh1vvZkNPqiOuFY2YHue7W3/P9r79ljR0qfO7vb9nfc+GwxrmKjo6SZnkV7v/l2192y1rPH7od2zoeduyhiZ1Hjz4S7b1rbGkIaXkrdbfsKX3hcMa5vVUJBoSnddafsau/jctaQBs7vr1YSck9UzTaYBkfeed7O9/2rJGU99vDjshkL5YWWq8ofdOWgN/tqwRDN512AmxrtEf+AX9gV9a1nD4aHAcEQd0KbnjyWcJxuIZlW7XpORgTy/fenlpWlspJZ9/8a/0RCIZa7QGA3z11Rct2X/5redpC1tzdI7W6I6G+dwbz2IlQPiv777AoWBvxhqBRIxPL3/cksZ/bnqRvf0dGWnoUhLVEnz+3YdI6ukrVf7vrpfY3teYUUE3HUlCT/K1dfcQ0xJp7R88+CJb+vZmpCGR6FLnB9vuImzSEXiAF1teZmPvpowa5g1s2fx6z2/pT6R3QN/tfJWNvSszbMqX0njo4C/pjXemtd7R9xpb+14km2JrrzX/lJ54Y1q7xuAb7O57MOP5AdZ13ElvbFdau97Icpr6fpWVRkPPfxGIrk1rF4utprf/B1lp9Ad+QST6VlaPdfhwcBwRB945cJD93T1Z9Y/RpGTp7r009pm3817f2szm9rasNVYeamBPd5ep3Y7udt5tachaY2NHC5s6W03tDgZ6eKNpT9Yae/s6WdVWb2rXEQ3wQuOWrCq+alLSFO5lWetuU7v+RITnGtdkpaEj6Y4HebNti6ldVIvxfNPbWXXU1ZEEkxHebDdflJJ6kldaX8t4fki5CXE9ztsdb5s/F6mzrOOvWWlAKsrzXpe5sy6lZG33Y2RbFl4i2dyT/jnu6r2fbC/7AsGevkfT2rX0/4nse8cotAb+ktYqELzbhoZKIHhXlo91+DBwHBEHHli/0VanW0UIHt1ovig9sNmehioED27ZaGrz4A67Ggr3b19vavPI3g22+saoQvDA7nWmNk/Wr8+2AvkRjYf2rza1ealpHUmZfX8PBcHjB1eZ2ixvX09Uj2WtIYDnmpabRpDW9WwgmAxmrSGRLG17E92k+eHuwCb6EuZOsLmGzntdr5HUjSNIzZGt9MQbyPYPL9HZ2vsyCd04gtQb20V3bCvZ9o6RaDQEXiau9RvaRBP1hxNTs/1saXSHXyWebDO20FqJRF+2pRGLLSeZrM/y8Q5DjeOI/IPTEQyxbN8BW910NSl5ZONmwwWjPxbjxb27bWs8sX3rkaJpxxJNJnliz1abGjrP7d9BMD744qlLySN7N9h+Ha837qEzGjK0ebR+ra3+N5qUvN95gKZQj6HN04fez3p+SEUsdvQ3sS9gHEF6ueWdIydJskECTZF2dgbqDW3eal9mSwOgJ9HDtn7jXJH3u1631YgPIKwF2dFv7IBu633pcPJp9iRllD39KwzHD/T/1baGTpKG4MuG4x3BJ7DXSffwPKGnDMdC4cdtzw8qwdDDQzCPw1DgOCL/4DT29dtsUp+iLxojGI8POtYaDJDU7XTwTBFJJuk2SFrtjISOVF21Q0LXaQ0PfofdH48SSGR/hz+AjqQpNPhWVkLX6IhaS5pNx6GwsSPSHOkZkr97U6TbRKMzq22ZY2mJGOdXtEbbhkSjPWqc4NsRa8q6Ed8ACgpdceO7/J74oeOqoWauodKXaDEcDyQabGsIVIIJ41yUaLIee916ARRiyYOGo6lIht2lS5LUjDUcPlocR+QfnHBicOchG0LxwUPPoUT6pEbrGoM/33ByCDUM3pNQcgjfKwON8FBqGMyV1DVb2zIf1DB2zGL60LyWiGZ8KitmY+tnAAWFqG6mYe1UmDmCmMm2SdxkLBONuG58uiyhZ7+F9TckSd04mqfpIWztKwKgH57HYF
SGsO/s6Eh9aBx+B/s4jsg/ODnu9A3VrJLnGXyuXJul3T+oMXhxs9yhfB0Gcxn9PjuNwV9HTpoKqRlpGFRCdSkqbmE/fA6Qa1Jt1WehWZ8VclTjkvo+1V5Jf0gdtfWrxgXtvErmxe6OR5rO41HslfS3Mo9bsVfSP4XApRjXDVKVPOwuKwIF1URDEbm2NUBBKE5tqhMFxxH5B6emqNBW8uUAJTl+cg2qrFbl5+POsNHdYOS63ZT4B7+Yl/lzyMmwCd1geBSVytzBG7Tlu30U2ewzA6nk3hG5g5cAdysqVVlURz0WAdTkGpcAH5Fjrz/LADU5ZcYa/grb+RsAw3OMG81V+6pt528AVPqMS5lX+EbY1tDRKfcaV3It8dYOSf5Gsce4XHqBp862hkQj321czM7vHk22J3/+pqHjc402HHe5xmA/IiJwm2g4fLQ4jsg/OKW5OZw9bgyqYu/UzA0zpxtWK8z3eLlk/CTbp2aumzIdt0FvGK/q4prx02xrXDF2imF0RRGCG8bNtnlqRuH8kRMp8RnfuV5XN+9wke1sNQSLh42hKse438mVNQttLRcKgmlFNYzKM3YSLqg+2Vb+hkBQm1PFuLwaQ5szhp1uO3+jzFPKxHzjMukLS8+xrZHnKmRiwSzD8WlFF9rO3/AoOYzNP8VwvK7gMvt5KMJDTd65huPledcwFE5Ced6VhqO5Oddg19kBndyc623O4TBUOI6IAzfPnoGm29vXvXaGecvvm6fPtH3a5Iap5uW4b5xoX+OmSTNNba4fO8tSQTJjDZ2bxs02tbmidpatEtSalNwwer6pzfnVs3Ar5s34zNCRXF1zkqnNKeWzTLdV0iGRXDz8VNP3YlbxDApc2UeQBIKzK85CEcaXwjF5UynxGDtc6TUUTio9F1UYv98VvomUeuvIdoEVKEwtuhCXYrxVVegZQ6lvBtnXEVEZlX8hbnXwiCGA1zWCQt+p2KnxUZJzAW7VONKmquX4/RfZ0vB5z8Tlstft2GHocBwRBxbWjGRieVlW0QRFCC6YOJ6qAuOLE8CMikrmVFVnpaEKwRm1dYwuNu82Or64jNOG12WtMb9iBNPKKk3thucWcn7NxKyiIqoQTC6uYP4w4zt8gFJvHpfVzMgqKqIKhdrcEk6pGGdql+vyccXIBVltnahCYZi3kNMrppraeRQ3lw4/PeP5IRVxKXDncvqwuWmei8r5VcZ36GYIBD7Vxylli82fi1A4fdhlWWu4hIv5pWeZ2wnB3JLryC7RUyCEwvSii9NaTiy6DTsRi7EF16a1qSr4LNnX+NCpKvhUWqv8vM+R/evQyM//QpaPdfgwcBwRB4QQ3H3VpRT6fRkt4qoQjCsr5Ufnnm3J/vfnX8Kw3NyMNUYWFvI/Sy6wZP+bMy5iZH5hxhoVOXn8/qxLLdn/ZMGFjC3IzHFThaDYm8MfT7vaUrTj/02/gClFmTluqhDkujzctegmVJM7/AG+NP48ZhaPysjhURB4FBe/mns7HgsRletrz2Vu8eSMHB4FgUtx8cOpX8BvIRn1vMolzC+Zm5GGQKAIha+P/yp57vRJnAtKzmZucaadWwUguLXu3yh0mzvRABMLz2Jm8RVZaEjOr/4OhZ703YSrc09lUvFnMtRIMW/Y9yn0jk1rV+hfxMiib2WlMarkP8nzzkxr5/XMorjozqw0Cgv+A5/X3Pl0+GhxHBEHAKoLCnj8xmupzM9Pe7cvDv9Mq6rkweuvIs9r7XREeW4uj195PTWFRZY1xpeW8fgV11HosxbiL/L6efzCGxhfXGZpWVIQjCoo5qmLbqTMb62LcJ7by8Nn3cjUksojzzOdRnVOAY+fczNVOda2EXyqmz8uuonZJanoSVoNISjz5vHgKZ8yTVI9Grfi4r9n38qCsnFHnqepBoJCdw5/mP85RucZJ3cejSpU/n3ypzipNLWtls5ZUFDIcfn4r+lfZly+eeToyGOEwudG38Gi0oWWNTyKh3+d8HXG55tHjgYQQn
DlyM+xsHTJYQ3zS6eCgku4uK3um4zPn2FJA+DUYZ9nTsk1ljQEKgoqF1R/l7H5J1vWmFL8eSYXf+7IHOk0BArzhv2A2nxrNwOQiorUFH378H+l20JRAcGokh9RkX+jZY283FspLvwJqW+HFQ0oLPgu+XlONOREQ0g7G94fMpm0EXYYGnojUR5cv5EH12+iMxzGpShHutgqQpDUdeqKi7h5ziyunTEVryvzPIP+WIyHt27i3k0baAsFB9UYWVDArdNnc8PU6fizOP4bTsR5cOdG7tu+nsZgPy6hHKlWqiBISp2q3HxumzybGyfOJN/gWLAZMS3Jo3s3cN/utdQHegbVKPflccv4Odw0fjaFnsyPgcb1JM8c3MgD+95jf7ATl1CQMpUCqoiURoknh2vr5nHj6PmUeK05U0eT1DVebt7AYwdXsjfYinpYA1ILsCZ18l1+rhi5gKtrT6LMm/l3UZc6y9rX8XzzcnYHGg5rpPJAlMMaOaqP86oWcUn1aZT7ijPWkFKyunstS9teZ09w75GTLhKJgkBDx6t4ObX8ZJZUnMMwX3lWGtv71/J2x4vsD237gIZAQUfDLTzMLTmdk8svND0pY0Z9cDUbup+mIbz2sEMikOgoKOjoqMLFxIKzmVVyJaXeUVlptIVXs6fvYVrCAxVwUxoC5fD/q9Tkncu4ohso9k7MSqM/uprW/r/QExnotaOQ2rZRSW2tCEpyLqCq4FOWIiGDEYuvJxC8m0jkBVJbW8rhuZXD/y3x+84jP+8OvN6FWWk4ZE4m67fjiPyDoek6saSG3+0y3SJI6jpv7t3PyvoG+qJRFCEo8fs5Z/xY5o8cbvpYXUqiiWRaDU3XWX6wnmUHD9AXiyKAIp+fs+vGsGhkjWnURJeSaDKJz+VKa/dOUz1vHNpHTyyClFDs83P6iLpUPonJsWIpJZFkAp/LbaohpeT99gZea9xNTyyMLiVFHj+LK0dx5vBxuIZIY0P3IZY2b6c7FkaTOgUeH/PL6jiraiJuxfiOUEpJREvgVV2mWzZSSrb3NfJG2xZ64kESuka+28/MolGcUTnVdCtGSklUT+BRzDUA9gUPsbx9Pb3xAAmZJNflZ2L+KE4pn4XXpPaIlJKYnsAtVFST1wvQGG5kVdf79CV6iesJctQc6nJHcVLpArwm2z1SSuJ6ApeioqaptdIebWJ9zwr6E93E9Rh+NZdq/yhmFZ+CTzU+FSWlJCETqEIxTWAF6I03s6NvKYFkO0k9glfJo9Rbx8TCs/GZJI0CJPU4QghUYe7IhxItHAy8QCjZTFIP41byKPDUUZt/IV7V3CHU9BgIJa1GPNlKR+gpYokGNBlEVfLwueooz7sSt2ruEOoyVRhPEeaRV03rIBR+nGRyP7oMoIg8XK5acnKuwaVWmT5WynjK0RP2j+c7pHAcEYcP0NYf5Ik1W3hi7RY6A6Ejd9N1ZcXcuHAmF8+cRK7F7RUjOoMhnti4lUc3bKG1P3BEo6a4kBvnzODy6ZMpsLi9YkR3JMIT27by8ObNNPb3IUkFZUcUFHLTjBlcNXkKxQZ1RqzSF4vy5K5tPLBtAwf7e49oVOXlc+OkGVw7aZrlLRwjAvEYzxzYyv271rO/vxtdSgQwzJ/H9eNmcv24GVTkmC8y6Qgn4zx/aCsP7lvD3v4OtMMapd5crq6bxbV1s6k2Od5rhaiW4LWWLTxa/x67Ay1HolpF7hwuGTmbq2rmMyLH2jaREXE9yYr2TTzb9A67A4fQDjeny3flsKRyLhcPX8RIkzojVkjqGmt6NvJKy1vsCe4/opGj+jmlbD7nVJ7GyJzsIhsDaFJjW996VnS8yv7QTrTDlW29ip/ZxSdxctk5jMgZZUtDlxr7g+tY1/08B8Ob0GSq5YFb+JhYcDKzSy6i2j/eloaUOi3htezse5Lm8Gq0w06CKnzU5J7KxKIrKfdNtXXqS0pJb3Q1jYGH6IosQ5ep6rmK8FLqP50R+TdS5JtvWyMaX0tP8F6CkV
eQMlXZVuAlx3c6xfm3keM9BWEh18phcE5IR+QnP/kJ3/72t/nqV7/Kr371K0uPcRwRe/RFovzgr2/w2rY9AEcWigFSaW7gc7u46aRZfOWsRbjUzL54wVicH776Js9v3Yk00ABwqyo3zJnON848BY9BLRAjIokE/7l8GU9u34amD94OTgCqonDNlKl857TT8GVY3CymJbnzveU8vH0zCT21SByroyAQAi4bN5kfnnxWxtVck7rOzzcu596d64gf7otznIYQIOHC2on8aOG5FGZYQE2Xkt/uWMGfd79LREsc+RsfqyGl5KzqCfxo9oUZb+dIKbl3/wr+sm8FoWQMgTiuXohy+HeLysfz3WmXUe7L7PsrpeTpxrd5oP5VAskICuK4RoAD2xSzisbxr5OupcKXudPzRtvbPNLwVwLJoKnGxPyxfG7MzVT7reXGHM2a7rf5a9PDBJK9R7Y9BtOozRnLdTWfpdqf+bHSnf3v8Hrr3QSSnYNqCFQkGhW+0Zxf9VWq/NZyY46mMbSS99t/STDZcmS+wTSKPHWcNOxbDPObH+kfjO7Iu+zq+h6R5EFTDb9rFBNKv0+J3/wI+WCEY6tp6/4m8eQuUltEx57wSf3OrdYwrPhH5PmtJeM7fJATzhFZs2YN11xzDQUFBZxxxhmOI/IR0NYf5LY/P0Fjd5+l2hoCOGX8KH5zw8V4LOZ9dIXC3PrQk+zt7D7OARlUQ8D8mhHcdc1l5BhUYT2W/miUW555iq3t7ZY0FCGYXlHBvZdfSYHXWt5HKBHnUy8/zeqWRkuHJxUhmFBcxkMXXU2J31pp7qiW5HPLnmJF8wFLGqoQ1OYX88g511uOjiR0ja+9/zSvNe+0ZK8KQaW/gAdOvYURuUWWHqNJne9vfpoXmzZa1FAo8eRy14JPMSrPWk6GlJLf7H6K55pXWbJXhEK+y8/PZ36BMXnWIhdSSh5ueIbnml+zpoGCT/Xy/yZ9hbH5dZYeA/Bq69O81PKEJVuBgltx87kx32Rs3iTLGqu7nuGNtj9a1lCEylUjv8vovDmWNXb3Pcu77T8//F/pPsEKCgqnVf0nNXmnWdZoDT7H9s5vMpDXYU4qp2Vy2U+pzLvEskYg/DLNXZ8n5XykO/6buo2qKP4ZRXnWk2gdUmSyfn/ocadgMMiNN97IH//4R4qLM09Ac8icYDTGZ+59isYea04IpL72b++p51tPvopuobhZJJHgjseeYZ9FJwRASljT0MQ/P/MimoVuvLFkkjue/yvbLDohkIoIbGlr4/PP/5W4lr6WQVLX+eLS51jT2mS5goMuJbt7Orn95aeJWmi2p0vJ1955nreb6y1raFJyMNDDLW88RtBCx18pJf+x/kWWWnRCBjRaIwFuf/tBeuPWmq79csfLlp2QlIZOdzzIF1bfQ1fMWtO1ew68bNkJgVQibCAR5t82/oG2qHHH4aN5rvk1y04IpEq0R7UoP97xG1oixl10j+btjtcsOyGQKm2e0OPcte+nNEcOWXrMlt43LDshAxqaTPLkoR/SEtlt6TEHg8t4t/1nWHMQIPVuaSxr+Q5tkY2WNLrCKw47IbpFDQnobO/8Jl2Rty1phKPv0dz1OSCJtRokqdfb1vNvBMIvWdJwyI4P3RH50pe+xIUXXsjZZ6cPb8ViMfr7+z/w45A5f1yxhgMdPRlXS5USXtm6m7d27U9re9/qDWxv7ci4kqkuJcv2HuCFbbvS2j62dQtrm5oy1tCk5L3GRh7fujWt7bN7trP8UL1lR+dojc0dbdy7dUNa29cO7eblhl3Hhf2taOzp6+Kube+ntV3VfoCnD27KuByWJnUaw738bseKtLabew7xSP27GSqkXkdnLMhvd6Vf+A8EW3jo4OsZa+hIAskwf9j717S27dFOHm54JiuNqBbjLwceTWvbn+jl6cb7MtaQSBJ6gsca0jsXUS3Iyy2/yVgDJLrUeKHpl2mrBCf1KCvbfpyVhkTydusPkdJ80ddl4qhISOY62z
v+DV2a3xBIqdPS/RWsOzofpLX7n9FNOhs72ONDdUQeffRR1q9fz513Wis8c+edd1JYWHjkZ+RIpwRvpsSTSR5bsznjhXUAVQgeem+jqY2m6zy4dmPWGooQPLDGfAGXUnLvxvSLvBECuG/jhrQX2nu3rs+6d4xEct/WDWmjO/fuXJd1DxxdSh7avSFtdOeBfWssFTEbDE1KnjiwkXAybmr3+MH3bGjovNS8if6EeeTluaaVtjTe6dhCV8z8BmZp24qsm9jp6Gzu20FrtMPU7t2uN7PusyPRqQ/voSly0NRuS+/rRxJSs9HojDfQFNlhancg8DoJPZSVBuiEkq00h1ebWnWGXyehd5OtI5LQu+kMv2FqFY4uJ6k1kl01VokugwTCz2XxWAcrfGiOyKFDh/jqV7/KQw89hM/iaYlvf/vb9PX1Hfk5dMhaeNLhb7y2bS/9kfShfCM0KXlvXwP1ncYh7uV7D9AezPbilFpcN7e0sb213dDm/cZG6nt7s26ZJoF9Pd2saW4ytNnc0crWTuvbPoPREgqworHecHxvXxfvtTXY6oHTHYvw2iHjMHpLuI+3WnYfOe2RDWEtzouHthmO98RDvNqyxZZGUtd4vtHYuQwlo7zausaWBsBLLe8ZjsX1BK+3vW2riZ2CwuttxhEkTWq83bHUVsM/BYWVncaRISkla7ufI7vFO4VAYX33C6YaO3ofx06DOYHCzr6nTG0O9T+IvaVI4VD/A6YWPcG/kH1vmpRGT/DPNh7vYMaH5oisW7eO9vZ2Zs+ejcvlwuVysXz5cn7zm9/gcrnQBrnD83q9FBQUfODHITNe2brbVndYSEUslh4+aTMYL+/cY6vLLYCqCF7ZYazx0p7dpvU3rOBSFF7eY7yAv7R/d9Z33wOoQvDiPuNtplcadtl+rxQheOGg8Z3r0uZd2O1GKoAXG40dkRVtO207CBJ4tXmz4fja7l3E9PQ5N2boSN5sW284vr1/N2HNWj6MsYbOys41huMNoX0Ekr22Ndb1GOfJdMTq6U202tKQ6Ozof8dw6ySYbKEnvhc7zo5EpzG0iqQ++M1RXOumL7YWex17dfpia4lr3YOP6mFC0TfJvv9NSiOW2EYi6dwcfxhk334zDWeddRZbtmz5wO9uv/12Jk6cyDe/+U3UDI9wOlijIxCydYcPqYWvO2R8se4Mhmzd4UOqDHd32HjPtSsStpTQaoYuJV1h49fRFQlj5yILqQhSV9TkvYqGUvUObLxfupR0RIwjUF2xEKoQJG1oSKAjapxM2hMPoQrFtjPSGQsYjvUmgoMeN86Unrjx6+hPGOtnQiBhrBFI9g2JRlQLo0t90M7AIZuOzgA6SWJ6BJ96/BHuqGYt8Tc9kpjWh0s5vt5LwsB5yIaE1oNHPf4It6b3YP9TlSKpdeJ2uvYOOR+aI5Kfn8/UqR/szpmbm0tpaelxv3cYOpKavYVigISJE5C06SBAKr/CbB6jeiEZacj0GkNBwiR/Q9PlkFwDP+y/R7p5kjYdkAHMHJlUkS/7roi5xof/OvQh0kjNpQ3qiOi27u6P1Rg8z0TKIdTAQMPg99kgGTyadmwtkg9Dw8EeTtm4TxjFufZLFEug0Gdcg6PY78fmbgMCYVpptcCbWSfgwVCFYvo6Crw+W9UZIVW4q8jkdWRakMyIEq9xxdgCjy/jEzmDUWTSCyff5RuSBbbAbayR5/Lbyqv42zzG73muaq/y7gB+Ew2/SXn3THAJFy5l8Ho7PiV9x2Cr+NTB5/Io9qr7Ho3XYC6XYq+6r5W51CHUUJWiIZvL4W98pI7IsmXLLBczc8iO+XUjbeeIaLrO3LoRhuPzakbYvstP6jrzaoYbji8YMcL29k9S6swfbvw6FlaNsB1N0JEsqDLWWFAx0nY0QUGwsNK4E+2Cstoh2Y47adgow/G5pXW2XQRVKMwvG2M4Pq1wtE2FlMbsYuMy5uPzR2d9YmYABYWpBRMMx0fmjMaVpoeMFY0xJkXNhvlG4V
HsOVUChWr/RBSDnjoFnpF41SJbGiAo9IzCbeA4edUKvKq98vmpearxqoNXvVVEAR7XeOzmUalKKR6X9WJ2DtZxIiKfMK6cM9VWtEIAI0sKWTjaeB/0smmT8Ljs5fiU5+VyxjjjhefC8ePJ89jrf1Pg9XLBOONF6exRYym1WBnVCL/LxeXjphiOL64axci8QluXQEURXDNmuuH4jJLhjC8YZu8yK+G6OuNKm2PyK5hRXINiQ0WTOlfXzDccr/KXMq9koi1HQZM6l4442XC8yFPIgtJZtjR0dM6rPMNwPMeVy9ziU2xrnFp+nuG4W/Exo+jcw515s0OiM7fEuCqpIlxMLLzClgZIJhddYxh5FEJhZMFN2HMSBCMLbjbsCyOEoDj/09i7e1IoyrsVkaa5n0N2OI7IJ4zSvBzOnToeVcn+i33TSbNMtyzyfV4unzY5aw1FCG6aO9P0VIzP5eb6adOy3p5RheCGadPxmpSrdykKt0yZmXUESRWCqydMNXWYFCG4dYL1UtqDaVwyajIlPmOHSQjBLWPnZ32ZVYXgzKpxVOaYn1K7rvakrLeAFARzS+vSlnm/fMTJWR+tFQjG549gfL5xhArg3MrTbR3frfINY1KBea+Wk8vPsaVR6C5mcsFMU5tZxRcc11MmE3xqPhPyF5vajCu4xNby7RI+6vKXmNpU5V2BsHG0VuCiKu9yU5uCnCsQwl4EqTDXKfP+YeE4Ip9AvnD6AtyqmnFkRFUEI0uKuGK28R3+AHcsmkeO25PxIq4KwbC8XK6fbXyHP8CnZs2h0OfLSqPI5+e2WbPS2t48eSbDcnIzdngUIch1e7hjxry0tteOncHIvKLMNRB4VRdfmpq+sdclNVMZm1+WsYYgtZ3x5cnpe4KcWTmZyYXVGR95FqScpS+NPyet7bySiUwvHJ115OWO0ReltZmYP5ZZRVOz1rip9sq0uUUjc+qYVXQSIkuNS6tvHDRJ9WhKvSOYWXQe2UYTzhh2u2EOygC57mFMLro2q/kBZpbegVsxjzq61WJGFX0ha41RRZ/HrZq3D1GUXMoK/y1rjeL8O3C7qrJ+vIM5jiPyCWTMsFJ+e+MluBTF8iKuKoKiHD9/uu0Kcr3pt0RGFhVy97WX4lYVy4ufKgT5Pi/33HAlRf70SZwVeXnce/kV+FyujDT8bjf3XX4Fw3LTJ/SV+HN44MKryHV7MtLwKCr3nH8lI/PTJ8Lle7w8ePZ1FHn9GWm4FIU/nX4lYwpL09r7VDd/OflGyn35ljUUBKpQ+N+FVzG5qDKtvVtR+c3cW6jyF1l2RsThf34882pmFBvnuRx5TkLhP6d9mprcCsuOwoDVNyZey+yS9G3uhRD88/jPMCp3ZMbOyG2jrmVuyQxLtjfWfp663AkZOyMXVV3LnBLzSMUAS6q+yOjc2WTqjJxUeg0zi423fo5mTtkXqck7PWONiYVXMbnoOku2owq/RGXuZRnND1CZezmjCr9kybY477MU5d6eoYIgz38h5YXfyfi5OVjnI+m+my1O9117bGxo5osP/pXecBRFiEETGlVFoOmScRWl3HXL5VQWZpYpv62ljc8+9iwdobCxhhBoUjKquIg/XX85NcVFGWns7urk9meeoSUYSKsxPL+Aey6/nLEl6Rfvo6nv6+G2l56ivr/3yFzHMqA9LCeXe86/killx9dFMKMp1MftbzzB7r5OY43DrehLvH7+fMbVzCrPLJGvIxrksysfYVtvq6HGwAHZfLeX3590LfPLazPS6I2H+dq6B9nU02BYW2RAI0f18NNZ17F4WHoH4WiCyQg/2Hof63t2m2oAuBU3/z75Rk4pTx9lO5qoFuM3e/7Mup7NKCiGWykCgSpUPj/mZk4pX5CRRkKP83DDXazvWZVWQ6Bw9cjbWVR2VkYamkzySvNv2dz3GgLFcLtmINfjzIpPM7/UfCvjWHSpsabjN+zsewKBangkNqUvmVn6GaYX35bRqTQpdfb3/g8H+/5I6h7Z6N
itCujUFt7B6KKvGeaGDK4h6e7/NZ39Pyf1CTLa2lIBjaK8TzOs6PsIg4ReB2MyWb8dR+QTTjSR5JUtu3jwvY1sb/5gSXUBnDq+jhsWzmTx2FqULHM+4skkr+7cywNrN7Cx6fhqj4vqarh57kxOH1uHmmW11ISmsXTfPu7buGHQsu0Lho/glpkzOXv0GNxZFstL6jpvNuzn3i3rWdXccNz47Ioqbp06m/PqxuFVszsVoek6K1oOcN/OdSxv3n/c/vu0kkpumziHC2sn4nNllxinS8m77Qd4cN8a3mzZfZzG+IJh3Dp2PheOnEKOK7uEYCkl67vreezge7zZuv243JFRueVcP2ohFwyfSa7L+Ah1Oo3t/fX8tXElyzo2HueMVPtLuXzEKSypmEeeybHgdOwN1vNa63JWdq4meUztjDJvCedVnsHp5SeR787+yGxjuJ6Vna+zunsFyWMatBW6izml7FwWlp5Ovjv7o6Yd0YOs73mRLb1LScgPVjLNUYuYU3IRM4rOJd+dmZN+NH3xBnb3PcPuvudJyg8WJPQqhUwoupzxBZeS6x78BIsVIolDNAUepTnwGEn5wQJ0LpFPdf61DM+/Dr87+8JiiWQjvaGH6A3eh673fmBMiFyKcm+kKO9mPG7jU14O5jiOyD8A3cEwW+pb6Q9HURWFknw/s8cMx2OSnLm3vYvm3n4i8QR5Xi+jy0uoKjKOgPSFo2xsaKbvcESlODeHOXXD8bmNNfZ3ddPY208oHiff62VUSREjiowvroFojPWHmumJRBAIinP8zK0ZTo7HeBGu7+2hobePYDxOnsdDbVERtUVFhvaheJy1TU30RFIVUIt8fuYMrybfa7xAHurvZX9fD8F4nFy3h5EFhYwpOr5q4wCRRILVLSkNHUmR18ecymoKTWqMNIX62NfXTTARw+9yMyK3kHFFZYb20WSSNe2H6I5GSOo6RV4fs8qrTRNZWyP97OvvJJCI4lPdVOUUpE7YGNypxjWNtZ2H6IyGSOoahR4fM0qrKfMZL8KdsQD7Am0EElG8qpthvgLG51caaiR1jQ3dDXTGgsT1JAVuP1OKqhnmM/6O98aD7A82E0hG8CguSr0FjMsbYaihSZ2tvQfpjPUR15PkufyMK6im0mecSxBMhDgQPkQ4GcYlXBR6ChidW2OYq6FLnV2BerpivcT0OLkuP3W5w6nwGf8NI1qYQ+H9hLUQLuEiz1VATc4YQw0pJQdC++mOdxLX4/hVP9X+EVT4jLfSYlqY1uheoloARaj41QKq/ONRDY4USylpjuyjJ9FGXI/hVfyUe0cwzGe80Cf1KJ3RHcT0fgQCr1pImW8yqsGpEiklXbG99CeaSOgR3EoORZ6RlHiNT85peoz++GaSWqpSrUstpMAzHVUx/t72x/cQjNejyTCq8JPrrqHAM8HwcyJlnEh8Q6r6qpSoShE+zwwUk7yWSGIP0cRedD2IovjxqCPJ8Uy3XZPok4bjiHxCkVKyub6Fx97exCsbdh9XGbQgx8tVi6Zz1aJpDC/N/s5qa2Mrj7y7iRc37iRxTKXWXK+Hq+dP45oF06ktK8paY2drBw+v28Szm7cTS37wLtTvdnPVrClcP2cGY8qMF/907Ons4qFNm3hyy1YiyQ9WcPS5XFwxZTI3zZzBhHLzkxxm7O/t5qGtm3ls+xaCiQ92r/UoKpdNmMQt02YytTz7O8RDgV4e2rWRh3dvpD/+wTtdl6JwSd0kbpowi1nl1VlfDJvD/Ty6bz0P711PT/yDJetVIThvxERuHjeXuWUjs9boiAZ4umEdj9a/T1fsgyXrFQRnVE7kuroFzC+ty1qjJx7kheY1PH1oJR3HdOEVCE4qm8AVIxYzv3Rc2mRQIwKJEG+0vccLLcvpiB1fonxm0UQurDqNOSVTsu5lFE6Gea9rJW+0v0ZH7PjmkOPzJnJmxTnMKJqFmuW2QUyLsKl3Oe91vUhHrPG48ZE5E1lYegGTCxamTWo1IqFH2df/Bl
t6n6I7tu+48XLvBKYWX8no/NNxmTgYZmh6jObQqxzoe4S++PH9kgo8E6gruJ7qvPNxZVl7RZdxesOv0BG4h1B83XHjPtdYyvNvpyT3ctQhLDb3ccZxRD6BROIJvn3/y7y1Zd+RvI7BUIRAIvnKRSdz+1lzM7qgx5NJvvPka7y4cZepxsDY589cwJfPOSkjjYSm8cOX3+TxDVvNNQ7nN3xq4Rz+9exTMjo5o+k6dy5fwT3r1hvmSRytccOM6XzvrDMzarInpeS/31/Jb9e9b0nj8vGT+OmZ5+LJYNtISsn/bXmPX6xfgWJB47za8fzqlIsy3tK5d/cafrxhKQgMC6MN5GmcWjma/110BXnuzBaNpxvW8Z+bn0dKaXgEeEBjTkktv553AwUmlV4HY2nrBv5r2+MkpW5YoXUgT2NSwUh+NvN2ij2ZLRrvd23mF7vuIaEn02rU5lTzvSlfpNRblJHGtr4t/GHf/xLTBxzbwXKJUhoV3kq+Ov4blHkzc6brQ9t4qP5OonoIo7L6A/kmRe5ybqn7HuVe4wKEg9Ee2cHLTd8kqvWl1chRS7lgxM8p9WW2FdIf2817rV8gpnWQyisZLOcj9XuPUsyCyt9R5MusxUg0sY+97TcT1xpNNFKvTxUFjCn/M3m+zHKJPok4jsgnjEg8wWd/9yRbD7ZlVEHztrPm8rVLTrFkG09qfP6eZ1i9/1BG/dmuXTCd/7jsTEvOSFLX+fLjz7Fsz4GMahNcMm0iP730PEvOiC4lX3/xZZ7fudPy/AI4e+wYfnfJxZZyWKSU/PuypTyyfUta26M1Fo+o4Z6LrrCcw/LjNW/yx23GXV6PRRGC2eXVPLjkWsvOyG+2ruDX297OSGNi4TAePfMWct3W8kvu27eS/97+qmUNVQhqckt5YPEdlp2RZxvf4xc7n7bcqUZBocJXxF3zvkSJ11qC9rL21fzP7vsz0ijy5PPzGd+gzGt+vHSADT1r+cO+3wJYKnevoJDjyuVbE/+DYT5rUbe9gY08UP8jJNJSHRKBgkfxcceYO6nwpT/5BNAS3sSLjd9Al0nLGqrwcEnNbyj3GVetPZre2DZWNX8KTcax1llXQREuFlbdTalvtiWNSHwXu9quQJdhyxqgMLb8Xgr8p1rS+KSSyfrtHN/9GPCdB1/J2AkBuPeNtTyx0rjt+tH88JnXM3ZCAB57fzP3vW3cdv1ofrp0ecZOCMBzW3byuxXvWbL91cpVGTkhkFpUXt+7j58sX2HJ/q4NazJyQgY0VjY28B8r3rBk/8DODRk5IZBywtZ3NPMv77xkyf6Z+i0ZOSEDGjv72vmnVU9j5R7mjZbtGTkhkOpo3BDq4itrHrbU32Z1127+e+czgPXamTo67bFe/nXjX0jq6ReY7X37+PXuBzLW6I0H+N7W3xHT4mntD4bquXv/75FYc0IGNMLJEL/a/XMimnEX6AHao4d4+OCdpOJS1oqhSXQSepT7DnyfcLI/rX1/vIWXm75l2QkZ0NBknJcO/SuhZGda+2iynfdavoAuY1hzEAB0dJlkdcuXCCeO34o6lqTWzd72GzNwQlIaoLOv8zNEEnssPsbBcUROcHY0tvP6pr1Z9xL57YurSCTNv0QHO3t4Zt32rDvV/+71dwnHzbtStvUHeXDNpqyrNN69cg19kaipTU8kwl2rM1u8B5DAves30B40bu8OqcTXX695N2uNx7Zv4WBfr6ldTEvyi/XWnKJj0aXkxfqd7Og+Pq/gaDRd52eb3sxaY3nrPtZ3HX966WiklPzPjteyKrelScn67oO823F8XsGx/GHvy1kopBJadwWaeLvj+LyCY3mo4YWsNHR0GiOtvNOZ3ll/oflZpNTJtBS5jk5nvJN3O99Ja7ui4yk0abytZKYRTPaypvu1tLabeh4lqccyrvoq0YnpAbb2PJ3W9kDfwyT1/iwqy+poMsq+vvvTWnYEHyKhd2LdCfmbhpQJ2vp+l+Hj/nFxHJETnMfe3mSrXHtvKM
KbW/aa2jz63mZbnW7D8QQvb9plavPYemuRGSMSmsYzm7eb2jy5ddtxCbyZ8tiWrabjz+7ecVziayYoQvDwtk2mNq8c3E1f3NzpMkMVggd3bTC1Wdayl/aoudNlrqHw4J61pjZru+ppCHXbKD2v8Gj9+6Y2O/sPsTvQlHXXXgXBU4dWmto0hlvZ2rcn6/L2AsHzzctMbbrjXWzu22irLPwb7a+ZRqlCyX629L6TtYZE8n7XS+jSeGGO62F29b1sWGckvYbO9t6/ounGESRNj1EfeCLr8vYSjUOBZ0nqIWMbmaQjcB/GdUbSodETfo6kdnwys8PxOI7ICUx/OMoLa3cYJnRaQRGCR1ZsNByPJpI8tWaLrU63QsCDK40XvoSm8fC6zbY7xD6weoPhhVaXkvvXb7DVF0OXkgc3bDTtyHvv5g222nNpUvLwts1ETZyZe3ess9VBWZOSJ/duJXDMCZujuX/PWlvOpyZ1Xjq0g66o8cX80fr3sz41MqCxom03LeFeQ5tnDr1rS0NHsrH3APWhNkObV1rfsdXATiI5EGpkT+Cgoc3bHcuyLgc/oNIRa2dXYIehxfqeN2z1pgEIJHvYFTj+1MgAe/peO5yzkT1xPcj+4HLD8ZbQUpJ6wHDcCpqM0hh80XC8L/ImSd08qpgOiU5X6HFbc/yj4DgiJzC7mjrSbqukQ5eSTfUthgv4vrYuQjHzbZV0SAm7WzuJJQZfXBt7++kJp9+/NtVIM09nKERzwN7FCaAzHKa5f/B98FA8zp6eLlvODkAgHmd/7+B3SrqUbOxotu20xbQkO3s6DMfXdTbacj4BklJnS0+L4fjaroODVkTNBAls7jXez9/Qu9+2BsDWXmMnIRUNsachEOzo3284vjuwy7aGgsLeoHFeQkNoR9aRo79pqDSEjJ2d1shWmw4VCFRaw8YR1O7oBgTZFRT8Gwrd0Y2Go8HYmiHQkARj5lFDhxSOI3ICE4gY39Fmgq5LIgY5HIHo0GgA9BvkcAylRp/BXP2xj0DDJMKQsYbB8w0m4rYdnSMaBts7mq4T0ew5nwP0m2whhZJD8371x42d2GAi+y2sARQE/UkTjWTYcMyyhlAImcwTSma/TTaAEIKIZqwR1uw76gBRzTgKFtMDtqMuIInpxu9HYkg0dBKHC6UNhqb3DcH3UDpbMxZxHJETmGxLlWcyl1sduo/AR6Mx+FyZ1ABJh+ej0DCYyz2EGkZzKUJk3Xn2eA3jz6idLRPLGkPwfknAbVIULNuCYcequBTjO2zVZCwTzJ6rUWXVzDVMXseQaAjDCq0AinDZjrqAQBHGx8+FyVhGKkM0zycdxxE5gSktMG+fbZUcrxu3a/ALVGle7pBouBSFfP/gRa5KcofmdQigOGfwuhKlOTlDtLSm5hqMIq/PVl7F0ZTlDP6++1QXviz72Byn4RtcQwhBkTf73ixWNABKvEPz2Sr1GhcdK/Vk1qRxMCSSEpPCZiWeQtufLU3qFLqNn2uRu9j24qpLnXyXcb2GfFfJkcZ32SKR5LqMqzb71RIE9h03n2pcd8WrZt8rZwCBglc1rtrsVsvI9PTS8ai41ewrKv8j4TgiJzAThw+jusReITdVEVwwZ6LheG1ZEWMrSm1dAlVFsGTaOMO704r8PGaOqLKVgKkKwWlj68j1DH6Hke/1cvKoWluOgiIE84YPpyx38AXUraqcO3qcPQ0Ek8rKqSkY/GIuhOCS0ZNsaQigJr+IySXG3YEvrZ1q26kq9+Uyq3SE4fhFw6fbjrzku3zML6szHF9SOcv2Au5V3CwsM/6OnFo+1/6SJBQWlE4zHJ9fssB2/gbA7OJ5hmNTixbb3tKQ6EwtOtlwfEzBGVmfmPmbhsbYgjMNx6vzzhsSjeq88wzHi3MuIvNju8eiHZ7HIR2OI3ICoyiC60+diZ31QtMl15w8w3BcCMFNi2fZugRquuT6k4w1AG6ZN8tWAqYmJTfOm2muMWuWrQRMXUpumT3LXGPaTHsaSG
6fNsu0Eu0tE2fbTiS9bdIcU40bxtjTUBDcNHau6XbVlbVzs54fUo7hVbVz8arGYfoLqufZ2gJShcL51XPJdRk3JzytfB5eJfsQu4LCKWVzTCMis4vnkaNmHzlUUJheOJNSr3G0YEL+HPJd1iq8DoZAoS53qmmp92r/LArcwyFL51AgKPOON62uWuSdQoFnUtYaIMh11VDqM/58+txjyfMuBBvRHbdaQaHf2KFy+BuOI3KCc+n8KVnniqiKYFptJROGm/ehuHDmRHI87qy+1ooQjBlWwqzaalO7cyaNpdjvy8qpUoSgujCfk8fUmtqdVjeKyry8rCIvQkCJ3885Y817XSyoHsHoouLsNIA8j4eLxxnffQNMLa1kemllVhELAXhUlSvHmPfTGF1QyknDRmUdFRFCcO3omaY2lf5CTq+cmLWjICVcncaZKfLkcnbljKw1NKlz+YiFpjY5Lh9nVyzM+givjs4FVeblvt2Km9PKz8x660RH54xhZ5vaKEJlQemFWUeQJDoLSi8wtRFCMK34qqzmT2lIphZfmdZudOEN2Nk6qSu8MW1bivL828k+KqJQnncrYkjyiz75OI7ICU5hro/vXXdOxo9ThMDrdvHDG5aktc3xuLnzGuMwpRFCpJJHf3LteWm/1B5V5ReXX5DxRVCQei2/uPz8tIu/qij8+qILUURmKoLUndivLrowrdMnhODX51yIW8lu0+F/zj4fvzt9H5hfnHwBXtWV8daGBH62+AIKvcZ3+AP8eN4F5Lm9WTlVP5xzHuX+9A3jvjX1Aoo8/qwcnn+ZvIQRuem7L39p3EWUeQuychQ+NfocxuRVpbW7ofYiKn2lWWlcNvxMJhQYby8NcH7VRVT7h2elcWrZ6UwqmJLWblHZRQz3j83Y4REIphWezOQCc6cNYFLRxVT7Z2ahoVCbu4hxBemvdyPyLqIi5zQyXcIECqW+edQWpHeWivznUpxzacYaoJLjmcmw/M9k+Lh/XBxH5GPARfMm8c0rTwesBSNVReD3uvn9F65gdKW1xK6zp47lh1eek1rELYgoQuB1ufi/2y5j8nBrCVknj6nlvy8/H1URWCkWqwiBW1X57dUXM2ektc6fc0cM53eXXIxLUSwtsIoQRxyYxbXWGnpNG1bBny+8HK/LZUlDIFCE4Odnncc5dWMtaYwvLue+c67B73ZbWsTF4Z//XHgOl46ebEmjNq+Y+067gXy3NyNH4d+mn8l1Y8y3sAao9Bdy98LbKHTnZKTxuXGncfPoRZZsiz15/Gr2HZR68zOKjFxbcwq315lHEQbIc+Xwg6n/xDBfSUaOwjkVi7h11GWWbH2qn6+O+wYVvsqMNOaVLOT62lssNZ50K15uHvUdKn2jMnIUJuTP5YoRX7GkoQo35w7/McN8kzO48RBU58zirOrvoViIIgihMnvYzyjzzcP6Fo1CkXc68yp/hWJyKudvGgq1pb+g0H+WxflTGn73JMaW34OipL8ZcEjhdN/9GPHWln3897MrONTZi6qI4yquDvxu3riR/L+rz6SuIv3d5LG8s7uen76wnP3t3aYas2qr+c6lZzKxOrP24wCrDzbyo1feYld7p6nG9OoK/uP8M5leXZmxxsaWFr7/+ptsaWtDFeK4fIiB300qL+d7Z53BvBHGSZdGbOto57sr3mBda7OpxrjiUr57yumcMnJUxhp7ezv5znuv8V7rIVONUQXFfGfemZw90pqjczQHgz18d+3LvNN2wEBDQZM6I3IL+eaMs7hg5KSMNVojffx4y/OsaNuNQBxXLn1Ao8JXwFcmns3FI2dmrNEdC/DfO5850jvmWA3lsG6JJ59Pjz6HS9NsyQxGIBHi7v1P8E7H+sPdawfXKHDlcs3I87io+nRLi/fRhJNhHj/0EO91vXu4Od0HNQQCiSRHzeHcygs5t/IClAy3puJ6lFda7mV9zxtoUuPYbY4BDa/iZ1HZJZw+7GpLDsLRJPUY73fezY7e5wyqrab6GLuEj6
nFVzC37NMZH//VZYJd3b/jQP8jaDJyZM4PaoAiPNTmX8Wk0q+hZnikVkqNlr5f0x74E7oMkrp/PzrpVxz+XzeludcwvPg7qMrQnBT8OJPJ+u04Ih8zpJSs3nOIx97exPu7GwjHEiiKoCDHx/mzJ3DN4umMysIBOVZj/cFmHl61kZW7DxKKxVGEoMDvZcm08Vy3cDrjKstsa2xubuWhNZtYtucAwVgMBBR4vZwzcSzXz5nB5CrjUx9W2drWxoMbNrF0714Ch4uI5Xm9nDVmNDfPnMn0qsydnGPZ2dXBg1s38dLe3fTHY+hSku/xcGrNKG6dNos5ldUZL0bHsre3iwd3beCFAzvojUXRkeS6PCyuruXWiXNYWDnStkZ9oJtH9m3grwe30huPkNR18twe5pfXcPO4uSyuqLN18gmgKdzDkwfX8nzjJnpiIZJSJ8flYWbxSK6vW8DiYeNs1x/piPbxXNP7vNS8lu54gKTU8KteJhWM4MqRi1lUNgmXSW0SK/TE+1natoqlre/SE+8jIZP4FC+j80ZwYdVpLCydYVsjkOhnZefbvNO5nJ54NwmZwKt4Ge4fwRnDzmZ28TzcSvo7ezMiWpANPW+ypvs1euMdJGUct/BS7h3BgtLzmVZ0Mm5l8GP5VolrIXb3v8b23mcJJFpIyjgu4aXQM5zJRZcxruBs3DYX7qQepin4EvX9jxJKNKDJGKrwkuMaTm3BtYzIvwi3kn4r0Qxdj9ITfo6OwH1Ek3vRZRQhvHjU4ZTl3Uhp3lW4lCJbGp8kHEfkY8bug+28sHI77d0BovEkeTkexo0s5+JTplKSppaIlNLSArS3uZPn3ttOS3c/kViCPL+XMVWlXLZoCuWF5l/QgY9IOp0D7d08u2YbjV19ROIJcn0e6spLuHz+FKqKzf9+VjUOdffy1IZtHOzqJRSPk+vxUFtaxFWzpzKi2Li+QSYazX39PLF5Kwe6ewjG4uR43NQUFXHl9CnUlZifOrCq0RoM8Pj2rezt7iIQj+N3uxmRX8BVk6YwvjS9k2fl794RDvHEnq3s6O4gEIvid7upys3nqnFTmVya3smzotEdDfPk/i1s626lPxHDp7qoyMnj8rqpzCg1T2C2qtEXj/DXhk1s6WkmkIjiUV2U+/K4aMQ0ZpaMSPt4KxrBZJRXmjeyuaeBQDKCW1Ep8eSxpGo6s4rrhkQjqsV4q30dW/v2EUyGUYVKkTufU8pnMqNoXNqohhWNuB7n/a41bO/fSTAZQhEKBa585pTMYnrh1CHRSOoJNve9z+7AZsLJIEIIcl35TC6Yw6SC2WkLwFnR0GSSfYFVHAiuJqqlWi741ALq8uYzJn9R2siJFQ1dJmkJraQptIKY1gtIPGoBlTkLGZF3ZtrIiRUNKTV6oyvoCr9GUutGyiQutZhC30mU5lyI+gnevnEckY8BUkqWvr+Lh19dz/YDraiKQJcSKVN5C5LU8d2z543n5vPnMr428+iAlJK3Nu3jgTfWsXF/83EaA5w5Yyy3njOXqaOyiw6s2HGA+5avY/XeQ4NqSCSnTx7NbafPZXadtVyPY1m17yB/WbmOlfsOHnl/dCmPJKbqUnLy2Fo+vXguC0dby/U4ltUNjfxp9VqW7T3wAQ0hUiF3TUpOqh3Jp+fP4bQx6ZMPB2NDazN3r1/La/v/1hFZl/JwHknqmPK86uF8ZuYclowZl5XGls427tq8mpcO7EJKQAxopEqNa1JnZnkVn5k6l4tGT8gqkrKrt4O7tr/H8/Xb0aR+ZLvlaI0pxRXcPnEel9dNzSqSciDQyV/2rOK5Q5tJ6NqgGuMKhnHzmAVcUTszq0hKU7ibBw+8zQtN64nriUE1anPLuLZ2EZeNmJdVlKMj2sNTTW/xasu7RPU4CsqRvjIDW1KVvlIuHX4qF1SdjCeLKqu98V5ebl3KW+0riGiRD2gM/HuJp4QlFWdydsUZeNXMoxzBZD8rOl7k3a6lRLTQMRoqOhr5riIWl53LyWXn41
MzL5oX1QJs6H6GTT3PE9H6EKhHaoYM/LtfLWRG8cXMKrkcn5p5QbuEHmJP72Ps6XuCqNY5qIZHKWBM4ZVMKLoBr1qUsYamR2gNPEBL4D7iWgvgInUCR5I6Eqyhinwq8q+jOv/TeFz2o78nGo4jcoKTTGrced8bPP/2VhQhTOtrqIoAIfjBHeexZKH5sc+j0XSd/35qOY8s22hJQ0r4jxvO5rJF5sc+j0ZKyW9eXsmf3lxjSUPXJd+87HRuPNlaouOAxl0rVvOrN1cNmk/yAY3D+Q1fP3sxd5w8L6MF9p4167nzjeUog+RIDKbxxUUL+OdTTspI45Gtm/nOstcRYKox8F5+auZs/t/Jp2e0iD+7dztfX/4SIEybwQ1oXD9hOj9afE5G5etfO7SbL7/zLLqUphoDeQaX1E7mZyddiDeDirHvtO3ln95/jISup9FIXdrPrJzAL+Zdid9lfatiffcBvr7ufmJ6Iq0GwLzSsfx01o3kuqwv4rv6D/KdrX8gnIymbWongMkFo/nelDvId1vfqjgYOsTPd/0PgUTQgoagNqeGb0z8KoVu69fU9mgTd+3/Ef2J3rRF0QSCYd7hfHbM/6PQbX2buC/ewlMN36Y/0WpBQ6HAXcmVNXdS6El/6mmAcLKd5c3/RH+8Hixo+F3lnFb9Wwo8oyxrxLVOdrR/ilB8G+mPGKu4lWImV9xHrifz3KsTmUzWb+fUzEeMlJIf3fMaL7yzFSBtkS9Nl+iaznf+8BJvrTXurHmsxs+eWMYjyzZa15CSHzy0lOff325JA+DXh50QqxoS+Mmzy3h05SbLGne9vYZfvbnqyBymGoefwy9fX8mf3rHe9fL+tRv4rzeWIzF3EI7W+L9V7/M/b6+yrPHE9q38+1tLDy/e5hoD7+U9G9fzn2+/ZVnjxf27+OqyF9HSOAhHazy6azPffudVw+7Mx/JW016+sOJpkrqWVmMgyfL5gzv4+qrnLRe0W91Rz+fffYSYlrSgkWJZ626+uvpxkrq1yqFbew/xT2v/QlSLW9KQwNqufXxt3X3E9cG7TB/LgWAT39z8W8LJiKXOuhLY0V/P/9vyf0S1wZI7j6cl0sqPd/zMkhOS0pA0hA9x545fEDFp9Hc03fEOfrf3ewQsOCEDGh2xZn6393uEktYa7QUTXTx+8F8sOSEpDZ3+RCuPH/wXQklrjeViWi9vNn6WQPwg6ZyQAY1IspM3G+8glDDuMn00ST3AtrYbCcV3YK3OiUZC72Fr63VEEsbdmT/pOI7IR8zTb23mpZU7yCQOJUnV7PjOH16kqcO4Y+QAL6/ZyeMrrC/2R/P9B19jX3NnWru3tu3jz4edkEz5r2ffZNuh1rR27+1v4FdvrMxK479ff4fV9cbt4wfY2NzCj15flpXG71et5q296S8eOzs7+Nabr2U8vwTu3bSB53fvTGt7sL+Hry57IePaJhJ4fPdWHt1l3HZ9gLZwgC+8/cwgZ0XSaUheatjJX3am/7z0xiN88b1HkDIzDR3JO217uWvXirS2kWScr627D03XjztZk05jU89Bfr87/d8yrif5j613kdCTGWro7As2cte+p9PbSp1f7Po1MS1myQk5WqMl0sqfD9yf1lZKyT0HfkZEC2Ws0Rvv5JGG31qyf7HpR4SSPRmVoJfohJLdvND4n5bs32v7HuFka0bl4SUacT3AOy3/YslZ39f1/4gk9pFZITQNTYbZ0f5ppLRbVv7jieOIfITouuSBl9ZkVQhLylRE4Om3zB0MKSX3LF2bdVl4ATxmwYm55621WZ+gUITggbc3pLX7y6p1WVf+VBXBPSvXpbW7d816FCtFTQZBEYI/vZ9e4/7NG7MuRq0Iwd3r0y/gD+zYmMrNyUJDAHdtXpP2Qvvo3o0kdC3repZ/2vE+WpqIxTMHNxBOJjJavAeQwP373ieumUcsXmnZSF8inKWG5KmG9wknY6Z2qzo30RXvy2jxHkBH8nrbavoTIVO7Tb1baI91ZKmhs7
p7Ld0x82jC/tAOWqINWWvsDGykI9ZsatcW2U1zZFtW/WMkOs2RbbRFzKPF/fF6WsOrstTQ6I3voSNqfs2KJVvoCr+IlWjL8WhEkwfpiSzP4rEffxxH5CNk7Y4Gmjv7s76Q67rkmbe2EIsbX2i31reyt7kzo4jL0Wi65Ln3thOMGF9o97Z2sqG+OeveMZoueXXjbrqDYUObxp4+3t5Tn3U/FE2XLNu9n+befkObzlCIV3buSbvlY4QuJasPNbK3s8vQpj8W4+md27J+HbqUbO1oZ3ObcQQpmkzwyM7NWWtI4EB/D++1HDK0SegaD+xeb6tfUFskyFvN+wzHdSl5cN9qW83f+hNRXmveYTgupeTRg6tstcmL6glead5oavPXphW2Gv5pUue11vdNbZa2vZl12fkB3up423R8ZeertjQUFN7tfN3UZlPPC7Y69gpUNvc8b2qzr+9p2xp7+54wtWkLPkr2/W8AVFoD6aNUn0QcR+Qj5NnlW1LJpzYIRmIsX7/XcPyZVVtta8QTSV5bv9tw/OnV22xraFLy/DrjBePpDdts16wQQvD0hm2G489u3TEEXVUFT2421nhhz07imr1wqyoEj23fYjj+Sv0egglrOQVmGo+YbM8sb95PV8zYcbSq8fAe47vKNZ31NEfSbz2aoSB49IBxftCO/iYOBNtt/d0F8NQhYyehMdzGzkB9VhGXASSSF1veMRzvjnWzpW9bVpGKozXeaFtmOB5OBtnSt9qWho7O+90DRdOOJ6FH2dn/hq1uuhKNHf1vkNCjgz8HmWR//19tazQG3ySmDf75lFLSGniI7KIhA2j0Rt8mlrSWj/JJwnFEPkLqW7qzvvseQFUEje3GF+v6th77GqrCoY5ew/GGzl77GkLQ2GX8Ohq6+2w7CULAoR5jjYM9vbadHV1KGnpN/h69vRmdSBkMTUrqe3sMxw/29+KyWQRMk5J9vcZh+oOBHtvvlSYlBwLGGg0h49doFR3JwaCxRlPYWmKjGTLNPM3R9DlWVmiPdhtul7XHhkYjkAwQ1wd3YnviHRnlbBgR06OEk8FBx0LJLjSZsK2hyYRh0mpc6yMp7TnRkNoGCicHj0zqMkpSt//ZAkksaRyZ/KTiOCIfIeGo/S+cEIJw1PjuN2QylgnhmPFzDUbN98etIJGEY8bPNRyP29oGgNRWVjhu/DrCiYRtDQlHKrYaaQzF+fh+E41QIp51TtDRBBMmGsm4ra2GI/OYRG7CQ6QRNjlxYjaWCVHN+HNl9cRLOnQkcX1wnajB3X82RLTB54oNoUZMH/yETnwINeIGGklp7XSQFZL64A6NJs3zeTJhKOf6uOA4Ih8hub7MehwMhpSQ6zeeJ89kLBPMnmu+z17JZ0g5VDleY41cr8f2HbiiCHI8xnUlcj32NQSQ7zV+P3I97iFYWlOl7w013J6sc4KOJt9jouHy2HbaAPLcxn/zHJfH1nbGALmqiYbJWCb4TOYxG8sEBQWPQQl3/xBW5PQbFB7zZlGQzAifOnhdFI8ydBoegzLxLjF0fV9cSu6gv1eFvfLxH9ZcHxccR+QjZPTwUvu5FbpOTaVxmfHRlfY1kppO7TBjjbphxUPyOkaVm2iUmpdSt4KU5vPUlRTbXlwVIagrKTIcH11UQsJibQsjVCEYU2zcRXl0YQnJNLUwrGiMKzLRKCix7SSoQjC20Lh8fV2etU7RZigIRucba9Tm2uuRBKmCXTUm84zwD02VzCp/qWHBvArfsAw62xpT5C40dHaK3WUoNhI8B/ApOeSogy+uea7SjJvQDYZLeMlzDV48zaMW2u4zA6mE1VzX4MXTVMWHW828AehgKj5X7RDM8/HCcUQ+Qi4/fZrt3IqCXB+nzhpjOH7F4qm2NfweN0tmjzccv3y+fQ2XonDRHONKglfMmmJr/gEuN5nn0imThiTv4ZoZ0wzHLxw3Ab8r85Ldx2pcN8VYY8mosRSYRDOsatwwcYbh+KlVoxnmt3cx16TkxnHGVX
XnlNYwMrfY1vKqI7lu9FzD8fEF1YzPr7K1BSSRXFWzwHC82l/O1MIxtjQEcFH1yYbjRZ4iZhRNs3WiRSA4q+IMw/EcVx4zihba1FBYWHq2YY8bl+JlSuES2ydaJheeg8ugOZ8iVMYUXI6w9TpUavLOwWNSUr4y7ybsLakqxf4zP5Hl3tPxoToid955J/PmzSM/P59hw4Zx2WWXsWvXrg9T8oRm1oQRptGMdCiK4IozpuN2GX9pJ9VUMGnksKwXWFURXHrSFPxe4y2NumElzBszwpbGhbMnUphjHF6uLMzn9PF1WUdeVEVw9sQxVBQYL54lOX4unDQh+1olQrB4VA21xUWGNnkeD1dOmpK1hiIEMysqmVxufHHyqi5unDgjaw0BjC0qYW6FcR8gVVG4efxsW4trdU4Bp1SNNn4eQnDzGOMF3grFnhzOqjJvhXBt7SJb0Z0c1cOSqummNpdUn2JLwyVcnF0x39TmnIozbZ1oEQhOLz/F1GZR2RKbJ3N0Tio929RmevFFtk+0zCi+2NRmTOEVthJvJRpjC68ytanIuzbr+VNoVObfbHOOjycfqiOyfPlyvvSlL/Hee++xdOlSEokES5YsIRT6x0vGgdSF9pYL5mX5WHCrKleeYXzXOsDtS+ZlteUgSD3Ha0+zoHH63Ky3NaTEUr+ZTy2eg55tjQ9dctuiOWntbp83K+vlQpOSTy8wvvse4LbpsxAiu0C6LiWfm22+IAHcNGkmLiU7N0ECX5i+IG3fnGvHzMTncmXtjHx28oK0zuulI2eQ7/ZlrXHb2IW40zSmO6dqOiWevKw0BHBN7Ulp80BOKp3OMG9xVtEEgeD8qpPIc5nnNkwtnEy1ryprjUVlCynymHesHpUzgZH+MVlqKEwtmEept8LUrtw3mpE5M7OKWAgURubMpMxn3oQyzz2C4bmnZamhUuKdTKnP3Pn0uIZRnnsp2S2rKn73WIp8xlGwTzIfqiPyyiuvcNtttzFlyhRmzJjBvffeS0NDA+vWpa9G+Unl4lOmcOUZ5h/oYxGkLhw/+fJFVJSm7zZ5zuzx3Hp2+kX4WA0J/Ndt5zOqIn2jqlMm1fGlc0/KSGOA7199DhOHpw8/zq0dwbfOOy0rjX8//3Rm16RvQz+lsoIfnWd+x2bE109dzCl16fdzx5SU8stzzs/K4fnCnPmcN/b/s/fe8XFUV///585s16r3LlnFliWruPdece8YTEsgCRDyJDyEkAIESKWmkAQSQgIYU9x7772pWcVVlmT1Lq2278z9/bFaR7Y1s7MzgvB8f/7k5SdPNGfnvTO7e++Zc889x3sX3jj/QPx16gIAvpVUIgAezsjF0jTvS2Hhej/8Y9IyEOI7Y0lyFh5O9/6dDNDo8N6YB8AyjE+OAgOC6dGD8Hi694Fcx6rxp+GPQs345lQxIBgZmorvpHr/vqgYFr8a8iR0rManSZwBQUZAEr49YJF3W8LguUH/A4PK4CODQYIhHo8kPejVlhCCR5Ofg1EV4DMjXBuFlQlPSbKfG/tz+KvDfVqiIWDhrw7H3NifS7IfGfFLGNUJPjO0bDDGR78pqcHlgJDX4KfOAHxaamKhYgKQEfEhiMJt+P9X9bVedWenu95CSEjfE53dbkdXV9dt//5fEyEEzz00FStnuCMC3sqLswyBSsXi9R8swLgc4bD2nfrBwgn49qyRt87hjcEwDH772H2YIZIbcqe+O30UfjBnnHQGIXh1xUwsHik9/+ORMUPxs9mTQCQyCICfz5mMh0ZL7/C7PCcLv5o9HQwhXpc3PMefmzwe3xsjPbo1P30Q/jDzPrA+MJ4ZMRo/HiP9CWl6Yiren74IKoaRzPhW1jD8csw0yV2Ex0Yl4Z+TV0DLqsB6GTQ9jJUpOfj96LmSGXmh8fjnuIegV6m9XofHkZgTl4m3RiyVvFyYHhCD90Y+DqNa55XhiWVNiBiEN4auhspLxMWjeEMk3sz5AQLUfl4dHg8jNzgdr2V9DxpGWl5RuD
YMLw7+CYI0QZKTV1ONA/DTjP+FjpWWVxSoDsHTqa8hWBMukUEQo0/Ekym/hF5gt8yd0qsCsTLxHQRr4iDNzSUI0cRhZeI70KvEozoeaVgjpsa+jyBtGjyPd+Ji4KeOwbS4D6BXSUtEZRkDMiPXwF+bK5mhYSOQFfUldKo4SYz/F0Wo1LabCsXzPBYsWICOjg4cP953xcBf/vKXeOWVV+76u5Q2wv8XdazgOj7bm4/z5TdvTbI8pWAYBjzPQ61iMXdcJlbNHIqkGOnttHvrVHkVPj2Yj5NllbecHp5SMIQBT3moGAZzRgzCg1OHIj1WXtb3uWs38cmxfBwuqwABASH/YVBKwRCCWbnpeGjiUGTGiYdphZRfXYePTuVjX7m7qux/GOTW1tWZg9PwyJg85MV7j4T0pYv1DfjXuXzsunT11rk5yoMBcXdgpRTT0lLw6IihGJUgb9Aoa27Ch4X52HqlHC6eB8sw4HgKj4/FU4pJicn4Vu5QTEhIksW40t6CD0suYOO1Ujg4DixhwFPa4wi4u/+Oj0nEt7KGYVqCcOKzmG50teFfl89h/fVi2DhXL4b7OEcpRkbE47GBIzArPl2yE9JbteYOfHz9NNZXFsDCOaC6jeH+bHJD4rA6ZRTui82UxWiydeKLqlPYdPMsul02qAgDjlL3E1rP5z8oIBYrE8dgdkyuV+erL7U7urC19hh21B+HyWX5z73q+a1wlEeSXzQWxkzC9MiRkh2d3jI5TdjXeAgHGg+hy2UCS1jwlHczAHDgEa2LwsyoaZgUPh5qgZ0yYrJyZpxs2YsTLXvQ5WoHA/ZWOX4CAh4cQjWRGB82G6NDp0PN+L4bxsFZUNyxA4VtW2ByNfXJ8FdFIDdkIbKD58na/uvibajo2oQrHV/C7KoBgQoUfI/LwIDCBR0bhrTAFUgNXCaaoCokntrR2P0l6rs+gs1VcYvhvg43Q82EIsr/IUT5PwQ1q3yX4DdNXV1dCAwMlDR/f22OyJNPPoldu3bh+PHjiIvrexC32+2w9yrc1NXVhfj4+P+TjgilFA6HCyzLQCWSXAoA1Q3t2HWyDI1t3bA5nDDqtUiLD8ecsRkwGsSfWmwOJ1SMd0ZtSye2nylDfZsJVocTfjoNUmNCMXfkYAT6idcksDtdYAgRTZIFgIYOE7acL0NtWyfMNgf89VokhQdjwfDBCDGKPxk5XC4QeGc0mbqxubAM1W2d6LbbYdRqkRgShEW5gxHu3/ce/94MANB42cXSarZgU0kZKlrb0e2ww0+jQXxQIBZnDUZ0gPig5OA4UEqh9cJot1qx6XIZrrS2otthh0GtRqx/AJZmZCIuQPwJz8lx4CUwOu02bLpWhkttzTA57NCr1Ig2+mNJaiaSA8UHPifHgaM8tKxKdILvdtqxtbIMJW0N6HLYoVOpEKn3x+LkTNGtugDg4nk4eQ46LwyLy4GdNSUoaa9Dp9MGLaNCuM6IefFDMDBQ3LGVyrBzThxoKEFRRxW6nVaoGBVCtUbMiMpGRqBwEi/gdiQcnAs6Vi3KcPIunGgpxsXOa+h2WaAmKgSq/TA+PA+D/BNFX8tRHg7eCR2jEbVz8S4UdBShtPMSzJwZLBgY1UaMCB6KdP800dfyPQytFwZPeZR3FeCyqQhWrhsEBAaVPzIDhiPVKO4Q8pSHk3dAw2hF7SjlUWW+gBvdZ2Hj3JFxHRuAZONIJPoNE13CoJTCSe1QE41XuybrOdSaj8LBdYKCQsMGIMowGtGGcWCI8FhEKYWL2qAiWq8Mk/0cWi174OLbQakLKiYIgboxCDZMB0OEHUJKKXhqBUO0ICLv5Zuqb5wj8v3vfx9btmzB0aNHkZwsnlTUW75cyDdBLheH4+euY8OOfJRcqoOLc3vAfgYNpk/IwOI5uUhJVLbXnON5nCi8gXX7ClBwqQZOl5th0KkxbdRALJueg0FJ8qIOHvE8xamySnxxqAhnLlXD6XJntOs1akzNS8WKKTnISoqS9QTqEaUUZ6
7exOfHC3G8vBKOHoZOrcLkrAG4f3wu8pJjFDMuVNZi7ekiHL5cAZvT7YhoVSwmpCfjgdE5GDUgXjGjqLYBn14owt5LV2HtYWhYFuMGJGD18FyMG5CoeJvwxcZGrCkqxI4rV2BxuqttqhkGo+Pj8XBuHiYnJYFVWEq+vLUZn5QWYtu1cpgcjluMYVGxeDQrD9OTUhWXq7/W0YI1VwqwsaIEXQ73Q4eKMMgNi8Yjg4ZjVkI6NKyyQbfS1IrPKvKxqaoIHQ53VU2WMMgMisLq1BGYEzcYWlbZlupaSzs2VJ/D5pv5aHO4k+8ZQpDuH4X7k0ZhVswQ6BUWNmu2dWBb3WnsqDuNdocJFO6lqES/SCyKG48ZkUOhVynbtt3u6MSBxhPY13gMbY4OAO7IQ7Q+ArOjJmFi+Gj4qZQVHTM5u3Cq9TBOtBxAu6MVFO5oUKg2AhPCpmNU6ET4qZRtDbe6TCju2I8L7TvR4Wi4xQhQh2No8BzkBM+En8QlHCE5uG5c69qN8o6N6HLW9EQ4CAyqMAwKXIj0wHkwqJTVqXHxZtR3b0NN11p0O68DPQwNG4ZY/2WI818BnSpKEePr0jfGEaGU4plnnsGmTZtw+PBhpKV5T7rrrf9LjsiO/Rfx/ppjaO+0gGHIXbs9WIaA4ymyBsXghadnITHO9+JNe05dwp/WHkFLh1mUMSg5Ej//9kyky3B6DhVewxtfHEZDm+nW+fpipMWF4cXVM5CV7PuP4nh5JX674SButnaKMgZEhuDF5dMwLMX3ZZCzFTfxytYDuNHcLspICAnCSwunYmyq70WECmvq8Yud+3GlqQUsIXd1v/UwYgL98eKsKZiW7vsySFlTE17YtxclTU19M3r+Fmk04ucTJ2HewIE+M661t+L5w7uR31jfJ4MhBDylCNMb8MLoiVg2MMtnRpWpHc+f3IkzjTf7ZoCAB0WwVo9ncyfgoYFDfWbUWzrxswvbcaKxQpThr9bi6YyJeCzN+06hO9ViM+HVi1twrOlyz1JB3wwDq8FjKRPw7dSJgjU0hNTpNOPtS+txrPkiCHAXw5NYrmXUWB4/CY8OmOXzkpHZZcUHFZ/hZMt597KjQCq1mlFjdtQkPJCwyOclIztnw/qaj3G29Tgo+D4ZBAQMYTE2dAoWxz3g83KOi3dgf8M/Udixp6exXt8MAgbZQdMwI/o70PhYlZanLpxveQ/lHRt79cW58zNx3/9k/2kYG/EcNKx4dPZOUcrhevufUdX1EXhqw38+5d5yMyIM05ER9ktovuHLOd8YR+Spp57C2rVrsWXLFgzsNUAGBgZCr/fuZf9fcEQopfhg7Ql8vP60JHuGIdBp1XjrpWXIGiQ9l+GjbWfx1y+Fu3HeydCoWLz17CIMz0yQzPj8UCFe//xQnz+BuxiEgGUZvPm9eZgwRHoS7eYzpfjlF/tAqfdm74S4dwb8/qE5mJkrPYl2V/FlPL9uV09o0zuDgOBXS2Zg0VDpSbQHr1zHMxu2g+Op123MnqnupdlT8eBw71ujPTpRXY0ntmyGo2cpRopemDAB3xkuPYn2fH0tHtm5ATaX866JW0g/GDYGz44YJ5lxsbUBq/d9jm6nXTLj2xkj8IvhUyU7Clc7m/Dw0TXocFjBSawyuyI5D68OnSs5WlVtbsUTpz9Ei71bMmNOTDZey1kieRJvtLXjR/l/Q6OtXXL9jrFhmfhl1sNQS0xwbXd04pXSP6De2iip1gkBQVbgQPxk0JPQSozymF3dePfqb1FrrRJ0cu5kJPul4cnU56GTWFrezlnwedXLqLVekshgEKFLxoOJv4JeJS3vw8XbsK/2BdRbL8D7qOhmBGoSMDvujzCopD1s8tSB4qYfotlySJI9wEKvisHw6I+gE6j0+k2QL/P3V7pr5m9/+xs6OzsxefJkREdH3/r3xRdffJXYr1VfbL0g2QkB3MseNpsT//vqelTXSuvWuPlQsWQnxMOwO1149u3NuFLVLOk1u89ewuufu38IUq
YKnlK4XBye+9s2FFdIa1t9qOQ6Xv5iL3gJTgjgrjfC8Tx+8slOnLlaLYlx8loVfvzlrh4HQRqDpxQ/37AXhy9VSGLk36zD99dvh4vjJTkItOffK7sPYkeptIJ+pU1NeHzzJthdLp/qtfzu2DF8WVIiyfZaeyse2bkBVqd0JwQA/nThFD4slrYF/6apA6v3fQ6TD04IAPyz/Bz+cvGUJNsGaxceObYGHQ6LZAcBAL68UYC3Sg5Ksm2zm/Gd0/9Ci93kE2N3XTFeL90p2EW3t7qdVjxX+D6a7NKdEAA41VKG18u/kMSwcXb8uuxd1FubJBdco6Ao6byMP175UNK1O3kH3r/+Juqs1ZIcBA/jhvkaPqj4Azjq8mrPURfW3/w1aq2XfWDwaLLdwBfVr8Al0HG4t3jK4XD9K2iw5kPaqOhmdDpuYm/t/8Ip0ITvNntKUdr8CzRbDks6v1scbK46XGj4Npzc/xs7S79SR4RS2ue/Rx999KvEfm1qbOnC3z464vPreEphszvx5nv7vNq2d1nwxkfSBsveohRwujj8+oM9Xm27rXa8+sk+n8s7UQAcT/HSv3d7HQTtThdeXLtH6u/5dg4Ffv7pHnBeera4OB4vrNsteWC6Uz9dv+dWQqvwe6F4futuyc5UbxEAP9u+D2aH+CBIKcXze/fAycurzfnigf1ot3ofBH96dC9sLqesCqC/OnUYDd0mr3Yvnd2HbqddVvG7NwuP4kaXd2f99eL9aLdbfHJ0PPrH5ZMoa/fuSP/1ygE0200+MyiAL6vPoqC9yqvtmqr9qLO0+OTouBkU+xvzcab1klfbbXX7UW2p9blaKgXFufYinG7N92p7rPkAKs3XZDB4XDaV4EzrMa+2xR0HUGku8rlaKgWPWutlXGjb6dW2svswqs3HZDA4tNsrUNL+uVfbFutRNJi3wdeBkYKDxVmNG53v+/S6b6r+/1k9pZ+0bW+xb5WdeonnKQpKbnqNimw7WuJ1AhZjXKpsQvmNRlG7nWcuwe5wyZr0eEpR3diB/Ku1onZ7C6+gy2qXzWjq7Mbx8kpRu8OXK9DSbZHViZYC6LTasK/0mqjdqcqbqG7vlDWxUgBWpxPbSsQnjOLGBpQ3N8uuXOvieWwoKxW1udrWgnP1tbImb48+Ky8WPX6zuwOHa6/LZrCE4NMrBaI2rTYzdtWUK2AwWFshHt0xOW3YVlPgs4PQm/FF5RlRGzvnxPba07LLwjNgsKlGPGrq4jnsaTgi21FnQLC7/rCoDU95HG3eI5tBQHCkaY/ogw2lFOdat0L24AuK823bQL18nmXt62X3p6HgUd6xEbyX6M7NrjUK+uxwqOn6Ehxvk/n6b47uOSIy5XRy2LS7UHYJcsCdzLh5T5HgcY7nsW5foaIW7yxDsOGAMINSis8Oig/2UhhfHCoUtVl7rFDRzhGWIfjsmDjj01OFsvutAO68lzWnxBlrzhcq6jxMAHx8tkB0oP2ksEjRdVAAHxUUijoya8qUMXhK8UlpIZyccI+Qz64UKfrMOUrx+dUiWF1OQZt1leL30juDx+aqYnQ5hAfzHbWFcPDelwvEGPsbytBiE44gHWoqhJmTP6Hw4HG27RLqra2CNhfai9Hp9B7FEmZQXDJdx01LnaDNFVMpWh3SloP7EgVFne0mqizXBW3qrJfRbK+CrPBqjzqcjag0CzvS7fYKNNkuKupPY+PaUd19QvC4xXkTrdYTivrscLQbjebdsl//TdE9R0Smistr0WVS5olyPMX+o+WCxy9XNqGprVs547RwXkJlYzuqGtsVNXjneIqDhddubVe+U40d3Si92Sj7Cd/DOHm5CmZb38saXVYbzlTcVPSEz1OKopv1aO7q+547OA4Hr1Qo6jxMAVxraUNVe0ffxynFjiuXFV0HANSaulDW1CR4fMtV+VEEj9psVlxoFJ6UttwoVczodjpwskF4WWN7dami5nIA4OA5HG0QjoTtrruo6PyAO1JwuEk4Ena4sVByZV
QhMSA41iz8Xk+15itqWuhmMDjVIrw8U9B+RlG3XjeDRX67cN5deddxMAq69XoY5V3CEaQbpkMKIhVuETCo7BZOQG2y7If8qI5HDBrNuxSe47+ve46ITLV3WvrlPF0mq+ATXVtX/zCsdicczr6f6PqLwfMUJqu9z2Nt3f3DAIB2c9+5D20Cf5ejNkvf5+q02hQ5U73Vau77nlhdLthFogw+Max9M3hK0Wnvn3CuEAMA2uz985m0WoWbZLbalTnqgHsCb7ULM1rsJoWujnv3V7sIo9XRJXs5ozejwyF8P9odnYqdNkIIulzCURWTq0tRt163KLpFGGZXp+J7xYOD2dUheNzGtSt2DCl4WFzCS+9OrlX20s9/xMPOyY9AfVN0zxGRKU7g6d9X8TwVXHrpLwYAwWiF3PyTvhl9T6D9y/g6ruO/x3B9DQwpW6clM0TeL99P1+IUWctXGnEBABDx65CbG3IHAi7R6+ifeyXO6A8Hl37lDAoqeh6+X64DovkbPDjFzo6bIbys+HVcx/8V3XNEZMrop6yioUd6vUaw8Z2/l/LuUsUyBHpt36WE+4sBAAGGvgsF+ev7k9H3uQL0vhUpEmUIvN8AXX8y+j6XUaNRHKy9xRB4vyzDQK/yvddInwyt8Gdr1PTP5x6oEb7vAWrlnwlPKQJEGdIat4mJoxQBauH6GAFq3wpg9SUKCn+RKqj+CquXukVgFGlk56fyU7w0Q8DAIFIQTMf6KY5WEDDQifSQ0TK+95fpSzpWuJqrmvGHkjwXj77phc2k6J4jIlOZA6OhYpXdPpYhGJoVL3h8YFIktBplpagZhiB3YKxgYagB0aEwKnQUGEKQkRABrbrv9xobEogwf2WDOQGQGB6EYL++B9owowHxIYGKJ/GIACNigvouvmPQqDEoMhwKclUBAIF6HVJC+x48GEIwPDZWcUl4vVqNweHClXVHx8QrSlYF3OXfcyOECyqNi0qU1SSutxgQDI8Qrqw7NjJZ8XUAwIgw4cJ/o8IGKP48KCiGhQhX780LTlWcv8FRHtlBwsUFMwPTFU/gHOUwOFC4QnaqMUPx0gwPDqnGQYLHE/2GgFeQ4Am4l00S/IQrBEfp8xQlkbpFEGUQ7gIerBvRDwwGwbqRCs/x39c9R0SmggIMmDp+kKIdFBxPsXSucClrP70G8yZkKmLwPMWKmcI/Bq1ahaUThghGZSQxKMWqqcIMFcvg/vG5igfzByfmCTpUhBCsHpOr6PwMIXhwdI5oz5aHR+RKKpQmxlg1NFu08d4juXmKclFYQrAiMxN+GuEqmI9k5Sla1mAJwcLUDATrhJ/AHx40TNGSA0sYzEhIQ5RB+Ol01YBhiq9jbEQykvyFq2AuTxip6PNgQJAVGItBgcKVlOfGjJJ9fsDtqCcYIkQdkckRo8EqbJ4WqglGbtBgwePDgsdAyyh7sPFj/ZETJFwdeKD/GOhldMTtLTWjQ1bgJMHjsX4j4aeKUMRgiAppAXMEjwfphsOgSobShNU4/+WKXv9N0D1HRIGWzMlVtIMiJjIQQ4eIl2BfOi1HESMkwIDxeeI9TpZNygZVwDDqNZgxXLwE+5LRWVDih2hULOYNzxC1WZg3GGoFDdMYQrBkmHgflbmZA+Gnkb+sQSnFyrwhojYzUlIQIqEFgpA4SvFgtngp+YnxSYgxyh/MOUrxUFauqM2w8FikBYbKHmY5yuMRLz1nMoKikBMSKzuawFGK1aniJfHj/UIwOixFNoMHxarkMaI2YdpAjA8f4nNfGo8ogKXxE0RL4htVfpgQNkL20gkBwZzoyaLvUcvqMCZ0igIGgwnh06ESKVfPMmoMDb5PdqInAYO8oFlQi/ScYQiLjKClkOskELBI8Z8JLStc2pwQgoTAh2Sd38OIMMyAVqHD9E3QPUdEgQanR2P8yFTZ0YSnHpnk9bUp8WGYMy5DdofYZ1ZN9LqEFBsWiOWTcmRPGN9fNF5wWcajsAA/PDpluEwC8L1Zo2HUiT9pBe
h1eHLqaNmMb08cjlCj+BKSXq3Gj6ZI77PSWwTA6hG5iBVY+vFIzbL4yYQJshlLBw9Gaqh4nwuGEPx8zGRZDIYQzE5OQ47IsgzgHmh/NnyqbMaE6CSMifLejPDHQ6bJmi9YQpAXGofJUd6bcT49cDoY4vvCBksYDAqIxowo732MHkmeCRXx3d1hCYN4QzhmRA3zars4bjY0jNrnJRoGDMK0wZgeOd6r7dTI+6BjDT47IwwY+Kv8MTF8hlfbEaHzYVAF+uyMuHND/DAqbJFX24GBC2BURfq8jZeAgYpokRPi3cmIMS6EQZ0kY6swASEqDAh+ysfXfTN1zxFRIEIIXn52LgalRPm87PD0o5MxaYy0Rm4/+/YMDB0U57Mz8sSSMbhvvHAYtbeeWzkZ44ck+zwIPjRjGFZMltbI7Zn7xmF2nvTmdR4tGzME35omrZHbdyaNwLLhvneHnZczCM9MGyvJ9qHhuXh0lG/dYQmAqekD8NMZwuHg3lqemYVnRvnmVBEAY+IT8Ovp3gdyAJibMhA/Gy3t/XjEEILciCj8Ydp9kuynxKbg1VEzb70/qYxBQeH42+TFkr7zI8MT8bvhC0B8YLCEINEYgvfH3Q+VyFKcR0OC4vC7vBU9fVylUVhCEKkLwLsjHoKG9Z7rNcAYjdeGPAaGMD4wGASpjXgj97vQs96XRKL1EXgh4ymoCCuZwYCBn8qAFwf/D/xU3nO9gjWheDL1eagYtWRnhAEDDaPF02kvwF8tnODpkZ8qCA8kvgYNo5PsjLgdBA3uT3wFAWrvncm1rD9mxb0DDWOU7CgQMGCICjNiX0eAxnvXcJYxYGjUB1CzIT44IwwIWORE/Bn+Gt/H02+ivtLuu0r1f6H7LgDY7E689oedOHr6ap8t5z0ihIBlCX785EzcN9W3ydLhdOHXH+zD7pPlogyGEBAC/Gj1FCyfkesTw8Xx+P3nB7Hh6EWvDAD4/uJxeGTmcJ8cJI7n8c62Y/j4cL5XBgXFd2eOxpOzRvvEoJTi3QOn8N6hMyA9Lez7kqdV/LcmDMOzMyf4FNmilOLvJ8/hncMnAcAr44Fh2fjFrCmSJr3e+qigAL86chhUAmNJxmD8ZsYMaHxcnvry0kX8/Og+uHgquGXRw7hvQDrenjoHOh933Wy9UYbnTuyAk3cn5/VFYQkDjvKYHDsAf5m4CH5q31rCH6i7jGfPbIKNc3pljApPxF/GrBDdLdOXTjVfw3P5n8Pssgt2qfYwhgTF4Y/DVyNE69uOmOKOCvyi+F8wuSwgIH1+JiwYcOCRYozB73IeR5jW++TdW9dMlfjdpb+i02kSZDBgwINHrD4KP8v4PiJ00rrJelRnrcZfr72OTme7IIOAAQWPUE04nkr9CSJ0vnWTbbXX4vOql9DhbLx1LiGGvyoU9ye+gghdkk8Mk7Mee2ueQ6ezyitDxwZjZuwbCNMJJ9v2JZurCQWN30W34xIIWIEkVvc3TsUEIjfyLwjWeY+A/Tfly/x9zxHpR12+3oCNuwqx90gZXK7bv6wRYf5Ycl8e5k7LQlCA/B0kV6ubsfFAEbYfK4XDefuXNTTID8un52LBpCyEBsnfDlhR34r1R4qx5UQprI7b98EHG/VYPjkHS8YPQUSw/O2AVc3tWHfyIjacugiz/fZqqQF6LVaMy8GyMUMQEyL/c69t78QXZy/iy7PF6LLdXmzNqNVg+YghWDkyGwmhQbIZDV0mfJF/EWvzi9F+RyE0g1qN5XlZuH9oNlLCQmQzms1mfFFyEZ8UFqLZcnsBMZ1KhWWZmXgwOwcDw8JkM1qtFnx56SI+KilAg/n2olgahsXi9MF4KDMXWeGRshkddivWX7+If5dfQI2587ZjKobBwqTBeGjQUOSERsteijQ5bdhcdREfXzuLqu7bi0mxhMHsuAw8mDIcw0LjZTMsLjt21hZjbeUpVHTfXkyKAcGUqAzcnzgKw0OTZTNsnAMHGwuwseY4rnffXr2WgGB0aAYWx43HsJ
A02XklDt6J06352FV/CNe6765emxM0GHOiJiM3OFP27icX70JRxzkcbd6LCvOVu46nGQdjUsRMZAUOlZ1Iy1MOV0xncL51G6osd1eWjdNnYETofAz0HwOWkZffxVMONebTKO/YgFrL2buOh2kzMDh4GZKMk6GSmaxLKY9W60nc7PoULdYjuNPNNWoGISHgIUT5zQHLyM8h+7p0zxHpR3Ecj7KyWrQ0m+BwuOBn1CEtNRKRUcJPIKZuG65UNMFktkGtYhEUaMCglEiwArkaPE9RdqUOTc0m2O0u+PlpkJIUjtho4f3h3VY7Lt1ohMlsB8syCPbXI2NAlGA+CKUUZdcb0NDSBZvdCT+9FgPiQ5EQLTxBWmwOlFU1otNsA8swCDLqkJkcJZgQSinF5eom1DR3wmpzwk+vQVJ0CAbECD9J2RwulFQ3oNNiAyFAoEGHIYlRortKrtQ2o7q5A2a7A35aDRLCg5AeKxxqdbhcKK5pQIfFBlAgyKBDVlwUdCJ5LdcaWlDR3A6z3QGDRo2E0CAMigkXnFicHIfiuga0W9zVV4P0OmRFR8Igktha0dKGa82t6LY7YFCrERsUgKyYSEGGi+dR3NCANqsVLp5HoE6LrIhI+IvU8qhsb8eV5haYHA7oVCrEBPgjN1p4oud4HsXNjWi1WuDkOQRqdcgMi0CgVjhyUNPVibKWJpjsDmhVLCL9jBgWLbwFmacUJa0NaLaZ4eBcCNDoMDg4UnQHTr3ZhIstDehy2KFhWUQajBgeESu4w4lSirKOBjTZumHnnPBX6zAoMBKhOmEHvcnajeK2OnQ6bFAzLMJ1fhgeHg81I/x9v2JqQJPNBBvngL9ajxRjBMJ1wknArfZuXGyvRZfTChVhEaL1w7DQBKhFkjNvdNej0dYBG++AUaVDoiES4bogQftOhxklndXoclrcSzcaP+QEJUPLCn8Xayz1aLa3wsbZYVDpEaOLRLhIBKTbZcGlrhvocprBEIIAtRGDA1KgY4WjWE22erTYm2DnbdCxeoRroxCmFU60tHFWXO2+im6X2zE2qoxIM6ZBxwp/T9odDWhz1MLBWaFh9AjWRCFEGyto7+DtqDKXwexyV7Y1sP5I8BsEvUgdk25nAzod1XDw3VAxevirohGkTRK0d/EO1FuLYeU6wFMOOjYAUfpM6EQSWW2uBpid1+HiTWCIHnpVNIwiyzA8daLVlg871waeuqBhAhCsGwItK/8BSKnuOSL9oM5OC3btLMbmTefR3Hx3ueGRIwdg0eJhGD5igKCD4U2mbht27S/Bxu35qG/svOv4sJwELJk3FKNHpMiuWWK22LHreBm+3F2Am/Xtdx3PHRSHZbNyMWl4KlQqeU8kFpsDu89cwuf7C1BRd3fTrawB0bh/Wi6mDkuDxktSq5BsDhd251/GZ0cLcanm7h4qGfEReGBiLmbmDYROZu0Vu9OFvRev4tOThbh4s+Gu42mRoXhwXB7m5g0SdTDE5OA4HLh0DZ+cKUT+zbv7tCSHBuOhUXlYkD0IRhEHQ0wunsfBa9fxcUEhTlffvOt4QlAgHh6ahyWZg2UXaeN4HkeqK/FxcQGOVlfeFXSPMfrj4ew8LM/IRIheXgSQpxTH6yrxUXk+Dt68fhcjUm/Ewxl5WJGejXC9vAggpRSnm6rwydUL2Fd75a7lr1CtAavThmFlSi4i9fJ2GVFKUdh2E59VnsXeutK7tjQHqvVYkTQCyxOHIdoQJJtR3lWDjTdPYl9DEVx3VO00qnRYEDsKi+JGIdbg2xJLb13vvomddcdxqOksnHdU9NSzWsyIHIM50eMRZ5AfOau11uBw00EcbzkOJ709WqphNBgXOh6TI6YiVu89B0NIzbZanG3bjfNtB+C4o3utiqiRGzwZo0JnI0afLJvR6ahFacc2lHVuh4O/vbw/AxXSAqYhK2gRIvW+LeP0lsVVj8qu9ajsWg8nf/scQsAixm8mkgNXIESbKzs6J1f3HBGFOnK4HL/9zTa4XL
xgHxiGIeB5irT0SPzmtysREuLbQHjq3HW8/PutcDhcAO17rdnDSEoIxRu/XIaIcN/uwfmSavzkrS2w2hyC9fs8jNjIIPzhp0sRFxnkE6PoWh1+9KfN6DK7Ixp93S6mJ08jMsQf7/5oCZJFIiR9qay6EU+/vxltJgsYgj7reHgYof4G/PXJxRgU59uWtmsNLfjOPzehsav71rnulOf6Ag06/O3RRchJ9G09u6qtA4+v2Yib7Z3CjJ7/9tNq8O7KBRidLFzwri/VdXXhsXUbcb2t7VZehxBDp1bh3QXzMWmAb4Ntk7kbj23biLKWZkGGh6NmWfxhxn2Yk+pbUl2bzYLH929EfnOdKIMBAcsQ/H7cHCxJ9b4zpbdMDhu+d3wDTjdViTN6dsv8ctgsPJDqW5KyxeXA8xfW4UjjlVu5I0IMSoH/zZyJhweM8WnSsHNO/KrkCxxsuijO6Mlj+HbKDDyaPM0nhpN34S9XP8eBpjNeGTx4LI2bjoeT5vu0bMRTHp9Vf4pDzQdunUeMMSV8GlYlPOgzY1/DpzjavMkLgwUPDkODp2JR3PfAEukPN5RSXGj9BGdb/yWYUwLgVi5Iqv8UTI16ASpGek4UpRTXO9egtO3tnvwbcUakYSKGR/weqq9xSeeeI6JAu3YW4c03dkq2Z1mC0FB/vPvXRxAaKi1n4uCxS3j1jW0A+p6472IwBAEBerz31mpERUhLSjuefx0vvLUFPIWkNuksQ+Cn1+Ifr60SXa7prXPl1XjmnY3geSqp4BPLEGg1avzzhZVIi/eetQ4ARTfq8MS76+F08ZIZapbFP55ZhuwkaY5CeV0THv7bl7C7XJJqtjDEPfn9/dtLMDJFmqNQ0dKG+//5OcwOh2QGIcBf71+ISWnSHIWbHZ1Y9uln6LBaJRX5InAnUP9h/lzcN1Cao9DY3Y3F6z9Fk9ksmQEAr0+bhWUZ0hK0W20WLNm+BjXdnT4VK3t19HQ8nCHNUehy2LBi/8eoMLX6xHg+Zwq+myFeE8Qji8uBb538F8o76n1qNvdE2kT8IGOaJFsH78KPLnyAoo5Kn3qjLIsfix8OXCDJGXHxHH5V9nfkt5f7xJgeOQo/SHtQEoOnPN6//ldc6Dgv+fwAMCxoOL6b8pQkZ4RSio017yK/Xbgj7t0iSPfPw+qkn0rKYaGU4njTu7jYsdEHAkGUfggWxL0BVqIzUt72Lq50fCCZATAI0mRgXMwHX5sz4sv8fW/7bi8V5FfirTd9a6nMcRQtrSa88PwXcDq9l+stvVSHX721wx0Fkfib5niKri4rnntpHaw2h1f7a1XN+PkftvU01JMG4XgKs9WOH/xmPUxm751Zqxra8eyft4CT6IR4GDaHE0+/vUFS19+6ti48/d5myU6Ih+FwcXj6vU1oaBfu4OlRi8mM73ywETanNCcEcC8ZcDzF0//egqqWDq/2nVYbvvXJBpjt0pwQD4PnKX7w5TZcbmzxam92OPDIuvWSnRDAHYWjlOLZ7TtRVF/v1d7OufDI1g2SnZBbDAA/ObgXp2vuXia6Uy6ex7f2bfDZCQGAl0/vx6Gb172/J0rx5PENPjshAPB60SHsqC6TxPjJhfU+OyEA8I+rR7GpukCS7W9L1/vshADA+psnsf7mSUm2f69Yj/z2Mp8Z+xvP4MubeyXZbqxd77MTAgAXOs5jY+16SbaHmzb46IQAAMUVUwG210qb9C92bPTJCXETKOqtF3Go4Q1J9lWmzT46IQDAo8NRjgtNP/XxdV+P7jkivfTPfx6RVf2T5ygqKppw7Oglr7YffnpcVvdTjqe4WduOfYfLvTM2noKL873hN8dTNLWasO1QiVfbj3adhcPpkuzoeMTzFB0mK9YfKvJqu+ZQPix2h8/ltXlKYbY58OkR74P52pNF6OhJMPWVYXe58K8j3gfPdfkX0WiSPnl7ROHeUv3+sTNebTeXlqO6w/fJ27Mt+E8nTnm13XntCi63tcguqf7WmRNebQ7evI6iln
rZjN9fOOr1O3mqqQqnm6pkM14vOuT1+1LSUYvDjZd9dkI8+mP5Prh48QebG92N2NtQILtL7D+u7YWdE+4OCwCNtlbsqj8uuzXblzf3wOKyitp0Obuwt2G3TAKwt2EPupxdojY2zozDTdIclrtFca5tL9odd+em9ZaLt+Nsy79kM66Y9qPNfvfupd7iqRNlrX+SyeDRYDmMdpv38f3r1j1HpEfXrzWivKwOvMxS5wxDsGnjBVGb2vp2nC+sks0gBNiw7YLoQNvc1o0j56/JZlAKrNtTIPr6LrMNu06Vyy49z1OKdYcK4XIJD7QWuxObTpXIZnA8xYaTF+/aftxbDheHL04Xye4jwvEUW/LL0GUVjiBxPI81Zwt9dthuvZ5S7Cm/ipZus6ANpRQf5RfIrozLU4qjNypR03l3wnRvfVRUILtfEE8pztfX4kqreHTn3+UXZDexowAutTejsEU8uvPxlfOKmvHVmDtxqrFS1ObzG2cVMVrtZhxpvHu7a29tqjmtiGHmbDjYWCxqs6fhhKJmfA7eiUNN50Rtjrccle1MAe4GdsdbjoraFLQfgYt6jyYLi+Bc2z5Ri2umQ3clpfpGYFDasUXUpsFyFA6+TdRGnMHiRteXsl//VemeI9KjrVsLwLLKmsuVldXi+nVhr3nrriJFzeUoBSqrW1F66e7dFrcYhy4q7izd0NKFsxeFPfNtJ0rh4pV12Gw3WXG4UDiMvvvCJVhEnAgpMtsc2JMvPJgfKL3m3tKrQE4Xh635wlGq49er0NDVLXhciigF1heUCh4/X1uLirY2RR87Qwg+KxKelMqam1DU1KC4Gd+aEuFI2I3ONpysr1bcxO6TcuFIWIPFhAO1VxU24yP45KrwQ0eHw4KdtRcVMRgQrK0QjoRZXHbsqD2niEFAsK5aOErl5F3YVX9CdlTHo611RwQdcZ7yONi0X6EjQnGw6QB4gXtBKcWplh2yz+9m8DjbugcuXnhMKm7fpKi7MQWP8s5dcPLCEaSKzs+gZNqm4FDTvQsOTvyh4+vWPUekR0WFVeA45Xm7ZaU1gscKS27KjlR4xDAEF8trBY8XXa5RNFkAAMsyKL4iwrgm7AhJlYplRM9TcKNOUddhwJ24Wlghwqis87nS6Z0ixH0eIeVXK2fwlOJCtfD36nxNnewogkccpTh7U5hxrr5WYY9QN+N0rXCeyIUm5d8rjlKcbqgWPF7UWqt4YuUoxdlmYUZZRz1cChwEwN0or7BdmFHR3QCbyKQoRRQUl021gktAddZmdLu853J5U521CWau78m13dGODmeHYkaHsx0dzrvLEwDuZZlWh/ccKG+yct1oc9y9rR8AOOpCi/2KIocKAFzUhjb7DcHj7bYiQGCHjFRRONFh977E/3XqniPSo+5uu3cjL2JZApNJ+Anb1K3s6RtwP7mKnadThC9VBOLvtbPbKjnRVkiUUnRbhO+5yWpX1HUYcEepTCLLJiabXfaSyS0GBTpFoiomm/LvFQC0izHstn6pEdAhcq+67HbB4mG+qNMmwnDYFC0D/Oc8wve8y9k/n0e3UzjMb3Ip/w0CgIPn4OBcfR7r7ieG2LnMXnI7fGP07dBYOOWOjkdmAYZVwXLJXefi+o5uOrj+Y9j5vhkcdYCHMufTIxfvPZH/69Q9R6RHcouS9RalgEotvMVLblGy2xgA1CKFx8SO+aKvmkEIgUolfD/UDCMrcfhOhth7VTGM3C7ft0kjxuiHzxyAaP8YoUq3/ctgFDtt7vOIX4fSaAUAwUqo3o75IrEol1pmqfK+JOT8qfqRIXRPlEbybmMI1OGQW9a9LwndE9bnzrbCEqonwvTjdTAQYPTjdRAiryDjV6V7jkiPQkLl92bxiOcpgoOFzxMaYlQ8ufI8j+Ag4UqVYcF+shMKbzEoRUigCCPQT/GyCaUUIf7CjJAAP9k9NDwihCDEKMwIFeFLFcsQhIox/AyKl8oYQhDhL/K9MhjAKczZYQhBuFGYEa73U5S7Ab
h9vgiDCEOk/LovChOp5BrWT4wQrchn7mOTOyEFqHWCyaghWvl9nnpLTVQwCHTtDVL3T+0mAgJ/dd/3JKCfGADgL3Aug8pfUe5GbxlVQX3+XcP4gemnyd2g6ru1ByEs1Iy86r536r9Z+r0v3XNEejR9eqZiJ0GtZjFmTKowY1KG4iUNQggmjEkTZowZpHji43mKKaOEC1zNGDlQ8bIJx1PMGDFQ8PisvHTFkyvH85g1VPg6ZmWn98t1zM4Rvo7ZmenKPw9KcV+WyL1KT1O8NMNTigUZwqWmpyUPgLofnpAXDswQPDYhNgkGH7v63ikGBItShCusjgiPR5CPHXfvYhCCRYnCxdmygmMRIdJvRopYwmBuXLbg8WS/SMQbwhRNryxhMC0qW/C7E6ELQaoxXtEkzoDBqNAh0Ag0mzOqjBjknwFGwVTEgMEg/wwYVX07Z2pGi0EBIxQxCAhi9akI0vRdiJEQglT/ySCKohYEQep4BGuSBC3ijPcpZAA6NhLBWt+qEH/VuueI9GjmrCGye60A7qWd6TOyYDQKD3LTJmZAr5M/0LIswaSx6QgV6Xo7aUQqAv3lV85jGILROUmIFSn1PiYzCZEh8gdahhDkpMYgNU64W2zegBgMiAyRPQQSAqRFh4lWV82IicCQ+CjZESQCIDY4AGNSEwRtEkOCMHZAgqJk0lA/PaYOTBE8Hmk0YkZaqiKGUaMRra4apNNjYXqGIoaGZbF44GDB435qDVakZStiEAKsTBsieFzLqvBA6lBFUUNKKVal5gkeZwmDVcmjFOW7cJTHyqQRgscJIVieME72+T2MpfFjRW3mx0xSlIDJg8e86ImiNlMjpguWWpfKmBYxXdRmdOgcRQwKijFh94naDAlaBArvRS3FKNnBS0QfKpIClitkEAwIvB+kH5eS+kP3HJEe+fvrMX1GluzttRzHY+Ei8fLSOp0a82ZlK2BQLJ4rPAAC7vyNpTNy5Nd74CmWzRRnMAzB/dPyZEeQeEqxcpo4gxCCBybnyR4CKQUemOS90dPqcXmKIhYPjsvz+nmuHpkre1mDIQQPjMj1ul7/UJ4yxv05Q6BTizvJDw2Rz2AJwdJBmQjw0shv9SBljLlJgxDmpQHeqpQ82VvcWUIwOToFsX7irRaWJMh3dljCYGhIAlL8xfslzY4eCg2jluXuMCBINUYjI0C8cdz48KEwqvSyoiIMCKJ0YcgOEm8fkBOUi0BVoCwGAUGgOgjZQbmidgOMQxCiiQSRNeUR6Fg/ZAWKO20RugyEalNkM1REh/SAGaJWAZpUhOjyZEdFCFgk+C+U9dqvUvcckV564juTERERIKueyIOrxyItLcqr3aOrxiEuJlhWjsXS+UORk+W9t8nq+SORmhjuM4MQYN6kTIzN897bZOW0XOSkxvjsVDGEYNrwNEwf7r23yeLRWRgzKMHnAZ0hBOMzkrBglPfw4305AzE9M8VnBksIhiXHYtUY4fC5R1PSB2BRTobPwyxLCAZHReBbY4d5tR0VH4fVuTmyGANCQvD0mNFebbMjo/Dk0JE+EtyMWP8APDd6vFfb1KBQ/HjoBFmMcL0ffjFyilfbGL9A/GKo+BO0ECNQo8Mrw2d7tQ3R+uGlnAU+MxgQGFgNXsn1Pln4qXT4RdYKn30qAgINq8aLWSu9OuoaRo3/HfiIjwQ3gyUsfjzoEa8MlrB4YsD3ZDsiTyR/12vSK0MYrEj4UU/emY/jIoAV8T+E2ksfGEIIpkX9FCzRyLgWimnRL0DDes8vygv7JViih5zpOzf8xW9cfghwzxG5TYGBBrzx1iqEhvr75IwsXDQUj31LPPzokdFPi7deW4HoqCCfJvHZ07Lw9Le9D7IAoNep8c4LS5EUG+oTY8rIdPzk8RmS8g00ahXefmYRBiVESJ7ECYAxWYl49fE5kt6XimXw1rfmI3dAjOToCyHA0NRYvPGtuZJ2rDAMwe9X3YexaQmShw6GEGTGReLPjyyARuW9KychBK/On4
Hpg4SXV/pipEaE4u8PLoLeS6TCw3hx2hQsHCycg3GnWEKQGByEfy9fAn8vkQqPnhszHg9m5fjEiDb6Y82i5QjWS1syfCp7NL6bJd3hYQlBmN4Pn85eiQiDtCTOR9JH4IdZ0n6zHkagRoePJz/gNRri0eKEPPw4cxYAaVMfSwj8VFq8P+YhJBmFly17a2pkNp7PWAIicepjQKBj1Xgz9zGk+ktrCjk8JBM/GvgQGIkUBgRqRoWfD34C6f5JkhiDAjLwvZSnwRJWEsPj6Hwv5WkMCpD2nY83pGN10gtQEZWkqIX7ahksi/8BBgZ4fxgAgDBdCubG/RYs0UqMjBAABJMin0WK/yRJDKMmEWOi/wYV4ycxMuK+n1kh//uNjIYA97rv9qmODgv+9Mc9OHb0MgDcVYSMYQh4niIoyICHHxmPBQuH+pwsaOq24Y/vH8DBo+V9dsj1MAL8dVi9YjRWLBzuM8NsdeCPnxzCrqNl4Hj+rkRZTyt6o0GLB+cNx8MLR/kc4bA5nPjjuqPYcrQEzp6S7b0xDHHX2jDo1Fg1fSieWDDG5y2tDqcLf9p+Al8eL4bD6bqLQXr+t06jwsrxOXhm3jiftxi7OB5/2XcKa04UwOJwgpDbmxJ67opaxWL5yCF49r4J0KmltwYH3EtS7x09gw9PXUC33XHr/vcWIe5tk4tyBuOFWZPgp5HeGhxwf48+OHce7505i06bvU8GQwgYQjBv0EC8NG0KAnS+JW9SSvHxxUL86dwptFmtYAm5aznF45zOSUnHK5OmIlRkJ4uQvrhSjLfyj6HJahZkUEoxIyENr42ZjkiD73lLWytL8UbxIdRZuvpksD33b1J0Cl4dPluyE9Jb++vK8FbZXtRY2sES5q5qqJ6/jQ4bgF9kz0OiMdRnxsnmS/jzle2otjSLMnKCkvBcxmIMMHqP3t6poo7L+Mf1Daiy1IMBc1fOBQsGHHgM9E/C91JXINUorTN1b13rvoq11WtQbanqk+H5W6IhEasSViPVKJy4L6Ray3Vsq/sHblquiDIidQmYG/NtpBiFc46E1Gq/gaONf0C9tRgE7F15HZ6/BanjMS7iaSQaR/nM6HZWobj5N2i2nRFlGFRxyAz9EWL8pHV07i/5Mn/fc0RE1NJiws4dhdi1qxjt7Wa4nBz0eg3S0qOwePFwjBmbqijBFQDaO8zYse8iduwtRktrN5xODjqdGinJEVgyLw8Tx6RDLVKbRIo6TVZsP1KCLQcuorG1y83QqpEcF4pls/IwdVQ6tBrfJtU7ZbLYsONkGdYfLkZ9SxccThe0GjUSo4KxfGoOZo8cBJ1W2Y4Is82BHefK8cXxYtS0dMDudEGrViE+PAgrxmdj7vAM+Ol8m7jvlMXhxM7CS/j8VBEqW9phc7gZscEBWDk6GwuGDoa/Xlr0QEg2pwu7Si9j7bkiXG9ug9XphFalQlSAESuGZWNJXiaC9Mp2dthdLuy9eg0f5xfgSnMLLE4nNCoWkUYjVmQPwfKsLIT6Kdu+7OQ47LtxHR8XF6CkudHNYFmEGfywIiML92cOQYSfsm2mLp7HoZrr+Kg8H4XN9TA7HVAzLEJ1BixLy8Kq9BzEGJWNDTylOFpfgU+unsf5lpswO51QMQxCtHosShqCVSl5iDcGKWJQSnG6pQKf3TiLcy03YHG5HdFAjQFzY4dgRdIIWQ7InYyijhtYf/MkzrVehcVlByEEASo9pkblYHHcaCQbIxUzLpsqsaPuKM63l8HisoEQwI/VY2xYLu6LHo9ko3jeiRRVmm/gUNMBFHYUwNpTlVXP6pEblIcpEdOQ5Od9+dib6q03cKZ1D0o7T8HGmUEBaFk9BvoPw+jQOYg3pCvejdZmr0Rpx1ZcMx2CnTOBgkLNGBBvGIYhwYsRrRfetSRV3c4qVHatu1W6nYKHijEgTDccAwJXIUw3sl8KHvqqe46IgMwmG/ZvL8SlizXo7rJCo1UjONSIyXOGIDM3weuHRSn1amOx2HFgbw
lKi2tgMtmgVrMICjZg0tTByB2a2C8Mm82JA0fKUXTxJrpMNqhYBkGBekwcPxDD85K8RjWkMOwOFw6duoxzxVUwddvAMAwCA/SYOCIVo/OSvRaAk8JwujgcOn8Vpy5WoqvbBhAgyKjH+NwBmJCX4jVyIonBcThSXIHjJTfQ0e0e0AL9dBg7OAlTc1O9Rk6kMDiex7HyGzhUUoF2swU8BQINWoxOT8TM7DRovUROpDB4nuLE9SrsLbuKdosVLo5HoF6HUcnxmJOVDr1G3MmTwqCU4nTVTey8dAWtZitcHIcAnRbD4mOxIHOQ1+iMVMa5ulpsu3IJzRYznByHAK0OeVHRWDRosNdkVqmMgqZ6bLlWhkZLN+wchwCNFtnhUViWnoVArbiTJ4UBACWtDdh44yLqLSbYXC4EaLQYHByJZSnZCNWJO3lSGZc7G7Gpqgj11k5YXU4Y1VqkB0RgaVIewnXiTp5naPfGudHdhG01F1BvbYeFc8BPpUWyXwQWxA1HpF48AiSVUWtpxp6GM6i3tcDK2WFgdYjVh2NW9ChE6cQdMKmMZnsLjjSfQL21EVbOCh2rQ6QuHJPCxyNKJ574K5XR4WjFmbbDaLTVwMZZoWF1CNGEY1TIZETqYvuFYXa14WLHHrTYK2HnzFAzOvirw5EVOBPhOu8OmKQ5hGtHRdd2tNuvwMl3gyVaGFQRSPa/DyE64W38vuieI3KHaqtasf7jE9i/rRAupwuEuJc9CAEYhgHH8UgYEI5FD4zBrEV5YGVEORrqO7Dus9PYs7MYdpvz1tJKb0ZMXDAWLx+BuQuGyopyNDWbsG7TOWzfXQyr1XGLAbi3D3Mcj6jIACxZMAwL5+XJinK0dZjx+bbz2LKvGN0WO1iG3Kq14WGEhxqxbHYels7Jg15GBKKj24rPdudjw8EidJlttzMYBhzPIyTAgOXTc7FyZh6MMiIQJqsdaw8V4MsjhWgzWe9guP//IKMeyydk48GpQxHo53sEwmJ3YO3xQqw9XojmLnOfDH+9FivGZOOhiUNlFVCzOV347GwR1pwuQF2nCSzDgOfd9Uc9Swl+Wg2WD8vCY2OHISLA9wiEg+PweUExPjpXgOqOTqh6PoPeDL1ajWXZmfj2qGGIDfT9t+jieXxRehH/LszH9fY2qHqWCjwMnlJoWBZLMjLxxNDhSArqu6iTmHhKsf5KCT4sOY9LbS23MTzLOGqGxaLUDHwneyRSg32PQFBKsaWyFB+Wn8PFtobbGT0LeAwhmJeUge8MHo2MYPEJUIixp7Yc/752GoVtNWAJA/4OBggwKyYD30ofiyHBMT4zAOBoYznW3DiGgvbKHgYFBb1t2/HEiAysHjABOcGJshjn28qx/uYhFHS4l0Foz39Iz3948BgRkoHl8VORE+T7MgsAlHVdxva63SjqLBFkZAVkYG70TGQHCdd/EdMN82UcbNqG0s4L8CwG38lI8cvA5Ih5yAqUlk9ypxqsV3CudR2umI7DvdhMQMHD3ZeXAQ8OMfrBGBayBOn+42VFOdrtV1HevgZV3ftunRs9/03AgIJDiHYQBgatQpJxlqJIyj1HpJcKzlzHKz9cC6eDA8cJ7yP35ASMnJCOn72+Ajq99Am2pPgmfvHjL2C1OcCLNM7zfKY5eYn45W+WwU+k5siduny1AT/+xTp0d9u8Ns4jBMgYGI3fvrIMgQHSa4pUVDfjh6+tR3unRQKDIDUxDG/9fBlCRarJ3qnqhnY88/oGNLabvDIYQpAQFYw//3gpIkOlr//XtXbhqT9vxM3mDq9bcxlCEB3ij78+sxQJEUGSGc1d3fju3zfhekOrVwZLCEL8DfjHd5ciJUr65NdutuK7azahpK7RayE8lhAEGnT4x8NLMDha+uTXZbPhe+u34vxNd5NDMQxLCAwaDf65YhHy4qRPfmaHA0/v3IZj1ZWSGFqVCn+ftwhj44Xrs9wpm8uFHx7ajt2VV2/lDIkxVAyL96YvxJSEAZIZDo7DC6d3Yt
ONEjAgouXoWUJACMGfxi/EnATpT5gc5fFa4S58fuNCn/k9tzPck+5vhi7AokTpCcQ85fHny3uw5sYxCdfhdlB+krkASxOk5zFQSrGmajfWVO3pMw+jtzzHHx+wAMvipkie/Cil2NmwF2ur10tmLImdjyWx832aYE+07MWGmn/dcjiE5J7IeUyJmI950at8qgxd2rEPu+vfhtv5EK4T4mHkBM3DtKinfCotX919ACcaXoLbiRKrRcIA4JHsfx9GRvwMrMyKsfcckR6VFFThhSf+BY6jkvtkMAxB9ohk/Ordh0T7xnh05VI9fvTUx3C5OMmddRmGYNDgGLzxx9XQaL1HLW5UteCpH34Cu8PlE2NAUjj+9OYDMEhwqmoaOvD4T9bAbLVLZrAMQWxUEP7+2wfhLyGi0NhmwiMvf4rObqvkiqYsQxAe7I+PfvkAggO8RxTaTBY8+Lu1aO7s9okRZNRj7QsPIiLIe0Sh02LDg3/8DDVtnT4xjDotPv/hA4gL9Z7waLY78MAHX+B6U6vkuhosIdCpVfjiu6uQEu7d4bE5XXho7TpcrG+UzGAIgYZl8flDK5EZ5d3hcXIcHt2yAWdqpXeFZggBSwjWLl2BYdHi4W7AvTT2nX2bcai6QnKvGgK3M/3JnOUYF+v9aZ+nFD86vhXbqsokb5f1THXvT1qGGfHen/YppXi5YAfWVeb7vCX3zRFLMC9e2tP+Hy7txKc3jvtIAH6WtRiL44WLrPXWmsrd+KRqt8+M7wxYiKXx0nYH7qzfi0+r1/nMWBI7H0vjpG2tPtmyH+tqPvCZMTl8LhbGPiTJtrzzIHbU/d5HAkF20GzMiPofSU5VjfkojtY/3/O/pH+DE40zMDbyFRAZ7TZ8mb//n92+azbZ8PIza8Dz0p0QwL0WX3T2Bta8f8irrd3uxM+f+9wnJ8TDKC+twz/+dtCrrcvF4YUX1/vkhHgYFZXN+NNf90uyff63G2HxwQkB3OXNaxo68Nu/7vFqSynF83/a6pMT4mE0t5vw4ns7Jdn/5J87fHJCPIyObiv+9+/bJH1XXvx8j09OiIfRbbPj+//cLInx6vaDuOaDEwK429PbXC5895PNcIlE/zz63cGjKPbBCQHcE7KT4/D4l5tgd/XdGba33jl9EqdrbvpUNI6nFByl+PbWTTDZvXfLfa/oLA5WX/epYR6FOwL6xN5NaLV67wD78eUL2OqDE+JhAMD3j21CnbnLq/2m6iJ8KcMJIQBeOL8ZN0ytXm0PNpTIckIA4Hclm3G5q86r3fm2cllOCAD8vWILSjsrvNpdNl2T5YQAwMbabSjuKPFqV2upxPqaf8piHG7egaKOM17t2uw12FX3pgwCRXHHLpR27vNqaXY24njDz269zhdGVfdeXOlcL+P9+aavxRH5y1/+gqSkJOh0OowaNQpnz579ypn7txfCYvZtYvWIUoqtn5+B3SbecvnwgTJ0dHhfxhBi7NxaAHO3eDvv46euobG5SxaD5yn2HSxFe4d4i+pzxZWoqm2T1XeF5ymOnLmK+qZOUbuL1+pRfqNRFoPjKc6WVqOiVnygvVLTjPNXamQzSiobUFrVKGp3s6UDh0srZDOuN7bhzNWbonZNpm5sL74kq+Irx1PUdnTh8BXxwbzTasO6ohJ5DErRYrZgV/kVUTuL04mPiwtkFTHlKYXJbsfmy+Widg6Owz9LzstiUFBYXS58efmi1/fy97LTMgjuYd9Feay9WiBuRyn+eeWkrCqpFO5rWVtxzqvtxxVHZZeeJ4Tgi8pTXu3W3zwku68LCwYba454tdtVv082gwGDnQ3eJ/Cjzbtk99khIDjUtM2rXUH7VgUl9AnOta7z+mBzrWsjKOUgt5xweccaUOr9wUaJvnJH5IsvvsCzzz6Ll19+Gfn5+cjJycGsWbPQ1NT0lTEppdiyVt7A4ZGl246je8W95s3rzylK5nE6Xdi3R3wQ3LT1guyS8ID7qW+nF8aGXQWKuukyDMGWfcWiNu
v2FypisAzBhoNF4oxjxYoZXxwpFGecKlb0ebAMwWcnxBnrL3h/WhNlEIJPT4szNl4sg5OT37OCIQQfXxBnbLtyCRanuDPvTR8V5osOtHsrr6LNZpV9fgqKj8ryRRssHq2rQL3FJJvBU4pPr+TDIXK/81tv4rqpRfaUxFGK9ZUFsLgcgjaXu+pQ2lnjU+TodgaP3XWF6HIK3+9aazMKOq7I7uvCgcfJlmK02oUfbNodHTjfXiCbwYPHxc4yNNqE5yCzqxsXOk7IZlBQVFmuodZaJWjj4K0o6djTkzQqj9LqqEadtUzQgqMOXO3cqIABWFyNqLd4j+4o0VfuiLz99tt44okn8Nhjj2Hw4MF47733YDAY8OGHH35lzLLCatTdbFPU6ZYwBDvWCT9hVFxrxLUrjT4t+/SlbZvyBY/V1rWjqKRGVjTEI55SbN4u/DTW2m7GyXx5T/i3GDzF5r1Fgvei22LH/nNXFDE4nmJbr6Jpd8rudGHb6VLFjN3nL8Ns63sw53mK9adLFDMOl1ag1SS8HPDFuWJF/W84SnH6xk3UtgsP5p8ViDuO3sRTiov1jbjS3CJos/ZikaLusBRARUc7ChrqhRmXihQ1sAOABnM3TtQJTxifXStU1IgPADocNhyovSp4fF1lPlgZ6/C9ZeWc2F0rPCltrTmvmOGiHHbXFQoe31N/RlGXW4/2NQpHzY82y4sc9RYDBoebhZeo8tuPg6dKmsu5GadbhZffr3Qdg5OKR8S9iYBFcbvwsnWt+TgcvPdlQW+Ma12bFJ3Dm75SR8ThcODChQuYPv0/fR0YhsH06dNx6tTdIT673Y6urq7b/slR3c022e/ZI8pT0fPU1rYrZ1Cgoa5D8Hh9g/hyh1Q1t5gEdwzVN3cqctg8MpltsFj7nsAb24T5vsjmcKG9q+8JvLXLArtT2cABuCusNnV093nMZLPDZPOes+BNPKWob+/7u+3kODSZxJfSpOqmiCNys6NTQU/V/6hahFHV0dE/jM4OwWMVHW2KnDbAnWNR1SXMuN7pW65OX2IJQbVJmFFhar2rGqqvUhEGNWbhcanarJzBEgY1FuFxsc7WoqhbL+BeAqq3Ci/DNtqbZC+ZeERB0WhrFjzeYm/0addLX+LBo8XeIHi8w1EPRmbzOo8oOLQ5awWPdztrZTfI683oclQrOoc3faWOSEtLCziOQ2Tk7dX8IiMj0dBw9wf029/+FoGBgbf+xcf7XiIYAKwWh+zOsL1lE5hYAcBmET7mixwiSahWEb6vsgnku1i95MH4IiFHxGrvR4bQddj7715ZBCIiln5kmAXOZXH0370yC9x3F8/DJbIU4RPDIXxPrK7+uZZuEYalHxgMITA7xRjKP3cCArPIecwu5Q6u+zxiDGVP34A7SmURea9Wl02xI8JTCgsn/F5tnF328pJHFBRWEYadt/XLA5qVE458OqkVvjbg60sOTvjBxcVb+oXhov3zcCSkb9SumZ/+9Kfo7Oy89e/mTfGkPiHpDZp++RKJ1RLRGZSVEvdIq1UJ5hzofahlIiYici69TlnZ9d4yGPouPKZXWNq9t/wErsOgsLS7JIa2/xhGgfdr8FIh1SeGwPtVMQzUPvb78ZUBQFKzPkkMjXBBOz+18s+EpxRG9VfLoKDwUwmfx6hWVtLfI1GGSjmDgMCgEr5XBpVOcbSCIQQGVvi96litYgYBgYEVrrGkZXT9MH0DBpFuumqih9wE0t7SssIlB1SMAVCQH+KRmpFeK0qOvlJHJCwsDCzLorHx9p0IjY2NiIq6u+mSVqtFQEDAbf/kKC5JWudKMTEMQbzIeeLjlfWFANyFx2LihFsyx8b6Xl2yL0VGBgo6O7GRgYrX2AEgKEAPg4BTExnqD7VK+VfNoFMj2L/vwSPU3+C1zLkUqVUsIgL7/mH767QINCgfzBlCEBPcdy0RNcsiOtD35m13igBICA0SPJ4UHNwvA21SiPB3NCU4pF++W8nBwoy0oFDFDApgQKAwIz
0wXHGOCEcpkgOEf+up/mH9kL/BI0mkX02SMUIxg6McEv2Ex8U4fYRiJ4GnFHEG4Ro10boo5cs/IIjWC/fdidBFg1M4gTNgEK4T7nAcoo0DL1pYzLsIWIRohFcOAtQJihJVPYwAtfLePmL6Sh0RjUaDYcOG4cCBA7f+xvM8Dhw4gDFjxnxl3EFD4hCfHK5oeYbnKeatEG5FnjQgHIMGxyje0bJgsXA54OjIQAzLTVTEIARYNC9P8HhwoB8mjExVvGtm8axcwR1ERr0WM0cPUryjZeGkIYJNBjVqFRaOzVTMmDcyQzC6wjAEK8ZkK5r4WIZgRnYago3CT2OrRihkEIJxqYmiDs0DQ7Nlnx9wO1N5sdFICRWeXB8ckqMof4MASA8NQ3aE8ITxQIZyRpwxAKNjhKu4rkrLVZwjEqozYEpsiuDxFclDFedv+Kk0mBmbIXh8YdxwxQwNo8KsaOEqrrOiR8neaeIRIQQzIoULp00MH6vc2QGPSeHjBY8PDR4PFVHWCJQHjzGhwh1v0/zHQcMoazxJwSEn+D7B4zF+46BllT3QUnBIC1yi6Bze9JUvzTz77LP4xz/+gY8++gjl5eV48sknYTab8dhjj31lTEIIFq4apWh5xhigx7jpg0VtFi4drmhHi1anxrSZ4tUQFy8YqojBsgzmzBRvY710dp6inSCgwILp4hPb8mm5inebLJkqXsZ62YRsxYzlE8WvY9noIYp2SnE8xcpx4texZGiWIieaoxQPjMoVtVmYlQGNgs7RPKV4aJg4Y27aQPh7aZQnJgrgkZw80S3y0xNTEaZXNpg/kjlU1PEbG5WEeL9A2VMfQwgeSh8KNSN8v7ODY5EeECGbwRKCFclDoWOFo4Ip/pHICUqUXUeEJQzui80TXUaK0oVieEiG/DoihMGEsBwEaYSd6EB1AEaGDFNURyQnMAvhWuHokZ41YHjIBNkMAgbJfgMRpRPuRKxmdBgSNBtENoMgXJuMKN1AQRuGqJAWsFQ2AyAwqmIRqR8u8/XS9JU7IitXrsSbb76Jl156Cbm5uSgsLMTu3bvvSmDtb02dm4OAIIPsaMLiB0dD46Vp3MTJGQgNM8piEOKOhui95JqMHpmCmOggWU/6hBDMmTHEa7+ZoVnxSEkMl8VgGIKpYwciwksvmMEDopCdFiObMTYnGYlR4p59SnQoxg5OlMVgGYKhqbHISBD/XsaEBGBGTpqsiAXLEAyKCcfwAeJly0ONBizKzZTNSAwNwsS0JFE7f60WD+TlyJqSWEIQ5W/EzIGponZalQqP5g6VxWAIQYhejwXp4n1aVAyD72RLKzt+FwMERo0Wy9LFHwYYQvC9zDGyFgMYEGgYFven5oraEULwxMBxshgEAEMYPDDA+314eMBERYmeKxK9R7KXx02RX+ODUiyJm+zV7r7oGbKXZ3jwmBs9y6vdxLA5ss4PABQ8pkZ4LyOfFzy/p3y6778SCoqRoSu81rJKDVwEhqhlMQCKjODViuplSdHXkqz6/e9/H1VVVbDb7Thz5gxGjZLePEmuDH5avPbuQ2BVrE+OAmEIRk0aiPsfn+TVVqNV4bdvrYJGI5xw2pcYhiAnLxGPfWeyV1sVy+D115ZBr9f4zBiUHoXvf3eqV1tCCF5/YTH8jTqfJnGGIUiOC8Xz35spyf5335+P0EA/nxgsQxAbHohXvyttUPjNY/chJjTQZ0ZYoBFvPDFPkv2rK2YiJTLEZ0aQnx5//vZCST/qX8ydgsyYCJ9yE1hCYNRq8PeHFoNlvP+0n5syHiMS4nxyeDxN6T5cuRhalffQ9TMjx2BSYrJPDIYQqBkW/1q4FH4SIiqPDxmBuckDfRpmGRCwDMGHs5YgWOe9MeSqtFysSMn2iUF6/s/7k5Yi0uA972d+/BA8kurb2Oh5P2+PWIp4P+8h+ImRGfh2irReLnfqpSFLkep/d27fncoNTsfjA6T1crlT309bhkEB3nv/pBiT8WjSA7IYq+KXITPQeyPCaH
08ViU8KYsxM3KJpC68QZoYzI/5GTzddn3RsJDFyAj0Pr4bVOGYGP16z3KWb9/gAf7zkRqw2Kf3JUffqF0z/a2BWbH43fuPQGfQgPGyS4D0TCoTpg3Gz19fAVbiroLklAi89e5DMPrrvDoKnglo+KgUvPr7FVBLaKoHAHGxIfjTmw8gWEKE51aH3yHxeOPXK6CVuGMlKjwA7/36AYSH+kuaNAiAjNQo/PmVlYK7TO5UWJAf/vGL+xEbEeSV4W5KBgyIC8P7P1uJAAlN9QAg0E+Hfz67HAOiQyUtbxBCkBARjH//70qE+EsL8fvpNPjnk8uRERch6afNEIKoIH98/P2ViAqSloiqU6vwwcNLMDTRHT2Rwggz+mHNt1ciISRIEkPDsvj78oUYn5zYcw5xe5YQBOp1WLt6BdLCpSWEqxgGf507HzMGpN56n94YRo0Gny5ZjiEiuSG9xRCCd6bMxZK0TMkMnUqFj+Ysx4go4dB5bxFC8OtRc/BAWp5EBgMNq8IHk5dhYoz0Dr8/GTIT304bc+t9emOwhME7o5ZhRqz0Dr/fTZuO76ROu3UObwwGBL/MXob7YoVzze7Usrgpt5wRKQwCgmfSlmNezDjJjOmRk/FY0oM9DezFGZ7jDyQsw9xoaQ9OADA8ZAIeSHiqhyCNMTtqOWZHLZfMSAsYh/mxvwAD1mvND88Sy4jQ5Zgc8R3JjGjDaEyMfhMs0UhmpAUswciIF77yaAjw/3j3XY+a6juwee1p7N54ARazHayKAc9TEOJeZ+M4HgOz4rBw1ShMnjMEjISnyTvV0mLClvXnsX1zPrq7bWBZ5lYuASFuRkpaJBYtG4EZs4aAlbGLpL3DjE1b87F5RyG6uqxuBk8B8h9GUkIoli4chtkzhkh2dHqry2TFht2F2Li7AG2dlj6vIz46GMvuy8P8adnQelm+6kvdFjs2HCrCl/sK0dze/R8GdUdZXByPmPAArJiehyVTsqGTsf3Xandi3bFifHG4EHVtXVAxzK2kRkIIOJ5HZLAR90/KxbIJ2TDqhbckCsnudGH96YtYe7wQ1S0dtzEYQuDieYQF+GHVuBysHJsja8eNw8VhU0EpPj5dgIrmNqgY972ivRghBj3uH5mNB0flIsTP93wJF89j08UyfHy+AJeaWvpkBOq0WJWXjYeG5yLC6L1D8Z3iKcXWy+X4d1EBihsbeian2xlGjQarsrLxSE4eYvx9/71TSrGj4jL+XZqP8421/2HQHgblYVCpsWLgEHwraxgSAoJkMfbVXMW/Lp3D6cbqW86Ch8FRHlpWhWUpQ/DYoBEYECBvd93Rhqv4+NpZnGi63uP0EPCUgu1hqBkWC+Kz8UjaKKQFeO+C3JfOtlzDZ5UncaL58q1uxDylYMh/WDOjs/FA0ngMDIyRxbjYcR2bao7gZOvFPhmEEEwIy8GSuMmSIiF96Vp3BXbX78eZtgugoCAgoKBgQG714RkenIvZUdMxKCBdFqPGcgNHmneioOMkeMqDgAEF38MCAIqMgDxMCp+DdH/xnDwhNdsqcaFtI8q7DoKjLjC3GAwoKCh4JPkNw9CQxRhglLcc2eWowuWOL1Bh2g6OOnpdB9NzFRwi9cMwMHAlYv0mKnJCfJm//3/hiHhkszpwdE8JLpXUwGyyQa1WITjMiMmzhyBlkPA2KwDgOB5OhwtanVr0w3E4XDh2+BJKim+i22SDSsUiKNiAydMGY2CG+I+Z43g4HC7ovDCcTg7HT11FYXE1TD1OT1CgAZPGD0RmRozoa6UyXByPE+ev43xxFbq6bWAYgsAAPSaNTEPu4DjR1/I8hd3hhE4rzuB4HqeKK3Gq+AY6zTYQQhBo1GFiXgqGZySIRn/cDBe0XpbFeJ7izKVqHC2pQKfZBkopAv30GJeZ1JNPIuwQUkphc7igVYszKKU4f70GB0uuo91sBU8pAg06jE5LwKTBA6ASia5RSmF1uKCTwCiorsPesmtos1jA8RQBOi1GJcdjWkYK1K
yw00kphdXpglbFer3e4vpG7Cq/glaLBU6OQ4BOh+HxsZg1MFV0KYb2dP7VsOIMAChtasS2q5fRYjbDwXHw12oxNDoGc9PSoVMJO50ehpplofLCuNzWjM3XytFk6YaNcyFAo0VOeDQWpAyCQaQuiC+M652t2HSjBI0WE6ycCwFqLQaHRGJhUib8RWqfUEph51xgGUY0gRUAqrvbsLm6GPWWTlg5J/zVOqQHRmBB/BAEasSXlOycC4S4c1TEVG9tx47aAtRZ2mHl7PBT6ZBsjMDc2DwEacTrR9g5Z89SmvgDSYu9A/sazqHe1gILZ4cfq0OsPhwzokYiWCQxFQAcvLvLs8YLo9PZhaPNJ9Fga4SVs0LH6hCpDceE8LEI0YgvWzl5FygoNIz4Q0+3qwvn246h0VYLK2+BltEhVBOO4SETEaIJF32ti3eBgoeaEY8g2zgTyjoPoMVeCRvXDTWjQ4A6ApmB0xGk8TKHUBd4ykFFNOJzCN+NStMetNuvwMF3Q0W00KsikOw/BwEaeQ7hnbrniPST6qpbsePLs9i3OR9d7e4KeQxDkJIRgwUPjMbE2UOgVVgQrLG+Azs352P3tgK0t5lvMZJSIrBw2QhMnpmluLBZc4sJO3YWYsfOIrS1dbuf3hiC+LgQLFo0DDOmZQoWI5Oqtg4ztu8rxtY9RWhqMd160o2NDsLiOXmYPSUT/kZlNTg6TFZsO1KCTQeKUd/iLk1PCBAdFoil03Mwb2ImAgXqjEhVl9mG7afK8OXhItQ0d7gZACJD/LF0YjYWjc9CSICyXRrdNju2nyvH58eLUNXU7n4yBBAe6IelY4ZgyZghgrVMpMricGJH0SV8eqoQ1xpbbzFCjQYsHZ6F5SOHICZI2W/K5nRhZ9llrDlbiEuNzbe2uAYb9FiSMxirhmUjPjhIEcPucmH31av4uLAQFxsabjGCdDosHjwYD+bkiNYZkSInx2Fv5TX8+2I+Chrrb1Wd9ddosTg9A6szc5Eeoqw2kYvncaDmGj66lI9zTTfh7GEY1RrMT8rA6oF5yAxRlsDPUR5HG67jk6vncbq5Ek7eXaPCoFJjdmwGHkwdjuwQeZENj3jK40xLBT6vPIOTzdduOQk6Vo2pkRlYmTQKOcHxip6kKaUo7LiOTTdP4HRr+W2OyOjQDCyOH4fcoBTFjMumCuxuOIqzbUWw8+6KtGqiQm7QYMyJnoQhgQMVl3mvNF/H0eb9KOg4CzvvrkjLEhUG+mdicvgMDA7IVsxosFbgfNtOlHQeg4N3NyRkoEKC32CMDJ2HdP8RYIiyMu9ydM8RUaiWxk784aVNOH/8KhiWgOduv0WEIaA8hcGoxarvTsGyx8b7/KNobzPjD7/bjtPHLrvDlXdsOyXEHe7V6TVYsXoMHnhsos+7c0wmG975424cOXoZhECQodWqsGTxcHzr0YmSc2M8slgdePv9/dh/tAw8xV1bWz0Je2oVi4Wzc/Hkw5N8XjKy2Z14Z81h7DhaCo7n+9yWTQjAMgzmT8rC/6yeBJ2Pxc0cThf+uOEYNhy9CFdPY707MYQQMASYMyoDP1k1xedqri6Ox593nMBnRwvgEGB4cg9m5KbhF8unIcDH5Ryep/jrwdP41/ELsDqcIAIMCoqpGSl4ZdF0hBh9c6wopfjHyXN4//g5dDsct75HvcX2hN4npCbhV/NmINLfN8eKUop/FxTgz6dOodNuvxXKv5PBUYox8fH4/axZiJUxRnxWVow3zhxDm80qyhgRHYvfT56FAUHCdVOEtKmiFL+5cAjNVvOt8/XFyA2Lxu/HzMHAYPEn6760u6Ycvy7ciwarSYDBgKM8MoIi8Zvh85AVLB4B7ktHGy/jdyU7UGttv3W+vhgpxgi8lL0QuSHCtVmEdKHtKt65tAE11hZRRpwhHM8OXIKhIWk+My51Xcd719fiprUeDJi7dvh4/hahDcXjySswLMT3pZZqyw2sqfoANdYqUUawOgTL4lYjL1i4ZpWQmmxV2F
b7Z9Rar9xaYuktz9+MqmBMj3oU2UHykpTl6p4jokA3K5rx/GMfoLPdAl5io7YZi4biR68tlpxbUl/Xjh8/+RFaWkx3OTlCmjA1Az99dYlgQa871dxiwrPPrUV9fYekOiQEwMiRA/DKy0u8blv2qL3DjB++9CUqa1qlMQiQmxmP3/9iCfQSJ3GT2YYf/H4DLt9oklS4iiEEGQMi8cefLIVRYpTHYnPgf/68GQXXaiXVnmEIQUpsKP72o2WClV7vlN3pwo/+uRUnL1VJ2nTIMgTxYUH4x9PLJEdHnByH5z7fiX2l1yTZswxBVIA//v3EMsQKVHq9UxzP42fb9mJzcbk0BiEI9TPgo4eWYUCYtEmcUoqXDx7Ep0VFkhkBOh3WLFuGQeHSJnFKKX53+ijeLxTusH0nw0+twcfzliE3Uvok/qfiE3i7ULjL650MLavCv6Ytx6hI6X22/nXlDH5TtE+SLQMCFcPgvXErMCFKuMDanVpfdQ6/urgVgPei5AwIGELwxrCVmBolXoupt/Y15OO3pZ/dyusQkztrhuCnmaswI2qoZMaZ1kK8deWf4CkviQEQfDdlFWZEChc/u1NlXcV47/o74KhL8hbjpbEPYlqk9O3CVeYSrK18FS7qkFw5dXLEg5gYsVIyQ6l8mb//n94146vamk144dsforPdLNkJAYB9m/PxwZu7Jdl2dVrwwjNr0NLSLdkJAYDjh8rx59d3SiqmZTbb8fwLX6ChQZoTArgHl7PnbuB3r2+X9Bqb3Ykfv7YBVRKdEMD91FxUVoOX39gmqRuvw+nCj9/egsuV0pwQwJ0UWX6jEc+/swVOl/fyyS6Ox0/e347C63WSC+DxlKKirhX/8+dNsDlc3u15ip+t2Y1Tl6slVz7geIqbLR148r2NMAs04estSile3rQf+yU6IR5GQ5cJ3/5wIzos0hqi/XbfEclOCOAurtZqtuCxTzegpVta46y3T56U7IR4GF02Gx5avx51Ejt2v1d4VrIT4mF0Ox14ePt63OiQ1nn7k0v5kp0QD8PmcuGxA+twuV24M2xvbaosluyEAAAPCifP4XsnvsTFtjpJr9lfX4rXLm7tcRCkMTjK47kLXyC/tVIS40zrJfym9DPwPWmZ3kR7OL8p/QxnWy9LYpR2XsVbVz4ARznJDAqK966vxenWAkmMSvN1vHf9bZ+cEADYUPspTrcek2TbZKvqcULsPpVvP9z0Kc617pRs/3XqniPSSx++swftrb45CB5t/OgELhd7b9L3yT+OoLG+wydHB3BP4ru2FqDwQqVX288+P42bN1vB+XgdlFIcPnIJp05f9Wq7btsFXKlo8rmSKc9TnLpQgf3HvE9mWw5dRNHlWp8ry/I8RX55DbYeLvFqu+tMOU6WVvnM4HiKsqpGfHHQ+wB18OI17C+66nMpco6nqGhow78Onvdqe+paNTbnl/lc4onjKWrbOvG3g6e92hbW1OOTs4U+EtwTbLPJjLcPnvBqe7mlBX89c0YWo9Nmw6+PHPFqe7OrA6+fljbo9xZPKcxOB146tt+rbZO1G788593uLgbciaw/PeX9wabLYcOLF3yfWCjcfWmeP7fV64ONlXPgpaJNPpfConCPJz8r3ADeS1l5F8/ht6Wf+Uj4j35TuhYuXvyhg6c8/nztI9ntAN699gnsnPgDAaUUn1T9HZyEaEtfWlv9ISwu7876ttp3eyIhvjP21P8d3U5pjvTXqXuOSI+6Oiw4vKPIZwfBI5ZlsP0L8QHUanFg9/ZC2SXbWZbB1vXiT3EOhwvbthfIZjAMwabNF0RtOI7Hxp0FskudM4Rgw458URtKKb7cWyC7gzUB8OVe7+/x84OFspPeKAW+OFQIjhf/znx2rFB20zSeUqw7Uew1uvPp6ULZfXY4SrHhfAksDqc443yRIsa2kkvotIpHXj4tKpJ9rzhKsffaNTR1d4varSktkt3Lh6MUx2uqUNXZIWr3xdVi2S0mOEqR31KH8vYmUbuNVc
W3Ejl9FU8prnW1oKC1RtRud+1FmF12WTVMeVDUWztwqvm6qN3x5hJ0OM2yJlYKig6nGcebxR86ijrK0Wxvk12N1crZcKJFfFysMF9Fva1WdpM5F3XhTJt4BK3BegO11suyGRQUBe3SI2hfl+45Ij3au+mCzxGE3uI4Hod2FKOrwyJoc2DPRdht4oO9N8bJo5fR0iQcfj52/DJM3dLC7H2J5ynyC6pQU9MmaHP6QgVa2sQHe1EGpSi/2oArFY2CNvnlNbjZ0CF7MKcAquraUHS5VtCmrLIBl6qbFPWOaWzvxqnSKsHjNxrbcP5ajaKmaR1mKw5eFF5yqe8w4fClCkV9diwOJ3YVC4e42yxW7Cy9rIjh5DhsKi4TPG6y27GhtFRxg7kvSoQnJZvLhc/KihUxGEKwtkx46cjF8/j4cr6iUuosIVhzWTjaRinFx1fPyj7/LcZ14cmVUoq1N04rajDHEgafV4o/oG28eVx2/xvAnZOysUY82rar4YjsvjGAOx9lZ/0hUZsjzfsUMQCKQ017RMejC227FDEoKM617QBPlXX97W/dc0R6dHhnsaIJCQBcTg5njwgP5kf2lSpqZgYAoBQnjwozDh+5pLgSHsMQHD0uzDh08rKijsCAO1Hy8MkrgscPnr3i8w6euxgsgwNnhRn7869KKoUuymAI9l8QYRRdVdQRGHBPfHsLRBhl0vNChEQIsLNI5DO/UnFrW6tcUQA7SoQZx6uqYHPJe8L3iKcU2y5dEjx+pu4muhx2RQyOUmy5Kry0WNRSh2artHwYMcbWG8KMy51NuGnuUODquBm7b5YJLlfUWjtwxdQgO4rgZvA41nQFNq7vB7AOhxnFnTcUOW08KIo7KtDh6Pue2zkH8ttLFXUFpqC4YalBk6217/dAeRR0nFXcebjF0YQ6m3CUqqTzqGJGt6sdtVbh8eS/oXuOSI/aW+Q/4XvEMAQdIpGCtlaToo7AAMCwDDrahQe51tZuxQ4VwxB0iER22jrMijoCAwAIQUenMKO9ywJe6cTHU3R0WUUYVkhLvxMWx1O0m4QZbd0WxY4hTylaTCKfR7fFaxltb6IUosmkrWaLYocKAFrMIgyLRWFz9/+cR/CYVfiYL2q3CX/mLbb+YZicdsFlv1a7MkfHIyflYXb17Zi12ZWPiYB7Eu909H1POhymfmEAQKez7/drcnUrcqZuZ/T9fm2cFVw/RRlMzs4+/85TDna+f75bFlffjP+W7jkiPZKyi8ObCCFwuYTP0y8M4Fadi6+KAS8MsWuULOresSIkjuu7XohvCCrOUOjoeCSWv8HxVKmv42ZwYox+ug4v90pJiN4jsc/DxfP90tdCLHLj8pI42R+MO2tfKOIInEvp8lVvOQWupT+vwyl4Hf14rwQSVvuTIeRsKI1SSGJ8Ddfx39I9R6RH/gHKqn4C7slT7Dz+AcqqfgLuHA6jSO2KwEDlDEohWgU1MECvfMIggL9RuM6Hv59vnYD7EkMY+PuJMAxa5ctYhCBQpCFfgIweNn0p2E/4c/XXa2XvBuitIJHiaQE6bb84PIF6MYauX64jQCt8zwM0yn/nAGAUKeEeoO4fhoZhoWX7runTXwyxcwWolY8l3hj+amVViqWcy6jqP4afwLn0bP8xDKq+y+qrGDVYoqySt0c6Vlnl5v7WPUekR3ljUr126JWiIcOThRkjkhXnVvA8RXaecC+A3JxExZMrx/HIyRauipibGa94+YfjeOQMFi7alDcoTlFiJOB+is8bJNxddVh6nOgTuhTxlCIvXYSRGqc4t4IQguGpwoyRyfGKJ3CGEIxOEf7MRybGKQ7ssIRgTLIwY0RsrOKYC0sIxiYIM4ZGxcjeMXMbI1aYMSQsymt/FykMsaJmAwMj4KdS1vqBAUFuSKxgT50Ev1AEe+k1400EwABjOPxVfTsiYdoAROqUlegHgEhdMMK0fRfNMrB6xOmjFEf0AlRGROv6bjDIEhZJhhTFDB2jR6xe+HNPNGTeal
AnVyxRI0YvvZjd16F7jkiP5q4cJXvrLuDOqxgyPAkJKcKdMOcuGqYot4IwBCnpURiUGStoM2d2NpTkXxICxMQEIU/E2Zk1OVNy9VUhhQb7YewI4R/DtNHp8FPYY8ffoMXUkcLdNifmpCBEYW8anUaFuaMzBI+PSktAbEiAouGJZQgWjcoSPJ4dH4X0yDDFidArRgqXsk6LCMPQOGWTOEcpVg3LFjweHxiICUlJsrfvehgP5eYKHo8w+GHOgHTFjEeG5AkeD9TosHhApqK8HY5SPJoxTPC4XqXGiuQ8RdfBg+LhNOEurmqGxYrEEYp2tFAADyaPEXw4YgiDxXHjFE3gBARL4sYJ9mwhhGBu9BRFeSIMCGZHTYRKxMGcEjFLIYPBuLDJ0DDC0bYRofNkb911M1jkBE25FxH5pioxNQJZwxJlRyx4nmLBA2NEbSKjgzBybJpsBuUpFq0Q70kQHOyHSRMHgWXl/7CXLBouGlUx+mkxZ0qm7KUThhAsuW+oaFdanUaNRVOGyL5XDEOweFo2tCIOk4plsHxKruzJlWUIFozNhJ9IuXqGIVg1MU92PRSWIZidNxDBRmGHiRCC1WNzZefUsAzB5EEDEBUo3gX1oZG5siMvDCEYlRTvtcz7w7m5svMfGABZkZHIihRvHvdIVp5sBgGQHBiMUdHCESoAeGhgnqLchCiDEZNjBojarEoZqihXJFCjx8zYQaI2SxOGyz4/AOhZNe6LFXY+AWBOzAhFThtLGMyOEXaoAGBC+AhovXS9FRMFMN1LmffcoBEwsPIjSDx4TAibJmqT5j8M/irf+x39h8FheMh9sl//VemeI9JLj/1wFuQ8VjIswcAhcRgzVfjJ2KNHvjMZLMv4jGFZBkkpEZg8PdOr7UMPjoNKpfJ5iYZhCGKigzF7lvcmTw8sGQm9TuPzJM4yBKEhRiyanePV9v45wxDgp/PZGWEYgkCjHitmCj+1erR8Ug7CAv18dqoYQmDQafDQTOGnVo8Wj85EbEigLIZGpcITM7w3xJqXm4GUiBCfGQTuZoFPTxvt1XbGoFRkRUf4/BRO4L6WH00Z69V2UnIyRsbFyXvSJwTPT5jg1WxEdCymJCTLdkB/NmaS199WVmgU5idlyI4m/HTYFK9by5P9Q7EyOU92LOHHQ6YI5qB4FKkPxOoB3j83IT01cBoMKvE8qUC1H1YniU/AYlqdNA2BanEHQM/qcH/CfNmM+TFTEaoNErVRM2osir1fJoFgXOgUROiiRK0YwmJG1GMyCQSZgRMQpRd3cP8buueI9FLm0ET8+LfLQAiR7CgwLEFUbDBe/dvDUEnoKps2KBq/+PUyEIaR7CgwLIOQMCN++4cHodV5T1ZKTAzDr15dCpZlJE/iLEsQGGjA679bCYOEZnExkUH4/YtLoFKz0hkMgZ9Bi3deWY4ACUsi4cFG/OH5JdBqVJInWJYh0GvV+ONPliAs2Hv4Mdhfj7/8cAkMOo1PDLWKxZ+eWYSYMO/N4ow6Ld5/cgkCDdITcBlCwLIM/vj4AiRFen8C0qlV+MdjSxDuL92pYggByzD4wwPzkBEjvKTokZpl8fdVixEbFCDZUSBwR2zeWDQbeXHeW9AzhOD9BQswICTENwaA386YgXEi+SG37AnBuzPnIzMswmdn5OXxUzEjOVWS7Rvj7sOwiFifnZEf503EwmRpzeJeHjob4yNTfHZGvjdoLFYOkNYs7ocZMzE9arDPjFVJo/BQsjQn5pHkGZgV5Xv0ZXb0cDySPEOS7fzoqZgTNclnxuiQPKxOXCzJdnzYFMyK9M3hISAYHDAE9yc8Isk+K2gSpkY+5DMj3jAYC2P/x6fXfV261323D509ehm/efZz2GwOdxv1Pu4QyzLgOB5ZwxLx8p9Wwz/It6zpgvM38OoLX8Lcbe+zjXpvRnpGDF57834Eh/q2rldaVoufv7geXV1WEEL6TDBlWQKOo0hOCsPvfrMC4eG+3efL1xvx/K82oK3dDIYhfe
bAeP4eFx2Mt15ehpioIJ8YFTUt+NEbm9DYavLKiAoLwDs/Xozk2FCfGDebOvDMnzbhZlOHMKOnTXxYoB/+9INFGBjvffLurfr2Ljz9/mZcb2gFy5A+k3E9jGA/Pf70xEJkJ/nWrr3ZZMaTH21GWV2TIMPzffPXafHu6gUYMUB8meFOtVuseOqLrcivqRNmwB3ONmjU+MPSuZiUKpzE3ZdMdjue3rYNJ6qr+2xr72EAgFalwttz5mBWmm8t4S1OB36wfwf2V14XZHg4KobF61NmYXG69G6yAGDjXHj+xE5srSwXZXg61r46agYeSM/1ieHkObx0YSfWVxZ5ZYAAL2RPx2Ppo3xicJTHG6W78FnlabCEEVx2YkBAATw1cCqeSPUeOeotnvL44PpurK06CAZEsMiZ59gDiVPxeMpswdyQvkQpxYaa3fj85nYQQITBgAeP+6Im49HkZT4tHVFKcbBpNzbVftbTwk+cMTZ0ElYlPAaW+JZ3d6FtN3bVvQ8K4d42HkZm4EQsjP0BVAqWp3yVL/P3PUdEQOZuGw5uK8SWNadQU9ly2zGGIRg/MxPzV41G1rAk2btUrFYHDu0pweYvz6Cy4vZum4QhGDMhHQuXjUDu8GTZDLvdicNHLmHj5vO4evXukuqjRqZg8aJhGD5M/o4eh9OFIyevYP2OfJRdqb/r+PCcRCydOxRjhg2QXS3V5eJw5MJ1rNtbgMI+yrbnDYrD8pm5mDg0BSqVvB0LLo7H8Ys38PnBApy7dHcDwyEDonH/1FxMzUuFRi0vWZfjeZy8VIXPjxXiRHnlXcPH4PgIPDAhDzNy06GTmRDM8xSnr1fj09OFOHyp4i4nNz0yDA+Ny8Oc7IEwaORtB6SU4lx1LT49V4i9l67dlTsyIDQYD43Mw4IhGTBq5Q1+lFLk19djTWEhdl65ctfuo4TAQDySl4clgwcjQCd/O2tRUz0+KSnElqvld9XUiDUG4NEheVg2KAvBOvmJzaVtjVhzuQAbr5fAfke9iyiDEQ8PHIYVqUMQppefY3C1sxlrr1/AhsoiWO+oZBqqNeDB1OFYkZyHSL14LpCYKrtbsK7qHDZWn4fljiZwQWo9lieNxLKEEYjSe48UCqnO2opttaewtfY0zK7b21X4qXRYEDsa82PHIEbv28NGbzXb27Cv8Tj2NBxD9x2N5nSMFtMjx2FW1ATE6MXzjcTU4WjHydbDONK8DybX7W05NIwGY0InYULYNMTofXsQ6C2zqwMF7ftwrnUHTK7b23KwRI2coKkYHjLnv7Icc88R6UdRSnH9Uj1am7rgsLngF6BDcloUgsOEoxNtzSZcKb6J7i4rVGoWQaFGZA1PFly6oZTixrUmtDR3wWZzws9Pi6QBEQgNFx4wOtrNuFRSA1OnFayKQVCwH7LyEkV3s1RWtaCxsRM2mxMGgxaJCaGIiBC+ryaTFaUltTCZ3BGVoCADhmTHQ6sVnryqa9tQ39gJi9UBP4MG8TEhiI4UHpS6zXaUlNWg02QDgbtGSXZWHPQiCaA3G9pR29QJs9UBP70GcZFBiIsMErS32BNaLHgAALh5SURBVBwovlSLjp4KqIFGPbIHxsBPZAmqtqUTVY3tboZOg5iwACRFCS+R2OxOFFx1MyilCPDTITs1BgEiNUbq27two7EdZpsdeo0a0SEBSIkSHlztThfyr9eivdsKjuMR6KfDkKRo0UTWxs5uXG9qhclmh06jRnSgP9IiQwUdW6eLQ35lLVpNFjh5HoF6LbLioxDmLzxBNnebcbWpBV02O3RqFSL9jRgUGS7IcHE8LlTXoqXbAgfnQqBOh8yYSEQGCP+mWi0WXGppQZfNBq1KhXA/P2RFRAgyOJ5Hfm0dmrrNsHMu+Gt1yIwMR4zIONJhs6K0pQlddjvULItwgwFDwqMEl294SlHQUIf67m7YXE4EaLTICItAfKDw973LYUdJawM6HTaoGRYhOj1yQqMF80EopShsrkdtdxesnBP+ai0GBo
chOVD4u9jttKO0vR4dDhtUDINgjR5DQmKgFtjxQSnFxbYG1Jg7YHE5YVRrkBoQhtTAMEGGlXOgtKMWnQ6ru5aOxoAhQbFQM32PP5RSXOpqQLW5DRaXA34qLZKNoUgLEJ7o7ZwTl7qq0eVy/24DVHoMCkiAlhUef66bGlBtaYbFZYee1SLOEIo0/2jh7zvvwrXuSphcZlBKYVQZkGpMgpYVHn+qLQ24aWmExWWDjtUgSheKVGO88HeRulBproDZZQJHefip/JBgSIaOFf7dNtgaUGuthZWzQsNoEKoJxQC/AYIMnnKos16F2dUFnrqgY/0QrU+FTkHyrFLdc0T+C6KU4uK5G9j+6Smc2HPxrtB+QLABcx8YgzkrRyLcx6WJ3ozyizXYtu4sjuwtvauKqjFAh7lLhuO+JcMQFSN/b/7lS/XYsvkCDuwvvavCqsGgwdx5eZi/IA+xcfKzt69VNGHLzgLsPlAKh+P2/iI6nRpzZ2Zj4X25SIyX/9Rzo6YVm/YWYvvhEtjstzO0GhXum5SJJTNzkJIQLptR1dCOjYeLsOVoCcy2258Q1SoWc8ZkYPnUHAxKlP9kVdvSifUnirH+xEWYrLeX41axDGYPHYgVE3IwJClKduSsvsOE9Wcu4otTReiw3P4UyjIEM4akYdXYHAxNipXNaDJ1Y31+CT49W4RW8+2lqhlCMG3gADw4MhejkoUHdW9qNVuw7mIJPskvQuMdXXgJgMkDkrF6aC4mJCfKTlbtsFmxrqwUHxUXoNZ0dwPKCfGJeDgnD1MSk2X3Mupy2LHhWgn+XXoBlaaOu46PjorHo4OHYUZCqmAdEG/qdtqxpbIUH105h2tdd/dQGRoWi0fSh2NW3CBoWHlRRqvLgZ21Jfi04gwud90dkc0KisGDA0ZhVsxgUQdDTHbOiQONxVhffQrlXXf3aUkzRmNZwljMjM6BTsTBEJOTd+FkSxG21R1FedeNu47HGyKxIGYSJkcMh0GgZoo3uXgXCjsKsb9pPy6b7u7JFKmNxPTI6RgbOhaGfizS9lXpniPyNcvcbcNvnlmD/BNXb+V19CWGca+ffu/nC7DgId8y0W02B3738w04deSydwYFHnt6GlY8Ms6nAd3hcOGN3+/AwQOlXhk8T7H64XF49LGJPjFcLg7v/HUftu8pvpWf0pc8uQcrF4/A97412adlI47n8e4nR/DFznzBHIbejEXTs/Hst6aJbie+U5RSvLfpJD7cfkYS474xGfjFYzOh9mHZiFKKD/edw7vbToAI5K30ZkzLScWvH57j85LOmuMFeH3bEQAQ3J7rYYxLT8Tbq+eJblnuS+vzS/Dy9v2g1DtjeGIs/nL/AtEqrH1pW9klPL9rDzieCjN6ciiyoyPxj6WLEGrwbUDfW3EN/7N7B+ycS7BihIcxMDQMHy1Yikijb7ldR2tv4HsHNsPici+v9MXxMJIDgvHJrBWI9/dtKeRsUzW+c3Qdupz2W/k8d8qTrxRrCMRHU+7HgADfHgouttfiydOfot1hEWb05HuEa414f8xqDAwU3zVyp66Z6vGj/A/RYjcJ5pUQEFBQBKn98NbQRzE4ULhgWF+qsTTixYt/Q5O9TYThvj4/Vo+Xsr6DrEDfCoY125vx9pW30WBruJXXIXQdWkaL76d+H1mBwrWFvgm654h8jTJ32/DjVe+h6mqDT8XKHv7hTKx6StqWNZvNgZ9872NcKav1ibH84XF4/AfSMsqdTg4//cnnKCyo9qlq6vwFefifH82W5Iy4OB4v/moTTp277lPNixlTBuNnz86V5IzwPMUr7+7EvhPCHVjvFCHAhOGp+PWz8yU9wVJK8ZuP9mPz0Ys+MUZkJOCPP1wsOYfl7U1H8fFB4Tbtd4ohBNnJ0Xjv6aWSnZG/7juFv+477RMjPToMHz+5AgaJuR8fnryA1/celcxgCUFiaDA++/ZKyc7IZ4XFeHHvAcEJry9GdIA/1q++H2F+0sLXmy6V4X/37QJ8YIQZDN
i04kFEG6XlZeyuvIInD20BqHAi5Z2MQI0Om+evRmKAtCjosfoKfPvIl+Ah7LDdyTCoNFg342GkB0qLHl5orcLjJz+Bi+ckX4eGUeGj8Y8hM8j77ioAKO+swdPn/w4755TEYECgYlj8adjjyAlOksSoMtfjucJ3YOMckvrJMCAghOCXmd/F0BDv5RwAoNHWiF+V/woWl0USg/T85+nUpzE0WNrOp/+GfJm/723fVSBKKX7zzBqfnRAA+PgPe3FoW4Ek2zde3uSzEwIA6z4+gR0bzkuyfeetXT47IQCwbWsB1n15VpLtXz84hFNnfXNCAGDfoTJ89NkJSbYfrDvpkxMCuHeQHDt3DX9ZI22y/GT3eZ+cEA/jXHk1frfmgCT7L48V+eSEAO5IQ/GNery0Zo8k+60XynxyQjyMK/UteHbNDknflX3l13xyQgB3VdGq1nY8/dlWSd/5Yzeq8NJe932V+tXiKEV9lwmPr98s2lDQo3N1NXhu/25QHxktFgse3rweNpfTq/3FlgZ8//BWUEolTaweRqfDhgd3fwmTo+8uur11tbMZ3z22HhzlJRen4yiFxeXAwwc/Q7vde/fXGnM7njy9VrIT4mHYeRe+c+oTNFnvXu66U822Lvwo/0PJTgjgduxcPIf/zf8X6ixtXu07nd34xcW/SHZCPAyeUrxW9gGqLQ1e7a2cFW9eflOyEwLg1k6cv13/GyrNlZJe803XPUdEgUrPVyL/xFXZZdv/9eYur63ur12qx/ED5bIZ//7bQTidLlGbmpo27NldLLt/zMf/Pgqr1SFq09xiwsZt+bILIH+67gxMJpuoTafJijVbpDlFd4oC+HJXPlraxVufW2wO/GPLKXkMCmw5WoKapg5RO4fThXe3S3O87hRPKfYWXMGV2mZRO47n8c6u47IZxy9XorDq7h1SvUUpxZv7jskquMVRivPVtThRUeXV9s2j8q6DoxQljU3Yf+26V9u3T5+UzbjW3obtV+9e879Tfyw4CZ76XiScoxQ13Z1Yf63Eq+3fyk7ByXOyGC02M9Ze8/7w9K9rJ2HzwUHwiKcUXU4bPr3h/Te8rvoETE6r7wxQ2Hgn1lYd82q7q/4E2h0mnzvr0h6HZ131Pq+2x1uOo8XRIovBUx5b67b69Lpvqu45Igq07dOTsrejAkBzfScuHLsizlh3VhGjq8OCk4fFIwRbt+QrasZntTpx6GCZqM223UWKeqG4XBx2HxAfaHccLlHWIZYC2w6KRzp2n74Em0PcsRMTwxBsPFwsarO/6Bq6LN6fboXEMgTrjoszjl26geYus6iNN8bnJ4tEbc5W1qCqrUO288kyBJ+eLRS1uVjfgNLGJtkMhhB8fEGccb2tFadrb8oub09A8K/CfFGbuu4u7L95TVHJ9n+XXhB9mGizW7C9qkw2gwfFJ1cuiP7GzE47NlcXyC5vz1OKLyvPw8EJ/8bsnBOba8747IR4xFEeO2rPw+wS/o1xlMP2umOye8fw4HGkOR+dTuEHG0op9jful3V+D6OwoxBtDu/RnW+67jkiMtXeYsLxPSWCCZ1SxLAMtq0RftLqNllxYGexMgZDsOUL4ScMu92JndsLlTXjI8AmkSUgl4vD5h0FihigwIatwgMtz1Os210gu98K4B4EN+wpFO3I+/n+AkUOFc9TbDxSDLtIlOqzIwXKmsvxFFvPlKLbKjzQrj1RKLtXkIexp/gK2rqFQ/VrzxYpZhy5cgN1HcKh+k8Li5U1fqMU52pqca317l0jtxglyhgUFGUtzShqFA7Vr71cpKhrNgVQaerAyfpqQZt114sUOToA0GTrxqG6a4LHt9UUw87Ld9QBoMtpw9464QebQ40lMLnEo6PeZOOd2FMvHN0521qKdof3JSIx8ZTH/oYzgsfLTeVosjcpYgDAkeYjis/x39Y9R0SmLhffVNStFwB4jkfJ+UrB49cvN8Dp9L5+LcrgKcqLbwpO4FWVLV6XVbyJUqCiogkOe98DUH1jJz
q7rMoYXs7T1mlGY4tJEcN9HgsaW/oegCw2B27UtSpydgDAbHWgqr69z2M8T1FS1SD76dsju5PD1boWweP5lXWCO32kysXzKK25e0umR+eqahQzKICiGuEloDPVNYonVwDIrxVmnK69qZjBEIIL9XcX4vPobGON4s+cJQzONwkzLrTUKOoOCwAqwuB8891bZD0qaKtW1K3Xw8hvE3aoijoqoVLQJA9wJ5Ve7BBe9ivrqgBL5G1Z9oiCoqyrQvD4VdNVMAqnYAra51bf/2u654jIlLlLmUfukdViF8wTMXf3D4PnKWwCzkZ3PzEAwCRwru5u+csMkhnmfmQInMukYLnk7nP1fR1mu0Oxo+NRlwCD43nYvOQNSWaIRF26HcocXI86bcIMk135Z8IQgi6b8O+g0678N8IQgi6R99puU+aouxlAl8h7lZJoKkVdTmFGl9MG3zNQbpcnV0RI3S6rYqeNB0WXU/h+mF1WSE9LFpbY0oyFsyiKgnnU7RLPa/u/oHuOiExJaXAnRe5OvH1/GeWWKu/zXALvt7+uAwBUqr6/TkJ/l8cQuI5+ZKiFrkNBrs5d5xK6jn5kCNUsYQhRtPRzG0Pk/cottHU3Q/g72h8MSqkoQ6giqW8Q8fOI3UdfJFZ4rF+uw8t51IRVGA9xL/WKMVT9wAAgWAXWw0A/UDSiDHktHL6q8/w3dc8RkSmxEu++KCDYT9AR8bXJnZD0Bg3UAr1RgoP7pwQwyzIwGvuu+RDsY0NAIXlKwPeloABDvwxOABAc0Pf7DfTTKUrq7a1QAYZOrYJOZh+bOxXi3zeDEIJAg/zeLL0VKsAAgBCD/N4svRXmJ8wQOyZVFECYSGGzCD8/xd8tF+URKnI/ogz+ipc0OJ4iVCdyHXp/RbkugHspQIwRqvPzqQld3yII1QqPSyEafyhK1IJ7GStEIzy+Bmn8Ze8i9IgBg2CNcKG5AHUAeJlJvR4REARr5FfR/qboniMiU4OHJiEoVNkkzrAMpi7IEzyeMjAKkdFBihgsy2DK7CGCx+PiQpCUFKbod82yDCZOHiS4uycs1B+Zg2IUTeIsQzBqxAAY9H0X0TIatBiZnagoOZJhCHIGxSIkqO/PVaViMWVoqjIGAdLiwxEb3vcARQjB7GEDFTEIgLiwQAyMFS4+NS9vkCIGAIT5G5CTIFx8akF2huLIi79Wi9HJwpUwFw7OUOwk6FQqTByQJMxIz1AcpFcxDGYMSBU8Pn/AINm7QDyioJiTNFDw+LyEDMW5LhylmJcg3IF4dkyW7B0z/2HwmBMrXDV0elR2vzCmR2ULHp8QPtTnLbV3igePieHC4/vw4OGKzg+4P/ORISMVn+e/rXuOiEyp1CzmPTBG0eTKczzuWzVa8DjDMFi4cqSidUSO4zFv2QjB44QQLFk2QlFeAsfxWLRomKjN0gXDFO2a4XiKJfPEqwgum52nKDmS5ymWzxYeOABg+bRcZQwK3D89T/QzXTkxR3GS56pJuaKMFaOzFTEYQrBqbK7oUtKKYcIOsBSxhGDl8CHQikSIlg7JlN3PxcNYkjUY/lrhBogLB2ZAr5IfpWIJwby0gQjVC0cS7ksaiACN8HuQwpgen4pYo3AFyykxqYjQy4+yMoRgdESCaKn3kWFJSPALke0cMiAYHBgtWl01IzAO6f4xIApc0HhDKPKChTvSxhsiMSQwVVGUKkQTiOEhmYLHQ7WhyAnMUZSw6sf69YtD89/WPUdEgWavGCV7GZFhGQwdl4aYBPH+DTPm58rOf2BYgkFZcUhJF+/fMHVaJvQCkQavDIYgMSkMmVnirawnjk1HYIBellPFMASREQEYMTRZ1G50XjIiQoyynsIJIQgK0GPiCOGnVgAYmh6HxKhgWQ4oAeCn02DmSOGnVgDIiI9EZkKkbIZaxWL+SOGnVgBIjgjBqJR42VERQoClI8V7XUQF+mPqwAGylwN4UKz04syEGPSYnzFQNoOjFA/m5YjaGDUaLB+cpYjxcHauqI
2WVeGhQXmyI0gcpXhksLijzjIMHk4bLnty5SnFw+nikx4hBKsHjJJ1fsD9mT8o4fUrEsbJ3gFEACxP8N6Ha37sJNlRKgKC+TETwHpZppoeOV125IUBg8kRk6Fm5DUL/CbpniOiQKGRAXjqpUU+v45hCPz8dfj+K4u92gYEGvCjFxf4zCAMgVarxv++vNCrrV6vwQs/m+87gxCoVCxe+NkCrz9qtZrFiz+eJ4Phvl8v/nie14mZZRi88j/SetLcxujhvPqDuV4ThAkheO0790HFMr4vZxHglSfmQKf1PnC8snomdGqVzwwK4JUHZyJAQg7Iy8umw0+rkTX5vbh4GsL8vS9N/mLOFAQZ9LIm8ednTER8SJBXuxcmT0Skv1EW4wfjRmNguHC7e4+eHTUOCYFBshiP5w1DXpT3/ilP5YxCelCYzwwC4IGBORgfk+jV9lsDRyA7NFoWY37CYMyKE3eiAWB50jCMDEv2+XvFgGByZDrmxwsvmXg0OyYP48MzfHaqGBAMDUnB4jjvzs6Y0CGYFD7M58gLAwbp/olYGDvZq+3ggMGYFDZJFiNWH4t50b6Pqd9E3XNEFGruqtF49NnZku0ZloFfgB6/+dfjiPYSDfFo2n05eOq5OQCk5WgxDIFer8Gv/7waCcnSmlSNnzAQzz0/F4QQyQyNVoXf/G4F0r1EXDwaMTQZLz0/DyxDJEVGGMbt6Lz280UYMlg84uJRzqA4/PrZ+VCrGEkDIUMIGJbBqz+Yi+FDvA/kAJCRFIm3f7AIGrVKktNDiJvz0rdmYVKetK6cqdFh+MuTi6HXqCVFLTwWP10+FXOGD5LESAgNwj+eWAqjTuNTZOTZ+8Zj2Shpyy5Rgf7418NLEWTQ+TT5PTVxFB4dI62hV6ifAR+vWIpwo59PjMeGD8UzY4WXRnsrUKfDxwuXIS4g0CfG/YOH4KfjJkmyNaq1+HjWcgwIDPGJMX9ABl4bM0PSb0qnUuOfk1ZgUFCET47CtNg0vD56niSGhlHhTyNXIjs4TrKjQACMDE/Gm8OXeY0iAO5k09eyV2FoSIrkSZyAIDMw4f9j77zDo7iuNv7e2dUW9d4QAgkJVJGE6L333rEB44LjxLEdx45jx73HNU7c4m4Dtum9914EKkioIpBQ713bZ+73xyJC0czOzsj+XPZ9HqUwZ+9vZ3Z37plzzz0HbyUug1LEDiKGMHi8z10YKLC8csdrQBDmGoyX4v4AtcJ2lJkQgmU9l9mV58GAQaAmEE/0eQIaRdcknf9/6yfrvltcXIxXX30Vhw8fRlVVFYKDg7F06VI8++yzUKnELQP8GrrvdujYrov4+p3dqKloAqNg7ih2xigYcByH5GG98fBLs0U7ITfr9NE8fPHv/agobYBCwdxRcZVREHAsRUL/nvjz09MQ2lOcE3KzzqdcxScfH0DJtfpOGR3/FhsXgsf+Mgm9IgLsZmRkleA//z2EK8W1UCgIWPbWr2DHv0X1DsJf/jge0b2D7GZkX67Ee18fQt7V6hvt5W9hXP+3yB5+ePzesUiMFufo3Kz8khq8vfowMq9UCDLCgn3wxJLRGBQrztG5WVer6vHG+sO4cLlMkBHq54kn5ozEqHj72o8DQEl9E17bfAinL5cIMoK93PHEtBGY1Le33YzK5la8vOsQjhUUgVxvL38L43pb+0B3Vzw+bhhmJQgvLXWmuvZ2vHjgMA5ctvaO4WP4uTjjsWFDsDjR9pP37Woy6PHiscPYdTkftBMGc/3cvDVa/HnAYKxIEM4H6kwtJiNeOXcIWwpzwHXSAK+D4a5S44/xg/BQ30F2Rx90FhPeSD+EjVczYeasRRNvpnS0nHdVqnBf1EA8Ejvc7lwcI2vG+zkHsaE4Fabr1VZvZVj/v1bhhLvCBuKR6LF2bzG2cCw+L9yPDaVnYGBNd3Rf7rgqTowSs0MG4eHeUwS31HYmlnJYe20vtpQfhZ413Lg2N4uAQEkUGB8wCA/0mg2Nwr58H45y2FO1B7
srd1vri/AwGMJgsPdg3N3jbmgVXbMr7aeSPfP3T+aI7N27F+vWrcOSJUsQERGBS5cuYeXKlVi2bBneffddUWP8mhwRAOA4DumnLmP7mtO4dL4I+nYTFEoGbp7OGDszCVOXDLaZE2JLlFJcvFCE7etTkHG+CHqdCQxD4OquxehJcZg+bwC697QdarbFuJRVhq1bLuDC+avQ6UwghMDNTYORo6Mwc2Y/hIX7y2bk5ldiy650nEm5gnadEYQAri4ajBgSiVnTktC7l/1Ozu3Kv1qNTfszcPx8Idp1RlBYd9gM6xeOeZOSEBMhLpojpMKyWmw8kolDFwrQpjOC4yhctCoMieuJheMS0TciWHbhoqKqBqw/eRH70grQ0m4ARymc1U4Y1CcUi0clon9EiGxGSV0T1p/NxM70XDTrDLBwHFzUKvQPC8GSYYkYEhEqe/tyWWMz1qdmYdvFXDTodLBwHJydVOjXPQh3D0rEiIiespJPAaCqtQ1rL2ZiU1YO6nU6mFkWzioV+gYGYHlyIsb0Cpddf6S2vR3rcrKwLicLte3tMLEsnJ2cEOPnj+V9kzApPEKwNokY1et12HA5Cz8WZKKyvRUm1gKt0glRXn5YHpOEqT37QK2Qt9W72aTHxqtZ+LEwHRW6ZhhZCzQKJ/Ry98Gy3smYERoDjVJeDkKb2YDtpZlYV3weZbomGFkzNAonhLp4Y3HYAEwLiYeLUnqiLgDoLEbsr8rAppKzKNPVwciZoWacEKT1wtzuQzAlOAkuSnnRAyNrwvHaNOysOIFSXTWMnAkqxgn+ai9MDhqG8QGD4OYkbzu5mTPjQuMFHKo+hDJ9GYycEU7ECV4qL4zyG4URviPg5uQmi/Fz6RfhiHSmd955B59++imuXuUve3uzfm2OyO2ilIqaHK7klOPgpvOorWiC0WCCq7sWYdHBmDh/IDx9hb90YhnFl6txYHs6qiubYNSb4OyqQY9e/pg0Jxk+frYZAGxySq/VY9/ui6isaIRBZ4Kzixrde/hg8rRE+Afy76e3h1FR2YTd+zJRXtEInd4EZ60K3YK9MG1yXwQFenYJo6q2BbsOZaGkogHtOhO0Gid0C/TE1LFxCA327hJGTUMrdhzPRlFFPdp1JmjUSgT5umP6iFiEh9h2JMV87vUt7dh+JhuXy+rQqjdCq3ZCgJcbZgyOQe8Q29EyMYzGNj22nc9GTlkN2vRGaFRK+Lm7Ykb/aMSF2nbyxDCa9QZsy8hBVlk1Wg1GqJQK+Lm5YHrfKCR2D7L5ejGMNqMR2y7lIb2sAs0GI1QKBXxdnDEtpg8GhHbrEobObMb2vFyklJej2WiAE8PAR+uMKb17Y2j3UJtRDTEMg8WMnVfzcbq8BE1GPRTEypjUMxKjutvO2xDDMLIW7C3Jx/GKIjQZDWAIgZdai/HdIzC2W4RNJ08Mw8yxOFiRj+NVl9Fkslad9VRpMTIwEuOD+9iMnIhhWDgWJ2vzcbwmD00mHThK4aHSYrBvJMYFxEJlw8kTw2AphwsNeThVl4VmUxtYysHdyRmJXpEY5Zdoc/lG7P39l6hfrCPy3HPPYe/evbhwofMGaUajEcabyiC3tLSge/fuv1pHREiUUhzbmYEtXx1DwcUSKJQMOJaCUgqGIaCw5kiMmJaI+Q+OQa+YbpIYpw/nYtOqU8jJKIFCwYDj/sfo0NCx0ViwYgT6xNu/PAEA504XYuOPZ5CRdg2MgoByuIVBKcXgYb2x8K7BiEsIlcS4kFaMdZtScD61yHp96P9+pIRYt94O7B+GxfMHoV+i/UsgAJCRXYoftp3HmbSr1h8/tYbeCSFgiHULcXJ8KBbP7I8h/fi3/gkpq7AC3+9OxbHU/zUOszKsIXeWo0js3Q13TUnGqGThHTx8yi2pxqoDF3Aw7TI4ag1P386I6xmIu8f1w8Tk3pJudJcr6/DN4QvYk5EPluNAYF0uILBuO2c5DtHd/LF0ZBKmJ0dLiqQU1Tbg61Op2J6RCzPL3ljSsT
KuL635+2DZkCTM7SdtG29JYxO+PpeKzZk5MFostzGs5xHm7YV7BiRhQWKcpChHRWsrvky9gPXZl6Azm28sERFYcx0slEN3Dw+sSEzCXfF9oZawVbhG14YvMy/gx9yLaDWbOmUEubhhRVw/LI9NglZClKPeoMNXuefxfUE6mk0GKAhzo55Hx//217pgeZ9krIhKhquT/VGOZpMeqwrP4YerF9Bg0nXK8FY5467w/lgeMQgeKvuXJ9otRqwtPo31JedQZ2ztlOHhpMW80IG4q+cweKnsrxdlYE3YXn4SW8tPoNbYdAuDAQMOHFwUGkwNHoJ5IaPgoxZ+UPs16hfpiBQWFiI5ORnvvvsuVq5c2anNSy+9hJdffvmOf/+tOSIWM4sPn9uA/etTQBgCKlDPQaFgAAI8+f7dGD1DuMbFzWJZDp+/uwfbfjgLhiGCNTwUCgYcpfjLC7MwaY5wPZCbRSnF158dwdrVp20yOvJXHv7LJMxewF/XpDPGmrVn8NV3J2wzrh9fee8o3LVwkF0T7PqdqfjPN0c6zZHojHHP/MF4YLHtLYA3a+uRTLz17SGQ604NL+P6ZLh4Uj88tmSUXZP4npQ8vPDdXgDiGHOGxeGZJePsKi1/KKsQf1u1CxylggxCrA0RpyT1wWtLJkJlxwR78nIxHvlxB8wsK8yANSdgbFQ43l0wFVqV+An2fEkZHly/FQazRbDQV8fVHxoWig/nzoCrWvxW94tVlVixZTPaTCZRjOTgYHwxczY8NOKXEXLqa7B89wY0GvQ2C5YREMT6+uPbKfPgqxU/wRY212PZwbWo0bfZZDAg6OXhg9XjFyHQWfwyQml7I+47uQZluiabvWQYQhDi7Imvhy9Fdxcv0YwaQzMePv8tittqbW7NZUDgp3HHxwPuRZir+Hy7RlMrns38HIVt5Ta3GDNg4OHkgjcT/oBervY/bP6S9ZM6Ik8//TTeeustQZvc3FxERf0vc7+8vByjRo3C6NGj8eWXX/K+7vcQEaGU4r0nf8ThLRfEFxG7fpd67pMVGDbZdoIdpRSfvLkTO9al2P3+nnxtLsaLdHi++vQw1q45bTfjkScmY+ZccUV41qw9gy+/PW4348H7RuGuheJ2RGzcnYYPvjpsN2P5vMF48K7homx3HL+E177cbzdj4cQkPLF0jCjbA6kF+PuXu+wanxBgxuBYvLhM3K6L4zlFeOSrbaBUfBUHQoAJfSPxzjJxW6tTikpx37ebwFEq+jfCEIJhET3wyd2zRDlVF8srcfeaDbBwnOgGagwh6BcSjG+XzBXlVOXW1mL+uh9hZFnRDAUhiPHzx9oFC6F1su1UXW1qwKytq6Ezm0VXTVUQgnBPb2yedTfcRBRRK2trxszd36LZZLCL0c3FA9um3gMvte2oRY2+FfOPfIk6o21H52aGr9oVm8auhJ/GdqG2JpMOy09/gipDs+jKrArCwE2pwZphf0KQ1rbD024x4LG0D1CqqxVdH4QBA61ChY+SH0eIs7zcu1+S7HFE7I5lPvHEE8jNzRX8Cw//X9i6oqICY8aMwdChQ/H5558Ljq1Wq+Hu7n7L329Nu384g0Ob7XBCAOB6eP2fj65GVWm9TfMjuzMlOSEA8P4LW1BcyN/avUOnTxRIckIA4KP39yI/t8KmXVrGNUlOCAB8/vUxZGTytxLvUHZBpSQnBABWbTqL06lXbNoVltbija8PSGKs35+O/WfzbNqV1jbh2W/22F2qilJg+5lsbDl1yaZtTXMb/vrdTruckA7G/ouXsfp4mk3bJp0Bf1qz7frSm3gGRylOXi7GZ8fO2bTVmcxYuX6rXU5IByOtrALvH7P9vTdaLLhv62aY7HBCAGthsuzaGrx67KhtW47DvXs32eWEdDCuNDXgmRP7bNpSSvHAkY12OSEdjPL2Zjx+coco+7+kbESdsd1uRp2xDY+d2yDK/oWLG+xyQqwMDq0WA/6aukZU75kP8tejVFdjV5EyDhz0rAnPZn0hu3
T9r1V2OyJ+fn6IiooS/OvYnlteXo7Ro0cjOTkZ33zzDZgu6sb5axXHcdjw2WFJ1Vgptb5+1/fCN0FKKdZ/c0JyghMhBDtFODEbfjgjeQcFwzDYst42Y92mFMkMhYLB+k3nbdqt33lBcnVRhiH4YZttxoaDGZILRRNCsGZ35zlVN2vj8Uy7HYQbDACrDlyweaPddDYLZov0Ju/fHU0FywnfaLekZ0NnNktq804BrDqbDpPFImi3PTsXTXqDJAZHKX5Iu4h2k0nQbl9hIarb7ZtYb2ZszMlGo14vaHe0tAjXWpokM3ZfLUBlW6ug3bmaUuQ11UpisJTiaMVVXG1pELS71FiB1PpSSZMwSylS60uR3VgpaFfcVotTdQUSGRwKWquQ3lgsaFdraMKx2gxJ1Vg5cKjQ1+FCQ67dr/0t6CfzDDqckNDQULz77ruora1FVVUVqqqqfirkL14XTxeiurQBUu/kHEux54czMBnNvDb5WWUovlwtuXMky3LYvy0N7W0GXpviq7W4lFkquXcMy3I4eigHTY3tvDaVVc04d/6qLMaZlEJU17Tw2jQ0tePomQLJPVc4jiIjuwzFZfxRqjadEbtP5khmUEqRX1yD3Kv8vxuDyYLNJ7OkMwCU1DQh9XIZr42ZZbH21EVJk3eHalvacSK3iPc4x1GsOZsuq+9Ri96I/dmFvMcppVh1Pl1WozyD2YIdl4SjVN9lpMtq+MdyHDbmZAszstNkd9P9Me+i4PFVeamiCozxSUEIvi9IF34PVy/IZDD48aqws76pNEU2Y/21s4I2uyrPyPpeMWCwteykjBF+vfrJHJEDBw6gsLAQhw4dQkhICIKCgm78/V61Z+1ZMHYkBXam9lYDTu/nD6Pv3ZLK2wVXrEwmC47vE2DszJDN4DiKg/uy+BkHMmXXrCCEYM/+TH7GsRxZkx5gLfa18xD/eew/mw+zhZXN2HaM//M4klGIdoPwE7oYxuYT/OdxKq8YDW3CT+i2xBCCDaf5GeeLy1DRJPyELoax9jz/Z36pqhqFdQ2y+twSAD+k8U/gVxsakF5VKctpowDWXORnVLa14kRZsaxuuhyl+D4ng/d4k1GPvaXSoggdYinF2ssXYeGJhOktZmwvzZLJ4LCtNBN6S+cPaBaOxdbSC7IZh6qy0WzSdXqcUoqdFadldVDmwOFCYx5qDU2Sx/i16idzRFasWGENFXfy93tV2ZXqOyqu2iuFgkHltTp+RnHdHdVQ7ZVSwaCylD+cWl7WIJuhUBBUljfxMyr4j4kVgTWywsuoagKR6eywHEVFNT+jrLpRdoEulqMorWrkPV5a22TXrhc+xrUafkZJXbOsJ3zAOvEV1wowGppkjd/BuFYvwGjk/6zEigIoaeIf51pzk2wGAJS3tvDeL0tam2Q5Ux2qN+hh4JnAy9qbZTlTHWq3mNBk7NyJrTG0wsTJc9QBwMSxqDV07sQ2m/XQsfIcdcDakK+Kx0kwcmY0m9tkMwCgymA7D/C3pt930sbPLH270baRDRGGCI6j6wIGAOh0/OO0dwGDUgq9AEOvN0lelukQy1HodPw3IL3B1CWOcZvA9dALLKPZxdALfOZG+TdZK4N/HJ3RJNsR6RiH95jJ3DUME/81Fzpmjwxm/jwUnblrGBylMLKdc7qKAQBt5s4/k65ktFt4GDz/3rWMrrknWsfqnKFnu5DRhWP9WuRwRH5GObvKb1BEKRUcx6ULGADg7MK/rc/VVV45ZsC6bKJ15h9Hq1XJXppRKAicnflrPjhrpHWevVnWsvT85+GsEV9zQkiuAtfKWa2SnHd0C0MrcK3Uqi55OnYRuB7OKqeuYQj0snK2o86IkIS21jqL2HYrRgpCeEu4uzh1zfcKAG/hsZ+Fofw5GPLvV7bGcrazt4wg4zfSyM4eORyRn1GhkYGyc0RYC4duAh11Q8P9ZOdvWCwcQnrwlxnvHuoLhULmkgbLISSUv1x6aIhwKXUxohQI6ca/9z+0m7fkBM8OMY
SgexA/o0eQFyxyl7EYgp4CpeV7BHjxrsHbwwgP4u+D1NPPS7aToGAIwgP4GWG+4gtT8YkhBOF+/NcqzEc+gwDo6e3Jz/DqGkaohwfv7reeHp6yEiM75O/sAg1PTZQQFw8oZSR4dsjdSQ1PVeeTq7/WDWo7m9B1Jo1CyVtLxEPlDFeZfWYAa8JqkNaz02NqhQreKvnlJggIgrTy+pH9GuVwRH5GTVkyWHaOiJunMwaP529LPXlef9n5GxqtE0ZOjONnTE+8o2OuvVIoFBg3ib+V/OSJ8bITSSkFpkzkZ0wcGS15626HWI5ixnh+xriBfaBRybvRshzFrNH8hezGJPSCm1beExnLUcwbwX8ew6J6wM/d/lLXtzMWDuE/j+Qe3dDd20PWBMtRisUD+RkxAf6IDvCDnI+dArg7OYH3eE9PLwzsFiI72rY0IZH3mL+zK8aG9pK1a4YhBMti+IsXeqg1mNYjShZDQQiW9E7kzZPSKJwwp0eC7B0tc0IToVF0HolSEAZzuw+QeR4MJgbFw82JvzjbjOBhYGR8exkwGOQT85ss925LDkfkZ1T8oF7WaIbE7yrDEEy9ayicBCa2yOhgREQHS07CZBQMJs5OhkZgSaN7Dx8k9Oshq8bHuImxcHfn/1H7+7lj6KBeMhgEI4ZGwteHv8S0p7szxg2LklVHZEDfHggRiIi4aFWYNiJWOoMQxIYHoncofxRM5aTEvBHxkq8VIUBYoDcSwoN5bRQMg8XDEmRNrkGebhjah78PECEEywaLb2PQmbyctRgX3UvQZnn/JMgJhLmonDAtpo8wIyFRVgTJSaHAvOgYQZtlsUmyds0AwKIofucTAJb16Sd7Z87dkcKf6ZLw/rJ3tCwJF25NMbf7AFnnwVIOC0IHCdpMCRI+bkscOMzsJq5S829NDkfkZxQhBAseGitpPZ8QQOmkxLS7h9i0XXjfCMH+NUIMhgAzFg+0zbhriORkUkopZi+wzVg0f6CMOiIUC+fZ7mmzcEay5MgLx1EsmSWCMSEJIESS/8lRimXTbDPmj0yAkpH2PEYpsGJif5tF8OYOjoPaSSnZGVkxpr9NZ2lWYjTcNGrpjKH9bDammxbTB74uzpKekAmApf0TbZZfn9CrF7q5uUtmLI6Lh7uNfjMjQ3oiwtNbEoMhBHMiYuDvLFwaPdmvGxJ8giQzJnaPRKibp6BdlEcABvv1lMRQEIIhfmHo4xEgaNfdxQej/aMlRSwUhEGsRwj6ego37PRRe2BsQLIkBgMGoc4BSPbqbfdrfwtyOCI/syYuGIhpdw+16zWEWP/j2U/vgV+w7fXnkRPjMH+FnZ719QZlT725AN172m7wNHBIBO55YJR9jOt6/OlpiOhtu0V837juePgPYyUxHv3jeMTF2O4m3Cc8AE89NFES48G7hmNgYk+bdj2DvfHSHyZLyiddPn0AxgyItGkX7OOOt1ZOA8j174sdWjgqAdMHCz99A4Cvmws+vH8miJ0MAmBm/2gsGc6/nNEhd60G/102GwqG2OWMMIRgfHQvPDDCttOmcVLiy8Vz4KRQ2M0YGhaKR0fYfhhwUijw7Zy50Do52TXBMoSgX3AwnhkxUpTtN1Pmw12lsYuhIATR3n54dfh4m7aEEHw+ei58NC52M8LcvPHO0Gmi7P81cD4CtfY5bgpCEKj1wPsD54myf6nvfIS6+Nq1DKQgDLxULniv392iqlU/2ns+wl2D7XJGFISBq1KL1+MfBNMFOTm/Rv0+z/r/UYQQ/PHluZi1YgQAa1daISkUDJROSrzw33sxcIztyaJD9z02AYsfGCmaoVAwePqfCwRzQ27X3SuG494HR18fwzaDYQie+Md0TJ6WKJqxYM4A/PmhcSIZ1uOP/nE85s4S30V4+vh4PPXQRDCE2FxC6Tj+0NIRWDZXfCh24uAovPLQFCgY8Yz7Zg3CnxaIdyhH9e2Fdx+cASXDiGbcPa4f/rZwtOiWAIMiQ/HxA7OhViptMjom+bmD4/
DyoomiGUmhwfhqxTxoncQzpsT1xnsLp4penooJ8MfqpQvgplbZnPw6GGMjw/HJ/Jk2Iy4d6uXtjXULFsFLq7XJ6Dg6rHsovpk9F2qRnYq7u3lgw6wl8Hd2telUket//QKC8cP0RXAWuSsmwNkNmyYtRYiLh2hGtJc/1k28C+48Saq3y1vtjB9H3YswV1+IiR0SEIS7+eLHUSvgrXYWxXBz0uCLQQ+gt1vgjfcpJAYEwVpPfD34QfhpxCWiahVqvJ3wJ0S79xTN8Fa5419JjyBQKz9B/9cqu7vv/pyyp3vfr1FnD2Zj6zfHcfH05Ru7aShHwSgIOJaDk0qJ8fMGYPZ9I9G9l3DokU+ppwuxZc1pXDh9GQxDQGBtY29tZ89BoVRgzNS+mLN0KMJFRCk608W0Ymxal4KzpwpACAEhNzOs/z16XAzmLhqE3lHSKuteyinD+s0XcPJ0AQDcwuj4Co8c3gcLZvdHbIy0dtp5hVVYt+MCDp8puO3943oTNorh/SOwcEYykmK7S2IUlNRi7d5U7D+bBwvLgWEYcBwHhhBQWJd7hib0xOJJyRgUx59PIaSrlfX4/nAadp3NhdnCQsEw4Ci9EcVgOYpBUaG4a2wSRsSHCw/Go2u1jVhzPB1bU7JhNFs6ZSSHd8PSkUkYFx8hqfdReWMzVp1Jx8bUS9CZzFDewiBgOQ6J3YOwdHASpsb3lsSoam3D6vPpWJuehVaj8X8MACBWRlygP5YNSMLM2ChJxelq29ux6mIGvs+8iCaD4RYGIQQWjkMfX1+sSEzC3OgY0Y7OzWow6LAqOx2rszNQb9DdygCBhXLo5emNFbH9sDAqnndbsJCaTQasyU/HqvxUVOvboCTMjUqizHVGqKsnVkQl467IRGiU9m9jbjMbsa4oFauvpKBS32JlXP99M8TKCNK6Y1mvgVgc3l/S9l8Da8bm0vNYV3wGZfqGThl+ajcs7DEEC0IHCiao8snEWbC38iy2lJ1Amb4GCsLcuE8RQsBSDp5OrpjZbThmdRsOdyd5ieC/RNkzfzsckV+AyotqcWjLBdRVNsGoN8PZTYPw6GCMnZ0MF4GETgAw6k1QKBVQOgnfvKrKGnFwZzpqKpph0Jvg7KpGz4gAjJueADcP4ScKk9EMQohgkiwA1FQ348CeTFRVNkGnM8HFRY3uoT6YMKUvPL2Ef2gmk8XKsHEe9fVt2HvwEsorGqHTGeHsrEZINy9MGh8HH2/h9W6TyVocSmXjPBqb27HnaA5KyhvQrjPCWatCcIAnpoyJhb9A8isAmM3WbqtqG4zmVj12n8pFUXkd2vUmaNROCPJ1x9ThMQj2E86at1hYsJxtRqvOgF0pebhcXos2vQkalRKBXm6YNigaof7CS3zmDoaTQnCCbzeYsCstD7llNWg1GKF2UiLAwxXTk6MEt+oCgIXlYGZZaJyUggydyYzdWfm4VF6FZr0RaqUCfm4umN43Cn0ChZcRxTKMFgv25BYgrawCLQYjnBQK+Lk6Y2p0H8QFCT8EsBwHo4WF1gbDxLLYV3gZ58rK0GI0wknBwFvrjKmRkUgMDBJ8LctxMLIstEphhpljcbC4EKcrStBkNEBJGHhptJgc1hsDArsJvpajFAaLGVqlk833cqT8Co5XFqHJaABDCDzVGkwMicSQwB5dwuAoxcnqKzhWdRlNZmtVVk8nLUYFRmJ4QC/ByAylFHrWDI3Cyabd+fqrOFaTi2azDhyl8HDSYohvJIb59xFcwvkfQym4nEIpxaXmqzhZl4lmcztYysFN6Ywkr0gM8YmDkrHf6fy1yOGI/IbFshzOH8rG9m+PI+vsFVjM1vLIWhc1RsxIwozlwxERL+1pvUMcxyH15GXs+PEsMs4UwmyyMjRaFYZNiMH0JUPQp2+I5A6/gPUHmnG+CNs2nsf501dgvu4kqNVKDBnZBzMXDEBsQnfZjMysUmzbmoYzZwthNP7PERk0MByzZvdDUqLwjVMMI6egElt2p+P4mQIYrj
OcnBQYkNATc6cloX9iT9nF2fKKqrHxQDoOnS24Ua3VScmgX3R3LJiYhCGJYbJLyV8uq8WGIxex93w+2q9XWVUqGCREBGPx2ESMTOglu5T81ap6rD+ViZ3nc9F6vVqskmEQ1yMQS0YmYlzfCDgp5d2cr9U1Yt25TGxLy0GTztq8UcEQRAf74+4hiZgU3xtqJ3lbqsuamrEuNQsbMy6hQWedKBlCEBXgi7sHJGJabB+bCa22VNnairWZmViXdQl17e2gsOZF9PLxxrLEJMyKjhIs3iZGNe1tWJubhR+yL6K6vQ30+nmEeXhhWVwi5vaJhbta3tbweoMO6woy8UN+OsrbW0BxvU6KmyeWRSVhfkQ8PNX2Rx1uVpNRj03FF/H9lVSUtjWBgoKAINjZHUt69cOCsET4aORFHVrNBuwoy8Da4hSUtNWDgzXi5K9xx/we/TE3NBl+GuEHld+THI7Ib1RHt6bii9e2oqG6BYyCuaMmiULBgGU5RPbtjr+8swThEpYoTh/MwX/f3IHaymZBRlifQDz28hz06Wu/03P+TCE+emcPKssab4zXGSO0py8ee2Ya4pPsX6LIyLiGDz7Yh5LSBigU5I66Jx3/1i3YC489NhH9+4fZzcjOr8A7H+/D1Wt1UDDkjuJoHf8W4OeOx1aOw/BBEXYzCopr8PoX+5BfXNMpo2P5yM/LFY/ePQoThkTZzSiqrMfL3x5A1tVKQYa3uzMenTccM4by17HhU2ldE1788QBSC8s6ZxACjlJ4umjw8NShWCgisfV2VTa14oVN+3G6sAQKQu7YrtnBcNOo8dDYQbhneD+7ndDatnY8v/MAjl4usi4P3sEAOGrd4rty6AD8YfhAu3cANer1eO7AQey7fLlTBoF1451WqcS9ycn4y9AhdjuhLUYjXjh+ENsLrV2EO2MAgEqhxPK4RDw1eITdS0Y6swkvpRzE5sJssJSConOGkmGwODIBzw4Yy1tcjU9G1oI3Mw5gXVEGLBzbaUI4AYGCEMzp2RfPJ02Es53LOWaOxb9zD2BtcQrMnPVB43YOAwIQYEpwPJ6Nnw5Xp99fddTb5XBEfoNa//EBfPPPnaJsGYbASa3ES988iMRh4reDbf/+DD59Y4f1/9j4VjAMgULJ4Ll/342Bo8RPfvt2ZOD913cAlNrcNksIAcMQPPPqXIwcLz5R98iRXLzx5nZwHGz2kunIN3jqb1MxSaDA2u06lVKI59/aBo6loutFPP6H8ZgzVXydjPOXruHJ97bCbGFFb2P+85KRWDrd9s6RDmUUluORf2+B0WQRXWV25fRBeGiW+J1fOaXV+MMnm9BuMIlmLB3dD0/OHinaUSisrsO9X2xEs94gmjF/QBxenD1edLTqWkMTlq/agNq2dtE1KabH9cFbsyZDKdJRqGhpwV3rN6CipUU0Y1yvXvhoxnSoRDoKNbp23LVtHa42NYr67hIAw0J64Mups0XnfTQZ9bh73zrkNtaIYjAg6OcfjG8nLOAt1X67Ws1G3H/8R2TUl4vqesuAINozAN+Nult0BEZvMeHR8z8gpe6qqF1vDAjCXH3xxZAV8P2dR0fsmb8du2Z+Bdrzw2nRTghgTXg0GSx4acXnuJpTLuo1R3ddxKev77A6ICJ+cRxHYTGzePXR75F7sUQU48zxfLz/2nZQzrYTAlidCJbl8Mbzm5F+vkgUIzW1GK+/sR0sK67Tc0cS6tvv7MKZM4WiGFm55Xj+n9tgsXB2Fa3612cHcehEnijb/OJqPPnuVpjMFrtqqXz043HsOJolyraosh6P/HsLDEbxTggAfLHzHH44mCbKtqy+GX/4ZBPa9OKdEABYczQNXx5IEWVb3dyG+7/cZJcTAgAbz1/CB/tPirJtaNdhxeqNdjkhALDrUj5e33tE1HexxWDA8o2b7HJCAODwlSt4et9+UQyd2YQVOzaiSKQTAlhvB6fLS/DI/l1gRbQSMFgsuO/gRuSJdEIAa2fbtNoKPHR4K8wiuvGaOR
YPn9qAjAZxTkgHI6+5GitPruVtJnizWMrh6bSNOF9XJHrrPQeK4vZ6/PHc6i5t6Pdbl8MR+YWrqb4Nnzy30e7XUUphNlnwryd/sGnb3mbABy9strviK6XWnJX3ntlo8yZoMlrwzsvb7AN0cDiKd17aarN0PctyePOfOyQXKHvrrZ03Elp53wuleP2D3ZIKrREAb320FzqBLrcdjNc+2wczy0o6l7e/OYTm1s7brt+s11cfgtFkkVQB9F/rj6Om0Xbb839uPIJ2g0kS46Ndp3GtptGm3bt7jqNRp5fUN+irYxeQW1Fj0+6Do6dR3dpmd3VOCuCH1Eykltp+IPj43DmUNDVJYmzLzcXRItvO+hcZF5DXUGc3g6MUB4oLsftKgU3bNfnpSK+tkMQ4WVmMTYWXbNpuLs7E6Zpiu79XLKXIqC/H94WpNm0PVGTjSHWeaEfnfwwOl1uqserqKbte93uWwxH5hWv/urOSe8dwHEVhVhkuZ5YK2h3ZkQGjwSyp4ivlKMqL65B1QfgmePxQDtpaDZImVkop6mpbcf60cMTizJlCNDS0i3oyvJMBtLQacOKk8I02LbMEFVVNkiZWCsBgMOPg8VxBu5yrVbhcUiu5qqyFZbHzeLagzdWKeqRfLpfV9G/LCeHIS3l9M07mFElmKBiCDacyBW3q23TYl1Ugi7H27EVBm1aDEVszcySXCFcwBN+fF2YYzGaszcySziAEq9MzBG3MLItVlzIkl55nCMF3WemCNhyl+C7X9iTPJwLg29xUwd8wpRTfFaTI6km0qvC8zevwQ9E5yb1jOFCsK0qBRUR0xyGHI/KLFsty2PHtCUnl2jukUDDYuZo//EwpxbY1p2X9qBUKBjt/OCtos3VdiuT+N4A1J2XbhvOCNlu2psrancIwBFu2XBC02bwrTVajPEKAjTuEb7Sb9mfIYlAKbNifLujIbDyaKYvBUYoNRy/CbOG/0W46nSVrRxLLUWw+cwl6k5nXZvOFS7KaI7Icxfb0XLToDbw227JyYRI4TzGMfbmXUdvWzmuzq6AAbSbpoXyWUhwvLkZpczOvzaHiK6jX6yQzOEpxoaocBQ11vDanKotR2tYsqYowYHXW8xprkVFXyWuT0VCOgpZaWYyy9iacqeF/eLrcUo2MxhK7oyE3q97UjmPV+ZJf/3uSwxH5BaswqxR1lU2yxmBZDse386/nlxXVoqyoTt7NnOVw6mA2WJ6bdV1NCwpyK2Q5VBxHkXr2CnTtxk6Pt7UZkJ5+TXIUoYORk1OB+vrOlxzMZhanUq7IiiJQChSX1qOM53OllOLguXxZDACoqmtBwTX+JYe9KXmyGY2temRe4Z8wdqflyWr8BgDtRhNSCvgjersy5DNMFhYn8ov5GZfkTyYcpThccIWfkZ8vu1svIQT7Ll/mZ1wpkM1QEIJdhfzXY3dxPpQyy5QrCYNdxfy5VHtKc7uEsaeUPzJ5oDJbVkdgwBpB2l8pHJl0yCqHI/ILVjPPhGivDDoTTMbOcx+aGvif0uwRx1K0tXb+VNnU2DUMAGhu6vyJronn36WIb6yWNr3sSe8Go7nza2IwWmAyd004t7Gl8/PgOIoWHX8EwB41tPJf96Y223kqohht/Ix6gWNiRQhBQzv/OLXXa3jIkYJh0NDOfz1q29plf7cUhNyoadIpQ9cmm0EIQYOBn1Gnb4dFRiddAKCgaDAIfObG9ju2AtsrlnKoN/IzGoztsqLEgNX5rDN0zT38ty6HI/ILFmuR94O+dazOJ7efhSExx6VzRudjdSmDZ6zba5HIkYXvPETsShDN4DkPCnG7luQwAMiOuPxcDGKT0TWfiUVgHKFjYkVtjGP+Oc5DphMCWM9D6L2ynJwFk5sZ/A4/2wXnAeBG3RGHhOVwRH7BslXeXawUCgYa586L+Li6d13hHVee9+vq2pWMzsdyc+tCBs/7dXOVV2Hy1rE6ZzhrVHZ3z+VluHT+fhUMA62N8vBi5e7Mf01cNfKqfophuG
vlfyYcpXDX8n9/PDTyv1scpXDX8L9XL6383zq1wfDWaGU/5QMQrLTqqbLd4M+WGBB4CDTLc1dpJCeRdkhBhBluXVCQjADwUv32esj8FHI4Ir9gRcSHQKWRVyaaUTCIHRjOmzQY2ssfLjInccIQRMQEQ6Xu/L0GBnvBy0e4D4wYdQv1hodn531xvLxcEBzsKXsS9/V1RUBA58V3tBoVIsL8ZCVgAoC7qwahIZ132mQYgoTe3WSXhNeondC7hz/v8eQ+3WUlqwKAk4JBbBh/o8RBvUNlMxhCkBjGXyF4SIR8BgD0F2KEh8qeXDlKMSA0hJ8R2l12/gZLKQaG8DMGdZPX+gGwRkMGB/OPMyiwu+SdPzcYlMOgQAGGXw/ZkReWUgz046/YPMAnrEuiO/19esoe4/cghyPyC5azqwYTFgyEQkZ/D47lMPPekbzHVWonTFk4EIxCxi4NjmLWMv5Kmwolg5nz+8vaNQMCzFk0iNcJIIRgzuxk6eNfH2P27GTB6z1verKk7cEdYhiCWZMToRLodbJgUpKspFsFQzBjVBycBSISi8YmylrWUDAEkwdFwdOV/0l+0YgE2Ywx8b0Q4MnvxC4aLJNBCAb3CkUPX/4mgEv69ZU1uTKEoG9wAGKC+B3DhfHiq/p2JgKgl7c3BnTjd6gW9ImDk8wma0GubhgVyt8OYWZYNJwldN29Wd5qLSaF8leEntAtCp4qeREkZ6UTZobG8R4f6h+BQI28at5KRoFZ3cVXUv49y+GI/MI1fflwWfkPnr5uGDyB/wcHANMWDZQ18Tm7qjFysvCNdMos+/t63CyVSolxU/oK2kyaFA+ljIZpDEMwZbIwY9yIKDhrpS85UEoxY5IwY1RyBDzdpN9oWY5i7njhXi2DY3og0Ft6CWqWo1gwWpiRGBaM8EBvyUF0lqNYNEKYERXkh77dAyVHE1hKcffQREGbUG9PDAsPlczgKMWygcITUoCrKyZGREiOvFAAK/olCf7GPDQazO4dLZnBgGBFfJLgdXB2UmFRZF/pDEKwNCpJsFy9SqHA3b2SJX8eCkKwMCwJWgGHSUEYLA4bBCLx26sgDKZ3S4C7TIfp9yKHI/ILV8+oYIydKz2acP+zM6GwMTkHhnhj+uJBkpc17n18Eu+yTIe8fV2xYOkQaQAASx8YCRcbORqurhosXzZMMmPx4sHw8hJe09WonbBy6QhJ4xMAc6f1Q6C/h6CdUqnAn5fwR7EEGQSYNjIWYd18BO0YhuAvC6QxGEIwtl+E4LKM9b0QPDFrpN0VezsYQ/qEYmCk7eWEv06W9nkoGILE0CCM7GO74eFfRg8DQ+yflhSEIDrAD5OjI23aPjpkMJQK+7MfFIQg3MsLs2Ns92P6U/IgaJROdk/iCkIQ5OaGxTHCTjQArIwbCDeVWhLDR+OM5VH9bNoujxwAH7Wz3Q4PQwjcnDS4r88gm7bze/RHkNbD7m28DAg0CiXuj5T2vfw9yuGI/Ar02NtL0HdwhN3OyNInpmD8/IGibB96ZjoGjIqy2xmZd+8ITF8yWJTtvX8ci9ET7O/cOnVOPyxaLs7BuOuuIZg61f7OrePGxeDeFeJuHHOnJWHhTPuWgQiAYQMj8PB9Y0TZTx8Vh/vniLuuNxgE6B8TiqfvnyDKfkL/3nhsvn03S4YQxIYF4tX7p4iyHx4Thn/MH2t9f3Ywegf74r37ZoiKog0ID8Hr8yeC2MFQEIJQH098fM9sKEUsffbtFoj35061NmIUCVEQgkB3N3xx1xyoRHSV7ePnh09nzoSCYURP4gpC4OPsjO/mz4Ozk+0lkZ4eXvhq6hw42cnwUGuwZsYCeKht55MFu7jj2/ELoFEoRTsKCkLgrFRh9cRF8NXaTvD00bjg25F3w1mpsouhZpT4euQSBDsLPwwAgLuTFp8NvgduSo1oZ4QBgZJR4MOBSxHqIvww4ND/5Oi++yuRyWjBB0/9iCObL0ChYHiXawhDwBCCP7w0FzNETqwdYi0sPn
l9B3avSwGjYMDxMBiGgMIaCZl/3wi7llxYlsOXHx7Eph/OglEQcDxbYhmGgFKKu+8fiWUrR9nFoJTi229PYPWa0zda2PMxOI5i0cJBWLlytF0JopRSfL8pBV+uOQEQ8DI6Wt7PnpKIR1eOEzXp3az1+9LwwZqjAL2zVfvtjKkjYvDMAxPhZOfy1LaTl/DGmkPWbZE8d4MOxrjkSLxy32Ro7Nx1syctHy98b+2fA9p5N4EOxvDonnjn3mlwVtu3BHY45wqeWrsbBnPnrdpvZgwM745/L50uuFumM526eg2PbtyJNqMJxAYjoVsg/rtoFrxdOk+w5tP5sjL8Ydt2NBsMYAjQ2VdLQQhYShHt54ev5s5BgKt9yeAXqytx/+4tqNPrwBDS6Xerg9HL0xvfTp+H7u62J++blddYixUH1qNK12aT0d3VA99NWIhwj86TuPlU1FqPe4//iLL2Jl4GAwIOFAFaN3w9Ygn6ePLn6nSmcl0j/nRuNYra6m68Xz6Gt8oFHw9ailhP/lyd34vsmb8djsivTEW55di56hQObkiByXhr6Wtvf3dMv2cEJi0eDG9/6der5EoNdq09h/2bU2G4rUGbh7cLpi8ehMkLBsA3wL4b080qL6nHri2p2L01Dbr2Wxmu7hrMmNsfU+f0Q0CQp2RGVVUTduzMwI4d6Whru7Uiq4uLGtOmJWDGjCR0C+ZPVLSlmrpW7Nh3EVv3ZqC55dZCT1qNE6ZN6ItZkxPQI0T601F9Uzu2HcnCpgPpqG++tQiTWqXE9JGxmDs+Eb26+0pmNLbqsO1kNtYdybijmZ2TUoFpg6Mxf3RfRPcIkMxobjdge0oOfjiejoqGlluOKRUMpvTrg0UjEhEXGiA5n6jVYMT2tFx8fzod1+qbbjmmYAgmxkViyZBE9OsRLJnRbjJhR1YeVqeko7Cu4ZZjDCEY36cX7h6QiEE9QiQz9GYzdubl47v0dOTW1t5yjAAYGx6OZUmJGNajh+RcCYPFgj1XCvBtVhou1lTdwRjRvSfuiU/C6NAwKBhpwXMTy2JfSQG+y03FhZo7G/8NCQzFPdHJGN89AkqJDAvH4VBFAVZdPo9ztdfuOJ7sE4LlkQMwoVuUYO6JkFjK4WTNZfxYdA5nagvvcEDjPLvhrrDBmBAUA7VCXrLub0UOR+R3oPZWAwqzStHWrINCwcDDxxW9E0J580EopShIv4aasgYYdEY4u2nQIyoYIb34JxZ9uxGXs8vR2qwDwzBw93JBn/gQKJ34GVdyylFZ0gCDzgRnVzVCwv3RI5KfYTSYkZ9TjpZmAwgB3D206BPbDSqBJ+6rl6tQXtIAfbsRWhc1uoV6IzySP1/BZLIgL68SLS16UFC4u2sR1ScIaoG8luKiWpSU1EOvM0GrdUJQsBciIvknSIuFRe7lSjS16EE5Cnc3LfpEBEArsHOlpKwexdfq0a4zQqNxQmCAB6IiA/kZLIfcq1VobNGB5SjcndWICguAi0CdjbKqRlwpqUObzgiNygkBvm6IjQziZbAch5ziajS26mC2cHBzViO6hz/cnPkjBxV1zSgoqUWbzgiVkxL+Xq7oGxHMG2HiOIrcsmrUtehgtrBw06rRJ8QPni78iX3Vja3IKa1Bq84AlVIJP08XJIYH806QlFLkVtSgtrUdBrMF7lo1egf6wceVPzpR29KGS6XVaNEb4KRQwNfNBUlhwXDimbwopcivrkN1axv0ZjPcNWpE+PnA340/OlHfpsPF8kq06I1QKhj4uDgjObQbVAJRrPy6OlS0tEBvtsBdrUYvH28EufEnGjfq9Ugvr0SzwQAFw8Bbq8WA7t2gFlgeKmyoR2lrM3RmM9xUaoR7eSHEjf9Bo8VoQGplBRoNBjCEwEujxcDgbtAKLA9dbW5ASWsT2i0muDqpEObujVA3T177NpMR56vL0Wi0Ovheai0GBHSDq4r/+17S1ojitga0mY1wUarRw9ULPd34oyx6ixnna0rRYNSBgsJTpUWyXw
jcBWqMVOiaUNxWhzaLAVqFCt2cvRDu5sdr/3uVwxFx6IbaW/U4vCEF2748gvKrd/YeiRscgZn3jcaQKQm8DoYt6duNOLIjHdtXncK1y9V3HI9KDMXMZcMwbFI8VGpphbSMBjOO7r+EbWvP4Ur+nf1NIqKCMHvxYIycEAu1xNorJqMFx4/lYevmC8jLrbjjeM8wP8yZ1x9jx8dCK3HnjNnM4sSZy9i8PRVZOXc+IXYP8ca8mcmYODZG0MEQkoXlcCr1CjbuSUdq9p19WroFeGD+lH6YOioGbi7SasiwHIczWcVYfzADZy4V33E80NsNC8cnYsbwOMk7gDiO4mx+CdYdy8Dx7Kt3LBv5ebhg0chEzBkSCx93aYWjKKU4f6UMP5zOwOFLV+4I7Xu7arF4SALmD4qHv4e0WjiUUqSXVuD7lIvYm31np2APrQZLBvTFwuR4BHtKu89RSpFZWY016RnYkZN/R/VTN7UaixPisCSpL0I9PSUxACC7thprMi9ic34OTOytlUldnJywKDYed8UloJeXfUssNyu/oRar8zKwoSALBvbWyqRapRLzI+OxLCoRfbylT/5XW+qx5nIa1l/JgM5ya2RZxSgwNyweS3snI8ZLegTw9y6HI+IQACDjZD5eWfEZ9O3X+4p08kl35GkE9fTFa2sfQXBP+37cOWnFeOnBb9DarAch6DTHoCMXwy/IE699fT9CI+z7cV/OrcBzj65BU0M7CEM6bZ5HiDWnxMvHBa9/uAy9+gTZxSguqsXTT65FXV0rb15Jx/m5u2vx2j8XIibWvnXgsopG/O259aioarbJcHZW4fXn56BfAn/Rpc5UVdeCx1/bhGsVDfyM6/+hVinx+l9nYkiS7V0jN6uuqQ2P/WsLCkpqb+RDdCZCACeFAq/+YQrG9uevC9GZGtv0eOyzbcgsqrTBIFAwBC/dPQHTB9reNXKzWvVGPPbdDqRcKRVkdCx9PDdnLBYOsb1r5GbpTGb8dcMuHC0ossmgoHhq4kisGGLfVnejxYK/7dqH3XkFggzF9RyKx4YPwcND+WvydCYTy+LZIwewMTebN0+ig8FSiof6DcDfho6wa9mI5Ti8fPYQvstNF8W4JzoJLw4eZ9eyEUcp3rl4FJ/lnLHBYMBSDvPD++L1gVNk11/5PcrhiDiEc/uz8Mq9n4FSKqrrLaNg4Oymwb92/U1wueZmZZwpxPP3fQWO40TVIWEUDNQaJ7y39k8IixLnKORkluLvD30Li5kVySBwUirx9mcrEBXPX2XyZhVersbjj6yC0WgRx2AIGIbBP99djMQkcY7CtdJ6/OmJNdDrTKIKcDGEgBDgjRfnYvCAXqIYFTXNWPnsD2hp1YtiEAIQELzyl2kYO6SPKEZtYxtWvPoD6pvbRTMoBV64fxJmDBe3Y6qhVYfl761FZUOLXcXKnl4wBotHJYqybdEbsPzj9bha02BXI7jHpw7H/WMGiLLVmcxY/s0G5FTW2MV4aORA/GWcuF1iRosFK9ZvRmpZhV2M5cmJeH7caFHOiJllsXLXVhy/VmxXj5cF0bF4a9wkUQyW4/DnI9uxp7hANIMAmNKzNz4eO0uUw0Mpxd/P7sLGokyRBCtjVHAvfD5ygeQclt+r7Jm/HVf2N6iinDK8vvILUI4T5YQA1gqsulYD/rHwP2hrtt3RtKyoFi//4VuwIp2QDobRYMKz936BJhGdhasrm/DcI2tEOyFWBoXZbMFzj65BTVWzTfvGhjb8/ckfRTshgHXJgOM4PP/MepSXNdi0b2014Iln14t2QgDrkxtHKZ5/fRuuFNXatNcZTHjs1Y2inRDA6iBQSvHSf3Yjp/DO5a7bZTJb8Mh7m0U7IR0MAHjtm/1Izbtzmeh2WVgOj/53q91OCAC8teEITmQXiXhPFH/5bqfdTggA/Gv3Sey9mC+K8eTG3XY7IQDw3+Mp2JQurn38P/YcwIWycrsZq1IzsCo1Q5TtKyeO2O2EAMCG3Gx8fOGcKNt3Uk
9gtx1OCGAN8O4uLsDbF46Lsv8k+7RdTkgH41jFFbycut+u1zlknxyOyG9QP7y/B6yFfysmnziWQ11FE/b9cNqm7YbPj8Bksoh2dP7HoGhuaMeuH87YtN38/Rno9Ua7q75yHEV7uwFbfzxr03brllS0tuglMUwmC9avtc3Ysfci6urb7J5YKbVuqV6zzva12ns8B+XVTfYzYHV6vtpgm3HwfAGulNdJK6lOgU83n7JpdiK7CJeuVUtjEOA/207aLMGfcqUUKVdK7Z68O/Sv3Sdtfl+yyqtxOP+qZMb7B04KdgQGgMt19diWkye5k/K/Tpy+sdWZT2Utzfg+66LkbrcfnT+HVpNR0KZO347Ps1IkEoAvss6jTt8uaNNiMuCjbNvfv85EAfxwOQ3l7bYfbBySJocj8htTfVUTTu/J4K0BYkuUUmz/6ig4gTbcrc06HN6WLpnBcRQ71pyGxczfhtugN2Hv1jTeOiM2GSzFni13bj++WWYzix1b0ySXt2dZiv37stDWahCw4bBlR5rk/jQsR3HsZD4aGvlvtJRSbNidLrmUOsdRnEkvQmWN8I123cF0WWXOL16uwJXyOkG7H49KZ1AKXK6ow6VrVYJ235/MkNUor7yhBecKSwRtfkiRx6hv1+FIwVVhRnqmrGZ8bSYT9uQXCNr8eClTVmsGE2vBlrwcQZt1BVmSnSnAWqZ/fUGWoM3WokswscJOl5AIIfixMF3y6x0SlsMR+Y1p7/enO6+yZIdqyhqQfiyP9/jBzRfAWuR1pmxuaMeZg/zh5yN7swSdCDHStRtx7AA/4+SJfLTcVvvDXlnMLA7s478Jnk8rQk1dqywGpcCu/fwh5Yt55bhW0SDrY2cYgq0H+Rn5JTXIKaqW/IQPWOt4bDp8kff4tZpGpBRIj1R0MNYd52dUN7fhaM5V2c34fjzNz2jU6bEzK18WgyEEa85l8B5vN5mwMfOS7GZ836XyT64mlsX3ly7K+jwA4NuL6byOOMtx+C4nDZyMby8FxXc5aWB5Hp4opfi24Lzk8QGrI/395bQ7dgo51DVyOCK/MWWfK5TVwA6wdsvNTrnCz0i9s2iQFEZOWjE/42KJrK7DAKBQMMjJ4H9yzc4qg0Ip9ydAcOlSGe/RrJxy2efBUYpMAUZmXrmsp2/AGhVJz+HP4bhYUC65F1GHWI4iNY//PC5evXPLtBTGhcsC16qkUvbEynIUF67yM3Iqa+7YPmuvOEqRXsJ/PQpq66G3SH/C72BcquJ/r8VNjWg2Ci+r2BIFUNTUyLs8U6VrQ7XOdr6YLQmN02I2oLi1Ue7zGZpNBlxrs50T5pD9+lkcEaPRiMTERBBCkJGR8XMgf7dqEQjhixUhRDBhtbWpXfJSww1RoL2Ff0mjvdUgq+swAHAch7Y2fkZbm8HuHJfbRSlFq0BUpa3NKHnJ5GYJRW5a242ywuc3GAJLTK06I5gu2DXQohNg6I2Sl2VuH0fKMXvUbuSP1rUauoZhYlmYeJyNFpkOwi1j8bzfrmQ08zFM/N8Huxk8Y7XYyFHpCoZD8vSzOCJPPfUUgoODfw7U715Si5LdLicn/sJjTnb2GelURPi9KpQK2ZMrIQRKgYqVyi5gAMLXQ6lkJHWfvYMhcK3s7S0jmdEFO/35qpR2MORGK6wM/tuaEN8eCW3l7CoGAN4aGU5duJWU73p15XZVfkbXXSu+Wh9deh6OeiI/iX5yR2TPnj3Yv38/3n333Z8a5RAAn0APu7v03i6O5eDpx19C2svXDYzM5QZKAU8f/kqVXj4udjWh60yEEHh681fc9PKSVo3zZikUjOA4Xp4uspfKGIbAx5v/Wnl7OvOuj4sVIQQ+nvzn4e3uLCvnoUO+AgwfN/uaw/HJW2CcrmJ4CZSK97GzyR2f3DVqXkfEt4sYKoUCrqrOqwT7Osv/fQC4UQK+U4ama84DAHx4xvJSacF0SVwS8NV0zTVx6Fb9pI5IdXU1Vq5cidWrV8PZ2fYXzmg0oqWl5Z
Y/h+zTqFn9ZS83cJRi+Ix+vMdHTkuQvGPmBoPlMHJaAu/xURPiZC/NsCyHURPj+BljoruEMXpsNO/x0SP6yHZEOI5i3Ch+xqiBkSAyb7SUUkwczs8YkRguGGkQI0KAyYOjeI8Pie4BrUpewzBCCKYN4D+P/uHd4CHQN0eMGEIwvR//ecR3C0CAQL8ZMVIwBDP68p9HpK8Pwrw8ZX3qCoZgWnRv3qhgiLs74v0DZC2XKQjB+LBevH1uvDRaDA0KlbX7R0EIhgaF8jo7GqUTxoVEymIwIOjrHYRuLtIbfTrEr5/MEaGUYsWKFXjooYfQv39/Ua9588034eHhceOve/fuP9Xb+81qyJQEuAs8PdsSo2CQPDoGQT34O7kmj+gNvyDpP0iGIYjp1wM9e/M3qotNDEVomK/kBElCCHpG+CNaoLpqRGQAoqL5m7PZhgCBQR7ol8xfIj0k2Av9k3rIiu54eTpj6OAI3uN+3q4YOTBCVsKqi1aFcUP5y7B7uGoxeUi0LIZKqcDUofxl2J3VKsweGiuLwRBgzlB+51OlVGLRkL6yJldKKRYO5i/1rmAY3D0oQRaD5SiWDOBnEEKwvH+S5PE7GEuTEgVt7umbJGu5jKUUy/vaYMT0k7X7h6UUK2KSBW2W9U6WxeBAcU8fcfOYQ/bLbkfk6aefBiFE8C8vLw8ffvghWltb8cwzz4ge+5lnnkFzc/ONv9JS25UYHbpVTiolpq8YKXni41gOM+8bJWjDMAxmLh8uOb+C4yhmLhcuYU0IwewlgyWnJVBKMXvxYJvvcc68AZIjFgTA7LkDbF7ruTOTJTMYQjB7ehKUNqIR8ycnSV46YRiCWeP7Qm0jGjF/bIJkhoIhmDYsFq42GvktHC6PMSGpt+DSDAAsGBQvafwOxojoMAR7CZesnt8vTrIjomAIkkO7IcLfR9Budmw01EqlpKiIghBE+/uhb5BwO4fpkX3grlZLYjCEINTDA0NDQgXtxodGwE/rIul6MYTAX+uCcaHCbRCGBvREqKunJAYB4K7SYGoof4TKIXmy2xF54oknkJubK/gXHh6Ow4cP48yZM1Cr1VAqlYiIsD7R9e/fH/fcc0+nY6vVari7u9/y55D9WvDwBITFhtidx0EIMGHxEAwYz/9E2aGZy4Yhpl8PMAr7ftiEIRgxOR4jpthuHjZ5Vj/0G9zLbqeKYQj6D43ExBmJNm3HjIvB8BG97c6rYRiC+L7dMXM2/xJWh4YO7IVJ42Ltju4wDEFkRAAWzx1o0zYpJgRzJyXYPWEoGIIewd64d95gm7YxYYG4Z6q4Piu3MwJ93PHHebb7p4QFeuPPM8T1Wbmd4ePmgifmjrRpG+Tljr/PFHa2OxNDCNy1Gjw3Z6xNW28XZ7w8c7wkhrNKhddmTbBp66ZW4+1pk+zelsoQApVSiXem2e4Do1Yq8cHEqXY/dBBYk0T/PXGazdcqGQYfjpkBAvvyugmsSyb/GTPDZkIqQwg+GDoLSmJ/tggBwb+GzIRa0QVJ+g51qp+s6V1JScktOR4VFRWYNGkSNm7ciEGDBiEkxHZDMkfTO+lqqm3FMwv/jZL8StFP4yNmJOGpT+4TvfOmtVmH5+79EoXZ5eIYBBgwMgrPfbwMKrW4XABduxHPP/Y9sjNKRG0ZJoQgvl8PvPLBXdDaePrukNFoxkvPbcKF83e2mu+UwRD06ROEf76zGK5u4vINzGYWL7+1HSdOXxZlzzAEYT188f4bi+DpIS6hj+U4vP7JXuw9niuaERLoiQ9fWAA/b/7k5JvFcRRvrzmETUfE9exQMAT+3m749KkF6OYnbjmPUop/bzuJbw9eEM3wdnPGF4/OR88A8e3n/3vgLD7ab7u0fQfDXavBFw/OQ1Sw+A7V355Owz/3HQOB7TqDCkLgrFbhq2Vz0TeEf9nydq3NyMTz+w4BIhlqpRJfzJ+NQaHimkICwJa8HDx5cK+1iaYNW4YQODEKfDZtJkb1EN/ZeW9xAR4+sv1Gny
VbDIYQfDxmJib3FN/Z+VjFFTx0fCPMlLPJILDeT94dPAOzw2w/nDl0q36R3XeLi4sRFhaG9PR0JCYminqNwxGRJ12bAZ+/sBGHNpy73nvm1o+aMASUo3Bx12Len8Zj0aOT7K4VYTSY8dVbu7B3fQosZov1JnUTpoOhdVFj9j3Dcfcj46Gwc7upyWTBNx8dxK6N52E0Waw39ZsZ1zu8qjVOmLFgAFY8PE5w+3FnYi0cvvvmOLZsOg+93nxjzJsZgHXL8bTpSVj50BioRTpTHeI4itVrz2Dd5hS060wghNz5mRDrTpzJ4+Lw8INj4aztfEcDnyil+GHHBazakoLWdgMYhtzhJBJCwDAEE4ZF4fF7x8DNxb7kTUopNhzKwJfbz6KxVd8pg7G29sW4/pH4291j4eVu/+6ILacv4eOdp1HX0s7LoKAYHd8LzywcC39P+3OjdqXn4YPdJ1HZ1AoFQ+5YFmKuf0bDo3ri+bnjbC7JdKb9OZfxzv4TKG1s7pTR8W9DwkPx4vSx6OnjZTfj6JUivHH4GK42NHbOuN7yvn9IN7wycSx6+/HngPHpdGkJXj5xBAX1dTfG64yRFBiEV0aNQ5y/uC7eN+tCdTlePHMQl+qreRgMWMohzicALw8Zj/4B3exmZDVU4sXz+5BRXyHI6O3hhxeSJ2BoYE+7GQ45HBGHblNLQxv2rz2DvWtOobaiESajGRqtGj36BGHGfaMwYkY/qDTydiu0tehxaEsqdv14FtXl1xkaFbqF+WLG0qEYNT0RGjsn1dulazfi0O6L2LnxPCrLrAyV2gnB3b0xff4AjJ3SF84u4qIgfNLrTThyKAfbt6airLQBRqMZKpUSgYGemD4rCRMmxcPVVd6uC6PRjCMn8rFlZxquldTDYLAy/PzcMHNKAqZMiIe7W+c7AMTKZLbg6LnL2LQ3A4UltTAYzHByUsDP2xUzxsVj+ph4eIuMtPDJYmFxLP0K1h/KQN61auivM3zcXTBjRCzmjIqHrwTn4BYGy+FkdhHWHstAVnEVdCYTlAoFvF21mDk4FvOGxSPQS1w0h08cR3GqoBg/nMpAelEF2k0mKBkGXi5azEiOxoLBfRHiLW+3BKUUZ66W4PuUi0gpKkW7yQyGEHg6azA9PgqLB/SV5IDczjhfVo7VqRk4VVyCNpMJDCHw0KgxNaoP7krqi0hf4bwTMYz0qkqszsrAkeKraDNZC7u5qzWYEhGJu+MSEOPnL4sBAJm1lViVm44DJYU3qrK6qdSYEBqB5dFJ6OsXJJuR21iNNZfTsLc073rRMwpXJzVGB/fCsshkJPl265I6Q79X/SIdESlyOCI/jSilNn9gJoMZx7ddQMaxXLQ2tYNhGLh7u2LotET0Hx9vs2y5GIbZZMGpvZlIPZaH1iYdCAHcvFwwaFwsBo+PtRk5EcOwmFmcPZqHlBP5aGmyVot189Ci/7DeGDou2mbkRAyDZTmknCnE6RMFaG7SgXIUbu4a9BsQjpFjoqFSy2dwHMWF1CIcP5mP5hY9LBYObm4aJCWEYsyoaGhsOJJiGJRSpF0qxeFT+Whs1sFiYeHmqkF8VDdMHBltMzojlpFRUI4DZ/JR39wOk5mFu4sacRFBmDIsxmYyq1hGVlEV9qTkoba5DSYzCzetGjE9AzBjcAzcbUSAxDAAIKesGjvO56K6uRUGkwVuWjWiuvlj1sAYeAvUGbGHUVBVhy3p2ahsaoXeZIabRo3IAF/MTY6Fn5twTYuOW7stTmFdPTZfzEZ5cwt0JjNc1CpE+PpgfkIsAt2FnTyxjKLGRmzMzkZJcxPaTWa4qlQI8/LEgtg4hHgIO3liGaWtzdiQl4WrzY1oM5ng4qRCD3dPLIyKQ08PYSdPLMMh8XI4Ig5JVmNNCzZ/sh+7vz2O9hY9GAVzo2aIQsmAtXDwDfbCzJVjMPOBsdBIiEC0NLZjy1fHsGvNKbQ26W5lKBiwLAdPXzfMWD4cs+4dCReReRg3q73VgK3fn8aOte
fQ1NB+Y9ybGe6ezpi+aCDmLB0KNwnRAb3OhK0bz2PbxvOor2vrlOHqpsH02f0wd9EgeAkUV+OT0WjGth3p2LwtFdU1LbcwOpYrnJ1VmDY5AQvnD4Cvj/3RAbOZxbb9F7FhVxrKq5o6ZWjUTpg+Lg6LZ/ZHoL/90QELy2H70Sys3ZeOa5UNUFwfl15nUI7CyUmBaSNicffUZHQPsD86wHEUO87m4PtDaSgsr4OCYcBRDpT+bxlHqVBg6sAoLJuQjPAg+6MDlFLsTsvD6uNpyCmtufU8CAGFdYliclIfrBiTjN525JPczNiXfRnfnUrDxdJKK4PSG+fRoYmxkbhvRDLiuonPJ7lZhwuu4KtzqThfUs7LGBsZjgeG9Ee/EGmVsY8XF+PL1As4WVICxfXrw1EKhlgr33CUYlTPnniw/wAMlliu4UxFCT7POI+jpUU3PoObGSylGN6tB1Ym9Meo7uJzVhySJ4cj4pAkFeeU4x/z/oWmulabBcsIQxAWE4LXNjwG7wDxE1N5US3+sfRT1FU1gWNtJKQxBN3C/PD66ofgFyx+YqquaMQ//vAtKksbbCbRMgyBf5An3vhsBYJDxU9M9XWteObxH1FcVGuzgBzDEHh5u+Ctf9+NHmHiJ6bmZh2efn4j8gsqbSbRMgyBu5sWb7+xEJER4tfmW9sNeObNrbiYa23iJsRRMARajQrvPj8PcX3ET0w6gwnPfLgTZzOLbSZtKhgClZMS7z4+C/1jhbd93iyj2YJnv96Dw+mFd+T3dMZQKhi88+AMDI8XPzGZLSxeXHcAO1NzwRAimOyoYKylDN5aOhUTEiJFM1iOw2s7j2BdSqYoBqXA63MnYlYSf32W28VRincOn8BXZ1NtM64ff2nyWCxJ5i9AeLsopfj32TP4z9mzneZh3M5gKcUzI0bigeRk0VEJSim+yLyAN84eE834S/JQPJY8xBH5+Blkz/zt6L7rEACgoqgGT05/W5QTAgCUoyjOLcdTM98VbJB3s2orm/Dkgg9RV9Vs0wkBrE+45cV1eHLBh2iqF9ehs6m+DU/e+yUqyxpF7eThOIraqmY8seIL1FWLq+Tb2qLHEw+vRkmxbSekg9HY2I7H/7gKlRWNohh6vQl//ftaFFyuErWTh+MoWlr1+MuTP+BaSZ0ohtFoxpOvbkJmXjkotd1KhuUodAYTHntxPQquVotiWCwsnnx/G1KyrB2bbZ0Ky1EYTBY89s5mZBaI68bLchz+/sUuHMmwdowWcx4mC4u/fLoN5/L4uzPfLI6jeO7HfdiVZt2RZGvHBctRsCyHJ1ftxJFL/J2sbxalFK9sP4T1KZmiGRyleGbTPuy6mCeKAQBvH7I6IaIY13fJvLj3MNalZ4lm/OfsWfzn7NkbY9hiAMCbJ47jq7Q00YyvslLxxtljdjE+SD2Nf6eK2ynl0M8nhyPiEDiOw4uLP4S+1WBX6XaO5VBxtQbvP/KtTVtKKV79w9doaWy3m1FX1Yy3H1styv7Np9ahvkacM9UhluXQ3KjDq3/9QdQW4Xde34HK8kawIpypDnEsha7dgOefXCeK8a8P96P4Wp1dhdA4jsJgNOPp5zaKKl3/8XfHkHu5ym6G2cLiydc2wWiy3Yb+s02nkZZXald1TkopOI7ir+9tQZuIbrnf7ruAE5lX7eoIbXW8KP766XY0ttp2pNeeysCe9Hy7Cux17CD726pdqGpstWm/NT0HGy5csrsuCAHwzKZ9KK6z7eTuy7uMr8+l2kmw6sU9h5BTVWPT7nhxMf59Vtpk/+bxY7hQXm7T7kJVOV47c1QS44PU0zhWWiTptQ79NHI4Ig4h7UgOSi9XSeq7wrEcTu9KR5WNp/DctGJcziyV1KOGYzmknyzAtctVgnZXC6pw8XyRZEZ+VhkKLgnfBCvKGnDmRIGkSqksS3GtuA7pqcWCdvX1bTh0JEcSg+MoqqqbceZcoaBdS5sBOw5mSSrfzXEUDU06HDmdL2inN5ix4UCGpOq4HK
Vo0xmx95RwTRSzhcX3h9LsnrwBqzNiMJmx7XS28HvhKL49Iq6myR0MWPNjNpwRrrtCKcVXJy5IqmBKAVBQ/HAuw6btl2cuSK74Sgiw+oIIRuoFyX1dGELwTbrtqMiXmdIZCkLwZaa0z9Ohn0YOR8Qh7PjyiKxuugzDYM93x4UZq07a3GkjyFAw2LXmlKDNrnUpshgKBYMda88KM7aly+obo1Aw2L5R+Ca4a+9FyeMD1nyRzduEb+Z7j2TDwrLSGYRg425hxoGzedAbzZIZALB+f7pgpONIRiGa2vSSx6cUWHskQ7B78an8YlQ1iVsa7EwcpVh/OhNmC//1TrtWgau1DZIcKsC6TLPpQjZ0Jv7rnVNVg4sVVZJ7x7AcxfZLuWjWG3htipsacbKkRHJfF5ZS7CssRHUb//Wubm/D/uJCWYwTZddwrblJ0usd6no5HJHfuRqqm5GyP0tWN12O5bDrm2O8E0Z7ix4ndmXI6nTLsRz2rz8HM89ygMloxoHtabIYLMvh6J5M6No7Xw7gOIpd29JkddNlWQ6nTxagsaGd12b7rgxZDI6jSM+4hqqqZl6bLfsybCdsCDEoRV5hNa5eq+W12Xw4U3LTQsD69q5VNuJSYSWvzaYTWbKaywFATVMbUvL4+1ptOpMlqxEfADTrDDiafZX3+MZU+Qy92Yx9lwr4GRezZTMsLIftl/jzUTZcypbV5bZDm3JyeI9tLBCOYImRghCsyxOf8+LQTyuHI/I7V3VJnV1r63xqa9JB19r5k1JdVRNYi3QHoUNGvRnNDZ0/KTXWt8FktJ2zYEsWC4f6ms6TVtvaDGhvs52zYEuUo6jhcRIsFhb1IhNzbamiqon3WGV1sxw/5IbKq/mdndLqJslNC29WWU0T77Fr1Y2yusPeYNTyM4pqGiU34uuQgiEoq+e/VkW18hlKhkFZAz+juKErzoNBaVMT7/FrTU2yPw+GEJQIRCuuNTfKnrgogGst/AyHfl45HJHfufQ8T/+Sxmrr3BHRt5u6kNH5+9Xruo7BFxExdCVDx3MeenlLGbcyOn+/FpaDRUbk6BaGnv+aGAWWCexj8I+j7wKGgiFoN/Cfh84k/3MnhEBn5B+nXeCYPWoXeK9txq75rbcJLLe1m0yyHVyOUsHzaDebIffby1GKNnPX3fsckieHI/I7l1ZmSfSb5cxTllzrIq+0+62MzoubyS3tLmYsrXMXngcfQyuv1P7NcuF5v0oFAydl1/z0XQSqrWpUXXMuQgxnO/v9dCaWo3AVYLio5X/ulFI4C4zjquma75aLin8cN7X83wgB4CpwzV3VKtlLZQwhgufh4uQE+3vo3slwU3XdPcMheXI4Ir9zBfX0A5G5bgwAHr5u0Lp2/sP2DfIS3dFXSFoXNTy8O+9d4unjCk0XTOJOTgr4BnRefMfFVQM3d3k9YABrMmlgkGenx5RKBfz9uqZ4XzeBInAhQfL6mnSouwCjR7B3lxSOChV4r+FBPrKSh28w/PkZvQJ9ZOdWsBxFTwFGhL98hoXj0MNX4Dx8vbuEEeYt8Hl4ecl0EazRinAvAYanNziZcRcCINxG2XeHfj45HJHfuTz93DF0aiIUMp6QGYZg+r2jeCcdFzcNRs/qJ3vXzOTFg3kdGpVKiYlzkmXvmhk3IxFann4nDEMwfXY/2btmRoyOgocnf0n5WTOSZE3gDEMwsH8Y/P35HZrZkxMlj9/BiOsTjB4h/NVo541LkJV/RAjQK8QH0WH8lWLnjegrK7GXAAj2cUf/3vzlxRcMiZedW+HtqsXw6J68x+f3l89wUaswMZa/iuuCxDjZDJVSgemxUfyM2DjZOSIEwNyYWN7j83rHdomzszAqXuYoDnWVHI6IQ5jxwBhZyaQUwOTlIwRtpi8bLnvXzNS7hwraTFswUPaumWkLBwkzZiXJ3jUzY15/QZspk+LByPhlchzF7Bn9BG0mj4qBSiXcjM8WY97UJEGbcQN7Cy6r2BKlwIIJwk7ZyL7h8H
aT0UWYAItGJwo6lwMjQtHN213y5McQgkXDEuCk4I8K9g0JRO8AX8m7jBSEYEH/eGgEmjhG+vmiX0iw5KUTBSGYHR8DNw3/kkaIhwdG9uwpq8bHlMje8HXm/0z9nF0wJby3LMbo7mEIcZPXUdmhrpPDEXEICSOiEBYbIimawDAEI2f3h183b0G7PgmhiEkOk1SvhFEwGDAmGiHhwu3Fe/TyR/KwSDAK+29QjIJBXHJPRMYI91AJCPLEyLHRkqIiCgVBr8gA9E0U7qHi5emCSRPiJUVFFAxBSDcvDBwQLmjn4qzGnEkJkiY+BUPg7+OKUYOFe6ioVUosntRP0gTOMASeblpMGsr/9A1Y812WT0iWQLiei6BRYcYQ4T4tDENw39gBkhYDGGKNIswbLPz0TQjBAyMHSNplRK6/xyWD+tq0fXBIf1kRi2X9E23arEzuL7nGB0cp7k8WdqIB4IG+0s+DpRQrEwZIeq1DP40cjohDIITg5R//DFcvF7scBUbBoEdUMB7713JR9s9+ugLe/u52OTyMgkFgqDf+9sFSUfZP/3MBArt52X0ePn5ueO7dxaLsn3hmOkJ7+trljCgUBO4eznj1nUWiHIxH/zQefXoH2sdgCJyd1fjnawtEXeOHlo5EQkyIXQyGIVCplHj3+flQCTx9d+i+2YMxJKGnXU/hDCFwUijwwd/mwllEEufS8cmYkNzbLqeKIQQMQ/Dvh2fD09V23s/8IfGYM9C+JQFy/T8/uG8m/D06z226WdMTorB8qHCUqXMG8N6iaeju7WnTfmzvXnh4uHDUj0//nDEJvf19bdoNDQ3F0yOEI6R8emXsOCQEBtm0S/QPwqsjxktiPDNoJIZ2E99Q0aGfXg5HxCEAgH+ID97f83f4BnvZnpiIdf2+d1JPvLX9Sd6dLLfL298d7254BIGhPrYTZInVQerZJxDvrHsEbh7iwu9uHs5455uV6NHLX9TERAhBt1AfvL9qJTx9bE8WgHXHy7sfLUNkVBDI9WshyGAI/AI88K//3gM/gbyNm6VWO+GdNxYiPi7k+vsUtmcYAi8vF/zn/bsFk1RvlpOTAm//Yy4GJva8zhCGMAyBu6sGH7+2GOGhtickwBqxePPRGRiV3Ms6hg2GgiFw0arw8TPzBXNDbn9fr907GdMGRYtmaFRKfPzoXCRFdBPFIITg+QXjsWBoX9EMlVKBD++fhaF9eohiAMBTk0fhvuHWCI+tpQcFQ6BgGLy/eBrGx0SIZjw6cggeGTFYNIMhBG/NmIRZ8dGiGSuT++OZESPFMQgBAfDq2HG4O0F8h9+lMYl4dfh4EJEMAPjH4FF40BEN+cWJ0K6oZvUTyZ42wg51jVob27H9yyPY8eURNNW2QKFUgHIcQAgIAVgLh269AjDrwbGYvGwEVBr7d6q0t+ix6/vT2P7dCdRXNUOhZG50sSUMAWvhENDdG7PuGYEpdw+FRkKegUFnwq4NKdj+41lUVzTdyiAELMvBL9ADMxcPxtSFA+DiKs6ZulkmowW7t6dj64bzKC9rgELB3EjO7GB4+7hi5rz+mDk3WdKOG7OZxZ79Wdi09QJKSuo7ZXh6OGPm9ETMmZkMT4EkWD5ZWA57j2Zjw840XLlW+z8Gvf55sBzcXDWYPTEB86YmwZdn55KQOI5i35lcrN+fjpyr1VAwDAAKjlodCZbl4KJVYfaYeCya2A8BPm52MyilOJBagB+PZODilYobO0QovX6tOA5alRNmD4vFkrFJCPHzlMQ4mn0Vq4+l4cKVslsYzHWGykmJWQNisHRkP8GdMkI6XlCE1afTcbrwmtXpIdZlC4Yw4DgOTgoFZiRGY/nQJEQGiHMKb9fpohJ8l5KGo4VFdzIohYIQTIvtg3sH9kNMoPCyKJ9SysrwTVoaDly1diBmYF0aUTBWBgEwJbI37k/uJyoS0pnSqyvxddYF7L5aAAqAAbEyCG7UG5nQMwL3xydjYFCIJIZD9sue+dvhiDjUqVgLi7N7Ly
L9WC5aG9vBKBh4eLti6PQkxA/tLfj0zHEcTHoz1M4qQTuW5ZB6LBcXjuahtUkHEMDdywWDx8ciYWgkGIGMTY7jYDKYodI42bRLP3sF547no7VJB0oBd09n9B8eieShkYJLGJRSGPVmqDRKQQalFJnp13DqeAFamnTgKIWbuxb9+odh8LBIwR1JlFIYDWao1E6CkShKKS7llOP4yXw0N+vBshxcXTVISgjF8KGRUCr5EyEppTAYzFCplDbPN7ewCodP5aOxWQeLhYWbqwZ9o0Mwekik4FIMpRRGkwVOSoXNZaH84hocOJuH+mYdTGYWbi5qxEcEYdyg3oK1RzoYSqUCShuMwvI67EnJQ11zOwxmC9yc1YjtEYDJA6KgFaiDYQ+jqKYBOy/koqa5zcrQqNGnmx+mJUfBVSChk1IKo5mFQkEEE1gBoKS+CdsyclDZ1Aq92Qx3jRqRAb6YkRgND62w82w0W0CINTIjpPLmFmzNzEFZUwt0ZhNc1Wr08vXG7PgYeDsLO89Gy3WGjfOoamvFppwclDQ1od1khqtahTBPL8yNiYGfi4tNBgColcJLgbW6dmwqyEZRcyPaTEa4OKnQw90T8/rEItDFfsfWIXlyOCIO/exqrG7Gnu+OYffXR1BX3mB9QmQIgsL9MWPleEy4exhcPYVvOLbU3NCG/WvPYveaU6gubQClFIQQBHT3xrTlwzFx0WC4e8ljtDbrcXBrKnb+eBaVJVYGCOAX6IGpiwZh8vwBopdw+NTeZsSh3RexfcN5lJfUg+MoCAG8fd0wdU4/TJmdDB8/eTdOvd6Ew4dysHVrKoqLa28wPD1dMHVqAqZNT0RAgLxdA0ajGYdP5mPTzjQUFtXc2E3k4a7F1HFxmDUlEcGBnrIYJrMFR85dxob96ci7WnVj+6m7iwZTRsZg7vgEhAYJJ0rbksXC4mjGFaw7lI6sq5U3qs66atWYMjgK80cnoFc3aVGHGwyWw/Hsq/jxeAbSrpTfYLhoVJjcrzcWDk9AVIi0qEOHWI7DyYJi/HA6A+eulMJ8neGscsLE+EgsHpyA+O6BshgcpThztQRrzmfg5NVrMF1v5KdxUmJCnwjcNSABSSFBsrafU0pxrrQMqzMycORq0S2OyJjwMCxLTMSg7iFdUqPGoZ9ODkfEoZ9NulY9Pn5iNQ6vOwNK6Y3ljxuyRnyhVDlh+gNjcP+ri+Bk57ZRg96Ez17chIMbUsBauE5rUxBCoFAymLhoMB58cQ7Udi7nmEwWfPXObuxZnwKLmbXukLj9VBgCQgjGzkjEn56baXdVWtbC4ZtPD2HbuhSYTJZOm84xDAEFMHJcDB59ZjpcRebfdIjjKFavOon168/BYDCDENyxE4NhCCilGDq0N/76xBS7l3Mopfh+UwrWbDgLnd4EQsgdnwnDEHAcxeDkMDz158nwtdN5o5Ri/d50fLX5NFrbjWAIuWOXhIIhYDmK/rHd8Y8HJyNIQiG4Lccz8fHmU2hq0wsykiK74bl7JqJHoP1LLTvP5+JfW4+jrlUnyIjvEYgXl0xAZLD9Ts/+rMv4586jqG5ug4KQO3atdDCigv3wytwJiA0Rl39zs44WXMWre4+grKlFkBHp54NXpo9Hv+7CO9A606lrJXjh4EEUNzZ1zrj+b2FeXnhlwjgMDXUknf5S5XBEHPpZ1FTTgr9Pfwsl+RWiuvcShiB+WB+8uvGv0IicxNuadXj2rk9QmFUqqn4HYQh6J4Ti9e//BBeRORn6diNeeOg7ZKcWiyrAxTAEPSID8OY3D8BDZATGZLTg5b+tRerZK6K2aDIMQXB3b7z96T2ioyMWC4vXXt2GEyfyRdkzDIG/vzvee/8uBIqMXLAsh3/+Zy/2HRHXAVXBEHh5uuCD1xYhNERc5IJSine/OYTNBy+KZri5aPDhswsQEeonmvHhphNYtfeCaIZWrcJHj89FXLj4XIbP9p7FJ7vPiLJlCIHaSYmPH5qN5AjxuQzfnUjD27uOiWYoFQ
w+XD4Tw3v3FM1Yl5qJF3cdAmC7aTNDrAmu/54/DeOjxCfRbsvJxZN79oKC2vyNEFgfPt6dMhmzYsQn0Tr088me+duxa8YhSTLojHhu3nuinRDA2nX20ql8vH7Px6IKj5mMZrx83xcovFQmuogY5SguZ5bilQe+hNlkuxsva2Hx+uM/ICdNnBMCWKMO1wpr8MIfvoXRYLvpGsdRvPXCZqSevSq6TgTHUVSUNeAfj6zhbcJ3syileP+9PTh5UpwT0sGoqWnBU39bi5YWvajXfPT1EdFOCGAtbd7Q1I7Hn1+PhsZ2Ua/5fMMp0U5IB6Ol3YBH39iAqrrOOyffru/2nhfthHQwdAYT/vyvzSipbhT1mnUnLop2QgDrsofRbMHD/92KyxV1ol6zLTVHtBPSwTBbWDyyajsulVWJes3+3Mt4cdchUNh2QjoYLMfhsY27cOFamSjGsaIiPLlnLzhq2wnB9ffBUYon9+zF8aJiUQyHfrlyOCIOSdLWT/aj8OI10U5IhziOImXvRRzdYPsGvfeHM8hOuWI/g+WQeeYy9q21zTi8MwOpJwrsrpbKsRwuXyrH9jWnbdqePpqHk4dz7S53zrEUJUW12LDqlE3b1NRi7NuXZXdBLI6jqKxswupVJ23aZudVYNOONPsA1xkNjW34fPVxm7ZXSuvw7dZzkhgtbQb8Z81Rm7bltc34eJPt872DQSn0RhPe/v6wTdu6lna8temIJIbRbMEraw/atG3RG/DSFtt2t4vCmrPyj/X7bH4n9WYzntm+XxKDoxR/27rXZuExM8viyd17JbUDoJTiid17YGZZu1/r0C9HDkfEIbvFshy2f3bwznwQkSIMwbZPhW+glFJs+1r8k94dDADbvz5u8+a2ffVpyU3/KKXY8f0Zm9GdbetTJPen4TiKnZtTYTYL32i3bU2V3GeH4yj27LkIvV64Ff2W3enXt93aL5ajOHAsF61tBkG7zQcyJDdmYzmKYxcKUdfYJmi36dhFyZ85y1GczbmGspomQbvNZy5JqpIKWCfwzOJKFJTXCtptS82B2SJtAuYoxZWaBmSUVAra7bqUjzajSVJVWY5SVDS34tSVa4J2BwqvoEGvl8SgABr0ehwsvCLh1Q79UuRwRByyW+f3XUR9ZZPk11OOIj/1Kgov8t+gss4UoqKoVvLNnFKgtLAa2SlXeW0KLpWhMKdCskMFALVVzUg9WcB7vKS4DpmpxbL607Q06XD6aB7v8ZqaFpw5c1lWnx293owjR3J5jze16HDoRB5YTjrDYmGx59Al3uPtOiN2Hc+W15iNAtuPZPEeNpot2HwsS9bnwTAEm45l8h63sBzWHs+QVUpdwRCsP8nPoJRizekMyeN3MH48w78ERinFqpR0WQ3mFIRgzfkMQZtVaemS+98A1pyU79LTJb/eof9/ORwRh+zW8S0pknrG3CyFksGJLed5j5/YlS6rI/ANxk7+G9TJfZdkdesFrN10T+7jn1xPHs6R3aaeYQiOH+TPy7DmhchjEAIcOZzDe/x0yhVZjg5gdQ4PneB3qM5lXYNRRF6PkDhKsf80PyMtvwxtets5N4IMjmLfOX7GpWtVqG/VyWKwHMWeNH5GQVUdyhqaJUURbmbsy+JflixrakF+dZ08BqU4VlgEg7nzz7VBp8f58nJZThtHKc6XlaNBJy7PyaFfnhyOiEN2q7Gmxe68jTtF0CyQWNhU1waOlbehi3IUzQ38YfomgWNixbKc4DjNDe2yHRGOo2ioEziPRh0UEhr93SxKgYYG/mTSxmad5CWTmyWUsNrYopPd3r1jHF6+TAehQ01t/JNeQ1vXMNr0Jt4IVFcxLCyHdlPnS3IN7V3DoBRo0nd+vep1XcMAgAZ9143l0M8rhyPikN1ibeQriBOFRWB9m69eiF0ESmEReK+chQOV9bxnlcUkcB4s1wUECF8r2U7hdYZAwh/LcrYb3vxCGELXo6uuldASVVcxhMayyFm+un0sXkbXnYeZh8HSrm
N05ft16OeVwxFxyG65e7tKTva7IULgJlBp1dVTK3vZhFEwgs3yXD20sqszEobATaAgmKu7VtyeRxvyEGK4amTlPHTIXaDuiquLGlwX3OjdBXr6uDqrZTufHePwyU3gmD1y0f70DCelgresvocA31658ZSjt1VC3h7xvV93ddcxunIsh35eORwRh+xW/PA+sicM1swiflgffsbgCNlPlqyFQ9ygXvyM/mFgLTLzHjiK+P49+Rn9esg+D0II+ibzMxISQ2U7IgxD0C+Jv0tsUnyo5MThDikYguSEnrzHE6PkNyRTMAQD4virbcb3Cpa9VKZgCAZEdec9HtM9AE42+ruIYfQX6AwcGegLF4GeOWLEEIK+3QN5e+r08Pa02WvGlgiACD9vuKk7d0QC3VzRzV1+H5hu7u4IdJPXesGh/z85HBGH7Nb4JcOgknkT9A70wKApibzHR05PgrOd5c1vl6uHFiOmJ/EeHzw2Gp4+8nrTqDVOGDurH+/xpAHhCAz2lJVLqlAQTJrJfx7R0cEIC/OTtapBKTB9Bj8jLNQX8dHdZE3iLEcxW+AzD/b3wOCEnrIZ8yfyn4evhwvG9ouUle/CchQLxybyHnd31mD6gGjZjCUj+c9Dq3LCvAHxshgcpVg6jJ/hpFBgSf++sna0UADLBybxRh4ZQrAsKUlWbhABsDwpUdb7dOj/Vw5HxCG75eLhjPF3DZe8dEIYgpl/mACFwFOjWqvClLuGSt6dwygIpi4dBpWG32FSKBWYvmSI5ImPUTCYODcZzgLl6hmGYNaigZJvtIyCYNTEOMGlGUII5sztLzliwTAEQ4dGwM9Gr5a50/tJjrwwDEFSfKjNMu8LJiZJZhACRIUFoE+YcB+VRWMTJW8RJgBCA7zQr7dw9GbR8L6ytiH7e7hieExPYcYgeQwPrQYT4oRLsC/sFy95fADQOjlhenyUoM38uFjJ9WkAQMkwmBcXK/n1Dv3/y+GIOCRJi/46DRpXjd2TuELBwCfIE9PuH2PTds7KMXD10IKxc0cIo2Dg7uWCWfeNsmk7/a7B8PJzs9vhYRgCZxcV5t030qbtpJn9EBjsZTeDEAKVSokl946waTt+fCx69PC1e/cMIdZtzsvvsc0YNSQSfSIC7H4K7+gLsnLZcJu2gxPCkBgVIsk5JCD40xLb55EY2Q3D4ntKeoKmAB5bMNJmblF09wBM6tdbcg7SX2ePsDk59/TzwvwBcZIjYY9PGQ6VUrgBZaC7G1YM4o/42dJjo4fARSXcgNJLq8XDgwdJZvxp8CB4aeUtITn0/yuHI+KQJAX29MOrGx+HUqUUPcEyCgbO7lq8uf0puHvbXs/1CfTA69//CSqNyi6GRqvCa9//Cd4i2tx7eLng9S/vg7OLPQwCJ5USr/x3BQK62e7G6uKqxpsfLYO7u/gEXIYhUCoZvPzeEnTvabsbq1rthLfeXgRvb1fRzgjDEDAMgxdfnIOICNvdWJVKBd5+YR4C/T1EOwrE6oXg+SemIS6KP+fh5vf09hOz0DPY2z4GgGdWTsSAOP48l//ZE7z5h+noE+pvtzPy5JIxGJXIn3d0s169exKSwoPtZjw6fRimJAtHETr03OyxGBbZw+6I28rRA7BgoLhox5Pjh2NidITdjKUDErFisDgn5pEhgzE3NsZOAjAvNgaPDBls9+sc+mXJ4Yg4JFmxQ3rj/YPPwcPXmmzGN3F0TPBBPf3w4fGXENpHfHvwiPju+GD7X+ET4H59LGGGb5An/rXjr+gVKz7xsUdEAP69/mEEXncqeBnXz8/T2xXv/fAQogWSO29XUIgXPly1EiE9fAQZHU/Qbu5avPv5CiQOCBPN8PNzxyefrkB4uP8t7/dOhvW/nZ1VeOfdJRgyJFI0w8vTBf99dylir3+GfNGRDoZG7YS3XpiLscPFTawA4OaiwWcvLUFyTHebDEIAlZMSb/5lJqaPjhPNcNao8PlTCzG8b5ggo4PjpFTglfsnY/E4/pyK26V2Uu
K/f5qLiUm9bTIYQqBkGDy/aBzunzhQNMNJocBH98zCnP6xohgMIXhq2ij8ZbLt6FSHFAyDD+ZNw9KBidb/L+BYMYSAAHhs9FA8N3m06IgQIQRvTZ6EhwYOALHBUFxnPDRwAP45eZLsnW8O/f+L0K7YL/cTyZ42wg79/8lkNOPktgvY9ukB5J2/s+dD0phYzHpoPAZOTpScV2IxszizLxPbvz6OSyl3MuIHR2DmvSMxeGI8lE7SdiywFhYpx/Kxbc1pXDx7JyMqMRSzlg7B0AlxUKmEQ9q8DJZD6tkr2LY+BRfOFN6xtTcyOgizFg3CyHExUAvktwiJ4yjS0oqxbWsqzpy5fEfuSFi4H+bOHYAxY6Kh1QqHzflEKcXF7DJs3pWG42cu35HXERrijfnT+2Hi6Fg4O0tnZF2uwKb9GTh0ruCO3UfdAjywcFI/TBkRAzcX6YnN2UVV2HAkA/vO5d1R7yLIxx2LxiZi+rBYeLpKD//nldVg/clM7EjJgem2mjD+Hq5YPDIBswfHwceNPxfIlgqr67D2bCa2XMi+o5Kpt4sWS4YkYv7AOPi7S99dUlTfiLUXMrEhPQvtpls7T3tqNVjSPwGL+sUjyEP6TpiSpib8eDETP2ZmodV4axVcN7UaS/rGY0lCX4R6ekpmOPTTy5752+GIONSlKi2oRNW1WuhbDXDx0KJbRCACe/jx2rc365B9Oh8t9W0AIfDwdUPc8D7QCkwsFUW1qLxWB12bAc6uGgT19EVwT36Gvt2A7HNX0HK9AqqblwtiBvaCixv/xFJV1oDy4jro2ozQuqgQGOKNkDB+hkFvQvb5IjQ3toNyFG6ezoju10OwjklNVTNKi+ugazdCo3GCf5AneoTzM0xGM7LSS9Dc2A7WwsHNQ4vo+BB4ePHv/KmtbcW1a3VobzdCo1bCz9/9+g6bzp8izWYWlzJL0djYDouFhZubBlHRwfASWEqrb2xD0bU6tLUboVYp4evjiogwf16GheWQlV2GhoZ2mM0s3NzU6BMZCF8f/smrsUWHwpJatLYboXJSwNfTFX0EGCzLIaugAnUNbTCZWbi5qBHZ0x+BAgm5zW165JfWolVngJNCAR8PF0T3COCNLHEcxaUrlahubIXRaIGrsxqRoX7o5se/JNiqNyKntBqtOiOUCgbers6I7RHAmw9CKcWl4ipUNrTAYLLAVatGr2Af9PDnXxJsN5qQXVaNZr0BCoaBl4sWcSEBcFJ07qBTSpFTVo2yhhboTWa4alQI9/dGeIAPL0NvNiOrvBrNBgMIIfDUatC3WyBUAoy8qlqUNDRBZzLDRaVCT18v9A7gX3Y0Wiy4WFmFJoO1UaKnRoOEoECobeS1OPTLkMMRcegXrysXr2Hn5wdxYPVxmAy3PllpXNSYfO9oTH9wPEJF5BXw6Vp+JXZ9exz7fzwN422dZVUaJ4xfNBjTV4xCWIx0RllRLXb/cAb7NqRA13br05uTSoExs/phxt1DEREnvUZGVXkjdm28gN2bLqCt9dbutUolg1GT4jFj4QBExYdIDlPXVLdg14407NiahpaWW8txKxQEI0ZFYdac/ojr210yo76hDbv2ZmLLjjQ0Nt1ajpsQguFDIjBnRj8kJYRKZjQ267DzcBY27c1A7W2l9wkBhiSFY97kRAyUsU24uU2PnSeyse5gBqrq72xTMCi2BxaOT8TQhDDJu0Fa9UbsPJeDH49moLS26Y7j/SNDsHh0IkbF9+KtA2JL7QYTdqXl4vuTGbha03DH8cQeQbhreBLGx0dIrouiN5mxKysfa86mI7+67o7jccEBWDo4EZNje0PNU7zNoV+nfjGOyK5du/DKK68gMzMTGo0Go0aNwtatW0W/3uGI/PZkMVvw4aPfYM9XR6BQMrwFxRgFA47lMP/xaXjgzSVg7LihsyyHL1/ahK2fH74xTmdSKBiwLIepy0fgT28uEtxOfLsopVj1r31Y+8khUYyxs/vhL28sgJMdSzqUUqz7+gS++f
gQGEJ4t7V2MIaNjcbfX59n95LOlo3n8elHBwDAJmPAwHA8/8pcONtZPXTXvky8/+E+UCrEIGBZioS4ELz2wly42VlH5sDJXLz+8T5YWP72AAxjvY7REYF45+k58BKIWHWmY2mFeO7T3TBZLLzbpTsYESG++PcTc+HnZd9SyJnca3jyix3QG60OemeYDkYPfy988uc5CPaxnZh9sy5cKcOj32xHq8EIwscgBBylCPZyx39XzkGYv/DW69uVVV6FP6zeikadHoSg0+vVwfBzc8EXy+agTyB/RNChX5d+EY7Ipk2bsHLlSrzxxhsYO3YsLBYLLl26hIULF4oew+GI/LbEWli8vOBfOLc73a7KrOPuGo6/ff2QKGeE4zi88/C3OLrlvOjS6oQAgycn4NmvHhSVw0IpxX+e24i961LEAWB94k8cGoFXvrxfdA7L5+/vw6bVp0UzGIYgqm8I/vnpPaKdkVXfHMeqb07YxQjr5Y8PPlwOrcjcj3WbUvDpl0ftYoR088LH7y0V7YxsPXAR73x+kHdS7YwR4OuOz19fAm+BVgM3a8/pHLz4+V7RDAVD4O3ujK9fuAsB3uJyJg5nFOJvX+4EQCGmRIiCIXBz1mDVk4vR3c9TFON0/jU8/NVWcJSK6nqrYAi0Kies/vMiRATa3sEFABeKy3D/qs2wsJw4BiFQKZVYff8CxAbb3sHl0C9f9szfP8muGYvFgsceewzvvPMOHnroIfTu3RsxMTF2OSEO/fb02VPf4+zuNLvLwx/64STWvLZZlO2at3fi6GbxTghgfVI7s/civnpZHGPjF0ftckKsDIqM04X46EVxjB3rU+xyQgBrpCE3swzvvrhFlP2BfVl2OSEdjKIrNXjlxc2iPscTpwrsckI6GGXljXj2lc2iipudu1iMd784CED8x85xFDV1LXjyzc2CDQU7lFFQjpe/3GcXg+UoGlp0eOTdTTDcltjZmXJLqvH017tAqTgnpIPRqjPgjx9uQpveaNP+SlU9Hvt2O1gqzkHoYOhNZjz4+WY0tfN3He5QWWMz/vj9NtFOCACwlMJoseCBVZtR0yK/K7ZDvy79JI5IWloaysvLwTAMkpKSEBQUhClTpuDSpUuCrzMajWhpabnlz6HfhurKG7Dtk32SG8Cte2c7WhuFb1AtDW3Y8NF+aQAKbPvyMBqqmwXN9O1GfP/hAWkISrFvQwoqrt25Vn6zTCYLvv34kDQGR3F8fzauFlQJ2rEshy8/OyyJwXEU589dQc6lcuH3Qik++/qYZEbmpTJcSC+2afvZ9/Y5Ux1iOYr8qzU40clOrzsYm09J+u6yHEVxRQMOniuwzdh9Fhy1vx80y1FU1Ldgx7kcm7ZfHj4PM8vaXYWX5SjqW3XYcDbLpu3Xp1KhN5tFOyEd4ihFi96INecy7HtzDv3q9ZM4IlevXgUAvPTSS3juueewc+dOeHl5YfTo0WhouDMpqkNvvvkmPDw8bvx1787fWMqhX5d2f3lYVj8Ji4nFgdXCE87+tWfkNbGjwN7vTwmaHNmeBqPe9tMtnxiGwZ61ZwVtTh7KQVuLQdBGkKFgsHPDeUGblLOFqK+T/uSpUDDYtuWCoE1GVinKKhqlMxiCLdvTBG1yr1Qhv6hGVnn7jXvSBW2KKxqQmldm98TaIUKAtQeEz6OqsRXHL12VVbL9x6MZglGqxnY99mbkS2ZwlOLHkxlgBbowtxtN2JKWLYux7nwmTBaLbWOHfjOyyxF5+umnQQgR/MvLy7vRLvzZZ5/FvHnzkJycjG+++QaEEGzYsIF3/GeeeQbNzc03/kpLS+WdnUO/CFnMFuz47ICsDrEUFFs/2st7o+U4Dtu/PCKrKzDHUez4+ihYgVD9tu9OyWoux7Ecdq89B5OR35nZ9uM5WY3fOJbDgR0ZaG/jd2a2br4gr7kcy+HYkVw0NbXz2mzZkWZ3yflbGBzF2fNXUF3DHxndsi9DXuM3jiIjtwzFZfW8NpuPXJTFoBQoKKlFzl
X+KNWmk1kgMlx1CqC0tgnnC/jvmVtSLoGVmRJY29qO47lFvMe3X8yFUaYT0WIwYl/2ZVljOPTrkl2OyBNPPIHc3FzBv/DwcAQFBQEAYmL+V7JXrVYjPDwcJSUlvOOr1Wq4u7vf8ufQr19VRbVormuVNwgFqor5x2mqbUVtufSn75vHqSnrPGqnbzeipLBa8tN3h3StBpRdre30GMdxyL9ULstpAwCT0YKiy9W8xy9llslmsCyHgrxK3uOZWaVgWXkMSoGcvAre4+nZZbKiCB3KyudnpObJZzCE4OJlfkZaofSIS4cUDEHGVYFrVVQhy1EHrA3m0ov4l+TSSypkd8FVMgxSS/jPw6HfnuzauO3n5wc/P9vbq5KTk6FWq5Gfn4/hw62lhM1mM4qLi9Gjh/iy2A79NtTWzP/UbPdYje3w7KQoVVuzrhNriQyesdpabCfqiWd0Ppa+3SR7srjB4FneYVkORoGIjD1qbeWPuuh0Jt5jdjEEIjttOtsJmrbEMASt7fyMVp30ZbJbGALjNAvwxYoQglaB69HUBecBAC0CSbHNeoPsqAtHKVoN8j9Xh349+kkqyLi7u+Ohhx7Ciy++iO7du6NHjx545513AAALFiz4KZAO/YLl1IWFipQ8dTiklnXvfKyfgaHqfCyFsuvStvjer7XZHX9dEnvkJHBNFAoGMNvekWKTIVDfRWrLgJtFKRVkKHmqhdrHED4PqQXD7BnHqQuulXUc4fMQu72ZT4R03Xt16Nehn6yU3TvvvAOlUolly5ZBr9dj0KBBOHz4MLy8bHcrdei3JU9/+4ot8YrgRoO92+Xh4wbZd8Dr8uRhuHk4g1EQcDKXGwDA06fzIldqjRPUGicYDfIjFl4+ndfHIITAzV2L5ib5USQvgfLynp7O0FcJ70ISxfDkLzrm4+mCRpnRMEohWNjM19MF5TVNsr5aLMfBy52f4efhgoIyImt5huMovAV61fi5u0JBiKyIBQWFt6vAtXJxBsMwggmttkXg4yK9545Dvz79ZG6nk5MT3n33XVRXV6OlpQUHDhxAbGzsT4Vz6Bcsn2AvRA+OvNEhV4oYJYOBkxOhde28wJWLuxbJo2LkMRiCuMER8PLvPDdJ6aTAsInxshiEIQiPDkJQaOd9PAghGD05Tt6TPrF2+w3vHchrMm5CHG8HYLHy9nZBjECX4wljYmQlxAKAi4saSYn8y7kTR0TLSh4GALVKiSFJ/F2OJw2Oku3fKhQMRiX14j0+ObmP7BwRCopxiRH8jMTespdNWI5icmJv3uNT4vrIdEKsTtvU+D6yxnDo1yVH/Muhn0WzH57EWwZdjDgLh1l/miRoM+P+0fIYHMXM+0cL2kxfOlQWg3IUM5cPF+ylMmPhwDs6zdojAmDWkkHCjFn9ZEV2CEMwc05/waWk6VMSZSX2MgzBzCkJUAuUxZ82JlZyPxfAmuA5dXQsXARK1k8eEg2NxG7LHYyJA/sIRkTGJ0XCTWtf2fzbGSPjwhHkzZ/gPzI6DH7u4qrIdiaGEAzoFSJY6n1gWAhCvT0l7/9hCEFMkL+juurvTA5HxKGfRcPnDIS7jyuIhCdkhiHwD/VF8oR4Qbv+42LhG+wpiUEYAg8fVwyZkihoFz8wHCHhfpKe9AkBnF3VGDVdmBEZHYzescHSognEmpMzYYYwo3uoD5L6SW/8xhCCqTbOw9/PDcMGR0je+kopxYypwgxPd2eMHx4lmcFyFHMmJgjauGhVmDEiTvK1YjmK+eOEGSonJRaM6Ct5xwnLUSwelShoo2AY3DUsUTKDoxR3DRdmEEKwfEiSpPE7GMsGCzMc+u3J4Yg49LPISaXE06v+bPfrCLEW6Hpm1cM2e80oFAz+/t/7rcsa9txriZXz9//a7gNDCMFT798FhZNCQodYgr+9uwQare0eLU+8PAdqjZP9DAr89aXZcHXT2jR9/G9T4eKiljTBPvrXyfDmyXO5xe6P4+HurpXE+OMDYxAc5GnT7s/LRsHX21
US4/6FQ9Crh+2dgH+YOxTd/DwkOTx3T05GfESwTbv7Jg1EryAfuxkEwLxh8RgUFWrTdunIfojtHmA/gwBTEvtgXBz/0k+HFiTHY2BYd7sdHoYQjOkTjhkJ0Xa9zqFfvxyOiEM/m/pP6ItnVv0ZjJIRFbVgFAyUKiVeWP84YoeKWzOOGxSBZ79cCaVSIWpiIgyBQsHg6f/ej6SRUaIYkXEhePnze6FSK8UxCAFhCB5/ayEGjxeXJ9Wzlz9e+2gpNFonUbkcHff8Pz8zDWOmCEeOOhTczQtvvX+X1RmxI19k5UNjMW2GuKdefz83vPfmIni4a+2a/JYvGYIFc/qLsvXycMa/X1gAXy8XuxiLpiXj3vlDRNm6u2jw0d/mI8jX3S7GrFFxeGThSFG2LhoVPv7zHPTw97LLqZrUvw+eXjRWlNOqcVLik/tno3eQn12OwujocLy2eKIohkqpwEdLZiAhJFA0gwAYFNYd7y2YKmupzaFfp36y7rtdIUf33d+mMo/n4uPHv0NRVgkUSuaOsuwd/9ZnQC/8+d8r0Kc/f5Ifn/JSi/DJM2tx+WLJjRb2tzCu/1t4bAj++MZCxA2OtJtxJaccH7+0Bblp1wQZoREB+MNzM9FvOH+SH59Krtbiwzd3IvNCMRgFc0d+SgejW6g3HvzrZAweZX+SX0V5I/79/h6kni/qdFtvByMg0AMP/nEcRo2x/4m1prYF//roAM6kXOmU0fFvfr5uWLliJCaOsz+xvaGpHe9+eQgnUgoBAl6Gt6czHlg4DLMm9LWb0dymx7trjuBgSj4oxR0Jph1t7T1dtbhv5iAsmpBkd1SrVW/EuxuPYldKnrX3DA/DTavGion9sWL8ALujQTqjGe/uOIat53Ng4ViA3rrhjBDrbiIXtQrLR/bDHyYMsttBMJoteO/ASay/kAnT9WrFtzCu/3+tkxPuHpSAR8cNFdwa7NCvS/bM3w5HxKH/F1FKkXf+CnZ8uh/ndqejvUUPQgAXDxcMm90fMx6cgIiknrI5hZkl2PHNMZzZcxG6Fj0oABc3DQZN7Ivp941Cny5gFOVXYtf3Z3BybybaWvSgHIWzqwbJI/tgxrKhiOnXU8Iyzq0qKarFrg3ncXTfJbReZ2idVUgaFI6Ziwehb7J8RnlZA3ZuT8eh/ZfQ0qIHy3Jwdlahb0IoZs3tj379w2TvgqmsbsbOPRex7+AlNDXrwbIstBoV4mK6Yc7MfhiYHCa7NkhtfSu2HczE7qPZaGjSwWJhodWqEN0rAPOnJGFoci8oZTLqmtqx/fglbD+ehbrmdpjNLLQaJ/QO9ceC8YkY0y8CSpm1QRpaddh+NhubT11CTVMrTGYWGrUTIoN9sWhkAsYnRUIls0ZPs86AbRdysPFMJiqaWmEyW6BRKRHm743FQxMxJakPNDIZbQYjtl3MxdrzmShrbIbRbIHGSYkePl5YMrAvpsVHwUVte7nSoV+XHI6IQ786dXwNbU2mlVersffrwygvrIS+1QCtmwbdIoIw+f6xCAoTzrQXy6gprce+NSdRdrkKujY9NC4aBPX0w8S7hyEkgn9LrD2Muqom7F93DiWXq9DeaoBGq0JAd29MmD8QPfoECb62g2OL0VjXhv1bU1FUUIX2NgPUGhX8Aj0wYVYSwruI0dykw/5dF1GYX4m2NiPUGiV8fN0wfnJf9ImxnRchhtHaasC+g5eQl1eJtnYDVColfLxdMW5MNGJjutl8vRhGu86I/cdykJVXgbZ2I5yUCnh5OmPc8CgkxoZ0CUNvMGPfmVxk5JWjtd0ApYKBl7szxg7qjf4xoTadPDEMg8mCA6n5OJ9XiuZ2AxQMAy83LcYkRmBorO3kZDEMk8WCAxcLcSb/Gpp1BjAE8HTRYnRsL4yICbPp5IlhOPTrl8MRceg3p7SDmdjw7nZcOHARDMOAUgrKURDG2myR4zj0n5SIhU/ORNJYcTkStyvrVAE2frgXKfuzQBhyJ4PlkDgyCvP+PAkDbOzg4V
NuWjE2fXYYp/dl3cin5TjrjZlhCFiWQ9zAcMxZORpDJ9m/dAAAl3PKsfGbEzhxINt60++E0Sc+BHOXD8PISfGSJoWiKzXYsOY0jh7MBstSENLBsHYYZlkOEb0DMWfRQIyb3FdSJKWktB7rNqTgwKFsWCzs9c/5VkZYT1/Mm9MfkyfGS4qkVFQ14cdt57HncDZMJgsIcycjtJsX5k9Pxozx8ZKiHNX1rfhh9wVsP3oJeqMZCobc6F1zY2nN3wMLJyZhzri+kqIctc1tWHMgDVtOZKHNYLrBILAuSbEcRYCXKxaPScLCMQnQqpzsZjS06bD6WBo2nMpEi95463lc/9++7i5YPDwBd49IgovGEeX4PcvhiDj0mxGlFD+8sRnfPr+20xyJm9Vx/P437sKiv8+2a4Ld8ukBfPbsOjCMOMaSJ6dj+T9m2cXY88MZfPiP9WAIEawT0pHLMPv+UVj5/Cybu4Vu1pFdF/HOsxtBAFGMyfP645HnZkJhxwR76lgeXn9+MyhHBRmEWJ250RNi8eRzM6GyoxZHyoWreOHlLbBYWMHGeR25DMOGROC5Z2ZCoxE/wWZkl+Lvr2+B0WgWbGrX8REn9+2B1/8+C84idj11KPtKJR5/ezPa9SZhxvX/6BsZjHeemA13l84L93WmgtJaPPyfzWhq09tszkcIENXdHx8+MgfeAnVNbldRdQMe/O8m1Da32yy8xhCCMH9v/PehuQjwtL2zyqHfpuyZvx3pyQ79ovXjm1vw7fNrAcBmIbGO41/94wese3ubaMa2zw/hs3+sA6h4xo/v7sR3r28Vzdi//hz+8/Q6m5M38L8ky61fH8PnL4tnHN+XhbeeXg+O5UQz9m2+gH+/vFV0o72U05fxyj82wmJmbTI6xjx2MAdvv7xVdG+bjIsl+MfzG2EyWWx27+1422fOXcGLr24VXQguu6ASf315IwwGYSekg0EpkJZVgqde2wSTWVyb+8sltXj49Q1os+GEANakTUqBrMJKPPrPTTCIbEp4rboRD7y3XpQTguuMgrJaPPj+BrQJNK+7WRUNLVjx4XrUtdh2QgBrAm9xbQPu/Wg9mtq7rlGkQ79dORwRh36xSj+chW+e+1HSa7965ntcPJZt0y7vwlV8+rQ0xtr3duHcvkybdkV5Ffj3U2vtB1Bg2zfHcXR7mk3TitJ6vP30evvqp8A6Me3fmoa9my7YtK2vbcUr/9gIe8ulUkpx/PD/tXfnUVFc+R7Av1UNNCCIbK2yikgkxgVExQhGFEQNahBFkxjjdsxJBg2OyVOik3HmRLP4NOaFmadOMqMmmtEo7mvcRY0DihBRgRglyiaohG5p1qr7/iAwcZLqvV9B+/ucw/HY93b/ftfGrl/fqrr3BnZt/5fevmp1HZb9MQOiaFwYUWTIvnQLW766oLdvXX0jFq/IgCCIRi2rLooM390ow2dbz+nt29jUjN//9y40NgtGbS4oigxFxZVYu+W03r6CKGJB+m7U6ZnR+fXzGIrvVWPFluN6+zLG8Obf90JdV290jPJqNZZuPWLwc8iTiwoR0m7t/Hi/yfu6KOx47Px4v95+u9cdN3ndAl7BY2e6/g/a/ZsyTb44j+M5ZKw/qbffwe1ZLQdVU060csCOjZl6Z0UO7ctBc5Ng8rLtGV99q3fG4sg3V1FX32jwDM0vMQZk7L6MxkbdMxbHzt6AWlNv0u7DjDHsOZILbV2jzn6ns2/ifnWtSTFExnDw7DXUaHTPJpzPL0ZJVY1RBUJbDJHh+OXvca9ao7Pf5R9KUVR236QYgshw7kYxiiurjX4uebJQIULapYriSmQdvmLyvi5Cs4h/HchB5Z0qyT7VlTXI3HvJ5H1dREHE1fNFuFNYJtmnVl2H4xnZJsdgIsPN/BIU5d2R7NNQ34TDO7NN3zuGAWV3HuC7S7cluzQ3C9ifccmkA2urB/cfIevCTcl2UWTYteeyWfvTaB7V4+y5Isl2xhh2HMgxa6O8+oZmHDt7Q2efr7
+5YtatzoIo4sBZ3TN620/lmry0fatdmVd1tv/znHkxFDyHHRf0zxqSJxsVIqRdOrrxlFEXaf4WjudwdONpyfYT2y8afZrhP/EKHke3SE/Vn9mXg2Y939D1USh4HN12UbL9wonr0NYadr5fV4zDO7Ml27O//QE/VWvNisHzHA7uuSzZnvfdHdyrVJsdY9+BK5LthT/cQ/HdB2a97RwH7DmSK9n+Y9lD5N8sN6toYwzIOJ4n2X6vWoOL1380aaailcgYdp6RLhJqautx4rubZsUQRIaMi1fRbMYmjsT2USFC2qWyHyrMfg0OQPnte9IxblWCM7PYEUUR5cXSsy5lP9436o6U3yIIIkpv64hx54HOXXANjVFSfF86RslDsxczE0WGkjsPpWOU/2TW67fGKC2VPhVQWmF+DMZ0v05ppfkxAKDivlryFFVJVY1JZ+H+U/WjOtRLFMpl1WqjrqGRom1oQo223uzXIbaLChHSLtU9qjf5tEwrQRSh1XGeva623qRrER7DWk6/SMdoMD8GgFpd46hrBGfyxuv/pn0kPatSV9dokUWo6rS6Y5hb7AAtC4eZ0maMhgbpWS6thWKIjKFB4g6dOgPvqjGEtv63r3fRNui+DsaoGBZ8LWJ7qBAh7ZKzq5PJF6q2Uih4OOvYhdbZxdGgzfd04TigU2cdMTo5WuQA3knHOJycHMAs8P3Y2UWpO4YFCirnTrpjmHM649+vI72WiJMR64zoomu9EmcLxeB5DkqJxc0sFQMAOkmsi2LJBclocTOiCxUipF3ye0r/8uD6MAb4hkgvZe4X0s3sWReO5+EXLL20vF+wCs1NglkxFAoeASE6YgR5/WrjQFNiBAarJNv9Az3NLhJ4BYeAHl7SMfw8zHp9oOXgHeDvKdke4Gt+DI4D/H3cJdv9u0m3GRwDgK/KTbKI9Vd1MeuC21Zebp0kix0f986ws8BOuK6OSrg5G75AG3nyUCFC2qUxs0ea/Q2cMYYxs0dKto+aOtTsDdZEQcTYV4dLtg8fHwaljm/ohhAEEWNfkt6u/tlRfeDiat4HvSCIeD55iGR7RGQwPL3MWyVTFBjGT4qQbO/X1w8+3c07wIoiwwsTwiXbQ4JUCAlSmTVLxRgwaWyYZLt/N3eEh/qZfZppSpx0DG83F0T3DTLrjhae4zB1xADJ9s7OjogPe8rsGFOG9TP5FnnyZKDfDtIueft5Yuj4CPAmXoSpsOMRlTgEXj7S34DdPF3x3KTBJl/oySt4hMf0gU9P6dkKZxdHjE6ONPk0E89z6B0WiJ59fCX7ODjY4fnkISYf+DgO8O/pjT5hAZJ9FAoeEyYPMutUlqprZ0REBuvIg0NSonShYgi3zk6IHhais8/khHCzilxnR3vEDg/V2WfK6DCzZpDs7BR4fngfnX2mxoSZdUcLOCAxuq/OLtOi+psVgzGG5GdN25eJPDmoECHtVvJbE01fR0QQMWXRBL39Jv1utMkHDFEQMWXBGL39Js4a3vIt34RjuCgyJL8xSm+/hGlDoLBTmDSbwBgwdc5zemcJxk0Mh1JpZ/JsQvL0YXqLpTGj+6JTJ0eTi6rkyYP1bkwXFx0Kjy7OJsXgOCApYSAclbpnuUZEBKObV2eTZhM4Dkgc2Q+uevabebZPIIK6eZgUg+c4PB8ZCi+3Tjr7hQX5oG9AV5NjjOwXDD+vLkY/lzxZqBAh7Va/4U/jjTWzTHpuyidz8Myw3nr7hQwIROonr5oUY9YfJiFi1DN6+/n36oq3P3nFpFVPp/4uFlHjpKfPW3X1cceyNS8C4IwuFCa8GIm4idKnM1q5e7jgz6umgec5owoejgPixvXHxCmD9PZ1cXHEhyumgOd5owoFjuMwPCoEL00bqrevUmmP1X+cAns7hVExeI7DoAGBmPviML197ewU+GRxEhyV9sbF4Dn0C/HBgpefM6hv+oJEuDgpjSoUeJ7DU37eeOelWL19OY7DJ3MmwsPF2agYCp5DoLc73ntJf6FOCB
UipF1LWpiAN9bOAjjoPYWisOMBDkj5nzlIXDDO4BhjZwxH6ievguM5vdeMtJ4qmrN8MqYtet7gGDETB2LxpzPAK3i9p2la219KjcesJeMNjjE05mm8u/ZlKOx4/eNQtBxUJs2Iwutp4w0uXsIHBeG91S/CwcFOf4yfD1xjJ4TjraUTDI7xTB9frP5wGhyV9noPfq0xRo4IxR/emWjwQT8kSIVP35uGTs5Kvc9pzTtqcDDeT0vUO+PSqoePB9a/Ow1dXJ0MiNHy56A+AVj7X0lwkLiA9D/5eLnhH4unwcvNBbyef9/WSbn+Pbtj/e8nw0nPrE4rlZsLNr85DT7unQ2O8ZSPN/4xPxmuTtJ3SRHSimOWuCfPSozZRpjYtmsXCpGx9gDO78kC0HJwEEURPM+DiSLAcRieFImkhQno86z+mZDfUnSlGLv+9xgy92RDFBl4nmv7k7GW5daHjhuASb8bjf5RpsW4db0Uu/9+Bqf3XIbQLIJXcBCFn2OgZYG0wTFPI3FuDAY+Z1qMH3+oxJ4vz+P4/lw0NzWDV/At4+B+jiGICB8ajMQZwxD5nO5rHaSU3n2A3duzcPRgHhoamqDgW2JwfMuKJoIgol9YACZNG4KoEaEmnc6pqKhBxu5LOHg4D3X1TVC0joMHAA6CIKLP0z6YnBiBkTFPmxSj6oEGOw/mYN/R7/BI29AWg+NafscEQUTv4K6YkjAQo5972qSLmx/U1GLHN7nYdTwP6tp6KBQ8mMiAX8QI9vfC1PhwJAzvY3Ch80vVj+rw9alc7DiTh4eaOtj9PA7g5xiiiB7d3PHiyHAkRj1jcKHzS+q6enx9/jv8MzMXVepa2PF824JnPMehWRTh5+mGl4eHYcqz/eHoYHwMYjuMOX5TIUI6lAfl1Ti2+TRKb1ZAq6mDs6sTfEO6I37mCHhY4LZJAPipSo3j277F3e/LodXUw8nFEd0DvRD30jB4W+D2TwBQV9fiREY27hRVQPuoHkpnB3T19UDclMHoquP2U2M8Utfh5IFc3C6qQO2jBigd7eHdzQ2xE8LgGyh9G60xtLUNOPlNPm4WVqD2UT0clHbw8nZF7Nh+COjhbZEYdXWNOHn6BgqLKqB5VA8Hezt4enZC7Mg+CO4pfcuxMRoam3HqQiHyb5RBo62HvZ0CHl06ITa6N3oHd7NIjKZmAaezv0dOQQk0tQ2wt+PRxdUZsZEheCa4u0XWm2kSBJzNu4WsgrtQa+uh4Dl0cXHCqPBeCO/la5EYgigi8/ptXCj8ETXaevAcBzdnR4zsF4whvfwtEoN0fFSIEEIIIUQ2xhy/6RoRQgghhMiGChFCCCGEyIYKEUIIIYTIhgoRQgghhMiGChFCCCGEyKZd3+jdekOPWq2WORNCCCGEGKr1uG3IjbntuhDRaDQAAH9/f5kzIYQQQoixNBoN3NzcdPZp1+uIiKKIsrIyuLq6WnyRHLVaDX9/f9y9e9cm1yih8XV8tj5GWx8fYPtjpPF1fNYaI2MMGo0GPj4+4HndV4G06xkRnufh5+dn1RidO3e22V8wgMZnC2x9jLY+PsD2x0jj6/isMUZ9MyGt6GJVQgghhMiGChFCCCGEyOaJLUSUSiWWL18OpdI2t6mm8XV8tj5GWx8fYPtjpPF1fO1hjO36YlVCCCGE2LYndkaEEEIIIfKjQoQQQgghsqFChBBCCCGyoUKEEEIIIbKhQgRAUVERXnjhBXh5eaFz586Ijo7GqVOn5E7Log4ePIjIyEg4OTnB3d0diYmJcqdkFQ0NDQgLCwPHccjNzZU7HYsoLi7G3LlzERQUBCcnJwQHB2P58uVobGyUOzWz/PWvf0WPHj3g6OiIyMhIZGVlyZ2SRXzwwQcYPHgwXF1doVKpkJiYiMLCQrnTspoPP/wQHMdh4cKFcqdiUaWlpXjllVfg6ekJJycn9OvXD5cuXZI7LYsQBAHvvvvuY58p7733nkH7wlgDFSIAxo8fj+bmZpw8eRKXL1/GgAEDMH
78eFRUVMidmkVkZGRgxowZmD17NvLy8nD+/Hm8/PLLcqdlFYsXL4aPj4/caVhUQUEBRFHEhg0bcO3aNaxduxbr16/H0qVL5U7NZNu3b8eiRYuwfPly5OTkYMCAARgzZgwqKyvlTs1sZ86cQUpKCi5evIhjx46hqakJ8fHxqK2tlTs1i8vOzsaGDRvQv39/uVOxqOrqakRFRcHe3h6HDx/G9evXsWbNGri7u8udmkV89NFHWLduHf7yl7/gxo0b+Oijj7Bq1Sqkp6fLkxB7wlVVVTEA7OzZs22PqdVqBoAdO3ZMxswso6mpifn6+rLPP/9c7lSs7tChQyw0NJRdu3aNAWBXrlyROyWrWbVqFQsKCpI7DZMNGTKEpaSktP1dEATm4+PDPvjgAxmzso7KykoGgJ05c0buVCxKo9GwkJAQduzYMTZixAiWmpoqd0oWs2TJEhYdHS13GlaTkJDA5syZ89hjSUlJbPr06bLk88TPiHh6eqJ379744osvUFtbi+bmZmzYsAEqlQoRERFyp2e2nJwclJaWgud5hIeHo3v37hg3bhzy8/PlTs2i7t27h3nz5uHLL7+Es7Oz3OlYXU1NDTw8POROwySNjY24fPky4uLi2h7jeR5xcXH49ttvZczMOmpqagCgw75fUlJSUpCQkPDY+2gr9u3bh0GDBiE5ORkqlQrh4eH47LPP5E7LYoYNG4YTJ06gqKgIAJCXl4dz585h3LhxsuTTrje9+//AcRyOHz+OxMREuLq6gud5qFQqHDlyxCam4W7dugUA+NOf/oSPP/4YPXr0wJo1axATE4OioiKb+HBkjGHWrFl4/fXXMWjQIBQXF8udklXdvHkT6enpWL16tdypmOT+/fsQBAFdu3Z97PGuXbuioKBApqysQxRFLFy4EFFRUejbt6/c6VjMtm3bkJOTg+zsbLlTsYpbt25h3bp1WLRoEZYuXYrs7Gy8+eabcHBwwMyZM+VOz2xpaWlQq9UIDQ2FQqGAIAhYuXIlpk+fLks+NjsjkpaWBo7jdP4UFBSAMYaUlBSoVCpkZmYiKysLiYmJmDBhAsrLy+UehiRDxyeKIgBg2bJlmDx5MiIiIrBx40ZwHIcdO3bIPArdDB1jeno6NBoN3nnnHblTNoqh4/ul0tJSjB07FsnJyZg3b55MmRNDpaSkID8/H9u2bZM7FYu5e/cuUlNTsXXrVjg6OsqdjlWIooiBAwfi/fffR3h4OF577TXMmzcP69evlzs1i/j666+xdetWfPXVV8jJycHmzZuxevVqbN68WZZ8bHaJ96qqKjx48EBnn549eyIzMxPx8fGorq5+bAvkkJAQzJ07F2lpadZO1SSGju/8+fMYNWoUMjMzER0d3dYWGRmJuLg4rFy50tqpmszQMU6dOhX79+8Hx3FtjwuCAIVCgenTp8v2n0sfQ8fn4OAAACgrK0NMTAyGDh2KTZs2gec75veIxsZGODs7Y+fOnY/dvTVz5kz89NNP2Lt3r3zJWdD8+fOxd+9enD17FkFBQXKnYzF79uzBpEmToFAo2h4TBAEcx4HneTQ0NDzW1hEFBgZi9OjR+Pzzz9seW7duHVasWIHS0lIZM7MMf39/pKWlISUlpe2xFStWYMuWLbLMStrsqRlvb294e3vr7afVagHgVx/qPM+3zSa0R4aOLyIiAkqlEoWFhW2FSFNTE4qLixEYGGjtNM1i6Bg//fRTrFixou3vZWVlGDNmDLZv347IyEhrpmgWQ8cHtMyEjBw5sm1Gq6MWIQDg4OCAiIgInDhxoq0QEUURJ06cwPz58+VNzgIYY1iwYAF2796N06dP21QRAgCxsbG4evXqY4/Nnj0boaGhWLJkSYcvQgAgKirqV7dcFxUVtfvPTENptdpffYYoFAr5jnmyXCLbjlRVVTFPT0+WlJTEcnNzWWFhIXv77beZvb09y83NlTs9i0hNTWW+vr7s6NGjrKCggM2dO5epVCr28OFDuVOzitu3b9vUXTMlJS
WsV69eLDY2lpWUlLDy8vK2n45q27ZtTKlUsk2bNrHr16+z1157jXXp0oVVVFTInZrZ3njjDebm5sZOnz792Hul1WrlTs1qbO2umaysLGZnZ8dWrlzJvv/+e7Z161bm7OzMtmzZIndqFjFz5kzm6+vLDhw4wG7fvs127drFvLy82OLFi2XJ54kvRBhjLDs7m8XHxzMPDw/m6urKhg4dyg4dOiR3WhbT2NjI3nrrLaZSqZirqyuLi4tj+fn5cqdlNbZWiGzcuJEB+M2fjiw9PZ0FBAQwBwcHNmTIEHbx4kW5U7IIqfdq48aNcqdmNbZWiDDG2P79+1nfvn2ZUqlkoaGh7G9/+5vcKVmMWq1mqampLCAggDk6OrKePXuyZcuWsYaGBlnysdlrRAghhBDS/nXcE82EEEII6fCoECGEEEKIbKgQIYQQQohsqBAhhBBCiGyoECGEEEKIbKgQIYQQQohsqBAhhBBCiGyoECGEEEKIbKgQIYQQQohsqBAhhBBCiGyoECGEEEKIbKgQIYQQQohs/g+vRRzGC8YqngAAAABJRU5ErkJggg==\",\n      \"text/plain\": [\n       \"<Figure size 640x480 with 1 Axes>\"\n      ]\n     },\n     \"metadata\": {},\n     \"output_type\": \"display_data\"\n    }\n   ],\n   \"source\": [\n    \"plotPinPow(fuelBlock)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"fb90d25f\",\n   \"metadata\": {},\n   \"source\": [\n    \"Unsurprisingly, we have a pin power profile that matches our `p(x, y) = x + y`. Pretty!\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"f2c6be2f\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Rotation\\n\",\n    \"\\n\",\n    \"`HexBlock` objects have an implemented `.rotate` method that supports CCW rotation in 60 degree increments. 
Before we rotate this block, make copies of the locations and pin power data arrays to compare before and after rotation.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 11,\n   \"id\": \"9373e430\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"def getPinRingPos(b: HexBlock) -> np.ndarray[tuple[int, int], int]:\\n\",\n    \"    locs = b.getPinLocations()\\n\",\n    \"    allRingPos = [l.getRingPos() for l in locs]\\n\",\n    \"    return np.array(allRingPos)\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 12,\n   \"id\": \"3f7589a9\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"ringPosBefore = getPinRingPos(fuelBlock)\\n\",\n    \"pinPowerBefore = fuelBlock.p.linPowByPin.copy()\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 13,\n   \"id\": \"d4fb2a75\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"import math\\n\",\n    \"\\n\",\n    \"fuelBlock.rotate(math.pi)\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 14,\n   \"id\": \"819f10f3\",\n   \"metadata\": {},\n   \"outputs\": [\n    {\n     \"data\": {\n      \"text/plain\": [\n       \"<matplotlib.collections.PathCollection at 0x1baf95134d0>\"\n      ]\n     },\n     \"execution_count\": 14,\n     \"metadata\": {},\n     \"output_type\": \"execute_result\"\n    },\n    {\n     \"data\": {\n      \"image/png\": 
\"iVBORw0KGgoAAAANSUhEUgAAAiIAAAGdCAYAAAAvwBgXAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjAsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvlHJYcgAAAAlwSFlzAAAPYQAAD2EBqD+naQABAABJREFUeJzsnWV4HEe2hr/qGY2YyUKLbEu2ZElmZmZ2Yggzb7Ib3DDtZpPdwIY2aCdOzMzMLLDAtmRZFlnMIw131/0xlq9B3dOgJE7S7/PoZq/rTH+DXadOnTqHUEopVFRUVFRUVFR+A5jf+gmoqKioqKio/HlRHREVFRUVFRWV3wzVEVFRUVFRUVH5zVAdERUVFRUVFZXfDNURUVFRUVFRUfnNUB0RFRUVFRUVld8M1RFRUVFRUVFR+c1QHREVFRUVFRWV3wztb/0EhOA4DuXl5fD09AQh5Ld+OioqKioqKioioJRCr9cjNDQUDCMc87ilHZHy8nJERET81k9DRUVFRUVFRQalpaUIDw8XtLmlHRFPT08A9hfi5eX1Gz8bFRUVFRUVFTE0NzcjIiLi6jwuxC3tiLRtx3h5eamOiIqKioqKyu8MMWkVarKqioqKioqKym+G6oioqKioqKio/GaojoiKioqKiorKb4bqiKioqKioqKj8ZtzSyaoqKjdSVtuIdSdzUVLTiFazBe7OOkQG+mBm/0SE+3t3iEZFfTPWH85BcVUDWk0WuDk7ITzQB9MG9UDnYN8O0aiu12PzgVxculyHFqMFrs5ahAR4YfKwHogJD+gQjfrGVmzdk4OLxTVoaTXDxdkJQQGemDiyB+KigzpEo6nJgO07s5FfUIXWFjOcnbXw9/fEuDE9EN8tpEM09M1G7N6WhfNny9HaYoJOp4WfvwdGjktE96TwDqkx1Npiwt7NmTibWYKWZiOcdFr4+ntg+ISeSOoT1SEaJoMZ+9anI+fERbQ0G6DRauDj74Ghk1OQPLiLw1oLYjAbLTi4/hTOHDyP5oZWaDQMvAM8MWhyKvqMSewQDYvZisNrjuP0rjPQ17WAMATe/p4YOK0v+k/uBY1Wo1hD5c8FoZTS3/pJ8NHc3Axvb280NTWpp2b+5BzPK8b3+9JwLK8YDENAKcBRCoYQEAJwHMWg+M64a1Qf9O8SKUsjLb8MS3eexuGcS/aJ54oGIQBDCFiOom98BO4Y2xuDE6NlaeQUlOPHLWk4eLrg6r/dqJHSLQy3T+yN4X3iZGnkXazCz+tPYv/RfHAUINdqMAxYlkP3LiGYN603Rg3uJmuSvXSpBj+vOoF9+8+BZTkQQsBx12t0iQvG7Jl9MHZ0DzCMdI3S4lqsWnYMe7Znw2Zj29WIignEzPn9MW5yMjQa6ZNsRWk91iw5hJ3rM2CxWMG0oxEeFYAZiwZhwqw+0DpJn2Rryhuw5n/7sGP5cZgMFjAaBhzLAQTQaBiwNg6dIv0x/e5hmLRoMHTO0teHdZWNWPvpLmxbehCGZmO7GoFhfpj2wChMvW8kXNycJWs01jRhzX+2YMuXO6FvaIVGa78uAGi0GrA2Fn4hvpj2yHjMfGIS3DxdJWuo/HGQMn+rjojKLQ2lFF/tPon/bj0KDWOfqPloG39y8mDcM7qvpAl22e50fLDqgEMNhrFPVPdN6o+Hpw2UpLF+Xxbe+3YPCIGwBiHgKMVtE3rhiQXDJU3iuw6ew9sfbQUIwLKOX8fUsUl4+sGx0EqYxA8fycfrb28EpRQsy/HaEUJAKcWoEQl47q+ToNOJn2BPH7+I159fCZuNFXwdhACUAgOHdsULb8yCi4uTaI3s05fwymNLYTHZHLwO+39TBsTi5f8shJu7+Ek8L7MYf1/8JQwtJrtjwCtidxi794nBq9/cB08fN9EahdmleGnOf9BU1yKsAYAwBLFJkXhr1ZPwCRR/Ty05fxnPjXsT9RUNojQi48Pwjx1/R0CYv2gNlT8WUuZvNUdE5Zbm692n8N+tRwEIT97Xjn+05Q
i+23tatMbyvRn4YNUBURrclfGvt57AZxuOitbYfCAH//hmNzhKHWtcWRss356OD3/cL1pj35E8vPGfLWA5Kjh5A///Ojbvzsa/PtsJseuR4ycv4pU31l9xEIQnpLZr7jtwDu+8t/mqpiPOpBfh5b8uh8Vic/g62p72iSMX8OaLq66u0B1xPqsULz74HUxGq4jXYf87c6IQrzyyFBaLTZTGpXPleG7+pzDojQ4nb1zROJdehJcWfQ6T0SJKo6ygEn+d8p4oJwQAKEdRmFOKZ6e9j9ZmoyiNquIaPD3sZTRUOnZC2jTK8svx9PBX0VynF6Wh8udGdURUbllOXCjBJ1uPyHrsh5sP43RBmUO77EsV+NeK/bI0vtl2EoeyCx3aFZTW4J1vdsnSWLkzA7uOnXdod7miEW/8ZwukboBQCmzdm4NNu7Id2tbW6fHqG+sBSAuiUgocOJiH1WtPObRtbjLilb+tAMdRSInVchzF6eMX8dOSQw5tTQYLXn5kCVgbByrSOWrTyM0oxpKPHX+WFrMNL9/xJawWm2gHDAA4lsPFnDJ8+dpah7Ysy+HleR/B1GoW5SBcq1F2oRIf/2WpQ1tKKV6Z/k+0NLaKdvIAgLVxqCquwT/u+ET0Y1T+vKiOiMoty9J9adDIyC0A7Ns0S/anObT7aXe6rPwFwL69sXSnY43VOzMlOwhtEELw4xbH0Z31OzLBUSrRRfh/fl5/ymFUZMvWM7DZWEkOwrWsXHPKYfRh55ZMmIxW0RGaa6EUWLfipMOIxd4tmdA3GSU5CP+vQbF5xQkYDWZBu6Pbz6CuqkmSg9AGx1HsXnUKzQ2tgnand2Wj4lKNPA2Ww8H1p1FzuV7QLuvgWRRmFUtyQq7VOLUtA2X55ZIfq/LnQnVEVG5JLtc34fC5IofbGHywHMXBs4WoaGjmtalrbsXu9AuyNTiOIi2/DIUVdbw2LQYzth4+K1uDUoq8omqcK6zktTGbrdi4M0vWxNpGWUUDMnNLecdtNhbrN2Uo0qira8GJk/wRJI6jWL/KsUMkRIvehMP7zvGOU0qxftkxKDkEYzZZsXfzGUGbDd8dku3gAvZox86VJwRtNn61F4yMBN02CIDtS4UjSBs/3Q6NVr4Go2Gw+Yudsh+v8udAdURUbknWn8hVdCMHAAKC9Sdyece3HD8ne3XfhoYh2HCEX2PXsTxYbaxijY37c3jHD54ogEFkToGgxs4s3vGTpy6hsdGgSINhCDZtyeAdz8ooRnVlk2KNzev4o1QXzl5GycVqRZ87IcAWASeh7GIVzqcXKXLaKKXY8gP/tmTN5Xqk7cuVFQ1pg+MoNn+7n3e8uV6Pw+tOyoqGXNVgOWz9Zg9Yhb8BlT82qiOicktSWtuk2EkgBCir45/YSmsaFTs7HEdRVsOvUVbVAI3C2g0sR1FS2cCvUdEg6+jqTRqX+TUulzd0yHtVWsavUSGgL0Xjcin/dkOFwJhYKBW+Tnkxf4RMCtVl9bzRocqiWqmpOu3SVKuHmceJrS6uVeTotGHUm9Bc36L4Oip/XFRHROWWxGCxXD09IheOozCYrfwaJquiVStgnwtajPz5AkL6UmgRyEkwmqyyc1CuxSDwOoxGS4cU9TIKRG6MBotiZwcATCb+99xoUBY5asMsoGFqFc4fEQvHUVh4vj/GVlOHaACAsaX9a/H9uywNvbgTOip/TlRHROWWxN1ZB0bhxMcwBG7O/HUl3F10HbD9A3i48teVcHPRKbp+G54CBahcXZw6YnEMdyENV52i3I023Nz43w9XN51ixxCwP1chjY7AWUDDRUKdESEYDQMdz/fXzcOlQzQA8BYec/XsQA0vtbiZCj+qI6JySxIVpLyUOqVAZ4HrdA72lZ1E2gbDEHQO9uHXCPGFTWF4W8MQRIX58Y5Hhvk5PI0iSiOCv/hURISfYidBwxB0juQvXx8eqbz4FcMQRETxXyc8Snn5fEIIwjsLaMQEKtYAAUIi/XmjUCExQR0SofIL9o
aOpwhccOfADinX7u7tBk8/D8XXUfnjojoiKrck0/v16JDrzBC4zqQBCbKPB7fBchQzhiTxjo/u3w0uEiqK8mlMH9GTd3xo/zh4KFyFsxzFtHHJvON9e0fD31/ZZMJyFNOmpPCOJyZHICTMV9GJFo6jmDqzD+94bHwoYuNDQBR87pRSTLmtP+94aFQgEvvHgtHI1yAAptwxhHfcv5MP+o1LUnZqhiGYct9I3nFPXw8MnzdQ8amZyQ+MhUaj9p9R4Ud1RFRuSTr5eGJYj2hFdURGJcUiyJt/8vT1cMW4Pt0UafRPiERkkA+vjburDpOH9ZCtwRCCHrGd0KUz/ypb56TF9HHJsreZCAE6h/shKT6U10ajYTBjaqqi7bKgIC/06c3fo4cQghnz+sm+PgB4+bhh0PBugjbTFwyUVMjsRlzddBg+gd8xBIBpdw4F56AqrBBaJy3GzOkraDP1vlGKkkkJIZiweKiwxsPjlZ2a4ThMeXCs7Mer/DlQHRGVW5Y7R/SWvR3AcRR3jOjt0G7hmFTZp3NYjuKOcfyr7zbmjksFCJGVUMpRikVThCckAJgxIRlaDSMrmkApsHBmP4eh/kkTk6Fz1sreErhtbj+HztLYiT3h7uEi26mafVt/aB1sJwyf2BO+/h6yNAgBpi0YCBeBHBEAGDg+CUHhvrIiFoQhmLhgIDy8hfvN9BrVHZHdQmRpMAzBqHkD4Bcs3LG6x6Bu6NY3DoyMqAijYTB4ej+ExARLfqzKnwvVEVG5ZekdG46/zhgu67HPzRyBlGj+FX4bCZHB+PuiMbI0Hp0+GAO7d3ZoFxXqh9cemiArofSOqX0xsm8Xh3adgrzxxt+mAYBkZ2TmxBRMGOl4K8zP1x1vvz4bDCNNgxBg3JhEzJjWy6Gth6cL3vrgdjAaRpKjQAjB4OHdMH/xYIe2zi5OeOuLO+Gk00rSYBiC1IFxWPzIaIe2WicN3lr6EFxcdZIcBYYhSOgVhXtfmi7ClsEbK5+Eh7ebNA0Ng+jECDz2r4UObQkheG3d3+AT6C3JGWE0DMK6hOBv3z0i+jEqf15UR0Tllmbx8F54dsZwEMDh9oaGsUcdnp85AguGpYrWmDEkEX9fNAYMIaI0AOCJmUNwz0THkYo2xg6Mx+uPTISGEa9xz4z+eHgef57AjQzuG4u3npsOjYZxqNE2Ac+b2htP3jtKdJSjV2pnvPPmHOh0Wmgc5EC0aUya0BPPPjNRtEb3pHD846OFcHZxcphn0aYxfEx3vPDGLNGORWx8KN779l64ebg41GjLJ+k/PB6vfLgQWidx+Q4RccF4f80T8PJ1F62RMrgr3lz6IHTO4vKKOkUG4INtz8G/k4/D106I/S+hbwz+seEZ0ad7AkL98OHhN9Gpc6Dj95fYnZfY5Ch8sP91uHu7i9JQ+XNDaEecyfuFkNJGWOWPTealcvywPx17sgsA2G+oHKVgCLm6tTImuQsWD09FcpTjSEh7nC2qxI970rHr9AVQSkEIAUc5uwbsSYrDe8Zi4Zhe6N01XJbGheIaLN+ehh3HzoNlOTAMA477fw2OoxiUHIX5E3qjf5LjaEt7XCqtxapN6dixPxdWG3tFg17J77B3/+2T3Blzp/TCoD6xsjTKLtdjzbo0bNuRBYvZBkbTpgGAACxL0TMpArNn9sbQwV1lbedUVjRi3YqT2LYxAyajBZo2DQYACFiWQ0JiOGbM64sRY3rI0qitasKGZcewdfUptOpN0GjtGoTYK/OyLIcu3cMwfeFAjJycLKtwXEONHhu/P4gtPxyBvtFwvQYhYG0couJDMP3uYRgzp59oR+damur02PT1Pmz+Zh8aa/TQaDWgHGffEiT2JnQRXTph2gOjMX7xEN5jwUK0NLZi0+c7seHTbagrb7BrXPnx2V8Hi5DYYMx4bCImPzAGzgLH2lX++EiZv1VHROWWwGKzASDQOdjfr2lqwcZTZ1FS24RWkxnuLs7oHOiDaX27I8BLePVl1wB0WuHVZn
2zAZuPn0VRZQNaTWa4uegQFuCNqQO7I9jXU/CxVhsLjlI4OwlrNOmN2HbkHArLatFqtMDV2QmdArwwaWh3hAYK79vbbCxYjsLZwWkcfasJOw+cxcWiWrQazHB21iLI3xPjR/RARKjw8WibjQXLctDphHNCDAYzdu89hwsFlWhpNcNZp0VAgCfGju4ueFQXAGwsB5uVhbODvBOj0YL9u3KRf64cLXoTnHRa+Ad4YOS4RMTECecfsDYOVptjDYvZioM7c3A2oxitehO0Thr4Bnhi+PgkdOkRJqzBcrBabHB2cRLUsFpsOLI9C9nHC9DSZISTkwbe/h4YMjkF8amdBR/LshysZhucXYU1bFYbjm87g8yD56FvaIVGy8Db3wODp/RCj4FdBB/LcRwsRiuc3XQOnguLk1szcHpHJvQNLWAYBp5+Hhg0vS9SRiY61DAbLXBxc+6Q48cqty6qI6Jyy0MpxfHCUiw7mYlDF4pgudKLwkWrxaj4GCzon4JekaGKblaUUqQVXsbyo2dw4GwhTFa7I+Ks1WBIfDRuH5yMfnERijWyiyqx8sAZ7DlzAaYrnV+dtBoMjI/E/OEpGBDfWXHhtHNFVVi9JxO7T+bDeKXappOWQe/4CMwdk4pBPaMUl5IvKK7B2h2Z2H34PFqvVEDVahgkxYdhzsRUDOkTC63CUvLFJXXYsCUDO/fmoqXFXoFUo2GQ0C0Es6b1wtBBXeEkIyJwLWWl9di8IR07t2ehudl4VSOuSzBmzO6L4SMSRG998FFZ3oCtq09jx4YMNNbbu+QyDEF0106YPr8fho9PdJjQ6oiaikZsW3ECO1acQH2tHqAAoyGIiA3GtMWDMHJaL7gqPLZdX9WE7cuOYNvSQ6itbLRrMAShMUGYcvdwjJk3AO4Ki5E11jRj+3f7seWr3agurQPlKAhDEBIdhCkPjsG4O4bDS60z8odDdURUbmkOXSjCm1v2orS+CRqG3FRUrO3fYgP98NrU0egTJX0b5FRBKd5cuweXqhsENSL9ffDS7FEY1FX6NkjWpQq8+dNuFJTXCmqE+Hni2bkjMaKn9G2Q/OJqvPXtTpwvrm5Xg2EIOI4i0NcDT902HGP7Cx9dbY+isjq8+/kO5ORXCGr4ebvh4UXDMGmE9Bovl8sb8N6H23Emu/Tq9drT8PJyxb2Lh2D6FPE5Pm1UVzXjg/c2I+3UJUENdw9nLL5zKGbPc3xS6Ebqa/X48M2NOHEoHwy5WYMwBJSjcHXTYd7dQ3DbPUPBSHQQmxta8fHLa3B0R459C/JGDWI/6eTs6oSZdw/DoifHSd4yam024tPnf8aB9acBerNG2xEvnbMTptw9HHe/NEPylpGx1YTP/rIEu388DI7jbj4yfWX7S+OkwcR7RuLB9xZC10GViFV+e1RHROWWZW16Ll7esAuUUoenSBgCMITBv+ZMxITErqI1tmfm4fmftoFSCkenf9tyAd6YNxbT+4qfYA9kX8TfvtoMlqMOe+LYMzOAF+aPwrxh/EXDbuTk2RI88+F6+3aPyGPMj88bisWTxCfRZp2/jGfeXgOTxSZa4+45A3DffMenU9rIu1CJv764EgaDWXQl27kz++CR+0eKdhSKLtXgr0/9CH2zEazI+h2TpqbgqWcmiY5WXS6pw7MPfI/62hbR9TtGTkzC396YKbpCaXV5A55b+AWqyxtFawwY3R0vfrIYTiIL59VXN+H5WR/icmGVqFonhBCkDO2GV5c+LFja/lqa61vw/IR3UJhVLOp7RRiC7gO64O1Nz/GWnFf5fSFl/lZPzaj8auw9fxF/X78TnAgnBAA4Ctg4Dn9dvRXHC0tEaRzLL8Zzy7ZdcRAc21NqT3p9eeVOHDhbKEojs7Acf/1qM2wsJ6oxX5vFuyv2YkdaniiNvOJqPPOfdbBYxTsIAPDJykPYeDBHlG1RWR2eeXstTGZpGt+tPo6VW9JF2VZUNuKvL65EqwQnBABWrTuNH5cfF2VbW9OMZ/+yDM1N4p
0QANi6KRPf/G+fKNvG+lY8/+AS1NfqJRUR27c9G5+9t01Un56WZiNevPMr1EhwQgDgxN5z+M8Lq0RpmFrNePm2/+JyYbXogmuUUpw5nId/PvStqFYCFpMFL8/4FwqzS0R/ryhHce5EAd6Y9x/Yrmyhqvx5UB0RlV8Fs9WGF9bukPVYSoHn1+wAywnfBG0shxd/3g5xbs6NIsBLy3dcTWjlfy4ULy/ZDo6TrkIAvP7jLhhMwh1gKaV44+sdsLKcrGJr/1y6G40tjrudvve/XTBZrLK6HH+yZD9q6vQO7T76bDcMBrOswnTfLD2Essv1Du2+/GwPmpoMsjRW/HQMF/IrHdot+WwvamuapVdLpcDmVaeQk+HYkV7+2R5UlNRK7htEKcW+Dek4deC8Q9s1X+xG0bnLkiuychzFse1ncHiTYwd005e7cf5kgXQNlkP6nhzs+uGQpMep/P5RHRGVX4XtufloNpllFfXiKEWVvgUHLxQJ2h04V4havUHW5E0BNBlM2H3leDAfJ/NKUVbbJGvypgCMFiu2nRaeMM5eqsSF0hrZVWVtLIcth3IFbS6V1uHMucvyG9kRYOOebEGTiqomHD9VKLuxIMMQbNyaKWjT0NCKg/vPSYqEXItGw2DThjRBm1a9Cbs3Z8ou2a7RMNi04qSgjdlkxbblJ2RrMBoGm344Imhjs7LY/O0B2Z85wxBs+na/oA3Hcdjw6Q7ZnZoJQ7D+v9s7pNOzyu8H1RFR+VX48Ximoj4lGkKw7ESmoM3Ph5VpMITgp8PCGisOZipqlEcI8NO+DMEb7ao9ZxRpUAqs2J0pOOGs26nsdXAcxdodmbBdOe3UHpu3nVF0WojjKDZvz4LJZOW12bYlEw4CZYKwLIddO7LRojfx2uzecgYWi/ztApblcGjPWfvJFx4Obj0DQwv/c3AEx3I4fTAPlaX8EaQTO7PQKPAcHGpwFLknLqL4fDmvTcbeXFQW1UDWigP2LZpLOaU4f+qizGep8ntEdURUfnGqmluQU14lK4rQBkspjhQUo9Xc/rZGs9GEEwWlijQ4SnGmuAI1zS3tjlttLA5kyV/hA3YnobCyHiU1jTzjFLtP5inSAIDKumbkl1Tzju86fF6xRmOzEdl5/JPS7n1n5UdcrmAwWJB+pph3fN/uXMWrZ6uFxckT/BPf/h3CkR8xcByH4wf484MObjmjqCMwYHekjwg810Mb0xV1BAbskZdDAtszB9ccV9StFwA0Wg0OrhaXH6Tyx0B1RFR+cepaDR12rQZD+7kPDSJyIsRSz3OtplaTIkfnWhr07b8nJosNFit/lEGSRnP7GhxHoW+Vv/oWowEATc0d85k0NvJrNDS0Kr4+IQSNAtdpqG2RvcJvQ8Mwghr1Nc2KOgID9t4zTfUCGtVNijoCA/atkyaB3KDG6mZF3XoBuzPeVNOs6Boqvy9+cUfk8uXLWLRoEfz9/eHq6oqkpCScPn36l5ZVuYVwlGQqBStPAlxHath4rtWhGnyvQ0Fbd7EalFLZHYdv0hCYdJS0qL9OQ+A6cnNDroUQ4dfRIZ/JlTLrvBoKJ+//vw6/E8t2hINLKWxWodfRURod44yr/D74RR2RhoYGDB48GE5OTti2bRvOnj2LDz74AL6+wuWlVf5YeLp0XM8Jb57+FV6uLh2m4cWn4dZxGp4813Jz0Ununsur4d6+hkbDwEVGr5F2NTz43xM3hVU/xWh4eCjX4DgKT09+DU+FlUUBgGMpPLz4Nbx83ZRrUAoPb/7n6uXvcbVQmXwIPHz4n6unr4ekTsDtKjAMPHzVZnl/Jn5RR+Sf//wnIiIi8N1336Ffv36Ijo7GuHHjEBsrr9GWyu+TcB9vBHgou9ESAFH+PvB1a/9G6+/phgh/b8X32SAvD4T6tl98x9XZCV3DAqFwKx/ebi6IDm7fGWcYguQuYYpLwrs6O6FrZCDveK8e4YqSVQFAq2XQPa4T73jv5M6ymsRdCyEEid35e7307hPtsAuwGJ
KSI3nHUvvHKP48KKVI6hXFO548IE6xBsdySOwbwzvec2BXEIW/ENbGImlgF36NYQmKI2GsjUXPoQmKrqHy++IXdUQ2btyIPn36YO7cuQgKCkJqaiq++uorXnuz2Yzm5ubr/lR+/2g1DBb0S1F0ogUAFg1I5a20SQjBgiEpiq7PEILbhyQL9my5fUSKqEJpQhpzhvaETqAp3rwxqYqSPDUMwdShPeAmUC579oRURcmqGoZg3JAEeAtUwZwxNVXRtoZGQzBkYBwCA/gbDU6d0VvR9gzDEPTqHYXwCD9em8lz+ir6PAhD0K1HGOLiQ3htJszrJ/v6gH17KSI2CIl9o3ltxtw2QFZn32sJDPVFn1HdecdHzB8IF4VRKq8ATwyeIb46sMrvn1/UESksLMTnn3+OLl26YMeOHXj44YfxxBNPYMmSJe3av/vuu/D29r76FxER8Us+PZVfkTm9ExVtOei0GkxPFl4lTevTHU4iS2m3B0MIZvZNFLQZ36cb3BRsa1BQzBqcJGgzolcsfDzkbwewHMXsUcKl5PslRyFYYIIXozFrfIqgTWL3MERF+sv+3FmWYubUXoI2sXHBSOguvzkix1FMn9VH0CY0wg+9BsTKjlhQjmLGggGCNv7B3hg0LlH2tgalwPQ7hwi+D54+7hg5u6/sKBVhCKbdN0Kwd46ruwsm3j1S9utgNAymPjBGdLl6lT8Gv6gjwnEcevXqhXfeeQepqal44IEHcP/99+OLL75o1/6FF15AU1PT1b/S0tJf8ump/IoEerrjnsHCN3whHh0xAB4Ock28XF3w0FjhG74Qd4/sA39P4S0kV50THpsmvs/KtRAAtw1PQai/cN8FrVaDx+cPladBgMlDuiM61F/QjmEIHrtjuCwNhhAM798FCQLbMvbnQvDwfSPlaTAEfXpFIVVgy6SN+x4aJcvZYRiCHonhGCCw1dDGnY+OAsMQyTqMhkFst04YMoY/itDGgsfHQqtlJGtoNAzCYwIxekZvh7bzn5wAnauT5KPCjIZBYKgvJiwa4tB29lOT4e7tJtkZYTQMvAO9MO2RcZIep/L75xd1REJCQtC9+/U/wISEBJSUtF/u2NnZGV5eXtf9qfxxeGr0YEyU0LyujXm9k3DfUHGh2vtG9cXsfsJRjfaYnBqPx8YPEmV72/AULBwlvFK/EQJgeM8YPD1L3OQ/dWgi7psuzakiBOiTEIkX7xoryn7UwG54dPEwSRoMIejepRNefXyiKPv+fWPw1CPins9VDYYgJjoQb7w0XVSkIzmlM/72wlR7A0OR8yvDEISF++HNf8wTVfciPjEcz787F4QQ0ZM4oyEIDPbCW/9dBJ2IFX50txC8/NmdYDSM6OiLRsPA298Db39/P1zcHDekC40Owms/PAKtk0Z0TRFGw8DD2w3vrHoSHt6Oc72CIvzx9qZnoXNxEu2MMFoGru7O+MeW5+Eb5C3qMSp/HH5RR2Tw4MHIy7u+iE9+fj46d5becl3l9w/DEPxrzkTcNcg+iWsEZg2G2Fefj44YgNemjRYdeieE4NW5Y/DgmP4gV67DR1uy5t0jeuOd2yeIvvkTQvDMrGF4fPpgMISI0pgztCf+dd9UaCWsEh+YOQjPLBwJhhGnMWlQd3z49ExJ21MLpvXFCw+Pg1bDCL7HbRrD+8fh41fmwlnC9tT0Kal4+bmpcHLSCDoKbVsGfXtH4+N/LYCbm/hcg7Hjk/D6246fV5tGz+RIfPL5XfCScCJm6JjueOvTxXC90oGW77W0aXTtHoaPf3gAfhK2wPoMj8c/fngQ7ldO8fA5PW0TfOeuwfh43RMIChV/ErHnoK7414Zn4OXrIUojLDoIH+94HmExQaI14vvG4aODr8Ovk8911+LTCAr3xydH30J0kuMImMofD0J/waL+p06dwqBBg/D6669j3rx5OHnyJO6//37873//w8KFCx0+XkobYZXfF0V1DVh5Khsr07Jvqpbq5eqM2/smY26fJIT5yP/cL9
c3YdXxbKw8lgW90XzdmIeLDnP6J2HewJ6ICPCRrVHVoMeaI9lYdTALja3XF/BydXbCzEGJmDOkJ6I78SdDOqK2sRUbDmZj9Z5M1DVdX9zLWafF1CE9MHtUMmLDA2RrNDQZsGVfDlZvz0BN3fWVZZ20GkwYloCZ41PQLSZYtkaz3ogdu3OwZkMaKquuT0TXahmMHp6AGVN7Ib5rJ9k5Hy0tJuzekY11a07hclnDdWMaDcHQ4QmYPqsPEpPCZWsYDWbs3ZqF9T+fQElhzXVjhCEYPDIeU+f3R3KfKNkaJqMFBzZnYuPSwyg8V3G9BiHoNzIBUxcPQurgLoI5G0JYTFYc3pyODV/vQ37GDdVrCdB7eHdMvXcE+ozuITuvxGqx4cj6U9jw2Q7kHs2/aTx5RHdMf2Q8Bk7pBY2C/C6VWw8p8/cv6ogAwObNm/HCCy/gwoULiI6OxtNPP437779f1GNVR+T3BaUUudXVKG1sQqvVCk+dDjF+fugSwJ+vYLLakFVWiSajCYQAPq4u6BneCTotfyj7fFUNiusb0Wq2wN1Zh85+PogP5j+qarHZkF1SiUaDCaD247OJkZ3gInBypaCqFoXVDWg1W+Dm7IRIPx/EhwbyTixWlkVuUSUaWkyglMLL3QU9IoPhKrBCL6qqR0FlHVpNFrjqnBDq54UekcG8GjaWw9lLlWjUG8GyHDzdnREfFQwPnronAFBa1YCCy7VoMVrgotOik58nEmNCeDVYlsP5wio0NBlgtbHwcndB15gg3pokAFBR04T8omq0GC1wdtIg0NcDSV35jyBzHEV+QSXqG1phtbLwcHdBXGwQvAWiE9W1euQVVKKl1Qydkwb+fh5ISgjjnSAppSjIr0RdfQssZhvcPVwQExsEX4H6FHV1Lcg7XwF9ixFOWg18/TyQlBQOLc8ESSlFYX4V6qqbYTZZ4e7pgs6xQfAP5I+ANNa34nxOGfR6E7RaBj6+7khM7QwngdMsRfmVqC5vgNlggbuXKyJigxAY4sNr39zQinOZJdA3GaHRMvD2dUdinyjoBL6LJfkVqCqtg6nVDDcvV4THBiM4gv9329JkwNnTl6BvaAVhCLz9PNCjfyxcXPm3h8ryK1BxqRpGvRFuXq4Ii+uEEAHH1qA3IvdYPprr7Y6xl58HegzsCjeBk1oqtw63lCOiBNUR+X3QarFg47nzWJKegQt1dTeNp4SE4M5eKRjfpQucBRwMIUxWG7bk5uHHk5k4W3lzD5UeIUFY3C8FE7t3E3QwhDBbbdiZcwHLjmYiu/Tm1vBdgv2xcHAqJqfEw00n7+SM1cZib1YBfj6YiYzCm/u0RAX5YsHwVEzuEy/oYAhhYzkcOnMRK/Zm4vT5mxO+wwO9MX90KqYM6s5bWM0RLMfh+JkirN6RgeNZRTeNB/t7Ys64VEwZ3gM+XvJqyHAcxekzRVi7OQPHTl+8qRqsv58HZk1KxeSxSfCTWQCLUorMzBJsWJ+GI0fybzqm6+PjhunTe2HS5BQEyDxlRCnF2axSbFx5Cgf3nL2pzoanlyumzOmDSTN7I6iTvPwISinys8uw+efj2L8586ZKse6eLpg4tx8m3dYfIQJHlR1RkFOKLUsPY8+aU7Car28E6OrujPG3D8TkxYMRHis/claUW4bNX+/Fzh8PwWy8Plrq7KbDuIVDMeW+UYjqES5bQ+WXR3VEVH410i6X4/5169FkMoGg/ZYcDCHgKEWIpye+nzMLcf7CJzpuJKe8Cg/8vB51rQYwBO3W8WjTCHB3w9cLZyKhk/j9bMAeAXngm3Woam65eq0bIcR+TNLbzQWf3zUDyZH8dSHao6SmEQ9/thZldU38Glf+6+aiw4f3TUO/rtKOsFfWNeOx/6xFUWU9GIa0W/+iTcNZp8U/H56KwUn8tSfao7ahBU+/txYXimugYQhvPRJCAK1Gg9cfm4SR/aQlKTc2G/DCm+uQm1fuQINAoyF47vEJGD+yhySNlhYTXn1lLTIzi6HREN
56JG2RnSeeHI+pU1MlaZiMFrzz4mqcOHwBGg3DW1eFYQgoBe5/cixmLRggaUvHYrbi/edX4dD2bGENjf37sPixMbj94VGSNKwWGz55fjl2rTzpQIMBx3KY+8gY3PX8FEnbRizL4Ytnf8TGL/eI0pj24Gg89N4ixUXzVH4ZVEdE5VfhaEkJ7l69FhyloprBaQiBi5MTVt4+H/GB/Fsp15JRWo47f1gNK8uJ1nDSaLD0jjlIDhfnKJwrr8YdX6yE2WYTVeSLIQQahuB/98xCv1hxjsKlqnos/vdyGMwW0RqEAB/dPx1De4hzFC7XNOHud35GU6tRlAYhAAHBOw9Oxpg+4hyFmoYW3PfyMtQ1torWoBT4+4PjMXm4uNNMjU0GPPy3ZaiobpJUSOypB0dj1mRxp5laWkx48okfUFJSJ0njvvtH4PbbB4qyNRkt+NtDS1BwvkKSxu33DMVdD48SZWux2PDSvd8gN71YUtO8aYsG4qEXp4pyRmxWFq/f8z+k7T8vqdPx2Pn98Zf3F4jSYFkO7971GQ6tPyW+wSABhs7oixeXPCI7T0bll0PK/K1+eiqyuFTfgAfWbRDthAAASylMVivuXLUGtSI68l5ubMb9P60X7YS0aVhYFvf/tA4VTfxdQtuo1bfigW/WwmQV54QA9p4eLEfx6JINKK5tdGjfbDDhwU/XiHZC2jQ4juKZbzfhQnmtQ3uDyYJH/71atBMC2B0ESin+/tVW5BRWOLS3WG146h9rRDshbRoA8M5XO5F+1nFdIBvL4bk31kh2QgDgwy/34NjpQhHPieLVV9ZKdkIA4Ouv9mP//nOiNN79+xrJTggA/PztIezYmCHK9sO/r0FumjQnBAA2/ngMG388Ksr2i1fXIG3/OUlOCADsWnECyz/ZKcr2+9dXS3NCAIACh9adwnevrZb0vFRuPVRHREUWX5w8CYvNJtpBaIOlFPVGI346c8ah7fcn0mGwWCRrcJSixWzB0hOOb+Y/HTuDRoNJlobZZsN3Bx13kl5zNBvVTeIn7zYo7BPz1ztPOLTdeuwcymqaZGlwlOKrTccc2u49kY/C0lp5peEp8OWqIw7Njp26iHMXKmWVVCcE+HLJAYcTZmZGMTIzi2WXbf/qf/scPjbvbDmOH7w550Qs3366x2FH3uKCKuzblCnZQWhj6ce7YDZZBW2qSuuw5YfDsrs1L/9oJ1r1RkGbxupmrPlomzQn5BrWfLwdjdVqO5DfM6ojoiKZJpMJG86eAyvz7sRRih8yMmFl+Vt9GyxWrM7Ika3BUooV6dkwWvlvtBYbixXHz0h2Qq5qcBQb0s+i2WgSsOHw80H5kwXLUezKvIC65lZeG0oplu/JkN3OjOMojmYXoby2SdBu1Y4M2f2COEqRlXcZhWXC0Z01m9Pll1KnQGFxLc7lC0d31q1PU5RXUFnZhIyMIkGbTatOKdJorG/F8UN5gjZblp9Q1OnW0GLGoe3Zgjbblh1V1IzPYrFi75pTgjbblx6Q/RsE7BW8d/xwUPbjVX57VEdERTJrcnJh45R12Kw3GrG74CLv+Jac8zBYhFdrjmi1WLA19+baBW3syS2wH+lVgNXGYmM6f6j+6LliVDW28I6LgVJg3fFc3vHMC5dRVFkvd0EJwF7/Yu2BLN7x/KJqnL1YqWjC0DAEa3fxR8JKyxuQnlWirOGfhmDdVv5IWE2NHkePXFDUjI9hCDasT+Mdb240YP+ObOUaK0/yjhtbzdi55rSiTreEIYLbM1aLDVt+OAJOQVNBANjwLX+UimU5bPxyt+StpWuhHMWGL3crer9VfltUR0RFMmmXbz52KhUtwyCtnP866aXlitvUaxiC9FJ+jYzicmgVJrkRYr8OH5mXlGtwlCL9Yhm/RoHy94rjKNLy+DXO5F1W1LQQsEd30s/x54nknLusTAD2RnkZ2fwa58+Vy45OtcFxFFlZ/BoX8ipuOj4rR+PsGX6NogtVDrdVHEE5igu5l8Ha2o9Mll+qQU
uT41wuYRHgcmENWpvb356pK29AXUWjMo2r12lwbKhyS6I6IiqSaTQZFa2+AXsX2maTmXe82WRW1KYesN/M9Sb+iIfeaFY+KVGgSSCqojeYIXvP5BoaW4U0TLIreF5Ls4BGi8HcIScT9C0CGq3mDnkdrQb+71WLwGuUgsFg4R1r1XeMhtXKwmKxtTvmKO9CCi08z7eFx3mQpdHU/rVamvi3HCVrNHbctVR+XVRHREUyThrlpZgJCJwE9rftvU8Ualw5yiuk0RFOgk6gNLWU3jJyNaT0lhFC6DpOWgayMxavga9KaZtGR1QT0Ah95h30Xgnlf2gFqqR2lE5HlkPne74d9V4Ja8grPti+RsddS+XXRXVEVCQT6O4u2LBODJRS+LvxV9wMcHeHhijdNiHwc+fX8PeQV/HzWjQMEbyOv6ebopwHwF5TJNCLv3Kon5cbWIU5O4QQBHjza/h6uyuOUAFAgEAFVF8fedVRb8TPh//zECrxLgVvIQ0/jw7R8PB04XVEfP07RsNJp4Gbe/sVfH0EStVLgTAEXjzvu09gx9WH8glQa039XlEdERXJTO7WTfZpljZYSjG5Wzfe8Yk9uipOiGU5DpO68xfqGt+zq+LJleUoJvTkfx1jU7sqSvAE7Dki43vza4zq1QVEYWiHUooJ/eN5x4f0ioFWqzyfZtygBN7xvqlRcJHQ1bd9DYJxI7rzjvfsGQFPT3ll7dtgGIIxY/iruHbtHirYb0aUhoZg1MQk3vHOXYIRFhWgKGqo0TAYNrEn73ZYcLgfuvSM4O3OKwZGw2DAuCToXNr/XL38PZA8LEHR6R9GwyBleAK8Osg5U/n1UR0RFckMi45CiKf8Gy1DCHqHhqJbIH+32N4RoYgN8JM9vRIAXYMCkCJQXTUhNAhJEZ1kH0klAMJ8vTAwjr91eWSgDwZ0i1R0BNLPwxUjEmN5xwN9PDAiNU5Rwqq7qw5j+vI7bd4erhg/OEGRhpNWg4lD+Z0EN1cdJo9LUqTBEGDyWP4JXKfTYtq0Xoo+D46jmDqFv9S7RsNg+rx+ivJdOJZiyuy+vOOEEExfNEhRrhbLcpi6QLhK7LR7his60cKxHKbeNVTQZvpDYxSd/uFYDtMfGiv78Sq/PaojoiIZhhDc2StVft0KSnFHL+GeHYQQ3NE/VfaNlgK4o1+Kw8lg0eBURRGLhYNSHU5qtw9Lkb09wxCC+UNTHOaazBuVIju6wzAEs4YlwcVBI7/ZY+VraBiCScN6wMNNuJHfjInKNEYM6eZwi2fylBRZ1wfs71X//rEIdtCcbvz0VGg08n4hjIYgMSUSnWOE2yCMmp4KZ2cnWXlODEMQEx+CrknCjeOGTUmFh7errMgLwxCEdA5AymDh9gEDJqXCN9hbVuSFMAR+nXzQf2KK9CeocsugOiIqsrgjNQW9w0Il54owhGBi1y6Y1M1xb5M5qYkYHBMpOWLBEIJhcVGYmeK4Cdqknt0wpkesZA0NIegdHYbbB/Z0aDs8MQZT+yZIvplrGIL48CDcObq3Q9ve3cIxd2Sy5DlJwxBEdfLDvVMGOLRNiOmExdP6SVSwawQHeOGheUMc2nYO98f9i4RX0Hwavr7uePSekQ5tg4O98cgjYyRrMAyBp6cLnnxyvENbH193PPHCFFkarq46/OXv0xzaunu44Jl350quSEoYAp2zE/76j7kOHXWdixP+9vEdkPrlJcSeUPvcf+90qKHRavDCdw/b7aTIEIBhGDz/7UMdmryr8uujOiIqsnDWavG/mTPQIzhI9CROAAyL6owPJk0U9Rgtw+DjuVPRKyJU9H2QEKBv5zB8OGeyqPodDEPwz9smYVCXSNH3QIYQ9AgPxid3TINORNY/IQSv3j4WI5P4t1fa04jt5I9PH5oBVweRijaNv94+EhMH8udg3KTBEIQH+eC/f5kFD1fhSEUbD80bgpljkkVraBiCIH9PfPLiXHh7uop6zKK5/XH7LPEOj4Yh8PVxx4dvzkeAyETRmb
P64K67xTs8bU7Iv96/3WE0pI3x01Lx4F/GidfQELi6OeOd/y5GeGdxHaqHTkjC46/PtDcwFPEFZjQEzi5OeP2LOxHdTVxTyH6je+Cv/1kEhiGitpsYhsBJ54RXvrkP3VI7i9JIHpaAv//wKLRajajICGEItFoNXlr6CJKHif/Oq9yaqN13VRRhslrxjwMHsSI752rJ9mu/UAyx19pw1znhrl698MSggZILfFlsNvx77xH8fDoLZpvtJg1y5f93cdJiQZ9k/GXUYOgkHjG2sRw+3X0MPx7JgMFivdo19loNwJ7nMLdfEp6eOBQuEo8LchzFVztPYOneNLSYLGAIuWlbiBC7Aza1X3f8bdZwuDnrJGlQSvHDjtP4futJNBvM7WrYO/sSjO/fDX+7fSQ83aQlb1JKsXpnJr5ddwyNzUYwDLlp64m5srod2a8rnrlrFHy9pJ9Q2rwzC98sO4y6htb2NRgCSimG9I/DXx4ciwAZyYp79uTi66/2o7q6mVeD4yj69Y/FU0+OF+2EXMvhvefw1ce7UHm5od329oyGgGMpUvtG4/HnJyMsUpwTci0nD5zHV//cgrJLtTwaDDiWQ2LvKDz6ynREde0kWSPzSD6+eGUNivMq2tVo+7f4XlF49O25iEsS15n6WnKPX8Bnf/0BBZnF0GiZm/rttP1bXEoUHnl/EXoM6CJZQ+XXQcr8rToiKh1Cs8mEtWfP4qfMLFxubobZZoOLkxOifX2xODUZU+Pj4eqk7EREi9mCjdnn8PPpLJQ0NMJstcHZSYvOvj64vU9PTE1KgIfEiftGDBYrtmaex/LjZ1BU0wDTFY0wXy/M798T03p3h6eLuOgBHyaLDTsz8rDi0BlcrKyHyWKFzkmLTj4emD24J2b07wFvd2UnOyxWG/amF2Dl3gwUlNXCaLZC56RBoI8HZgxNwrQhifCT4Rxci83G4mDaRazemYG8S1UwmqxwctLAz9sdU0YkYvrIJAT4KjvJYGM5HD9diLWb03E2vwJGkwVarQY+3m6YOCoRU8f3RLDCI6AcR3HqVCHWr09DTnYpjEYLNBoNvL1dMXZsIqZMTUVIiI8iDUopMk5ewsZVJ5GVVgSjwWKPsni7YuT4JEyZ3UeWA3KjRs7pImxadgzpRy/A2GoGYQg8vFwxbGJPTL6tPzrHBSvWOJ9ehE3fH8KpvbkwtJhBALh7u2LIpBRMvmMIYrqHKdIAgPz0S9j0vz04tiX9alVWdy9XDJzcC1MfGI2uvaIVa6j8sqiOiIpiTDYbthbm4fDlYjSZTGAYBn4urhgXFYcREdHQOIhqUEodhnEtLIsdBRdwsKgIjSYTCAAfV1eMjonB6JhYh5ETMRpWlsXeC4XYf+ESGoz2G5qPqwuGxkZhbLc4h5ETMRosx+Fg/iXsPVeIhlYDOAp4uzpjYFxnjO/RBc4OIidiNDiO4mheMXZnXUBjqxE2loOXuwv6xUVgfEpXh9s3YjQopTiVV4pdafmo1xths7HwdHdGSmwYJvWLh5uLsJMnViMz/zJ2HctDXVMrLFYWXu7OSIwLwcTB3R0ms4rVyM2vwK6D51Db0AKLhYWHuzMS4jphwsge8PIQdvLEaABA/oVK7Nydg5paPcxmG9zdndElLhgTxibBR6DOiBSNSxersWPLGVRXNcNsssDN3RnRsUGYMCUFfg4iQG23dkc6JYXV2Lk+HVXljTAZzHDzcEFkTCDGzeiNQAcRILEaly/VYNeqk6goroWh1a4RFhOI8XP7IzjCr0M0KktqsfPHI7h8sQoGvRGuHi4IiQ7E+IVDEBoTJPhYlY5HdURUZFNjaMXXWafx87ksNFvM0BBytWaIljCwUQ6d3D1wZ2Iq7uyRCjcn6RGIBqMR36anY1nWGTSaTNdpaAgDlnIIcHPD4uQU3JWaCk9n6REIvcmMJacysOx0JuoMRmgYcvU0Rtv/9nV1xYLePXFnv17wcZUegWg1W7DseCZ+Op6Jan1ruxpeLs6Y168n7h
zUS1YBNZPFhhVHz+CnQxmoaNBDwzDgOA70Gg13Zx1mD0jEHSN6I8hbegTCamOx5lAWftqbgbKapus02rYmXHVOmD64BxaN6Y1Qf+m/RRvLYeP+bCzfkYHiinporly3TYNyFE5OGkwe2gMLJ/VGRLCvZA2Oo9i2LwcrN6fhYrF9i4LjOFD6/9s4Wo0G44Yl4PYZfREVLj0CQSnF7n1nsWbtaeRdqGxHw/7fUSMSMH9OP8TKmAAppTi47xzWLD+BczmXb9aAfatw6Ih4zF04EN0SQiVrAMDx/eexZslhZKcVXdGgdgeJIVe3OweOiMecu4aiewr/EXUh0g6cx5qv9iHjcD4Yjb1yLuWuaBACjuPQd0QC5jwwCj0HxsnSOHM4D6s/2Y5Tu3PAMO1osBxSRyRg9qPj0Gd0oiwNFemojoiKLPLqa7B4y2rUGQ0OC5YxIIj3D8D3k+YgyE18tcpLDQ24Y+0aVOr1jjUIQbSvL5bMmo1QCXVLLjc14+6f1qKkodHh0VyGEIR6e+K722ejs5+PaI0afQvu/34dCqrrHGpoiL366td3z0ZckPjJr6HFiEe/Xofc0iqH1dU1DIGXmwu+eGAWEsLFT356gwl/+XwjMgouA1T4AIaGIXBz1uHjx2cgOUb85GcwWfDCJ5txPKvo6gQnpKFz0uL9v0xHnx7iJz+zxYY3PtyCA8cv3JTf056GRqvB23+bhoG9Y0RrWK0s/vWfbdi1JxeEEMFy9BqNfRL8+/PTMHwofzG6G2FZDv/993ZsXpfebs7KjRqUAn99aSrGTnR8eqsNjuPwzX92Ys2Sww41GA0DynF47KVpmDxPfAIxpRTLPtyBZR/tuJqfIqTBsRzufXEaZt8/QnT9FUop1vx3J75+dbVojUXPTcXCZ6d2SE8jFWGkzN/qqRkVAEBxUyPmbVwhygkBAA4UefW1uG3jcjSZxTX5qtDrMX/lClFOCGCvN1LU0ID5K1agziCuC2hdqwELl65EqQgnpE2jokmP25euQGVziyiNJqMJd3y9ChdrHDshgL2KbF2LAYu/Womy+iZRGgazBfd9vhrnyqpFtXhhOYqmVhPu+XQlCqvqRGmYLDY89sk6ZF4sB3XghLRptJoseOg/a3C+pFqUhs3G4q//3oCT2cUAxGmYLDY8+a+1yMoX1+WZZTm88v4mHDxRYNdwIMJyFFarDc+9uw6ns4pFaXAcxT/e34Lde3OvaAiLsCwFy3J4/e31OHLsgigNSik+fn8btqxPv6rpSIPjKN57cyP27swRpQEA3/x7B9YsOSxKg2PtkZhP3tqIbatPidb46SO7E9J2DUcaAPDNOxux7psDojXWfrYLX7+6WpLGj//chGXvbRKtofLroDoiKuAoxT3b16LFYpZUup2lFMXNjXj2wA6HtpRSPLRpIxqMRskalS16PLVtqyj7p9ZtQZW+RbJGg8GIx9ZsEtV07aU1O1BW3ySp8BZLKVrMZjz8w3pRGm+t3ouLlXWSNDhKYbLa8MhX62ETUanywzUHkVtUJanYGkcprCyLx/+7DmZr+51hr+XLNUeRfr5UUtE4Su0T7NMfrEOLkb+TbhvL1p3E0dMXJTXMo9T+98K769EgotX9+k3p2Lv/nKS+f222r7+9AdXVzQ7td27NwtYNGdJ7CxLgvTc3oqzEsQN6eFcu1iw9IlHAzsdvbcTF8xUO7dIOnMePHzq+J7THV29vQO7pQod2uccL8NXLq2Rp/PjPTTi9R7zjpvLLozoiKjhUVoSLjfWy+sewlGLnpQso1Quv9NMrKpBdVSVb40hJCS7UCd9oz1fV4ERxmWyNrPJKZJVXCdqV1DVi3/lCeRocxcWaehwvLBW0q2luwdb087IqvrIcRXl9Mw6eFb6ZN7easO5IjiwNjqOoazZgV1q+oJ3RZMWqXZmymvZylKLFYMb2I+cE7axWFis2pcmqwEupPfqyZU+28HPhKJavOiFDwe6MsCyHTVszHT6XlcuOyesdQwEKig
1rTjs0XfX9Idm9YxhCsOGnYw7t1ny1T3bvGA3DYL2IqMjaz3dBI7P3EaNhsPazXbIeq/LLoDoiKliak6Gomy5DCH4+myVo88OZTEUaGkKwLOuMoM3P6VmK+pRoGIJlaZmCNitPZcnuTdOm8dNxYY21x5Wt1hhC8NNhYY1Nx8/CdqXui1yN5fuENXYdPw+j2SpbAwBW7swQjHQcPHkBTXqj7OtTSrFmW8ZNNTGu5VRaIWpq9LI1OI5i4+YMWK3873duVilKimplOW2AvTfN9k2ZMBotvDYXz1cgL7tMdu8YluWwd8sZ6Jv53+/yohpkHM6X3TuGZTkc3ZGNuir+hU1dRSOObsm4qcaIWDiWQ/q+syi/JG57UeWXR3VE/uRUG1qxt0TeCr8NllIsO3eGd8JoNpuxNT9fscaqnBxYeCZPs82GtVm5irrpshzF5tw8tJjbv5lzHMWq0znKXgdHse98Iepa+LcDVh7NUtT/hqMUJy+U4rJAPsrqg1mSS4PfqHG2uAoFl2t5bdbuzVLUHZYCKK5oQE4B/3bAhp1nFDWwA4CauhakZZfwjm/eqlyjWW/C0eMFvONbN2ZCo6ADLQCYTFYc3MsfQdqxLk2xBmtjsW8L/4Jg58qTijrptrFbIB9l1/KjCntN26MiO348rPAqKh2F6oj8ySnTNynq4NlGk9mEFmv7E3ilXg8bJ7+7ZhtGmw31xvYn8NpWA8w2+Sv8Nmwchyp9+0mrzSYz9CbHOQuO4ChFeWP7OQNWlkVNc6tiDQAoq+N3RC7XdsznXlbDr1Fa1Sh7hX+dRnUjv0Z5g+yGgtdyuZJfo6S0XrEGwxBUCGiUFtcKRmXEoNEyqChv4B2/3BEaGgYVpfW84+XFNYq69QL28u0VJfwObnlhteztpTYopai4VKPoGiodh+qI/MkxWJWFzq+llccR6VANS/vXMlj4Q9LSNXheR0dq8ERdlG5lXIuB51o2lhOVzCoGvtcBAGaez0oqBiP/dUwm5RoMQ2AQ2NIQ2u4QrUEIDAb+6wiNiYUAMApcp7VFuRNNKWBo5b+OocUsKWm4PTiOwijwXE2tZsWOIeUoDHpxp/1UfnlUR+RPjpvCsuvX4uHUfuGxDtXQtV9AzZ3n32Vp8JSJd1dYPv46DZ4qpa7OHfde8T1frYaBUweEzwH+1wEALiKa9YnB3ZVfw9VBtVcxcByFuxv/ddwExkRrUCp4HXd3ZW0D2nAV0PDwVNY2ALBXN3UTeK7uHi6KoxUMQ+Dqwa/h4u6seKuMMATuXuKaMKr88qiOyJ+cSC9vRcmXbfi5uMKdx+EI8fSEk8RGd+3h7uQEP9f2bx4B7m6Ke9kAgE6jQbBn+9VJPZ2d4S2jAuuNMIQgzKf90tlOGg06+Ygv3sYHARAZ4MM7HhHkq3ifHQAiBSqgdg7165DCUZEh/BpREX6KJyUAiAjlLzMe1TlAsQbHUYSH8Wt0jg5QnL9hs3EIj+AvmBcRHag8R4RlER4VwDseFhukKC8IuPJeCVSkjejSSXFEhBCCsFhlfXdUOg7VEfmTE+DqjrFRcYpPzSzqnsI76Xg6O2NafLziUzPzk5LgxNMbRqfVYk5yD2UaDMH0pATe6ArDEMzv11PxqZlxPbrA151/NTZ/sHKNQfGd0cmX36GZO1x8Jc72YAhBz5gQRHfin1xnj05WFKYnBIgN90dCNP+EMX1ciqJJiQDoFOSF1B78nWKnTlKmAQA+Pm4Y0I+/iuuk6b0U52+4uekwdGQ87/j4Wb0VazjptBghUMV1/Nz+HeIkjJndl3d8zG2DFDu4HMdh/OIhiq6h0nGojogK7uyRqugkCADcniA8sS1KTlF8amZBz2Th59Crp+ITLQt6Cb+OuX2SFE2uLEdxe3/h1zGjX6KiVSXLUdw2OEXQZnL/BOichBv+CcFRittGCmuM7tdVcFvFEZQCc8emCk46g/vGws9BgzlBCDB7YqpgxCM1pTNCHDR/E4JhCGZMTYVWy/9+x3cPRb
SCaALDEEya3gvOAlt7UXHB6J4SKTu6o9EwGD01Be4CWzzBEX7oMzxBfh0RDYOhk5LhE8DvRPsGeWHItN6yozuMhkHfMYkIFogeqfy6qI6ICgaGRiDeL1BWNIEhBJNjuiHEQ3g7IblTJ/QODZWloSEEI6OjEeMr3AgtLtAfQ2M6y9boExGGHiHC4dowXy+MS+wiK2KhYQjiQwLRJ0q4Tbq/pxum9e0hW6NzgA+GJEQJ2nm4OmPOsGRZE5+GIQj29cCoFOEmZc46LW4b30vWFhDDEPh4umL8IP4VPmDPd7ltGv/qWVCDELi56jBplHAjNIYhuG3eAFkahBB7M7+Jws4nIQS3LR4k65QRIfbJddqs3g5t590zTHbEggKYfvtAh3azHxgpu44Ix1HMvHe4Y41Hxsp+HRzLYc5j42U9VuWXQXVEVEAIwTcTZsLH2UXSJK4hBF19/fHusHGi7D+bMhWB7u6SNSK8vfHvCRNF2X8wYxLCfbwlawR5euCT2VNE2b81cxxig/ykaTAEPm6u+GzRdFFh5RdmjkT3iCBJBdo0DIG7sw6fPTATGhE5OU/MHIJeceGSHB6GIXB20uK/j8+Ezknr0P6eGQMwMDlKmgYhcNJo8OHfZsFNRDLqbdP6YOSgbpKcKkIIGIbgvZdmwdvTcdLi1EnJmDg+SaKG/e/NV2chwN9x3s+ocYmYNV98Y7k2DUqBl96YiZAwxx2LB4yIx4IHR0jSaOOZN2chqovjvIqUQV1w7wtTZWk8+uZsdEvp7NCuW+9oPPb+Alka9742B8lDhR1clV8X1RFRAQCEeXph9YwF6OTu6XDSIFf+egZ2ws9T5/OeZLmRQHd3rJp/GyJ9fERrdA0IwIp58+HtIi5J1MfVBcsWz0VcoL+olThDCDr7+WL5nfPh7y4uxO/urMP398xF97Cgq8/TkUaItyd+vH8+OnmLS0R10WnxxYOzkBptj56I0fD3dMeSx+cjQiBJ9VqctBp89Oh0DOze+co1HGgwBN5uLvj6mXmIDeVPWLwWrYbBu09MxfDesVefpxAahsDdVYdPX5gjmBty4/N65clJGD+8h2gNF2ct/v3KHCQnhIvSIITgmScnYOrklKuaghoaAicnLd5+fTb69o4WpQEADz4+FvMWDhSpwUCjYfDyW7MxZLj4iXXxI6Ox6OFRdg0H2xsaDQOGIfjrW7MxekqKaI3ZD4zEvS9Ou3oNRxqEEDz21hxMXjRYtMbku0fgsfcXghDisNx72/h9r8/BnMfFLZxUfj0IVXro+xdEShthlY6h0WTE0txMLM3NQK3RAC1hwIFemXAJbJRDtLcv7kpMxfz4nnDROl4V30iz2Yyfss5gSWYmqlpaoGWYq5VEGUJg4ziEe3nhztRULEjqKes0jMFixc/pWfjxdCYuNzW3qxHi5YFFfVJwe6+e8HCWfnzSbLVh1elsLDueieK6xnY1Aj3dcXv/ZNzeP1nWiRurjcX6k7lYdigDhVX19kgHpaDXaPi6u2Le4J64fUgK/Dyk50vYWA6bj5/Fz3szcOFyLTQMA2pvYAKGIbCxHLzcnDF7WE/cNjIFgd7tnyoSguModhw7h5U7M3C2sOpKxIaCu6LBshzcXXWYMTIJ88f1QrCICMKNUEqx92geVm9JR/b5cvsESK/XcHF2wpQxSZg7uRfCOvnI0jhyrABr1p1CZlYpNBoCUFyn4azTYvy4JMyZ2QcR4fzJvEKcPFaAdStP4vSJQjAMASEEHEfBMAQcx0Gr1WDMhCTMmt8fUTGBsjQyjl/E+mVHcfJgHgi5UYNCoyEYPqEnZi4ehLiEUFkaOScvYt03B3BsV479HsIQcCwFo7FrEEIwdFIyZt47XFQkpD3Ony7Eui9249CGNFD6/8+fYciV5oYUgyalYObDY5A4sKssDRXpSJm/VUfkTwbLcTBzNrhqnAS3CGwch93FF3GkrBiNZhM0hMDXxRXjo7ugf0i44GM5SmGyWeGqFdZgOQ4HiopwoO
gSGk0mEELg6+KC0TGxGBQZKbiytWvY4KLVOrQ7eqkE+y4UotFoAgWFj6srhsVG2fNJBLYwKKUwWm1wcRLWoJTi1KUy7Dl3EQ0GIyil8HZ1wYDYSIzoFgOtwIrwqoZWK7gCppQis6gcu7MK0NBigI2j8HJ1Rr+4CIxMiuU9TXSthrNW4/D15hZVYVdaPur1BlhZFl5uLkiNC8Po1DjBrRh6pfOvzoEGAOQVVWPX8fOoazLAYmXh6e6MpLgQjO7fVbD2CKUUZosNWq1G8D0FgMLiGuw8dA51Da0wm23wcHdGQlwnjBkaL1h7RIpGSWkddu3JRW2tHiazDR4ezoiLCcaYUd0F64JQSmGx2KDRMIIJrABQXlaPXduzUV3ZBJPJCg8PF0THBmH0+ER4OqiDYTHbQBh7jooQVeUN2L0pE1WXG2A0WODu4YyImCCMmZoCb193BxrWKxrCC5LaykbsXnMKFcV1MLaa4ObhgrDoQIyZ3Q++gcJOp8Vs7/KscxbWaKhuxu7lR1FWUAWD3ghXDxeERgdizG2DEBAqvG1ltdhAOQqdS8fV8fmzozoiKtdRadBjeUEGll/MRI2xxb6aBkGMlz/u6NYbM6ISeYuRiaXG2IrleVn46XwmKlr1V1fsnT19cEdCKmZ3SYS3s7IaHPVGA1aezcFPOVkoa7aXKCcAwr28sSgpGXMTEuHLU2dELE0mE9Zmn8WPGWdQ0tB4VaOTlyduT+mJ+T0TRW/h8NFiMmPDmXNYdvIMiuoawFF7xCnQ0x3zeidhbu8kBHtJjzpci8FixdaM8/jpSCYKKuuuavh5umF2v0TM7Z+EEF9lvymTxYYd6XlYfiATeZdrriYP+ri7YvrA7pg7pCfCRW4T8WGx2rDn9AWs3J2Jc0WVV3sJebm7YPLg7pg9MhmdOznOjRDCZmNx8FQBVm3PQE5+xdUjrh5uzhg/NAEzxyUjJkLcVhSvBsvh2ImLWLspDVm5ZbBdadjm5qbDqGHxmDE5FV0U1rVgWQ6nThZiw9rTyMgohu1Kkz1XVycMHR6PaTN6I15mZKMNjuOQcfISNq48gbRjF2G12J0EZxcnDBoRj6nz+qF7zwhFx2sppcg+WYhNPx7FyX3nrnNE+o1MwNRFg5DUL0axxrnThdj03UEc23YG5ivVc52cteg9ojum3jMcKUO7gemA+kd/Vm5JR+Qf//gHXnjhBTz55JP48MMPRT1GdUSU0WQx4u8nt2NbyXmA4KZGagT2THgXjRZ3deuLp3sOh1biD6/FYsYrx3Zj/cWzoBTgcLMGYC8Utig+Bc/3HQGdwOq9PYxWK944tA+rz+WC5W5U+H8dDcNgXvdEvDx0BFy00lY2ZpsN/9x/CMszs2G90ljvRh2GEBAA03sk4NWxIyVXc7WxHD7ccwQ/nMiAxcavAQATenTBa1NGw0vidg7HUXy++zi+P5AGo8V69TO+UYOCYmT3WLw2Z4zk7RxKKb7bdQrf7DyFVpPlasLkdRoMAeUoBnePwisLxiLIR5pjRSnF8l0Z+HrjMTS3msEQctP3V8MQsBxF34QIvHzPeIQESL9HbNidhS9/PoxGvfFqSL89jeSEMLzw4DhEChQ+42Pn3lx8/s1+1De0tq+hIWBZioRuIXj2yQmIiZK+1XLwwHl89sku1NboeTQYsCyH2LhgPPPsJHTtFiJZ48ShfHz23hZUljdevV57Gp1jg/DkS1PRIzlSskbG0Qv476vrUF5U264Go2HAsRzCogLw2BuzkDJQ+ORWe+SevIhP/vYTivMqBDWCI/3xyNvz0G9skmQNlVvQETl16hTmzZsHLy8vjBw5UnVEfgUqDXos2PMjSlsaRdXWIACGh8bi86Gz4awRl/dRa2zFgm0rcKGxTlS3WAJgQEgEvh07G25O4ibxZrMJi9evRk5NtSgNhhD0DArGkulz4CUy76PVYsF9q9fjdOllUY3gGELQNcAfS2+bAz83cREYs9WGx5ZvxOGCYlEaGkIQ6e+D7+
+cIzo6YmVZ/G3ZVuzO5u/yep0GQxDs7YnvHpqDMD9xdTJYjsOrP+7E5pP8XV5v1PDzdMNXT8xBVLC4SZxSivd+2IvV+/i7vN6o4enmgs+enYMuEeImcUopPlt2CMs28nd5vVHD1UWHD/8+G93jxE/iS346im9FdnllGAKdTov3Xp+D5CT+Ams3snrlSXzx6W7RGhotgzfenou+AgXWbmTr2tP4+J1NAG52OtvTYBiCl/4xD4NGJojW2LshHR88u+JqXocQ9hNJBM+8Nx+jpvcSrXFkayb+8eA34FjO4fFf+5qA4PF/3Y6Ji9TiZ1KRMn//4nGnlpYWLFy4EF999RV8HdSBUOkY9FYz7tz3s2gnBLCvmg+UF+KvxzaJmvCNNivu2rkaBSKdkDaNE5VleHTfRrAiuvGaWRvu27weuSKdEMAe9cmursKDWzbAwjruxmvjODy+fjPSyspFd6PlKMWF2jrct3odTFabY3uO4tm123HkYoloDZZSlNQ34r4f1qJFoLFcG5RSvL56N/bkiHNCAHvhs6omPe7/31o0GcQ1AHt/7QHRTkibRr3egAc/WYM6kV2Fv1h7VLQT0qbRbDDh0X+tRmVd+12Nb+THDadEOyFtGgaTBU+9tQalFfwdbq9l3eYM0U4IYP+emM02PPvqahQWiesMu3N7tmgnpE3DZmXxykurkHe+QtRjDu89i4/e3nTFQRCpwXJ467mVyM4oFqVx+mAe3n92BTiOiioYSKld5/1nVyDtUJ4ojexjF/DuA1/DZmNF1SBpc4g+/ttPOLIlQ5SGijx+cUfk0UcfxeTJkzFmzBiHtmazGc3Nzdf9qUjni9xjuNhcJ7nKKAXFlpJz2HP5gkPbb3PTkFtbLVmDoxR7SwuxodDxZLY8Jxunyy9L1mApxfHLpVh5Ntuh7cbcczh4qVi0o3OtRnZlFZamO75B7T5fgB1nL0jX4Cgu1tTjm8OnHdoeu1CC9afPSi6IxXIUlxua8Pmu4w5tsy5V4Of9mdIErmjUNrfi441HHNoWlNXi280nJGtwHEVzqwn/WX7AoW15dRO++OmQLA2jyYIPvtnj0LauvgWffCneQWijLZH1/Y93OLRt0Zvw4QdbZWgArI3De+9ucjjpm4wWfPDaeska9pNEFO+9vAacg0WHzcri/b8tv3kPUaTOv/66/Go+DB8cx+H9x5fYHRAZOv9+6geYOqBDskr7/KKOyPLly5Geno53331XlP27774Lb2/vq38REeLDkyp2zKwNPxWkS5702tAQgqV5whMfy3FYcjb9pnwQsTCE4Puz6YI2lFJ8f0bYRggC4PszGQ5vtEvSMmX3daEU+CEt02F058cTmbJ74HCUYvnprKs5JXz8fCRTUvGza2E5irUnc2CwWAXtVhw8o0hj6+nzaHYQeVm9V5nG/vQC1Da2CNqt23lGdodYlqM4mVWMsspGQbstO7IgIujXLhxHkXu+HBcvVQva7dyRBauDCVhIo7ioFmdzLwvaHdiZA0OrWZYG5SiqK5qQfrxQ0O7Y7lw01bfKap1AKUVTfSuO7c4VtEvffw7VZfWgcqqxUsCgN+HghjTpj1URxS/miJSWluLJJ5/EsmXL4CKyGNULL7yApqamq3+lpaW/1NP7w7K9NA9NFnFh9vZgKcWRqiJcaq7ntdlXVogqg/DNXgiOUpypqUBObRWvzYnLZShqapTp6tgXPRcb6nGqnP9Gm11Ridwq8ds+7VGhb8GhS/zh58KaepwsKlPUA6fBYMSe8/xbLhWNeuw/V3j1RIkcDBYrtmXyh7gbWozYnpanSMPGsth44izveIvRjM1HchVpgALrD+bwDpstNmzYnaWoMRvDEGzYzb91ZGM5rNvs2AkWQsMQrN+SyTtOKcW61adl/z4Ae5LshnX8kyulFOt+Pq7odAqjYbBxpXCEa+PSI4q6GzMMwcYfhKNtG789ILv/DWCvf7Lhm32yH68izC/miKSlpaG6uhq9evWCVquFVqvFgQ
MH8PHHH0Or1YJtZ//e2dkZXl5e1/2pSGNryTkwChu8awjB9tLzvONbLuUp6nLbprG1iH/i21KQBy1R9vXUMgy2FuTzjm/LuyCqFLoQGoZg23l+jZ1nLyh+rxhCsC2HX0NKXggfhEDQETmQXSgqr0cISoHtp/k1TuQUw2xxnHMjBEcpdhzn/+5mnC1Di0HeCv+qBkex8zC/xvm8CtQ3iMuH4YPlKPYc4N++vFRYg4qKRnnbGW0aLMXB/ed4nbKq8kZculClyKHiWA4nD1+A2dR+tK2pvhU5py8pcgw5jiLn1CU01bf/npsMFpzekyu7/w1gj+4U5pShqqRO9jVU+JFeFlMko0ePRnb29Xv0d999N+Lj4/Hcc89BI/EIp4o4qo162VsmbRBCUGcy8I7XGFsVd+slIKgX0KgzGsFSZRMfRynqjfwa9QajuOw7AViOos5g5B2vazXYV5RKbuaUorZF4HW0GKBhGNiU3GgpUKvnnzwbWgxXj7IqQShhtV5vaPe4sVTqm/nfqwaBMSk06fk/84bGjtFobTWDZbl2S6Q3KHR02rDZOBgMZnh43By1buSZ2KVCKUVzkwGBLjefzGqskx9ZvZGm+hZ4+91cgE3f0KLImbqWxlo9giPVrr0dzS/miHh6eiIx8fqulu7u7vD397/p31U6DpvCVSsAgAI2yr/3bOPk7UvfICH4XPnqhUjSoBRWAY0Oea8AWAXeD6UTdxtCJ4CURiraEHJk7GPK3QTBz5zl0G5REokIvR831oyQrSFwnY76PNqu1Z4j0lGvA7Anrrb777+ChpIohVgNvn+Xg81BrpaKPNSycX8wfJ2VVf1sw1vHn9fj6+wGonD7hwDwEtDwdnbugO0fRrCaq7eLs6L9b8C+beIjkAPl5aqsYm0bfu789Uo8XZwVhbbb8HHjfx2ers4OTz+IwUtIw82lQ1auHgLvuadA6XUpuAloeHSQhpOThresvqensirF19JeNAQAPLw6UIPnWh7eyiohi7mWh0/H3BPtGh13LZX/51d1RPbv3y+6mJmKPAYEd1acI2KjHPoF8VdF7B8SAcUrY8qhfyf+U1H9wyIUb//YKId+YfzdVftFhCuOinCUom8Ev0bfKOUahBD0ixLQiI1QlHAL2B2q/nH8n3mfruGKI1QahqB/N36NlK5hChXsGv2682skdg1VlBjZptEnkf+7261LJ4f9XRzBMESwqFl0TBBcXaVV9m1PI6F7KG/n2rAIf3j7Kpt4CQEiYwLhzuPs+Ad7ISjUR5EGAASF+sA/uP2cQncvV0R27aR40eHt74GwmCBF11BpHzUi8gdjXmwylPzeCIBIDx8MCo7itZkV1wM6kdVX+QhydcfoyFje8cldusJTYgn1G/FydsbkOP5um6O7xMJfZGVUPly1WszowV89cmB0JMJ9vBS5hhpCMDuVfzuzZ2QndOkUoOhzB4C5A/hLWceFBCAlJlT2UWfAvk01b2hP3vGwQG8MTIxS5CiwHMW80Sm84/4+7hjRv4vsI8JtGnMmpvKOe3q4YNyoHu1uqYiF4yhmT+WvGOri4oRJU1LAaOS/Do6jmDm7L++41kmDKXP6Kvo8KAVm3j6A1wlgGAZTFw9W5CQQQjDtjsG8fWEIIZh+30hF0TaGIZhy1zBoFTqYKu2jOiJ/MAJc3DEpMkHRtsZd3foK3hi8dM6Y06WHbA2GENzZvZdgXxsXrRNu69FTtoaGECxMTIazlt9h0jIMFvVKkT25agjB7J494OHM7zAxDMGi/vyTlkMNhmByUjf4CmzNEEKwcEiK7NQKDUMwonsMOvkId0G9fXiK7MgLQwj6do1wWOZ9/pgU2dtMhAAJUcGIjxJuHjdnQqrs3B0CICLEFykJ/BEqAJgxOUVRjkWAvwf69xEuwT51eio4Vv7k6unlgiHDugnaTJzZW1EkzMXVCSMnCPdqGTe7jyKnTaNlMHZWH0GbkbP7wllBBIkCmKCWef/FUB2RPyCPJw6BjtFKzu
PQEIJID1/MieFftbbxUM/+cHfSSZ7ENYQg2M0DixJSHNrem9Ib3s4usjR8XFxxV7JjB2BhajIC3d0lOzwMIXDX6XBfv94Obef06oFwX2/Jq3CGEOg0Wjw4rJ9D2ym9EhAb7CdZo61Z4CNjBzi0HZUSh+6RQbI0CCF4bMogh7YDk6KR2i1c1iqcgOCxuUMd2iXHh2FgarQsDQrgscXDHK7gu8Z1wqhh8bJX+g/fO8Lh5Bwe4Y9JU1JkR8Lue2AkdDrhyGZgsDdmLRgoTwDAHQ+NgqubcM6Ml687bntklGyN2x4eBS/fm0/LXIuruwvueG6qbI2ZD4xCQIiP7MerCKM6In9A4rwD8OXwOdAyjOh8EQ0h8HV2w9JRt8PDyXGyXaSnD74dNxtOjEb0JK4hBJ46Z/w4YR58nB1viQR7eGDJ9Nlw0Wolabg6OWHp9NkIcnfcLM7PzRXfz58Fd51OkoZOo8HXc2cg3NtxszgPF2d8e8cs+Li6iJ7EGUKgZRh8vmAaYgIcN4tzcdLiy/tmIcDTXZKGhmHw78VTEB/meO/bSaPBfx+eiRA/L9Eabc3J3rlrApJjHLegZxiCD56YhqgQP9GOQtvH9tLdYwXzQ/7fnuDNv0xB16ggyc7IX+4eiaF9xHV8ff7pSUjqHibZGbn/zmEYM6K7KNsn/jIeffrGSHZGbl84CJOniovU3fvEWAwZnQCp+4vT5vfDrIXinJgFj43BmJmOnfobGTOrNxY85rh9CADMfHAUpt4zXJoAAQZPScU9L8+U/NxUxPOrdN+Vi9p9VxnpNWW4/+AqNJiNYEDarS+iIQQspejqHYjvRs5HiJu09zm7thJ371yDGmNru63ar9WI9vLF0vFzEenlI0kjv64Wd21ci4oW/dVr8WmEeXrh+2mzEOcn7ax/UUMj7l25DsWNjbwaba8vyMMdX8+Zge7B0hLXyhub8eCy9bhQXedQw9fNFV8snI7kcGnt2mubW/HIt+tx9nI1b92PthOyni7O+PiuaegbK7zNcCONLUY89b+NyCws59eAPXrg5uyE9+6ZjCE9oiVptBjMeO7TTTh5tkTwdYACOict3nhgIkb16SJJw2iy4tWPt+Dw6YuCNVIIAbQaDV54aBwmDBPnILRhttjwz/9sw54D56DRELA8WymE2DvWPvXIGEybmCJJw2Zj8eEH27F96xkwDOHd2mpzuh54eDTmzHMcZbsWluXw5b+3Y8PyE2A0hHdLiGEIKAXueGgkbr/XceToWjiOw5J/78DK/+2zvw4+DY39Nc57YCTufHo8b25Ie1BKsfzD7fjhvc0gBPzvlYYBx3KYdu8IPPDGHEVbR39WpMzfqiPyB8dks2JLyTksyTuNnIbK68YIgBGhsbijax8MDYmRnSthZm3YVpSP73PTkFFzc0fPIaGdcVf3XhgVESu7kqmVZbGzsABLsjLaLdvePywcd/ZMxZjoWDjJLJZn4zjsu3gJS9MycKz45vYCqaEhuKN3CsZ1jRPMPRGC5TgcLijGspOZOHSh6CbXsEdoEBb3T8WEHl3hwnN00xEcR3G8oAQ/H8nE/nOFN+WOdOkUgEVDUzExpRvcdE6yNCilSCu4jBUHM7Ens+AmBzQ62Be3j0jF5L4JcHeRtzdPKUVWQQVW783ErlP5N+VchAd5Y/6YVEwe3B2eAseCHXG2oBJrd2Ri55FzsN1Qc6JTgBfmTEzF5BE94O0pP7H5wsUqbNiaiR27c2C5oT9MgL8HZk3thUnjkuDrI7zFIETRpRps2pCO7VuzYDZfX8nUx9cN02f0xsQpKQgIEM4FEqKsuBZb1pzGtnVpMN7QBM7L2xVT5vTFxFl9ENTJcaSQj4qSOmxdfgLblh9Hq/76dhXuni6YeNsATLqtP0IUFBarLqvHth8PY8uSQ9DfUBzO1cMZExYOxqQ7hiI8VjjfSIUf1RH5E1BvbkV2YxmaLEZoCAN/Z3f08usseJrlQlMNLrc2w2CzwNPJGbFeAQh153
9fmyxGpNeWodFiBEMI/Jzd0DcgEi5a/snrYmMdSlua0Gq1a0R7+yHCk/+m1GwxI626DA1mIwgIfJ1d0S84HG5O/JNXUWMDipua0GIxw0PnjCgfH3T29uG1b7VacKriMhqM9mqYPi4u6BMSBk8d/xZUaWMTLtU3oMVigbtOhwhvL8T482+RGK1WnLps1+AohY+LC3qFhsJboMZIeWMzCmsb0GI2w03nhFBvL8QF8d9czTYbTpdcRn2rESzHwdvVBcnhIfATOPlT1dSCi1V1aDGZ4eLkhE4+nujSyZ93pWqxsUgvvow6vQE2joOXqzOSwjshwJN/gqxtbkVBeS30RjOcnbQI8vFAt7BAXg0byyGj8DJq9QZYbTZ4ubqge2Qwgrz5t9Iamg24UFYLfasJOictAnzcEd85iFeD5ThkFZSjpqkVZqsNnm4u6BYRiBB//u97c4sR+ZeqoW81w0mrgZ+PO+Jjgnm3bziOIqegHNX1LTCZrfB0d0aXyCCEBvF/31tazci/UAl9iwlaJw18vN0Q36UT74qbUopz+RWorGmG2WyDu5sO0Z0DERHqy6thMJiRn1cJvd4IjYaBt7cbusWHQKtt30GnlCI/vxIVlU0wGS1wc3dG50h/dO4cwKthMlqQf7Yc+iYjCEPg5e2KbolhcOJxnimlKLxQhcul9TAZLXB1c0Z4Z39Ex/JHFS1mK/LOlELfZK9U6+nthm7JEdA5899/ii5UouxSLYytZri46RAa6Y+Y+BDe74nVYkNeRhH0Da3gOApPHzd0TYmCixv//afkQiVKC6pg0Jvg4qZDcIQ/uvSMUHw8+I+G6oj8QaGUIquhFMuLTmJ7ec5NJdC9nFwwt3NfzOncB2Fu/DcqR2TXl+PHgtPYWJJ7U9VQD60O82N64fbYXojydJy7wMfZ+mr8eD4Day7mwMxe31/ETeuEeV16YlG3VMT5yF/1XKivxY+5Z7DyXDaMtus1XDRazI7vgcWJKYj3D5Stcam+AT9lncHK7By0WK5fIeo0GkxPSMCilGQkBstfWZU2NGFFehZWpGej2XR9nxQtw2Byj25Y0CcZyWHyayVUNOqx6mQ2Vpw4g8YbOuRqGIKxiV2wYEAyekVJz3loo6apBWuP52DFoTOo019fBp0hBCOSYnDb0BT06yL/pl7fbMCGIzlYsfcMam7owksIMDgxGvNHpWBA986yj6U2tRix+UAuVu3MQGVt803j/ZI6Y+64VAxKiZYdAWxpNWPHvlys3pyGyxWNN42nJEZg9pReGNwvDlqZ2wYGgxm795zF2rWnUdJOD5UePcIwc0ZvDB3aTXZdFJPJin07crBhxUkUFtzc5LJrQihmzO+HYaO7Q+csLwJoMVtxcHs2Ni47hgs5N0dLY+JDMG3hQAyf1BMuMk/OWC02HNmWhY3fHcS5tEs3jUfEBWP6PcMwcmYfuPHUTPmzoToif0CMNgueT1+NfVXnoSEMbx8Whtj3aJ9MGIO7Y4dIuqGbWRueP7UJm0pyBTXa8hse6z4ET/YYLknDyrF45fgu/Jx/hjdP4lqN+3v0xQt9RkraNmI5Dm8fPYBvs9JEaSzsnozXh40WPE58I5RS/PvIUXx24oQojRkJCXh3/DjoJGwbUUrxvyOn8J99R8AIaVzJbxgXH4d/zZgoeUvnxyMZ+OeWAwDAezy3TWNIl87498IpcBc4stwea4/l4K2Vu0GpY43esWH48L5pglVY22P7ifN47fsdsLGUt2ZEm0aPqGB89MQM+HpKK9h18HQBXv7vFlhsNt7j0m15GrERAfjwudkI9HWcNH0tJzMu4eV/bIDpyvZKezptGhGhvnj/tbkICZa2FXImqwQvv7wGLS1m3qr6bRrBwV745z/nIzJC2qIg7+xl/P2pn9HUaODVIAwB5Sj8AjzwzkcLEdNFmsN+Ka8Cf3/ge9TX6K9ei0/Dy9cNb3xxF7olScuJKrtYhZcWfY7qsgbeHJy21+fu5YrXvr0fif35ayT9WV
AdkT8YRpsF9x/7HjmNlyU1tLs7dgj+0n2cKFsLy+LeQz/jeHUxqASNBbG98HqviaKcERvH4cG9a7G37KKk2gQzYrrj30OniHJGOErx1O4t2HiBvzvqjRAAY6Pj8Pn4aaJWsJRSvLRrN1bc0NTRkcagyEh8M2um6ByWf+46iG+P87dpvxGGEKSEheC7RbNFOyOf7T6GT/ccl6TRtVMAlj44T7QzsmRvGv694aBoDQ1DEBnoi6VPzRftjKw5kIV3ftwjuhuOhiEI9vPE9y/cBn8vcXkZ2w6fxeufb5Ok4evthm9fX4ggf3F5GQeP5eOVf24EBUQV4NIwBB4eLvjivYUICxEXBT11+hJefHEVKKWiarYwDIGrqw4ff7wI0VHioofZGcV4/vEfwdo40Ro6Zy3e/+JOdE1wfLoKAPJzyvDcnV/DYrGKqqfCMAQarQbvfHMPEntHidIozqvA0zM/hMlgEdUXh2EICEPw+vcPovfweFEaf1SkzN9qKvDvgJcy10p2QgDgu4uHsarolCjbl9O2SnZCAOCni+n4Jv+EKNu3T+2V7IQAwPrCs/go84go2/+cPCLJCQHsk8quSwV459gBUfb/O3VakhPSpnG0pASv7tkjyv6n02ckOSGA3QnLvFyB5zfuEGW/Mf2sJCekTSO/shZP/7RF1ES550yBJCcEsFcuLalpwJNfbRQ1iR3LLca7y+zvq9jvFstRVNXr8eTH62EV0cgsM68Mb365XbJGQ5MBT/xjNUwWq0P7vItVeO39TaDgj+i0p9HSYsLTr65Cq8Hs0L6oqBavvLIGHCfOQQDsuTBGowXPPrsCTU38XYfbqLjcgJefXi7aCWnTsJhtePGJZair0Tu0r6tuxssPfC/aCWnTYG0sXn1oCSrL6h3aN9W34MUFn4l2Qto0OI7izfu+RsmFSscPUAGgOiK3POeaKrC74qxkJ6SNT/L2wMrZBG2K9PVYU3RGshPSxse5B2CwWQRtKlv1WHIuXXaVxs+zj6PJbBK0aTAZ8XnGSVnXpwC+y0pHdatwW/JWiwWfHDsmW2Nldg6KGxsF7Sw2Gz7cJ87xuhGOUmw7m4/zVTWCdizH4d/bD8vWOJxfhMySm09IXQulFB9uOiSrvD3LUaQXXsaxvGKHtv9dK+91sBzFueJqHDhz0aHt/1Ydla1RVF6P3cfyHNp+v/wIOI5KrpDLchSV1U3YvjfXoe1PPx+DzcZK1uA4ioaGVmzenOHQdvWyYzCZrJIr5HIcRYvehPUrHf+GN/x4FC3NRsmVZTmOwmSyYu33jr8zW388ioZaveQOwZSjsFpZrPx0l6TH/ZlRHZFbnBWXTkBD5H9MjRYD9lYKRwiWXUxTVBLeYLNic4nwTfCn/EzJBZGuxcqxWF0gHIVYdS5HcQv25eeENTacO39T4qsUGELw85ksQZsd5wrQZHK8uuVDQwh+ThPWOJh3CTX6VkEbQQ2G4OdjZwRtTheUoaSmUbbzqWEIlh/KFLQ5W1SJ8yXVssvbMwzBir3CGkXldUg/V6qg9DzByh3pgjZVNc04euqioi7KqzenCUZSmpoM2LfvHG8tE0dwHMW69WmCpesNrWbs3JQpefK+VmPL2jRYLPy/MYvZim0rT8p+rziWw861aTC08v/GWBuLzUsOtZtzIlbjwIYMNNULL2xU7KiOyC1Ms9WITZfP8CaNioEBwU+X+MPvJpsVKwszFHW6JQCWXuDfArJyLH48n6G4Q+z35/hvtByl+D5bfsSl7RpLszMEu+UuychQ1MCOpRTLs7JgFnBmfjiVoay5HKVYdyYXLWb+G+1PRzMVOZ8sR7EjOx/1LQZem+WHzihuLnfo7CVU1N98MqWNVfuzFGlwHEV6/mVcqrj51Egb6/Yo06CUIr+4Bmcv8ofqN+3MUnT8k1LgckUj0rNLeG22bctS5OgAQH19K46f4I8g7dmWDbOAEyGGFr0Jh/ae4x0/vCMHLc3C0VFHmE1W7NuUyTt+cs9Z1Ffzf+
/EwHIcdq0Ut239Z0d1RG5h8poqbzo+KxUOFGcaSnkn8ILmWrQ62FZxBAVwvqn6pmO4bZTqm1Bvdry37EijtIX/OrWGVpS3ON5bdkSt0YByffs3oFaLBQV1dYqcHQDQWyworG9od4yjFFnllYqdNrONxfmqWt7x9OJyRc4nYE8+zr1885HMNtIulsluLtcGpUBWMf8WUFqecg0AOFPAr5F+tlSxBkMIsvJvPlp6VT9XfsSlDY2GQc45fo2cnDJFHWivamSX8Y7nZpUqcqLbNHIz+R2q3PRiaLTKpi6GITibwb/tl3uqEBqeuitioZQi91Shomv8WVAdkVsYvVWZ198GRymMbPvORnMHaQD2Amjtalg6UIMnT6RZYPX/e9JoNVtkbzPcSLOpfQ2W42CyKlu1ttFk5H9PWk3KHNw2mgWSMFsE9MXCMATNBv7vqL5V+feXYQj0Aq+jWa9cgxBAL7Dd0NSsbDHQRovA+9HSbFTsUFFqT8Dl1dAr1+A4Cr1A4m1rsxHi05J5oEBzvfztzz8TqiNyC+PEKPPIxVzrj6Iht6hTuxo8x2udZBanav9av93rYAhRvGr9fw3+5yu3mNfNGvzfn47oAUIphZPA6pevIqk0DTjQ6ID3yoGG3KJk10KI8Puh/RU0nJy0sjsO/78I4KT7ZV8HADg56G6sYkd1RG5h/J2lFULiw02jgxPT/g8iwEV+b4tr0RIGXk7t13wIcJVWNIoPAsDPpf1y5n4ubopyN67Fn+f5eru4KMqruJYA9/Y1XLRauMjsYyNWgxACbwW9Wa7T8OD/bP0U9Ga5Fn+BomP+Xsq/W5QKXyfAx13xd4vlOPh68b8fAX4eip1DlqPw9RZ4r/w9ZFeTbYPjKHx9+DV8/Tw6wDkk8PHjvy/5+HuAKPxENAwDH3/++6tPoKfiyCSjYeAX/OetfyUF1RG5hYn37oRQVx9F19AQBpPDe/KOR3n4oYtXoKKftYYwmBCewLsCDnbzRK/AUEU3Wg0hGBEeA3eeHjRezs4YGhGlyFFgCEHfkDAEurV/E3TSaDCuS5xCDSAhMBCR3u1XwiSEYEpiN0XJkQRApK834oP5i09NSYlXpAHYnZDkSP7iU5P7JCieXD1dndGvawTv+KQBCYpXx85OWgxKjOIdHzcoQXFekEbDYFjvON7x0UMTFOcFARTDB3XlHR0xIqFDtjRGjEzgHR8+trvgqRoxsCyHEWN78I4Pm5jUIRrDJ/LfF4dNSZV98qcNjuUwbGovRdf4s6A6IrcwDGGwILq/Iu+fpRzmdeZv+U0IwZ1d+iq60bKUw+IufQRt7krorehGy1KKO+N7C9rcmZSqKAGToxR3JQnfOBanpCjUAO5MTRU8IbGgT7Li5MhFfVMENeb376lIgyEEtw9MEdxKmjMoSfb1AXtexexBSXAWqBI7dVAPRVtAGoZg6uDu8HDlb4A4fnACXBSE2DUMwdgB3eArEHUZMbgrPNz5n4MYjUF9YxEcyL8CH9A/Fv4CUQBHMAxBcnKkYKn35N5RCA33le0cEkIQFx8iWF21a2I4YhNC5Z8yIkBoZ38k9Y3mNYmIC0bSgDgwGvn3Xr9gL/Qd1V324/9MqI7ILc70iFTZuQkaQtDTJxzdvDsJ2k3rnAg3rU6Wu8MQgjivAPTyF+7fMKFzN/g6u8pyqhgQhLl7YVgY/40DAEZERiPE3QOMDA0CAn9XV4yL5l+1AkC/8HDE+PrKWukTAB46HabEdxO06xESjKSQYFmRFwJAp9VgZrLwDTA60A/9YyNkR3cIAWb3TRS0CfbxxPDEGNmRF0op5jpwZnw9XTG+n/wIEstRzBmeLGjj7qrDlBGJsrc1WI5iztgUQRudkxYzJqYo0pg1WdiJ1mgYzJzRW/YEznEUM2cKLwYIIZh5W39Z1wfsn/mMefwLpzamLx4k+wQQATB90SCH78P0e4ZJLph2VYMhmHbXsA7JYfozoL5LtzjeOje8ljxD8uMYELhonPBGykyHtm
5aHd7vN02yBgGBE6PB+/2nO/xR6zQafDRsqmQXgcC+Evto2FSHk7+GYfDxuCn2fg8SNQgBPh47xWEfGEII/j15EpwYOe4O8MHEiXB14m9j3sa708bB2UkLqfMSBfDO1HHwcnGcA/LazDFwd9HJcqpenj4agZ6O84temDMSPu6ushyFp6cPQ3iAj0O7p+YMQ6CPhyyNB6cOQJdw/nb3bTwwZzDCgnxkaSyY1BuJXRz3T1k0ewCiIgIkaxAA08Yno09yZ4e2s2f3QXy3TpIdHkKAUSMTMHQI/9ZPG5Nm9kZy7yjJGgxDMGBoV4ye6DiSNmpqCvqPiJel0bNfDCaJcHYGjk/C8Om9QKRqaBh0S+mMGfcOl/S4PzOqI/I7YEp4Mp5PnARAXHFSDSFw1erwef87EOMprknVuPB4vNNnChiRMQsNIXDRaPH1kPlI9A0RpTEsLBofDZ8KDSGiohYMsTs6X46ciT7B4jpm9g0Jx+fjp0HLMKImWAYEGobBJ2OnYHC44xs5ACQFB+OrmTPgrNWK0iDE/lr+OWE8xsSJ68rZJSgAX90+E65OTqKiFuTK3ysTR2FKorhmW5H+Pvj63tnwcNFJmvyenjAEc/uJ23YJ9vHEl4/MhrebiySNB8b3x+IR4vbX/bzc8PnTs+Hv7S5JY+GYXrh/6gBRtl7uLvj4+TnoFOgtSWP6yCQ8dru4CcnNTYf3X5uDiDA/SRPsqKHxeOrBMaIiHc7OTnjnnXmIjQ2SpDFwYByefXayKA0nJw1efW8+4hPDREdfCLFv67z49mxRUQSNhsHzH9yGnv1ixGswBPHJEXj5k0WiTsUwDIOnP1iI/qP581VufgxBdEIoXv/+ATi7SutQ/WdG7b77O2Jf5Tm8n7sDpYZ6aAhzU8XVtn/r5x+Nl5KmIFqkE3ItBysv4u2MXbiorxXU6O0fjtd6T0SCj7S23QBwvLIEr53YjfMNNdC0096+7d+SA0LwxoCxSA4Q5+hcS0ZVBV45uBvZNVWCGt39A/Ha0NHoFyqtNTgAnK2uxqt79iK9vFxQI87fHy+PGIEhUeIcnWu5WFOH17btxcnisvY1rrS17+zng+fHDsOortLbj5fUNeLN9XtwtKBEUCPM1wvPTByK8UmOV8U3Utmgx9ur9uBQ7iUQQm7KF2rTCPbxwONTBmNqX+l763XNrfjHsr3Yn2Gv/HmjRlsLd38vNzw0fSBmDeNPVuSjqcWID5bsxZ7jeaC0HY0rr83H0xV3zxiAeeOF84Hao6XVjE++3oudB85e6T3T/uvwcHfGgln9sWBWP8mRAaPRgi++2IvtO7Lb7T1DCAGlFG5uOsyZ0xeLFw2WvM1gMdvwzad7sGXdlZLtN2nYTyy5uDph+ty+uPOhkZKPStusLJZ+sgublh2HyWi5es3rNADodFpMnNcP9zwzATqJ+T4sy+Hnj3Zg3Vf7YGgxgzDk+tLvxB4d1jppMHZeP9z/8gy4uMnP9/mjIGX+Vh2R3xmUUpysu4Tll07gRG0hDDYLGELg5eSKiWFJmBfVD9EejkPNjjTSakvxQ8FpHKosRIvVbNfQuWBieAIWxvVGV+8gxRqZtRX44Xw69pZehN5qL8TkpXPBhM5dsbBbKhL9pTs5N5JTU4Wl2RnYeakAeotdw1PnjNFRsbgjMQXJwdKdnBvJq6nBsjNZ2Jafj2azGRyl8NDpMCwqCotTU9A7VEFi3RUu1tbj57Qz2JqbjyajCRylcNc5YWB0JBb1TUG/zuGKNYprG7HqZBY2ZZ5Dk8EEG8fB3VmHPlHhWDAwBQPjIhUf/7xc14Q1R7Ox6dQ5NLQYYOM4uOl0SIkOwW3DUjA4IUpx/ZHqhhasPZiFjUfPor7ZAJuNhauLDj2igjF/VAqG9oxRXK+lrrEVG/dnY+P+bNQ1tsJqZeHq4oSunYMwZ1wqRvSJU1x/pLHJgK17srFpZxZq6lpgtdjg4uKEmM6BmDkpFSMGd4VOIJFXDHq9Cdt3ZGHz5kxUVz
fDYrHB2dkJkZH+mD69F0aNTICzs+OtRCFaW8zYsy0Lm9acRmV5IyxmK5ydnRAa4Yepc/pg1PgkuLopix4YW83Yt+UMNv98HOXFdTBf0egU7ovJt/XHqGmpcPdQdmTdbLTgwMZ0bF5yGKUFVTCZLNA5OyEozBeTFg7CmLn94SlwtPnPhuqI/M4o0F/GrqqTqDE1wsxZ4a51RYxHKMZ36gdfnafgYymloiagAn0VNl1OR4WxESbWAg+tC2I8gjA9vDcCXYTf27aviCOdQn0t1hZnorS1EQabBR5Ozojx8MfsqFSEurV/XFWqRom+ESsLsnBJX49WqwXuTjpEe/phflxPRHj6dIjG5ZZmrDifhcLGBrRYzXB30iHSywdzuyYixsevQzQqW/RYlZuLC3V1aLFY4OqkRbiXF2Z374Gu/o4dSTGfe21LK9Zk5SKvuhZ6sxmuTk7o5OWJmUndkSBwtFeKRkOrEesycnG2vBp6kxkuTloEeXlgWnICksKFk6TFajQZTNh0+iyyS6qgN5rhrNUgwMsdk3vFIzkqxOHjxWi0GM3YevI8zhSWQ28ww0mrgb+nG8b36YZeXRxvMYjRMJqt2HH0HDLyLkPfaoJWy8DXyw2j+3ZFn+6OnTwxGmazFXuP5iEtpwTNepO9Xoa3K4b164L+qdEdomGx2HDwSD5OpV2CXm8EYQi8PV0xaGAXDOgX69DJE6Nhs7E4cjAPJ48VQH+lIqynlyv6DYzD4GHdHDp5YjRYG4uTh/JxbP956BsN4CiFp7cbeg+MxeAxPRxGTkRpsBzSDp7HsR3ZaKpvBcuy8PRxR/KgLhg2ORnOLn/c7RvVEfkdQCnF/uoMrCk7gDx9CTSEAUcpKCgYEFDYw7zDA1MwN2IU4jzDZGnsqzqLHy8dxpnGmzXaGBncHXfEDEWiD3+9BiEOVF7AN/nHcKK26GYNYn8eI0O64t4ug9AnIFKWxuGKIvwv9wQOVVwCQ4g9LH5FgxB7iHxYaDQe7N4fg0KiZGmcqCjF/86cwt6Si3YN2K9LQMAQ+xHiQaGRuL9nX4yMjJGlkVFRjq/S0rDzYsHVf7Nr2D9vllL0DQ3Dvb16Y1yc8AkePnIrq/D18dPYfu7C1Wj4VY0rWyDJoZ1wV79emJTQVVYkJb+qFt8cOo2t2XlgKQcCco0GA5bjkBAShDsGpmJacoKsSMql6np8vy8Nm9LOwWZjr27pXPs64jr5Y9GwVMzoJ+8Yb1lNI37YnYaNx87CYrWBXNn2uPZ1dA72xYKRqZgxJNFhMnN7VNXpsWzbaWw8kAOj2Xp1Gwqw5zqwLIewQG/MG5eKWaN6yopy1Da0YMXG09i4OwutBku7GkH+npgzKRWzJqbCRUaUo7HRgFXrTmHjlkzoW0xXr3uthr+fO2ZM7YXZ03vDTcb2hL7ZiHWrTmLj2jQ0NRra1fD2ccO0Wb0xc24/eAoUiePD0GrGhp+OYdPyk6iv1ber4entislz+2LmokHw9pVe9NFktGDz0sPYuOQwaioaodFe0aD2ZFaO5eDm6YKJtw3ArPtGwC/ojzW/Aaojcstj41h8lL8S2ytPggEBJ1DFQwMGIMBz8QsxMlh8cRyWcvj3ua1YXnzMsQZhQCnF3xNnYHqEcD2Qa6GU4j+5e/Fl/pF2cwuu17BPIi8lT8DiWMcZ69dqfJpzDO9nHhSlwVKKZ1OH4+EeAyRNsN9kn8Zbx/ZddQYcaTyeOhBP9xksSWN5dhb+vncPCCCo0ZZncHdqL7w0bLikUy2bcs/j2Y3bAQLBOiFtGvNSEvHahNHQSpjEd58twNMrt4CjVFCjbb9+clI3vDNrHHQSKsYeOV+Ep77bBCvLCmvAngMwskcM/rl4Elx14ifYtAtlePLT9TBbbQ41AKBffCTef3Aq3CWsYnMvVuCp99ei1WhxrEGAnl1C8f5fZsDLXfw2woWiajzz5mo0iujzQghB1+ggvP/3WfD1Fj/BFpfW4a8vrEBdfYsojc4RfvjXO/
MRGCAc0b2WivIGPP/UT6isaHSowTAEnUJ88I8PFyAk1Fe0Rm1VM1586HuUFtVen+fBo+Ef5IV3vrgTEdHi8+0aa/V4+e6vcDH3ssMjxoyGgZevO95e8gBiuktfbN7KqI7ILQylFO+d/wl7qk5LLiL2So+7MTTQcYIdpRT/PLsJq0qkt6B+veccTAlLFWX7Qc4e/C//iGSNV5InYmFsX1G2n2Yfxb8yD0rWeC51BB5OFHci4vucdLx2dI9kjcdSB+CvfYeKsl2Vm4Pndu2UdH0C4M6UVLwyYqQo+23n8vHkui2SNWb17IF3Jo8V5VQdyL+ER37cAEqp6O8vIcC47l3w73mTRUVGThWU4v4v1tgjayJFGEIwqFtnfHLvdFH5H9mXKnDfv1eBZTnRhfYYhiA5JhSfPzFLVNTiQkkN7nvjZ1hsrOiKpgxD0DUyCF++NE9U1KKkvB73P/cjjCaraA0NQxAR6ocv310AdxFRi8qqJjz4+BK0tJhEF8HTaAiCAr3xxcd3wFtE1KKuVo/H7vsW9fUtomt3MBoCPz8PfPrNvfATUaitudGAJxZ+geqKJtFVUxkNAw8vF3zy08MIDvVxaN+qN+Hp2R+hrLBGkoaLmw4frX8K4THKcu9uJaTM3+rx3V+ZzeVHsVuGE0IAvHN2KSqMdQ5tt5WfkeWEAMDrWWtwUc/f2r2NPeV5spwQAHjzzDZkN5Q7tDtaUSTLCQGAf2bsx/Eq/lbibWRUl+N1GU4IAPw34zj2llx0aHe+tgYv7N4l+foUwPeZGdiUd96hbUlDI57ZsE1ybRMKYE1WLlZm5ji0rW5uwZM/b5bkhAD2qMiO3AtYeizdoW1TqwmPf7MBlEK0EwLYt56OnC/CV7sdf++NZiue+HS9JCcEsBf0yrxYjk83HnVoa7Ha8JcP1kpyQto08our8e9l+x3asiyHv729FiYJTghgj5SVlNfjvS8cO8aUUrz42hpJToj9uVFUVTfhnX9tFmX/5str0SDBCQEAjqVoqG/BG39fI8r+Xy+tRnVFo6TS7RzLoaXZhNefXCaqgNrHL65E2cVqyRomgwWv3vO14tL1v1dUR+RXhKMcVpbulfXYtnyFzeXCN0FKKZZcOii7LDwhBCtLjju0++bCUVkVTAF76folBY4njK/OnpRd+VNDGHx19qRDu2+z02T3Q2EIwZdnHGv8kJkpu0g/Qwi+Sjvt0O6n9CzJDkIbBMDXx087vNGuOp0NK8vKbgfw3ZE0sJzwjXb9qVwYLFZZ7QAogB8PZsBiswnabT15Dk2tJnkalGLVwTMwmCyCdvtOF6CmoVVWbxeOUmw5lIsmPX+begA4nnEJlysbZZXq5ziKfUfzUV2nF7Q7k12Kwks1sjVOnCpEaVm9oF3++QrkZpWClVHFlGUpcrNKcSGvQtCutKgGpw5fkFUplWM5FOZXIie9WNCupqIRh7ackfeZsxzKi2uRdsDxouOPiOqI/IpkNlxApUn4RykEBw5byo/Cwlp5bXKaylCgr4K8KcmeW7KpLAMtVhOvzYXmaqTVlQrmnTjS2FaWi3pzK69NaUsj9pcXyu7rwlIOe8sKcLm1idemxtCKrYV5sjU4SnGiogwFDfxRqmazGWvOnVWkkVNdjayqSl4bk9WGFRnZsjUogOKGRpwsKeO1sbIsfjp5RlG/oGp9Kw7mX+Id5ziKZYcyFHU9bTaasetMAe84pRQ/78tQ1CjPZLFh6ynhCWPlrgxFR51ZjsOmQ7mCNmu2ZShrXEiATbuyBE3WbUqHRkG/FYYh2LglQ9Bm07rTikqhazQMNq1LE7TZuuoUGKUay4UXT9t+Pqboe8VoGGxcelj+BX7HqI7Ir8jWiuNgFL7lrawJR2qzecfXl56GhijTsHBW7Kzk11hdlKFYg6UUG0r4b4KrLmYr7txKCMGqAv7Xse5CrvKuqoRgRR7/69iclwcryyrXyOHfOtmVX4BWi/AKXZRGBv97dfhCEepbhVfoojRO8Wucvl
iGigbhFbojGEKw8ij/53GupAqFFfWKnB1CgNUHzvCOF1fUI6egQlGnW0qBNXv4Narr9DiZWaSocSHHUazbkck73qw34tCRfFmRims1Nm/Pgo1ny8FksmLPjhxFWxIsy2H39myYTO0v0Fgbi+1r0xR102VZDod350LfZGh3nFKKrcuOKvrMuStHfWsqGmVf4/eK6oj8ipQaqsBB2R6ghjAoN/GvwItba26qhipHo8zAH7kpbqnvAA2C0tZGfo3mBsVOAgFQ0sKvUdTcKHt7qQ2OUpQ080ddipsaJJ1IaQ+WUhQ1NvBr1Dd2iMaleiGNJsWOIUspimr5NUrrGhVdH7jyeQhp1PB/VmKhFCir5b9OWXWjYg0AqKxt5t0uK6/sGI3GZiPM5vYn8KqqZkUTaxtGowV6nm2mulo9rFZljjoAWK0s6mvbd2Kbm4wwGpQ56oDdqaquaP9zN5usaKrnj/CKhgJVpfKj5r9XVEfkV8TAmhVfg4DAaOPfNmm1KdcAAIPAdVo6QIOCCj7XVptF0TYAYJ+UWq38NyCD1SJ7C6sNClyt2NoerRarYocKAPRmfg2D1aLQnbLTYhZ4r8wWxY4IAMHIjcFs7RANg4V/69LIM+lKxWThz0Mx8qzMpcJRCrO1fZ2O0gDAm+9idJAHI0mDxxHoCAfh19QwGtr/HZpaO+a+CwCGVv77+x8V1RH5FXHTKCsxDNgncDct/3U8BMbEQ+Cu5T/W59kBGsSBhoeTs+xE1TYYQuDuxF/zwd1Jp7gsOoG9ZDwfHjpdhzgJns78Gm5Oug5xdjyc+d8rN2edYsfQsYZTh2i46fg1XBWWKxdzHVeXjtFgGAJnnmPCbh3YUM3Ntf3vlmsHavAdE3ZTWNr9Og33X17DjUfD1aPjesu4KSxF/3tEdUR+RTq7d7IXKFMASzmEu/IX14n2CFKcv2GjLDq785cZj/b074AcEQ5RHvzl0mO8/BRPrvTKdXg1fPwUh54ZQhDtzV9QKdrXF1YHJ0UcoSEEcX78ryPa3xc2pRoMQVyAP79GgK9iJ0HDEMQE8mtEBYkvTMUHQwhigvnfq6hg5RqEAJFBPrzjkZ06QANAWJA3r6McFuKjKDGyDX9fdzjzlDLvFOStKIm0DXd3Z3h6tj+5+gd6Sm5C1x7Ozlr4BbRfS8TTxw3uPPpS0GgYBIX4tK/vouuQ6qiEEIR2VtYr7PeI6oj8ikwOHQhWYY6Ip9YNAwMSecdnRvRRnL/hqnHC2E78bd7nRKUq1tAyGkyL5C/ONjc26aZunZKhV67Dw8wu3RU3cWMpxW3x/K9jctducJVQUZRXI5FfY2zXWHi5KFuRsRzFban879WQuM4I8pRe6vomjX78r6NXdBgi/L0VRZA4SjFvEL9Gt4ggdAsPhJKPnVJg7vBk3vGIYF+kdgtX/N2aMzqFdyzA1wMDe8UoOjVDCMHMCfwanp4uGDksXvGpmWmTUngdGmdnJ4yb1FPxqZlxk5J5m/NpNAwmzu4DRsHr0GgYDJuQCA+B4myTFw5S9JkzGgb9RnX/Q5Z7d8Qv6oi8++676Nu3Lzw9PREUFIQZM2YgLy/vl5S8penpHSsYzXAEA4IpoYPgxPBPbAneYYj3CpVdR0RDGEwL7wNXLX84M8YzAP0DomTv52sIg6nhifDR8f+oQ9y9MDo8VkEdEYJxkV0R7MZfYtrPxQ1TY+IVaQwJ64wogYiIh06H2d17yNZgCEFyp05ICOT/3ui0WsxPTZL9eRAAsf5+6BUeymujYRjc3i9ZUQ5HiLcnBsd25n8ehGDhUHFVffnwdXfFqKRYQZvbRqZCSSDMzdkJ4/t0E7SZOzZFUbRNq9Vg8pDugjazJqYqOjVDCDB1tHCl5ulTUhWfmpk6KUXQZurM3opPzUyd2VvQZtKcvrJqiFynMa+/oM2E28RVcuaDYzlMXTxY0TV+r/yijsiBAwfw6KOP4vjx49i1axesVivGjRuH1tYOyC7+HUIIwfzI0fIeC0DLaD
EldJBD27tihslKwiRX/uZFCv/gAODergNlh+opKO6Ic6xxf/f+sjU4SnF/guMy8vf27CM78MJSigeSHWvcmZIKQuS5hhyleLCPY40FvZLhxMg7A0QB3D+wj8N8mTl9EuHspJXtjNw7pI/DFePUvgnwdHWWrXHH8F4OG9ON79MN/l5uslavBMD8ESkOe9oM7xWLTgFesiIWhAAzRibB00G/mX7JUYgK95OlwTAE44Z1h7+Dhm6J3cMQ3zVEtsaQQV0QyrOd0UZMXDBSekfJ+jwYhiC1dxSiY4VLo4dG+GHgSHnNFxkNg66JYUhIFm4K6hfkhZHTe8nWiIgLRurQrpIf+0fgF3VEtm/fjrvuugs9evRAcnIyvv/+e5SUlCAtTbj4zB+Z8Z36iXImroVc+b+v9LgLQS6O95/HhiThjmhxPVCu1aAA3kqehygPx1Gb4Z264ImEEZI02ngrdSoSfBy3iO8XHIG/95HnuL3adwx6B4U7tEsMCMa7Q8fJ0vhb36EYFh7t0C7Wzw8fjJ8gy+F5qE9fTIjr4tAuzNsLH82aDACSnZGFvZIxM0l49Q0AAR7u+GzhNBACSfkJBMD0lAQs6M+/ndGGl6sLPr1/BjQMkeSMEEIwOikW94xy7LS56LT472Mz4aTRSNJgCEH/hEg8NHWgQ1utVoOP/joLLs5OkiYmhiFIigvFE7cNE2X7r5dmw8P9/9g77/g4qqvv/+7MNvXei2VZlmQ1S5Z7773hijE1QAgBUiAhEEIJIY0AgUAahNANNu69914kWbKsZsnqvWt3tW3mvn+s5Mi2ZnaKIOR5/Xs+PJ/n8b2a7+7s7txzzzn3HL0sQ4FhCOIGBeGZR13/tggheO3lu+Dr6y6bERnhh+eeWSBp/q9eXYagEG9ZYSCWJQgK8cYLry6TNP9nv1mGiEEBsgqbMSwDX38PvPzneyQltj/52goMHhYuKwzk7Gfjht989CgYlcfw/1f1rb7rjg7nGWx/gcQ7q9WKzs7Om/77vyZCCJ4cuhxLI5yGgqs9LEsYaIgGr6R8D2MCXC8WvXoqYTa+N2SKZAZDGPxu+GrMChPOE7hVP0ychJ8mTeu5hhQGwe9GLMbymHTJjIeHjcJLI2eA9FzDFYMAeGXUTDyYKL2L8OrENPx+0mwwhEh4H87xX4yejB+mu/bq9GpRQiL+PHceWBmMp8aMwc8nTJTMmD50CN5bvggalpHMeHD0CPxq9lTJp4fGxkbjH/cuhV6jcbkw9S7yKzJT8NrS2ZIZ6THheP8Hy+Gmk86Ymx6P1++bL3nRT4gKxgdPr4Snm04yY3JaLN78wWKXHpdexYT7458vrIavp5vL19V7a0YlRePtn0lrqgcAYcE++NtraxDg5ymJQQCkxIfj7VdWwU1iF+HAAC+899a9CAnxkXR/CQHiYoPxzp/ugafAKZNb5ePrjrf//gAiowIkfU8IIYiMDsTbf38APr7ukhgeXgb86cOHERsf6rzfLjAMQxAa7ou3Pn4UARLzNgzuevz+i8eRmBEjyWDv7fD7xtdPIiRSOMn6/7q+te67PM9j8eLFaG9vx8mT/ZexfeWVV/DrX//6tn//v9R9t6/ONF/B5urjyGkvuVFxlYL2tGjnoWU0mBUyCssipyDaI0QZo6kE68pP4UxzCZiehZyn/2FoCIu54cOxNmYChnq79lL0p3NN5fj42lkcqSvuCUE4W8yzPa3mGUIwLzIZD8aNQYqfcB6CmC41VuNfBRewr6oYgNO44noZPb6GedEJ+N6wUcgMUtZOO7epHh/mXsSusiLwoDcYvc9enlLMHBSHh1NHYmy4uJtWSAVNTfh31iVsLyqEg+fBMgw4nr+x2PGUYkpMDB7KyMSkQcL5FGK61tSCjy9kYWteAewcB5ZhwFPqfChSZ0hpfEw0HhidgWlxsYoY5S1t+OxMNjZn5cNqd9zMgDMxdeSgCNw3LgOzkuIUHZOuae3A58ezsfnsFZhtdmhuYhBwPI/hg8Kwdn
IG5qbHK2I0thvx5ZFsbDqRB2O3tV/GsOhgrJmWgXmjE8Eq2LG2dJjw9YEcbDp0GZ0mC1iWcbagJ84FleN4xEUFYtWsDCyYmASNRpqh01ftnWZs3puDLXtz0NZhdjJ6Hu29jEER/lg+PwMLZ6RKNnT6qstowfad2di8IwstLcZ+GeFhvli2JBOL5gknj4rJbLJi17YsbN14AY0Nnf0ygkO8sXTFKCxcmgk3BUdzrRY79my6iG3rzqKuuhWspufzAEAYAs7BIyDIC4vWjMHClaNFE1SFZLM6sH/DOWz7+Diqy5qcDEoB+h+Gb4AnFt4/AYvumwhvFyGy/0XJ6b77rRkijz/+OPbs2YOTJ08iMrJ/l7nVaoW1T+Gmzs5OREVF/U8aIpRS2KkdDBhoRJJLAaDa3ISDDRfRZG2HlbPBQ2NArGcEZoZkwkMj/iOwcjawhIWGEX941ZhbsasmG3Xd7ejm7PDQ6DHEKwQLwtPhoxPfUVg5Z6EpsSRZAKgzd2BL5WVUm9phctjgpdVjsGcA7ho0HP568R+ajXMABNC5YDSajdhYloeKrjYY7TZ4anWI8fLH8iEpCHYTbwVu5ZzFofSsOKO524TNxfkobW+F0W6Dh1aHaC8fLI9PQZincPKr831w4CmFwcVJmbbubmwpKEBJazOMVhvctFpEeHtj+bAkRPr4iP6tvYehd8HotFiw7Uohihqb0GW1wU2rQZiXF5akDkOMv3iIz85x4HgKvYYVXeBNVht25hbiam0jOi1WGLQahHh5YtHwRAwJFj6qCwAOjoed42DQakQZZqsde7OLcKWqHp3dVug1LAK9PTB/RCISwsXDiFIZVrsDB7KKcbm0Fl1mK7QaFgHe7pidmYCkQeKbAI7nYbNzMOjEGXYHhyMXS5BdWI1OkxVaDQM/L3dMHz0UKUPCRP9WKsPh4HDyQikuXalEZ5cFGpaBj7cbpowZirRhEaJ/y/MUVpsDBr04g+N4nLtQhvOXrqOrqxuEEPh4u2HCuKHIGB4tgWGHQa91Oe/S+TKcO12Crk5nVVYvbzeMGT8UmaNjRT0zlFJYLHboXYTFKKXIOV+Gs0cL0dluBqUUXt7uyBwfh1GT4kVP8lBKYbXYodNrRMMplFLkX7iOU/ty0dlqAsfx8PJxx/DxcRg7MwUarfBzm1IKa7cdWr1mQI5Rf9v6zhkiTz75JLZt24bjx49j8GDXMfVeyXkj3wU5eAdy2nNwsPEgrhmvgaPO0sVurBvG+o/FtOBpiHJXtpPuFUd5XGjNx46a47jScQ2OGww9JgVmYEH4JMR5qWPwlMe55hJsrDqDC60lsPNOhoHVYWpwMlZEjUOST5SqYmCUUpxrvo51ZedxovEabLyjh6HBtNBErBk8CiP8xR9qUhgXmqrwWfElHKotgaWPITI1bAjuG5qJcSGDVDOym+rwaUEW9lYUo7un86uOYTEpIgYPDBuBSRHKTxj1Kq+xAZ/lZmPXtWKY7c6qmlqGwbjIKNyfloGpgwYr2q33VWFjE77IvowdBUUw9lRA1TAMMiPCcX9mOmbEDVFdSr60qQVfXsjF9twCdFqsNxhpEaG4d0w6ZibGQafAI9BXFU1t2HA2F9suXUW72VmlkmUIhkUE457x6ZiTFi9YKEyqalo6sOlUHraeuYJWo3OhZBiC+PBA3D05HXMyE1wmtLpSQ2sXthzLxdbjV9DaYQLtYQwO88eqGRmYOzYR7hLDK0JqbjNix+E8bD2Yi+ZWo5NBCKLC/LBs9nDMm5IMT4FiZFLV3mHGrn252L4nBw1NnaDUGbIID/XFkgUZmDczBd5e8r0OfdXZ2Y19e3OxfVsW6mrbQXu8WsEhPli0KAPzFgyHr686r4PJaMHBnZexY+N51FS2guedjIAgb8xflom5S0cgIFB8o+JKZqMVR3ZkY8fnp1FV2ug8eUUA/0AvzFk1GvNWj0ZQqK8qxrel74whQinFU089hS1btuDo0aMYOtR10l1f/S8ZIsebjm
Nj9UZ0ObrAgLmtp0zvv8V5xuF7Md9DmFuYbMbRxkv4sGwrWm0d/TJYwoCjPOI8o/Dj+HsQ6yk/RHGsMR9/LtyBBks7WDC31T35DyMUzyUvR7KPfKPnREMJfpu7G1XmthvX648R6xmIV9IXITNAfojibEMFXry4F6WdLWAJua0zbe+/DfL0w6sj52JSmHQDuVdZjbV4/tQ+FLY1iTIiPL3x67EzMCta3vcfAK42NeIXh/bhSlOjKCPEwxO/mjgFC+MTZTOutbTg+T0HkF1b1y+D6QmxBbq749mpE7EsJVk2o7K1HS9sO4ALFdWiDF83A348fTzWjHKd2Hqr6tq78PLX+3G6pFKU4WXQ4wczx+D+SSNkG6HNnSa8+uUBnLhyHaTnejczAJ46j/g+NGsUHp41WvYpinZjN37/yUEcySq5EebsK0KctUwMOg3umZ2J7y8dJ9sINZqteOPDgzh4ugigEDydptOyWD4nHY+vmSQ7ZNRtseEv/ziEfYfzwfP0tr45PdEvsCyDhXOG44ePTBMsriYkm9WBf/z9IHbtvAyO4/ptZkgIcZ4SmpOKJ56aJbtirMPB4d/vHcT2DRfg6Cm5fyun9zOeMjsFTz23AB4yq6NyHI/P/3IAWz46AavFfuMzvpVBAYyflYwfvbrsOx/O+c4YIj/84Q+xbt06bNu2DQkJ/zl37+PjAzc31xbw/4IhQinF5prN2Fm3U9J8Bgx0jA7PJDyDOM84yZwNlQfwSfkOiQwCLaPFy8nfx3A/6cfBvq48jbcKt0tmsITB79Pvw4Qg6YvflopsvJSzHVTCAWMGzpMTr2cux5wI6Yvfzoqr+OmZbaAUN/JHhOTMWSP445gFWB4rXlOhrw5WXsMPDm8FR6nLI8a9y9Cr42bh/mHS62ScqqrAIzu33gj3SNFzEybjsRGuT4706lJ1Db63cQssdsdtC7eQnhw/Bj+ZKP3k15XaBjz86SYYbTbJdS8eGDsCz82ZLNlQuFbfjO+9vxEdZotkxorRKXhp2UzJhkJlUzse/cvXaO40SWbMy0zAb+6bC41E13p9Syd+8PrXqG/plMyYnD4Ef/jhQmglGgot7SY89eoGVNa1Sap1QggwMiUarz+7FHqJXp7Orm48/cIGlF5vlMggSE4Mx59eXQF3iR4Yk8mK55/9Clev1go2B+wrhiEYMiQYr795D7wl5n1YLDa88vRXyLlQJqljM8MQRMYE4o9/ux/+Er0jdpsDv/vR5zh7uEDSfIZlEBLhiz9+9hiCXByN/m9Kzvr9jQae/v73v6OjowNTp05FWFjYjf/Wr1//TWK/Ve1v2C/ZCAEAHjysvBVvFr2Juu46SX+zt+60ZCPEyaCw8Xa8kv9PlBlrJP3N/rocyUZIL8NBOTyf8xmutFdK+pvDdYV4MWcbeIlVTpwMHj+/tAlnm8okMU7WX8dPzmxzGggSKLSH8+y5nThUUyKJcbGhBj84vBUOnpdkINCe/148cwA7yqQ9bPKbGvDwjq2wOhyyaqn84dRxbLiaJ2nutZYWfG/jFnTLMEIA4L3T5/DxxSxJc6vbOvDwp5vQZZVuhADAJ2ez8M8T5yXNbegw4pEPNskyQgBg4/kreGdv/4nzt6q1y4zvv7tRlhECAHsvFeH1jUckLZRdZgueeGOTLCMEAE5cLsVvPtovidFtsePp321ClUQjBHDuzC9dqcJL7+wCJ6GVgNXmwHOvbJJshDgZFFeLavHi77bC4XDdjdfh4PDKS5tQUCDNCAGceSelpY144fkNsFmFmxb2iuN4/OGFTbh88bokI6SXUV3Rghd+9AUs3a6b7VFK8edfbsS5I4XSAHAWPmuoaccvH/wXjJ39dzX+X9M3aohQSvv978EHH/wmsd+aWqwtWF8l36iioLDxNnxa8anLuR22Lvz92teKGA7egbeL17mca3JY8Pv8TbLrT1A4c1ZevbLB5cPAytnxQvZWmYQeDqV4PmuLy7LyDp7HM2e2S35o3Kqfn91xI6
FV7LU8fXwXOF5+yTgC4NmTe0U7Avcyfn5wH+w8p6j2yK+OHEJbt+sH1K/2HYLFLs/Q6dXvjhxHfZfR5bzf7D4Co1VZJ+W3D59GeUuby3lv7jqONlO3oiqjHx69iIKaRpfz/rbrNJo6jLIZFMCGk7nILnW9Ifj3znOobmqXz6DAnjMFOJ133eXcL3deRGlls2wGTylOXCzFkbPFLudu25WNq0W1sivL8jzFxewK7D10xeXcfXvzkHWpXBGj4GoNtm93bUifOHQVZ44VyWdwPMqvNWDj56ddzr1wrAhHtmdLNqb6Mmorm/HV3w/L+rvvqv73UnG/QzrWdExxKXUePAq7Cl16RfY3nFXc14UHRamxCiVd4h6LvbXZsPLK2tXzoKgyNyO7TfwhuK/2KjrtFsWMRksXTjSIeywO15agyWJSVFWWAmi3WbCvSrwFwam6SlR0tUvytvTHMDvs2FZ6VXTe5YZ6FDQ3Ka4q6+A5bCzIF51T0tyCi9U1sjwht2r9ZXHPS3VbB46XXFfMYAnBVxdzRee0GM3Yl1usuNQ5yxCsP3NZdE5XtxU7zl9VxzghzrDY7NhyLE9xWXiWIdhwKEd0jsPBYdO+HMXfK4Yh2LhXnMHzFJt3ZCnuE0UIsGl7lujCTCnFlk0XFDf9oxTYsvmCy3u9bf05xb1jeJ5ix9cXwLnw7mz/7JSsAms3MTiKPevPw2qxK/r775LuGCIK5eAdONJ05LaEUTliwOBo01HBcY7y2FlzQtHC2iuWMNhdK+x+ppRiQ6Vry90VY1PVGdE5X5Sdc1lYTZxBsK5M3FX/afFFxT1dAGcy4yfFF8UZBVmqGATAv69eEn3QfpaXo4pBAXySmy264KzLyVXF4CnFF9mXYeeEH7QbLuWpOpHEUYqvs66g2yb8oN18/opiDxjgrHeyPasAnd0WwTm7zhfAZncdLhBjHMwpQXOncGuLgxeKYZLgyhdjnMkrR01Th+Cck5fK0NZpVszgeYrcohqUVTULzrl0uQJ1DR2Kn1iUAmXlTSgoEt6gFVytxfXrTao+9/q6DmRnlQuOl19rwNXLVar6BbW3mnD2hLAHqa6yBZdOFoNX0WfHbLTgxB5xY/1/QXcMEYUqNhbD6HDtmhYTDx5nW84Kjpcaq9Bsa1fF4CiP403CbsgKcxMqzU2qGt1ylMfRhitw8P0/rBu6O3GlvVaRF+E/DIpTTaUw2a39jnfaLDjdUKFqh89TiuyWGjR29/+52jgOByqvqWJQACXtLSjv7D/kQCnFrpIiVQwAqOnqxNUm4ZDDjquFqhmt3d3IqhFeMHbmFSrefffKZLXh7PUqwfHdOeoZNgeHk4XlguN7s9Q36uR5iqN5pYLjB84XqTLaAGfC55FLwl7Dw2eLVHcEZhmCwyLhmaMnClXXvGBZBkdOCudMHDtWMCCMY8eE87VOHLqqqlsv4PQgHT8o7Jk8fSBf9bF+whAc333HEPn/Vl32rgG5jtFhFNwdt9vUGTq9svA22Pn+d5Vt1oFh8KAwOvrfVbZaB67JYZut/x1di1X5Tu+2a1n6v1a7tVv1oterZgFGt8MBq4iXQY5auvtn8JSiwyLsAZDFMAvf9zbzwCTStZqEGS1G9Z87IQQtIozmTpMqQx0AGIZBm1H4fjR3mGTnCdzOIGjrEma0tJtU7fAB571qF/GqtHWYVXXSBZzGeHuH8PtobzOrvlccx6O9Tfh9tLeZVBuGPE/R1iz8fG1vNaruLUN5itam//1WKHcMEYXqLVamVrzIGZKBYgCAQyDPRGn+Sf+M/l+vEHsgGVKy+VUzBvCku0Pg9Qr9uxLZBa5FqZpg380Se70Ddb+E3gcAxXkbfUXgrMAqyFC5sALO3IdvmgGIM8TGpIq6uA7nGKD3IZJbMWD3yhVjAL6+dhEGP1D3SkXY8LuiO4aIQrlrpDVaciUDY7jRA+ZWeboo7y5VLGFgYPov4uOlHRgGAHgJvF5vrbziPmLyFni9PrqBYwhdy0enrs
KkFIanTqcik+YWhr7/18syDNxUVha9wTAI3xMvvbqqn1IY3iJjUsVTCm834e+Pt4f67xbHU3iL1Mfw8VT/O6SUwlukyZyvyuqlvfISuR9eXgbV4R9CCLxECoJ5eupVeysYhsBL5H54ehrkt7HuR2IN+Ty8Dao9OwC+84XNpOiOIaJQQzyGgCXqSlEzYJDoLVwMbIhnFHSMujLRDBgkeQ8R/OHGeATDU6PuQUtAkOAVDj3b/2uNcPdFoF68D4xrBhDjEQA/gb44gQYPRHv6qn52hLh5IcLdp98xd60OSf7BqpJuAcBXb8AQ3/47bTKEYGR4hKpEUgBw02iRFBgsOD4mKko1Q8swSAsTbpQ4dnC0rNbx/YkhBCOihSsEjx2qngEAIwcLM8YkRKteXCmlyBzSf48tABg5LEp1vgDHU2TECzMykqIUnzS5weB4ZCQJM9JTo1SHfziOR3qqcMXm4emDVHtFeJ4ibXi04HhaZoxq7w4hQNqIGMHx1NGxqt8HwxAMHztE1TW+C7pjiCiUl9YLo/1H3+iaq0Q8eMwMnik47q4xYGbIGLAqGYsjJguO61ktlkaOUbW4UlCsGjRBcFzDsFgzeJTqBXxt7BhBg4oQggfiR6q6PgOC++MzRctlP5g0QlXSLUMI1iakizbeeyAtQ1VYgyUEq5JS4KET9kjcNyJdNWNRUiL8RCokrx09XFXohGUIZiQMQYi3sBG7epxKBiEYGxeNQUHCTQBXTkhTtbgyhCBlUAgSo4QNw6WTUlXtwAmAmDB/ZMQLG1TzpyRDw6rbPAUHeGHscOF2CDOmDIObQd3mycfbDZPGCVeEnjgpQXJlVCG5uWkxY6ZwtebMcXEIClFXzVujYTFrUbrgeMrIwYgcHKTaOJy3arS6C3wHdMcQUaEZwTNUHd8N0gVhmPcw0TkLwife1u9Fjny1XhgTkCI6567IMaoWVw+NHjNCxMujLx8kv69HX+kYDRZHuWAMToPWRRdiMTGEYGWseI+TxbHD4KFVHnKglOKeRHHG7Ng4+BuUP2g5SnFvqjhj0uBBCPdW3qCLoxT3ZogzMqLCERfkr3h95XiKtaPFGYnhQUiLDlXsTeAoxT0T0kXnRAX5Ymyicq8ITynWTBEv7R/k54mpGXGKvTsUwN0zM0R/Y96eBsyZmKiYQQjByrkZovfBzaDDgjlpihkMQ7Bkfjq0Il1ptVoWi5eMUPx5MCzB/AXpMIgYTCzLYPGq0YqfWSzLYPr8NHiJGEyEECx5YILiJy/DMpgwOwX+wd/N9idydMcQUaFYj1hk+GYoLmq2KmqVYH5Ir2I8wjEteJRixvdil7gMIYW7+2N51FjFC8bjcXMFwzK9CjJ44aE46f1JbmMkTIGni1wTb50BT6VMVMx4LGksAg3i8VY3jRY/z5yk6PoEwAPDRiDSs//QT6+0LIvnJgh7sVwxViQmI84/QHQeQwiem6qMwRCCOfFxomEZwPmgfXa2csaE2GiMGey6qeJP5yn7PFiGIH1QGCYnum54+MSCCWCI/F8hyxAkRgZhVrrrhoePLhkLDcvI3iGzDMGgUD/MH5/kcu79d42BXqeVbbixDEFIgBcWz0h1OXf1XaPg4aGXbSgwDIGvjzuWLRrhcu7SZSPh6+uuiOHhoceKVWNczp2/LBNBod6yjwoThkCn12D1g66fRTOXZiIyJkh2UTNCCDQaBmuemCHr776rumOIqBAhBI/FPobBHoNlGwp3R92Nkf7SQgk/jr8bqT5xshlrB83DjBBpbrufJCzC+MBE2Q/aewZNxvLocZLm/njYdMyT0byuVysHZeKRodIMjB8mjcfq2HTZjMWDkvHT1CmS5j44bAQeTpYXBiIAZkbH4cUx0yXNX5mUgh+NGiubMT4yGr+dPkvS/PmJ8XhuqrxFnCEEw8NC8eaCeZLmTx46GC8tmH7j9UllJIQE4p3ViyTtSEcNicRrq2aDEHmM6ABf/PWhpZIa0qXGhOIPD853dnKVCGEZgh
BfL7z3+F3QSUgOjosMwutPLgbDMJINBZYh8Pd2x3vPLIeb3nVIJDLUD3/6xVJoNIzkRZxlCLw8DHjnVytEE1V7FRzkjdd/vRI6nUYWw82gwxuvrYKfr+vkSz8/D/zxjTVwc9OBlVjvg2EIdDoN/vD63QiW4EXw9HLDH/56Pzw89ZINBYYh0GhY/ObtexARJb4ZAACDuw6//ehh+Pp7SDZ4GIaAZRm8+Nf7MThBfhf376K+0e67avW/0H0XAKycFR+UfYBL7ZfAgBEM1xAQMITBgzEPYmKgvJ27nbfjneIvcaTxIljCCB67JT0da78/ZBkWhsvbjTp4Dm8VbseW6nOijN5cj8eHzsXaGOkdUgHnceE38w/gk9Iz/bZqv8EgBJRSPJ4wBT9MmCqLQSnF23nH8V7+KTAijF7+9xPH4tn0abJ2iZRS/D33HP6UdQKAcBv1XsZ9iel4ZexMaGTWDfj4chZeO3HU2ZzPBWN5YhJ+N302dDLzAL7OvYKX9h+Cg/KC1Sp7GXPjh+KNBXNhkHnqZldeEZ7fuu9GJdb+MCxDwPEUk+Ni8OeVC+Ah89TNkfxSPLtuNyy9rdpFGKOHROHt+xeKnpbpT2cKK/CzD3fCZLH126q9LyM1JhTvfH8J/L3knbDLLq7Gz97djk6TBQwB+ktP6WUMjQrCOz+5C0F+8pLBr16rx89f34K2DjMYQvr9bvUyBoX7463nlyEsWNyTd6tKy5vw7Etfo7nFCIYh/ebZ9P57WIgP/vSblYiK6D+JW0hVVS147tmvUF/X4ZIRGOiF37++GrGxwrk6/am+tg2/+tEXqCpvBsMS8NztDMIQUJ7C198Dv3n7HsQnCefq9KeWhk689Oi/UVZYB4Zl+q22SnqeiV4+bnjp7w8gZaRrT95/U3LW7zuGyACq3FSOQ42HcLblLBz05uZp/jp/zAiegUmBk+ClVR6bv26swe66kzjYcB62W4qU+em8sTB8EmaHjoO/Tvn9Kjc2YnP1WeyouQgLd3PZaV+tB5ZHjcXiyNEINsh7MPVVhbEFG8ov4uuKLJgcN1dL9dYacHfMKKyMyUS4u69iRrWxHeuuZWPdtWx02m8u4OWl1ePuIem4J24EBnkJJyq6Up2pC+uKcvB5QQ5arTcXYXLXaHF3fBrWJqYjztf17khITSYTvrqah89ys9F0SwExg0aDFcOScW9qOhICAhUzWsxmbMzLx2dZObc1s9OxLJYmD8M96WlICQ1RzGg3W7D18lV8di4bNe03F2HSMAwWpibgnlHpSI0IURyb7+q2YkdWAb44lY2K5vabxliGYHbqUKwZn46MmHDFDLPVht0XCrHuWDbK6ltvGmMIwbS0Ibh7cjpGDo1UzLBY7dh/vghfHcxGSVXTTWOEABPTYrFqRjpGJw1SnCthtTlw5Fwxvt6TjYLS+tvGx6QNwoq5GRibMVg0gVtMdjuHE2eKsWl7Fq4U3N74LyMtGssWjcD4MXGSPFP9iXPwOH26GFs2X8TlnNv7aiWnROKuZSMxcVKCaO6JKIPjceF0CbavP49LZ2+vkJuQHI4lq8dg0owk6CR4pvoTz/PIPlWC7Z+fxoWjRbcd7Y1NDMOSByZg8vzhMLgNzNH4b1J3DJEBFE85VJuL0OVogYO3Qc96INQQC1+dsFVtcphQYa6A2WGGhtHAS+OFwR6DBfNBeMqj3FSKNnsLbLwNbqwbItyiEaQXfuibHd0oMVbB6OgGSxj4aD0R7xUtmA9CKcU1YwWarK2w8Fa4swZEu4cj3E2MYUVhZzU67d1gCIGP1gPJPlHQCCSEUkpR0lWD2u4WdHM2eGgMiHYPQoyncC6BhbMjr60GHbZuEAL46tyR6hsBncipkuLOelSaWmByWOGh0SPaIwDx3sIMK+fA5ZZatNu6QSngp3dDmn8YDBrhB0ZJRxNKu5phctjgrtEh2sMPSb7CC6Sd55DTVIc2i7P6qq/egLTAULiLJLaWdrSgpL0ZXXYb3DVaRHr6IC
0gVJDh4HnkNtSjpdsMjqfwMeiREhwCL5H6JuUdbShsaUaXzQo3jQbhnt7ICAkTZHA8j7z6BrSYzbBzPHwMeiSHBMPbIOw5qO7owNWmJnRZrdBrNAj19MSI8HBBDxPPU+TXNaDZaIaN4+Bt0CMxNAh+7sKJffWdXbhS14guiwU6VoNgLw+MiAoXXCAppSioaURTlwlWuwNebnrEhwUhwFPYO9HUaUReTQM6uy3QsiwCPT0wIiYcWgEPE6UUxTXNaOwwwmKzw8tNjyFhAQjyEfZOtHSZkVdRhy6zFRqWgb+XO0bERkCrEV4gr1U3o6G1E91WB7zc9YgJ80eIv/Bmpt3YjdyyOnSaLGAZBn5ebsgYGgG9iBervLoFdU2d6LbY4OGuR3S4P8KChJ+5XSYL8kpq0WG0gBACXy83pCdEwCCyCFfVtKK2rh3mbicjMtwP4WG+gvPN3TbkXq1GR0+1WB8vN6QlRcJdZBGurW1DTXUbzGYr3Nx0CI/wQ2SksJfFYrEjL68KHZ3d4HkKb283pCRHOOuICKihrh3VFc0wGZ2MkHBfRA8OEpxvszqQl1OBjnZn1VkvbzckpUbB20f4+95U346q0kaYOi3Qu+kQHO6LmHjhZ5zd5sCVrAq0txjBOXh4ehuQmBYF3wB1ZRPU6I4hMgAyOzqR3XYAF1p3odN+e5OnOM9MjApYgCGeGWAU1hMxO0w403IcR5v2o8XWdNt4glcypgbNRopPuuKaJWZHN441ncfuuiOotdzeeyTJOw7zQqditP9wQQPDNcOKQw3Z2FR1AuWmhn4Y0VgWNRGTg9OgY5QV0rJwduytzcOX18+isPP2/ibDvMNxz+CxmB2eAoOLxFkhWTkH9lYX4LNrF3C5tfa28XjvINw/dBQWRafAXaNsR2LjOOyvKsbHhZdwsfH2HWKstz8eSszE0thkUQNDTA6ex8HyUnySl4Uztbf3aYn29sWDqRlYnpAMH72yGjIcz+N4eTk+zcnBifLy28Ig4V5euC89HStSUuAvcsRXTDylOF1Wic8v5uBocdltjGBPD6wdlY4V6ckI9FRW1IlSivNl1Vh3NgeHrpbeFqLw93DDmrHDsXJUKoJFjhG7YuRcr8X6k5exP+f2TsE+7gasmpCG5eNTEean7DlHKUV+eQO+PpqDveeLbqt+6ummx7JJKVg+JQ2RQb6KGABQVN6IzQdzsOfU7Y0A3Q1aLJ6airump2FQuLwQS1+VVTRh694c7D50BVbbzZ5lg16DedNTsHRuOmIHCS/+rlRZ1YLtO7KxZ+9ldHff7FnWalnMnpWCJYtGIC5OuQewtroVu7Zcwu5tWTAZb/b6ajQsps1JweLlI5EgM4zTV4217dj99XnsWn8eXbeUxWdYBpPnpGDRmrFIyohWXQROru4YIip1teMktlT/GTx1CJZfJ2BAwSPMMARrYl6Cp0aeez+vIxv/KnsXDmoXZPTmm4QZIvBE3LPw18lz7+d1FOGPhf9ENyfcV4QBAQ+KUEMgXkx6CqEGeT/uK+3leP7yh+hydIOg/7h8LyNY74s/ZTyKQR7yftxX22vwxPnP0Goz3biWECNA74m/jb4fiT7ykrhKOprw0Il1aOjuEmT0vj9fnRven7gaGQHChZ36U3lnG+4/uAGVxnbBuHzvo8JDq8P705ZhfOggWYyark7cv3MjSttbBXNwehkGjQZ/m70Y0wbFymI0Go14eMsWXG1qEs3zIXCeAPrzvHmYGy9cF6I/tZq78fj6bciprruRq9CfGOLMifrtollYmub61EhfdVms+NHnO3CurMolAwBeXDwdq8eIHyG/VWarHb/4dBeO5193yaCgeHrxZNw3Vd5Rd6vdgZf+vQ8HLxWLMtieXInHFo/DIwuEa/L0J7uDwx8+PIhdJ/JdMjie4r6Fo/D4qomywkYcx+MvHx7G5t3ZkhjL5mfgRw9Pl3Wqhecp/vXvY/hq/VnBnBLAefyW43jMnZOKp38yFxoRj9WtopTii4
9O4NMPjjp/5y4YU2Yk4ecvLYVOL32TRinF5k9O4V9v7AUhcMkYPSUBv3zjbhjcv72Qzh1DRIWy2w5gR827kuczYOCpDcDDsX+Cl1baLuBi61l8VP5XABA0Qm5leGi88GzCrxGgl5YHcLE1D38s/Cdoz/9IYbhrDPh96s9FwzV9ldVagmdz/gWe8pLqkDBgYGC1eDfzCQzxCpfEuNxWiUfPfAQ75SQ1nGMJgZZo8MG4h5Dm5/roJwBcba/HmiOfwsrZJRX5YkDAMgw+mrQGY4JjJDGudbRg2Z7PYLLbpDF6jor+a9pyTIuUVjmxqrMdSzevQ7ulWxKDwJkA9+6shVgwJEESo8FoxLJ169BkMklmAMAf5szBimRpJ6ZaTWas/ugr1LR3yiq69tLcaVg7Kl3S3M5uC+795waUNbXKamT49JyJeGTKKElzzVY7HnnvaxRUN8piPDprNJ5cIFwgsK9sdgeeeHszsq/VyioXfvf0dPxstbQkcIeDw8/f2oazeeWCycz9aeHkZLzw6GxJDI7j8cqbO3DsdLHkuhoEwJTx8fj1zxZLMngopfjTG7uxd3+eRIIzH2f0qFi89uoKSQYPpRR///M+bN1wXgaDICU9Cr9/517odNKMkY/fOYCv3j8qmcEwBHFJ4Xj9o0e+NWNEzvp95/huH103XsbOmvdk/Q0PHl32FqwrfwWcQIfbmxima/i4/O+SDYRehsnRhXev/QFWEe9Gr8pN1Xij6ANQkYZ6/THMDgt+nf8XmByuO5pWmZvwy9yPwEk0QnoZFs6Gn+W8jzab6+7FteZ2PHHuM9h5aUYI4CxQZeMdeOL8Z6jv7nA5v9lixEPH10k2QgBnp2GO5/H9UxtQYWx1Ob/DasF9B9ZLNkIAZ1iCpxQ/OLYVhW23h+1ulcluw707Nko2QgCnd4dSih8f3IWchtvDXbfK6nDgwc2bJRshNxgAnt+/H2erbg8T3SoHz+Oxr7bKNkIA4Dd7j+BYyXXXr4lS/PiLnbKNEAB4a99J7MktksR4/rPdso0QAPjgwHlsPSfcPr6vXv30ALKv1cjuWfLV4Rx8dThH0ty3Pjsq2wgBgJ3H8/HxdmkL8gdfnMBRGUYI4PxeHT1djPc/Py5p/hfrzsgyQgDnqajzF8rw7l8PSJq/dcN5WUaIk0FxJacKf/7dDknz92+5JMsIAZwek2tXa/GHZ9fL+rtvS3cMkT463PA5lNRZpuDRYC1HQedpl3N31G6EkraOPHg0Wutxoc014+vqPeCo/FqpPHi02NpxqME1Y135Ydh54dCVMIOiw2bCtuozLud+fv00zJxVdtVXHhQmhwVfXHfN+OzaRbTbpC/efRlWzo4Pilwzviy5jAazUTaDwnmk+q95rhmbi66iorNdEYNSincuuv7M9xQXo7i5WXFp+D+fOuVyztGS68itbVDMeOPwSZeL8rmyKpwrq5JtIPTqrX0nXZZ8v1LZgKNXyhQz/rLzpMtuuWW1LdhzrlC2gdCrf2w/DcstORi3qrapA1sOXVbM+GjrWZjMVtE5be0mfLX1gjIAgK+2XURbu0l0jtFkwefrXH/H+xOlwI6d2ahvEN/YWC12fCLTQPgPg+LQ3jxUXhffdDjsHP791j5FDJ6nOHukEEV51Yr+/pvUHUOkR/Xd11HTXQSqsJw6AYPzLTtF5zRZG1DYdUVxWXgCgiON+0QftK22dpxvyVHMoKDYXX8UvEANEQDosptxoD5LsM6IK/Gg2Fp9Cg5euH212WHDlspLihckjlJsqryI7luOH/eVjeewrvSS4sWCoxRbynPRaRP2UnE8j08KLykuoc9Rit0VhWjqFn7QUkrxcV6W8lLqlOJo5XVUdYo/aD/JzlZcSp2nFBdra1HcfHvid199dj5bcTM+CqC4sRm5tbcfRe2rL87kqGqUV9PWibNltx8T7av1J9UxWrrMOJZfJjpn47FcVQxjtw0HLxWLztl2OA9EBcNu57DnVIHonJ0H8/qtlSJVPE
+x69AV0TkHDuTD5sLoEhMhBLt25YjOOXboKswmcaNLTAxLsGPzJdE5Z48Wor1V3OgSE8sy2PnVWcV//03pjiHSo0ute1Q1sKPgUd1dhAaLsGv4RNMhlQyKOksNykwlgnMONpxW0TXGqSZrKy63FwqO7627qNgI6VW73YSTTcIPj721uTCLGBFSZHJYsa9WmHGgphDttm7BcSmy8xy2VOQKjh+vvY46s+swlJgoBTZcE2ZcqKtBaXurqs+dIQTrrl4WHL/a2IjchgbFRhvgzN9Zd1mYUd7ShrPlVeqa8TEEX1wQZjR0GHGkoEx1M751Z4QZ7aZu7MkqUsVgCMGXJ3IEx80WG7aduqKKQQjBV4ezBcftDg6bD19W1fCPAtiwL1tw88RxPDbvFh6XxKAUm3dlCXaypZRi89aLiq8POI2d7TuzYbcLb562bTiv6mQKz1Hs25GNbrPwc2/7F2dUdYLmOB5HduWiq911+P3b1B1DpEflpjxVDex6VWUWXsBLjIWqGQQMykzCu5iCzmuywyW3iiUMCrtuL9rTq7x213F4KYwrHeWC49mtlWBd9OGRwshpFd65XmquhkYlg4Agq1nY1XmxqUY1gwfF+Qbh/IoL9TWKvQi94ijF2X6O+vbqYk2Nyt7JPYxq4XuVXX37kWnZDJ7ifIUwI7eqTpUx1cu4VC7MKKhudBlWcSW+58ivkEprW1yGVVyJUoqCCuHXWlXfhi4VO/xeVda3wSgQnmluNaK51djvmBw1iVzHaLSipqZNNaOry4Ka2v6v43BwKCmqU2VQAc66JuVlt5dZ6NXVnEpVhiHgDO+UFKj/rQ2k7hgiPbJwyt1dvWLAwMIJ/6jMA8IgMDuEr9MlMiZdRDRhtdNuVm3sAIDRIeyN6LJbVHtdeMqjyy7OUPs+eFBRr4pY2EaO2qwiDKtFccikrzoswq+102pVXF1TMsNiHZD30WUVXjw7LeoXVgAwWoV3rV3dA8OwOzjYHP0bG10u8i7kSOhaA8oQMGi6TAPz+xC7ltE4cAxjV//XurVOyDfBsNkccIh4ZOTI1Dlw92QgdMcQ6ZHSomR9RQGwRPj4ldKiZHIYmgFgOK8jzNAqLHx223VEGYzijsO9IiCir9XpqVC/8OlFertoGPndVPuTWP8YHcsOgFkIwQqivWMDcdLfFUOttwKAaD8fMf53jQFA0PhTWg69P2k13zxDI8gYuHslVJ1WiK1EGoES8d8GQ24XYCWM/5buGCI9kluQrD9R8PDQ+AqO+2j9VC+uPDh4a4V7vPjrfFQzKOXhqxMuIe2v91YdNgEAX51wpUp/vafq3TEhBP56YUagQVk1zr5iCYMAvfB1Ag0eqnIeAGe+QIibyPtwcwfHq/MeMYQg2EPkfbi7q34fBBBneMhrDqfkOoEiJd7lyE+EESCzyZ2QvN30goaIv/fAMLQaFh6G/utK+Puo/30Azu+Wj2f/1XX9fAbmfQCAr8A98fZ2U5VX0Vd+fv0z3D30A7a4+/n3f99ZloGnl7JKyLfKN2BgPtuB0h1DpEepvlOgdnfMEi3ivYSLHY30G6c6FEDAYLivcAv6CYEjByTcMDZghOD49ODhqsMmHOUxLWS44Pic8NQBYcwJTxEcnxeVNCCM+VHCFT0XDEpUvcvnKcWimGGC43Nj41WXb+YpxZI4YcaM2FhoByA0szgxUXBswpBBcNMqK8/fK4YQLE4Vfh8jB0fAR2bH3f4Yi9KF30dydAiCRfrNSBHLEMwfKfw+hoQHIDrEV9UTi2UIZo8U/u6EBXojcbDyBoS9jEkjYqEXKNTl4+2GEanRqgwFhiEYkRYNH+/+jR29XotxY+NUMxLiQxEi0IGYEIKpM5NVeS0IASIHBWCQSN+aaQuHg1HpGQkM8UZ8iryq0N+07hgiPRruN0M05OFKDFik+UyFgRV+AI3yHwc9o6x/iJPBIMN3FHy0voJzRvsPh5dG+UPQyUhCqEG4guuogAQE64Vfg2sGQYpPDGI9hc
uwZ/hFI9YzSPGDloBgqFcI0nyFq6sm+YZiuH84GIUUAiDSwxcTQoRLpMd4+2Fi2CBVyaQBBnfMjBoqOB7i4YnZg+NUMbx0OiyME66u6uvmhiXDhqli6FgWdyUJG20eOh1WZCSrYhACrMgQNj51Gg3uHpOmyttGKcWq0cKl3lmGwd2ThqticDzFqgnCDEII1kzPUHz9G4xp6aJzVs3OUBWS43iKFbPEX+fyBRmqEjB5nmL5fOGNEwAsXZKpmnHXUuENIAAsWj5S8OSOFFEAS1eOFjX8Ftw9BrwKBiEEi+8ZN6BhnoHQd+vV/BflxnoizWcqiMJbwoPDyID5onN0jB4TAqcpPsLLg8eUoFmic7SMBnNDJysOz/DgMS90iugchjBYFjVRBYNiWdRE0TmEENwzeJxi3w4FxT2Dx7rczd0/dLTiGh8AcH/cKJcLzoOJIxWHNRgQ3J8wQjQnAQAeSMlQzGAJwZphaaLdiAFg7fDhqhjLkpPhpRc3xNdkqmPMHRaPABchnpWjUxVdH3Du8CcnDEaEi+Z0d41NUbwDZxmCEbERGBIq3ltq/thh0Os0in6FDEMQHxmE5Bjxdg4zxsTDy0OvKM+JIQQRwT4YmSzeamH8qDj4+3koMtwYQhDg54Hxo8TbIGSkD0J4mK+iz4QQwNPTgKlThL1gAJCYHIHYoSGKGXq9FjPmifcyiokLQUrmIMVeEZZlMHtZpqK//SZ1xxDpoxmhD8BHG6jIUJgYtAphbq57giwIW4ZgfagixtSg2RjqJf5jAIClEbMQ4xEpm0EATAsahxF+wjvKXi2LmogUnxjZ3gQCginBaZga7Lp52F1RIzAucIhsBgOCiUFDsTjS9Y5xYVQyZkckyGawhGBkYDTuGeL6Rz0jcgiWx6bIXjBYQpAcEILvJ492OXdseBTuS05XxIj19cdTI8e5nJsWGoofjJLWZ+VWRri3N56Z4Lp/ypBAf/x0mrQ+K7cyAj098NysyS7nhvt647kF4sZ2f2IIgbfBgF8tnu5yrr+nO15cNVMRw12vw8t3i284AGdH3VcenCPbjCaEQK/R4NXvzXFpqOt1Gvz68XmyNx2EOBe9V5+Y75KhYRm88vRCECIvOE4AEIbg5acXukysZRiCF55fDJZVkjxO8MvnFrnsA0MIwbMvLYVWp5EdzqIU+PlLS+Dh4dpj/pNXl8HgplVk8PzolaXwFchB+W/qjiHSR+4ab9w3+DV4aQNkLeKj/BdgWvBaSXPdWHc8NfQXCNQHy2KM9Z+EFZH3SpprYPV4cdiTiHIPlbXAjg3IwA+G3CPpR6RjNPjd8Icw1CtSMoMAGB2QgF8mrQEjIdlVw7B4c+QapPtHS34QEhCMCIjBnzJXQyPhdA9DCN4csxQTQmIl3ykGBCl+YfjHhFXQs67DeYQQ/H7cXMyOFg6v9Pe64n0D8fGMlXBz4anoZbwycTqWxkvvQMsSgkE+vvhs4Qp46aSFDJ+ZOBH3pEnvQMsSgjAvL3y2YgX83PqP4d+qxyaMwiPjxN3gtzICPN3x8b3LEewlLSx57/gMPDXTtfF1g8EQ+LgZ8OHDy116Q3q1dEwyfrbUafBI+W6xDIGHQYe//2AZYoKlJc/PGhmPF+6d4VzEJUAYhsCg0+Cdp5ZiaKS0Ttvj02Px4mNzwDBEMkOrYfH6TxcjeYi0LtgZqdF49dleQ8E1hBDiNHR+vggZqdGSGMOGheM3v14OrYaVtIgT4nwvz/9iIcaOkdZ4MnZoCH771hroDRrJDEKAHz+3AJOnS/vtRsYE4ncfPAQ3Dz0YVsIH0jPl+8/Ox+y7xENY/y3d6b7bj0yODuyp/eeN3jG3ln0nYJwnZFgfTA5eg5H+82RbwGaHCRuqPsXFtjP9NsD7D8MTc0MXY3qwfEY3Z8FH1zfiaNM58PT2BngEztbj7qwblkTMxLKIOZIMhL6ycnb8/doO7K4939N75mb1MtxYPVZETcIDg2dJMhD6ysY58JeiA9hQfgG2ns
aCfTmk5/83sFqsHjQaTyXOhJaRl+/j4Hn8Jf8YPrl2HmaH/cY1+zIA59Hl1bEj8GzadBhYeYmVPKV4L+80Psg/jy67DQzIbWEhAufR0OWxKXhx1Ax4aOV1yqSU4v2cC/hb9nl09NQXuTVZliEEDAgWD03EyxOnw0cvL3mTUorPcnLw7tmzaO3uFmQAwLyhQ/Hy9OkIcJd/OuLr7Ct45+hpNBlNYAm5LWTDEAJKKWYkDMFL86YjRKIR0lc7cwrx530nUdfRJcqYFB+DF5fMkGyE9NXByyX48/YTqG7p6Le9fe+/jYmPxgsrp2NQkPwTfCfzruOtDcdQ0dAmysiIi8Bza6cjLkJaF+++uphfibc+O4Ky6hZRRsqQMPzswelIHCyti3df5RXW4O33D6G4rAEsS8BxtzB62trHDwnBTx6dgdTECNmM4uJ6vPPefhQU1IoyBscE4okfzsSIjBjZjOuljXj3T7txJafyxvX6Y0QOCsAPfjwbo8dL36T0qqaiGe++uh05Z0vBsMxtuSO9/xYa6YdHfjYPE2dJ6349UJKzft8xRETUZW9BVtt+5LQdhNHRDo7aoWPcEOYWh9H+CxDvPVpVgquT0YFTLcdwuvko2u1tcFA79IwBEW7RmBo0C+m+o6CRuajezjDicOMZHGg4hWZbG+y8HQZGjyj3MMwNnYLxgSOgY9SdVuiyd2N//UVsqz6DBksbbLwdekaHKI8gLI0cjxkhGTCw6tpPmxxW7Kq+jPUV51FtboWVs0PPahHl7o9VMaOxIGI4PDTKk4EBZ4+bnZX5+KL0Iq53tcLC2aFnNYj08MWa2EzcFZMKL626UxcWhx07KwrxWWE2Sjqa0e1wMsI8vHDP0HSsjEuFr16a90BIVs6BvWUl+CQvG0WtTTDb7dCxGoR4eODuYWlYlZiCQHd1Llo7x+FgaSk+y8nBlYYGmO12aFkWQR4eWJmcjNWpqQj2VHd6xMHzOFZyHZ9fyMHlmnqY7TZoGRb+Hm64Ky0Zq0ekIsxH+Ki5FPE8xcmScqw7k4OsilqYbDZoGAZ+Hm5YnD4Mq0anIdJf+Mi8FFFKca64El+dvIwLJVUwW+1gGAIfdwPmZyZi5YQ0RQbIrYzskhqsP5KDc1crYbLYwDAEXu56zB6ZgBVT0hAbLp53IoVx5VodNh7IwenL12HqtoEA8HQ3YProoVg2YziGDpLmaRFT4bV6bNmdjRPnr91omufhrsek0XG4a34GEuNCVTNKSxuwfUc2jp8ogtFkBaUU7u56jB0zBEsWj0DSsHDVp9Eqrjdh56aLOHYoH12dFlBK4eauR+boWCxeOQqp6dGqGTUVzdj51Xkc3XUZXR3d4Hkebu56pI0ejMVrxyF9TKxqhhLdMUQEZOOMuN61Gy2WfNj4TrBEB4MmAIM8ZyPIMNzlh0UpdTnHxplxtfMwasz5sHJGsIwW7qwPErynIMp9gBi8FVltp1BqLICZM4IlGnhovDDcZwzivVJdejWkMew413IR+Z0FMDpMYAgDL40nMv3Ske47MAw778Cp5lxcbC1Ap90MQgi8te4YG5CCsQHJLgvASWE4eA7HGwtwsrEIHXZn1VlvrTvGBcVjWkiSS8+JFAZHeRyrL8Hh2mK02UzgKYW3zg0TgmMxJyLJZfhGCoOnFCfqyrCnsght1m44eA4+OjeMCx2EhYOGuQzfSGFQSnGmthI7y4rQYjHDznHw0RswMjQCS+OSXHpnpDIu1NZgR3Ehmswm2DkO3noDMkLDsDQxCd4uklmlMnJq67EjvxANRiNsDg5eej1Sw0KwLDXJ5fFdKQwAyK9twLacAjR0dsFid8DLoMewsGDclZEEfxcJs1IZxXXN2HYhH3VtXbDY7PA06DE0LBB3jU5GoLe4Idn7aHfFKatrwfYz+aht7YTZaoeHQYfYsAAsHZeMED9xI08qo7K+DTtP5KO6sR1mix0ebjpEh/pi0aQUhAeJG3lSGXWNHdh5OA9VtW0wddvg7qZDRI
gvFk5PQWSYuJEnldHY1IU9+3NRXtkCs9kGNzctQkN8MG9WKgZFixt5UhmtLUbs3XUZ10sbYTZZoTdoERzigznz0zB4SLDo3/ZyXDHaW004sD0bpYV1MBkt0Bu0CAz2xoxF6Rg6LNwlQ4ruGCK3XsdWicL2L1DWtRs8tfeEC3g4AwcMKDj4aAcjwXc1Yr0XgVHg5eiw1eNC60Zcad8PB7XeCK04gywMeHDw1YZjhP9SDPebD5bI90C021pwtGkXzrYchpW39FzX6Y7r/b/9dUGYFDgXEwJnQcvI90B02Duxu24/Djceh5nrFmD4YXbINMwKmQ4DK98D0Wk3YXP1UeysPYUuh/kmBksYcJSHn9YLiyImYmnEFHho5HsgjHYLviw/ha8rz6LNZrpx3b4MX607lkePwZqYCfDRyQ8dmBw2fF56Hl+UXkCjpatfhrfWgNWDM/Fg3FgEKCigZnHY8VlxFj4puogaUyc0Pdd1Vth1hhI8tTqsjkvHo8NGI8RdvnfAxnFYV3AZH+VfQkVne78MN40WqxNS8UjqSER6yfcOOHge6/Pz8HFOFkrbWm9j8JRCx7JYNiwZj44YiRhf+d4BnlJszruKj89noaipGSzDgOedjN4Qi5ZlsTg5EQ+PyURcoHzvAKUUO3ML8cmZLOTXNoJlCHie/ofR837mpyTgexMzkRAq3ztAKcX+3BJ8diwLlyvqnAxKQSluOlkyK20oHpqWieQoZd6BY7ml+PTgJWRdqxFkTEmLxf0zRyJ9iLLF6UxeOb7YcxHn8yt7GM73x/RkplKeYlxaDO6bPwqZw8RP1wgp60ol1m2/gDNZ1515GdT5XSCEgCHOI8Sj0gZhzeKRGJM+WBHjytUafLXxHE6dLXXmyfRlMADHUQxPjcLqZaMwfmycIkZRYS2+XncWJ44WgsIZpuV52pOr4gzlJKVEYvnq0Zg0NVGRl6OsuB5ff3wSx/dduXHtWxlDk8Jx19pxmDY/TZUn5Y4h0kf15gs4XvdzcNQGCrE6/c5vV7j7BEwM/S00jHTXeI05H5uqfgU7b7ktn+R2BhDlnoalkS9Dz0pfmKrMZfhn6e/RzZlcNs4jIIh2j8Ojsc/CQyN9Yaoy1+APhX9Gp71LIiMSzyb8GL466QtTjbkJz+X+Dc3WdpfHZhkQRLgF4XdpjyPYIH1hqutuw5MXPkK1qUUSI9TNF++N+h6iPKQvTI3dXXjk1Be41tnkksESggC9J/498V7EeUtfmFotZnzvyAbkttS5PBnBEgJfnRs+mbEayf7SF6YOqwXf378F5+udTdzEOCwh8NDq8NHc5cgMkR6bN9lseGL3DpyoLJfE0Gs0eH/hUoyPkpaECABWhwNPb9+D/UXXbsvv6Y+hYRm8d9ciTI2TvjDZHBxe3HYA2y8X9JsXcxODISAgeHPlfMxOlh7/53gev9t8BBvO5EpiUAr85u7ZWDxSepIyz1O8s/UEPj14SRKDpxTPr56OlZOFCxDeKkopPth6Bv/aehZMj7EmxuB4ih+tnoy18zIlL36UUny5/SL++tmxfnNW+qr3NTy8ajweWjlO1gK7bWc23v7bATBEGuPuFaPx/YemyDrVcmBPLt74/U4QgttyVvpjLFw6Ak/+dI6seiAnDuTjj89/DUohWu+EMASUp5i5aDh+/NISaLXKUgPkrN//p0/NNHbn4EjtT+CgFhdGCND76Kozn8Gxup+Dp9I6W9Z3l+DryuckGCG9DIpqcx42Vb0ABy+tzX1ddxXeu/YqzBKMECeFospcir9dew1WTlpzowZLI35z9XVJRsh/GDV4reAN0QZ5fdVkbcczOX9Bs7VDUu0OHhQ13c14JucvaLdJ69DZajXi+2ffR425VTKjwdKBR8/+E42WDkmMDls37jv+CUq7miUxOErRYjVi7bGPUW2S1gXUZLdh7cF1uNJaL+l4JkedzfdW7/8C1zqaJTEsDjse3LsRFxtqer6ZrhlGuw1rd23AleYGSQw7x+H7O7
fiZFWFZIbF4cBD2zbhUl2NJAbH8/jRll04WOzsGC2FYXNweGzjNpwuF+7O3Fc8T/HLLfuw43KB8/93sX/jeAqO5/GT9TtxuFC4k3VfUUrx2qZD+PpMrmQGTyle+HIfdmcJd/2+VW9vcRohUhmUAr/76jA2n8yTzPjX1rP419azToaLQmK9i/tf1h/Hun1Zkhnrd17CXz87dtM1hNT7Gj7ccBoffX1GMmPH7hz8+a8HnIu3RMZXG8/jHx8elcw4vP8KXv/tDvA8FTVC+jJ2bcvCX97cI7nY3Jmjhfjds+vBcbzLomu0h3Fo52W8+eIW8CrbR0jR/1lDxMYZcazuGQA8XD+a/iMKHg3dF5HX+i+Xc+28FZuqXgBHHRKMkJsZdd2FON74ocu5HHXgg7I/wsHbZDF48KizVGFT9Ueu51IebxS9i27OIskI6ctosDTi/bKPXc6llOLVKx+i0y7NmOrLaLZ24A8Fn0qa/8ucr9Bk7ZJVup2jPNrtZjyb9YWkH/YvL21DtblNJoPC6LDiB6e/ksR48fw+FHc0yyruxVEKC2fHQ4c3wCHh4fHbc0dxualeFoOnFHaew4N7N8Ii0Bm2r/589jTOVlfJKnPPUwqOUjy8fYtoJ91evX/2Io5cK5PFcBpFFI9v3I4Ws2tDet35HOzKK5JVs6N37k/X70JdR5fL+dsuXMXGs1fk1wUB8MKX+1De5NrIPZhdgs8OXZJJcOq3Xx5CUZVwi/penckrxwdbpS/2ffXOl8dwudi1AZpbWIN3PzmqiPHhhtM4l3Pd5byS0ga89d4BRYwNmy/g2Mkil/OqK1vw+m93yL4+pcDu7TnYvyfX5dym+g787tn1N/5ODuPo3jzsWH9e9uuTq2/FEPnrX/+KmJgYGAwGjBkzBufPf/Nv7HrXbth5k6zF+z+iKO7YAAcv7k0o6jyGbq5DEYOCIrd9N6ycSXReXsdFtNmbZS3e/2HwuNR2Al128Z3+lY6rqLXUK2Lw4HGxLRtNVvFd+NXOchQbq8ApZGS3F6PCVC86r6SzDpdayxT1j+Eoj/yOalztqBadV2lsxeG6YkXVPznK41pXE842iT8EG81GbCvPV9SjhqMU1aYOHK65Jjqvw2rB+sI8xYzmbjN2Xxd/0Jrtdnyam62obi1PKbqsVmwtKhCdZ+M4fHQ+SxGDUqDbYcfGy/nir4Wn+PDkRQUEpzHi4HmsvyC+YFBK8dGRi4qqpPYaVV+dynE595MDFxWXnicE+PKoa8YXey6qqir7pQSvyFc7LoJVyGAYgq+2u/48N227BKXtlRiGYP0m1+vcts0XFfcGIwT4et1ZlxubXV9fAOfgZRkhfbXxk1PfuFfkGzdE1q9fj6effhovv/wysrKyMHz4cMyZMweNja4ta6WilKKoY72qa9h5EyqNh0TnZLVuVdXp1kHtyO84KDrnRNM+xWXnAecD6lzrEdE5+xuOKC47DziTWA83Hheds6P2BFgVDJYw2Fl7SnTOpspzqroCs4TB1xVnReesv35JVQ8RljD4ovSC6JyvruUovr6TQfBJofiDdmPxFdh5V+FKYTGE4OMr4gvGjuJCmO12xQwA+CQnS/RBe7D4Gtq6uxVfn1Lg80s5ot2LT14rR32ntNBgf+IpxVcXcmFzCN/v7Ou1KGtsVdxsgOMpNp/Lh9kqfL+LqhpxpbxecRNGjqfYfb4AnWbhDVpVQxvO51cq7uvC8RRHs66hqU34fje1GnHi/DWXoRIh8TzF+dwKVNe3C87p7OrGwSNXXYZKxBhXC+twrUx4nes227B352XwChmUAhXlzcjPE9482WwO7Np4QVWfnab6DmSdkRZeVKpv3BB566238Oijj+Khhx5CUlIS/vGPf8Dd3R3//ve/vzFmsyUXRns15IRkbhdBSccmwdEmSxkaraWqO91ebtspONZsrUeZqUChV8cpCopTzcLuxXZbB3La8xR5Q3rFg8ehhmOCC4bJ0Y3jTTmKvCG94iiPffVnYe
f7DwdYOTt21mSp6qbLUR776i7D5Og/HMBTig3lWYp7ofQyDtcVo8Ui7An7oiRbVcdejlKcbqhAtbFdmFGQo/j6gPNe5DbXo6i1SXDOurzLqrrDUgBl7W3Irq8TnPNldp4qwxAA6ruMOF1eJTi+4VKeqkZ8ANDRbcHRojLB8U3n8hTv8HvVbbNj/+ViwfEtp/NVMxw8j93nhfNRdhxXzwAFdp28Kji852i+2kbpYBmCnYeEc14OHilQ1cAOcBYt27VX2BN2/GgBLBZ1hjrLMti9PVtw/PzxInR1KDfUAYBhCXZvVOYRlMz4Ji9us9lw6dIlzJz5n54LDMNg5syZOHPm9hii1WpFZ2fnTf8pUZdd3L0uTVT0Om222gFhdNiFww0ttoHxGrXbW8ALLNBN1mbVxhQAmDgzugUSY5us7aoMhF5ZeTs67P3vlFptRlgFjBQ5clAeTQJJq512C7rsrnMWXIkHRa25vd8xO8+hsVv57ruvKkUMkaqujgH41IGKTmFGRXv7gDAqO4QZ5a1tqoy2G4x2Ycb1pjZVxifg9FJVtQmHSK83tine4fdKwzCobhVmVA4Ag2UYVDe3C45XNbSr/jwIIahuFGbU1LcpasLXVzwFahpEGLVtqjvUchyPmlrhvJ26mjawGvWM6qpWYUZVq7Qy8CLiOYrqCmkJ8Er1jRoizc3N4DgOISE3l/oNCQlBff3tC/Dvf/97+Pj43PgvKkrZuXIHb4ZqkxkAR4UtSTuvzsrslYPaQAUWaaknXqTIJpDvYuXVL6y9sggwLNzAMcwC1zI7pJ1AkiKTwLW+FYZ94BhGgWs5eB72AYr5mkReb7dD3W6vV0abMENt6AdwGgkmMYbImFQRQmCyCl9HbEw6RPw6Jov63yGlgElkF2+22BTnIvSKpxRmi8jnYbFD7deXUnqjYmt/6h6A9wEARpMIo9s+AKsUYBJl2FR7DAHALMIYCH2nTs08//zz6OjouPFfVZWwu1RMGsYd6sIyPdchwrVEtDLqjIgzdCACeQ16Vl0p8b7SMf1fS8+oK4neV25s//dESdEzIXkI3BO1pd2lXMtDo65E/U3X0vbPcJfZW0ZMXgIMDcNAqzQL7xZ5ijTLc9OqaxsgheE+AAyOUnjqhO+7h179Z0IpFb2Op2FgPndRhpv63wghgIdB+J57uOlUlxNnGAJ3kfvhblDWefYmBiHwdBf57hp0qr0uAODlKcJw0w6Ix9BTlKEbEI+hhwhjIPSNGiKBgYFgWRYNDTfXHGhoaEBo6O1Fl/R6Pby9vW/6T4m8ddILIQmJgIG3LkZw3F8fqZoBEPjqhAtDBenU91MAAH9dkGBJ9mBDkKqE2155abxgEDBqgvR+0Lgo1y5FbqwePtr+e5f46zzhprKXDeBsahds6P9756U1wEer3gBlCEGku68gP9xdfTsDAmCQl3ARuBgfvwHZjQ32EWYM8fMfkN3YYD9hRlxgwMAw/EUYwQGq8x44ShETIHKvQtQzHBwv2q9mcKi/+vfB8Rgk0hl4UJif+rAJTzFIpCR7dLj/jVoXikWAqHARRqS/+hwRhiAq0l9wPDI6AJxDfR5K9CDhBoaRMYGKk2F7xbAMomNdl5ZXxfgmL67T6ZCZmYlDh/5z+oTneRw6dAjjxklvwS1XAfoUeGtjoCY8Q8FjqM9ywfFAfQzCDAkqF3GKDL9FgqP++mAM9UxRdaKFgGBi4GzBcR+tN0b6pas8NUMwM2SK4E7IQ2PAtOBMVadmGDCYFzpWsHOvjtVgcWSm6lMz88Mz4C7gEWEIwd2xmapPzcwJHwY/vXBJ+XvjR4BR8b1iCcHk8FiEewgbNPcNS1d8fcB5L0YEhyPOV7ga7drU4ap2YwRAfEAg0oKFu7iuyUhTzYjw8caYQcJh4FUjU1XnVgR4uGFyfIzg+Iqx6hkeeh1mpQlXcb1rfIpqhk7LYu6oRMHxRZNSVJ3QAACGAAsmCneKnTc1eUCMnUUzUgXHZ0xLglajbvPE8R
QL5wpXo504JRHu7uo2TxzHY8GSDMHxURPj4eOnrrklz/GYv2Kkqmu40jcemnn66afxwQcf4JNPPkFBQQEef/xxmEwmPPTQQ98YkxCCBN9VUBOe0TFeiPKcJjonw3+JqkRPDdFjmM900TmTguaqOtHCEAaj/aeKzpkVMk0VgwKYFjRJdM6i8ImqTs3w4LEgfILonGVRo1WfmlkRPUZ0zqrBIyRXMxRirIkV/1GvjEtT9aDlKMV98SNE59w1NBk6F834xMRTigeSxRkLhibASyTk4UoUwAPDM0Rd/dOHxiLQRYM5V7ovM13UuBw7OBqRft6KTUOGEKwZPRxaVnhhS40OxdCwQMWfO8sQrBibCoNIOe4h4YFIjw1XbEizDMHCMUnwEgnxhAf5YHxajGLPC8sQTB8VD39v4c/U39cDU8fGq2KMGzEYoSLN9jw99Jg9IxmswkRPhiFITY5AjIi3wmDQYt6iDMVhJkIIYocEI0GkSZ1Gy2LhqlHKQ1kECI30Q/roWGV/L1HfuCGyevVqvPHGG3jppZeQnp6OnJwc7N2797YE1oFWjNdc6BkfxTU4EnzvBkvEH6LxXpPgqQlQyCBI91sEnYtckyTvDAToQhR5LAgIRvtPddlvJsk7EdFukYoYDAjG+o9EgF7YBQkACd7RSPIe7LJrb/8MBqP9hyHSXdw9GOsVgrGBQxU9aFnCIMMvBok+4j1UItx9MSciSTFjmE8IRgUOEp0XaPDA8tg0RV4RlhAM9vLD1PAhovO8dHrcO2y4osWVJQRhHl6YO1i8h4peo8GD6SMUMRhC4O/mhsXxwrtvwJnv8vCYTAWEnjwBvQ7LUsX7tDAMwSMTRynacjAE0GlYrMwU3n0DzkXl4emjFCVIEjjfy+oJaS7nPjh7pCoP0t1T013OWTtvpPIaH5TinjniBi4A3L1I+fvgeIo1i0e5nLdsSabivSzPU6xZIb6pAYDFyzLBsIwiA5RSitVrXffNmb98JLQ6jbLcHQqsfHCi6rwfV/pWklWffPJJVFRUwGq14ty5cxgzxvUHpFZaxgNTw98GIRrIe5sEEe6TkOzn2mOjYXRYHvVbZ8KpDAYBgyj3NEwKftDlXJaweGzIc9CzBlmGAgGDaPchWBrxgOu5hOCZhCfhqfGQxWDAIMItHA/H3i9p/ovJD8Ff5y0rfMKAQZhbAJ5NvE/S/NeGr0aEm78sBksYBOq98IeMe6QxRizCEK8gWbUlepvS/X3cGkk/6l+PmoXUgFDZDE+tHh9NXw1WQjLqL0ZPweiwSFlGFUsI9KwGn8xdAb0Ej8pTo8dhyqDBshgMIdAyLD5ashweEjwq3xudiXmJ8bIe5gwhYAjB+yuXws/ddd7PqpGpWJ6RLMuoIj3/+701ixHs3X9uU18tGJGIeycLu9mFGcCf7luAqABfl/OnpA3Bo/OUPX9/fd8cxIUL7/B7NSopGk+tFveQCunZ+2cgeUiYy3lJQ8PwzCMzXc7rTz+8bzIyU1znEcbGBOEXz8xXxLh/zThJXXjDI/zwwitLQSlkGyPLVo3G9NkpLucFBHvjpbfWgBB5DEKA2UszMH/5NxuWAb5jp2YGWgGGJMwIfw9axg0EruJ9zlsR7TkdE0N/C0ZicmWQYTBWD3oDetbTpTHSm08y2GMk7op6FSyRlvEfpA/DU3G/hqfGW4LB42QM8RyGx4b8EjpGmms8UB+Al5N/AX+dn+S8l1jPGLww7Gdwk3i6x1/njbfSf4RQQ4AkBgFBjEco3kh/Cl5aae53H5073h/zKGI9gyUzotwD8K+xj8Ff73qxAABPrR6fTrofyb5hIHCdicSAIMzNB+umPIRQiYmoBo0Wn864GyODonpepwsGIQgyeODr2feJJqn2lY5l8e85yzE5MubG6xQTSwh89QZsWLQG8f6uFyTA6bH424JFmBUbd+N1umJ46nT4YtlKpIrkhvQVQwjeWDwXS5OHSWYYNBp8dPcyjIyS1kWYEIJXFs/E6lFp0h
gMgU7D4m9rl2BCnLgHrK9+vmgKHpqaeeMarhgsy+CN+xdgRqr01vOPLxyHHywYK5nBEIJX75+DBWOGSWbcO28kfrR6smQGIcAvHpiB5dOld/i9a046fvboTBAClyGU3tfwxH1TcI8Eb0ivZk9PxvPPzAfDEMmMh+6dgIfumyiZMXFKIl78zTKwLOOydklviGXVPePw2JPSDbHM8XF45Z210Oo0khkLVo7Cj3+1+Bv3hgAAoWoC3t+w5LQRFpPJXo+ijvW41rENDmoCgQYU3I1FnYJDgD4Z8b6rEOM5W/A4rZiM9hZktW3D5bZdsPJGMGB7KqI624Hz4BCsH4IM/yVI9pkp2dDpqy57B04278Op5v0wcf0zQvSRmBw0F6P9p0LDyM8BMDpMONBwBAcajqDD3gkWLHjwTgJx5jiEGkIwJ2Q6pgZPgo6Rf3zS5OjGztpT2FZzAi22DrCEuZF3QQjpYfhjScRkzA8bD4OC0zDdDhs2VZ3DhoozqOtu75cRYvDBqkHjsCxqNDy18o9KWzkHNpRn4fNr51FhaoWGMDfcxQwhcFAeQQZPrI0dhTWxI+Gjk3/ixsZx2Fiai48KL+BaZws0hAHtyUxi4GT4691xb3wG7k8YiQCD/HwJB89jc0k+PrpyCQWtTf0yfPQGrB02HA8mj0CwuzSDra94SrG9qAAfX85GbkN9j8eqh0EIHDwPT50Oa1LS8MDwDIR7yf+9U0qxp7AYn1zIQVZNrXNhoLiJ4abVYuXwZDwwMgPRfr6KGIcLy/DpmSycL6++sfhQip428Tz0Gg2WZiTh/nEjMDhQmlF4q04UXMfnJ7JxpqjixsLAUwqGMOApDw3DYtHIYbhvcgbiQqUZhbfqXGEl1h3Owon86yDEabb/h0HBMgRzMhNw74wRSIxSdmoiu6gaX+7LwvGsUoDgJgalFIQA00fF4545IyR5QvpTfnEdNuy6iCNnip2eBYaA5ykYpqfJGwUmjY7DqgWZSE9Sdtqx5FoDvt56EYePFYDjKJhbGJRSjB01BCuWjkRmhnTDs6+ulzVi8/rzOLT/ChwODgzD9DAIKKXgeYqRo2Nx18pRGD1OuuHZV1Xlzdj2xRns354Nm80BlnUyej9/juMxfNRgLL1nLMZOTVRlhMhZv/+/MER65eAtqDAeQIslHza+CyzRwcAGIMZrNvz08aJ/SykHjtrAEoPoh+PgbSjuOoka8xVYeCNYooE764sE7ykIc0sQZfA9DI1LhgN5HRdQarwKM2cCQxh4arwx3HcMYtzjRf+Wpxwc1A4t0YvO4yiHrLbLuNJRABNnAgMGXlpPjPTLQKKXKwYPO2+HjhGvKcBRHhdbC3ChtQBdDjMICLy17hgbkIJ036Gi+SQ85WHj7dAxWpfzzjeX4mRTIdptZgAUPjp3jA9MwNigoaIhHEopLLwdekYjyqCU4nxzBQ7VFqHNZgZPKXx0bhgfPBhTQ+OhEQmTUErRzdlhYF0zLjXVYG9lIVqsZnC80zgYGzIIs6PioRU4TdSXoWc0oiEbSikuN9VjV1khWrrNsPE8fPR6jA6NxNzB8aKhGEopLJwDOoZ1GRbKb2zAjpIiNJtMsHEcvPR6jAgLx4Kh8TBohA1bSiksDge0LCt6TwGguKkZ2/ML0Wg0wepwwEuvR1pYCBYmJcJdNzCM682t2JZTgMYuI7rtDngb9EgMDcKitER4GoQTOimlsDo4sAwRTWAFgKrmdmy/eBV17V3ottnh5abH0NBALMwcBh93cePZaneAEKdnRky1LZ3Yee4qals6Ybba4Ommx+BQfywckwQ/T3Hj2Wp3OENpLhiNrV3YdeoqqhvbYe62w8NNh+hQPyyYmIQAH/FTHTa7s2KyTiQRFwBa203YczQflXVtMJutcHfTITzEF/OnJiMoQDxPzu7gwPMUep04o73DjP2H8lFR1QKTyQo3gw5hoT6YPSMZoSHCya8AYLdz4KlrRldnNw7uu4LyskYYjVYY3LQIDvHGrL
lpCI8QN2wdDg4cx0PnIifEZLTgyO5clBbVw9Rlgd6gRUCwF2YsTEdUjDLD9lbdMUQGSCZ7Fco7v0Zl13bY+HYAvfVFEhHrczciPGaDFSgUJlVd9nrkt+9EQcdudHNtNxj++sFI9b0LQ72nqy6e1mFvwcWWA7jQegBdjl4GQaA+AuMC5yPddwr0AsXIpDM6cKLpGI43H0WrrfUGI1gfjGnBMzA+YALcNeqOkXXYjThYfxZ76k+i0dIKCupkGPwxP2wSZoaMhbdWHaPTbsae2kvYXHUGNd29DCDY4IslkWOwOGI0/HTyPQJ9ZbRbsKP6Mr66fh7lxhbwPYwggxdWDMrE8kGZgrVMpMrssGF7RT4+K76Eks4mcNTJCDB4YFXscKwZkoFwD/EHpytZHHbsuF6ITwqyUNDaeKMMur/eDSuHpmJtYjqivXxVMaycA3uuleCT3GzkNdbfYPjqDViWmIS1KcMR6yeeKO1Kdo7DgWul+DQrGzm1dXD0lO300uuxJCkR96QPR3yguoezg+dxtLgMn5/PwcWKmhsMD70O85Pjcc+o4RgWqq5WA8fzOFlUjnWnc3CutAr2njoY7jotZqcOxd3jhiM1Sl1tIp6nOFdcia+O5+B0YcWNRn4GnQbTU+OwetJwDB8cpmonTSnFpaJqfH04Bydzr8PaY4jotRpMTBuMldPTkZkQqZqRV1KLTftzcPzCNVhsvcYOizFpMVgxOx0jUwapLpxWUFyHrbuzcexUMbp7KtJqNSxGDI/GsoUjMCojRnUp+WvXGrB9axaOHLkKs9lZkVajYZCaGoWly0Zi3Lg41QwlumOIqFS3owE5Tb9GY/dpELCguLVrJgOAh4Z4It7vEcT5PCD7R2F2tOFow5soN54GAemnsR0BQKElbkj3X42RAffJDhl1O4zYVvNPXOk4DYgydBgXuBAzQ9eAlRkysnDd+KLyc5xrOdPjzu//66QhWkwNmoYVkatkh4wsnA3/KtuEgw3nwFO+XwYBAUMYzAoZi0dil0EvM5xj4x34a/EubKs+Bwd1ft63UpwMgtmhGXg6cYlgvREhOXgOfyk8hHVl52Dr6YtzK6M3T2N2eDJeHL4Q3jILqPGU4t0rJ/BB4Tl0c/aeT/gWBnG6emdGxOO3o+YhwCDPeKOU4u955/C33LMw2m0939+bKSwh4CnFlIhY/HHiHIS4i+9I+2N8nJuNd86fRofVCqbnercyOEoxPjIKf5w+F5EKnhFfXc7FmydOoa27W5QxMjICv58zW7TwmZC25xbg9QPH0Ww037jeTQyGgOMp0iJC8driWYgPlm/07M8rwR92HEVDh1GUkRgehFeXz0JypPxTi8fzy/CHjUdQ09J543r9MYaEBuClu2ciPVb4WKmQzl+txB8+P4jKhnZRRnSIH56/bwZGDZNfvPJyUQ3++K8DuF7dIsoID/LG0w/OwIQR8o+uFl2rx+vv7sO1skawLLmti29vaCc40AtPPjINUyaIe8v70/XrTXjjT7tQWFAnyvD398D3fzAds2a5TmwdSN0xRFSoy3Ydp+oegY1r78cA6V9RnouREfSKZEOh01aHrVU/gcnRLLmzbqznFMwK/xVYIm0R77C34MPSl9Bqa5DMiPcagbWDfgGNxLyPTnsn3ix6HXWWWkl1SAgI4j0T8KOhP4VeYtl3o8OMl/L+imvGKkk1WwgIhnpF49WUJ+ChkbaImx1W/Dz7I1xuL5fEYEAQ6xmKdzIfha9O2iJu5ez48fmvcLrpmqQTgSxxJtF+OOEByd4RO8/hx6e3Yl91kaT5LCEIc/fGF9PWItLTV9LfcDyPn5/cg82l+ZIZgW4eWDd3NYb4CBc/6ytKKV46dgifX7ksmeGjN+DzpSsxLDBIMuNPx0/g/fPSuoqyhMBdp8PHK5dheJj0XIa/HTuLvxy9vcFnf2IIgV6jwftrl2LUIOm5DJ+cyMLrO49JZmhYBu/evxgTE2IkMzaeysVrGw7dyLlxxWAYgj89tA
DT06TnMuw5W4CXP9wLSqnLY8zOEyAEv354LuaNlZ5Ee/RCCV58Zyd4nro8/kt6/tcvHp6FJTNcH43u1fms63jhtS1wcLzk4m5PPDwNq5ZKP52Se7kSzz+3ATabQzLjoe9Nxr33iddiGkjJWb//T5+akSuLoxmn6x6TZYQAQJVxO/Jb/yyNwXVge/UzMDlaJBsIAFBmPI7jDW9LKqZl4cz4uOxVtMkwQgCgpCsbG6v+Itipt6+snBXvlLwl2QgBnKmJxcYi/LPsb5IYdt6O1/LfR6mxWnLhOAqKa11V+O3VD2CX0I3XwXP4Ve7nyJVohADO7rllpgb8PPsjWDnXTdd4yuP5rM0401QquSwBRymqzK34wZnPYHK4bjhFKcULF/Zgv0QjpJdRZ+7E/Ue/RLtVWhPH35w/ItkI6WU0d5tw794NaOo2SfqbN8+dkmyE9DI6rBbct+1r1HRJ69j9/vkLko2QXobJZsODX29GeZtwR9W+WnfhsmQjBHB6s6wOB77/xVYUN0rrdrrt0lXJRkgvw+7g8NSn23GlWrjzd18dvFyC36w/5EzKlMjgOB4/+/cuZJVK64R+Ou86Xv7XXvC8ayMEcCaI8jzFy//aizNXyiUxsguq8at3doLjeEk1SGgP5w//OoCj50skMQqK6/DL17bcyDmRqr9+eAR7D1+RNPf69SY8/9wGWK3SjRAA+Ojfx7Ft6yXJ879N3TFE+uhq6zuwci2yjJBelXZ8hjZLnst5F5o/QZe9QQGDoqBjF2rM2S5nHm/cjCZrjexqqRQUeR2nUNjp+gF9qPEAKs0Vihi5HZdxrvWsy7n76k/jamepbAYPHnkdJThQ73oR2F+fg3MtxeBlVi7iKY+CzmpsrDrlcu7hukIcqLsqm8FRHqVdTfj3tZMu555qKMem67my6y9xlKLa1I53810zshpr8XGB/AcZRykau43406XjLucWtTTjrxfPKWK0Wyz47cmjLudWtXfgjeOu3++t4imF2WbDKwcPu5zbZDTht3uPKGJYHQ68tOOgy7md3Ra8stn1vFtF4exL88v1+1xubLptdrz0xX7ZRemcizjFLz/d63KxdDg4pydEQfUwCoqX/rUHDof485TnKV792x7wvNxfoVOv/WMvLFbxTQelFH94ew84jldUmO7N9/ajy+i64/qbf9oNm82hqMLzX987iNZWo/wX9w3rjiHSIxvXjmrjHkVGCAAQsLje+bXoHDvfjYKO3bK8FLcy8tq3is5x8Hacb92vgsHgbMtu0Tk85XG48aDi8vYEBIcaDojOoZRiR430nd7tDGBH7TGXP9aNlacU9wuioNhYedplWfl118+BVcjgQbGh/KJL786nxRcV99nhKMWGshyYHeKt6D8rzFLF2Fp6FR1W8Qft53k5soq43crYV3YNjSbxB+2Xly8rLnPOUYpT5RWoaGsXnbcx64riNvI8pciprkNRQ5PovG2XrsLuYgEWY5Q2tiKnok503t5LRTBZbIp+6TylqGvrwpmiCtF5R7NL0dbVreh+UQq0dXXjaE6p6LxzueWob+5U/JmYum04eEbc23iloAblVS2Ke+3YHRz2HRb3NpZea0BBQa1iBk8p9uyW7m38tnTHEOlRZdc2xUYI4KxFUm3cAxvXLjinuPMAHNS1m12MUW48CaNd+AGV33EG3Zxyi5eCR6kxF83WWsE5uR2X0W5vV8GgKDdfR6VZ+AF1peMaai1Nijv5UADV3Q242in8gCroqEZRV42qfkGN1g6cay4WHC/rasKFlnJwKhjtNjMO1RUKjteaOnG4tkRVnx2zw46dFVcFx1stZuwoK1TFsPMcNl4Tdj932azYWJh/W6KlXH11VdgzaXU48OXlPFUMhhB8dTlXcNzB8/jiQo6qUuosQ/DlBWEGpRSfn8pRfP0bjDPCixKlFOuOZavqe8QyBF8dzxGds/5QtqpGkgxDsP6QuKd44/5sVZ2HCSHYsDdLdM6WXdnqTqdQYNOOLNHN0/
bt2Yr73wAA5Sm2bs1S3Vl4oHXHEOlRtXEv1DTJAwAKO+rNJwTHSzqPQE1HYCcDuG4UdivndZxW2RHY6RXJ7xAOnVxqu6CqWy/gLN1+sfWC4PjJ5mxVnXQBZ+n2k805guNHG/MGhHGkUXjBOFh3VfEOv1cMCPbVCi/g+2uKoPZ7RQDsrBQ2RA5WlcKhwggBnN/dHWUFguMnKitgcbjO6xETTym2FwsbbeeqqtFlVb4ZAJxeke0Fwozcmno0G83qGDzFrivCjOL6ZlS3dqh6YnE8xb68YsHddU1rJ4prmxV7EXoZJ65ev3E89la1d3Uju6RGldHG8xTZxTVo7+o/z8liteNMznVVnYcppSipaEJdU4fgazh2uljVAk8B1Na3o6xCOD/oyOGrt52OkavWFiMKC4Q3mv8N3TFEemTlWlRfg4CBjWsVHDdzrVBr7DBg0C3idemyt6na4QPO0InJ0f8PDnDWDFHTrRdw7jCMji4RRpeqhxPgfHh02oUZbTb1sVKO8qLXabWaVBuGPCiaLcKMFotJtbFDATRZhJNJW7rNqhkARBNWW7rNKu9Uz3XMwkaA2JgctXULJ/e2mgaG0WW1geP7/521qjR0euXgeJis/YfkWrsGhkEp0GHq/34NFAMA2gSu1WFUFvbpT60d/TNMZiscjoHxMrQLMDiOh8mkzoi+wWgfuPs+ELpjiPRITVjmPyLgqfCOTmxs4BgD8T4AToTBDQCDUnqjXkf/jP7rhchiADcKRwkxVCIAOE/eCI6p9CL85zpi94oOyAJuF3kfHOVVG1ROhvD9cPD8gPS1ELvnQgu7XIldR+w7N1Ach4rd/e3X6p8xUPcKEP7cBzJE4BC4lloPgjTGAL4PAYPm27hX/y3dMUR6pGXkFVzqTxQctKzweWmDyJh0Bg89K/xa3TXq3wcAuLHC1UM9NZ4DEP4h8BCptOqpcVcdNmFA4ClSEMxL46Yq/t3L8BZpyCe3IJmQxOqVeGv1sk/k9Cc/vfBr9dbpVeWH9MpXL1yJ2FuvV+0FA5yvVYwxEPIUuY63SHl3OdKxLHSa/usG+bgNDAMAvARer7ebuqrRN1+rf4aXx8AxhK7l5TFw98pbgOHpOYDvw7P/16vTaaDVyu9R1j9j4F7vQOiOIdKjILexEjr0ulagIVNwLNJ9hITuueKi4BHuJtyhMtYzZQBCARwGeyQLjid4Jar2VnDgEO8p3N8nxSdO9cLHgUeyt3BBpXS/WNUeCx4U6X7ClRdHBsSoZhAQjAqIERwfEzxIdYInQwjGBQszxoZGqzZ1WEIwIVy4Idjo8EjVPheWEEyIEq62mREerioxspcxLjpKcDwlPAQ6Fz1kpDBGDRLuDDw0NBAeevkNJ/uKIQRp0aHQCCRYRgf7uuw140oEQGyoP7wEDJFgX0+EuugDI0VhAd4I9u1/8+TprsfgiADVmw5fbzdEhfr2O6ZhGSQlhKkuCe/upkPsIOGifGnDo1QztFoW8QnqSv0PtO4YIj2K8V6pMjzDIMCQCS+d8KKU7LtI8bFapwgC9XEIcROuJJjpP0OlIULgrwtFrGeq4Iyx/uOhJeoegj5aH6T5pguOTwzMgDurzmr30LhhYlCGMCNomOq+MQZGizlhIwTHxwYNRoS7r6pPhCUEd0ULv4/0gHAk+ASpW8QpsCZOmBHvF4iRwRGqFnGOUtybmC44HuXtg8nRMapyUThKcX+q8PsI8vTA3Pihqhn3jUgXHPc2GLA4bZiqUxocpbh3jPD7cNNpsXxUqioGTynunSDM0LIsVk1MU/WZUwBrp2QIhtwYhmD19AxVRgIhwOoZ6YILNCEEK+dmqMoTYQjB8lnp0Ig091u2cITiY7WA814snJMGg0H42br0rpGqGCzLYPbs1AH14AyE7hgiPfLWDYG/YQSU3xIesd53i87w0oZikMdYFV4RilS/ZaIzPDW+SPEZr+pUy7jABaKxeneNO8YHTlDMICCYHjxTtK+NntVhTqhyBgMG80
InQidSrl7DsFgWOe5Gfxe5YgmD+REj4SHSc4YhDO4ZPEbR9XsZ8yJS4acXDs0QQvBA/CjFHguWEMyIGIowd/HQ4QPDRigOnTCEYFxotMsy7w+kZSj27jAAUoNCkBIs3kflvhHpihkEwGA/P4yOFC/BvmZUmqpTGiFenpgcFyM6Z/VYdQwfNwNmpYiXYF8+TnhDIkVuOi3mj0wUnbNoYrLLjs1i0jAMFk4Q9uACwJyJw2Bw0fXWlRZPF78XUybEqwp58DzFknnponPGjBmCgADlmyeO47F4qfDG6b+lO4ZIHyX5P6XIm0DAwlefglCPqS7njg58CISwkHvckoCFv24whnpNdzl3WsgqsEQr+70wYOCvC0Gmn2vG3NAF0LN6RQxfrS+mBE1zOXdJxDR4atxkGwoMGHhpPbAoYorLucuixiFA7yU7H4UBgTurxz2DJrtmRI9AhLufIoaOYfFovGvGkkHJiPMOlL3TJwA0hMGPUia5nDs3Jh6pASGKGAwIfpbpmjFl0GCMDo9U5rEgBL8Y75oxMiICUwbHKN7p/2LqZJdJtclhIZifHK+Y8fNZk1wuzjFBflgxOkWxN+Gn8yYK5qD0KsTPC/dOU75w/XD+OLjrxRtQ+nq64XsLlBvrDy0YA18XISR3gw7fX6W8z8rq+SMQ7C8eQtJpNXjsQde/1f5ECLBwThoiw8WbKrIsgx887vr53D+DYNq0YYiLk9/w8JvWHUOkjwIMGRgR/Bqcj05pv24CFu6acIwNfReMhHBFkCEec8JfBgEjeREnYOGhCcCiqNehYVwnXgUbInFvzHNgCCvZ+8KAgbvGGw/Fvgw96zouHKQPwo/inoaGaCR7LRgwcGPd8HT8z+GpcW3VB+h98OvUJ6BjdLIYelaHV1N+CH+d6zb3vjoP/HnEI3Bn9ZINBYYw0DIavJHxEMLcXLeg99Qa8P64++GjdZPOAAHLMHh39D0Y7Om6G6tBo8XHU+9GkMFT8iLOgIAlDN6bsAxJfq4fTlqGxUezViDS00cyw/lLInh7ykJkBgvnPNx4TYTggwVLMMTPXyYD+MO02ZgQJZyDcmM+IfjL4oVICg6WbSi8OGMaZsYNkTT390vnICNKfk7KT6dPwMJUcS9Cr361dDomDB0k2xh5dNoorBwjzdvxk8UTMXN4nOwt2prJ6bhPohHz6OKxWDA+SSYBWDghCY8uHitp7t3zM7Fidrqs6xMA00YPxRP3SDMwFs0ZjrUr5RlVhACjMgbjpz+YKWn+9BnJePgR15usmxkEqamRePa5hbL+7tvSne67/ajBfAIXGp4FR3vLUffXdp4FBQd/QwbGhLwNHet60euralMW9ta+BBtvAvpt1v4fRpA+AQsifwd3jetFr68qTUX4rPx3MHNd/bZqB5wLNw8eIYZoPBDzK/jo5LUgrzCV4y8lf0aHo8MlI1gfgp/GP4MgfbBMRh1eyf87mq1tYED6PSHyH4Y/Xkl5HFHu8pKxqs3NeCbr36jubhFhOP89QOeFN0Z8D/Fe8tqc15nb8cNzX+BaV2O/rdr7Mvx07nhvzFqk+U5lZusAALbsSURBVEnvwgoATd1GPHJ8A6601Qsyer9t3lo9/jFpBcYEu168+6rN0o1HD23GxcYalwwPjRbvTVuCaZHyWql3Wq14Ys92nKyuFGUAgJ7V4M+z52PukKGyGGabHT/duQuHSssEGb0cDcvi93NmYWmyvMXS6nDgl9v2Y9eVon5bzveKIQQMIXhx/jSszpTe6RUA7ByHVzcfwuaL+S4ZAPCzBZPxwCR5Xg6O5/Gnzcfw5fEclwwKih/OH49HZ4+WdRyb5yn+tuUkPtl9AaSnhX2/DIaA8hQPzB+FH941UVbyJqUUH285hw82ngIhwoze97hyTgZ+fP9UWaEjSim+3nYJf//oKEAhGM7sZSyYlYqnfzhLNP+kP+3ckY2/vLO/p1GgAIMl4DiKadOT8OwvFkCnMjwlR3LW7zuGiIDsvBHVXbtQ1v
kljPbym8YIGIR5zMRg79UIMIxQXPvAznejpPMQcts2o9V2/RYGQYznBKT6LkWEuxqGFXntp3GmeRdqLWW3jSd4ZWJs4DzEeaaDUXhc1s7bkdV2EYcaD6LMdHtJ9STvZEwPnok0n+GKGQ6ew9mWXOysPYb8fsq2p/jEYWH4FIzxT4WGUXZiwcFzONNciK8rT+FSW3+MaKyImoApISnQMcp+0BzlcarxGr68fh6nGktuM3eSfMKxNnYMZocnw8AqSwjmKcXphnJ8VnIRh2puZyT4BOHB+FFYOCgJ7hpxt7mQKKU411CFTwuysbei+LaH7RAffzyYlIm7hiTBU6vs+CSlFFn1tfgsLwe7rhXfVu8i2tsHDw4fgeWJSfAWORbsSrl19fg8Owc7Cgpvq3cR4e2N+0akY3lKMvzclJ8gKahvxJcXcrH18lXYuJuT4kO8PLF29HAsz0hBgIfwUXBXulbfjK/O5mLLxXxY7DfXAfL3dMOacelYMToFwd7KcwzKG9vw9clcbD6TB/MtTeB8PQxYOXE4VoxPRaif8pMw1U3t2Hw0F5uP5cHYfXMBL083PZZNScWyqWmIDPJVzKhv7sS2Q7nYfPAyOm9pNOdu0GLx9DTcNTMN0WHyNn991dxixM79udi6KxtttxQp0+s1mD8rFUvmpmPwIHmbv75qazNhz+7L2LrlElpabi56qNWxmD07FYuXjPivhGPuGCIDKEopOmxFsHCN4HkrNIwXvHVDYdAIJ91ZHU3osObBwXeCEC30bAB8DZmCoRtKKVqsZTA5muCgVugYD/jrY+ChEf6Cdjva0GS5CivfCQYsDKwfQt2Gg2WEF5ZGSxXabE2w8xboWXcE6SPhK+IB6eaMqDYXwNLjUXHX+CDKPRlakfBQvaUOzdYmWDgL3Fh3hBhCEKgXPo7WzZlRZiyGydHVU1vEC0M8E6AXOTFT292EekszzA4L3DUGhBkCEeYmxrCgsLMUXQ7nD9VL44EEryFw1wgvLLXdragyNcHEWeHO6hHm5o9BHsIMC2dDbnsZOuwmUErhpXVHik8MvERqjNSZ23Hd2Ayjwwo3Vodwdx8M8RL2Flk5O7JaK9BmM4GjPHy07kj1i4SfSI2RenMXrnU2o8tugYHVItzdG/E+QYKGrY3ncKm5Ei0WE+yUg4/WDWn+4Qg0CC9ejWYjitub0WmzwsBqEOLuiST/YEGGg+dxobEKTd0m2HgOPjoD0gJCEeIuvHi1dJtR2NyEDqsVepZFsIcnUoKEGRzP41JDLRpMRlg5B7x1BiQHBiPCS/g50t7djauNTei0WKBlWQR5eCAlNEQwtMJTiuy6OtR3daHb4YC3Xo9hQUGI8hH2jnZZrMiva0BHtxValoG/hztSw0MEd9yUUuTW1qOmoxMWuwOeej3igwMQ4y+cS2Cy2pBf3YCObgtYhoGfuxtSokKgFThSTClFfk0Dqts60W23w1OvQ2yQP4YECz/jum125Fc0oMNsAUMIfDwMSB0UCq3Arp5SiqLaJlQ2taPbZoe7XoeYYD8MDRN+/ljtDuSX1aPT5DQUvD0MSI4NhV4rvAm4VtOMioY2mCw2uOu1iAr2RXyk8Pfd7uBw9VodOowW8JTC28OApCGhMIgcjb5e04Ly2laYum1w02sRHuSNxMEhwt93B4fCknp0dHaD43h4eRmQMCQU7u7Cz+qq2jZcr2yG0WyFQa9BSJAPkoaGCn/fOR5FRXVobzfDYefg5WXA0PjQ/+rpmDuGyH9BlFK0WS6guvNLNJj3A7cc09Uyfoj0vhuRXqtg0Cg7w00pRZMlHwXtm1HWdei248Y6xguJPkuQ4LsEXtowpW8Ftd0luNS6C/kdR2+rsKpj3JDhNxcj/ObBXy8vNNFX1eYKnGw+iPMtJ2CnN++sdIwe4wKmYmLQTIQalDOqzHXYX38cRxpPw8rfXMZax2gxJWgs5oROwSAP17kLwowmbK85jd2152Dmbt69aQmLWaGZWBI5AfFe8kIsfV
VjbsPGygvYWHEBXY6bd28awmBueCpWDRqDVN9IxZ6zOnMH1pdlYV3ZJbTbbi7HzRKCORHDsHbIKIwMjFLMaDQb8dW1y/i0MAvNt5STZ0AwK3ooHkgYgXGhgxQzmrtN2FB4BZ9eyUb9LV14CYBp0bG4PyUDk6OUJ6u2W7qxMT8fn2TnoKar87bxSdGDcH96OqYOHqz4NEiXxYoteVfx6fkcVPbT6XfMoEjcOyodM+KHQKOQYbLasCOnAJ+fyUFZ0+2tKTKiw7B2XAZmJsVBJzNs0Ktumx17soqw7kQ2imtv76GSHBWCeyalY3Z6vKiBISar3YEDF4vx1ZEcXK1ouG08PjIId09Lx5zRCXDTKfMy2h0cjlwowYYD2cgrub1jcUy4P1bPzsCc8cPg4abMy+hwcDh5oRSbdmUhJ7/6tvHIMD8sX5CBuVOT4TmARdq+Kd0xRL5lOXgjLjf8GK2W0zfyOvqX84GREPBLRHuvlcmw4Gjdr1FpOiHKIGBAQTEy8DGk+q2V9UB38HbsrH0b+R3HwIAFL8rgMTHobkwOksfgqAMbKj/G6ZYjN/I6+lPv2PTg+VgSsUZWSIejPD4r34RddYclMWaFTMLDsatFjxPfKkop/l22F59XHARDGPACRctYwoCjPGaFZuLniauglRHSoZTi36XH8V7RQWc8WygO3MOYEZqE36avkB3S+bTkPH53eT9AROLZPYxJIUPwztjlssMt60su45dn94GCumSMCY7C+9OWw0dmuGVbSQF+dmQvOMqLMJy5IMODQvHv+csQ4CYvFHKg9Bp+vHs3rA6H4HHpXkZCYCA+vmsZQjzlhUJOllbgyU070G1zGuj9cXoZMf5++Pc9dyHSV16O2sXr1Xji8+3oslgFMtSc+R48pQj39cYHD96FwUHywhRXKuvxxPtb0WbqBiHot45HLyPI2wN/f+wuxIcLex37U3F1E578yxY0d5huXOt2BsBT5+mcd59aiuQYeRvBirpW/Oj1zahv7gQjkLvSew893fV44+klyEiQt/GobWjHz17dhKraNmFGz2PWoNfitV8swej0GFmMb1t3DJFvUQ7eiAu198JoL8GtXhAxDfH9EWL9HpfIsGBP9Y/RbCmQVRAt1e8ejAr6oaS5HG/HV5Uvo8KUJ6tq6gi/eZgb9kNJxghHOfyr9M+40pkDOU1eRvlNwL0xP5BkjPCUx7slH+Nks3Bn31tFAIz0H45nEr4v6VQLpRRvFn2NXbXnZDAIRvgNxR+GPyI5h+Wtq3vx6fVTkhkMCNL8ovCPMQ9KNkbevXoM7149Lp1BCBJ8grFu6oPwkJhf8kH+Ofz20hHJDJYQxHj5Y/O8+yQbI19cvYwXjh8QXFT7Y4R7emPzXfcgyF04tNVXWwuu4pm9ewEZjEB3d2xecw/CvKTlTOwvvIYfbdoJgEJKiRCWEHi7GfD1g3cj2t9XEuNUSQUe/3QreCpsFN7KcNNp8cVjqzE0RFo+w6XSajz2j81wcMJG4U0MhkCn0eCjJ1ciKUpaPsPV8no8+uZG2BwOSfVUGEKgYRn8/afLkREnzQtaVt2MR179CharXTKDMARvPbMUY1NjJDGq69rwg1+sg9FkkcQghIAQ4DfPLsbkMfIStL9NyVm/7xzfVSFKKS43/Fi2EQIApe1/QZ1xp6S5x+tfk22EAEBe2zoUtm+VNHd33V9lGyEAkNW2B+datkiau6X6C1zpzIbcTnMX2k5hb500xoaqnbKMEPS8mgutl/F5+WZJ87+qPCLLCHEyKLLaSvB20SZJ8zdUnJdlhADOcvO5bVV46bK097G1IleWEQI4PSZFHY348ZmNgpn6fbW3skiWEQI4q4qWd7Xi0SMbJS1ix6vK8avjBwBI/2ZxlKLW2ImHdm+GnXNdUflCTQ1+tm8fqExGs9mMBzZvgsVhdzk/v64BP92yC5RKM0J6GZ3dFjy4bhOMVtedWa81tuCpL7aLeo36Y3Tb7Xjko81oNwt3He5VdUsHnv
xgm2QjBAA4nsJqd+AH/9yMxg7XXbGb2o144i9bYJVohADO766D4/Gjd7eiplm4u3iv2ru68dQfN0k2QnoZPM/j2be343qN647uJrMVP33la8lGCOBcdyilePmNHSgqvT0U9b+oO4aICrVbL6HVchpyjZBelbS+CeqiD0mLpRjlxqOKS8NfavkAHBV/CLZaa5DbflBx/5jjTetg4y2ic9ptrTjetF/R9QHgQMN2mB3CLeQBoMtuxNYa5YzddYfRZhN/QHU7rPjkujIGBcWuunOoMd8eK+8rG+fAe0UHFTF4UOyvu4LiznrReRzl8UbeIWUMSnG8oRTZLbfHsfuKUoo/XDqqqG4tRynON1bjRO11l3NfPyfPmOrLuNLcgAPl11zO/fNpeUZhX8a11lbsLCp2OffdE2edRzEVMGraO7E596rLuR8cvQA7x8kud87xFC1GM9afz3M595Mjl2Cx22VX4eUpRafZii9P5Lic++XhbHR1W2WXO+cphcVmx2cHLrmcu+VwLlo7zLKr11LqzPf4ZMd5l3P3HM5HQ1OnIgbPU3yy4Yysv/uu6o4hokJVnetUNcqzcvVo6T4pOqegfbNKRgcqjOIP6kttu1U147PzFlztEGecaj6s+PqAM6xzrlWccaTxjGCuhhRRAIcaxBecAw1ZsPKud7dCYsBgR634w+NgfT467a53nkJiCYOvK8QfgsfqrqHR4nrnKcb4ovSi6JyzDZUo72pTVXr+06Is0Tm5jfW40tyoivHJlWzROaWtrThbXa24vD0B8HG2+Puo6+jCkeIyVc0LPz2fI+qlajN1Y09ekeKy8DylWHc2Bxwv/BszWWzYej5fFWPD6VzYHA7BOVa7A5tO5CnuucLxFNtP58NksQnOcXA8vj6Yo/gz53iKA2eL0N4l/DumlGLjriy5DuIb4nmKkxeuobG5S9kFvkO6Y4golJVrRqNpv8pGeSyqOr8QYXThWtc+VQwCBgVtwuEAO29FTtt+1c34LrRsFxzlqAMnmpR7XACnN+FY4z7BBy1PeeypO6Kasbf+KDgqfL83V51Q1VyOB4+dNWdh44SNmS/LzyrufwM4vR3bq7NhtAt7qT4rvaCy8RuPPdVX0WoV9lJ9Wpilurnc4eprqDEKe6k+z89RzThXV41rbcJu9C9yL6tiUABXm5pwuV7YS7U+O0/xSaFeRmVbO86WVwnO2XzpiuouzU1dJhwrEvZS7bxUAJtd2IiQoq5uKw5cLhEcP3ipBF1m12EoMVlsDuw5VyA4fiqnDC3t4h5YV+J4ih3HrwiOX8qrRE19u6qu1oQQbN9/WcUVvhu6Y4goVKc1T6URAgAc2izCLsJWawl4F2EVV6Lg0Wi5IriAN1urYOOV7757KY3Wcjj4/ncYLdYmmDj1VnuLrQlGR//X6bB3odnWpprRYe9Cs/X2o4yAMyxTYW5Q9eAAABNnQaW5qd8xnvK40l7Tb2VXObLyDpR0CcePLzVXqV6UHJRHXuvtRxl7da5BPYMCyG6uFRw/U6ueAQCX6oUZ56qrVTMYQnCpVphxoVK5x6VXLEOQVS3MyKqolZTXIyYNwyCrokZwPOd6rapuvTcYZcLvI6e0BhpW3dLFMAQ5pcKMy8XqGZRS5BYLM/IKasCy6u4Vz1NcvioeIv1f0B1DRKHs3O31A5SIo2bBPBEbr9x13lcUPBy0f2PDwg0MA3AWQOtPZs7c778rY/S/SzE5Bo5hFLiW0aHWYHPNMDlsqrw6fSUU3uEoD4uIR2YgGABgsqvbtfaqwybs2emyqWcwhKDDKszosIjnP0lldIow2rvVMwgIOi3C96PdrJ4BOD0WQuo0W1QbbTyloowus/zckNsYPEWnSZhhNFsH5FcoFpoxmqyKmqzeqo6ugflc/5u6Y4golJQGd1LkzP/o/8vIYGAYgPDrZcnA9R4QupZGRn0OpQw5NUBcSSPEUFg6vl+GwLWUFqfqT1oBBtPTcvGbZABQ1dpdKmMg7helVLDqqC
u+HIkyRMYGjjFAn4cYQ8Oq/mYRIv5atRpWccfhG4ye6whJw6p/HwCg04rfq4HQQF3nv6k7hohC6Vjl/QH6Ssv6CsaG3TTiLaElM4g7WAFDxGOAGM4y8/3XY/DSyCu2JCZPTf/1GLy1yntb3CofgWt5a9wHbAH3E2AYGC0MzMAYoP66/gtpEULgq1PeN6WvAvTCxboCDMr7pvRVkEG4zofUGiBiogACRQqbBXt6qP7UHTwvygjx8lAd0uApFe1VE+QtvTOzkCil8PcUZgR4uoNRbRwS+HsJM5xj6t4HwzAI8BZh+LirDmMxDEGgr/D308/XXbVnhyEEgf7Kewd9V3THEFEoX0MGtIzyhkiA0xsS5rlYcDxAHw9PheXg+zJivWcJjvvrwhGoj4aaHzYDFoneE8AIeCV8dH6I8YhTdTKHAYNk73TBHjQeGjcM900Co4pBMMwrDr66/ovvaBgWk4JSJRU9ExIBwRDPcIS79d/HgxCCueFqGUCkuz8SvIW/O4ujU1UvSoEGD2QECFeQvCs2WfXi6q3VY1yYcHfgpUOHqTYSDKwGU6MHC44vTkhU7abXMAxmDhkiOL4gKUF1jgilFHMS4wTH56XGqw6bcJRiXmq84PicjATRUzWSGDyPeRkJwoyR8QPCmD1S+H3MHJOg+ORPr3ieYuYY4fcxdVy86jAsTylmTkpUdY3vgu4YIgrFEC2ivO+BmltIwSHSa7XgOCEMkvxWQI2RQMEh0XepCINgtP9iKD5DBoAHh5EBC0XnTA2aq+pkDg8ek4Nmi86ZFzpVsJy7NAbFvLCponOWRk4Ep+qIMMWyyImiJyRWx4xRxQCANTFjRRl3DxmhalFiQHDvkFGioZE1Q9PVfK3AEoI18ekwsMLhw5UJKapCQCwhWJGQDC+dcMn6JcOGwU2jPITJEoKFCQkIcBfegc8dNhTeBuX9Q1hCMC0+FuE+whUspyQMRpCXcg8SQwhGD44ULfU+Ki4SUYG+ip9YDCEYFhksWl01KSYUiVHBisMzBEB0sC8y44WN6Jhwf4xIjATDKH/2Bvp6YHy6sIEbEuSN8ZlDwKpgeHkaMGXcd7e6qlTdMURUKMJrpeK/JWDhbxgPd2206Lyh3vPBKMzjIGAQZEhCgF78i5rsOxU6RlmXRgIGgfpoRLoNE5033HcUPFhPRclZBAT+ukAkeqeKzkv3S0aAzlcxw1vjiVH+6aLzhvvGIso9SFGIhgBwZ/WYHpIhOm+YTziSfSIUeRMIAC2jwaLIdNF5sV6BGBsUo9grQgjBysHi7yPMwxszo+IUM3hKsTZenOHv5o4lccMUMzhKcW9yuugcT50OK1NSVDHuGz5cdI5Oo8GaEWmKPUgcpbhvZLroHJZhsHZcumIGTynWjhNnEEJw72Txz8wV455J4gwAWDMjXXZRtr66e3qGy+PSq2ZnKA6dEEKwclaGSyN5+YIMxZ4XhiFYMmc4dAqbBX6XdMcQUSGDJhiJAS8q+EsGGsYTwwJfcTlTz3pjYshzyhjEgEkhv3Q5U8cYsCjiadkEAgKWaLA44mmXP2oNo8EDg5+QzQAAhjB4IOaHLnvNsITBj+MfltUgr1cEwE/iH3bZB4YQgheS1kLDsAoMHoJfJt0DA+u6R8uvh98FA6OVzaAAfp12F7y1rnNAXstcAE+NXtHC9ErGPAQZXMemfz16Fvz07ooW8RdGTke0l6/Lec+Pm4wQD2X5Dz8ZOR6JAa4brf103HhE+fgoYjySmYmMMNddpB+bMBpDgwJkMwiA1RmpGD9YfFMDAPePH4GUiBD5DALMT0vAzCTh0E+vVoxLxai4KNnfK4YQTEmOxcKR4psaAJg/Zhgmp8XKZzAEIxOisHyy+KYGAKZkxmH2uATZ9V1YhiA5NhR3z3VtkI0cPgiLZqXJ9u6wDMHg6EDct3yMvD/8juqOIaJSUd53I87vp5LnE7DQMl4YEfoh3LVRkv4mznsOxgb95MYVXDMYaBkDZke+CV99jCRGov
d4LAj/Uc/CJ42hITqsjn4ZYW6uH04AMMw7DQ/EPAEGjKQF1snQ4JHYnyDWUzjWejMjDs8kPAoNkWYoOI0pBj+JfxipvtJirQneUfhd2sPQMRpJnhHS8z/PDluNCUEpkhhxXiH46+j74cZqJS0avTOeT16IeRFpkhjRnv74aPK98NLoZS1MP0+dgdWxIyTNDfPwxhez7oav3k0W40dp4/HwsFGS5ga6eeCLhasQ7O4hi/FwWiZ+nDlO0lwfgwGfLV+BCG9vWYzVKSl4btJkSXM99Tp8uOYuDA7wk7XALkhOwMvzpktaMA1aDf7xwFIkhAXJYkxNiMXvls+WxNBqWLz98CKkDQqVzCAEGBUXhdfvny8p1MYyDH7/6HyMTIiSvIgzhCB1cCjefHyRpFNKDEPw4qNzMDEjVhqghxEXFYS3nlkKg8510jkhBE8/NhMzJkjP82AYgqgIf7z50gq4u0lrPPld1zfWfbe8vBy/+c1vcPjwYdTX1yM8PBz33nsvXnjhBeh00m7e/0L33V7VG3ejpPVNWLhaELD9FDtjAfAIMIxHYuDLko2QvqownsD5pvfQZa/pl9H7b2FuIzAu5Bn46oST/IRUaryEA3X/QoutCsz/Y++84+Mor/39zMw2rXq3LMu25N57xdjGmN4xvUPKTbs3NyG/VHJDcnPDTSC9h4TeTDO9mWKDsY1777Ykq3dpe5t5f3+sZFw0s7M7gkvZLx/xAb1n32eb5j1z3vOeg4J2EqPvd0MyxnHu4K9S6jL/R9qng969PF3/EI3Bo8jIp+R19P1umHsEV1bczLBM/SQ/fUY1/zzyBEf8iRhD+ELVVYzLSX6f9ZC3gd/tf5bdnppjLeyPV9/vhrlL+cboS5hZYM6ZOl5HvK38YtdLbOqsNmQMdRdy+/hzWVSafOLaUV8nP9n6Ku+3HDnWXr4/Rrk7l+9OXsp5Q8YnzWj0e/jxB2/wdv0hpH7atfdxB7mz+e60RVw+wpzDdrzaAn5+/N6bvNHbO0aPUZyRybdmzee68cbbJf2pOxTkzrff4eUD+xH9MPpa0ednZPCN2XO4ZVriLYCT5Q2F+Z83VvHCrn2ovQ3O+mPkuJx8ad5MvjR/VtKRgUAkyt2vrmbF5j3Hmv4dT5GkeD+TTKeDm0+bzlfPmJN0Lk44GuN3L63h6bU7iMT6YfT+f4bDzrULpvD18+cnfYw5qqr89fm1LH9nO8FI9NjzPp4B8WO0l58+mW9eviDprQxV07jvufU8/uoW/KEIssQJTQml3n/ZFIULF07gm9cuIsOV3Mk3TRM8tmIDj67YgM8fRpalU7aFJElCkSXOWjSOb35hCZnu1HOKPg4ls35/ZI7Ia6+9xvLly7n22msZOXIku3bt4ktf+hI33ngj99xzj6k5Pk2OCIAQGh3BtdR5HqUrtAlV+JGwYVfyKMu6iCHZVyfMCUnMEDQFt7C3+xkaA5uJaUEkZBxKNlXZSxmXdxm5DuuM+sAeNna+xBHfFiJaAAkZl5LFuJwFTC84jxLXcMuM2sBh3m1bye6erYTUeOGfDMXNlLxZLCheSoXbGgPgiO8orzevZkPndgK9BcncNhcz8idz7qBFjMweCEYTzzesZXXrdnyxIEII3DYXswvGcOmQBUzMHW6pfDdAta+NJ2s38HrjTjzRIJoQuG0O5hSN4Jrhc5lZYJ1R6+vkiSNbeOHoTrojQWKaRpbdwayiYdwwYibzS5MPhZ+sOl83jx/YzrNHdtERChDTVDLtDmYUl3Pz2BksGlxluf5Is8/LY3t38PT+XbQHAkR7GZOLB3HzpGmcOWyE5fojbX4/T+7axfJdO2nz+4moKm67nfElJdw0ZSpnjxxpuTZIpz/AM9t38+TWXTR7vURiKhl2O6NLirhh5hTOHTcKh4UkWoCeYIjntuzhyQ07aOrxEo7GcNltVBUXcO3cqZw/eQwuizkIvlCYlzbtZfn7O2jo6IkzHDaGFuVz9WmTOX/GWNxOa3f2gVCEVz
fs46nV2zna2k04EsPpsDG4MJcrF03mgrnjyMqwtnCHIlFWrt/P0yu3UdPURTgcxeGwMagwm8vOmMwFCyeQk5larl2fItEYq9Ye4NlXtnLkaDuhUBS7w0ZJYRYXnjWZC86cSJ7BseNPkj4Rjkh/uvvuu/nrX//KkSNHTNl/2hyRkyWEMLU4+CN7aPU9Q1htQtNC2ORs3I5xlGRdkbBeiVlGd/gQNd6X8EebUUUQu5xFjqOKqpxLyLAlZgAJOZ3hOnb3vEFPpImoFsShuClwVDAh71xy7CUDwugIt7Ch8x3aw82EtSBOOYMi5yBmFyyh0DkwjPZwB6vb1tAUbCakBnEqLkqcxSwsPo2yDOPj1OYZ3bzR/AFHA834Y0FcipNSVwFnlc5mWGaZ4WP7OInfKy8vNWzhoLcJXzSES3FQ6srlgvLpjM4ZGEZXOMCKo9vY092ENxrCpdgpcWVz8dDJTMovHxBGTzjIs9W72N7RiCcSxqnYKM7I5JLhE5heVJ7w8WYY3kiYFYf2sLmlAU8kjF1WKM5wc9GIccwZNGRAGIFolOf372VDYz2ecBibLFOY4eb8kaOZXzE0oZNnhhGKxnhl337W19bRHQyhyDIF7gzOGj2ShVXDB4QRicV4fc8h3j9cS08ghCxBnjuDJWNHsGhUZcLS6GYYUVXl7V2HeW9vNT29FWFz3S5OH1fJkokjEjp5ZhgxVeO9XdWs3nmYHn8QTYPcTBfzxg3jzKkjE0ZOzDBUTWP9zhpWbT5ElzeIqmnkZmUwc1wFS2ePTrh9Y/b6/knUJ9YRueOOO3jttdfYtKn/rp3hcJhw+MOyux6Ph4qKik+tI2IkIQQdgZdo9PwLX2RH77aKRjxYGf9DlpApdJ9Pee6XyXQkHw4XQtDgX8X+7kdpD20/gSEhI4iHFcszFzM2/yYKXRNSei3Vvg1s7niKusD23nlFLyOebyIQVGXNZWbhlZS7kw+5Axzw7mBV64sc8O1A7mWIXoaEhIbGmOwpLC6+mFHZqTH2eQ7wctNrbO/eidT7vE9mjM8Zy/ll5zAlL3GyW3/a66nhmfq3Wdu+41jYWOtlyEioaEzMqeKyIWcwv8hcrscpr6OngYer3+Ptll1oQiAdz5AkVKExIXcI1w1fwNJBk1K60B3wtPKvA+/zSn28kdqHjHhysSo0xuUO4qaRc7m4IrXTIIc9Hdy75wNWVO8iqqm9z/1ExujcYm4dO5MrqyanFEk56unm7zs38vSBXYTV2AkMRZKJCY2q3HxumzCDa8ZOTqnKaqPXwz+3bmb5np0EotFjW0THM4bm5HLLlOlcN2kyToPjynpq9fm4b8MWlm/fiS8cOYEhyxKqJijLzuLGmdO4YfoUMuzJF83r9Ad4YN0Wlm/cQU8ojNI7L3Dsv4uzMrl+9hRumDuNrBSiHD2BEI++t5Un1m6nyx/sl5GfmcE186dw/enTyHUnH4HwhyI8vmory1dvp93j75eR63ZxxemTuX7JdPKzki/+FwpHeeqtbTz55jZaOr39MjIzHFy2aBLXnTuDorxPf1Gyk/WJdEQOHTrEjBkzuOeee/jSl77Ur82dd97JT3/601N+/1lzRDQR5UjHHbT6nyLudBjVjIgnXY4q+jVFmRclwVDZ1v5bDvYsT8iIOyiCWSU/oipHv8DayRJC8H7b/WzseKLXATFixMcXl36NaQWXJsV4q3UFrzUv7zfX43j1jZ8/6FrOKLkkqQX2taaVPHrUPOOSwReybEhyjFeb1vLHg08eczj0GRIagkvLF/GlqkuTOgX0WuM2frrzaQDDWiR9jEuGzOR74y9JeFroeL3ZuI9vb3waTQhDRp8zd8GQifxi+iU4klhgVzce4SvvPkNUUw3rnfTlGSwdMoo/nHYJGTbzC+wHTXXc+sazhGLRhAyA0wYP4+9LLyXLZI4bwLbmJm554Vl8kbApxoyycv554aXkuswvsHtb2rjtyWfpCgQT1oaRJBhfUsK/rrrMsArryTrS1s
ltDz1Dm9efkCFLElVFBfzrpsspzTG/wNZ39PDlfzxDQ6cnYXE3WZIoL8jhH19expBC85WbW7t9fPWPz1DT0pWYIUsU52bx129cTuUg88UrOz0B/vPXz7L/aGvCI8aKLJGblcEfvrOM0UMTn976NOkjdUS+//3v88tf/tLQZu/evYwd+2HSXENDA4sWLWLx4sX885//1H3c5yEiIoTgUMd3aPM/h/lqT/HL1JjiP1PoPtcUY0vb3RzyPJX085tdcieVOReYsl3T+i82dixPmrGk9BtMKTDn8LzVsoJXm59ImnF+2XUsKbnElO0bzW/xcO3jSTMuHnwBV1ZcZpKxnt8eSJ5xyeCFfGXkMlO2bzbv5IfbkmNIwIXl07lj4jJTTtXq5oN8bd3jvbEiswyJsweP4zezrzAVGVnfUssNbz2OJswzZEni9LJK/rnoSlP5H1tbG7nqpSeICc10RVNFkphRWs4j511pKmqxt72NZU89RlhVk2KMLy5h+eVXm4paVHd2sezBxwhEjJ2pkxmVhfk8eeM1ZDsT5000dHu44u+P4QmFTNe8UGSJwbk5PPnla8l3J44otHl8XPP7x+nw+pNiFGZnsvyb11GUk7hQW7cvyI13P05TpycpRrbbxaPfvY7BhYnXIF8wzBf++wmONneaZsiyhNtp5/6fXM+wQfmmHvNpUDKOSNKxzNtvv529e/ca/lRVfXiSorGxkTPOOIP58+fzj3/8w3Bup9NJTk7OCT+fNbX4HqPNv4LkSk7GbQ+0fZNQtC6hda3vtZScEICNrT+jJ3w4od1h77qUnBCAt1v+THPwQEK7g95dKTkhAK80PcZh356Edod8R1JyQgBeaHyZbV07EtpV+xv5/YHUXsfzje+yqnVLQrv6QAf/tf3JpCubCODFhi08X9//dunxag16+eYHTyblhMQZgtcb9/DQofUJbbvDQb646mmESO4vRBOCdxuP8Odd7ye0DUQj3PL6M0k5IRAvGrapuYG7N72X0Dasxrj1hWeJJOGE9DF2t7Xys/feSWyraXzxyRVJOSF9jCMdXfz4tTcT2goh+NpjzyflhMSfm6Cx28P3nn3NlP3tD72clBPSx+jw+vn2Qy+Zsr/jwdeSckL6GN5AiG/9/QVTvWfuun8ltUk4IRA/MRMMR/nWb1ZYLl3/aVXSjkhxcTFjx441/Ok7ntvQ0MDixYuZMWMG999//wA0Q/p0SwiNhp6/k1rJdoFAo9n3aAKGYF/XQykyACQO9iR2YjZ1PJly7xgJma2dzya0W932Usq9Y2RkVrclvkC93vSmJcbLTYkvtC82vJdStVeIRxOeqX8rod0zRz9AQ0u5ovrD1e8mvNA+VbOZqKamzLj/0LqEpeufOrKDQCyS0isRwP37NxFWY4Z2zx3eS3c4lFJvFw3BI3u34Y9GDO1eP3yQFr8vpTL6mhA8vXc3XUH9FvIAq4/UcLS7J2XGq/sO0uTxGtptrG1gf0t7StU/VSF492AN1e1dhna761vYWtOYGkMTbK1pZE99i6FdTUsn7++pSZlxoKGNLYcaDO1aOr28ufFAStVYVU1Q39rNuh01ST/2s6CPzDPoc0KGDh3KPffcQ1tbG83NzTQ3N39UyE+8ekJrCav1pN6AQ6XF+ziaCOtadIZ30xM5lDJDoFLjfYmo5tO1aQ/X0BjcnXLvGIHKAc9qArFuXZvOSCv7vFtT7h2jobHXs4WuSLuuTU+0hw2dmywx9nkP0BBs1LXxx4K82bLBMCfESALBIV89B7xHdW1CapTn6jZaappWF+hgS2e17nhUU3msepMFVwdaQ17ebT6oO64JwYP7N1lqA9YTCfHa0f2640II7tu12VKjvGAsxnOHjKNtD2zfaumos6ppPLV3l6HNI5u3WW5c+OT2nYbjj36wzVIvFEWSeGKTcdRw+drt1hiyxPK1xoyn39thnfHudkOb51btSPmGo4/x5JvbUn78p1kfmSOycuVKDh06xFtvvcWQIUMoKy
s79vN5VYtvOfHCZqlLFV46A2/ojh/xPIdkmRHhqHel7vju7tcsMwQae3v0Q8MbO1elHHH5UBIbO/VD3Gva11vufikjs7p1je746tYtxITxHXoiKci83rxOd3xVy278qr5zaoohyayo36A7vqblEJ1hv0WGxPLqzbrjH7QcpcHvscSQJYlHD+pvZe1sb+Fgd4elT10CHt67TXf8cFcnW5ubLDmGAnhkp/7C1+Txsqa61lLjQk0IHtuqv4B3B0Ks3HvIUhdaVQie3ryTmNq/Ix6MRHl5yz5rDE3w0pa9BCPRfsdjqsaKtbssM97aepAef6jfcSEEz76zw9JnrmqC9btqaOk0jlJ9FvWROSK33HILorcq4Mk/n1cFo4fhlIqryUohFNO/O/ZEavup6pqcJBR80Xrd8e5I44AweqL60bH2sPXImQR0RFp1x1tDrSlvy/RJQ6Mt3KY73hhqR5EsOoZoNAT1GfWBDpQU+uucwBAaR/360aNaf2dKjf5OZAhqfB36DJ9xCN+MNCGo8erPU+vptswQwFGv/jxHe6wzABq8Ht3rZV13j0UXOq7OQJBQtH9HubEn8ekVM/JHovQE+1/A2zz+Y1VXrSgSU2n39O8o9/iDBML9OynJSBOC5s7+HeVwJEaX13grzawa23oGZJ5Pkz7fSRsfs1Rh7Y4S4vkVqsG2SUyzzgCIiYDuWHgAGAJBRNNnRLRwyls/fdLQCGv6F4eQGrIcEQEIqPqMoBoeEIY/1v+FPM6PWAoJf8jQj6oEYhHLVVXjDP3cioFiBGL6i44RPxkFY/pRLn/U+qIH8YVPL98lMEAMAH+k//ckoPP7AWWEB5ChM9dAOCEfJyMQGrj35NOitCPyMUqRrBetEQgUWX8eu8FYMrJL+sfhnHLio3KJJCHhkPXrGDhll+WtGRkZp6x/dNCluAZkAXcr+gy34hwQRqZNv66EW3EMiLOTadM/yum2OQbk7jjLpl+DY6AYboNaIpkG/AFjpFAsrD8pkqR7TDjTREM1s9IrPJaZRL2UlBkWS7ufMJdLh6Hz+9QY/f+NuJPsLWPIsFiK/tOotCPyMcptH4XVHBGI4bJV6o7mOKoGIH8jRpZBv5p8R4VlJ0FDJd+hXwK82JW4bXoiCaDYqZ+TVOYalHKiap9kZAa5SnXHh2SUEBPWQs8KMkPd+qXlh2UWJzyNkpAhyVRl6ZfIr8wqspSoGmdIVOXoF22qyim0ND/EC7WNzNWfpyrPep0GGajM1Z+ncgAYEjA0N0+3tsuw/LwBcG+hODMTp06/msF5OZZ78gBku5zkZvTvSBfnZuG0Wb0mgtNuo1ineFpupstynxmId/wdXJDd75jLYacwdwBu0CQYUmK+QNtnRWlH5GNUafa1WM0Rscl5FLiX6o5X5VxqOX9DkTIYmnWW7vjEvHMtb5vIksK43DN1x2flLyb100V9EswqWKw7elrR3KSqlvYnDY3FJafrjp9ePA2nbO1uSUXj3EH67eoXlY4n2yBiYoohNC6rmKM7vqB0BCWu/i/C5hmCaypn6o7PKh7C0CxrC6yG4PpR03XHJxSWMqGgxFK+iwbcOG6a7vjwvHxmDx5i+UTLjZOm6o6VZGWxeGSlJYYsSVw/Xb8DcW6Gi3MnjLZ8aubqGZN0y++77DYumTXB8omWS2eO123Op8gyy06baJlxzozRZBuUlL9iyRRLW4uKLLFgStVnstx7IqUdkY9ROc7ZvdGMVL+sMqVZ1yFL+qHGAtdY8p1jU2ZIKFTlXIzNYEujwFnBEPcUC3VEFMbmLMGl6Besy3MUMj5nhqUaHxNzZ5Fr1y/NnG3PZk7BLGuMnPGUGkRE3DYXZ5XOscCQGJM9jKos/eiRQ7ZxWcXslBdXCRieWczkPP0omCLJXFs109ICXpaRy2klI/SfhyRxyxh9R8WMCpwZnF0x2tDmlonTLUV3Mu0OLh4x1tDmpslTLZ1osSsKy8YZ95e6Ybo1BsBVU4z7Ml03e7Kl0yaaEF
w907gv01XzrDFUTXDVfH2HCmDZAuuMKxcaMy5ZNDH1S3sv44ozp6Y+wadYaUfkY5QkSZTn/hup3elLyJKdQdnXJbQcl39zygyQGJl7RULLmYVXWoiKCKYVJC6Nvqj4Qks1PhYVX5jQ7ryys1LOr9DQOL/snIR2F5cvJNUbJQ3BlRX6kaM+LRs6B5uspFgqD26qWpSwxPsVw6bjVGwpOyNfGDU/4R3jsqpJ5DhcqTPGzk7YmO7iqrEUZ7hTiiZIwC3jpyXsaXN21UjKs3NSZEhcO2ESOU7jKNeCymGMKCxIiSFLEpdMGEtxlvF2wrSKwUwqL00pmiBLEmeOHUFFQZ6h3ZjBxcweWZESQ5El5oysYHSZcQfxiuI8Fk8egZwiY8KwUqZUGpeeKMrL4ty5Y1OKiiiyxPDBBcyZMCzpx34WlHZEPmaVZF5Jadb1ST4q7iCMKf4LTlvi3ImKrKWMzbsxBYZg3qD/JscxPKF1ZdZs5hXdlCQjrrPKvkWJS//OuE9VWeO4eHBqjEsH38LwzDEJ7YZnDuO2ytQYVw65jEl5iTsWV7hL+c6YZD+PuK6qWMppRcZ3YgBlGfncNfVa6O0UnIyuqJjDBYP1txr6VOTK4i/zrkWSkiNIwCUVU7iualZC2xyHi/sWX4kiy0ld0GUkzqkYzVcm6G9h9clls/PguVdgl5XkGJLEgvLhfGvGaQlt7YrCgxdfTobdnpSjIEsSM8rK+MGCRaZs/3nlpeS4nEkxFElibEkxd56d2MGVJIk/XXMxBW53Uo6CIkkML8znrssSO+oA99x4AaW52ckxZInS3GzuvtFcb6yf3XQOw4rzk2bkZ7n5zZcvNtWL6Xs3L2XU0OKkHB5FlshyO/ndty5LyVH6LCjtiHzMkiSJqoI7Kcu+pfc3iRK1FCTsjC3+G/kZZ5jmTC78BuPyb40zE3zMEgoSMvNK/4eKLP38k5M1p+h6TivuYxi/jj7G2WW3MyHP3MUJYGHxBVwy+GaAhNsbfeOXDr6FBcXnmWYsLjmd2ypvQkIyzbi6YhkXDT4/CcZ0vjv2RmRJNs24dug53DI8cVSnT6eXjOOX067HJskJ64r0LVzXDjuN28dfZLqL8NziSv4271qcii0hoy+qccWw6fx8urkLOcD04iE8vOQaMpTEi3ifI3HhsHH8/rRLTDsWEwpLWX7BNWQ7Ei/ifa9j6dAR3HvWpQkjLn0aUVDIk8uuIT8jIyHjWIffIUO5/+JlpprqAQzJy+XxG66mJCsr4WuXen+mlpfx0LXLcJs8eVOak8XjX7ya8twc04yxg4p5+NYrydY5ZXKy8jMzeOgbVzO8ON9U9FCSoLK4gIe+cTX5mYmb6gFkZzj557euZHR58bHnaSRZkhhcmMsDt19Nicm8jQynnT9/9womjSgzzSjKy+LeH17D4OLPX5Jqn5LuvvtxKpnufZ9GdQbeotFzP57wWj50SDQkFAQqsuSgOPNyynJuw21PHEHoT82B9ezvfpzmwNpjzoJAQ0JGoCFjY2j2uYzJu5Y856iUGHX+7WzpfJYjvvW9d+NS79zyMdbonEVML7ic0gzj/Xs91fj3s7rtZXb1bAT6Wsx/+DpAYlLubBYWX8DwzNQYR3w1vNa8kg2d8XLpfW3s++7/BYJpeVM4t+wsxuUkjrb0z2hgRcMqVrVuRhUasiShCYGMhCC+3TMrfzyXDlnE9HzjPAR9RgtP1KzllcatRLUYsiTHGb1XeFVozC4cyTXD5rOgJDVGja+Dhw9/wLO12wirUZRehnQcY2bhMG4cMYezBo817YQcr3pfN/fv38QTh7YRiEWxHWPEP/+Y0JhWVM6tY2Zy4bBxKTGa/V7u372Fx/ZtxxMJ98uYVFTKrROmc+mI8bpJl0ZqC/h5cPtWHt25ne5wCJvcyyB+YxLTNMYUFnHLlGksGzsBu5L8KZLOQJBHt2zj0S3b6QgEjzEgvtjFNI2qgnxumjmNKyZP0D0pYyRPMMTjG3fw6IZttH
r9/TIq8nO5cc5Urpo5WTd51Ej+UISn1u/gsTXbaOr29ssoy8vmugVTuWreZNwpHP8NRWI8+/4OHl+1jfr2nn4ZxbmZXL1oKledPtkwQVVPkWiMF97dxfKVW6lt7kKR5fgWsIh3242pGvk5GVx55lSuOHMqeVnmnKlPk5JZv9OOyCdAwWg1bf7nCMca0UQIm5yN2zGW4sxLscnGr1vTQkiSgiQZ3934og3UeF8hEG0iJoLY5UxyHSMYln0+TsXYE1e1MJIkIydgeKOt7OlZSU+kmYgWxKlkku8Ywvjcs3Db8hIwIiBJKAkYnmgXmzpX0x5pJqQGcSkZFDnLmJm/iBy7MSOmxYsO2RKcYumJeljTvo6mYBNBNYRLcVHiLOL0ovkUOPWTX+OMGAINu2x8gfRE/bzVspGjgWYCagin7KDUlc/S0tmUuoyPscY0FQ0NR4LX4Y0GebVxG4e8zfhiIVyKnVJXLucNnsbQTOM99ZimogoNh2wzXOD90TAv1e9kT3cTnmgYl2Kj1JXNRRWTGWFwVLePEdVUXIrdkBGIRXixZi87OproiYRwKgolGVlcPHwC4/L1jxzHGVovw/h1hGIxXqnez6aWBjyRMHZZpjgjk4tGjGVSkf7RaYj3hQmrMTJsxq8joqq8fvggHzTU0xMOYZcVCjIyuGDUaKaWlhk+Ns5QybAZv46oqvLWwSOsP1pHdzCETZbIz8jg7DEjmTmk3PCxmhCEojEy7MYMVdNYfbCaNYdq6QmGkCWJvAwXZ44dwZzKigFhaJpg7YFa3t1bTU8gXswv1+1i4bhK5o8eZriFIYQgGI3hstkS2m3YX8fqnYfp8cebIOa6XcwbP4wFEyoNnU4hBMFIDJc9MWPbgQbe2XyIHm8QVdPIyXQxc1wFC6eNwDYAR5c/qUo7Ip9hCaHSE3yLNu+D+MLrEcQXV1nKJN99IcXZN+F2GGepJ2ZotAbfp8bzBO2BD9CIV/pTpAzKMs9keM415DknpXQH+iFD0BDYwu7uFdT5P0AVfQwnw7NOY0LeZQzKsM6oDezhg45X2O/ZRLSXYZMcjMqeztzC86jMtM6o9h/mnda32Nq9iYjWx7AxPmciZ5QsZVzOBMvHhA/5jvJK47u8376VkBbuZShMyh3NBYMXMj1/guUy74e8TayoX8ubzdvxq6EPGXnDuKLiNE4rGo/N5LaEno5423iydgMv1m3HG+tjyEzMG8J1lXM4s2wcdjn5O+njVe3t5PFDW3imZgfdkXjVW0WSmZBfyk2jZnF+xTjTWx96qvP28NiBbTx5cAcdoUAvQ2Jsfgk3j5vORZXjEia0JlKTz8vju3fwxJ6dtAX8iF7GiPxCbp40lUtHj7NcdKzV52P5zl08sX0HLT4fgnhUYHh+HjdMncrlE8aT7bRWg6PTH+Dpbbt4YvNOGj0ehIhvrVTk5XLdzClcNmUCeTp1RsyqOxDiuc27Wb5+O3WdPccYZbk5XDV3EstmTqQgS7+Aohl5g2Fe3LiH5e9t52hb97GoVnFuFlecNonL506kOPfzd/RWT2lH5DOqTv/zNHT/nKjaQnwr5+R6IfHfuR2TGVrwK9wO4+N//anJ/za72/+XoNp8bIvoePX9LscxmslFPyHflbzTc9T/Ae+3/A5PtNGQkecYxsLS2ylzJ07WPFnVvl280Pg32sMNyMinnL7p+12Bo4yLBn+ZkdlTk2Yc8R3i4dr7aQjWJ2AUcs3QG5iap1/fQp9Rz58OPsphf10CRi63VV7O6cUzkmbU+Fu4a89T7O45iiLJpxRHk5HQEBQ4svjqyPM5b3DyR2zr/J38ZPtzbOqoQZGkU46d9jHy7Bl8feyZXD18dtKMxoCHH2x4iTUt1YaMbLuTf59wOreNnp20E9oa8PGDta/xdv1hZANGps3BVyfN4WuT5yV9iqIrFOSH76zktSMHkXq37o5XPK0cMmw2vjBlBt+aPT/pLSNvOMx/vfkWL+2LdyrujwHgsCncMHUq/+/0BU
lvGQUiUX7++js8v2MvqtA4eaXpY9gUmSunTeL7Zy1MessoHI1x98vv8vTGeGO9/hYzSYo7opdMH8/3L15sOjemT1FV5Q8vvs8T720j2tsX52RO32d87vQx/OiqJWSZzI35LCvtiHwG1dzzZxp7fmnSWkaWnIwovo9sV+IM/z5V9zzOro67+PBSl4CBwsxBv6XUvdA0Y1/PK7zb/Kve2RMx4umjZw7+MSOyzSfq7uxew9N1v0X0/pOIAHDZkG8wLX+Jacb27q387fCf0IRq+vjvdUNvYnFJ4pMKHzL28/M9fyOmxUzXvbh5+KVcPsR8wvGO7hq+s/VfhLQomsnqrLdWLuULI842zdjT3ciX1z+IPxY2XQH2xqp5fGf8uaYdhQM9bdzwziN0R4Kma2tcXTWVn88837SjUOPp4trXHqc16DPNuKRyHL8+/ULTFUobvB6ufe5JGrwe04ylw0fwl3MvwmHSUWjz+7lh+VMc6eoyVVJfAuYPG8o/Lr0El8ny9d3BELc+8gz7WtpMMWRJYmp5Gfded5luOfiT5QuF+bf7V7DjaLNpxpiyYv75xWXkmcz7CEaifPPeF9hw8OgpjpQeY3hpPvd+/QqKcqxXWv00K5n1O31q5lOgdt9jSTghABqaCHGo7VYCkT2mHtHge6XXCQFzNUg0NGJsbP4WXaHtphg1vvdZ3fyr3oXbDEMgUHmr8b9p8Ou3dj9eh33bearut2hophyEPmfl2fo/sd+z0STjIH87/EdUEUuqBsljRx9iY+cHpmyP+Or4+Z6/EU3CCQF4sOY53mxeZ8q2xt8Sd0LUiGknBOD+6jd58ugaU7b1gS6+vP5BfNFQUmXoHz6yjnsPvmvKtjng4aZVjyblhAAsP7KNe3a8Y8q2IxTgutefSMoJAXihei93fvCmqa7jPeEQN7zwdFJOCMBbNYf57tuvm2IEIlFuffpZqk06IRD/S113tI7/fPkVVC3xZxiOxfi3J55jv0knBOIRmW0NTfz7Uy8SVRNXho6qKt985CXTTkgf40BTG1974DnCOh2Hj5eqafzgoVfZeLDOlBPSx6ht7eJrf1sxoI3wPutKOyKfcEXVDuo6f5zCIwVCRKjt+E5ihuZje9udJF8WMO4obG27I+FFMKaFeafpF0nO3yeNd5r/By1BzxZVqDxd9ztSLQ3/TP0fjiW06kkIwX3V/0hq4T5eD9X8i5Cq30m3j/GHg4/0Jr4m/1r+evgJPFH9Ds19unvvs3EnJAXGnw68SFuoJ6HdXTtfxh8LpcbY/xa1vo6Edv+7/S06w4GUqoz+fd86dnc1J7T79Zb3aAl4k2YI4JH9W9nYWp/Q9s+bPuBoT3dKjOcO7GVVbXVC239t2sT+9vakGZoQrDx0mFcPHExo+/imHWxvaEqJsbb6KM/tSHzz9PzmPaw/dDTpJomqEOyoa+KJ9TsS2q7cdpB3dh5OnqEJDja289A7m5N63OdZaUfkE64O/3ILvWM0gtFdBCLGf3QN3pdQRZjUFnANf7SWjpDxH90R7yoimi8lhkDgj7VT5zeOJuz3bMIX606xUqogqPrY41lvaLXPu4e2cGvK1VjDWpgNncYRi4O+Wqr9DSmXIVeFxtstxu9Vta+F7d3Vlkqdv9BgzGgIdLGm9UDKZcgVSeKpWuMoVXvIzyt1ey0wZB49ZPzd9UTCPHN4l6XX8dBe44heKBbl8T07LDEe3LnV0Caqqjy8bXvKHY5lSeLhrcYMTQge3rg15TZREvDQhm2GNzZCCB55f2vK1YoR8OjarWgJSr4//u62lHvHaELw5JrtxFRrPbk+L0o7Ip9gCaHS5n0QLDWYU2jzPmzAEBzxPGZh/nhyaY3ncUObXV3PWOrYKyGzq+tZQ5sPOl62zFjf/rKhzTutb6bcNybOkHir5Q3DC+0rTe9aOgEjELzUtNowavNc/TpLDA3Bivp1xDR9J/np2k2WTiSpQvDM0c0EYxFdm6eObDMdNu+fobGiZieeiH6Uas
XhXUTUxKF8fYbg1dr9tAb0o1QvHTqAN6L/Os0wVh+toc6jH6V6+/AROgKBlBmaEGxqaORAe7uuzbrqo9R3e1J2bwVwoLWdHY36Uaoddc0cbOlI+XMXQEOXhw8OH9W1OdjYzrbqxpSdNoAOb4DVu46k/PjPk9KOyCdYgcguomqTxVlUugIv6o76ojX4ozVY6XQrUGn2v4Um+r9Y+6NttIX3W+rYK9CoD2wkovV/IQ2qfo74d1pm1AX344129jse02Js796acv+bOEPQFGqkNdzS/7gQrGnbklQ+RX9qC3dS7W/QHV/Zss0yozvqZ2dPje74qw07LV3IAfyxMBs69LccXjy621JUByCiqaxqOqzPqN5raX6IL+Jv1h3SHX/p4H5LnVshXhzttcP6WyevHDhgmaFIEq/uP6A7/tqeAykVfTteNlnm1T36jNd3WGcosszrO/Xfqze3H7TUrRfihctWbtN/HWl9qLQj8glWTNO/80hGmgigiXC/YxE18R68GQk0opq337Gg2jUgDIBQrP87voDO71ORP+bp//eqL+UtmZPljfbPCGsRojoOXbLqifb/eWhCwxsNDgijK+JPaSwZdYb152kPWWfISHQYMFqDfsufuiLJdIb0oxGtAZ9lp02RpGM1TfpTm89vmSFJEp1B/e9Ohz9gKqHVSEIIOv36jE5/AEthMEDTNDp9+u9VpzdgKZoXZwjaPQPzN/BZV9oR+QRLJEjOTG6u/he31PNPzDMSJZkmI43+GeoAMlSd9yTVBNV+GTrP12qU4njpbZvEzywNjEMVM3jfrbaoP8Yw2P4ZEIYUr8Cqy7C4sPYpajCPET8ZGeUkDBTD+HVY/zwExs81pln/9gogavBeqZpmJUh8TEaMtD5U2hH5BEtJUN49iZmQpf6rCtrk7AFigF3n+TqVgWM4dZ5vhjJwFQ0zlP7P/7t1fp+K3Lb+58pQnEl3z9VTlq3/z1yRZFwJStCbVbZNv0dGlm1gijrlOPQZOXbrDE0Ich36dSVyndaqfh5jGMxT4LLea0QTglyXPiM/I2NAvlm5BpVWczOS6wTcn2RJIsegIFiOyzkgW0y5bn1Gdob175UE5GVa/+58HpR2RD7BcjsmIklWv8gKWU79CpLZ9hED4IzI5DrGocj9//Fm2weRoRj3aDGjXHsFLp2+OFm2PAocg0j+CPKJyrEVkGfvv0eKU3EyJGOoZUchU8lkkKus3zFZkhmXU3Ws42uqcsoOKrOG6I5PK6hCsfjnb5MUxudU6I7PKaqyXHZeRmJq/lDd8dMGVVpe+ABmFeszFpQNt8zQEMwu1X+v5g0ZanlxVYVg9mD9z3xOhf6YWcU0jdkG88weNsRylCqmacweps+YVTXEcnRHFYKZlfqMmSMrrEeQpPg8aSVW2hH5BEuRsyjMvJIPO/OmIpWS7FsNGE6GZV9h6bQJaFTmXq87Kks2JuZfZnkBn5i/TNehkiSJuYXnW5pfQmJO4fnIkv77vaT0LEvbGjIyC4uXYDdoWHfh4EWWEjBlZJaWziND0b+rWzbkNFQLSbeKJHPWoKnkOvSjRNdUzra01aRIMmcMGktphn5k8PoRMywtfIokMb9kOJXZ+o7y9WOmWmLIksSUojImFpbq2lwz3lp/KAkYkVfA7LJyXZtlE1Pr7Hu8yrKzWVRZqTt+wYSxuE1WX9VTvjuDpWNH6o6fOWGk6cqoenI77FwwVb/z9PxxwyjNsxZltSkKl8xJvs3G51FpR+QTruKsmzi1p4x52eQicjOMS34Pz7nS0uJqk7IYnHmOoc3Y3Auw8nVTJAejc4zLik/NX4Iipd7MTEJmeoFxCfZZ+XNwyalfBAWChcWLDW3mFEwhR2frxow0NM4rO93QZnbhKEpdeSkzVKFx+ZD5hjZT84dSlVWcsvupCo1rKucY2ozLL2VqweCUI0iqENw02rh3zrCcfE4fnHpURBOCW8cZ9wAqzczinKqRKTMEcOuU6YYJlrkuFxePG5syQ5
Ykbpo21TBy43bYuWLaREuMa2dMNixX77ApXD13SsoRJEWWWDZrIhkGPWcUWebahVNTTlhVZIkLZ44lx6LD9HlR2hH5hCvDMYYC9+Wk+lGV5/0QKcHi7LYPYVj2VaS6rTGu4D90t2WOMWyFTCm4JqX5AWYU3oIjQY5GhpLJGSVXpcw4vfgysmx5hjZOxcml5VekzDijZCmFziJDG5uscHPlpSnNLyGxpGQOFW7j1vWyJPP1URemzFhUPJFxucZhZ0mSuH28sYOqJxmJecUjmF2of/fdp+9OWZLSV1eRJKYXDuGMslEJbW+fdjqSlHxMT5EkJhSUcN7wMQltvzlrHjZZSYlRlZfP5WMS331/dc7seOv6JBdYRZIoy87mmsmJIze3zZ1Bdgp5HIokUZCZwQ2zpia0vX7+VAqyMpI+YitLElkuJzefnrg55LJ5kxiUl50Sw2m3cdvS5Bs3fl6VdkQ+BRpa+EuynHNJ9uMqy/02hVnmFs2JRd+jxH06yV7RR+TezPBccw7G7KIvMiLbfGO5Po3LvYipBdeZsl1YvIwZ+eabvvVpcu5ClpRea8r2jJKlLC1NfoGdkjuNKyvMMZaWzuPqivOSml9CYnLuaL420hxjSelkvjbqgqQZ43Mr+K+J5hinl47mh5Mu7H2sOclIjM4p5TczrzF1RzqnZBi/mn0RUhIMRZIYnlXAvadfZaoh3dTiwfxx4cVIkmQ6+qJIEmWZOdy/9EqcSuJI3djCYv5+3sUosmx6EVckiSK3m4cvvsLUlsjw/HzuvexS7Ekycl0uHrxyGTkGybB9KsvN5t5rL8Nps5mOjCiSRIbDzn3XLaMws/8k6+NVmOXm3i8sI8NhN+0oKHLcQfj7rZdRlpc4Ly7H7eLvX7uc7AynaYYsSdgUmT99+VKGFueZekxa6e67nxppIkxtx3fpCqwgnjOit10jAxIV+XdSnH1zkowYu9rvotb7FBKKwdHe+IV7XMF/MCL31qTCl5pQ+aDtb+zoehIJWbcAWXxMMKPwZmYU3pIUQwjB262Ps6r1KRMMjQVFl3LWoBuRk0isFELwWvPLPNfwNCDpMmRkNDQWFS/hmqE3oBjkn/SnlxpX8a8jzxp2Eu5jnFEyh6+PvBa7nNz21EsNG7ln37PxVu06DEWSUYXGGSWTuGPCNTiV5PIAXm3YyY+3PUtU67+N+vGMBSWjuGfGVbiTPHXzZsMB/nPdc4TUaELG3JJh/PW0K8gxOC3Tn95rrOar7zyHLxrR7VGtSBKqEEwrHsw/z1xGoSvxwnq8NjTW8+VXnqc7HEJG6jdfqI8xrqiYBy68nNLM5PIZdjQ188UVz9ERCCBLUr/1RfoYIwoKuG/ZZQzJ7T9RXE/7W9r50uMraPH6EjKG5OXwz+sup7IwPylGTVsXX77vWRq6PLqMvt+X5mTxt9suY/Qg44jkyWro6OHrf1tBdWsXiiyh9nNEuY9RkOXmT/92CROGGkckPw9KZv1OOyKfMgUie2n3PUyH/ynESUXKbHIJxdk3U5R1NXalJGWGN3KEWs+THPWuQBUnFhZyyPkMz7maoTnLyLDpJ98lUk+knj3dL7C350WiJ1VLdcrZjM+7hHF5F5FtT/0PuivSysbO19nY8Toh7cTCQk7ZzcyCs5hdcA4Fzv5PsJhjdPJu2ypWt72FL3ZiCW+n7GRB0SIWFp9BWcZgCwwPK5vX8nLTu3SfVAjNIds5s3Qu5w06nWGZVhg+Xm7cyDN1a2kLn1gczi4pnFM2ncuGzGNMTuonL3oiAV6o28aj1etpDHafMGaTZM4rn8Q1w+cwMa885b15byTEitqdPHhgIzW+EwvpKZLEeUPGceOomcwoGpIywx+N8NyRPTywZxMHe04sCCgjcfawUdw0djrzBg1NmRGMRnnx4D7u37mVve1tJ4xJwJLhVdw8aRoLKoalnCsRjsV49cABHtyylR3NJ1b6lYAFw4dx07RpLKocnnIl04iq8ua+Qzy8YRtb6htPGZ
8zvIIbZ03ljNFVpiJT/Smmaryz9zCPrd3GhiOnNhecNmww18+fypkTRuKwpZasq2oaa/bU8MR721i3r/YU13Di0FKuXTiNs6aOwmlPPU/ts6S0I/I5kKp5CUR2EtN6kFCwKYVkOqbo5oMIIQhFthFT69FEAFnKxmkfg8M+QpcR0wJ0h3cT7WU4lDzynBORpf7vhoUQ+CJ7CMbqULUAipxFpr2KTId+BnxMC9Ma2ktY9QASLiWHEtc4FIM6F53hQ3gjdURFALvkJttRQYHTiBGlPniQQMwLCNy2bMozRmI3yGtpC9XSEakjogVxyBnk2csodVXpLiwxLUaN/wi+mA+BhtuWxXB3JU6DkystoUaaQ/WE1CAO2UmBo5ihbn2GKlQOemvpifrQhEamzc3IrArcBrU8moOtHA00EFCDOGUHhc4CRmVVGjA09nnq6Yr4iGkq2fYMRmeXk203YnRyyNeAPxbCIdsocuYyIXe4boRJExp7e5poD/uIqDGy7S7G5paR59CPHLQEe9jracQbDcYZrmym5g/TPR4shGB3VzNtIR8hNUaO3cWYvBKKXPp5Rq1BLzu7GumJhnDICkXOLGYUVWCX+1+8hBDs7WqjJeAlGIuS43AxOq+IErd+dKI96GdbeyM9kRA2WaHI5WZWSYVhcub+jnYavR4CsSg5Ticj8wspy9LfWugKBdnS0kh3OIRNkinIyGDWoCG4bPoL5KGODup7PASiUbKdDqryCyjP1b/mekIhNjc10h0KIUkSBa4MZpWXk2GwPVTd0UVdVw/+SIQsp4PhBXlU5Ofp2vvCETbXNdAdjPcBystwMaOinCyn/rWhrqOb2o5u/KEIbqeDoYW5DCvSj7IEI1E21zbQ5Q8ihCDXncG0oWXkZOhHyho7PdS0duELhnE77QwuyKFqUKGu/edVaUckrWNSNS8e/9N0+f5FNHZqA6YMx1zys28lK+NcJB0HI5Fimp8W/0vUex7GHz21f0OOcwpDsm+kJPMcZCm1QloxLUy1dyX7ep6mM3xq/4YC5xjG5V3J8KwzsSVInNVnRNjnWcOmzhdpCu0/ZbzYOYyZBZcwPncxjhRPzsS0GDt7NvJu2+sc8Z/KKHGWsbD4XGYVLMClJBfS75MqVDZ37uC15lXs9pzKKHUWc27ZYhYVzyNTp+hZYobGxo59PNewho2d+04ZL3HmcemQBZw7aDa5jtSOQWpC44P2wyyvXc97rftPuQstdmZz1bA5XFoxk0JnagwhBB+01fLI4Y282bT/lNB+odPNdVUzuapyOqUZqdXbEUKwua2Bh/Zv5uXafaccBc5zuLhh9HSuHT2F8szktj+OZ2xva+bh3dt4/tDeU2pgZDucXDduMtePn8LQnLyUGAC7W1t5ZMc2VuzdS0Q9ces2027nqomTuH7SZKoKUq8bdKC1nce27ODZ7bsJxU6spOyy27h88gSumz6Z0SXJbbEcr+q2Tp7YsINnNu8iEImeMOawKVwydRzXzJnCuLLUI8ufd6UdkbQA8IfW0Nh+G5ro25bod0cbULHbhjOk+DEctuFJMXpCW9je+hViWg/o7prLgIZTKWNq6b/IdOhHYfpTR2gfbzbeTkjtMmDEf+9SCjir/DcUOEcnxWgL1bL86B14Yx1ISDq5EnFGhpLNlRU/pdytX4egX0a4mb8euouOSKth7gqAS87gi1W3Myp7QlKM9nAn/7PnDzSGmo/ljvQnCXDIDr41+stMy5+YFKMj7OGHO+7lkK8BWZJ1S99LSNgkhR+Ov56FJVOSYnRF/Hxr0yPs6K47ltehx1Akmf+afCkXlk9LiuGNhvj6uqdY31ZjyJCRkCT4ydTzuKYq8WmL4xWIRvjGe8/zdsNhQ4bSm2PwoxlL+MK4WUlt6YRiMW5/51VePrL/WM6FEeNbM0/j36fPTYoRUVXueOtNnt6zOyFDFYJ/mzmL/3fagqS2jVRN43/eWMUjm7fr5mIcz7hhxhR+dPbipLaNNE3wu5Vr+O
d7m4wZvWOXTZ/AnZecabn+yudRaUckLXzBlTS030Z80TZTVEpBlrIZVvqi4XbN8eoMrmN7yxd7F1RzDEVyMaPsMbIc5hbx1uBO3mj4dzQRM9VZV0JGluycM+RPFLvMLeItocM8XPP/iGmRJBgyVw/9OcMyJ5tkNPDbAz8hrAZNde+Vev/5YtV3mJBrboFtDbVzx65f4o36TTMAvjn6i8wrNLfAtod7+Mbm39ER9ppkxL+B/2/sNZxbZu44Y1fYz83r/k5TsDupgmjfG38hVw+fa8rWEwlx7eoHOOxtT6oR3HcmLuHLY04zZRuIRrhm5WPs6mxJivGNifP5zrSFpmzDaowbX36aTU0NSRXBu2XiNH4yf4kpZySqqnz5hed5t7YmqWpDV4yfwC/POtsUQ9U0vrXiFV7fd9A0QwLOGTuK311+gSmHRwjBHSveYMWWPSYJccbpo4fzp+svwaakD5kmo2TW7/Q7+xlUKLKHxvYvg2kHAUBFE17q2q5B1RJ3sg1Eq9nR+tUknJA4QxUhtjV/wVTXX1+0iTcbbzfthEC8C7AmorzZ8G380ZaE9v5YF0/U3mHaCfmQofJU3Z10Rk5NwDtZgZiPvxz6hWknJM4QaAjur/4tjcGjCe1Daoif7/m9aSekjyEQ/PHgfRzyVie0j2gxvr/973RGzDkhcUZcv963nG1dhxLaxzSV/9j0cNJOCMAv97zEmtZTt6JOeU5C8I31TyXthADcs+ttXqnfbYrxH2teSNoJAfjTrrU8eWiHKdvvrXqdjU31SVfifWDXVh7YtdWU7c9Wr0raCQF4es9u/rJxgynb36x6n9eScEIg/t16bd9Bfv3OGlP2f1+9ISknpI/x3oEafvHyO0k9Lq3klHZEPoPq8PwWQYzk20eqxNRGenyPJ7Ss7b4XISKYd0I+ZES0Thq8iRl7upcT04KmHYQ+CTSimp893U8mtN3U+SJB1ZsCQxDTInzQ/kxC23Udb9MT7TK9eB9PUYXKyubnElq+2/YBLeG2FBjxRfPp+pcT2q1u3Ua1vznlsu33V7+a0GZN2wF299SnxJCQ+MP+N0gU5F3fVsP6tpqkHYQ+3bPz7YSP3d7RxJv1h1Jm/Grr6oS9Tg52tfPcob0p10T+zaY1hGJRQ5v6nh4e27E9ZcafPliPNxw2tOnwB7hv/eYUCXD/B1vo8AcMbbyhMH9f9UFK8wtg+YYdNHR5EtqmlZrSjshnTDG1GV/wVVIvCy/o8t2HMFgIomoPzf4XDOqMJJJGvedRNKF/EYxqQQ72vJgyQ6Bx0PM8MS2ka6OKKFu7Xk7aCTmesbPnTUKqT9dGExrvtr2Rcgl9DY1t3R/giXbrPw8heLUp9Ts2DY2t3btoDbUb2q2oX5NyKXUNwa6eamr8zYZ2j9esS5khEBzytrCr59QjnMfr4cMbLTXjqw90s67VOIL08P4tlhrltYf8vFl/auL38Xpk93ZLDG8kwstHTk38Pl6P79qZ8hFkiOeWrNi319DmqW27LDSYiCdOP719l6HNC9v2Eoml3ipDkiSe2rQz5cenZay0I/IZU7fvMctzxNR6AqF3dcebfCt6Iy6pK6p10h54S3e82ruS2Ek1TJJnBKj2vqk7vt+zlqDqtcRQRYyd3fqvY69nO93RxNtQRhII1nes0h3f5z1EY8h4gU8kGZk3W97THT/kbWC/96ilZnyKJPNCw/u640f97WzsOGKZ8WSt/p1vc9DD240HLDbjk3j08Ebd8a5wkOdr9lhulPfgPv0ogT8a4cn9Oy0zHti5RXc8oqo8tmN7ylGdPj24datulErVNB7ZtM0SQwh4ZNN2VJ0IkhCCR9aZ24bSkyYET2zYbsmZSUtfaUfkM6Zg+AOS3y45WTaCEf293Z6w/sXLrCRshvO0BncgWeo6DBIKbSH9u5j64F5kywxoCOrvO1f7D1hmCARHfKceke3TPu9hZIt/yhoaezz6d8e7eo5Y7J0cv3Pd3nVYd3x7V+JcGDOMzR
360YrtnckldfbPEGxs13+uuzqaLbeQ13qP/Oppf2c7wZi1mwFNCHa2t+g+15ruLnoSbKskkgCqu7t0t2davD5aff5+x5KR0TyeUJjajm6Lnzp4gmGOdnZbnCWt/vSxOCLhcJipU+OdDLdt2/ZxID+3UrWuxEYJJRkmrEbVbpLPPzlZgqimH42IaD4LWz99BI2IASOs+ix1HY4zhGFUJaj6LS/gAH4DRiAWsBQ+75Mvpr/P7ouFkiqBryevAcMbDaW8LXMiQ387zhu1trD2yReL6I55BogR0VTCav/Ohseig3DiXP2/XwPJ0HNoPKEBZAT7fx3e4AC+VzqMtKzpY3FEvvvd7zJ4cOrlp9MyLynFgmGnzIP+PHqVVZMlyOjPI0s2Uu0G/CEhMSP5fqqnSjF4PxRJsfoyALAZMGwJuiubZhj0qLFJCgNx0N+mU6UUwC4rlqMVEH+u+oyBueTZDJyygWIYcQaSoVcjY2AZ/c81kEdi9V7Hx8FIy5o+ckfk1Vdf5Y033uCee+75qFFpATalFOsfq4qi6FctdCjFlrdNQOBQ9KsvZigFSJZfh4TLpl/eOVPJszg/yChkGjCybbmW99glZHLsebrjufZs3aJi5hkS+Xb9s/75juyUTuScrEKHPqMgxeqopzAM5ikaIEaBU78irVEZ+WSU63DpFusqcqdWEfdkOWSFLHv/Nx1F7oF5HbIkke/qv0VA4QC9DoACd/+MPHdGyv14TlZh1sA937Q+1EfqiLS0tPClL32Jhx9+GLeJL1w4HMbj8Zzwk1ZyynZfgvUcEUG2+0Ld0dLM8wZg20SlJPN83fHh2WcOCGN41pm64+NyT0ezyNBQGZejX3xqav7clE/l9EmgMT1/vu747MLkKor2zxAsKNIvODavaLxhpMGMJODM0ukGjJFkJNnV91SGxPmD9au4zioaSq5Dv2+OGcmSxMVDJ+mOTyksY1CGNYdHkSQurRyvOz46v4jK3HxLwTZFkrho5Fjdbb3ynBwmlZRaWsQVSWJpVRVOnT43+e4M5g6rsHT6R5Ek5g6vIF/HEXHZbSweW4Uip86QJYmJ5aUMzksX1vwo9JE5IkIIbrnlFr7yla8wc+ZMU4+56667yM3NPfZTUVHxUT29z6yyM85FkVPv8wAKbtdiHLZhuhYFGafjVKy0uZbJdU4ny6Ffhr3ENZlc+3BS39eQyHOMoNilX7681DWCMtcYS9szufZSKjOn6o4XOwcxOnuSpWTSLFsOE3P1K58WOPKYVTDVEiNDcRlWV82xZ3Jm6XRLx17tso2lg/SvBW6bk0srZlpiyJLEpRX6r8Oh2LiuaoalxVUIwTWV+g6VIsvcNHaGpXwXVQhuGK3PkCSJWyfqj5tl3DhhqqHNzVOnWYroqUJw4xRjR/mGmVMtnf5RheDGmVMNba6bM0W3nLsZaUJww1xjRlqpK+m/+O9///tIkmT4s2/fPv74xz/i9Xr5wQ9+YHruH/zgB/T09Bz7qaurS/bpfe4lSQ7ysm4mdR9TJT/r1gQMmYqcm0jdSdAYknNjAobEuPyrSD0pVjAu78qESZyzCi+2kLAqMbPgYqQEC+ei4nNS3taQkDi96Ox4romBzh20OGWGjMzSktNxKMb5RZeUL0j52KuMzNmDZpFl0CkY4Iqhs1NmKJLM0kETE27xXF05PeWvlSJJLBo0kvLMPGPGyCkp34ErksSskiGMyjNu6nbZ6PE4bbaU/goVSWJcYTFTio1vKC4YPZocpzMlhixJDM3NZX6CG8olo6soznSn5BzKkkRxViZnjKoytJtbNZSKgtyUGBKQ43JyzsTk+lelZV5Jr1a33347e/fuNfypqqri7bffZt26dTidTmw2GyNHxtu0z5w5k5tvvrnfuZ1OJzk5OSf8pJW8CrK/jtM+AZLO45DIcV9Npkt/O6NPQ3JuJNc5PQWGTLH7XErc5ya0HJVzIYMzZiedKyIhU+6ey8gc/a2fPo3PWcTo7PlJR0UkZCrcE5
ier7+F1acJOdOZlX960gwZmSHuSpaUJmaMzxnN2aWLkpq/jzE4o5TLhyR+r8bkVHDt0CVJMxRkSl353FaVmFGZVczXRy9NniHJFDqy+Pa48xLaDnbn8sMpZ6fAkMixZ/CTqYkZhS43/zMn8Xf8ZMmSRKbNwS/nJX6vsh1Ofr34vKR9KlmScCo2fnPG+QkddafNxm/PPS/pU1kSYJNlfn9eYoZNlvnNZecjkdytjUT8tfzm0vOwJUislWWJX10Zt0vWFZEkiV9eeR5O+8Akhad1qj6ypndHjx49IcejsbGRc845h6effpo5c+YwZMiQhHOkm96lrpjaTn3b1YSj+zGbM5KVcSGDC/+MZPJUTFTtYVvLF/BGdptkSBRmLGRi8R9RZKc5hubnzYbv0BragbnbWIlBGdNYMvhX2GVziWVRLcyzdT/niH+LKYaERJlrNFcP+29cirlcgJgW48GaP7CjR78Q1okMmTLXEL4+8kdkGSSRHi9NaPzl0IO8126ulLWMzCBXMT8e/58UOPUTbk9m/OHAs7zYuNY0o8SVxz1Tv0pZRqGpxwgh+MP+N3jwiH6BteOlSDIFjkz+Puc2hmcVm3oMwJ/3vsvv96w2yYg7IQ+cfj3j8sxvS/5zzwZ+vvlt3Z7RJzMybQ4eWno1U4vMnzJ8bM92fvTeSjDJcCo27jvvcuYONr/1/dzePXznjdcRInH8UJYk7LLM3y66mEXDK00z3th3iP9c8TKaEAm3g2RJQpYkfnfZBZw9dqRpxnsHavj3x14gpmoJGZIU/1u/a9k5XDR1nGlGWnF9Irvv1tTUUFlZydatW5k6daqpx6QdEWvSNB8t3Xfi8T8F/faekQENWcqhIOerFGT/e8JthpOlaiEOdf2KJu9TaPSVbD+eE2coUiYVOTcxPO8bvUdzk2FE2NLxN/b3rEAVffUbjmfEL/OK5GJs7uVMK/o3wyO1/UkTKu+1PcLGjueJitCxOU9kgCLZmJp/HmeU3IrdpDP1IUNjZctzvN3yEiEtiIR0yraQhIQsKcwuOJ3Lym/CqbiSYggheLFxJc81vIZfDSAjnXIkNs6QmF84i1srrybTltxJACEEzzes4aGalfREff0y+u47FxZP4d9HX06eI/nkzefqNvGXA2/RHvYi97awP5khgEWlY/nBhIsodiV/jXjx6E7u2fU2TUFPv+3tZUlCCMHC0pHcOe28hFsy/em1o/v5xeZ3OOrr7pfR97vTBg3j53POoTIn+Ryvd44e4b/XvsORni4UST5le6uPMWtQOT8/fSljCsw7bH1aW3eUn616hwMdHYavY+qgMn62ZAkTS0qTZmypb+S/X3+H3c2tKLJ0Sl5H3+8mDCrhx+ecwfQhyZeF2N3Qws9ffJvt9c2GjFGlhfzg/MXMHTE0aUZaaUckrZOkqp30+JfT7X+UWKwBQQRJysBpH0Ne1q1kuy9ElpJb8E5WVPXQ7H+OBu/jhGKNaCKMLLlw2ysZkn0dpZkXosjWTitENT+HPa+zv2cF3mgDqgijSE5y7OWMyb2cqpyzscvWjhxGtBB7elaxpfNlOiP1REUYm+Qg1z6I6QXnMzH3TFyKVUaEbV3reK99Jc2heiJaGLvkIM9RwPzCM5lTuJhMm7VTF1EtyobObbzW9A5HAw2EtDB2yU6BI5clpQs4o2Q+uSYjLXqKaSrvt+/iuYY1HPTWE1LD2GQbBY5szh00mwsGz6PQOQCMtgM8UbOeXd31BNQINjkeAbloyHQur5jJoIw8SwxNCN5rOcwjhzayuaMOfyzOyHe4uWToJK6pmk5FprmIkZ6EELzfXMuD+zezvvko/lgERZLIc2RwSeV4rh89LSUH5GTGhqZ6Hty9lTX1tfiiEWRJItfh5MIRY7lhwhRG5RvnnZhhbG1q4uEd21hVXY0vEr8xyHG6OHfUKG6YPIVxxck7OSdrZ2Mzj27ezlsHjuDrLYaW5XRy5ugqrp8xhUmDrSTLx7WvqY0nNmznjd0H8Y
bCCAFZTgcLx1Ry3ZwpTKkoG5BCgZ9XfSIdkVSUdkQ+GgkhEv6BCREiEHyRUOg9NK0bSZKR5UIyXOfici1BSpA8aYahiQhdgdfpDq4mpnUDEnY5nzz3meRnnImUIHJijhGl0f8uTYE1hNVuAJxKLoPc8ynPPCNhcTZzDJUa3waO+NYSVHsQQsOlZDM0cwYjsxdik42TQM0xNA56t7Oz5wP8MQ+qUHHbshiRNZEpefNxJIjOmPvMBXu9+9jQsRFPzEtMi5FpczM6exTzCufiShCdMcvY7TnC6tbNdEU8REWMLJubsdnDWVI6i8wEyaymGT11vNa0jfaQh4gWI8vuYnzOEM4vn0GO3ToDYE93I88f3U5LyENQjZJjdzE2t4zLhk6lwGnsrJpl7Otu5ZkjO2j0xxlZdidj8oq5qmoKxQmOCPdd2hNxDnZ38NSBndT7eghEo2TZHYzKL+Sq0ZMpy8weEEZ1dxdP79lFbU83/miULIeDyrx8rho/kSE5uQPCqO/p4ekdu6nu7MIXiZDpcDA0L5crJk9keH7egDDSMq+0I5JWylLVNry+v+PzP4wQHuLJqH21NmxADEUuIyvrNrIyb0M2mYdxvKJqF82ef9Hqe7TXATmeEf9vu1xEafaNlObcik02vhj2z/BysOdxDnmeIqx2IqEcq0vS998OOY8RuVcwOvc6HIrxxbA/RbQg27tWsL3refyxjn4ZTjmLSXkXMq1gGW6Dwme6r0MLs7b9Dd5vf5XuaBsyyrHaJzIyGhpOOYPZhWeysPgicu3J31XHtBjvtK7mjZaVtIbbjs17IsPJwuIFnDvobIqcyd9Vq0Ll9aZ1PN+wmvpgC4oko4n4hk4fwyHbObN0NpcPWcLgjOTvqjWh8UrjFh6veZ/DvuaTGPFtMJukcM7gqVw/fCGVWSVJM4QQvFS/k4cOr2N3d1M/jPgWxXnlE7lt1HzG5CZ/5y6E4NW6ffxr3wa2tjecyJCk+I6hBOdVjOVL4+YyubAsaQbAm0cP8Y+dG/iguR5FktBEvJ7M8SdLzho6ki9Pms3M0vKUGKtra/jn1o2sqTuKIsXfH03EGVLvfy8aVsm/TZ/F3CGplWtYf7SOez/YxLtHauJbaScxVCE4bdhQvjB7BgurhqfESCt5pR2RtFJSJLqPtvZr0LR2SFjoS8ZuH0dx4WMoivkLeihazd6WG4mozaYYLlslY0sfwmkzvxfsjzbxbtPX8EXrSZREKyHjtg1i4eC/kGU3fyH0xzpYUfd9OsM1CY//xhn5XFbxKwqd+vVZTmV4uO/IXdQHDydkyMhkKFl8seoOyt3mEwT9sQC/P/hHDnjjLeeNODIyLsXJ7aP/k5HZ5hMEg2qYu/bcx+auvQmTNhVJxi7Z+a8JX2JKvvnjkmE1yk92LGdV6+5+c29OZtgkmbumXs/84rGmGREtxn9tfYEX6nb0mxdzMkMC7pl5BWeX6xcmO1mqpnHnptd59NDWfvNiTmTEF91fzbmQy6v0C6ydLE0I7tq4in/s3NhvrsfJDE0Ifj7/LG4YZ75wnhCC329Yx+83rDPFUIXgB6ct5EvTZpqOSggh+NeGzfzvqvdMM/7jtLn8+2lz05GPj0HJrN/p7rtpARCL1dDadplJJwRAIxrdR2v75WgGDfKOVzjWxJ7mq0w6IXFGKFbDnuariKodphihWCerGr+IP9qAmZM8Ao1ArIV3Gr5AMNZqjqF6ebr223SGj5qqQRJndPF07X/SE2kyxQirQf5++Kc0BI+YYmhoBFQffzv8X7SE6k0xIlqEX+//LQe9hxC9/yRiBNUQv9z/a2r9taYYMU3lZ7v+wdauePfgRK9EFRphLcJ/7fore3qOmGKoQuNH2x9jdeueXoYxRRUaES3Gd7Y8xMaOQ6YYmtD44ZbneLFuR/z/TTBUofGtjU/ydpN+5+TjJYTgxxtf47FDW3uZiRjx0yXfWf8iL9TsNsUA+MWGuBPSN0cihgB+tHYlj+/bbprxh14nxC
wD4K733+Vf2zabZty3cQv/u+q9pBh/eH89f3x/vWlGWh+P0o5IWgih0dZxI0J4Mecg9EklFquhs+vbJhiCg21fIap1Jc2IqM0cav+mKev1LT8gGGtLqjy8QCWsdrO2+f9hJkC4sulueqJNSTI0wlqAF+rvMMVY0fBPWkL1SRUpE2hEtQj3HfkFqkj83J44+iRH/NVJMgQxLcavD/yOiBZNaP9I7cvs7DmUVDM7gUATGj/d/XcCsWBC+4erV/N+276kCtOJ3p/vbn2Yrogvof1jRzbySv2upGp29Nl+e+NTNAUSO+vPVO/kicPbkq4LIgHfWfciRzyJnfVXqvdz7y5zR8hP1g/XvsHujpaEdqtra/hdrxOSrH6xZjWbGhsS2m2ub+Cud95NifGH99fz7pGalB6b1kejtCOSFqHwamKxQyTnIPRJJRh6lVjMuAquL7wFf2RHygxP6H0CkYOGVt3hg7SFNqXUo0ag0hneRVfY+M6yO9LIEd/alPrHCFQ6I7XUBbYa2nmiXWztWpMSQ0OjK9rGXo/xnaU/5md123spVZXV0OiJetjYabyghdQwLzakyhD4YkHebt1kaBfVYjxRsyalQqkCQUiN8mK98XulCY37Dr2fAiHujMSExpM1xq9DCME/9qxLqYJp3KkSPHIgcTTh7zs3pFzeXgIe2L0lod0/t25MuXeMLEncZyIqct/GLZYq19630XzkJa2PXmlHJC18/vtJvkLq8ZLx+R82tGjxPmyRodDqe8TQ4rDnKUtdgSUUDvU8aWizq/slS12BJRS2dz1vaLOh8y1SL20fz0l5v/1VQ5s17WtNRU30GRIrW94ytFnduoWQFrbAgBcaVhtGkFa37qE7GkiZIRA8fXSdYVn5Na2HaQ6m3oBTE4InqjcR0WK6Npva6jnk6Uj5U1eFYPmR7QRiEV2b3R0tbGtrSrl3jCoEKw7vpicc0rWp6e5iTd3RlHvHqELw+pFDtPj0o1QtXh8rDx5OuXeMKgRrao5S29Wd0uPTGnilHZHPuVS1lVDoTVKLVBybpfeUTf8XhpjmoTPwsmVGm+8pNNH/hVbVwtR4X7LUsVegctT3OlHN3/+40NjZ/bKlbroClWrfWgKxLl2b9e1vWOh/E9+iOezbRWdEP+fl7dZ3LDIE1f4a6gP6+SivNq2x1FBQAA3BVvZ5a3RtnqvbYKm5HEBruIdNBrkiT9VsttQdFqAnGmRV0wHd8eWHt1lq9gcQjEV55ah+PsryAzstM2KaxorDe3THn9qzy/J7BfDMPv3I5LO79PlmpUgST+3YZXmetAZGaUfkc66YWoeVu+8+CdGNEP3fxURizQj07wbNShNBYmpnv2MhtRNNpH733SdBTDdpNaz5iOg4KckxBJ5o/3vtqojhMXBSklFnWH8/vy3cPiCM1nCb7lhjsN2Ss9OnpqD+c631tyWVf6Kn+kD/3yuAam+7pe6wEF/46gL6n+sRT0fKzf76ZJNk6nz6jOqeTusMWeaoR59R09ONhSa3QPwYdG1Pt+54bVc3Vn0dARztNpdkn9ZHr7Qj8jmXGICFtU+ajiOiiYFjqDpzxUTq4flT5tL6nyuiJU6cNKuozlxhVT/snazCWv9zqUK1tC1zvIIGzzes6W8TDBQjpFpnyJJEQNV3Yo22O8xKQiIQ02f4ogPBMJ7HG7HOEMKY4Y9ELDufGgK/wXMNRKNYLTqhCXGsYmta//dKOyKfc0kWS6IfL1nqv/CYLA0cQ5H6ryZpl5IvrKYnvTLxjhSKt+lJb65ke8sYyan0X0FUkRRsSfb70VOGDgPApRhXlDUrt8F7kmFLrt9Pf9KEINOmz8i0W2cIBG6D55o9IAzIsuu/5zkO6wxJMmZkORwpJ8P2SZYkMh36DLfdPiCMbKf19yOtgVHaEfmcy6YMYyC+BrJciKTjcDhsZUgk14SuX4aUiU3pv3Ko01aIIlnrZQMgYyfD1n+BNqeciSuFKq8nS0Imx95/xU1FspFnt9YPpE
9FDv2qnqXO5KuK9qdBLv3GZkMySi3liPSp3K3/XCszSyzniAAMdeu/5yOziy3nPahCMDxLv/vwqNwi6/kbQqMyW58xMq9gQHJEqnL1q/dW5RdY/jQ0IajKN2AU5qeccNsnCagssNY/KK2BU9oR+ZxLUYrIcJ2L1VMzWZk361YrtMnZFGZebJGhUJJ1jW5vGEVyUJlzseVTM8OyL8CmE62QJJlJ+RdZPjUzMvt0Mmz6JeXnFZ5taQGXkBmdPZU8h/7iuqT0jJTnh/g+/sisEQzO0C8vfv7gBZbC9BISw9xljMrS7356WcUcSzkiElDmymd6gX412iuHz7CcI1LgzGRh6Sjd8WtGTrWcv5Flc3BuxRh9xpgplhkOxcbFI/QrxV41fuKAOAnLxk7QHb984vgBcXaunDzR4ixpDZTSjkhaZGXdirUTLZCZeb3heGn2jRYZKiXZ1xlajMi5wvKpmRG5VxjaTMy7wPKpmcn5FxvazCpYYvG0icb8onMNbU4rmoc9QcM/I2kIlpaeaWhzevE0w22VRBIILipfaFiOe0HxWAoc1joVXzFsHrJBpGBucSXl7ryUPxEZiWsrZ2GX9Z3kKYWDGZNbnDJDkSSuGTkNl03/Mx2dX8TMkvKUtzUUSWLZyAmGWzxDcnJZOGx4yhEkRZI4f+Roitz626BFmZmcO2aUJcbCquGU56bbhnxSlHZE0sLpOA27bTypRSxkMjIuxqYY94LJck4hyzkjRYZCnusMMuxVhlY5jipKM+alFBWRUChyTSPfOc6YYS9lVPailKIiEgrFzhGUZ0w2tMuy5zKjYHFKzoiMTJGjjLHZUw3tMpQMlpSkzihw5DMzf7qhnUO2c0n54qTnjzMkcuyZLC6ZaWhnkxWuH356yoxMm4sLBxu/DlmS+eKoBSnFXWQkHIqNK4cZMyRJ4isT5qfEkHqf4w2jjRkAX50yx1LE4pbxiRlfnjYr5QiSJgS3TZ2R0O62WTMs1UP54mzj71VaH6/SjkhaSJJEUeGDyHIeyTkKCnbbGAry7jZlPar4rziUkqQZLlsFI4p+a8p6bun/kGkbnJQzIqHgUoqYV/orU/ZLB91OgWNYUs6IhEKGksNFQ35uquHWpeW3MSRjRFIMGRmnksEXqn6ILCV+/VdVXMGY7NFJOSMyMnbZzu2jv4VdThxRuXbYOczMH58kQ8Im2/jZxK+SoSROKLx2+ALOLJ2UFENCQpZk7pl+M7mOxMnUVw2fweVDpyXltkm9//rjnKspyUh8933J8AncOmZWEgSOPZ8/nHYpQ7MS5zwsHTqS/5g6LylGn+5ZeD5jChJ3RZ5fMZTvn7YwJcZ/L17K1EGJuwlPGTyIn569JCXG9xafzrxhqXX6TeujUdoRSQsAm20IJcUvoChlJP5aSICEwz6V4qKnkWVzoXGHUsz4QU/isg01zXDbRzNu0JPYFP2cihMZuSwuv5ccRxWYWjYksuwVLBlyHy6bfoLciQw3y4beQ4lr1LHnaUyQyLYXc+Ww35FtN9fe3i47+eKIO6jM7OsOm4ghk2XL42sj/5tCp7nW8zbZxrdG/weTcicee55GkpHJtGXyo3HfY4jbXFt4RVL44fjbmFc42TTDbXPxi8nfYFS2fm7ICY+RZO6cfBXnDZ7aO4cxQ5FkXIqd38+4lan5w00xJEnizqkXctXwmaYZDtnGn+dcy2kl5jsV/2j6Ur48bm7vHIkZiiTzxwWXcY5BbsjJ+vb0BXxr2mkmGRKyJPGbhedz+Uj9vI2T9eVpM/lBrzNihiEBP1+8lOsnTTHNuG7aFH569hIkM4zecvDfX3w6X5ydOOKS1scrSZjpwPV/pGTaCKc1MFK1Lny++/H5H0DT2gAb8S62fQtuDJutiqzML5CVeR2SlHwOQEzz0Op9jGbvA0TVFiRsx+VdyEAMp1JBac7NlGRdhyInfxompgU57HmaQz3LCcSakFCOJU7G28SrZCiljMy9mhG5y7CbdKZOZETY1f0y27
ueozvagHwSQ0PFrRQwJf8SJudfjEtJ/sRNTIuyqWsVa9pepjV8PEMgIaOhkqnkMK/obOYXnUuWQRKsnlSh8n77Wt5ofpO6YD0K8rH+JbIkowqVTMXNGSWLOav0TPIceUkzNKGxqnUzLzau5oD3KIokI0QfQ0IVGm7Fxbll87l48CKKXcmfaBBC8FbLTp6sXcuO7tr4CRERz2fpY7gUBxeXz+TqYadR7jbneJ7MeLt5Pw8fXs+G9ppjp1CE+JDhlG1cOmwqN42Yy/Cs1E5ArWo8zP37N7Km6UhvToeEJkRvO3sNu6xwaeVEbhszm9F55pzbk7WmsZb7dm3i7brDx/JGNBFf1DUhkGWJi6vG8YUJM5lYpH86ykgbGuq5b9tmVlYfBuJ/3WofA4EEnD9yNLdNnWEqEtKftjU28cCmLby67yACjn0OsiQdqzeydNQIbp05jVkVQ1JipJW8klm/045IWv1KiBjB0BuEw++had2AjCwXkJFxHk7HPMPtBSE0ECGQMhLYqXQHV9MTWkVM7QEkbEoe+RlLyXHNRzJIIBRCQxMhZMmV0K4l+AFNgfeIqD0IBE45j0Hu+Qxyz0My2MIQQqCKIEpChqAhsJ3DvrUE1R4EGi45h6GZ06nMmmu4TSKEICZC2CRnQkZtYD87u9fjVz29C3cmI7ImMiF3FopBXRAhBFERxibZEz6XI/5qNnRuxBP1oooYblsmY7JGMbNghuFWTJwR6WUYR7sO++pY3bqF7oiXqIiRactgbPZwTi+ehtOg9ogQgogWxSYrKAm2ng57m3m9aRvtYS9hLUq2LYNxuUM4e9AUMmzGjLAWxSYp2AySSyFecfWFuu20hLyE1CjZdhfjcgdx4ZBJZNn1HfQ4I4YiyYYJrAC13i6erd5JY8BDMBYhx+5iTF4xl1ZOJNdh7KCH1RgSEg7FmFHv7eGZQ7up8/YQiEXItjsZmVfIslETKHAZ184JqbHePBhjRrPPyzN793DU040vEiHL4aAyL59l4yZQ7DbeGgur8arMTsW49k2738+zu/ZQ3dmNLxwm0+FgaH4el08cz6BsawnNaSWvtCOS1scuTW0jFHicsP8RNK2JeHklGVkZhivzZpzuK5Hl5O/Wj1dM7aTTv5wO7yNEjpWml3AoFRRm30hB5tXYFGu1AaJqD/W+F6j1PE4g9iHDpZQyNOcqKrKX4VT0azWYUUT1c8j7Onu6n6UnUtcbDZJwK4WMzbuYMbkXkWmzVkskooXY2f0uGzpepjV89FjEKcuWx7T8s5iRfw55jtTupD9kRNjUuZ5VbSupDxxFO8bIZl7h6SwsPpNii/VKolqUte1beaVpNYd8xzPcnFE8h3PKTqc8I7W79T7FNJX32nbxTN377PbUHjvimmVzcdag6VxaPo/KLHPbXUaMd1v383j1B2zprCHWy8i0OTl38CSuGjabsbmpRQT6pGoa7zYf5uFDm1jXWkNUi58gc9vsnDtkHDeMnMnkAuOk8kTShOD9phoe3LeF9xqrCatxRoZi4+yho7lp7DSmF5ebyoPSkxCC9Y11PLRzG2/XHjnBEVkyrIqbJk1l7uAKS4y0PnqlHZG0PjYJzYev5w4iwRXEF+2Tj7b2XSwcuDJvxJ3zQyQpuYqbmhakoetOuvxP9fas6e8rKyFhIz/zasrz/ws5ye0cVUTY1/Fr6rxPoh3ri3MyR0ZCYnDWhUwo/JFuvRHd1yFibGq/l93dT6OKaL+voy85tTJrMaeVfgdnkts5mtBY3bqcte0riIow8ff/RI6EjEAwNns2F5V/ncwkt3OEELze8hKvNb1ISAv2bnWdyJCR0dCYmDOFG4Z9gTxHcg6iEIKXmlbxZN0r+GIBZKRT6oX0MSbljuYbI2+gxJW8g/hiwwf84/Cr9ET9/TIUSUYVGpNzK/ne+CupcCfvvL1Uv43f7n2d9rAPuXfboz/GpLwh/GTypYzKSd6xeq1+Lz/f+gbNQW/v9k3/jPF5pfxi5oVMLEje6X
m7/jA/+WAldb4eHUb8d6Pzirhr3jnMKEl+G+T9+lruWP0m1T1dhozK3Hz+Z9FZzB9iLo8orY9faUckrY9FmtqOp+Ma1NhBzNUIkbA55pFT8ACSyUVc1Xo43Ho9wchOTnVy+pOM2zGFqpJHUGRz35mYFmBj81fpCm/BXANAmWz7KOaU/ROHyQhMTAvzZuMPqQ9sNMWIV18dwvkVvzcdHVFFjKfr7mGvZ50pewmZXHsRN1f+nHyHucVPExoP1dzL+s41puxlZLLtOXxr9A8Z5DK3+Akh+MeR5bzW/J5pRpbNzU8n/gfDM80l0Qoh+NvhV3i8dpVphtvm4J6pX2J8rvnF7+8H3uEvB942x5AknLKNP8++iRmFw00z7tv/Ab/YvtIcAwmbLPO3BVexcNAI04zHDmzjR+teBxJ/e2UkZFniz4su4Zyho00znjuwh2+/9eqx3CEjScQTiH9z5nlcOlq/wFpa/3dKZv1On5pJKyUJLYin86YknBAAQSyyHm/XVxEmmq5pIkx1220EI7sw54QAaAQiO6hu+yKaSNzkSxMxtrR+m67wVsx3IdbwRQ+xseVrqDqN5Y6XEBqrmv/btBMC8aJknmgDr9XfTkSnCd+JDMGLDX9mr2e9qfk/ZLTzcM1PCMS8ph7zVP2jpp0QAA0Nb9TD7w78L56ouW6njx19ybQT0sfwxQL8ZNcfaAvrd9E9gVG7yrQT0scIxMJ8Z9u91AX0Ow4fr+U1H5h2QiC+7RFWY3x9w0Mc9Oh3Tj5eK2p2mHZCIJ64G9VUvrLmSXZ2Npp6zGu1+/nRutd7E5jNMVRN4+urnmdDS50pxqqj1Xz7rVfRhDBVjVcQf7++/darrD5abYqR1idXaUckrZQU8v8LNbqL5KulakTDbxEJPpfQstP3OP7wxhQYKv7wOjp9TyS0bPS9THtwDeYdnbgEKj3hXdR4HktoW+N7jxrfasw7Oh8yuiM17OhMzDji38627reTZmhodEVaWN22PDHDd4h3Wt9Iav4+hifazXMNTya0rfU38nT9aykxfLEA91c/k9C2MdjJPw6/kgJDEFQj/G7/cwlt20Nefrk7NUZYi/GzHc8ntPVEQtyxKXmGIN6X5v9teIFEAfFgLMp33k+NoQnBt957KWHhsaiqcvtbryR8Lv1yhODbb71CVB2YbtJp/d8o7YiklbSEUAn67yfZxftDyb2PN2II2rz3pTg/gES79/6EF7caz6Ok/mcgqPU8ljC6s6f7mZT70wg09vY815tToq8NHS8jW2Bs7VpJJEF0Z1XbypQZGhobOtfij/kN7V5rftcS44OO7XRGjCMvzzesS7mEvio0NnYeoCHQbmj3bN3mlBZWiC/gO7rrOOBpNmbU7CCixQxtjBiHPO1s7ag3tHuxei++aCSliq8agga/h/cajSMWb1QfoiMYTIkhgI5gkJU1h1J4dFqfFKUdkbSSVjT8NkIzFzruXxpqdBux6C5dC394PZFYNcne4X8oQTh2CH94g65Fd3gXnsgeUneoIKQ20xZ8X58RqaUpuNVSf5qw2kOtT3+roifSxn7vxmMnSlJRRAuxq1uf4Yt52dT5gSWGKlTWd+gzArEgb7eut8QQwJsta3XHw2qUFxvWW2qUJyPzQoP+FlhMU3mi5gNLDEWSebJW/7srhODBg/rj5hgSjxzabMi4f+8mSw3mFEnioX1bDG0e2Lkl5f43fYwHdmxN+fFp/d8r7YiklbTCwZew1kkXQCESfFl3tDvwEvFialZkoyegz2j2r7TUrRfipdub/PrbFdXe1Za69cYZMke8+rkGe73rLXcjBYldPfpOwo7urWgWGyMKBBs79RNpt3XvJaIZR37MMN5t26g7vr37CL5Y4rweI2lovNmyTXd8V3cDHWGfJYYqNF5t2Kk7vr+nlTp/twVXJ95z5dW6PbpbJ/W+HvZ2tVlmvF1/mFCs/8+1MxhgY1ODpf43qhBsaKqnM5g4lyqtT6bSjkhaSUtobVjt1gsSmt
ahOxpTOwaAoREzYERU/TGzEqhEVP0EyZDaZdkREWgEY/oMf6zHMgMEvliX7qg35kl5y+R4eWL62yY9UWuLt5l5uiIDxdDfYuocIIYvFjpW0+RkdYSNt7jMKio0/NFwv2PtoYFZ2AXQFQ72O9YxgM5DZ7B/RlqffKUdkbSSlhCp7UufNAsYzKNfLyQ5htFzFaiWCQCaQf6GZuJ0kBkZ5YgMHEP/vVIHjKE/jyrUlHM3zDNS3/YxO4+qDQzDaK6YNnBVF6I6r2Wg3iuA2MfxOgbwfU/r41XaEUkraclyPuYayhlJQjKotKrIuQzE9o9iwLDLOQOw8Mk4DBryOZVsU8cRE8ml5BmMZQ4II0PRP+vvVtwDwnAr+uW8M20Dw8hU9IvZZdmS743Un9wGXYGz7cn3RupPdlnBoVPWPNcxMK8DIEenHP2AMpw6DGfi7spmNZBzpfXxKu2IpJW0bM65AzBLDLtjju5olnMu1rdmYmS69BkFrpm9kRcr0ihwzdQdHZQxFTEA21hl7qm6o8MzJ1lKho0TZKqyJuuOj84eZ9lJkJEZlzNRd3x8jvkCW0aMKXljdccn5A5L2Dk3kRRJZnq+fkfd8XmDE/aQScyQmFkwXHd8TG4JmQY9c8xIliSmFpRjk/tfBobn5FPgTK568MmSgFG5heTY+3cSBmVlU55lvVhleXYOg7KSbyqZ1idDaUckraTlzFgGWLsISnIJdtdS3fE894XIkrULiyLlkOe+QHe8xH0GDjn5DqwnMlyUZ12kO17unkG2rQwrESQZhTG5+q9jSMZoSpzDLDFAMKPgHN3RwRlDGJE5ylIuiobGwuIzdcdLXUVMyxtvKRdFQ+O8soW644XOHBaVTDrWNTcVqUJjWcVpuuM59gwuLJ9qkSG4tnKe7niGzc5VVdNQLJw20YTg5tGzdMftssINY6daOtEigFvHz9TtCyNLEjdPmmbpmysBt0yaZul5pvV/q7QjklbSkuUcnO4rSX3rRMaVeSuSQcdYWc6gMOs6CwyFwuwbkSX98LIs2RiWcy2p/hlIKAzJugybrL/dIEky4/OXpTR/H6Mq+0zDrRlJkphTeCGp5tRIyIzOnk2u3biU/BklZ6cceZGQGJM9LmGZ9wvKFqV8fFdCYkTmUEZkGZdgv3zIaSnnP0hAhbuIKXlVhnZXD59tKceixJXDgpJRhjbXjZh+Si+WZJTnyODscv3oEcC1o6ZYStVy2+xcUmlcgv3KcRN1ozJmZJNlrhirH2lL65OvtCOSVkrKyPoakpRJ8l8hBVkuxZV5Q0LL4uwvppgromCT8ynKvjWh5bCca3AqRSkc45VR5Ewqc29JaDkm50Ky7WUpMCQUyc7UwpsSWk7OW0SxsyKFiIWEIiksLrkmoeW0/JkMdVemFLGQkLlk8JUmGOMZnzMy5ajITcMvSWgzOa+SuYVjU9qiEcBXR16YsPPruNzBnFM2MeUcpG+POydhRKUyu5Crq1KPJnxn0hk4dXJQ+lSWmcMXxutvPSbSt6cuINNuHD3Nd2Xw9Rmpb/d+fcZc8l0Dk5eT1v+N0o5IWilJsQ0lu+BB4ls0ZhdYBUnKJqfw8d6EV2PZbYOoKnmkN6phniFLGVSVPIJdSdzIzaHkM3vQvShypmlHQUJBlhzMKv0LbnviJmsOJZPzyn+LU8lJgiEjSzbOLv9f8hyJm6zZZSc3DL+TbFu+aWdEilO4suJ7lGUY3+EDKJKNb4y8nUJnkWlHQer957bKr1KVZXyHDyBLMj8c92+Uu0uTYMT19ZHXM9kgP+SYvSRx58QbGJVdnrQz8s3Rl7CgeIIp2/+eejnTCoYmzfiPsUs5r1w/X+d43Tn9XE4fNCJpZ+QrY+dzzYjppmy/P2Mx5w0dnTTj5rHT+cJ4/a2f4/XNmfNYNsbc+3q8rhgzgW/O1N/CSuvTobQjklbKsjtnkVu0Aknua7+u93WKL76yMpTc4pdR7PqJfifL7ZjEqNLnj3Mq9Bby+O/tShmjBr
1AhsP8RS3bMYLTBj9Bhi3uVOgv5PHfO5QC5pc9Qr5rqmlGjmMwlw6995hToc+IX+6dSg4XDvkjg90zTDNy7UV8acSvGeSqNMeQ3dxU+TPG5JhbLABy7Ll8b+ydVGbGE0sTOQsO2cnXR97OzAL9pOGTlWlzc9ek25mUO9qQIfX+2GU73x37Jc4sNb8guW1O/jDjq8wrGgdgGH2QkLBLCneMv4ZlFQtMM5yKnb/NuZmzB09MyJCRsEkyP550MV8Yucg0wy4r/H3BVSyrnNLL0HcXZCRkSeKHU87iO5OXmGYossyfFl3CzWNnJGZI8RjQ7VNP587ZSxNGjvokSRJ3LzmXr06bjZSAofQyvjptNr9acq5pRlqfXEki1YYIH4OSaSOc1v+dhAgTCb5C0H8/avTUcs525+m4Mm/F7jwTSUot50OIKD3B12n33t9v2fZM51yKsm8lN+MsJMmeEkMTMdoC71LjeZSO0AenjOc5pzA853pKM5eiSKkl62pCpcG/gd3dz1AfOJVR5BzDhPwrqMw6A5uc2nFETWhU+3ewoeNl9ntP7fhb4hzG3MKLmJh3Og45tSOaQggO+vaxqvVNtnZvOiV3ZJCrjDNKzmZOwWm4DI7TJmLs91bzatNq3u/YckrOxSBXEReULeaMkjlk2lI/3bHXU8eKurW82bKV2Ek1SEpd+SwbchrnDZ5Jrl0/FyiR9vU08WTtBl6s33ZKf5gSVw7XDJ/DpRXTKXRmpcw40NPGY4c380z1doLqiXVnCp1ubhg5k6uqplGakXoS+JGeTh49sJUnDuzAHzuxu3W+M4Mbxkzl2tFTGZyZ+vX6aE83j+7ZzuO7d+CJnFhsLcfh5NoJk7l+/BSG5ualzEjro1cy63faEUlrQKXGDqPGjiKEH0nKRrFVodgqdO2F5kGLbEZoXcRrixQgO2YhyfoLSzhaTThWiyZ8yFIWTttwnPbhuvaa5icY2YDaWwFVkfNxOWehyPoX5EC0Hn+0lpjwYZMyybCVk+Wo1H/dWpCe8CaiahegYZNzyXFOx25QY8QXbaE7UktU82OTM8iylZLvNGKEaQluI6x2o6HilHMoyphomMjqiXbQFq4jpPqxy05y7UWUOIfp3kWqIkp9YDf+WDeaiOFSsijLGEOmTX8rrSfaTWOwnoAawCE7yLPnMyRjqAFDpdq3D0+sh5iI4lYyqXBXkWvXP8HUE/VS62/AFwtgl+0UOHKpyqzQZWhC44D3MF2RbiIiSqbiZnhmBUXOwn7t4+9VgIPeBryxIHbJRoEzmzHZ5cg6kQxNaOz11NIe7iakRsmyZVCVNZiyDH2GNxpiT08j3mgQm6xQ4MhkQl65brRECMGunnqagn0MJyOzSxmaqZ9Y7IuG2dXVRE8khCLJFDgzmFSgf6RYCMGu7ibq/d0E1QhZNicjsosYkVOsywjGomxvb6I7HEKWJPKdGUwpKsOh6DP2drVS4+0iEIuQaXdQlVPImDx9RigWY3trE92heEn+PJeLKSVluGxWWz+k9XEo7Yik9YmXFt1DLPAoauAZ4KQS01IGSsbV2DJvQLalXlsiHD1Aj+9BPIEnEOLE8s+S5CLHfRW5WTfjtI9LmRGIVtPofZxm71Oo4sSy2xIOSrMuZnD29WQ7k9//7pM32sjBnhUc7HmOiOY9YUzGxvDssxidu4wi14SUw9SeaBvbu15lW9fLBFXPSa9DZkzOAqblX8QQd+qMnmgXH3S8zZr2N/CeVOpdQmJi7kxOLzqHkVlWGB5Wtb7P683v0BXtPoUxNW8iZw86g8m543UdjETyRP283ryB5+vfoyV8aln8GfljuKR8AbMLx6d8hNcbDfFyw1aeqF1LXeDU8v4zCiq5Ztg8FpaMxZZizRJfNMyLdTt5+PAGDntP7SY8rWAIN46YzVnl43CkyAjGorxQs4cH9m1iX3frKeOTC8u4ecwMLhg2LmHibFqfLn1iHJGXX36Zn/3sZ+zYsQOXy8WiRYt47rnnTD8+7Y
h89iRElGjPf6EGnyCe16FX7Cs+Zsv8IrbsHyAlcUEXQqW952d0++41xcjJvJGSvP8xPE58KkNQ0/07jvb81ZAhoSBQKcm8hDFF/4OcxJaOEIJdXQ+xreNvSEi6R2f7GEMzF3PaoDuxJbndsrnjed5u+TuYYFRmzuDiIT/EqSS3FbK+422eqvsnovef/iQjo6FRlTmOL1TejtuW3DbF++0b+NvhB1CFmpAxInM43x377+TYk9umWNu+k//Z8zBRLZaQUZlZxi8m/xtFTv2oWH9a13aQ7259jKAa3/rojyIjoSEYmlnEn2fewmB34uTv47WxvZavrluONxpC0mNIEpoQDHbn8q/Trqcq2/h498na3t7Ibe88SWc4qM/ofR0lGVk8sORqxuWXJMVI65OrT4Qj8swzz/ClL32JX/ziFyxZsoRYLMauXbu46qqrTM+RdkQ+WxIiRqTrK2jht0mmOIHsuhRH3q9NOSNCaDR3/ju+4IoknplEpuscygrvNZXDIoTgQMePafY9mRQjzzWPSaX3IpvMYdnc9gf2dD+WBEGmyDWRpeV/MO2MvN/2KO+3PZIUo9hVyXXD78Yhm8v9eKf1JV5oNM+QkSlyDuKbo35m2hl5s2U1/6p+NClGobOAn034PnkOc9eWN5s38ct9j+ouqqcwJJl8exZ/nP4til15phhvN+/me1sfB0AzQVEkmWybiwfmfYWKTP0toeO1puUw/7b2cTQhTDIk3IqDxxffyqgcc47ChtY6bnrzcaJCM9VZV5EknIqNJ866nkmFxrVm0vp0KJn1+yM5NROLxfjmN7/J3XffzVe+8hVGjx7N+PHjk3JC0vrsKer5n6SdEAAt9Bwx3x9M2XZ67knSCQEQ+EOv097z36as6zz/TNIJiTO6Q+s42HGnKev93c8k5YTECRptoV2sbTH3OnZ3v5WUE/Iho5rn636BmXuYHd0bknJCIF4dtT3czL+q70EzURRsR/du7qtO7r3S0OgId/Kr/X8gpiUu87+r+wh374szzH57NaHRHfHx/R1/I6xGEtrv7Wngh9uWIzDnIEC8yqs3FuJrG+/HFw0ltD/kaePr65ejCi0JhiCgRrh1zSN0hRN3y63zdfOFd5407YT0MUJqjJvfXk5LwJv4AWl9pvSROCJbtmyhoaEBWZaZNm0aZWVlnHfeeezatcvwceFwGI/Hc8JPWp8NCbUZNfAQqZZpjPn+itD0W8gDqGonnd4/pzQ/CLp9/yKmthgzND+13X9KmdHse5pg9GgCRoRtHX9LkaFR63uLrvBBYyuhsrr1vpQIAo1q/yYag3uN7YTgxabkHIQ+aWgc8e/jgHdnQtsn6pJ1PD9kVPuPsrlre0LbB2peTYmhonE00MKq1m0Jbe899DaaMOseHMcQGk3BLl5q2JrQ9u/71xDVUmEIOkJ+lldvTmj7jz0fEIxFTTshfdKEwBMJ8eD+xIy0Plv6SByRI0eOAHDnnXdyxx138NJLL5Gfn8/ixYvp7Dw18apPd911F7m5ucd+Kir0T1uk9elSLPC4xRmiqMFnDC08geVgqYmdoMdvvHC2+F9EOynxNTnJNHqfMLQ46nvnlKTUZCShcKDnWUObI76N+GL6f4uJGTJbOl80tDns20N7uDllhozMe+2vJ2DUUO0/mnJDPhmJ15rfNrQ56m9he/ch0xGEkyUhsaLhXUOb5mA377buR7VQT/2J2rWGUarOcIBX6nenXHpeQ/DIkY2Gj/dFwzx9eEfKpedVIXj04BbCqtVmlGl9mpSUI/L9738fSZIMf/bt24emxb+oP/rRj1i2bBkzZszg/vvvR5IknnrqKd35f/CDH9DT03Psp66uztqrS+sTISGixPyPgMUOsTH/A7oXWiE0un33YakxBho9vgcQQv8i2OB5CGvN5VSavE+gaWFdi33dT1lqLidQOex5mYjq17XZ0vmiRYbGfs97BGLdujbvtb9huYHdXs9WuiKnnujo05stqy0yBPu8B2kINu
navNS41hJDIDjsa2C/Rz8S9mzdRostC6Eu0MnGziO6Ns/UbrXUmwagLeRjVZN+tO256t2WnQhPJMyrR/dZmiOtT5eS+uu6/fbb2bt3r+FPVVUVZWXxZKPx4z9sduR0OqmqquLoUf0/RqfTSU5Ozgk/aX36JdQ6EKnffffOEp9H638eVWsjpjZYZBjPo2p+AtFDWHN2QBU+ArH+FwwhNNrDu1NuLvchI0J3RH/BqA9YZ2ioNAf1GUd8e1NuYNcngaDGr8/Y49lvmQFwwHtYd2xH9yHLDAmJ3T3VuuNbO2tSjrj0SZFktnfV6o5vaa9LOXLUJ5sks7lD/xq+qbXechdcmySzqbXe0hxpfbqU1MHt4uJiiov1C9D0acaMGTidTvbv38+CBfGSyNFolJqaGoYNG5baM03r0ytt4HJ9hOhB4tTTAdoAMlSth/7OtcQsbJecMpfa//ONagGsOjp9Cqv9P19NqMSEfkQmGYU0n8GYlS2sDxU0iOwEVOsMGRl/TD8J0xsbAIYk4TOYpyeaOAk0kSTi9Uf01B2xzgDwRvW/O55IyHLURSOeK5LW50cfSQWZnJwcvvKVr/CTn/yEiooKhg0bxt133w3AlVcm7sCZ1mdMKZZD7186cyVRAySR9ErEJ1NnJFWGPIAMRe91xLuOWI6IxBn6z1eW5AHxqYwYSootA46XQGAzmMdoLBkZFR7Tq3qarIzmscsD890yZCiK6ePNepJ650nr86OPrJTd3Xffjc1m48YbbyQYDDJnzhzefvtt8vOTK7yT1qdfHzbFszwTkk7XXkUuBMuXwL65+i/cZJNzMS6QZl4OpX+GIjlRJCfqAEQsXLb+S6ZLkoRLySaoGp9CMiO3ov/3nGXLpTNyajXNZJVt1y8IlmvPoSdqLRomEOTa9beBC505NIXaLX2zVKGRZ1A8rdiZwwGaLW3PqEJQ4NCvu1KckYUiSZYiFgJBgVO/mF2RKxNFkomlmBAbl0ShM/W+Pml9+vSRdd+12+3cc889tLS04PF4WLlyJRMmpF7mOq1PrySlFMk+DWtfNwXZuRhJ7v8Cpcg5uJ0L0e/Oa0YyLsccbEr/24+yZKfYfZZlRqZ9LC6d/juSJDE8+2wkSwyJLFs5+Y5Ruhbjc8+wlKwKkKnkU+7WL48/M3+BZYZLdjMqS/+6saBoDpKlNE9wyHam5E3UHT+jZLpl91aRZOYX6TPOLptsOUcEYMkg/ffq/PIJlrdNVCE4f4g+48Jh4yw6IXGn7aLhqbddSOvTp4/MEUkrreNly7wVa6dmVGzumw0tcrNuw1q0QiMv6zZDi8E5N1hmlOfcZNhLZUzuMoTFqMvYvKsMGVPzL7C0NSMhMa3gImSDbYt5hWdiJUIlIzO/6Ezssv7W3qLi+Sn3jOljLCyej9umXyX2zNIZOOXUOjpD3AlZXDKNPINoxdJBE8m2pdYJuY+xsGQMZRl5ujaLy0ZR4kq9u68sScwuGmZY6n1u6VCGZeen7BrKSEwsKE1XV/2cKe2IpPWxSHGdA1I+qR19lUEuR3aebmiV6VqCTSkjta+1jCIXkpVxrqFVrnMWGbbKFBkSipRFSeYFhlaFrrEUOselGE2QUCQ7I3LON2Y4hzDUPcVCxEJmcv45hhZ5jkIm5s5M+eirQDCvcKmhTY49m/mFs1JmaGicVbrI0MZtc3Fu2ZyUGarQuHjwAkMbh2LjiqFzkFNcwlWhcdWweYY2iiRzw4jZKTM0IbhxxGxDG0mSuHXszJTmh3ii6i1jUn98Wp9OpR2RtD4WSZIDR/7vSN4RkQAZR/7vEvaakSSFQQV/Ib51kgxHAiQGFfxFN4n0Q4bEuOJfI2NLkhHX2OK7UUz0aJlf+mMUyZkCQzCv9A4cSuJmbucM/g+csjslZ+Tssq+TpZODcrwuL7+FTFt2Sov4xYNvoMhZmtDu+mFXkO/IS4mxbMhFDHUPSWh38/DzKMsoTKmb7hVDFjM+d3hCu1tHLKIquyRphgRcXjGLOY
WJO1XfPHIOk/IHoyR5xFZC4oIhEzhr8NiEtteOnMbc0mFJM2RJ4szykVxaqb+FldZnU2lHJK2PTYpzIfa83xN3FMx89WTAjiP/bygOc3dJGc45lBX+g3getllG3IFxu4wjLn3Kdk5kYunfkSWnSUbcmRpTdBdF7jNNMfKcVZxZ/ltsUoZJRyF+0Z9d/B0qs882xch3DOaqYb9I2hlZVHIbU/LPM2Wb5yjkqyPuwG3LSspROLv0chYVG0d1+pRrz+FH475FniM3Kcb5g5ayrPxCU7bZdje/nPIVSp35STHOK5vLl0ZcZMo20+bkTzNvYVhmUVJRi7PLJvO98RcZbsX1yaXY+fv8axmTU5oU44yyUfzvjEtMMRyKwt8XLWNq0WDTDAmYVzqMP55+KYqcXpY+b/rIuu8OhNLddz+bUsPriXp+iojto/9TKPHfSfYpOHJ+iuyYkjQjFN5Ca/cPCUd3GDIc9gmU5P2cDOecpBm+8B4Odv4UT3grEsopeR19v3PbRzKi4EcUZJyWNKMnUs0HrXfTEtxiyMi2VzCj6D+oyDLnTB2vrkgjK5v+TI1/S7/HemUUNFRy7KUsLv0CY3NSYbTzdP197PHoMWQ0NHLtBVxQdg2zChYmzeiOeLiv+lE2dW1D4tTutX2MPHsOVwy5mDNLk2d4on7+dPBZVrdtQwhxSoGwvrb2ufZMrht2FpeVLzS1eB8vbzTEr/e+zCuN29AMGNk2FzdXLeTmqtOTzpMJxCL8cudKnq3dRlSLf6eOp0hICASZNge3jpzL18YtTDpSE1Zj/HLrOzx2cBuR3mqrJzLi/59hs3PzmBl8e8rCATvGnNb/vZJZv9OOSFr/JxJCIKLbiPofjnfkFb2FsaQcFNc52DJvQLZbP2UViuygx/cgvtBrx4qeyVI2mRlnkZd1Ky7HVMsMX2Q/Td7HafO/RkzzINCwyVnku06nPOcGcpzTk16MTlZPpIYDPc9S411JWPUAApvspixjJmPyrqQ0wzqjK9LItq5X2NPzNiHViypUnLKbIe6JTC+4iOGZ0xJujyVSZ7iVdR1vsbHrPXwxD5pQccguKjNHs6DoHMblTLWUfArQGenirZb3WN32Pj1RLzERw6W4GJE5nHMGncH0/MmW6490hj282vwBrzWtpyPiIabFcClORmaVc0n5Ak4rmmxYN8SMusJ+XmjYzIq6TbSGeohoMVyKg5HZpVw1dC5LB03EoVirwNATCbKidjtPVG+mKdhDWI3hUuxUZRdxfdUsLqiYgEtJPVEXwBsJs6J6F48c2EK9r5uQGsNlszM8O58bRk/nkuETyLQPZK2htD4JSjsiaX3q1Pc1TLSYithRRPAZUGtAC4DsBmU4UsYVSDpHYpNlxGL1BAKPE4sdRmg+JDkTmzIMt/tqbHbjfXizjEismXb/kwSjB1E1L7LkxmkbQlHmFWQ4Rhs+to+TiBGKdVDrfZ6eyEGiqhebnEGGrZSh2ReR5xwzIIxgrIe9Pa/RFj5IRPVhk11k2goZk3MWpRmJ8wnMMAIxH1u6V1MfOEhQDWCT7OTY85mat4Ch7jGJvzNmXocaZF3HOg56DxJQA9gkG7n2XGYXzGZM9sAwQmqY1a2b2OU5jD8aQJEVcu3ZLCiayuS80QkdMHOMKG8172Bz52E80QCKJJPnyGRRyQTmFg0MI6LGeKNpD2tbD9MTCSJLEnkON2cMGsPC0lEJHTAzjLQ+/Uo7Iml95iTC7yP8/4LI+8TzMgTx48Ay9AbicSxAyvwiktP49ICewuH1+Hx/IRx6S4eh4nAuICvrK7hcS1Ji+MJbaPb8g67gG8f9VqMvjwRUspyzGJT9RfLdxqdS9NQV3svBrgdp8L95XFg/zohvi6jkOycyKu8GyjPPTmlR6AhXs6XjCQ563kFD7Q3ln8godo5iSsEyxuQsTSmS0hZq4N22F9jSvRpVnMiIb7OolLoqOK3wAmYWnGF4lFhPraFWXmt+jffb3yciIse2b+DDrZ
xBrkGcVXoWC4sWYkuhOmlbuIvn6t/mjeZ1hLTwCQxFklGFxiBXIRcNXsx5ZadhT+GocHvYw+M1a3i+4QP8sfCxeY9nlLpyuaJiPlcMnYdLST4C0Rn289DhdSyv2YQnGuqXUezM4trK2dxQNYdMuzNpRlqfHaUdkbQ+MxJCgP9vCN9vSVzVtDe3JOt2yPxyUgusz3cvnp476XMGEjGysv+T7Oz/lxSjzfc4NZ0/os+p0ZcMaJRm30ZF3h1JLeJ13lfZ1PpjgAS1SOKM4dmXMbX4h0mVlj/iXcNrjT/rzZEwYsSzAEZlL2Fp2XdRDOqBnKwD3m08XPMrVBFL0HAuzhiXM4trh/4nDtn84rffu5/fHfgdES1iqqndhJwJfH3k18lQEp966tMBbw0/2fVXArFQQoaExLicSn48/t/IsutXLz1ZB72N/Ofm++iJBo45BkaM0dmD+c30Wylwmq8pcsTbxhfXPkxb2IuWYMmQkajMLuLeeTdSmpG+bn9elcz6nU5PTuuTrWNOCCQuJNabdOf7NfjvNY3w+f6Fp+cnxCMg5hg+7+/wen9lmtHme5Kazh8Qj0wkYsQXkxbv/Rzt+plpRr1vJRtbf4hANVEQLc6o8T7H1rafY/Z+pMa3nlcbfoImYiYY8TkPet/hjaa7ECYrbh727eKB6l8QE1ETDkKcsc+ziUdr70EV5grBHfYd5p799xDWwqY76+717OV3B35HVIuasq/2N/DDHX8kEAuaYggE+zw1/HjXnwipEVOMo/42vrrxH3RH/AmdkD7GIV8T39j0D/wxc43lGgPd3LTmftrDvoROCMSThGt8Hdy05v4Ba7SX1mdbaUckrU+sRHjdcU5Iko/13YOIbEhoF4lswdPzXykxfN7fEwq9mdAuENlHTef3UyAIWn0P0OF/IfFzidaxseWHpFJ3pNb7PDXeFSYY7bzacGcKtVIFh72r2db1TELLQMzLQzW/RHDqaRFjgmC/dyvvtCZmhNUwvz3wWzShJcXQ0DjoO8gz9YkZUS3Knbv+SlSLJVW6XUPjsK+efx5JzFCFxre33E9IjSTFUIVGrb+N/939bEJbIQRf/+BxPNGgKUfneEZTsJvvb07MSCuttCOS1idWwn8fqfd1UXofbyyf715S/zNQ8Hn/mtCq1fcgqVWUBZBp9vwjoVV1z1PEowOp7LRKHOh+MGFUZE/Py2giliIDtnU+iZYgYrGp6x0iWigpB+F4vd/+MrEEEYt1Hevwq37TkZDjJRC80/YOIdU4mrC2fTudkZ6UGW+2fIAn6je0W9e+n4ZgZ1IOQp80BG+37KQ1ZNz4cFNHLQc8LSn1qFGF4L3WQ9T42pN+bFqfL6UdkbQ+kRKxeoi8S+p9XVQIv4NQG/Ut1DZCwZcsMSKRdUSjB3UtYpqHdt+zFhgagegu/OEd+s9CC1HtedZCfxqBP3qU9tBmfYaIsbPreUv9afyxDmp9H+iOa0JjbfurKTshAEHVz66e9brjQghWtqy01CgvokVY17HO0OalxtUpl1KH+HvxZov+6wB4+ujalCq9Hq/n642jho9Vb7DEUCSJ5TWbUn58Wp8PpR2RtD6REsFnsP71lCCoH+IOBp7GSlO2uJT/3969B0dR7XkA/57uyTwzk5BJJg/IE7jJKq8QIEiweMUgAhrh4rpGl9dCSQWEVQsirItVgggLWmu0AKUWULBARUUUdSPyiBQskQglYAhciEICJDzyIJNkMjNn/wiJ5F47M5nusZPh96nKH5nT6d/v5DH9y+k+58Bu/1Cy9Wb9HnB4d7+/oxhV9TskWyvq98PJO/7v2RMGERdrpb9Xv90+hgZXtcwYAk5V75Fsv1h/BtXNVbJjHL3xv5LtZfYyVDRWyCp2GBj2V+6XbL9sv4aSujJZu+lycOy9UijZXtlYg/+7cc6n0ZBWbnB8ekm62Kl22PHdlV9kxXBxjl2/FsPplreJIwlsVIiQrsn1G+QXCQzceUmy1eksg/w/ATdczt8kW5
ucv4JB3qJTgAuNzWWSrbebf5Mdg8OF245fJdtrmstlbJDXGsONasdlyfYbTVdlnb81xnXHFcn2ysZKBWJwVDVJF0xXG5W5FVHZeFPydlm5/YYiMaqb69Ho+uNbWVfsNV49nOpJvdOBmuYG2echgYsKEdI1cTsg4zZACzfQwUgB5/WQX+zwthVb/4iL18v677vtPB3EcHJl3uSbW1e3/aM2d6Os2xm/n0c6V4e7UXax03oeKU3uJtnn93SeBpcyMTg4HBLPuzR4OavGG3bnH+drVzBGvVO5c5HAQ4UI6ZqYCfJ/PYU755EIIQTD94dI284CQZCeIy+yYEUu4GIHMTTM+3UtOhLEpNeVCBL0ihRUWkF6fQytoJf1DEornaDvoE2ZRbY6Oo9BVCaGAAFaicXNjBrlFgszSZzLqFFu2fVgBfMlgYcKEdIlMU2iAmfhHZ5Ho+kN+aMuAjSaJMlWfVASOLxbd0KaCENQH8lWszYBHE5ZERhEmLXS/QjVxsouEhhE9NDFS7ZH6GNknb8lhoAIXS/J9mhDtAIxGKL0UZLtMQab7BgAEGWwSi6Y18toVaTAtWrN0EnsJRNjCIVG5sOwAGDW6BCiVaZYJoGJChHSNRmmKnSeKdJNhinwfXpwKxeMpqckW8OMEyHIHrFwISL4ScnWaNMYBAlmWRE4XEiy/FWyPc40FCaNVXaMfqGTJdsTjP+EMG0k5IxScbgx3Jol2R5njEOcMU7WRZyDY4xtjGR7jCEC91t6y5o1wwBMjJbeHThcZ8GI8GRZM1oEMEyNGy7ZHqI1YHzM/fJiMIZpCUNkz+4hgY1+O0iXxMQoQDcGctYRgS4TTIyUPkK0wmB4VFYMne5BaDoYdRGFYISb/iojhgCTdhCM2vukYzAtEi1TZTxfwWAOSkSYfqB0FkxE/9DHZF3AgzU2xJmGSGfBGDLCH/H5/ABgFM24P2RYh8dk2jJl3WbSC3qkh6V3eMykmFGyZs1omAZjIzvux9S4B2TNaAGAR3t2HOPJxKGyYnDO8URCms9fT+4NVIiQLouZZsH3WyfuO1/fMVPwHPj+wKoLpuB5Ho+ymaffuYD7chF3I8ryrMejkizTwJjGxxgcfwmd4XHfnPtCH4HIdD4XI4Ot/+xx35zBPUbDIBp9LqoejJgM0cO+OenWdFg0Fgg+xGBgGBc5DjoPz4EMtw6ATRfmc4zxUSMQrOl4v5l0a18kmGw+jTYIYHg4JhVWXccjaalhsegXGgPRh40RBTCMjUpBrCms019L7i1UiJAui2mHgplf8u1rzcvAtIM9HqfVDkBIqPd7xtzNbMmDXj/a43GGoD5ItL4BXwqeKMs8hBkf9nicMSgG6ZGt/ejcRSPJ8gTizNK3TNpiaMIwqdcK/L4bsbcYUixZ6B+a7fFIg2jCjISlEJjQqWKEgeF+yzCMivAcQyto8ULyC9AImk4VCgwM91nuQ3aM5xgaQcQr/eZBL2o7FUMAQ4olEbOSPMcQmIA3Bs9AsEbfqWJEAEMfczReTPEcgzGG/GFPIkxr6lQxIjKG+GArVg72HIMQKkRIl8ZMM8DMrTvWerq9IQJgYOb/ADP9q9cxTKanEBL6X2j5c/AmBmC2LEVw8AKvY1hNjyLJ+t93vt67GDGW59ArZLHXMaJNozA8ah0EaMA8xGht7xOSg4Hhi73eRbiXaTAm9XoNGqb1IkbL28t9IY9gbLT3OxXHm5IxO/E/oRU8X8RbYwwIGYF/ift3CF5ekOOMcViSvAQG0eBFjJa8B4UOwoI+C6ARvFuzJdYYhdcHLoIlyOTxeZHWGANDk/HK/fMQJDFb5u9FG8KwYdizCNeZvYjR8tE/NB75Q/4NBi9nxdgMFmx7cDZiDKFex0i2RGHryBkwB0nPYCKkFePebrupgs5sI0wCG3cUg9dvBpoK7rzC0HLbRkDbSINufEvhok31KYbDcRK3b7+LxoY9d53bjd//+3dDp89CcPBc6HQP+BTD7jiDq7X/g5v23X
dmuohoWf699WLoRoh+NCLNsxBieNCnGLWOv+F89Yf47faXcPNmMIjgcLdd7DhcsBnS0TskB9Em32JUOy7j5M1d+KXmGzi5405B0Po9a4kRYxiAgWFTkBT8oNdFyN1uOipx+PpXKLq5Dw53I4R2/WBww4U441+QEf4IBoRk+BTjluMWCq4V4EDVATS4GiBChPuu75UbbiQYE5AZmYkHrA94Xei0j1GLLysO4esrhahz2iEyAW7OwcDAWMsGcfHGGEzuOQrjbOnQCJ1/nqjaUY9dl45g16UjuOWoh+ZODKBlVMPF3Yg3RWBa7AhM7jUUWi+LqbvVNjdg58Ufsf3iMVQ11rWLITAGJ3cj1tgDOUnpmJaQBr3EbBxyb+jM9ZsKEdKtcFcl0PAZuOvXlsXKmAlMTAAMj4OJEYrEcLmuo8H+MZzO83Dz2xCYCaImHkbjNIii/CmmAOB03cL1+k/R0HwOLl4HkRmh1fREuGkqdJpYRWI4XHW4dPsr1DSdQ7P7NkRBD6MmEnHBExGslZ5G27kYdpTW7kNV0zk0uW5Dw3QwBYUj2ZKJsA6m6nYqhrsRJ6sP47L9PBpc9dAIQTBremBQ6EhEGxIUidHsbkbRzSKcu30OdpcdGqZBSFAIhoUNQ4JJqRhOHLl+EqdqzuO20w6NICIkKBgZ4alINif4VEj9PafbhcKqM/jx5t9Q19wAkQkICTJidGQ/DAxVJoaLu3Ho2jkcrjyPGkcDBMYQqjViTFQy0sMTFYlBuj8qRAghhBCims5cv+kZEUIIIYSohgoRQgghhKiGChFCCCGEqIYKEUIIIYSohgoRQgghhKim85PJ/0StE3pqa2tVzoQQQggh3mq9bnszMbdLFyJ1dXUAgNhYZdZVIIQQQsifp66uDiEhIR0e06XXEXG73aioqIDZbFZ8kZza2lrExsbi0qVLAblGCfWv+wv0PgZ6/4DA7yP1r/vzVx8556irq0NMTAwEoeOnQLr0iIggCOjVq5dfY1gsloD9BQOof4Eg0PsY6P0DAr+P1L/uzx999DQS0ooeViWEEEKIaqgQIYQQQohq7tlCRKfTYfny5dDpdGqn4hfUv+4v0PsY6P0DAr+P1L/uryv0sUs/rEoIIYSQwHbPjogQQgghRH1UiBBCCCFENVSIEEIIIUQ1VIgQQgghRDVUiAAoLS3FY489hvDwcFgsFowcORL79+9XOy1FffXVV0hPT4fBYECPHj2QnZ2tdkp+0dTUhEGDBoExhhMnTqidjiLKysowe/ZsJCYmwmAwoHfv3li+fDkcDofaqcnyzjvvICEhAXq9Hunp6Th27JjaKSli1apVGDp0KMxmM2w2G7Kzs3H27Fm10/Kb119/HYwxLFq0SO1UFFVeXo6nn34aVqsVBoMB/fv3x48//qh2WopwuVx4+eWX272nvPrqq17tC+MPVIgAmDRpEpxOJ77//nscP34cAwcOxKRJk3D16lW1U1PErl278Mwzz2DmzJk4efIkDh8+jKeeekrttPxi8eLFiImJUTsNRZWUlMDtdmPjxo04ffo03nzzTWzYsAFLly5VOzWf7dy5E88//zyWL1+O4uJiDBw4EOPHj0dlZaXaqcl28OBB5Obm4ujRoygoKEBzczOysrJQX1+vdmqKKyoqwsaNGzFgwAC1U1HUrVu3kJGRgaCgIHz99dc4c+YM1q1bhx49eqidmiJWr16N9evX4+2338Yvv/yC1atXY82aNcjPz1cnIX6Pq6qq4gD4oUOH2l6rra3lAHhBQYGKmSmjubmZ9+zZk2/atEntVPxu7969PCUlhZ8+fZoD4D/99JPaKfnNmjVreGJiotpp+GzYsGE8Nze37XOXy8VjYmL4qlWrVMzKPyorKzkAfvDgQbVTUVRdXR3v27cvLygo4KNGjeILFy5UOyXFLFmyhI8cOVLtNPxm4sSJfNasWe1emzJlCs/JyVEln3t+RMRqtSI5ORnvv/8+6uvr4XQ6sXHjRthsNqSlpamdnm
zFxcUoLy+HIAhITU1FdHQ0JkyYgFOnTqmdmqKuXbuGOXPm4IMPPoDRaFQ7Hb+rqalBWFiY2mn4xOFw4Pjx48jMzGx7TRAEZGZm4siRIypm5h81NTUA0G1/XlJyc3MxceLEdj/HQPHFF19gyJAhmDZtGmw2G1JTU/Hee++pnZZiRowYgX379qG0tBQAcPLkSfzwww+YMGGCKvl06U3v/gyMMXz33XfIzs6G2WyGIAiw2Wz45ptvAmIY7sKFCwCAV155BW+88QYSEhKwbt06jB49GqWlpQHx5sg5x4wZM/Dss89iyJAhKCsrUzslvzp//jzy8/Oxdu1atVPxyfXr1+FyuRAZGdnu9cjISJSUlKiUlX+43W4sWrQIGRkZ6Nevn9rpKGbHjh0oLi5GUVGR2qn4xYULF7B+/Xo8//zzWLp0KYqKivDcc89Bq9Vi+vTpaqcnW15eHmpra5GSkgJRFOFyubBy5Urk5OSokk/Ajojk5eWBMdbhR0lJCTjnyM3Nhc1mQ2FhIY4dO4bs7GxMnjwZV65cUbsbkrztn9vtBgAsW7YMU6dORVpaGjZv3gzGGD7++GOVe9Exb/uYn5+Puro6vPTSS2qn3Cne9u9u5eXlePjhhzFt2jTMmTNHpcyJt3Jzc3Hq1Cns2LFD7VQUc+nSJSxcuBDbt2+HXq9XOx2/cLvdGDx4MF577TWkpqZi7ty5mDNnDjZs2KB2aor46KOPsH37dnz44YcoLi7G1q1bsXbtWmzdulWVfAJ2ifeqqircuHGjw2OSkpJQWFiIrKws3Lp1q90WyH379sXs2bORl5fn71R94m3/Dh8+jLFjx6KwsBAjR45sa0tPT0dmZiZWrlzp71R95m0fn3jiCezZsweMsbbXXS4XRFFETk6Oan9cnnjbP61WCwCoqKjA6NGjMXz4cGzZsgWC0D3/j3A4HDAajfjkk0/azd6aPn06qqursXv3bvWSU9D8+fOxe/duHDp0CImJiWqno5jPP/8cjz/+OERRbHvN5XKBMQZBENDU1NSurTuKj4/HQw89hE2bNrW9tn79eqxYsQLl5eUqZqaM2NhY5OXlITc3t+21FStWYNu2baqMSgbsrZmIiAhERER4PM5utwPAP7ypC4LQNprQFXnbv7S0NOh0Opw9e7atEGlubkZZWRni4+P9naYs3vbxrbfewooVK9o+r6iowPjx47Fz506kp6f7M0VZvO0f0DISMmbMmLYRre5ahACAVqtFWloa9u3b11aIuN1u7Nu3D/Pnz1c3OQVwzrFgwQJ89tlnOHDgQEAVIQAwbtw4/Pzzz+1emzlzJlJSUrBkyZJuX4QAQEZGxj9MuS4tLe3y75nestvt//AeIoqietc8VR6R7UKqqqq41WrlU6ZM4SdOnOBnz57lL774Ig8KCuInTpxQOz1FLFy4kPfs2ZN/++23vKSkhM+ePZvbbDZ+8+ZNtVPzi4sXLwbUrJnLly/zPn368HHjxvHLly/zK1eutH10Vzt27OA6nY5v2bKFnzlzhs+dO5eHhobyq1evqp2abPPmzeMhISH8wIED7X5Wdrtd7dT8JtBmzRw7doxrNBq+cuVKfu7cOb59+3ZuNBr5tm3b1E5NEdOnT+c9e/bkX375Jb948SL/9NNPeXh4OF+8eLEq+dzzhQjnnBcVFfGsrCweFhbGzWYzHz58ON+7d6/aaSnG4XDwF154gdtsNm42m3lmZiY/deqU2mn5TaAVIps3b+YA/vCjO8vPz+dxcXFcq9XyYcOG8aNHj6qdkiKkflabN29WOzW/CbRChHPO9+zZw/v168d1Oh1PSUnh7777rtopKaa2tpYvXLiQx8XFcb1ez5OSkviyZct4U1OTKvkE7DMihBBCCOn6uu+NZkIIIYR0e1SIEEIIIUQ1VIgQQgghRDVUiBBCCCFENVSIEEIIIUQ1VIgQQgghRDVUiBBCCCFENVSIEEIIIUQ1VIgQQgghRDVUiBBCCCFENVSIEEIIIUQ1VIgQQg
ghRDX/D5jrWwVsUUF3AAAAAElFTkSuQmCC\",\n      \"text/plain\": [\n       \"<Figure size 640x480 with 1 Axes>\"\n      ]\n     },\n     \"metadata\": {},\n     \"output_type\": \"display_data\"\n    }\n   ],\n   \"source\": [\n    \"plotPinPow(fuelBlock)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"89553e58\",\n   \"metadata\": {},\n   \"source\": [\n    \"As expected, our pin powers have rotated 180 degrees, with the maxima now in the south west direction. So what changed: the locations of pins or the pin power data array?\\n\",\n    \"\\n\",\n    \"This introduces the second key concept: with limited and documented exceptions, `Block` parameter data are **not** modified during rotation, the **locations** of objects within the `Block` are updated. See a discussion in \\n\",\n    \"\\n\",\n    \"If we compare the post-rotation pin powers and pin locations, this is confirmed.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 15,\n   \"id\": \"5971c84d\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"assert (fuelBlock.p.linPowByPin == pinPowerBefore).all()\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 16,\n   \"id\": \"6bd98651\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"assert (getPinRingPos(fuelBlock) != ringPosBefore).any()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"1dd38ce3\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Component-level powers\\n\",\n    \"This gets a little trickier to explain because, in our example here, one fuel `Component` occupies the entire fuel lattice. Cases where that may not be the case can follow a similar pattern.\\n\",\n    \"\\n\",\n    \"The connection between block level pin powers and the related components is the `Circle.getPinIndices()` method. For a block with `N` pins, a given pin component will have a multiplicity of `M <= N`. 
`Circle.getPinIndices` will return an `(M, )` vector of integers that translate between the component and block level data.\\n\",\n    \"\\n\",\n    \"For the `k`-th pin reflected in `Circle`, with `0 <= k < M`, `kx = Circle.getPinIndices()[k]` is the index in parameters like `Block.p.linPowByPin[kx]` for that particular instance of the pin. And this `k`-th instance of the pin is spatially located in `Block.getPinLocations()[kx]`.\\n\",\n    \"\\n\",\n    \"To demonstrate, we'll present the trivial case for a singular fuel `Circle` occupying every lattice site in the grid. Here, we would expect the `.getIndices()` to return what is essentially a `numpy.arange` vector, since every position `[0, N)` is held by this fuel pin.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 17,\n   \"id\": \"39131a19\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"from armi.reactor.components import Circle\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 18,\n   \"id\": \"9fed4c75\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"fuelPin: Circle = fuelBlock.getComponent(Flags.FUEL)\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 19,\n   \"id\": \"fb292c1d\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"fpIndices = fuelPin.getPinIndices()\\n\",\n    \"assert (fpIndices == np.arange(0, fuelPin.getDimension(\\\"mult\\\"))).all(), fpIndices\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"5164c1ec\",\n   \"metadata\": {},\n   \"source\": [\n    \"To help illustrate how to map between component data -> block data -> spatial data, let's plot pin power assigned to just this component.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 20,\n   \"id\": \"cf7c081c\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"from matplotlib.colors import Normalize\\n\",\n    \"\\n\",\n    \"\\n\",\n    \"def 
plotCompPinPow(c: Circle, **kwargs):\\n\",\n    \"    blockLinPowByPin = c.parent.p.linPowByPin\\n\",\n    \"    xs = []\\n\",\n    \"    ys = []\\n\",\n    \"    ps = []\\n\",\n    \"    myIndices = c.getPinIndices()\\n\",\n    \"    for k, loc in enumerate(c.spatialLocator):\\n\",\n    \"        x, y, _z = loc.getLocalCoordinates()\\n\",\n    \"        xs.append(x)\\n\",\n    \"        ys.append(y)\\n\",\n    \"        kx = myIndices[k]\\n\",\n    \"        ps.append(blockLinPowByPin[kx])\\n\",\n    \"    # normalize the color scheme against all the pin powers in the block\\n\",\n    \"    # not just those for this pin\\n\",\n    \"    norm = Normalize(vmin=blockLinPowByPin.min(), vmax=blockLinPowByPin.max())\\n\",\n    \"    kwargs.setdefault(\\\"s\\\", 150)\\n\",\n    \"    return pyplot.scatter(xs, ys, c=ps, norm=norm, **kwargs)\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 21,\n   \"id\": \"1cd268d1\",\n   \"metadata\": {},\n   \"outputs\": [\n    {\n     \"data\": {\n      \"text/plain\": [\n       \"<matplotlib.collections.PathCollection at 0x1baf958fc50>\"\n      ]\n     },\n     \"execution_count\": 21,\n     \"metadata\": {},\n     \"output_type\": \"execute_result\"\n    },\n    {\n     \"data\": {\n      \"image/png\": 
\"iVBORw0KGgoAAAANSUhEUgAAAiIAAAGdCAYAAAAvwBgXAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjAsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvlHJYcgAAAAlwSFlzAAAPYQAAD2EBqD+naQABAABJREFUeJzsnWV4HEe2hr/qGY2YyUKLbEu2ZElmZmZ2Yggzb7Ib3DDtZpPdwIY2aCdOzMzMLLDAtmRZFlnMIw131/0xlq9B3dOgJE7S7/PoZq/rTH+DXadOnTqHUEopVFRUVFRUVFR+A5jf+gmoqKioqKio/HlRHREVFRUVFRWV3wzVEVFRUVFRUVH5zVAdERUVFRUVFZXfDNURUVFRUVFRUfnNUB0RFRUVFRUVld8M1RFRUVFRUVFR+c1QHREVFRUVFRWV3wztb/0EhOA4DuXl5fD09AQh5Ld+OioqKioqKioioJRCr9cjNDQUDCMc87ilHZHy8nJERET81k9DRUVFRUVFRQalpaUIDw8XtLmlHRFPT08A9hfi5eX1Gz8bFRUVFRUVFTE0NzcjIiLi6jwuxC3tiLRtx3h5eamOiIqKioqKyu8MMWkVarKqioqKioqKym+G6oioqKioqKio/GaojoiKioqKiorKb4bqiKioqKioqKj8ZtzSyaoqKjdSVtuIdSdzUVLTiFazBe7OOkQG+mBm/0SE+3t3iEZFfTPWH85BcVUDWk0WuDk7ITzQB9MG9UDnYN8O0aiu12PzgVxculyHFqMFrs5ahAR4YfKwHogJD+gQjfrGVmzdk4OLxTVoaTXDxdkJQQGemDiyB+KigzpEo6nJgO07s5FfUIXWFjOcnbXw9/fEuDE9EN8tpEM09M1G7N6WhfNny9HaYoJOp4WfvwdGjktE96TwDqkx1Npiwt7NmTibWYKWZiOcdFr4+ntg+ISeSOoT1SEaJoMZ+9anI+fERbQ0G6DRauDj74Ghk1OQPLiLw1oLYjAbLTi4/hTOHDyP5oZWaDQMvAM8MWhyKvqMSewQDYvZisNrjuP0rjPQ17WAMATe/p4YOK0v+k/uBY1Wo1hD5c8FoZTS3/pJ8NHc3Axvb280NTWpp2b+5BzPK8b3+9JwLK8YDENAKcBRCoYQEAJwHMWg+M64a1Qf9O8SKUsjLb8MS3eexuGcS/aJ54oGIQBDCFiOom98BO4Y2xuDE6NlaeQUlOPHLWk4eLrg6r/dqJHSLQy3T+yN4X3iZGnkXazCz+tPYv/RfHAUINdqMAxYlkP3LiGYN603Rg3uJmuSvXSpBj+vOoF9+8+BZTkQQsBx12t0iQvG7Jl9MHZ0DzCMdI3S4lqsWnYMe7Znw2Zj29WIignEzPn9MW5yMjQa6ZNsRWk91iw5hJ3rM2CxWMG0oxEeFYAZiwZhwqw+0DpJn2Rryhuw5n/7sGP5cZgMFjAaBhzLAQTQaBiwNg6dIv0x/e5hmLRoMHTO0teHdZWNWPvpLmxbehCGZmO7GoFhfpj2wChMvW8kXNycJWs01jRhzX+2YMuXO6FvaIVGa78uAGi0GrA2Fn4hvpj2yHjMfGIS3DxdJWuo/HGQMn+rjojKLQ2lFF/tPon/bj0KDWOfqPloG39y8mDcM7qvpAl22e50fLDqgEMNhrFPVPdN6o+Hpw2UpLF+Xxbe+3YPCIGwBiHgKMVtE3rhiQXDJU3iuw6ew9sfbQUIwLKOX8fUsUl4+sGx0EqYxA8fycfrb28EpRQsy/HaEUJAKcWoEQl47q+ToNOJn2BPH7+I159fCZuNFXwdhACUAgOHdsULb8yCi4uTaI3s05fwymNLYTHZHLwO+39TBsTi5f8shJu7+Ek8L7MYf1/8JQwtJrtjwCtidxi794nBq9/cB08fN9EahdmleGnOf9BU1yKsAYAwBLFJkXhr1ZPwCRR/Ty05fxnPjXsT9RUNojQi48Pwjx1/R0CYv2gNlT8WUuZvNUdE5Zbm692n8N+tRwEIT97Xjn+05Q
i+23tatMbyvRn4YNUBURrclfGvt57AZxuOitbYfCAH//hmNzhKHWtcWRss356OD3/cL1pj35E8vPGfLWA5Kjh5A///Ojbvzsa/PtsJseuR4ycv4pU31l9xEIQnpLZr7jtwDu+8t/mqpiPOpBfh5b8uh8Vic/g62p72iSMX8OaLq66u0B1xPqsULz74HUxGq4jXYf87c6IQrzyyFBaLTZTGpXPleG7+pzDojQ4nb1zROJdehJcWfQ6T0SJKo6ygEn+d8p4oJwQAKEdRmFOKZ6e9j9ZmoyiNquIaPD3sZTRUOnZC2jTK8svx9PBX0VynF6Wh8udGdURUbllOXCjBJ1uPyHrsh5sP43RBmUO77EsV+NeK/bI0vtl2EoeyCx3aFZTW4J1vdsnSWLkzA7uOnXdod7miEW/8ZwukboBQCmzdm4NNu7Id2tbW6fHqG+sBSAuiUgocOJiH1WtPObRtbjLilb+tAMdRSInVchzF6eMX8dOSQw5tTQYLXn5kCVgbByrSOWrTyM0oxpKPHX+WFrMNL9/xJawWm2gHDAA4lsPFnDJ8+dpah7Ysy+HleR/B1GoW5SBcq1F2oRIf/2WpQ1tKKV6Z/k+0NLaKdvIAgLVxqCquwT/u+ET0Y1T+vKiOiMoty9J9adDIyC0A7Ns0S/anObT7aXe6rPwFwL69sXSnY43VOzMlOwhtEELw4xbH0Z31OzLBUSrRRfh/fl5/ymFUZMvWM7DZWEkOwrWsXHPKYfRh55ZMmIxW0RGaa6EUWLfipMOIxd4tmdA3GSU5CP+vQbF5xQkYDWZBu6Pbz6CuqkmSg9AGx1HsXnUKzQ2tgnand2Wj4lKNPA2Ww8H1p1FzuV7QLuvgWRRmFUtyQq7VOLUtA2X55ZIfq/LnQnVEVG5JLtc34fC5IofbGHywHMXBs4WoaGjmtalrbsXu9AuyNTiOIi2/DIUVdbw2LQYzth4+K1uDUoq8omqcK6zktTGbrdi4M0vWxNpGWUUDMnNLecdtNhbrN2Uo0qira8GJk/wRJI6jWL/KsUMkRIvehMP7zvGOU0qxftkxKDkEYzZZsXfzGUGbDd8dku3gAvZox86VJwRtNn61F4yMBN02CIDtS4UjSBs/3Q6NVr4Go2Gw+Yudsh+v8udAdURUbknWn8hVdCMHAAKC9Sdyece3HD8ne3XfhoYh2HCEX2PXsTxYbaxijY37c3jHD54ogEFkToGgxs4s3vGTpy6hsdGgSINhCDZtyeAdz8ooRnVlk2KNzev4o1QXzl5GycVqRZ87IcAWASeh7GIVzqcXKXLaKKXY8gP/tmTN5Xqk7cuVFQ1pg+MoNn+7n3e8uV6Pw+tOyoqGXNVgOWz9Zg9Yhb8BlT82qiOicktSWtuk2EkgBCir45/YSmsaFTs7HEdRVsOvUVbVAI3C2g0sR1FS2cCvUdEg6+jqTRqX+TUulzd0yHtVWsavUSGgL0Xjcin/dkOFwJhYKBW+Tnkxf4RMCtVl9bzRocqiWqmpOu3SVKuHmceJrS6uVeTotGHUm9Bc36L4Oip/XFRHROWWxGCxXD09IheOozCYrfwaJquiVStgnwtajPz5AkL6UmgRyEkwmqyyc1CuxSDwOoxGS4cU9TIKRG6MBotiZwcATCb+99xoUBY5asMsoGFqFc4fEQvHUVh4vj/GVlOHaACAsaX9a/H9uywNvbgTOip/TlRHROWWxN1ZB0bhxMcwBG7O/HUl3F10HbD9A3i48teVcHPRKbp+G54CBahcXZw6YnEMdyENV52i3I023Nz43w9XN51ixxCwP1chjY7AWUDDRUKdESEYDQMdz/fXzcOlQzQA8BYec/XsQA0vtbiZCj+qI6JySxIVpLyUOqVAZ4HrdA72lZ1E2gbDEHQO9uHXCPGFTWF4W8MQRIX58Y5Hhvk5PI0iSiOCv/hURISfYidBwxB0juQvXx8eqbz4FcMQRETxXyc8Snn5fEIIwjsLaMQEKtYAAUIi/XmjUCExQR0SofIL9o
aOpwhccOfADinX7u7tBk8/D8XXUfnjojoiKrck0/v16JDrzBC4zqQBCbKPB7fBchQzhiTxjo/u3w0uEiqK8mlMH9GTd3xo/zh4KFyFsxzFtHHJvON9e0fD31/ZZMJyFNOmpPCOJyZHICTMV9GJFo6jmDqzD+94bHwoYuNDQBR87pRSTLmtP+94aFQgEvvHgtHI1yAAptwxhHfcv5MP+o1LUnZqhiGYct9I3nFPXw8MnzdQ8amZyQ+MhUaj9p9R4Ud1RFRuSTr5eGJYj2hFdURGJcUiyJt/8vT1cMW4Pt0UafRPiERkkA+vjburDpOH9ZCtwRCCHrGd0KUz/ypb56TF9HHJsreZCAE6h/shKT6U10ajYTBjaqqi7bKgIC/06c3fo4cQghnz+sm+PgB4+bhh0PBugjbTFwyUVMjsRlzddBg+gd8xBIBpdw4F56AqrBBaJy3GzOkraDP1vlGKkkkJIZiweKiwxsPjlZ2a4ThMeXCs7Mer/DlQHRGVW5Y7R/SWvR3AcRR3jOjt0G7hmFTZp3NYjuKOcfyr7zbmjksFCJGVUMpRikVThCckAJgxIRlaDSMrmkApsHBmP4eh/kkTk6Fz1sreErhtbj+HztLYiT3h7uEi26mafVt/aB1sJwyf2BO+/h6yNAgBpi0YCBeBHBEAGDg+CUHhvrIiFoQhmLhgIDy8hfvN9BrVHZHdQmRpMAzBqHkD4Bcs3LG6x6Bu6NY3DoyMqAijYTB4ej+ExARLfqzKnwvVEVG5ZekdG46/zhgu67HPzRyBlGj+FX4bCZHB+PuiMbI0Hp0+GAO7d3ZoFxXqh9cemiArofSOqX0xsm8Xh3adgrzxxt+mAYBkZ2TmxBRMGOl4K8zP1x1vvz4bDCNNgxBg3JhEzJjWy6Gth6cL3vrgdjAaRpKjQAjB4OHdMH/xYIe2zi5OeOuLO+Gk00rSYBiC1IFxWPzIaIe2WicN3lr6EFxcdZIcBYYhSOgVhXtfmi7ClsEbK5+Eh7ebNA0Ng+jECDz2r4UObQkheG3d3+AT6C3JGWE0DMK6hOBv3z0i+jEqf15UR0Tllmbx8F54dsZwEMDh9oaGsUcdnp85AguGpYrWmDEkEX9fNAYMIaI0AOCJmUNwz0THkYo2xg6Mx+uPTISGEa9xz4z+eHgef57AjQzuG4u3npsOjYZxqNE2Ac+b2htP3jtKdJSjV2pnvPPmHOh0Wmgc5EC0aUya0BPPPjNRtEb3pHD846OFcHZxcphn0aYxfEx3vPDGLNGORWx8KN779l64ebg41GjLJ+k/PB6vfLgQWidx+Q4RccF4f80T8PJ1F62RMrgr3lz6IHTO4vKKOkUG4INtz8G/k4/D106I/S+hbwz+seEZ0ad7AkL98OHhN9Gpc6Dj95fYnZfY5Ch8sP91uHu7i9JQ+XNDaEecyfuFkNJGWOWPTealcvywPx17sgsA2G+oHKVgCLm6tTImuQsWD09FcpTjSEh7nC2qxI970rHr9AVQSkEIAUc5uwbsSYrDe8Zi4Zhe6N01XJbGheIaLN+ehh3HzoNlOTAMA477fw2OoxiUHIX5E3qjf5LjaEt7XCqtxapN6dixPxdWG3tFg17J77B3/+2T3Blzp/TCoD6xsjTKLtdjzbo0bNuRBYvZBkbTpgGAACxL0TMpArNn9sbQwV1lbedUVjRi3YqT2LYxAyajBZo2DQYACFiWQ0JiOGbM64sRY3rI0qitasKGZcewdfUptOpN0GjtGoTYK/OyLIcu3cMwfeFAjJycLKtwXEONHhu/P4gtPxyBvtFwvQYhYG0couJDMP3uYRgzp59oR+damur02PT1Pmz+Zh8aa/TQaDWgHGffEiT2JnQRXTph2gOjMX7xEN5jwUK0NLZi0+c7seHTbagrb7BrXPnx2V8Hi5DYYMx4bCImPzAGzgLH2lX++EiZv1VHROWWwGKzASDQOdjfr2lqwcZTZ1FS24RWkxnuLs7oHOiDaX27I8BLePVl1wB0WuHVZn
2zAZuPn0VRZQNaTWa4uegQFuCNqQO7I9jXU/CxVhsLjlI4OwlrNOmN2HbkHArLatFqtMDV2QmdArwwaWh3hAYK79vbbCxYjsLZwWkcfasJOw+cxcWiWrQazHB21iLI3xPjR/RARKjw8WibjQXLctDphHNCDAYzdu89hwsFlWhpNcNZp0VAgCfGju4ueFQXAGwsB5uVhbODvBOj0YL9u3KRf64cLXoTnHRa+Ad4YOS4RMTECecfsDYOVptjDYvZioM7c3A2oxitehO0Thr4Bnhi+PgkdOkRJqzBcrBabHB2cRLUsFpsOLI9C9nHC9DSZISTkwbe/h4YMjkF8amdBR/LshysZhucXYU1bFYbjm87g8yD56FvaIVGy8Db3wODp/RCj4FdBB/LcRwsRiuc3XQOnguLk1szcHpHJvQNLWAYBp5+Hhg0vS9SRiY61DAbLXBxc+6Q48cqty6qI6Jyy0MpxfHCUiw7mYlDF4pgudKLwkWrxaj4GCzon4JekaGKblaUUqQVXsbyo2dw4GwhTFa7I+Ks1WBIfDRuH5yMfnERijWyiyqx8sAZ7DlzAaYrnV+dtBoMjI/E/OEpGBDfWXHhtHNFVVi9JxO7T+bDeKXappOWQe/4CMwdk4pBPaMUl5IvKK7B2h2Z2H34PFqvVEDVahgkxYdhzsRUDOkTC63CUvLFJXXYsCUDO/fmoqXFXoFUo2GQ0C0Es6b1wtBBXeEkIyJwLWWl9di8IR07t2ehudl4VSOuSzBmzO6L4SMSRG998FFZ3oCtq09jx4YMNNbbu+QyDEF0106YPr8fho9PdJjQ6oiaikZsW3ECO1acQH2tHqAAoyGIiA3GtMWDMHJaL7gqPLZdX9WE7cuOYNvSQ6itbLRrMAShMUGYcvdwjJk3AO4Ki5E11jRj+3f7seWr3agurQPlKAhDEBIdhCkPjsG4O4bDS60z8odDdURUbmkOXSjCm1v2orS+CRqG3FRUrO3fYgP98NrU0egTJX0b5FRBKd5cuweXqhsENSL9ffDS7FEY1FX6NkjWpQq8+dNuFJTXCmqE+Hni2bkjMaKn9G2Q/OJqvPXtTpwvrm5Xg2EIOI4i0NcDT902HGP7Cx9dbY+isjq8+/kO5ORXCGr4ebvh4UXDMGmE9Bovl8sb8N6H23Emu/Tq9drT8PJyxb2Lh2D6FPE5Pm1UVzXjg/c2I+3UJUENdw9nLL5zKGbPc3xS6Ebqa/X48M2NOHEoHwy5WYMwBJSjcHXTYd7dQ3DbPUPBSHQQmxta8fHLa3B0R459C/JGDWI/6eTs6oSZdw/DoifHSd4yam024tPnf8aB9acBerNG2xEvnbMTptw9HHe/NEPylpGx1YTP/rIEu388DI7jbj4yfWX7S+OkwcR7RuLB9xZC10GViFV+e1RHROWWZW16Ll7esAuUUoenSBgCMITBv+ZMxITErqI1tmfm4fmftoFSCkenf9tyAd6YNxbT+4qfYA9kX8TfvtoMlqMOe+LYMzOAF+aPwrxh/EXDbuTk2RI88+F6+3aPyGPMj88bisWTxCfRZp2/jGfeXgOTxSZa4+45A3DffMenU9rIu1CJv764EgaDWXQl27kz++CR+0eKdhSKLtXgr0/9CH2zEazI+h2TpqbgqWcmiY5WXS6pw7MPfI/62hbR9TtGTkzC396YKbpCaXV5A55b+AWqyxtFawwY3R0vfrIYTiIL59VXN+H5WR/icmGVqFonhBCkDO2GV5c+LFja/lqa61vw/IR3UJhVLOp7RRiC7gO64O1Nz/GWnFf5fSFl/lZPzaj8auw9fxF/X78TnAgnBAA4Ctg4Dn9dvRXHC0tEaRzLL8Zzy7ZdcRAc21NqT3p9eeVOHDhbKEojs7Acf/1qM2wsJ6oxX5vFuyv2YkdaniiNvOJqPPOfdbBYxTsIAPDJykPYeDBHlG1RWR2eeXstTGZpGt+tPo6VW9JF2VZUNuKvL65EqwQnBABWrTuNH5cfF2VbW9OMZ/+yDM1N4p
0QANi6KRPf/G+fKNvG+lY8/+AS1NfqJRUR27c9G5+9t01Un56WZiNevPMr1EhwQgDgxN5z+M8Lq0RpmFrNePm2/+JyYbXogmuUUpw5nId/PvStqFYCFpMFL8/4FwqzS0R/ryhHce5EAd6Y9x/Yrmyhqvx5UB0RlV8Fs9WGF9bukPVYSoHn1+wAywnfBG0shxd/3g5xbs6NIsBLy3dcTWjlfy4ULy/ZDo6TrkIAvP7jLhhMwh1gKaV44+sdsLKcrGJr/1y6G40tjrudvve/XTBZrLK6HH+yZD9q6vQO7T76bDcMBrOswnTfLD2Essv1Du2+/GwPmpoMsjRW/HQMF/IrHdot+WwvamuapVdLpcDmVaeQk+HYkV7+2R5UlNRK7htEKcW+Dek4deC8Q9s1X+xG0bnLkiuychzFse1ncHiTYwd005e7cf5kgXQNlkP6nhzs+uGQpMep/P5RHRGVX4XtufloNpllFfXiKEWVvgUHLxQJ2h04V4havUHW5E0BNBlM2H3leDAfJ/NKUVbbJGvypgCMFiu2nRaeMM5eqsSF0hrZVWVtLIcth3IFbS6V1uHMucvyG9kRYOOebEGTiqomHD9VKLuxIMMQbNyaKWjT0NCKg/vPSYqEXItGw2DThjRBm1a9Cbs3Z8ou2a7RMNi04qSgjdlkxbblJ2RrMBoGm344Imhjs7LY/O0B2Z85wxBs+na/oA3Hcdjw6Q7ZnZoJQ7D+v9s7pNOzyu8H1RFR+VX48Ximoj4lGkKw7ESmoM3Ph5VpMITgp8PCGisOZipqlEcI8NO+DMEb7ao9ZxRpUAqs2J0pOOGs26nsdXAcxdodmbBdOe3UHpu3nVF0WojjKDZvz4LJZOW12bYlEw4CZYKwLIddO7LRojfx2uzecgYWi/ztApblcGjPWfvJFx4Obj0DQwv/c3AEx3I4fTAPlaX8EaQTO7PQKPAcHGpwFLknLqL4fDmvTcbeXFQW1UDWigP2LZpLOaU4f+qizGep8ntEdURUfnGqmluQU14lK4rQBkspjhQUo9Xc/rZGs9GEEwWlijQ4SnGmuAI1zS3tjlttLA5kyV/hA3YnobCyHiU1jTzjFLtP5inSAIDKumbkl1Tzju86fF6xRmOzEdl5/JPS7n1n5UdcrmAwWJB+pph3fN/uXMWrZ6uFxckT/BPf/h3CkR8xcByH4wf484MObjmjqCMwYHekjwg810Mb0xV1BAbskZdDAtszB9ccV9StFwA0Wg0OrhaXH6Tyx0B1RFR+cepaDR12rQZD+7kPDSJyIsRSz3OtplaTIkfnWhr07b8nJosNFit/lEGSRnP7GhxHoW+Vv/oWowEATc0d85k0NvJrNDS0Kr4+IQSNAtdpqG2RvcJvQ8Mwghr1Nc2KOgID9t4zTfUCGtVNijoCA/atkyaB3KDG6mZF3XoBuzPeVNOs6Boqvy9+cUfk8uXLWLRoEfz9/eHq6oqkpCScPn36l5ZVuYVwlGQqBStPAlxHath4rtWhGnyvQ0Fbd7EalFLZHYdv0hCYdJS0qL9OQ+A6cnNDroUQ4dfRIZ/JlTLrvBoKJ+//vw6/E8t2hINLKWxWodfRURod44yr/D74RR2RhoYGDB48GE5OTti2bRvOnj2LDz74AL6+wuWlVf5YeLp0XM8Jb57+FV6uLh2m4cWn4dZxGp4813Jz0Ununsur4d6+hkbDwEVGr5F2NTz43xM3hVU/xWh4eCjX4DgKT09+DU+FlUUBgGMpPLz4Nbx83ZRrUAoPb/7n6uXvcbVQmXwIPHz4n6unr4ekTsDtKjAMPHzVZnl/Jn5RR+Sf//wnIiIi8N1336Ffv36Ijo7GuHHjEBsrr9GWyu+TcB9vBHgou9ESAFH+PvB1a/9G6+/phgh/b8X32SAvD4T6tl98x9XZCV3DAqFwKx/ebi6IDm7fGWcYguQuYYpLwrs6O6FrZCDveK8e4YqSVQFAq2XQPa4T73jv5M6ymsRdCyEEid35e7307hPtsAuwGJ
KSI3nHUvvHKP48KKVI6hXFO548IE6xBsdySOwbwzvec2BXEIW/ENbGImlgF36NYQmKI2GsjUXPoQmKrqHy++IXdUQ2btyIPn36YO7cuQgKCkJqaiq++uorXnuz2Yzm5ubr/lR+/2g1DBb0S1F0ogUAFg1I5a20SQjBgiEpiq7PEILbhyQL9my5fUSKqEJpQhpzhvaETqAp3rwxqYqSPDUMwdShPeAmUC579oRURcmqGoZg3JAEeAtUwZwxNVXRtoZGQzBkYBwCA/gbDU6d0VvR9gzDEPTqHYXwCD9em8lz+ir6PAhD0K1HGOLiQ3htJszrJ/v6gH17KSI2CIl9o3ltxtw2QFZn32sJDPVFn1HdecdHzB8IF4VRKq8ATwyeIb46sMrvn1/UESksLMTnn3+OLl26YMeOHXj44YfxxBNPYMmSJe3av/vuu/D29r76FxER8Us+PZVfkTm9ExVtOei0GkxPFl4lTevTHU4iS2m3B0MIZvZNFLQZ36cb3BRsa1BQzBqcJGgzolcsfDzkbwewHMXsUcKl5PslRyFYYIIXozFrfIqgTWL3MERF+sv+3FmWYubUXoI2sXHBSOguvzkix1FMn9VH0CY0wg+9BsTKjlhQjmLGggGCNv7B3hg0LlH2tgalwPQ7hwi+D54+7hg5u6/sKBVhCKbdN0Kwd46ruwsm3j1S9utgNAymPjBGdLl6lT8Gv6gjwnEcevXqhXfeeQepqal44IEHcP/99+OLL75o1/6FF15AU1PT1b/S0tJf8ump/IoEerrjnsHCN3whHh0xAB4Ock28XF3w0FjhG74Qd4/sA39P4S0kV50THpsmvs/KtRAAtw1PQai/cN8FrVaDx+cPladBgMlDuiM61F/QjmEIHrtjuCwNhhAM798FCQLbMvbnQvDwfSPlaTAEfXpFIVVgy6SN+x4aJcvZYRiCHonhGCCw1dDGnY+OAsMQyTqMhkFst04YMoY/itDGgsfHQqtlJGtoNAzCYwIxekZvh7bzn5wAnauT5KPCjIZBYKgvJiwa4tB29lOT4e7tJtkZYTQMvAO9MO2RcZIep/L75xd1REJCQtC9+/U/wISEBJSUtF/u2NnZGV5eXtf9qfxxeGr0YEyU0LyujXm9k3DfUHGh2vtG9cXsfsJRjfaYnBqPx8YPEmV72/AULBwlvFK/EQJgeM8YPD1L3OQ/dWgi7psuzakiBOiTEIkX7xoryn7UwG54dPEwSRoMIejepRNefXyiKPv+fWPw1CPins9VDYYgJjoQb7w0XVSkIzmlM/72wlR7A0OR8yvDEISF++HNf8wTVfciPjEcz787F4QQ0ZM4oyEIDPbCW/9dBJ2IFX50txC8/NmdYDSM6OiLRsPA298Db39/P1zcHDekC40Owms/PAKtk0Z0TRFGw8DD2w3vrHoSHt6Oc72CIvzx9qZnoXNxEu2MMFoGru7O+MeW5+Eb5C3qMSp/HH5RR2Tw4MHIy7u+iE9+fj46d5becl3l9w/DEPxrzkTcNcg+iWsEZg2G2Fefj44YgNemjRYdeieE4NW5Y/DgmP4gV67DR1uy5t0jeuOd2yeIvvkTQvDMrGF4fPpgMISI0pgztCf+dd9UaCWsEh+YOQjPLBwJhhGnMWlQd3z49ExJ21MLpvXFCw+Pg1bDCL7HbRrD+8fh41fmwlnC9tT0Kal4+bmpcHLSCDoKbVsGfXtH4+N/LYCbm/hcg7Hjk/D6246fV5tGz+RIfPL5XfCScCJm6JjueOvTxXC90oGW77W0aXTtHoaPf3gAfhK2wPoMj8c/fngQ7ldO8fA5PW0TfOeuwfh43RMIChV/ErHnoK7414Zn4OXrIUojLDoIH+94HmExQaI14vvG4aODr8Ovk8911+LTCAr3xydH30J0kuMImMofD0J/waL+p06dwqBBg/D6669j3rx5OHnyJO6//37873//w8KFCx0+XkobYZXfF0V1DVh5Khsr07Jvqpbq5eqM2/smY26fJIT5yP/cL9
c3YdXxbKw8lgW90XzdmIeLDnP6J2HewJ6ICPCRrVHVoMeaI9lYdTALja3XF/BydXbCzEGJmDOkJ6I78SdDOqK2sRUbDmZj9Z5M1DVdX9zLWafF1CE9MHtUMmLDA2RrNDQZsGVfDlZvz0BN3fWVZZ20GkwYloCZ41PQLSZYtkaz3ogdu3OwZkMaKquuT0TXahmMHp6AGVN7Ib5rJ9k5Hy0tJuzekY11a07hclnDdWMaDcHQ4QmYPqsPEpPCZWsYDWbs3ZqF9T+fQElhzXVjhCEYPDIeU+f3R3KfKNkaJqMFBzZnYuPSwyg8V3G9BiHoNzIBUxcPQurgLoI5G0JYTFYc3pyODV/vQ37GDdVrCdB7eHdMvXcE+ozuITuvxGqx4cj6U9jw2Q7kHs2/aTx5RHdMf2Q8Bk7pBY2C/C6VWw8p8/cv6ogAwObNm/HCCy/gwoULiI6OxtNPP437779f1GNVR+T3BaUUudXVKG1sQqvVCk+dDjF+fugSwJ+vYLLakFVWiSajCYQAPq4u6BneCTotfyj7fFUNiusb0Wq2wN1Zh85+PogP5j+qarHZkF1SiUaDCaD247OJkZ3gInBypaCqFoXVDWg1W+Dm7IRIPx/EhwbyTixWlkVuUSUaWkyglMLL3QU9IoPhKrBCL6qqR0FlHVpNFrjqnBDq54UekcG8GjaWw9lLlWjUG8GyHDzdnREfFQwPnronAFBa1YCCy7VoMVrgotOik58nEmNCeDVYlsP5wio0NBlgtbHwcndB15gg3pokAFBR04T8omq0GC1wdtIg0NcDSV35jyBzHEV+QSXqG1phtbLwcHdBXGwQvAWiE9W1euQVVKKl1Qydkwb+fh5ISgjjnSAppSjIr0RdfQssZhvcPVwQExsEX4H6FHV1Lcg7XwF9ixFOWg18/TyQlBQOLc8ESSlFYX4V6qqbYTZZ4e7pgs6xQfAP5I+ANNa34nxOGfR6E7RaBj6+7khM7QwngdMsRfmVqC5vgNlggbuXKyJigxAY4sNr39zQinOZJdA3GaHRMvD2dUdinyjoBL6LJfkVqCqtg6nVDDcvV4THBiM4gv9329JkwNnTl6BvaAVhCLz9PNCjfyxcXPm3h8ryK1BxqRpGvRFuXq4Ii+uEEAHH1qA3IvdYPprr7Y6xl58HegzsCjeBk1oqtw63lCOiBNUR+X3QarFg47nzWJKegQt1dTeNp4SE4M5eKRjfpQucBRwMIUxWG7bk5uHHk5k4W3lzD5UeIUFY3C8FE7t3E3QwhDBbbdiZcwHLjmYiu/Tm1vBdgv2xcHAqJqfEw00n7+SM1cZib1YBfj6YiYzCm/u0RAX5YsHwVEzuEy/oYAhhYzkcOnMRK/Zm4vT5mxO+wwO9MX90KqYM6s5bWM0RLMfh+JkirN6RgeNZRTeNB/t7Ys64VEwZ3gM+XvJqyHAcxekzRVi7OQPHTl+8qRqsv58HZk1KxeSxSfCTWQCLUorMzBJsWJ+GI0fybzqm6+PjhunTe2HS5BQEyDxlRCnF2axSbFx5Cgf3nL2pzoanlyumzOmDSTN7I6iTvPwISinys8uw+efj2L8586ZKse6eLpg4tx8m3dYfIQJHlR1RkFOKLUsPY8+aU7Car28E6OrujPG3D8TkxYMRHis/claUW4bNX+/Fzh8PwWy8Plrq7KbDuIVDMeW+UYjqES5bQ+WXR3VEVH410i6X4/5169FkMoGg/ZYcDCHgKEWIpye+nzMLcf7CJzpuJKe8Cg/8vB51rQYwBO3W8WjTCHB3w9cLZyKhk/j9bMAeAXngm3Woam65eq0bIcR+TNLbzQWf3zUDyZH8dSHao6SmEQ9/thZldU38Glf+6+aiw4f3TUO/rtKOsFfWNeOx/6xFUWU9GIa0W/+iTcNZp8U/H56KwUn8tSfao7ahBU+/txYXimugYQhvPRJCAK1Gg9cfm4SR/aQlKTc2G/DCm+uQm1fuQINAoyF47vEJGD+yhySNlhYTXn1lLTIzi6HREN
56JG2RnSeeHI+pU1MlaZiMFrzz4mqcOHwBGg3DW1eFYQgoBe5/cixmLRggaUvHYrbi/edX4dD2bGENjf37sPixMbj94VGSNKwWGz55fjl2rTzpQIMBx3KY+8gY3PX8FEnbRizL4Ytnf8TGL/eI0pj24Gg89N4ixUXzVH4ZVEdE5VfhaEkJ7l69FhyloprBaQiBi5MTVt4+H/GB/Fsp15JRWo47f1gNK8uJ1nDSaLD0jjlIDhfnKJwrr8YdX6yE2WYTVeSLIQQahuB/98xCv1hxjsKlqnos/vdyGMwW0RqEAB/dPx1De4hzFC7XNOHud35GU6tRlAYhAAHBOw9Oxpg+4hyFmoYW3PfyMtQ1torWoBT4+4PjMXm4uNNMjU0GPPy3ZaiobpJUSOypB0dj1mRxp5laWkx48okfUFJSJ0njvvtH4PbbB4qyNRkt+NtDS1BwvkKSxu33DMVdD48SZWux2PDSvd8gN71YUtO8aYsG4qEXp4pyRmxWFq/f8z+k7T8vqdPx2Pn98Zf3F4jSYFkO7971GQ6tPyW+wSABhs7oixeXPCI7T0bll0PK/K1+eiqyuFTfgAfWbRDthAAASylMVivuXLUGtSI68l5ubMb9P60X7YS0aVhYFvf/tA4VTfxdQtuo1bfigW/WwmQV54QA9p4eLEfx6JINKK5tdGjfbDDhwU/XiHZC2jQ4juKZbzfhQnmtQ3uDyYJH/71atBMC2B0ESin+/tVW5BRWOLS3WG146h9rRDshbRoA8M5XO5F+1nFdIBvL4bk31kh2QgDgwy/34NjpQhHPieLVV9ZKdkIA4Ouv9mP//nOiNN79+xrJTggA/PztIezYmCHK9sO/r0FumjQnBAA2/ngMG388Ksr2i1fXIG3/OUlOCADsWnECyz/ZKcr2+9dXS3NCAIACh9adwnevrZb0vFRuPVRHREUWX5w8CYvNJtpBaIOlFPVGI346c8ah7fcn0mGwWCRrcJSixWzB0hOOb+Y/HTuDRoNJlobZZsN3Bx13kl5zNBvVTeIn7zYo7BPz1ztPOLTdeuwcymqaZGlwlOKrTccc2u49kY/C0lp5peEp8OWqIw7Njp26iHMXKmWVVCcE+HLJAYcTZmZGMTIzi2WXbf/qf/scPjbvbDmOH7w550Qs3366x2FH3uKCKuzblCnZQWhj6ce7YDZZBW2qSuuw5YfDsrs1L/9oJ1r1RkGbxupmrPlomzQn5BrWfLwdjdVqO5DfM6ojoiKZJpMJG86eAyvz7sRRih8yMmFl+Vt9GyxWrM7Ika3BUooV6dkwWvlvtBYbixXHz0h2Qq5qcBQb0s+i2WgSsOHw80H5kwXLUezKvIC65lZeG0oplu/JkN3OjOMojmYXoby2SdBu1Y4M2f2COEqRlXcZhWXC0Z01m9Pll1KnQGFxLc7lC0d31q1PU5RXUFnZhIyMIkGbTatOKdJorG/F8UN5gjZblp9Q1OnW0GLGoe3Zgjbblh1V1IzPYrFi75pTgjbblx6Q/RsE7BW8d/xwUPbjVX57VEdERTJrcnJh45R12Kw3GrG74CLv+Jac8zBYhFdrjmi1WLA19+baBW3syS2wH+lVgNXGYmM6f6j+6LliVDW28I6LgVJg3fFc3vHMC5dRVFkvd0EJwF7/Yu2BLN7x/KJqnL1YqWjC0DAEa3fxR8JKyxuQnlWirOGfhmDdVv5IWE2NHkePXFDUjI9hCDasT+Mdb240YP+ObOUaK0/yjhtbzdi55rSiTreEIYLbM1aLDVt+OAJOQVNBANjwLX+UimU5bPxyt+StpWuhHMWGL3crer9VfltUR0RFMmmXbz52KhUtwyCtnP866aXlitvUaxiC9FJ+jYzicmgVJrkRYr8OH5mXlGtwlCL9Yhm/RoHy94rjKNLy+DXO5F1W1LQQsEd30s/x54nknLusTAD2RnkZ2fwa58+Vy45OtcFxFFlZ/BoX8ipuOj4rR+PsGX6NogtVDrdVHEE5igu5l8Ha2o9Mll+qQU
uT41wuYRHgcmENWpvb356pK29AXUWjMo2r12lwbKhyS6I6IiqSaTQZFa2+AXsX2maTmXe82WRW1KYesN/M9Sb+iIfeaFY+KVGgSSCqojeYIXvP5BoaW4U0TLIreF5Ls4BGi8HcIScT9C0CGq3mDnkdrQb+71WLwGuUgsFg4R1r1XeMhtXKwmKxtTvmKO9CCi08z7eFx3mQpdHU/rVamvi3HCVrNHbctVR+XVRHREUyThrlpZgJCJwE9rftvU8Ualw5yiuk0RFOgk6gNLWU3jJyNaT0lhFC6DpOWgayMxavga9KaZtGR1QT0Ah95h30Xgnlf2gFqqR2lE5HlkPne74d9V4Ja8grPti+RsddS+XXRXVEVCQT6O4u2LBODJRS+LvxV9wMcHeHhijdNiHwc+fX8PeQV/HzWjQMEbyOv6ebopwHwF5TJNCLv3Kon5cbWIU5O4QQBHjza/h6uyuOUAFAgEAFVF8fedVRb8TPh//zECrxLgVvIQ0/jw7R8PB04XVEfP07RsNJp4Gbe/sVfH0EStVLgTAEXjzvu09gx9WH8glQa039XlEdERXJTO7WTfZpljZYSjG5Wzfe8Yk9uipOiGU5DpO68xfqGt+zq+LJleUoJvTkfx1jU7sqSvAE7Dki43vza4zq1QVEYWiHUooJ/eN5x4f0ioFWqzyfZtygBN7xvqlRcJHQ1bd9DYJxI7rzjvfsGQFPT3ll7dtgGIIxY/iruHbtHirYb0aUhoZg1MQk3vHOXYIRFhWgKGqo0TAYNrEn73ZYcLgfuvSM4O3OKwZGw2DAuCToXNr/XL38PZA8LEHR6R9GwyBleAK8Osg5U/n1UR0RFckMi45CiKf8Gy1DCHqHhqJbIH+32N4RoYgN8JM9vRIAXYMCkCJQXTUhNAhJEZ1kH0klAMJ8vTAwjr91eWSgDwZ0i1R0BNLPwxUjEmN5xwN9PDAiNU5Rwqq7qw5j+vI7bd4erhg/OEGRhpNWg4lD+Z0EN1cdJo9LUqTBEGDyWP4JXKfTYtq0Xoo+D46jmDqFv9S7RsNg+rx+ivJdOJZiyuy+vOOEEExfNEhRrhbLcpi6QLhK7LR7his60cKxHKbeNVTQZvpDYxSd/uFYDtMfGiv78Sq/PaojoiIZhhDc2StVft0KSnFHL+GeHYQQ3NE/VfaNlgK4o1+Kw8lg0eBURRGLhYNSHU5qtw9Lkb09wxCC+UNTHOaazBuVIju6wzAEs4YlwcVBI7/ZY+VraBiCScN6wMNNuJHfjInKNEYM6eZwi2fylBRZ1wfs71X//rEIdtCcbvz0VGg08n4hjIYgMSUSnWOE2yCMmp4KZ2cnWXlODEMQEx+CrknCjeOGTUmFh7errMgLwxCEdA5AymDh9gEDJqXCN9hbVuSFMAR+nXzQf2KK9CeocsugOiIqsrgjNQW9w0Il54owhGBi1y6Y1M1xb5M5qYkYHBMpOWLBEIJhcVGYmeK4Cdqknt0wpkesZA0NIegdHYbbB/Z0aDs8MQZT+yZIvplrGIL48CDcObq3Q9ve3cIxd2Sy5DlJwxBEdfLDvVMGOLRNiOmExdP6SVSwawQHeOGheUMc2nYO98f9i4RX0Hwavr7uePSekQ5tg4O98cgjYyRrMAyBp6cLnnxyvENbH193PPHCFFkarq46/OXv0xzaunu44Jl350quSEoYAp2zE/76j7kOHXWdixP+9vEdkPrlJcSeUPvcf+90qKHRavDCdw/b7aTIEIBhGDz/7UMdmryr8uujOiIqsnDWavG/mTPQIzhI9CROAAyL6owPJk0U9Rgtw+DjuVPRKyJU9H2QEKBv5zB8OGeyqPodDEPwz9smYVCXSNH3QIYQ9AgPxid3TINORNY/IQSv3j4WI5P4t1fa04jt5I9PH5oBVweRijaNv94+EhMH8udg3KTBEIQH+eC/f5kFD1fhSEUbD80bgpljkkVraBiCIH9PfPLiXHh7uop6zKK5/XH7LPEOj4Yh8PVxx4dvzkeAyETRmb
P64K67xTs8bU7Iv96/3WE0pI3x01Lx4F/GidfQELi6OeOd/y5GeGdxHaqHTkjC46/PtDcwFPEFZjQEzi5OeP2LOxHdTVxTyH6je+Cv/1kEhiGitpsYhsBJ54RXvrkP3VI7i9JIHpaAv//wKLRajajICGEItFoNXlr6CJKHif/Oq9yaqN13VRRhslrxjwMHsSI752rJ9mu/UAyx19pw1znhrl698MSggZILfFlsNvx77xH8fDoLZpvtJg1y5f93cdJiQZ9k/GXUYOgkHjG2sRw+3X0MPx7JgMFivdo19loNwJ7nMLdfEp6eOBQuEo8LchzFVztPYOneNLSYLGAIuWlbiBC7Aza1X3f8bdZwuDnrJGlQSvHDjtP4futJNBvM7WrYO/sSjO/fDX+7fSQ83aQlb1JKsXpnJr5ddwyNzUYwDLlp64m5srod2a8rnrlrFHy9pJ9Q2rwzC98sO4y6htb2NRgCSimG9I/DXx4ciwAZyYp79uTi66/2o7q6mVeD4yj69Y/FU0+OF+2EXMvhvefw1ce7UHm5od329oyGgGMpUvtG4/HnJyMsUpwTci0nD5zHV//cgrJLtTwaDDiWQ2LvKDz6ynREde0kWSPzSD6+eGUNivMq2tVo+7f4XlF49O25iEsS15n6WnKPX8Bnf/0BBZnF0GiZm/rttP1bXEoUHnl/EXoM6CJZQ+XXQcr8rToiKh1Cs8mEtWfP4qfMLFxubobZZoOLkxOifX2xODUZU+Pj4eqk7EREi9mCjdnn8PPpLJQ0NMJstcHZSYvOvj64vU9PTE1KgIfEiftGDBYrtmaex/LjZ1BU0wDTFY0wXy/M798T03p3h6eLuOgBHyaLDTsz8rDi0BlcrKyHyWKFzkmLTj4emD24J2b07wFvd2UnOyxWG/amF2Dl3gwUlNXCaLZC56RBoI8HZgxNwrQhifCT4Rxci83G4mDaRazemYG8S1UwmqxwctLAz9sdU0YkYvrIJAT4KjvJYGM5HD9diLWb03E2vwJGkwVarQY+3m6YOCoRU8f3RLDCI6AcR3HqVCHWr09DTnYpjEYLNBoNvL1dMXZsIqZMTUVIiI8iDUopMk5ewsZVJ5GVVgSjwWKPsni7YuT4JEyZ3UeWA3KjRs7pImxadgzpRy/A2GoGYQg8vFwxbGJPTL6tPzrHBSvWOJ9ehE3fH8KpvbkwtJhBALh7u2LIpBRMvmMIYrqHKdIAgPz0S9j0vz04tiX9alVWdy9XDJzcC1MfGI2uvaIVa6j8sqiOiIpiTDYbthbm4fDlYjSZTGAYBn4urhgXFYcREdHQOIhqUEodhnEtLIsdBRdwsKgIjSYTCAAfV1eMjonB6JhYh5ETMRpWlsXeC4XYf+ESGoz2G5qPqwuGxkZhbLc4h5ETMRosx+Fg/iXsPVeIhlYDOAp4uzpjYFxnjO/RBc4OIidiNDiO4mheMXZnXUBjqxE2loOXuwv6xUVgfEpXh9s3YjQopTiVV4pdafmo1xths7HwdHdGSmwYJvWLh5uLsJMnViMz/zJ2HctDXVMrLFYWXu7OSIwLwcTB3R0ms4rVyM2vwK6D51Db0AKLhYWHuzMS4jphwsge8PIQdvLEaABA/oVK7Nydg5paPcxmG9zdndElLhgTxibBR6DOiBSNSxersWPLGVRXNcNsssDN3RnRsUGYMCUFfg4iQG23dkc6JYXV2Lk+HVXljTAZzHDzcEFkTCDGzeiNQAcRILEaly/VYNeqk6goroWh1a4RFhOI8XP7IzjCr0M0KktqsfPHI7h8sQoGvRGuHi4IiQ7E+IVDEBoTJPhYlY5HdURUZFNjaMXXWafx87ksNFvM0BBytWaIljCwUQ6d3D1wZ2Iq7uyRCjcn6RGIBqMR36anY1nWGTSaTNdpaAgDlnIIcHPD4uQU3JWaCk9n6REIvcmMJacysOx0JuoMRmgYcvU0Rtv/9nV1xYLePXFnv17wcZUegWg1W7DseCZ+Op6Jan1ruxpeLs6Y168n7h
zUS1YBNZPFhhVHz+CnQxmoaNBDwzDgOA70Gg13Zx1mD0jEHSN6I8hbegTCamOx5lAWftqbgbKapus02rYmXHVOmD64BxaN6Y1Qf+m/RRvLYeP+bCzfkYHiinporly3TYNyFE5OGkwe2gMLJ/VGRLCvZA2Oo9i2LwcrN6fhYrF9i4LjOFD6/9s4Wo0G44Yl4PYZfREVLj0CQSnF7n1nsWbtaeRdqGxHw/7fUSMSMH9OP8TKmAAppTi47xzWLD+BczmXb9aAfatw6Ih4zF04EN0SQiVrAMDx/eexZslhZKcVXdGgdgeJIVe3OweOiMecu4aiewr/EXUh0g6cx5qv9iHjcD4Yjb1yLuWuaBACjuPQd0QC5jwwCj0HxsnSOHM4D6s/2Y5Tu3PAMO1osBxSRyRg9qPj0Gd0oiwNFemojoiKLPLqa7B4y2rUGQ0OC5YxIIj3D8D3k+YgyE18tcpLDQ24Y+0aVOr1jjUIQbSvL5bMmo1QCXVLLjc14+6f1qKkodHh0VyGEIR6e+K722ejs5+PaI0afQvu/34dCqrrHGpoiL366td3z0ZckPjJr6HFiEe/Xofc0iqH1dU1DIGXmwu+eGAWEsLFT356gwl/+XwjMgouA1T4AIaGIXBz1uHjx2cgOUb85GcwWfDCJ5txPKvo6gQnpKFz0uL9v0xHnx7iJz+zxYY3PtyCA8cv3JTf056GRqvB23+bhoG9Y0RrWK0s/vWfbdi1JxeEEMFy9BqNfRL8+/PTMHwofzG6G2FZDv/993ZsXpfebs7KjRqUAn99aSrGTnR8eqsNjuPwzX92Ys2Sww41GA0DynF47KVpmDxPfAIxpRTLPtyBZR/tuJqfIqTBsRzufXEaZt8/QnT9FUop1vx3J75+dbVojUXPTcXCZ6d2SE8jFWGkzN/qqRkVAEBxUyPmbVwhygkBAA4UefW1uG3jcjSZxTX5qtDrMX/lClFOCGCvN1LU0ID5K1agziCuC2hdqwELl65EqQgnpE2jokmP25euQGVziyiNJqMJd3y9ChdrHDshgL2KbF2LAYu/Womy+iZRGgazBfd9vhrnyqpFtXhhOYqmVhPu+XQlCqvqRGmYLDY89sk6ZF4sB3XghLRptJoseOg/a3C+pFqUhs3G4q//3oCT2cUAxGmYLDY8+a+1yMoX1+WZZTm88v4mHDxRYNdwIMJyFFarDc+9uw6ns4pFaXAcxT/e34Lde3OvaAiLsCwFy3J4/e31OHLsgigNSik+fn8btqxPv6rpSIPjKN57cyP27swRpQEA3/x7B9YsOSxKg2PtkZhP3tqIbatPidb46SO7E9J2DUcaAPDNOxux7psDojXWfrYLX7+6WpLGj//chGXvbRKtofLroDoiKuAoxT3b16LFYpZUup2lFMXNjXj2wA6HtpRSPLRpIxqMRskalS16PLVtqyj7p9ZtQZW+RbJGg8GIx9ZsEtV07aU1O1BW3ySp8BZLKVrMZjz8w3pRGm+t3ouLlXWSNDhKYbLa8MhX62ETUanywzUHkVtUJanYGkcprCyLx/+7DmZr+51hr+XLNUeRfr5UUtE4Su0T7NMfrEOLkb+TbhvL1p3E0dMXJTXMo9T+98K769EgotX9+k3p2Lv/nKS+f222r7+9AdXVzQ7td27NwtYNGdJ7CxLgvTc3oqzEsQN6eFcu1iw9IlHAzsdvbcTF8xUO7dIOnMePHzq+J7THV29vQO7pQod2uccL8NXLq2Rp/PjPTTi9R7zjpvLLozoiKjhUVoSLjfWy+sewlGLnpQso1Quv9NMrKpBdVSVb40hJCS7UCd9oz1fV4ERxmWyNrPJKZJVXCdqV1DVi3/lCeRocxcWaehwvLBW0q2luwdb087IqvrIcRXl9Mw6eFb6ZN7easO5IjiwNjqOoazZgV1q+oJ3RZMWqXZmymvZylKLFYMb2I+cE7axWFis2pcmqwEupPfqyZU+28HPhKJavOiFDwe6MsCyHTVszHT6XlcuOyesdQwEKig
1rTjs0XfX9Idm9YxhCsOGnYw7t1ny1T3bvGA3DYL2IqMjaz3dBI7P3EaNhsPazXbIeq/LLoDoiKliak6Gomy5DCH4+myVo88OZTEUaGkKwLOuMoM3P6VmK+pRoGIJlaZmCNitPZcnuTdOm8dNxYY21x5Wt1hhC8NNhYY1Nx8/CdqXui1yN5fuENXYdPw+j2SpbAwBW7swQjHQcPHkBTXqj7OtTSrFmW8ZNNTGu5VRaIWpq9LI1OI5i4+YMWK3873duVilKimplOW2AvTfN9k2ZMBotvDYXz1cgL7tMdu8YluWwd8sZ6Jv53+/yohpkHM6X3TuGZTkc3ZGNuir+hU1dRSOObsm4qcaIWDiWQ/q+syi/JG57UeWXR3VE/uRUG1qxt0TeCr8NllIsO3eGd8JoNpuxNT9fscaqnBxYeCZPs82GtVm5irrpshzF5tw8tJjbv5lzHMWq0znKXgdHse98Iepa+LcDVh7NUtT/hqMUJy+U4rJAPsrqg1mSS4PfqHG2uAoFl2t5bdbuzVLUHZYCKK5oQE4B/3bAhp1nFDWwA4CauhakZZfwjm/eqlyjWW/C0eMFvONbN2ZCo6ADLQCYTFYc3MsfQdqxLk2xBmtjsW8L/4Jg58qTijrptrFbIB9l1/KjCntN26MiO348rPAqKh2F6oj8ySnTNynq4NlGk9mEFmv7E3ilXg8bJ7+7ZhtGmw31xvYn8NpWA8w2+Sv8Nmwchyp9+0mrzSYz9CbHOQuO4ChFeWP7OQNWlkVNc6tiDQAoq+N3RC7XdsznXlbDr1Fa1Sh7hX+dRnUjv0Z5g+yGgtdyuZJfo6S0XrEGwxBUCGiUFtcKRmXEoNEyqChv4B2/3BEaGgYVpfW84+XFNYq69QL28u0VJfwObnlhteztpTYopai4VKPoGiodh+qI/MkxWJWFzq+llccR6VANS/vXMlj4Q9LSNXheR0dq8ERdlG5lXIuB51o2lhOVzCoGvtcBAGaez0oqBiP/dUwm5RoMQ2AQ2NIQ2u4QrUEIDAb+6wiNiYUAMApcp7VFuRNNKWBo5b+OocUsKWm4PTiOwijwXE2tZsWOIeUoDHpxp/1UfnlUR+RPjpvCsuvX4uHUfuGxDtXQtV9AzZ3n32Vp8JSJd1dYPv46DZ4qpa7OHfde8T1frYaBUweEzwH+1wEALiKa9YnB3ZVfw9VBtVcxcByFuxv/ddwExkRrUCp4HXd3ZW0D2nAV0PDwVNY2ALBXN3UTeK7uHi6KoxUMQ+Dqwa/h4u6seKuMMATuXuKaMKr88qiOyJ+cSC9vRcmXbfi5uMKdx+EI8fSEk8RGd+3h7uQEP9f2bx4B7m6Ke9kAgE6jQbBn+9VJPZ2d4S2jAuuNMIQgzKf90tlOGg06+Ygv3sYHARAZ4MM7HhHkq3ifHQAiBSqgdg7165DCUZEh/BpREX6KJyUAiAjlLzMe1TlAsQbHUYSH8Wt0jg5QnL9hs3EIj+AvmBcRHag8R4RlER4VwDseFhukKC8IuPJeCVSkjejSSXFEhBCCsFhlfXdUOg7VEfmTE+DqjrFRcYpPzSzqnsI76Xg6O2NafLziUzPzk5LgxNMbRqfVYk5yD2UaDMH0pATe6ArDEMzv11PxqZlxPbrA151/NTZ/sHKNQfGd0cmX36GZO1x8Jc72YAhBz5gQRHfin1xnj05WFKYnBIgN90dCNP+EMX1ciqJJiQDoFOSF1B78nWKnTlKmAQA+Pm4Y0I+/iuuk6b0U52+4uekwdGQ87/j4Wb0VazjptBghUMV1/Nz+HeIkjJndl3d8zG2DFDu4HMdh/OIhiq6h0nGojogK7uyRqugkCADcniA8sS1KTlF8amZBz2Th59Crp+ITLQt6Cb+OuX2SFE2uLEdxe3/h1zGjX6KiVSXLUdw2OEXQZnL/BOichBv+CcFRittGCmuM7tdVcFvFEZQCc8emCk46g/vGws9BgzlBCDB7YqpgxCM1pTNCHDR/E4JhCGZMTYVWy/9+x3cPRb
SCaALDEEya3gvOAlt7UXHB6J4SKTu6o9EwGD01Be4CWzzBEX7oMzxBfh0RDYOhk5LhE8DvRPsGeWHItN6yozuMhkHfMYkIFogeqfy6qI6ICgaGRiDeL1BWNIEhBJNjuiHEQ3g7IblTJ/QODZWloSEEI6OjEeMr3AgtLtAfQ2M6y9boExGGHiHC4dowXy+MS+wiK2KhYQjiQwLRJ0q4Tbq/pxum9e0hW6NzgA+GJEQJ2nm4OmPOsGRZE5+GIQj29cCoFOEmZc46LW4b30vWFhDDEPh4umL8IP4VPmDPd7ltGv/qWVCDELi56jBplHAjNIYhuG3eAFkahBB7M7+Jws4nIQS3LR4k65QRIfbJddqs3g5t590zTHbEggKYfvtAh3azHxgpu44Ix1HMvHe4Y41Hxsp+HRzLYc5j42U9VuWXQXVEVEAIwTcTZsLH2UXSJK4hBF19/fHusHGi7D+bMhWB7u6SNSK8vfHvCRNF2X8wYxLCfbwlawR5euCT2VNE2b81cxxig/ykaTAEPm6u+GzRdFFh5RdmjkT3iCBJBdo0DIG7sw6fPTATGhE5OU/MHIJeceGSHB6GIXB20uK/j8+Ezknr0P6eGQMwMDlKmgYhcNJo8OHfZsFNRDLqbdP6YOSgbpKcKkIIGIbgvZdmwdvTcdLi1EnJmDg+SaKG/e/NV2chwN9x3s+ocYmYNV98Y7k2DUqBl96YiZAwxx2LB4yIx4IHR0jSaOOZN2chqovjvIqUQV1w7wtTZWk8+uZsdEvp7NCuW+9oPPb+Alka9742B8lDhR1clV8X1RFRAQCEeXph9YwF6OTu6XDSIFf+egZ2ws9T5/OeZLmRQHd3rJp/GyJ9fERrdA0IwIp58+HtIi5J1MfVBcsWz0VcoL+olThDCDr7+WL5nfPh7y4uxO/urMP398xF97Cgq8/TkUaItyd+vH8+OnmLS0R10WnxxYOzkBptj56I0fD3dMeSx+cjQiBJ9VqctBp89Oh0DOze+co1HGgwBN5uLvj6mXmIDeVPWLwWrYbBu09MxfDesVefpxAahsDdVYdPX5gjmBty4/N65clJGD+8h2gNF2ct/v3KHCQnhIvSIITgmScnYOrklKuaghoaAicnLd5+fTb69o4WpQEADz4+FvMWDhSpwUCjYfDyW7MxZLj4iXXxI6Ox6OFRdg0H2xsaDQOGIfjrW7MxekqKaI3ZD4zEvS9Ou3oNRxqEEDz21hxMXjRYtMbku0fgsfcXghDisNx72/h9r8/BnMfFLZxUfj0IVXro+xdEShthlY6h0WTE0txMLM3NQK3RAC1hwIFemXAJbJRDtLcv7kpMxfz4nnDROl4V30iz2Yyfss5gSWYmqlpaoGWYq5VEGUJg4ziEe3nhztRULEjqKes0jMFixc/pWfjxdCYuNzW3qxHi5YFFfVJwe6+e8HCWfnzSbLVh1elsLDueieK6xnY1Aj3dcXv/ZNzeP1nWiRurjcX6k7lYdigDhVX19kgHpaDXaPi6u2Le4J64fUgK/Dyk50vYWA6bj5/Fz3szcOFyLTQMA2pvYAKGIbCxHLzcnDF7WE/cNjIFgd7tnyoSguModhw7h5U7M3C2sOpKxIaCu6LBshzcXXWYMTIJ88f1QrCICMKNUEqx92geVm9JR/b5cvsESK/XcHF2wpQxSZg7uRfCOvnI0jhyrABr1p1CZlYpNBoCUFyn4azTYvy4JMyZ2QcR4fzJvEKcPFaAdStP4vSJQjAMASEEHEfBMAQcx0Gr1WDMhCTMmt8fUTGBsjQyjl/E+mVHcfJgHgi5UYNCoyEYPqEnZi4ehLiEUFkaOScvYt03B3BsV479HsIQcCwFo7FrEEIwdFIyZt47XFQkpD3Ony7Eui9249CGNFD6/8+fYciV5oYUgyalYObDY5A4sKssDRXpSJm/VUfkTwbLcTBzNrhqnAS3CGwch93FF3GkrBiNZhM0hMDXxRXjo7ugf0i44GM5SmGyWeGqFdZgOQ4HiopwoO
gSGk0mEELg6+KC0TGxGBQZKbiytWvY4KLVOrQ7eqkE+y4UotFoAgWFj6srhsVG2fNJBLYwKKUwWm1wcRLWoJTi1KUy7Dl3EQ0GIyil8HZ1wYDYSIzoFgOtwIrwqoZWK7gCppQis6gcu7MK0NBigI2j8HJ1Rr+4CIxMiuU9TXSthrNW4/D15hZVYVdaPur1BlhZFl5uLkiNC8Po1DjBrRh6pfOvzoEGAOQVVWPX8fOoazLAYmXh6e6MpLgQjO7fVbD2CKUUZosNWq1G8D0FgMLiGuw8dA51Da0wm23wcHdGQlwnjBkaL1h7RIpGSWkddu3JRW2tHiazDR4ezoiLCcaYUd0F64JQSmGx2KDRMIIJrABQXlaPXduzUV3ZBJPJCg8PF0THBmH0+ER4OqiDYTHbQBh7jooQVeUN2L0pE1WXG2A0WODu4YyImCCMmZoCb193BxrWKxrCC5LaykbsXnMKFcV1MLaa4ObhgrDoQIyZ3Q++gcJOp8Vs7/KscxbWaKhuxu7lR1FWUAWD3ghXDxeERgdizG2DEBAqvG1ltdhAOQqdS8fV8fmzozoiKtdRadBjeUEGll/MRI2xxb6aBkGMlz/u6NYbM6ISeYuRiaXG2IrleVn46XwmKlr1V1fsnT19cEdCKmZ3SYS3s7IaHPVGA1aezcFPOVkoa7aXKCcAwr28sSgpGXMTEuHLU2dELE0mE9Zmn8WPGWdQ0tB4VaOTlyduT+mJ+T0TRW/h8NFiMmPDmXNYdvIMiuoawFF7xCnQ0x3zeidhbu8kBHtJjzpci8FixdaM8/jpSCYKKuuuavh5umF2v0TM7Z+EEF9lvymTxYYd6XlYfiATeZdrriYP+ri7YvrA7pg7pCfCRW4T8WGx2rDn9AWs3J2Jc0WVV3sJebm7YPLg7pg9MhmdOznOjRDCZmNx8FQBVm3PQE5+xdUjrh5uzhg/NAEzxyUjJkLcVhSvBsvh2ImLWLspDVm5ZbBdadjm5qbDqGHxmDE5FV0U1rVgWQ6nThZiw9rTyMgohu1Kkz1XVycMHR6PaTN6I15mZKMNjuOQcfISNq48gbRjF2G12J0EZxcnDBoRj6nz+qF7zwhFx2sppcg+WYhNPx7FyX3nrnNE+o1MwNRFg5DUL0axxrnThdj03UEc23YG5ivVc52cteg9ojum3jMcKUO7gemA+kd/Vm5JR+Qf//gHXnjhBTz55JP48MMPRT1GdUSU0WQx4u8nt2NbyXmA4KZGagT2THgXjRZ3deuLp3sOh1biD6/FYsYrx3Zj/cWzoBTgcLMGYC8Utig+Bc/3HQGdwOq9PYxWK944tA+rz+WC5W5U+H8dDcNgXvdEvDx0BFy00lY2ZpsN/9x/CMszs2G90ljvRh2GEBAA03sk4NWxIyVXc7WxHD7ccwQ/nMiAxcavAQATenTBa1NGw0vidg7HUXy++zi+P5AGo8V69TO+UYOCYmT3WLw2Z4zk7RxKKb7bdQrf7DyFVpPlasLkdRoMAeUoBnePwisLxiLIR5pjRSnF8l0Z+HrjMTS3msEQctP3V8MQsBxF34QIvHzPeIQESL9HbNidhS9/PoxGvfFqSL89jeSEMLzw4DhEChQ+42Pn3lx8/s1+1De0tq+hIWBZioRuIXj2yQmIiZK+1XLwwHl89sku1NboeTQYsCyH2LhgPPPsJHTtFiJZ48ShfHz23hZUljdevV57Gp1jg/DkS1PRIzlSskbG0Qv476vrUF5U264Go2HAsRzCogLw2BuzkDJQ+ORWe+SevIhP/vYTivMqBDWCI/3xyNvz0G9skmQNlVvQETl16hTmzZsHLy8vjBw5UnVEfgUqDXos2PMjSlsaRdXWIACGh8bi86Gz4awRl/dRa2zFgm0rcKGxTlS3WAJgQEgEvh07G25O4ibxZrMJi9evRk5NtSgNhhD0DArGkulz4CUy76PVYsF9q9fjdOllUY3gGELQNcAfS2+bAz83cREYs9WGx5ZvxOGCYlEaGkIQ6e+D7+
+cIzo6YmVZ/G3ZVuzO5u/yep0GQxDs7YnvHpqDMD9xdTJYjsOrP+7E5pP8XV5v1PDzdMNXT8xBVLC4SZxSivd+2IvV+/i7vN6o4enmgs+enYMuEeImcUopPlt2CMs28nd5vVHD1UWHD/8+G93jxE/iS346im9FdnllGAKdTov3Xp+D5CT+Ams3snrlSXzx6W7RGhotgzfenou+AgXWbmTr2tP4+J1NAG52OtvTYBiCl/4xD4NGJojW2LshHR88u+JqXocQ9hNJBM+8Nx+jpvcSrXFkayb+8eA34FjO4fFf+5qA4PF/3Y6Ji9TiZ1KRMn//4nGnlpYWLFy4EF999RV8HdSBUOkY9FYz7tz3s2gnBLCvmg+UF+KvxzaJmvCNNivu2rkaBSKdkDaNE5VleHTfRrAiuvGaWRvu27weuSKdEMAe9cmursKDWzbAwjruxmvjODy+fjPSyspFd6PlKMWF2jrct3odTFabY3uO4tm123HkYoloDZZSlNQ34r4f1qJFoLFcG5RSvL56N/bkiHNCAHvhs6omPe7/31o0GcQ1AHt/7QHRTkibRr3egAc/WYM6kV2Fv1h7VLQT0qbRbDDh0X+tRmVd+12Nb+THDadEOyFtGgaTBU+9tQalFfwdbq9l3eYM0U4IYP+emM02PPvqahQWiesMu3N7tmgnpE3DZmXxykurkHe+QtRjDu89i4/e3nTFQRCpwXJ467mVyM4oFqVx+mAe3n92BTiOiioYSKld5/1nVyDtUJ4ojexjF/DuA1/DZmNF1SBpc4g+/ttPOLIlQ5SGijx+cUfk0UcfxeTJkzFmzBiHtmazGc3Nzdf9qUjni9xjuNhcJ7nKKAXFlpJz2HP5gkPbb3PTkFtbLVmDoxR7SwuxodDxZLY8Jxunyy9L1mApxfHLpVh5Ntuh7cbcczh4qVi0o3OtRnZlFZamO75B7T5fgB1nL0jX4Cgu1tTjm8OnHdoeu1CC9afPSi6IxXIUlxua8Pmu4w5tsy5V4Of9mdIErmjUNrfi441HHNoWlNXi280nJGtwHEVzqwn/WX7AoW15dRO++OmQLA2jyYIPvtnj0LauvgWffCneQWijLZH1/Y93OLRt0Zvw4QdbZWgArI3De+9ucjjpm4wWfPDaeska9pNEFO+9vAacg0WHzcri/b8tv3kPUaTOv/66/Go+DB8cx+H9x5fYHRAZOv9+6geYOqBDskr7/KKOyPLly5Geno53331XlP27774Lb2/vq38REeLDkyp2zKwNPxWkS5702tAQgqV5whMfy3FYcjb9pnwQsTCE4Puz6YI2lFJ8f0bYRggC4PszGQ5vtEvSMmX3daEU+CEt02F058cTmbJ74HCUYvnprKs5JXz8fCRTUvGza2E5irUnc2CwWAXtVhw8o0hj6+nzaHYQeVm9V5nG/vQC1Da2CNqt23lGdodYlqM4mVWMsspGQbstO7IgIujXLhxHkXu+HBcvVQva7dyRBauDCVhIo7ioFmdzLwvaHdiZA0OrWZYG5SiqK5qQfrxQ0O7Y7lw01bfKap1AKUVTfSuO7c4VtEvffw7VZfWgcqqxUsCgN+HghjTpj1URxS/miJSWluLJJ5/EsmXL4CKyGNULL7yApqamq3+lpaW/1NP7w7K9NA9NFnFh9vZgKcWRqiJcaq7ntdlXVogqg/DNXgiOUpypqUBObRWvzYnLZShqapTp6tgXPRcb6nGqnP9Gm11Ridwq8ds+7VGhb8GhS/zh58KaepwsKlPUA6fBYMSe8/xbLhWNeuw/V3j1RIkcDBYrtmXyh7gbWozYnpanSMPGsth44izveIvRjM1HchVpgALrD+bwDpstNmzYnaWoMRvDEGzYzb91ZGM5rNvs2AkWQsMQrN+SyTtOKcW61adl/z4Ae5LshnX8kyulFOt+Pq7odAqjYbBxpXCEa+PSI4q6GzMMwcYfhKNtG789ILv/DWCvf7Lhm32yH68izC/miKSlpaG6uhq9evWCVquFVqvFgQ
MH8PHHH0Or1YJtZ//e2dkZXl5e1/2pSGNryTkwChu8awjB9tLzvONbLuUp6nLbprG1iH/i21KQBy1R9vXUMgy2FuTzjm/LuyCqFLoQGoZg23l+jZ1nLyh+rxhCsC2HX0NKXggfhEDQETmQXSgqr0cISoHtp/k1TuQUw2xxnHMjBEcpdhzn/+5mnC1Di0HeCv+qBkex8zC/xvm8CtQ3iMuH4YPlKPYc4N++vFRYg4qKRnnbGW0aLMXB/ed4nbKq8kZculClyKHiWA4nD1+A2dR+tK2pvhU5py8pcgw5jiLn1CU01bf/npsMFpzekyu7/w1gj+4U5pShqqRO9jVU+JFeFlMko0ePRnb29Xv0d999N+Lj4/Hcc89BI/EIp4o4qo162VsmbRBCUGcy8I7XGFsVd+slIKgX0KgzGsFSZRMfRynqjfwa9QajuOw7AViOos5g5B2vazXYV5RKbuaUorZF4HW0GKBhGNiU3GgpUKvnnzwbWgxXj7IqQShhtV5vaPe4sVTqm/nfqwaBMSk06fk/84bGjtFobTWDZbl2S6Q3KHR02rDZOBgMZnh43By1buSZ2KVCKUVzkwGBLjefzGqskx9ZvZGm+hZ4+91cgE3f0KLImbqWxlo9giPVrr0dzS/miHh6eiIx8fqulu7u7vD397/p31U6DpvCVSsAgAI2yr/3bOPk7UvfICH4XPnqhUjSoBRWAY0Oea8AWAXeD6UTdxtCJ4CURiraEHJk7GPK3QTBz5zl0G5REokIvR831oyQrSFwnY76PNqu1Z4j0lGvA7Anrrb777+ChpIohVgNvn+Xg81BrpaKPNSycX8wfJ2VVf1sw1vHn9fj6+wGonD7hwDwEtDwdnbugO0fRrCaq7eLs6L9b8C+beIjkAPl5aqsYm0bfu789Uo8XZwVhbbb8HHjfx2ers4OTz+IwUtIw82lQ1auHgLvuadA6XUpuAloeHSQhpOThresvqensirF19JeNAQAPLw6UIPnWh7eyiohi7mWh0/H3BPtGh13LZX/51d1RPbv3y+6mJmKPAYEd1acI2KjHPoF8VdF7B8SAcUrY8qhfyf+U1H9wyIUb//YKId+YfzdVftFhCuOinCUom8Ev0bfKOUahBD0ixLQiI1QlHAL2B2q/nH8n3mfruGKI1QahqB/N36NlK5hChXsGv2682skdg1VlBjZptEnkf+7261LJ4f9XRzBMESwqFl0TBBcXaVV9m1PI6F7KG/n2rAIf3j7Kpt4CQEiYwLhzuPs+Ad7ISjUR5EGAASF+sA/uP2cQncvV0R27aR40eHt74GwmCBF11BpHzUi8gdjXmwylPzeCIBIDx8MCo7itZkV1wM6kdVX+QhydcfoyFje8cldusJTYgn1G/FydsbkOP5um6O7xMJfZGVUPly1WszowV89cmB0JMJ9vBS5hhpCMDuVfzuzZ2QndOkUoOhzB4C5A/hLWceFBCAlJlT2UWfAvk01b2hP3vGwQG8MTIxS5CiwHMW80Sm84/4+7hjRv4vsI8JtGnMmpvKOe3q4YNyoHu1uqYiF4yhmT+WvGOri4oRJU1LAaOS/Do6jmDm7L++41kmDKXP6Kvo8KAVm3j6A1wlgGAZTFw9W5CQQQjDtjsG8fWEIIZh+30hF0TaGIZhy1zBoFTqYKu2jOiJ/MAJc3DEpMkHRtsZd3foK3hi8dM6Y06WHbA2GENzZvZdgXxsXrRNu69FTtoaGECxMTIazlt9h0jIMFvVKkT25agjB7J494OHM7zAxDMGi/vyTlkMNhmByUjf4CmzNEEKwcEiK7NQKDUMwonsMOvkId0G9fXiK7MgLQwj6do1wWOZ9/pgU2dtMhAAJUcGIjxJuHjdnQqrs3B0CICLEFykJ/BEqAJgxOUVRjkWAvwf69xEuwT51eio4Vv7k6unlgiHDugnaTJzZW1EkzMXVCSMnCPdqGTe7jyKnTaNlMHZWH0GbkbP7wllBBIkCmKCWef/FUB2RPyCPJw6BjtFKzu
PQEIJID1/MieFftbbxUM/+cHfSSZ7ENYQg2M0DixJSHNrem9Ib3s4usjR8XFxxV7JjB2BhajIC3d0lOzwMIXDX6XBfv94Obef06oFwX2/Jq3CGEOg0Wjw4rJ9D2ym9EhAb7CdZo61Z4CNjBzi0HZUSh+6RQbI0CCF4bMogh7YDk6KR2i1c1iqcgOCxuUMd2iXHh2FgarQsDQrgscXDHK7gu8Z1wqhh8bJX+g/fO8Lh5Bwe4Y9JU1JkR8Lue2AkdDrhyGZgsDdmLRgoTwDAHQ+NgqubcM6Ml687bntklGyN2x4eBS/fm0/LXIuruwvueG6qbI2ZD4xCQIiP7MerCKM6In9A4rwD8OXwOdAyjOh8EQ0h8HV2w9JRt8PDyXGyXaSnD74dNxtOjEb0JK4hBJ46Z/w4YR58nB1viQR7eGDJ9Nlw0Wolabg6OWHp9NkIcnfcLM7PzRXfz58Fd51OkoZOo8HXc2cg3NtxszgPF2d8e8cs+Li6iJ7EGUKgZRh8vmAaYgIcN4tzcdLiy/tmIcDTXZKGhmHw78VTEB/meO/bSaPBfx+eiRA/L9Eabc3J3rlrApJjHLegZxiCD56YhqgQP9GOQtvH9tLdYwXzQ/7fnuDNv0xB16ggyc7IX+4eiaF9xHV8ff7pSUjqHibZGbn/zmEYM6K7KNsn/jIeffrGSHZGbl84CJOniovU3fvEWAwZnQCp+4vT5vfDrIXinJgFj43BmJmOnfobGTOrNxY85rh9CADMfHAUpt4zXJoAAQZPScU9L8+U/NxUxPOrdN+Vi9p9VxnpNWW4/+AqNJiNYEDarS+iIQQspejqHYjvRs5HiJu09zm7thJ371yDGmNru63ar9WI9vLF0vFzEenlI0kjv64Wd21ci4oW/dVr8WmEeXrh+2mzEOcn7ax/UUMj7l25DsWNjbwaba8vyMMdX8+Zge7B0hLXyhub8eCy9bhQXedQw9fNFV8snI7kcGnt2mubW/HIt+tx9nI1b92PthOyni7O+PiuaegbK7zNcCONLUY89b+NyCws59eAPXrg5uyE9+6ZjCE9oiVptBjMeO7TTTh5tkTwdYACOict3nhgIkb16SJJw2iy4tWPt+Dw6YuCNVIIAbQaDV54aBwmDBPnILRhttjwz/9sw54D56DRELA8WymE2DvWPvXIGEybmCJJw2Zj8eEH27F96xkwDOHd2mpzuh54eDTmzHMcZbsWluXw5b+3Y8PyE2A0hHdLiGEIKAXueGgkbr/XceToWjiOw5J/78DK/+2zvw4+DY39Nc57YCTufHo8b25Ie1BKsfzD7fjhvc0gBPzvlYYBx3KYdu8IPPDGHEVbR39WpMzfqiPyB8dks2JLyTksyTuNnIbK68YIgBGhsbijax8MDYmRnSthZm3YVpSP73PTkFFzc0fPIaGdcVf3XhgVESu7kqmVZbGzsABLsjLaLdvePywcd/ZMxZjoWDjJLJZn4zjsu3gJS9MycKz45vYCqaEhuKN3CsZ1jRPMPRGC5TgcLijGspOZOHSh6CbXsEdoEBb3T8WEHl3hwnN00xEcR3G8oAQ/H8nE/nOFN+WOdOkUgEVDUzExpRvcdE6yNCilSCu4jBUHM7Ens+AmBzQ62Be3j0jF5L4JcHeRtzdPKUVWQQVW783ErlP5N+VchAd5Y/6YVEwe3B2eAseCHXG2oBJrd2Ri55FzsN1Qc6JTgBfmTEzF5BE94O0pP7H5wsUqbNiaiR27c2C5oT9MgL8HZk3thUnjkuDrI7zFIETRpRps2pCO7VuzYDZfX8nUx9cN02f0xsQpKQgIEM4FEqKsuBZb1pzGtnVpMN7QBM7L2xVT5vTFxFl9ENTJcaSQj4qSOmxdfgLblh9Hq/76dhXuni6YeNsATLqtP0IUFBarLqvHth8PY8uSQ9DfUBzO1cMZExYOxqQ7hiI8VjjfSIUf1RH5E1BvbkV2YxmaLEZoCAN/Z3f08usseJrlQlMNLrc2w2CzwNPJGbFeAQh153
9fmyxGpNeWodFiBEMI/Jzd0DcgEi5a/snrYmMdSlua0Gq1a0R7+yHCk/+m1GwxI626DA1mIwgIfJ1d0S84HG5O/JNXUWMDipua0GIxw0PnjCgfH3T29uG1b7VacKriMhqM9mqYPi4u6BMSBk8d/xZUaWMTLtU3oMVigbtOhwhvL8T482+RGK1WnLps1+AohY+LC3qFhsJboMZIeWMzCmsb0GI2w03nhFBvL8QF8d9czTYbTpdcRn2rESzHwdvVBcnhIfATOPlT1dSCi1V1aDGZ4eLkhE4+nujSyZ93pWqxsUgvvow6vQE2joOXqzOSwjshwJN/gqxtbkVBeS30RjOcnbQI8vFAt7BAXg0byyGj8DJq9QZYbTZ4ubqge2Qwgrz5t9Iamg24UFYLfasJOictAnzcEd85iFeD5ThkFZSjpqkVZqsNnm4u6BYRiBB//u97c4sR+ZeqoW81w0mrgZ+PO+Jjgnm3bziOIqegHNX1LTCZrfB0d0aXyCCEBvF/31tazci/UAl9iwlaJw18vN0Q36UT74qbUopz+RWorGmG2WyDu5sO0Z0DERHqy6thMJiRn1cJvd4IjYaBt7cbusWHQKtt30GnlCI/vxIVlU0wGS1wc3dG50h/dO4cwKthMlqQf7Yc+iYjCEPg5e2KbolhcOJxnimlKLxQhcul9TAZLXB1c0Z4Z39Ex/JHFS1mK/LOlELfZK9U6+nthm7JEdA5899/ii5UouxSLYytZri46RAa6Y+Y+BDe74nVYkNeRhH0Da3gOApPHzd0TYmCixv//afkQiVKC6pg0Jvg4qZDcIQ/uvSMUHw8+I+G6oj8QaGUIquhFMuLTmJ7ec5NJdC9nFwwt3NfzOncB2Fu/DcqR2TXl+PHgtPYWJJ7U9VQD60O82N64fbYXojydJy7wMfZ+mr8eD4Day7mwMxe31/ETeuEeV16YlG3VMT5yF/1XKivxY+5Z7DyXDaMtus1XDRazI7vgcWJKYj3D5Stcam+AT9lncHK7By0WK5fIeo0GkxPSMCilGQkBstfWZU2NGFFehZWpGej2XR9nxQtw2Byj25Y0CcZyWHyayVUNOqx6mQ2Vpw4g8YbOuRqGIKxiV2wYEAyekVJz3loo6apBWuP52DFoTOo019fBp0hBCOSYnDb0BT06yL/pl7fbMCGIzlYsfcMam7owksIMDgxGvNHpWBA986yj6U2tRix+UAuVu3MQGVt803j/ZI6Y+64VAxKiZYdAWxpNWPHvlys3pyGyxWNN42nJEZg9pReGNwvDlqZ2wYGgxm795zF2rWnUdJOD5UePcIwc0ZvDB3aTXZdFJPJin07crBhxUkUFtzc5LJrQihmzO+HYaO7Q+csLwJoMVtxcHs2Ni47hgs5N0dLY+JDMG3hQAyf1BMuMk/OWC02HNmWhY3fHcS5tEs3jUfEBWP6PcMwcmYfuPHUTPmzoToif0CMNgueT1+NfVXnoSEMbx8Whtj3aJ9MGIO7Y4dIuqGbWRueP7UJm0pyBTXa8hse6z4ET/YYLknDyrF45fgu/Jx/hjdP4lqN+3v0xQt9RkraNmI5Dm8fPYBvs9JEaSzsnozXh40WPE58I5RS/PvIUXx24oQojRkJCXh3/DjoJGwbUUrxvyOn8J99R8AIaVzJbxgXH4d/zZgoeUvnxyMZ+OeWAwDAezy3TWNIl87498IpcBc4stwea4/l4K2Vu0GpY43esWH48L5pglVY22P7ifN47fsdsLGUt2ZEm0aPqGB89MQM+HpKK9h18HQBXv7vFlhsNt7j0m15GrERAfjwudkI9HWcNH0tJzMu4eV/bIDpyvZKezptGhGhvnj/tbkICZa2FXImqwQvv7wGLS1m3qr6bRrBwV745z/nIzJC2qIg7+xl/P2pn9HUaODVIAwB5Sj8AjzwzkcLEdNFmsN+Ka8Cf3/ge9TX6K9ei0/Dy9cNb3xxF7olScuJKrtYhZcWfY7qsgbeHJy21+fu5YrXvr0fif35ayT9WV
AdkT8YRpsF9x/7HjmNlyU1tLs7dgj+0n2cKFsLy+LeQz/jeHUxqASNBbG98HqviaKcERvH4cG9a7G37KKk2gQzYrrj30OniHJGOErx1O4t2HiBvzvqjRAAY6Pj8Pn4aaJWsJRSvLRrN1bc0NTRkcagyEh8M2um6ByWf+46iG+P87dpvxGGEKSEheC7RbNFOyOf7T6GT/ccl6TRtVMAlj44T7QzsmRvGv694aBoDQ1DEBnoi6VPzRftjKw5kIV3ftwjuhuOhiEI9vPE9y/cBn8vcXkZ2w6fxeufb5Ok4evthm9fX4ggf3F5GQeP5eOVf24EBUQV4NIwBB4eLvjivYUICxEXBT11+hJefHEVKKWiarYwDIGrqw4ff7wI0VHioofZGcV4/vEfwdo40Ro6Zy3e/+JOdE1wfLoKAPJzyvDcnV/DYrGKqqfCMAQarQbvfHMPEntHidIozqvA0zM/hMlgEdUXh2EICEPw+vcPovfweFEaf1SkzN9qKvDvgJcy10p2QgDgu4uHsarolCjbl9O2SnZCAOCni+n4Jv+EKNu3T+2V7IQAwPrCs/go84go2/+cPCLJCQHsk8quSwV459gBUfb/O3VakhPSpnG0pASv7tkjyv6n02ckOSGA3QnLvFyB5zfuEGW/Mf2sJCekTSO/shZP/7RF1ES550yBJCcEsFcuLalpwJNfbRQ1iR3LLca7y+zvq9jvFstRVNXr8eTH62EV0cgsM68Mb365XbJGQ5MBT/xjNUwWq0P7vItVeO39TaDgj+i0p9HSYsLTr65Cq8Hs0L6oqBavvLIGHCfOQQDsuTBGowXPPrsCTU38XYfbqLjcgJefXi7aCWnTsJhtePGJZair0Tu0r6tuxssPfC/aCWnTYG0sXn1oCSrL6h3aN9W34MUFn4l2Qto0OI7izfu+RsmFSscPUAGgOiK3POeaKrC74qxkJ6SNT/L2wMrZBG2K9PVYU3RGshPSxse5B2CwWQRtKlv1WHIuXXaVxs+zj6PJbBK0aTAZ8XnGSVnXpwC+y0pHdatwW/JWiwWfHDsmW2Nldg6KGxsF7Sw2Gz7cJ87xuhGOUmw7m4/zVTWCdizH4d/bD8vWOJxfhMySm09IXQulFB9uOiSrvD3LUaQXXsaxvGKHtv9dK+91sBzFueJqHDhz0aHt/1Ydla1RVF6P3cfyHNp+v/wIOI5KrpDLchSV1U3YvjfXoe1PPx+DzcZK1uA4ioaGVmzenOHQdvWyYzCZrJIr5HIcRYvehPUrHf+GN/x4FC3NRsmVZTmOwmSyYu33jr8zW388ioZaveQOwZSjsFpZrPx0l6TH/ZlRHZFbnBWXTkBD5H9MjRYD9lYKRwiWXUxTVBLeYLNic4nwTfCn/EzJBZGuxcqxWF0gHIVYdS5HcQv25eeENTacO39T4qsUGELw85ksQZsd5wrQZHK8uuVDQwh+ThPWOJh3CTX6VkEbQQ2G4OdjZwRtTheUoaSmUbbzqWEIlh/KFLQ5W1SJ8yXVssvbMwzBir3CGkXldUg/V6qg9DzByh3pgjZVNc04euqioi7KqzenCUZSmpoM2LfvHG8tE0dwHMW69WmCpesNrWbs3JQpefK+VmPL2jRYLPy/MYvZim0rT8p+rziWw861aTC08v/GWBuLzUsOtZtzIlbjwIYMNNULL2xU7KiOyC1Ms9WITZfP8CaNioEBwU+X+MPvJpsVKwszFHW6JQCWXuDfArJyLH48n6G4Q+z35/hvtByl+D5bfsSl7RpLszMEu+UuychQ1MCOpRTLs7JgFnBmfjiVoay5HKVYdyYXLWb+G+1PRzMVOZ8sR7EjOx/1LQZem+WHzihuLnfo7CVU1N98MqWNVfuzFGlwHEV6/mVcqrj51Egb6/Yo06CUIr+4Bmcv8ofqN+3MUnT8k1LgckUj0rNLeG22bctS5OgAQH19K46f4I8g7dmWDbOAEyGGFr0Jh/ae4x0/vCMHLc3C0VFHmE1W7NuUyTt+cs9Z1Ffzf+
/EwHIcdq0Ut239Z0d1RG5h8poqbzo+KxUOFGcaSnkn8ILmWrQ62FZxBAVwvqn6pmO4bZTqm1Bvdry37EijtIX/OrWGVpS3ON5bdkSt0YByffs3oFaLBQV1dYqcHQDQWyworG9od4yjFFnllYqdNrONxfmqWt7x9OJyRc4nYE8+zr1885HMNtIulsluLtcGpUBWMf8WUFqecg0AOFPAr5F+tlSxBkMIsvJvPlp6VT9XfsSlDY2GQc45fo2cnDJFHWivamSX8Y7nZpUqcqLbNHIz+R2q3PRiaLTKpi6GITibwb/tl3uqEBqeuitioZQi91Shomv8WVAdkVsYvVWZ198GRymMbPvORnMHaQD2Amjtalg6UIMnT6RZYPX/e9JoNVtkbzPcSLOpfQ2W42CyKlu1ttFk5H9PWk3KHNw2mgWSMFsE9MXCMATNBv7vqL5V+feXYQj0Aq+jWa9cgxBAL7Dd0NSsbDHQRovA+9HSbFTsUFFqT8Dl1dAr1+A4Cr1A4m1rsxHi05J5oEBzvfztzz8TqiNyC+PEKPPIxVzrj6Iht6hTuxo8x2udZBanav9av93rYAhRvGr9fw3+5yu3mNfNGvzfn47oAUIphZPA6pevIqk0DTjQ6ID3yoGG3KJk10KI8Puh/RU0nJy0sjsO/78I4KT7ZV8HADg56G6sYkd1RG5h/J2lFULiw02jgxPT/g8iwEV+b4tr0RIGXk7t13wIcJVWNIoPAsDPpf1y5n4ubopyN67Fn+f5eru4KMqruJYA9/Y1XLRauMjsYyNWgxACbwW9Wa7T8OD/bP0U9Ga5Fn+BomP+Xsq/W5QKXyfAx13xd4vlOPh68b8fAX4eip1DlqPw9RZ4r/w9ZFeTbYPjKHx9+DV8/Tw6wDkk8PHjvy/5+HuAKPxENAwDH3/++6tPoKfiyCSjYeAX/OetfyUF1RG5hYn37oRQVx9F19AQBpPDe/KOR3n4oYtXoKKftYYwmBCewLsCDnbzRK/AUEU3Wg0hGBEeA3eeHjRezs4YGhGlyFFgCEHfkDAEurV/E3TSaDCuS5xCDSAhMBCR3u1XwiSEYEpiN0XJkQRApK834oP5i09NSYlXpAHYnZDkSP7iU5P7JCieXD1dndGvawTv+KQBCYpXx85OWgxKjOIdHzcoQXFekEbDYFjvON7x0UMTFOcFARTDB3XlHR0xIqFDtjRGjEzgHR8+trvgqRoxsCyHEWN78I4Pm5jUIRrDJ/LfF4dNSZV98qcNjuUwbGovRdf4s6A6IrcwDGGwILq/Iu+fpRzmdeZv+U0IwZ1d+iq60bKUw+IufQRt7krorehGy1KKO+N7C9rcmZSqKAGToxR3JQnfOBanpCjUAO5MTRU8IbGgT7Li5MhFfVMENeb376lIgyEEtw9MEdxKmjMoSfb1AXtexexBSXAWqBI7dVAPRVtAGoZg6uDu8HDlb4A4fnACXBSE2DUMwdgB3eArEHUZMbgrPNz5n4MYjUF9YxEcyL8CH9A/Fv4CUQBHMAxBcnKkYKn35N5RCA33le0cEkIQFx8iWF21a2I4YhNC5Z8yIkBoZ38k9Y3mNYmIC0bSgDgwGvn3Xr9gL/Qd1V324/9MqI7ILc70iFTZuQkaQtDTJxzdvDsJ2k3rnAg3rU6Wu8MQgjivAPTyF+7fMKFzN/g6u8pyqhgQhLl7YVgY/40DAEZERiPE3QOMDA0CAn9XV4yL5l+1AkC/8HDE+PrKWukTAB46HabEdxO06xESjKSQYFmRFwJAp9VgZrLwDTA60A/9YyNkR3cIAWb3TRS0CfbxxPDEGNmRF0op5jpwZnw9XTG+n/wIEstRzBmeLGjj7qrDlBGJsrc1WI5iztgUQRudkxYzJqYo0pg1WdiJ1mgYzJzRW/YEznEUM2cKLwYIIZh5W39Z1wfsn/mMefwLpzamLx4k+wQQATB90SCH78P0e4ZJLph2VYMhmHbXsA7JYfozoL5LtzjeOje8ljxD8uMYELhonPBGykyHtm
5aHd7vN02yBgGBE6PB+/2nO/xR6zQafDRsqmQXgcC+Evto2FSHk7+GYfDxuCn2fg8SNQgBPh47xWEfGEII/j15EpwYOe4O8MHEiXB14m9j3sa708bB2UkLqfMSBfDO1HHwcnGcA/LazDFwd9HJcqpenj4agZ6O84temDMSPu6ushyFp6cPQ3iAj0O7p+YMQ6CPhyyNB6cOQJdw/nb3bTwwZzDCgnxkaSyY1BuJXRz3T1k0ewCiIgIkaxAA08Yno09yZ4e2s2f3QXy3TpIdHkKAUSMTMHQI/9ZPG5Nm9kZy7yjJGgxDMGBoV4ye6DiSNmpqCvqPiJel0bNfDCaJcHYGjk/C8Om9QKRqaBh0S+mMGfcOl/S4PzOqI/I7YEp4Mp5PnARAXHFSDSFw1erwef87EOMprknVuPB4vNNnChiRMQsNIXDRaPH1kPlI9A0RpTEsLBofDZ8KDSGiohYMsTs6X46ciT7B4jpm9g0Jx+fjp0HLMKImWAYEGobBJ2OnYHC44xs5ACQFB+OrmTPgrNWK0iDE/lr+OWE8xsSJ68rZJSgAX90+E65OTqKiFuTK3ysTR2FKorhmW5H+Pvj63tnwcNFJmvyenjAEc/uJ23YJ9vHEl4/MhrebiySNB8b3x+IR4vbX/bzc8PnTs+Hv7S5JY+GYXrh/6gBRtl7uLvj4+TnoFOgtSWP6yCQ8dru4CcnNTYf3X5uDiDA/SRPsqKHxeOrBMaIiHc7OTnjnnXmIjQ2SpDFwYByefXayKA0nJw1efW8+4hPDREdfCLFv67z49mxRUQSNhsHzH9yGnv1ixGswBPHJEXj5k0WiTsUwDIOnP1iI/qP581VufgxBdEIoXv/+ATi7SutQ/WdG7b77O2Jf5Tm8n7sDpYZ6aAhzU8XVtn/r5x+Nl5KmIFqkE3ItBysv4u2MXbiorxXU6O0fjtd6T0SCj7S23QBwvLIEr53YjfMNNdC0096+7d+SA0LwxoCxSA4Q5+hcS0ZVBV45uBvZNVWCGt39A/Ha0NHoFyqtNTgAnK2uxqt79iK9vFxQI87fHy+PGIEhUeIcnWu5WFOH17btxcnisvY1rrS17+zng+fHDsOortLbj5fUNeLN9XtwtKBEUCPM1wvPTByK8UmOV8U3Utmgx9ur9uBQ7iUQQm7KF2rTCPbxwONTBmNqX+l763XNrfjHsr3Yn2Gv/HmjRlsLd38vNzw0fSBmDeNPVuSjqcWID5bsxZ7jeaC0HY0rr83H0xV3zxiAeeOF84Hao6XVjE++3oudB85e6T3T/uvwcHfGgln9sWBWP8mRAaPRgi++2IvtO7Lb7T1DCAGlFG5uOsyZ0xeLFw2WvM1gMdvwzad7sGXdlZLtN2nYTyy5uDph+ty+uPOhkZKPStusLJZ+sgublh2HyWi5es3rNADodFpMnNcP9zwzATqJ+T4sy+Hnj3Zg3Vf7YGgxgzDk+tLvxB4d1jppMHZeP9z/8gy4uMnP9/mjIGX+Vh2R3xmUUpysu4Tll07gRG0hDDYLGELg5eSKiWFJmBfVD9EejkPNjjTSakvxQ8FpHKosRIvVbNfQuWBieAIWxvVGV+8gxRqZtRX44Xw69pZehN5qL8TkpXPBhM5dsbBbKhL9pTs5N5JTU4Wl2RnYeakAeotdw1PnjNFRsbgjMQXJwdKdnBvJq6nBsjNZ2Jafj2azGRyl8NDpMCwqCotTU9A7VEFi3RUu1tbj57Qz2JqbjyajCRylcNc5YWB0JBb1TUG/zuGKNYprG7HqZBY2ZZ5Dk8EEG8fB3VmHPlHhWDAwBQPjIhUf/7xc14Q1R7Ox6dQ5NLQYYOM4uOl0SIkOwW3DUjA4IUpx/ZHqhhasPZiFjUfPor7ZAJuNhauLDj2igjF/VAqG9oxRXK+lrrEVG/dnY+P+bNQ1tsJqZeHq4oSunYMwZ1wqRvSJU1x/pLHJgK17srFpZxZq6lpgtdjg4uKEmM6BmDkpFSMGd4VOIJFXDHq9Cdt3ZGHz5kxUVz
fDYrHB2dkJkZH+mD69F0aNTICzs+OtRCFaW8zYsy0Lm9acRmV5IyxmK5ydnRAa4Yepc/pg1PgkuLopix4YW83Yt+UMNv98HOXFdTBf0egU7ovJt/XHqGmpcPdQdmTdbLTgwMZ0bF5yGKUFVTCZLNA5OyEozBeTFg7CmLn94SlwtPnPhuqI/M4o0F/GrqqTqDE1wsxZ4a51RYxHKMZ36gdfnafgYymloiagAn0VNl1OR4WxESbWAg+tC2I8gjA9vDcCXYTf27aviCOdQn0t1hZnorS1EQabBR5Ozojx8MfsqFSEurV/XFWqRom+ESsLsnBJX49WqwXuTjpEe/phflxPRHj6dIjG5ZZmrDifhcLGBrRYzXB30iHSywdzuyYixsevQzQqW/RYlZuLC3V1aLFY4OqkRbiXF2Z374Gu/o4dSTGfe21LK9Zk5SKvuhZ6sxmuTk7o5OWJmUndkSBwtFeKRkOrEesycnG2vBp6kxkuTloEeXlgWnICksKFk6TFajQZTNh0+iyyS6qgN5rhrNUgwMsdk3vFIzkqxOHjxWi0GM3YevI8zhSWQ28ww0mrgb+nG8b36YZeXRxvMYjRMJqt2HH0HDLyLkPfaoJWy8DXyw2j+3ZFn+6OnTwxGmazFXuP5iEtpwTNepO9Xoa3K4b164L+qdEdomGx2HDwSD5OpV2CXm8EYQi8PV0xaGAXDOgX69DJE6Nhs7E4cjAPJ48VQH+lIqynlyv6DYzD4GHdHDp5YjRYG4uTh/JxbP956BsN4CiFp7cbeg+MxeAxPRxGTkRpsBzSDp7HsR3ZaKpvBcuy8PRxR/KgLhg2ORnOLn/c7RvVEfkdQCnF/uoMrCk7gDx9CTSEAUcpKCgYEFDYw7zDA1MwN2IU4jzDZGnsqzqLHy8dxpnGmzXaGBncHXfEDEWiD3+9BiEOVF7AN/nHcKK26GYNYn8eI0O64t4ug9AnIFKWxuGKIvwv9wQOVVwCQ4g9LH5FgxB7iHxYaDQe7N4fg0KiZGmcqCjF/86cwt6Si3YN2K9LQMAQ+xHiQaGRuL9nX4yMjJGlkVFRjq/S0rDzYsHVf7Nr2D9vllL0DQ3Dvb16Y1yc8AkePnIrq/D18dPYfu7C1Wj4VY0rWyDJoZ1wV79emJTQVVYkJb+qFt8cOo2t2XlgKQcCco0GA5bjkBAShDsGpmJacoKsSMql6np8vy8Nm9LOwWZjr27pXPs64jr5Y9GwVMzoJ+8Yb1lNI37YnYaNx87CYrWBXNn2uPZ1dA72xYKRqZgxJNFhMnN7VNXpsWzbaWw8kAOj2Xp1Gwqw5zqwLIewQG/MG5eKWaN6yopy1Da0YMXG09i4OwutBku7GkH+npgzKRWzJqbCRUaUo7HRgFXrTmHjlkzoW0xXr3uthr+fO2ZM7YXZ03vDTcb2hL7ZiHWrTmLj2jQ0NRra1fD2ccO0Wb0xc24/eAoUiePD0GrGhp+OYdPyk6iv1ber4entislz+2LmokHw9pVe9NFktGDz0sPYuOQwaioaodFe0aD2ZFaO5eDm6YKJtw3ArPtGwC/ojzW/Aaojcstj41h8lL8S2ytPggEBJ1DFQwMGIMBz8QsxMlh8cRyWcvj3ua1YXnzMsQZhQCnF3xNnYHqEcD2Qa6GU4j+5e/Fl/pF2cwuu17BPIi8lT8DiWMcZ69dqfJpzDO9nHhSlwVKKZ1OH4+EeAyRNsN9kn8Zbx/ZddQYcaTyeOhBP9xksSWN5dhb+vncPCCCo0ZZncHdqL7w0bLikUy2bcs/j2Y3bAQLBOiFtGvNSEvHahNHQSpjEd58twNMrt4CjVFCjbb9+clI3vDNrHHQSKsYeOV+Ep77bBCvLCmvAngMwskcM/rl4Elx14ifYtAtlePLT9TBbbQ41AKBffCTef3Aq3CWsYnMvVuCp99ei1WhxrEGAnl1C8f5fZsDLXfw2woWiajzz5mo0iujzQghB1+ggvP/3WfD1Fj/BFpfW4a8vrEBdfYsojc4RfvjXO/
MRGCAc0b2WivIGPP/UT6isaHSowTAEnUJ88I8PFyAk1Fe0Rm1VM1586HuUFtVen+fBo+Ef5IV3vrgTEdHi8+0aa/V4+e6vcDH3ssMjxoyGgZevO95e8gBiuktfbN7KqI7ILQylFO+d/wl7qk5LLiL2So+7MTTQcYIdpRT/PLsJq0qkt6B+veccTAlLFWX7Qc4e/C//iGSNV5InYmFsX1G2n2Yfxb8yD0rWeC51BB5OFHci4vucdLx2dI9kjcdSB+CvfYeKsl2Vm4Pndu2UdH0C4M6UVLwyYqQo+23n8vHkui2SNWb17IF3Jo8V5VQdyL+ER37cAEqp6O8vIcC47l3w73mTRUVGThWU4v4v1tgjayJFGEIwqFtnfHLvdFH5H9mXKnDfv1eBZTnRhfYYhiA5JhSfPzFLVNTiQkkN7nvjZ1hsrOiKpgxD0DUyCF++NE9U1KKkvB73P/cjjCaraA0NQxAR6ocv310AdxFRi8qqJjz4+BK0tJhEF8HTaAiCAr3xxcd3wFtE1KKuVo/H7vsW9fUtomt3MBoCPz8PfPrNvfATUaitudGAJxZ+geqKJtFVUxkNAw8vF3zy08MIDvVxaN+qN+Hp2R+hrLBGkoaLmw4frX8K4THKcu9uJaTM3+rx3V+ZzeVHsVuGE0IAvHN2KSqMdQ5tt5WfkeWEAMDrWWtwUc/f2r2NPeV5spwQAHjzzDZkN5Q7tDtaUSTLCQGAf2bsx/Eq/lbibWRUl+N1GU4IAPw34zj2llx0aHe+tgYv7N4l+foUwPeZGdiUd96hbUlDI57ZsE1ybRMKYE1WLlZm5ji0rW5uwZM/b5bkhAD2qMiO3AtYeizdoW1TqwmPf7MBlEK0EwLYt56OnC/CV7sdf++NZiue+HS9JCcEsBf0yrxYjk83HnVoa7Ha8JcP1kpyQto08our8e9l+x3asiyHv729FiYJTghgj5SVlNfjvS8cO8aUUrz42hpJToj9uVFUVTfhnX9tFmX/5str0SDBCQEAjqVoqG/BG39fI8r+Xy+tRnVFo6TS7RzLoaXZhNefXCaqgNrHL65E2cVqyRomgwWv3vO14tL1v1dUR+RXhKMcVpbulfXYtnyFzeXCN0FKKZZcOii7LDwhBCtLjju0++bCUVkVTAF76folBY4njK/OnpRd+VNDGHx19qRDu2+z02T3Q2EIwZdnHGv8kJkpu0g/Qwi+Sjvt0O6n9CzJDkIbBMDXx087vNGuOp0NK8vKbgfw3ZE0sJzwjXb9qVwYLFZZ7QAogB8PZsBiswnabT15Dk2tJnkalGLVwTMwmCyCdvtOF6CmoVVWbxeOUmw5lIsmPX+begA4nnEJlysbZZXq5ziKfUfzUV2nF7Q7k12Kwks1sjVOnCpEaVm9oF3++QrkZpWClVHFlGUpcrNKcSGvQtCutKgGpw5fkFUplWM5FOZXIie9WNCupqIRh7ackfeZsxzKi2uRdsDxouOPiOqI/IpkNlxApUn4RykEBw5byo/Cwlp5bXKaylCgr4K8KcmeW7KpLAMtVhOvzYXmaqTVlQrmnTjS2FaWi3pzK69NaUsj9pcXyu7rwlIOe8sKcLm1idemxtCKrYV5sjU4SnGiogwFDfxRqmazGWvOnVWkkVNdjayqSl4bk9WGFRnZsjUogOKGRpwsKeO1sbIsfjp5RlG/oGp9Kw7mX+Id5ziKZYcyFHU9bTaasetMAe84pRQ/78tQ1CjPZLFh6ynhCWPlrgxFR51ZjsOmQ7mCNmu2ZShrXEiATbuyBE3WbUqHRkG/FYYh2LglQ9Bm07rTikqhazQMNq1LE7TZuuoUGKUay4UXT9t+Pqboe8VoGGxcelj+BX7HqI7Ir8jWiuNgFL7lrawJR2qzecfXl56GhijTsHBW7Kzk11hdlKFYg6UUG0r4b4KrLmYr7txKCMGqAv7Xse5CrvKuqoRgRR7/69iclwcryyrXyOHfOtmVX4BWi/AKXZRGBv97dfhCEepbhVfoojRO8Wucvl
iGigbhFbojGEKw8ij/53GupAqFFfWKnB1CgNUHzvCOF1fUI6egQlGnW0qBNXv4Narr9DiZWaSocSHHUazbkck73qw34tCRfFmRims1Nm/Pgo1ny8FksmLPjhxFWxIsy2H39myYTO0v0Fgbi+1r0xR102VZDod350LfZGh3nFKKrcuOKvrMuStHfWsqGmVf4/eK6oj8ipQaqsBB2R6ghjAoN/GvwItba26qhipHo8zAH7kpbqnvAA2C0tZGfo3mBsVOAgFQ0sKvUdTcKHt7qQ2OUpQ080ddipsaJJ1IaQ+WUhQ1NvBr1Dd2iMaleiGNJsWOIUspimr5NUrrGhVdH7jyeQhp1PB/VmKhFCir5b9OWXWjYg0AqKxt5t0uK6/sGI3GZiPM5vYn8KqqZkUTaxtGowV6nm2mulo9rFZljjoAWK0s6mvbd2Kbm4wwGpQ56oDdqaquaP9zN5usaKrnj/CKhgJVpfKj5r9XVEfkV8TAmhVfg4DAaOPfNmm1KdcAAIPAdVo6QIOCCj7XVptF0TYAYJ+UWq38NyCD1SJ7C6sNClyt2NoerRarYocKAPRmfg2D1aLQnbLTYhZ4r8wWxY4IAMHIjcFs7RANg4V/69LIM+lKxWThz0Mx8qzMpcJRCrO1fZ2O0gDAm+9idJAHI0mDxxHoCAfh19QwGtr/HZpaO+a+CwCGVv77+x8V1RH5FXHTKCsxDNgncDct/3U8BMbEQ+Cu5T/W59kBGsSBhoeTs+xE1TYYQuDuxF/zwd1Jp7gsOoG9ZDwfHjpdhzgJns78Gm5Oug5xdjyc+d8rN2edYsfQsYZTh2i46fg1XBWWKxdzHVeXjtFgGAJnnmPCbh3YUM3Ntf3vlmsHavAdE3ZTWNr9Og33X17DjUfD1aPjesu4KSxF/3tEdUR+RTq7d7IXKFMASzmEu/IX14n2CFKcv2GjLDq785cZj/b074AcEQ5RHvzl0mO8/BRPrvTKdXg1fPwUh54ZQhDtzV9QKdrXF1YHJ0UcoSEEcX78ryPa3xc2pRoMQVyAP79GgK9iJ0HDEMQE8mtEBYkvTMUHQwhigvnfq6hg5RqEAJFBPrzjkZ06QANAWJA3r6McFuKjKDGyDX9fdzjzlDLvFOStKIm0DXd3Z3h6tj+5+gd6Sm5C1x7Ozlr4BbRfS8TTxw3uPPpS0GgYBIX4tK/vouuQ6qiEEIR2VtYr7PeI6oj8ikwOHQhWYY6Ip9YNAwMSecdnRvRRnL/hqnHC2E78bd7nRKUq1tAyGkyL5C/ONjc26aZunZKhV67Dw8wu3RU3cWMpxW3x/K9jctducJVQUZRXI5FfY2zXWHi5KFuRsRzFban879WQuM4I8pRe6vomjX78r6NXdBgi/L0VRZA4SjFvEL9Gt4ggdAsPhJKPnVJg7vBk3vGIYF+kdgtX/N2aMzqFdyzA1wMDe8UoOjVDCMHMCfwanp4uGDksXvGpmWmTUngdGmdnJ4yb1FPxqZlxk5J5m/NpNAwmzu4DRsHr0GgYDJuQCA+B4myTFw5S9JkzGgb9RnX/Q5Z7d8Qv6oi8++676Nu3Lzw9PREUFIQZM2YgLy/vl5S8penpHSsYzXAEA4IpoYPgxPBPbAneYYj3CpVdR0RDGEwL7wNXLX84M8YzAP0DomTv52sIg6nhifDR8f+oQ9y9MDo8VkEdEYJxkV0R7MZfYtrPxQ1TY+IVaQwJ64wogYiIh06H2d17yNZgCEFyp05ICOT/3ui0WsxPTZL9eRAAsf5+6BUeymujYRjc3i9ZUQ5HiLcnBsd25n8ehGDhUHFVffnwdXfFqKRYQZvbRqZCSSDMzdkJ4/t0E7SZOzZFUbRNq9Vg8pDugjazJqYqOjVDCDB1tHCl5ulTUhWfmpk6KUXQZurM3opPzUyd2VvQZtKcvrJqiFynMa+/oM2E28RVcuaDYzlMXTxY0TV+r/yijsiBAwfw6KOP4vjx49i1axesVivGjRuH1tYOyC7+HUIIwfzI0fIeC0DLaD
EldJBD27tihslKwiRX/uZFCv/gAODergNlh+opKO6Ic6xxf/f+sjU4SnF/guMy8vf27CM78MJSigeSHWvcmZIKQuS5hhyleLCPY40FvZLhxMg7A0QB3D+wj8N8mTl9EuHspJXtjNw7pI/DFePUvgnwdHWWrXHH8F4OG9ON79MN/l5uslavBMD8ESkOe9oM7xWLTgFesiIWhAAzRibB00G/mX7JUYgK95OlwTAE44Z1h7+Dhm6J3cMQ3zVEtsaQQV0QyrOd0UZMXDBSekfJ+jwYhiC1dxSiY4VLo4dG+GHgSHnNFxkNg66JYUhIFm4K6hfkhZHTe8nWiIgLRurQrpIf+0fgF3VEtm/fjrvuugs9evRAcnIyvv/+e5SUlCAtTbj4zB+Z8Z36iXImroVc+b+v9LgLQS6O95/HhiThjmhxPVCu1aAA3kqehygPx1Gb4Z264ImEEZI02ngrdSoSfBy3iO8XHIG/95HnuL3adwx6B4U7tEsMCMa7Q8fJ0vhb36EYFh7t0C7Wzw8fjJ8gy+F5qE9fTIjr4tAuzNsLH82aDACSnZGFvZIxM0l49Q0AAR7u+GzhNBACSfkJBMD0lAQs6M+/ndGGl6sLPr1/BjQMkeSMEEIwOikW94xy7LS56LT472Mz4aTRSNJgCEH/hEg8NHWgQ1utVoOP/joLLs5OkiYmhiFIigvFE7cNE2X7r5dmw8P9/9g77/g4qqvv/+7MNvXei2VZlmQ1S5Z7773hijE1QAgBUiAhEEIJIY0AgUAahNANNu69914kWbKsZsnqvWt3tW3mvn+s5Mi2ZnaKIOR5/Xs+PJ/n8b2a7+7s7txzzzn3HL0sQ4FhCOIGBeGZR13/tggheO3lu+Dr6y6bERnhh+eeWSBp/q9eXYagEG9ZYSCWJQgK8cYLry6TNP9nv1mGiEEBsgqbMSwDX38PvPzneyQltj/52goMHhYuKwzk7Gfjht989CgYlcfw/1f1rb7rjg7nGWx/gcQ7q9WKzs7Om/77vyZCCJ4cuhxLI5yGgqs9LEsYaIgGr6R8D2MCXC8WvXoqYTa+N2SKZAZDGPxu+GrMChPOE7hVP0ychJ8mTeu5hhQGwe9GLMbymHTJjIeHjcJLI2eA9FzDFYMAeGXUTDyYKL2L8OrENPx+0mwwhEh4H87xX4yejB+mu/bq9GpRQiL+PHceWBmMp8aMwc8nTJTMmD50CN5bvggalpHMeHD0CPxq9lTJp4fGxkbjH/cuhV6jcbkw9S7yKzJT8NrS2ZIZ6THheP8Hy+Gmk86Ymx6P1++bL3nRT4gKxgdPr4Snm04yY3JaLN78wWKXHpdexYT7458vrIavp5vL19V7a0YlRePtn0lrqgcAYcE++NtraxDg5ymJQQCkxIfj7VdWwU1iF+HAAC+899a9CAnxkXR/CQHiYoPxzp/ugafAKZNb5ePrjrf//gAiowIkfU8IIYiMDsTbf38APr7ukhgeXgb86cOHERsf6rzfLjAMQxAa7ou3Pn4UARLzNgzuevz+i8eRmBEjyWDv7fD7xtdPIiRSOMn6/7q+te67PM9j8eLFaG9vx8mT/ZexfeWVV/DrX//6tn//v9R9t6/ONF/B5urjyGkvuVFxlYL2tGjnoWU0mBUyCssipyDaI0QZo6kE68pP4UxzCZiehZyn/2FoCIu54cOxNmYChnq79lL0p3NN5fj42lkcqSvuCUE4W8yzPa3mGUIwLzIZD8aNQYqfcB6CmC41VuNfBRewr6oYgNO44noZPb6GedEJ+N6wUcgMUtZOO7epHh/mXsSusiLwoDcYvc9enlLMHBSHh1NHYmy4uJtWSAVNTfh31iVsLyqEg+fBMgw4nr+x2PGUYkpMDB7KyMSkQcL5FGK61tSCjy9kYWteAewcB5ZhwFPqfChSZ0hpfEw0HhidgWlxsYoY5S1t+OxMNjZn5cNqd9zMgDMxdeSgCNw3LgOzkuIUHZOuae3A58ezsfnsFZhtdmhuYhBwPI/hg8Kwdn
IG5qbHK2I0thvx5ZFsbDqRB2O3tV/GsOhgrJmWgXmjE8Eq2LG2dJjw9YEcbDp0GZ0mC1iWcbagJ84FleN4xEUFYtWsDCyYmASNRpqh01ftnWZs3puDLXtz0NZhdjJ6Hu29jEER/lg+PwMLZ6RKNnT6qstowfad2di8IwstLcZ+GeFhvli2JBOL5gknj4rJbLJi17YsbN14AY0Nnf0ygkO8sXTFKCxcmgk3BUdzrRY79my6iG3rzqKuuhWspufzAEAYAs7BIyDIC4vWjMHClaNFE1SFZLM6sH/DOWz7+Diqy5qcDEoB+h+Gb4AnFt4/AYvumwhvFyGy/0XJ6b77rRkijz/+OPbs2YOTJ08iMrJ/l7nVaoW1T+Gmzs5OREVF/U8aIpRS2KkdDBhoRJJLAaDa3ISDDRfRZG2HlbPBQ2NArGcEZoZkwkMj/iOwcjawhIWGEX941ZhbsasmG3Xd7ejm7PDQ6DHEKwQLwtPhoxPfUVg5Z6EpsSRZAKgzd2BL5WVUm9phctjgpdVjsGcA7ho0HP568R+ajXMABNC5YDSajdhYloeKrjYY7TZ4anWI8fLH8iEpCHYTbwVu5ZzFofSsOKO524TNxfkobW+F0W6Dh1aHaC8fLI9PQZincPKr831w4CmFwcVJmbbubmwpKEBJazOMVhvctFpEeHtj+bAkRPr4iP6tvYehd8HotFiw7Uohihqb0GW1wU2rQZiXF5akDkOMv3iIz85x4HgKvYYVXeBNVht25hbiam0jOi1WGLQahHh5YtHwRAwJFj6qCwAOjoed42DQakQZZqsde7OLcKWqHp3dVug1LAK9PTB/RCISwsXDiFIZVrsDB7KKcbm0Fl1mK7QaFgHe7pidmYCkQeKbAI7nYbNzMOjEGXYHhyMXS5BdWI1OkxVaDQM/L3dMHz0UKUPCRP9WKsPh4HDyQikuXalEZ5cFGpaBj7cbpowZirRhEaJ/y/MUVpsDBr04g+N4nLtQhvOXrqOrqxuEEPh4u2HCuKHIGB4tgWGHQa91Oe/S+TKcO12Crk5nVVYvbzeMGT8UmaNjRT0zlFJYLHboXYTFKKXIOV+Gs0cL0dluBqUUXt7uyBwfh1GT4kVP8lBKYbXYodNrRMMplFLkX7iOU/ty0dlqAsfx8PJxx/DxcRg7MwUarfBzm1IKa7cdWr1mQI5Rf9v6zhkiTz75JLZt24bjx49j8GDXMfVeyXkj3wU5eAdy2nNwsPEgrhmvgaPO0sVurBvG+o/FtOBpiHJXtpPuFUd5XGjNx46a47jScQ2OGww9JgVmYEH4JMR5qWPwlMe55hJsrDqDC60lsPNOhoHVYWpwMlZEjUOST5SqYmCUUpxrvo51ZedxovEabLyjh6HBtNBErBk8CiP8xR9qUhgXmqrwWfElHKotgaWPITI1bAjuG5qJcSGDVDOym+rwaUEW9lYUo7un86uOYTEpIgYPDBuBSRHKTxj1Kq+xAZ/lZmPXtWKY7c6qmlqGwbjIKNyfloGpgwYr2q33VWFjE77IvowdBUUw9lRA1TAMMiPCcX9mOmbEDVFdSr60qQVfXsjF9twCdFqsNxhpEaG4d0w6ZibGQafAI9BXFU1t2HA2F9suXUW72VmlkmUIhkUE457x6ZiTFi9YKEyqalo6sOlUHraeuYJWo3OhZBiC+PBA3D05HXMyE1wmtLpSQ2sXthzLxdbjV9DaYQLtYQwO88eqGRmYOzYR7hLDK0JqbjNix+E8bD2Yi+ZWo5NBCKLC/LBs9nDMm5IMT4FiZFLV3mHGrn252L4nBw1NnaDUGbIID/XFkgUZmDczBd5e8r0OfdXZ2Y19e3OxfVsW6mrbQXu8WsEhPli0KAPzFgyHr686r4PJaMHBnZexY+N51FS2guedjIAgb8xflom5S0cgIFB8o+JKZqMVR3ZkY8fnp1FV2ug8eUUA/0AvzFk1GvNWj0ZQqK8qxrel74whQinFU089hS1btuDo0aMYOtR10l1f/S8ZIsebjm
Nj9UZ0ObrAgLmtp0zvv8V5xuF7Md9DmFuYbMbRxkv4sGwrWm0d/TJYwoCjPOI8o/Dj+HsQ6yk/RHGsMR9/LtyBBks7WDC31T35DyMUzyUvR7KPfKPnREMJfpu7G1XmthvX648R6xmIV9IXITNAfojibEMFXry4F6WdLWAJua0zbe+/DfL0w6sj52JSmHQDuVdZjbV4/tQ+FLY1iTIiPL3x67EzMCta3vcfAK42NeIXh/bhSlOjKCPEwxO/mjgFC+MTZTOutbTg+T0HkF1b1y+D6QmxBbq749mpE7EsJVk2o7K1HS9sO4ALFdWiDF83A348fTzWjHKd2Hqr6tq78PLX+3G6pFKU4WXQ4wczx+D+SSNkG6HNnSa8+uUBnLhyHaTnejczAJ46j/g+NGsUHp41WvYpinZjN37/yUEcySq5EebsK0KctUwMOg3umZ2J7y8dJ9sINZqteOPDgzh4ugigEDydptOyWD4nHY+vmSQ7ZNRtseEv/ziEfYfzwfP0tr45PdEvsCyDhXOG44ePTBMsriYkm9WBf/z9IHbtvAyO4/ptZkgIcZ4SmpOKJ56aJbtirMPB4d/vHcT2DRfg6Cm5fyun9zOeMjsFTz23AB4yq6NyHI/P/3IAWz46AavFfuMzvpVBAYyflYwfvbrsOx/O+c4YIj/84Q+xbt06bNu2DQkJ/zl37+PjAzc31xbw/4IhQinF5prN2Fm3U9J8Bgx0jA7PJDyDOM84yZwNlQfwSfkOiQwCLaPFy8nfx3A/6cfBvq48jbcKt0tmsITB79Pvw4Qg6YvflopsvJSzHVTCAWMGzpMTr2cux5wI6Yvfzoqr+OmZbaAUN/JHhOTMWSP445gFWB4rXlOhrw5WXsMPDm8FR6nLI8a9y9Cr42bh/mHS62ScqqrAIzu33gj3SNFzEybjsRGuT4706lJ1Db63cQssdsdtC7eQnhw/Bj+ZKP3k15XaBjz86SYYbTbJdS8eGDsCz82ZLNlQuFbfjO+9vxEdZotkxorRKXhp2UzJhkJlUzse/cvXaO40SWbMy0zAb+6bC41E13p9Syd+8PrXqG/plMyYnD4Ef/jhQmglGgot7SY89eoGVNa1Sap1QggwMiUarz+7FHqJXp7Orm48/cIGlF5vlMggSE4Mx59eXQF3iR4Yk8mK55/9Clev1go2B+wrhiEYMiQYr795D7wl5n1YLDa88vRXyLlQJqljM8MQRMYE4o9/ux/+Er0jdpsDv/vR5zh7uEDSfIZlEBLhiz9+9hiCXByN/m9Kzvr9jQae/v73v6OjowNTp05FWFjYjf/Wr1//TWK/Ve1v2C/ZCAEAHjysvBVvFr2Juu46SX+zt+60ZCPEyaCw8Xa8kv9PlBlrJP3N/rocyUZIL8NBOTyf8xmutFdK+pvDdYV4MWcbeIlVTpwMHj+/tAlnm8okMU7WX8dPzmxzGggSKLSH8+y5nThUUyKJcbGhBj84vBUOnpdkINCe/148cwA7yqQ9bPKbGvDwjq2wOhyyaqn84dRxbLiaJ2nutZYWfG/jFnTLMEIA4L3T5/DxxSxJc6vbOvDwp5vQZZVuhADAJ2ez8M8T5yXNbegw4pEPNskyQgBg4/kreGdv/4nzt6q1y4zvv7tRlhECAHsvFeH1jUckLZRdZgueeGOTLCMEAE5cLsVvPtovidFtsePp321ClUQjBHDuzC9dqcJL7+wCJ6GVgNXmwHOvbJJshDgZFFeLavHi77bC4XDdjdfh4PDKS5tQUCDNCAGceSelpY144fkNsFmFmxb2iuN4/OGFTbh88bokI6SXUV3Rghd+9AUs3a6b7VFK8edfbsS5I4XSAHAWPmuoaccvH/wXjJ39dzX+X9M3aohQSvv978EHH/wmsd+aWqwtWF8l36iioLDxNnxa8anLuR22Lvz92teKGA7egbeL17mca3JY8Pv8TbLrT1A4c1ZevbLB5cPAytnxQvZWmYQeDqV4PmuLy7LyDp7HM2e2S35o3Kqfn91xI6
FV7LU8fXwXOF5+yTgC4NmTe0U7Avcyfn5wH+w8p6j2yK+OHEJbt+sH1K/2HYLFLs/Q6dXvjhxHfZfR5bzf7D4Co1VZJ+W3D59GeUuby3lv7jqONlO3oiqjHx69iIKaRpfz/rbrNJo6jLIZFMCGk7nILnW9Ifj3znOobmqXz6DAnjMFOJ133eXcL3deRGlls2wGTylOXCzFkbPFLudu25WNq0W1sivL8jzFxewK7D10xeXcfXvzkHWpXBGj4GoNtm93bUifOHQVZ44VyWdwPMqvNWDj56ddzr1wrAhHtmdLNqb6Mmorm/HV3w/L+rvvqv73UnG/QzrWdExxKXUePAq7Cl16RfY3nFXc14UHRamxCiVd4h6LvbXZsPLK2tXzoKgyNyO7TfwhuK/2KjrtFsWMRksXTjSIeywO15agyWJSVFWWAmi3WbCvSrwFwam6SlR0tUvytvTHMDvs2FZ6VXTe5YZ6FDQ3Ka4q6+A5bCzIF51T0tyCi9U1sjwht2r9ZXHPS3VbB46XXFfMYAnBVxdzRee0GM3Yl1usuNQ5yxCsP3NZdE5XtxU7zl9VxzghzrDY7NhyLE9xWXiWIdhwKEd0jsPBYdO+HMXfK4Yh2LhXnMHzFJt3ZCnuE0UIsGl7lujCTCnFlk0XFDf9oxTYsvmCy3u9bf05xb1jeJ5ix9cXwLnw7mz/7JSsAms3MTiKPevPw2qxK/r775LuGCIK5eAdONJ05LaEUTliwOBo01HBcY7y2FlzQtHC2iuWMNhdK+x+ppRiQ6Vry90VY1PVGdE5X5Sdc1lYTZxBsK5M3FX/afFFxT1dAGcy4yfFF8UZBVmqGATAv69eEn3QfpaXo4pBAXySmy264KzLyVXF4CnFF9mXYeeEH7QbLuWpOpHEUYqvs66g2yb8oN18/opiDxjgrHeyPasAnd0WwTm7zhfAZncdLhBjHMwpQXOncGuLgxeKYZLgyhdjnMkrR01Th+Cck5fK0NZpVszgeYrcohqUVTULzrl0uQJ1DR2Kn1iUAmXlTSgoEt6gFVytxfXrTao+9/q6DmRnlQuOl19rwNXLVar6BbW3mnD2hLAHqa6yBZdOFoNX0WfHbLTgxB5xY/1/QXcMEYUqNhbD6HDtmhYTDx5nW84Kjpcaq9Bsa1fF4CiP403CbsgKcxMqzU2qGt1ylMfRhitw8P0/rBu6O3GlvVaRF+E/DIpTTaUw2a39jnfaLDjdUKFqh89TiuyWGjR29/+52jgOByqvqWJQACXtLSjv7D/kQCnFrpIiVQwAqOnqxNUm4ZDDjquFqhmt3d3IqhFeMHbmFSrefffKZLXh7PUqwfHdOeoZNgeHk4XlguN7s9Q36uR5iqN5pYLjB84XqTLaAGfC55FLwl7Dw2eLVHcEZhmCwyLhmaMnClXXvGBZBkdOCudMHDtWMCCMY8eE87VOHLqqqlsv4PQgHT8o7Jk8fSBf9bF+whAc333HEPn/Vl32rgG5jtFhFNwdt9vUGTq9svA22Pn+d5Vt1oFh8KAwOvrfVbZaB67JYZut/x1di1X5Tu+2a1n6v1a7tVv1oterZgFGt8MBq4iXQY5auvtn8JSiwyLsAZDFMAvf9zbzwCTStZqEGS1G9Z87IQQtIozmTpMqQx0AGIZBm1H4fjR3mGTnCdzOIGjrEma0tJtU7fAB571qF/GqtHWYVXXSBZzGeHuH8PtobzOrvlccx6O9Tfh9tLeZVBuGPE/R1iz8fG1vNaruLUN5itam//1WKHcMEYXqLVamVrzIGZKBYgCAQyDPRGn+Sf+M/l+vEHsgGVKy+VUzBvCku0Pg9Qr9uxLZBa5FqZpg380Se70Ddb+E3gcAxXkbfUXgrMAqyFC5sALO3IdvmgGIM8TGpIq6uA7nGKD3IZJbMWD3yhVjAL6+dhEGP1D3SkXY8LuiO4aIQrlrpDVaciUDY7jRA+ZWeboo7y5VLGFgYPov4uOlHRgGAHgJvF5vrbziPmLyFni9PrqBYwhdy0enrs
KkFIanTqcik+YWhr7/18syDNxUVha9wTAI3xMvvbqqn1IY3iJjUsVTCm834e+Pt4f67xbHU3iL1Mfw8VT/O6SUwlukyZyvyuqlvfISuR9eXgbV4R9CCLxECoJ5eupVeysYhsBL5H54ehrkt7HuR2IN+Ty8Dao9OwC+84XNpOiOIaJQQzyGgCXqSlEzYJDoLVwMbIhnFHSMujLRDBgkeQ8R/OHGeATDU6PuQUtAkOAVDj3b/2uNcPdFoF68D4xrBhDjEQA/gb44gQYPRHv6qn52hLh5IcLdp98xd60OSf7BqpJuAcBXb8AQ3/47bTKEYGR4hKpEUgBw02iRFBgsOD4mKko1Q8swSAsTbpQ4dnC0rNbx/YkhBCOihSsEjx2qngEAIwcLM8YkRKteXCmlyBzSf48tABg5LEp1vgDHU2TECzMykqIUnzS5weB4ZCQJM9JTo1SHfziOR3qqcMXm4emDVHtFeJ4ibXi04HhaZoxq7w4hQNqIGMHx1NGxqt8HwxAMHztE1TW+C7pjiCiUl9YLo/1H3+iaq0Q8eMwMnik47q4xYGbIGLAqGYsjJguO61ktlkaOUbW4UlCsGjRBcFzDsFgzeJTqBXxt7BhBg4oQggfiR6q6PgOC++MzRctlP5g0QlXSLUMI1iakizbeeyAtQ1VYgyUEq5JS4KET9kjcNyJdNWNRUiL8RCokrx09XFXohGUIZiQMQYi3sBG7epxKBiEYGxeNQUHCTQBXTkhTtbgyhCBlUAgSo4QNw6WTUlXtwAmAmDB/ZMQLG1TzpyRDw6rbPAUHeGHscOF2CDOmDIObQd3mycfbDZPGCVeEnjgpQXJlVCG5uWkxY6ZwtebMcXEIClFXzVujYTFrUbrgeMrIwYgcHKTaOJy3arS6C3wHdMcQUaEZwTNUHd8N0gVhmPcw0TkLwife1u9Fjny1XhgTkCI6567IMaoWVw+NHjNCxMujLx8kv69HX+kYDRZHuWAMToPWRRdiMTGEYGWseI+TxbHD4KFVHnKglOKeRHHG7Ng4+BuUP2g5SnFvqjhj0uBBCPdW3qCLoxT3ZogzMqLCERfkr3h95XiKtaPFGYnhQUiLDlXsTeAoxT0T0kXnRAX5Ymyicq8ITynWTBEv7R/k54mpGXGKvTsUwN0zM0R/Y96eBsyZmKiYQQjByrkZovfBzaDDgjlpihkMQ7Bkfjq0Il1ptVoWi5eMUPx5MCzB/AXpMIgYTCzLYPGq0YqfWSzLYPr8NHiJGEyEECx5YILiJy/DMpgwOwX+wd/N9idydMcQUaFYj1hk+GYoLmq2KmqVYH5Ir2I8wjEteJRixvdil7gMIYW7+2N51FjFC8bjcXMFwzK9CjJ44aE46f1JbmMkTIGni1wTb50BT6VMVMx4LGksAg3i8VY3jRY/z5yk6PoEwAPDRiDSs//QT6+0LIvnJgh7sVwxViQmI84/QHQeQwiem6qMwRCCOfFxomEZwPmgfXa2csaE2GiMGey6qeJP5yn7PFiGIH1QGCYnum54+MSCCWCI/F8hyxAkRgZhVrrrhoePLhkLDcvI3iGzDMGgUD/MH5/kcu79d42BXqeVbbixDEFIgBcWz0h1OXf1XaPg4aGXbSgwDIGvjzuWLRrhcu7SZSPh6+uuiOHhoceKVWNczp2/LBNBod6yjwoThkCn12D1g66fRTOXZiIyJkh2UTNCCDQaBmuemCHr776rumOIqBAhBI/FPobBHoNlGwp3R92Nkf7SQgk/jr8bqT5xshlrB83DjBBpbrufJCzC+MBE2Q/aewZNxvLocZLm/njYdMyT0byuVysHZeKRodIMjB8mjcfq2HTZjMWDkvHT1CmS5j44bAQeTpYXBiIAZkbH4cUx0yXNX5mUgh+NGiubMT4yGr+dPkvS/PmJ8XhuqrxFnCEEw8NC8eaCeZLmTx46GC8tmH7j9UllJIQE4p3ViyTtSEcNicRrq2aDEHmM6ABf/PWhpZIa0qXGhOIPD853dnKVCGEZgh
BfL7z3+F3QSUgOjosMwutPLgbDMJINBZYh8Pd2x3vPLIeb3nVIJDLUD3/6xVJoNIzkRZxlCLw8DHjnVytEE1V7FRzkjdd/vRI6nUYWw82gwxuvrYKfr+vkSz8/D/zxjTVwc9OBlVjvg2EIdDoN/vD63QiW4EXw9HLDH/56Pzw89ZINBYYh0GhY/ObtexARJb4ZAACDuw6//ehh+Pp7SDZ4GIaAZRm8+Nf7MThBfhf376K+0e67avW/0H0XAKycFR+UfYBL7ZfAgBEM1xAQMITBgzEPYmKgvJ27nbfjneIvcaTxIljCCB67JT0da78/ZBkWhsvbjTp4Dm8VbseW6nOijN5cj8eHzsXaGOkdUgHnceE38w/gk9Iz/bZqv8EgBJRSPJ4wBT9MmCqLQSnF23nH8V7+KTAijF7+9xPH4tn0abJ2iZRS/D33HP6UdQKAcBv1XsZ9iel4ZexMaGTWDfj4chZeO3HU2ZzPBWN5YhJ+N302dDLzAL7OvYKX9h+Cg/KC1Sp7GXPjh+KNBXNhkHnqZldeEZ7fuu9GJdb+MCxDwPEUk+Ni8OeVC+Ah89TNkfxSPLtuNyy9rdpFGKOHROHt+xeKnpbpT2cKK/CzD3fCZLH126q9LyM1JhTvfH8J/L3knbDLLq7Gz97djk6TBQwB+ktP6WUMjQrCOz+5C0F+8pLBr16rx89f34K2DjMYQvr9bvUyBoX7463nlyEsWNyTd6tKy5vw7Etfo7nFCIYh/ebZ9P57WIgP/vSblYiK6D+JW0hVVS147tmvUF/X4ZIRGOiF37++GrGxwrk6/am+tg2/+tEXqCpvBsMS8NztDMIQUJ7C198Dv3n7HsQnCefq9KeWhk689Oi/UVZYB4Zl+q22SnqeiV4+bnjp7w8gZaRrT95/U3LW7zuGyACq3FSOQ42HcLblLBz05uZp/jp/zAiegUmBk+ClVR6bv26swe66kzjYcB62W4qU+em8sTB8EmaHjoO/Tvn9Kjc2YnP1WeyouQgLd3PZaV+tB5ZHjcXiyNEINsh7MPVVhbEFG8ov4uuKLJgcN1dL9dYacHfMKKyMyUS4u69iRrWxHeuuZWPdtWx02m8u4OWl1ePuIem4J24EBnkJJyq6Up2pC+uKcvB5QQ5arTcXYXLXaHF3fBrWJqYjztf17khITSYTvrqah89ys9F0SwExg0aDFcOScW9qOhICAhUzWsxmbMzLx2dZObc1s9OxLJYmD8M96WlICQ1RzGg3W7D18lV8di4bNe03F2HSMAwWpibgnlHpSI0IURyb7+q2YkdWAb44lY2K5vabxliGYHbqUKwZn46MmHDFDLPVht0XCrHuWDbK6ltvGmMIwbS0Ibh7cjpGDo1UzLBY7dh/vghfHcxGSVXTTWOEABPTYrFqRjpGJw1SnCthtTlw5Fwxvt6TjYLS+tvGx6QNwoq5GRibMVg0gVtMdjuHE2eKsWl7Fq4U3N74LyMtGssWjcD4MXGSPFP9iXPwOH26GFs2X8TlnNv7aiWnROKuZSMxcVKCaO6JKIPjceF0CbavP49LZ2+vkJuQHI4lq8dg0owk6CR4pvoTz/PIPlWC7Z+fxoWjRbcd7Y1NDMOSByZg8vzhMLgNzNH4b1J3DJEBFE85VJuL0OVogYO3Qc96INQQC1+dsFVtcphQYa6A2WGGhtHAS+OFwR6DBfNBeMqj3FSKNnsLbLwNbqwbItyiEaQXfuibHd0oMVbB6OgGSxj4aD0R7xUtmA9CKcU1YwWarK2w8Fa4swZEu4cj3E2MYUVhZzU67d1gCIGP1gPJPlHQCCSEUkpR0lWD2u4WdHM2eGgMiHYPQoyncC6BhbMjr60GHbZuEAL46tyR6hsBncipkuLOelSaWmByWOGh0SPaIwDx3sIMK+fA5ZZatNu6QSngp3dDmn8YDBrhB0ZJRxNKu5phctjgrtEh2sMPSb7CC6Sd55DTVIc2i7P6qq/egLTAULiLJLaWdrSgpL0ZXXYb3DVaRHr6IC
0gVJDh4HnkNtSjpdsMjqfwMeiREhwCL5H6JuUdbShsaUaXzQo3jQbhnt7ICAkTZHA8j7z6BrSYzbBzPHwMeiSHBMPbIOw5qO7owNWmJnRZrdBrNAj19MSI8HBBDxPPU+TXNaDZaIaN4+Bt0CMxNAh+7sKJffWdXbhS14guiwU6VoNgLw+MiAoXXCAppSioaURTlwlWuwNebnrEhwUhwFPYO9HUaUReTQM6uy3QsiwCPT0wIiYcWgEPE6UUxTXNaOwwwmKzw8tNjyFhAQjyEfZOtHSZkVdRhy6zFRqWgb+XO0bERkCrEV4gr1U3o6G1E91WB7zc9YgJ80eIv/Bmpt3YjdyyOnSaLGAZBn5ebsgYGgG9iBervLoFdU2d6LbY4OGuR3S4P8KChJ+5XSYL8kpq0WG0gBACXy83pCdEwCCyCFfVtKK2rh3mbicjMtwP4WG+gvPN3TbkXq1GR0+1WB8vN6QlRcJdZBGurW1DTXUbzGYr3Nx0CI/wQ2SksJfFYrEjL68KHZ3d4HkKb283pCRHOOuICKihrh3VFc0wGZ2MkHBfRA8OEpxvszqQl1OBjnZn1VkvbzckpUbB20f4+95U346q0kaYOi3Qu+kQHO6LmHjhZ5zd5sCVrAq0txjBOXh4ehuQmBYF3wB1ZRPU6I4hMgAyOzqR3XYAF1p3odN+e5OnOM9MjApYgCGeGWAU1hMxO0w403IcR5v2o8XWdNt4glcypgbNRopPuuKaJWZHN441ncfuuiOotdzeeyTJOw7zQqditP9wQQPDNcOKQw3Z2FR1AuWmhn4Y0VgWNRGTg9OgY5QV0rJwduytzcOX18+isPP2/ibDvMNxz+CxmB2eAoOLxFkhWTkH9lYX4LNrF3C5tfa28XjvINw/dBQWRafAXaNsR2LjOOyvKsbHhZdwsfH2HWKstz8eSszE0thkUQNDTA6ex8HyUnySl4Uztbf3aYn29sWDqRlYnpAMH72yGjIcz+N4eTk+zcnBifLy28Ig4V5euC89HStSUuAvcsRXTDylOF1Wic8v5uBocdltjGBPD6wdlY4V6ckI9FRW1IlSivNl1Vh3NgeHrpbeFqLw93DDmrHDsXJUKoJFjhG7YuRcr8X6k5exP+f2TsE+7gasmpCG5eNTEean7DlHKUV+eQO+PpqDveeLbqt+6ummx7JJKVg+JQ2RQb6KGABQVN6IzQdzsOfU7Y0A3Q1aLJ6airump2FQuLwQS1+VVTRh694c7D50BVbbzZ5lg16DedNTsHRuOmIHCS/+rlRZ1YLtO7KxZ+9ldHff7FnWalnMnpWCJYtGIC5OuQewtroVu7Zcwu5tWTAZb/b6ajQsps1JweLlI5EgM4zTV4217dj99XnsWn8eXbeUxWdYBpPnpGDRmrFIyohWXQROru4YIip1teMktlT/GTx1CJZfJ2BAwSPMMARrYl6Cp0aeez+vIxv/KnsXDmoXZPTmm4QZIvBE3LPw18lz7+d1FOGPhf9ENyfcV4QBAQ+KUEMgXkx6CqEGeT/uK+3leP7yh+hydIOg/7h8LyNY74s/ZTyKQR7yftxX22vwxPnP0Goz3biWECNA74m/jb4fiT7ykrhKOprw0Il1aOjuEmT0vj9fnRven7gaGQHChZ36U3lnG+4/uAGVxnbBuHzvo8JDq8P705ZhfOggWYyark7cv3MjSttbBXNwehkGjQZ/m70Y0wbFymI0Go14eMsWXG1qEs3zIXCeAPrzvHmYGy9cF6I/tZq78fj6bciprruRq9CfGOLMifrtollYmub61EhfdVms+NHnO3CurMolAwBeXDwdq8eIHyG/VWarHb/4dBeO5193yaCgeHrxZNw3Vd5Rd6vdgZf+vQ8HLxWLMtieXInHFo/DIwuEa/L0J7uDwx8+PIhdJ/JdMjie4r6Fo/D4qomywkYcx+MvHx7G5t3ZkhjL5mfgRw9Pl3Wqhecp/vXvY/hq/VnBnBLAefyW43jMnZOKp38yFxoRj9WtopTii4
9O4NMPjjp/5y4YU2Yk4ecvLYVOL32TRinF5k9O4V9v7AUhcMkYPSUBv3zjbhjcv72Qzh1DRIWy2w5gR827kuczYOCpDcDDsX+Cl1baLuBi61l8VP5XABA0Qm5leGi88GzCrxGgl5YHcLE1D38s/Cdoz/9IYbhrDPh96s9FwzV9ldVagmdz/gWe8pLqkDBgYGC1eDfzCQzxCpfEuNxWiUfPfAQ75SQ1nGMJgZZo8MG4h5Dm5/roJwBcba/HmiOfwsrZJRX5YkDAMgw+mrQGY4JjJDGudbRg2Z7PYLLbpDF6jor+a9pyTIuUVjmxqrMdSzevQ7ulWxKDwJkA9+6shVgwJEESo8FoxLJ169BkMklmAMAf5szBimRpJ6ZaTWas/ugr1LR3yiq69tLcaVg7Kl3S3M5uC+795waUNbXKamT49JyJeGTKKElzzVY7HnnvaxRUN8piPDprNJ5cIFwgsK9sdgeeeHszsq/VyioXfvf0dPxstbQkcIeDw8/f2oazeeWCycz9aeHkZLzw6GxJDI7j8cqbO3DsdLHkuhoEwJTx8fj1zxZLMngopfjTG7uxd3+eRIIzH2f0qFi89uoKSQYPpRR///M+bN1wXgaDICU9Cr9/517odNKMkY/fOYCv3j8qmcEwBHFJ4Xj9o0e+NWNEzvp95/huH103XsbOmvdk/Q0PHl32FqwrfwWcQIfbmxima/i4/O+SDYRehsnRhXev/QFWEe9Gr8pN1Xij6ANQkYZ6/THMDgt+nf8XmByuO5pWmZvwy9yPwEk0QnoZFs6Gn+W8jzab6+7FteZ2PHHuM9h5aUYI4CxQZeMdeOL8Z6jv7nA5v9lixEPH10k2QgBnp2GO5/H9UxtQYWx1Ob/DasF9B9ZLNkIAZ1iCpxQ/OLYVhW23h+1ulcluw707Nko2QgCnd4dSih8f3IWchtvDXbfK6nDgwc2bJRshNxgAnt+/H2erbg8T3SoHz+Oxr7bKNkIA4Dd7j+BYyXXXr4lS/PiLnbKNEAB4a99J7MktksR4/rPdso0QAPjgwHlsPSfcPr6vXv30ALKv1cjuWfLV4Rx8dThH0ty3Pjsq2wgBgJ3H8/HxdmkL8gdfnMBRGUYI4PxeHT1djPc/Py5p/hfrzsgyQgDnqajzF8rw7l8PSJq/dcN5WUaIk0FxJacKf/7dDknz92+5JMsIAZwek2tXa/GHZ9fL+rtvS3cMkT463PA5lNRZpuDRYC1HQedpl3N31G6EkraOPHg0Wutxoc014+vqPeCo/FqpPHi02NpxqME1Y135Ydh54dCVMIOiw2bCtuozLud+fv00zJxVdtVXHhQmhwVfXHfN+OzaRbTbpC/efRlWzo4Pilwzviy5jAazUTaDwnmk+q95rhmbi66iorNdEYNSincuuv7M9xQXo7i5WXFp+D+fOuVyztGS68itbVDMeOPwSZeL8rmyKpwrq5JtIPTqrX0nXZZ8v1LZgKNXyhQz/rLzpMtuuWW1LdhzrlC2gdCrf2w/DcstORi3qrapA1sOXVbM+GjrWZjMVtE5be0mfLX1gjIAgK+2XURbu0l0jtFkwefrXH/H+xOlwI6d2ahvEN/YWC12fCLTQPgPg+LQ3jxUXhffdDjsHP791j5FDJ6nOHukEEV51Yr+/pvUHUOkR/Xd11HTXQSqsJw6AYPzLTtF5zRZG1DYdUVxWXgCgiON+0QftK22dpxvyVHMoKDYXX8UvEANEQDosptxoD5LsM6IK/Gg2Fp9Cg5euH212WHDlspLihckjlJsqryI7luOH/eVjeewrvSS4sWCoxRbynPRaRP2UnE8j08KLykuoc9Rit0VhWjqFn7QUkrxcV6W8lLqlOJo5XVUdYo/aD/JzlZcSp2nFBdra1HcfHvid199dj5bcTM+CqC4sRm5tbcfRe2rL87kqGqUV9PWibNltx8T7av1J9UxWrrMOJZfJjpn47FcVQxjtw0HLxWLztl2OA9EBcNu57DnVIHonJ0H8/qtlSJVPE
+x69AV0TkHDuTD5sLoEhMhBLt25YjOOXboKswmcaNLTAxLsGPzJdE5Z48Wor1V3OgSE8sy2PnVWcV//03pjiHSo0ute1Q1sKPgUd1dhAaLsGv4RNMhlQyKOksNykwlgnMONpxW0TXGqSZrKy63FwqO7627qNgI6VW73YSTTcIPj721uTCLGBFSZHJYsa9WmHGgphDttm7BcSmy8xy2VOQKjh+vvY46s+swlJgoBTZcE2ZcqKtBaXurqs+dIQTrrl4WHL/a2IjchgbFRhvgzN9Zd1mYUd7ShrPlVeqa8TEEX1wQZjR0GHGkoEx1M751Z4QZ7aZu7MkqUsVgCMGXJ3IEx80WG7aduqKKQQjBV4ezBcftDg6bD19W1fCPAtiwL1tw88RxPDbvFh6XxKAUm3dlCXaypZRi89aLiq8POI2d7TuzYbcLb562bTiv6mQKz1Hs25GNbrPwc2/7F2dUdYLmOB5HduWiq911+P3b1B1DpEflpjxVDex6VWUWXsBLjIWqGQQMykzCu5iCzmuywyW3iiUMCrtuL9rTq7x213F4KYwrHeWC49mtlWBd9OGRwshpFd65XmquhkYlg4Agq1nY1XmxqUY1gwfF+Qbh/IoL9TWKvQi94ijF2X6O+vbqYk2Nyt7JPYxq4XuVXX37kWnZDJ7ifIUwI7eqTpUx1cu4VC7MKKhudBlWcSW+58ivkEprW1yGVVyJUoqCCuHXWlXfhi4VO/xeVda3wSgQnmluNaK51djvmBw1iVzHaLSipqZNNaOry4Ka2v6v43BwKCmqU2VQAc66JuVlt5dZ6NXVnEpVhiHgDO+UFKj/rQ2k7hgiPbJwyt1dvWLAwMIJ/6jMA8IgMDuEr9MlMiZdRDRhtdNuVm3sAIDRIeyN6LJbVHtdeMqjyy7OUPs+eFBRr4pY2EaO2qwiDKtFccikrzoswq+102pVXF1TMsNiHZD30WUVXjw7LeoXVgAwWoV3rV3dA8OwOzjYHP0bG10u8i7kSOhaA8oQMGi6TAPz+xC7ltE4cAxjV//XurVOyDfBsNkccIh4ZOTI1Dlw92QgdMcQ6ZHSomR9RQGwRPj4ldKiZHIYmgFgOK8jzNAqLHx223VEGYzijsO9IiCir9XpqVC/8OlFertoGPndVPuTWP8YHcsOgFkIwQqivWMDcdLfFUOttwKAaD8fMf53jQFA0PhTWg69P2k13zxDI8gYuHslVJ1WiK1EGoES8d8GQ24XYCWM/5buGCI9kluQrD9R8PDQ+AqO+2j9VC+uPDh4a4V7vPjrfFQzKOXhqxMuIe2v91YdNgEAX51wpUp/vafq3TEhBP56YUagQVk1zr5iCYMAvfB1Ag0eqnIeAGe+QIibyPtwcwfHq/MeMYQg2EPkfbi7q34fBBBneMhrDqfkOoEiJd7lyE+EESCzyZ2QvN30goaIv/fAMLQaFh6G/utK+Puo/30Azu+Wj2f/1XX9fAbmfQCAr8A98fZ2U5VX0Vd+fv0z3D30A7a4+/n3f99ZloGnl7JKyLfKN2BgPtuB0h1DpEepvlOgdnfMEi3ivYSLHY30G6c6FEDAYLivcAv6CYEjByTcMDZghOD49ODhqsMmHOUxLWS44Pic8NQBYcwJTxEcnxeVNCCM+VHCFT0XDEpUvcvnKcWimGGC43Nj41WXb+YpxZI4YcaM2FhoByA0szgxUXBswpBBcNMqK8/fK4YQLE4Vfh8jB0fAR2bH3f4Yi9KF30dydAiCRfrNSBHLEMwfKfw+hoQHIDrEV9UTi2UIZo8U/u6EBXojcbDyBoS9jEkjYqEXKNTl4+2GEanRqgwFhiEYkRYNH+/+jR29XotxY+NUMxLiQxEi0IGYEIKpM5NVeS0IASIHBWCQSN+aaQuHg1HpGQkM8UZ8iryq0N+07hgiPRruN0M05OFKDFik+UyFgRV+AI3yHwc9o6x/iJPBIMN3FHy0voJzRvsPh5dG+UPQyUhCqEG4guuogAQE64Vfg2sGQYpPDGI9hc
uwZ/hFI9YzSPGDloBgqFcI0nyFq6sm+YZiuH84GIUUAiDSwxcTQoRLpMd4+2Fi2CBVyaQBBnfMjBoqOB7i4YnZg+NUMbx0OiyME66u6uvmhiXDhqli6FgWdyUJG20eOh1WZCSrYhACrMgQNj51Gg3uHpOmyttGKcWq0cKl3lmGwd2ThqticDzFqgnCDEII1kzPUHz9G4xp6aJzVs3OUBWS43iKFbPEX+fyBRmqEjB5nmL5fOGNEwAsXZKpmnHXUuENIAAsWj5S8OSOFFEAS1eOFjX8Ftw9BrwKBiEEi+8ZN6BhnoHQd+vV/BflxnoizWcqiMJbwoPDyID5onN0jB4TAqcpPsLLg8eUoFmic7SMBnNDJysOz/DgMS90iugchjBYFjVRBYNiWdRE0TmEENwzeJxi3w4FxT2Dx7rczd0/dLTiGh8AcH/cKJcLzoOJIxWHNRgQ3J8wQjQnAQAeSMlQzGAJwZphaaLdiAFg7fDhqhjLkpPhpRc3xNdkqmPMHRaPABchnpWjUxVdH3Du8CcnDEaEi+Z0d41NUbwDZxmCEbERGBIq3ltq/thh0Os0in6FDEMQHxmE5Bjxdg4zxsTDy0OvKM+JIQQRwT4YmSzeamH8qDj4+3koMtwYQhDg54Hxo8TbIGSkD0J4mK+iz4QQwNPTgKlThL1gAJCYHIHYoSGKGXq9FjPmifcyiokLQUrmIMVeEZZlMHtZpqK//SZ1xxDpoxmhD8BHG6jIUJgYtAphbq57giwIW4ZgfagixtSg2RjqJf5jAIClEbMQ4xEpm0EATAsahxF+wjvKXi2LmogUnxjZ3gQCginBaZga7Lp52F1RIzAucIhsBgOCiUFDsTjS9Y5xYVQyZkckyGawhGBkYDTuGeL6Rz0jcgiWx6bIXjBYQpAcEILvJ492OXdseBTuS05XxIj19cdTI8e5nJsWGoofjJLWZ+VWRri3N56Z4Lp/ypBAf/x0mrQ+K7cyAj098NysyS7nhvt647kF4sZ2f2IIgbfBgF8tnu5yrr+nO15cNVMRw12vw8t3i284AGdH3VcenCPbjCaEQK/R4NXvzXFpqOt1Gvz68XmyNx2EOBe9V5+Y75KhYRm88vRCECIvOE4AEIbg5acXukysZRiCF55fDJZVkjxO8MvnFrnsA0MIwbMvLYVWp5EdzqIU+PlLS+Dh4dpj/pNXl8HgplVk8PzolaXwFchB+W/qjiHSR+4ab9w3+DV4aQNkLeKj/BdgWvBaSXPdWHc8NfQXCNQHy2KM9Z+EFZH3SpprYPV4cdiTiHIPlbXAjg3IwA+G3CPpR6RjNPjd8Icw1CtSMoMAGB2QgF8mrQEjIdlVw7B4c+QapPtHS34QEhCMCIjBnzJXQyPhdA9DCN4csxQTQmIl3ykGBCl+YfjHhFXQs67DeYQQ/H7cXMyOFg6v9Pe64n0D8fGMlXBz4anoZbwycTqWxkvvQMsSgkE+vvhs4Qp46aSFDJ+ZOBH3pEnvQMsSgjAvL3y2YgX83PqP4d+qxyaMwiPjxN3gtzICPN3x8b3LEewlLSx57/gMPDXTtfF1g8EQ+LgZ8OHDy116Q3q1dEwyfrbUafBI+W6xDIGHQYe//2AZYoKlJc/PGhmPF+6d4VzEJUAYhsCg0+Cdp5ZiaKS0Ttvj02Px4mNzwDBEMkOrYfH6TxcjeYi0LtgZqdF49dleQ8E1hBDiNHR+vggZqdGSGMOGheM3v14OrYaVtIgT4nwvz/9iIcaOkdZ4MnZoCH771hroDRrJDEKAHz+3AJOnS/vtRsYE4ncfPAQ3Dz0YVsIH0jPl+8/Ox+y7xENY/y3d6b7bj0yODuyp/eeN3jG3ln0nYJwnZFgfTA5eg5H+82RbwGaHCRuqPsXFtjP9NsD7D8MTc0MXY3qwfEY3Z8FH1zfiaNM58PT2BngEztbj7qwblkTMxLKIOZIMhL6ycnb8/doO7K4939N75mb1MtxYPVZETcIDg2dJMhD6ysY58JeiA9hQfgG2ns
aCfTmk5/83sFqsHjQaTyXOhJaRl+/j4Hn8Jf8YPrl2HmaH/cY1+zIA59Hl1bEj8GzadBhYeYmVPKV4L+80Psg/jy67DQzIbWEhAufR0OWxKXhx1Ax4aOV1yqSU4v2cC/hb9nl09NQXuTVZliEEDAgWD03EyxOnw0cvL3mTUorPcnLw7tmzaO3uFmQAwLyhQ/Hy9OkIcJd/OuLr7Ct45+hpNBlNYAm5LWTDEAJKKWYkDMFL86YjRKIR0lc7cwrx530nUdfRJcqYFB+DF5fMkGyE9NXByyX48/YTqG7p6Le9fe+/jYmPxgsrp2NQkPwTfCfzruOtDcdQ0dAmysiIi8Bza6cjLkJaF+++uphfibc+O4Ky6hZRRsqQMPzswelIHCyti3df5RXW4O33D6G4rAEsS8BxtzB62trHDwnBTx6dgdTECNmM4uJ6vPPefhQU1IoyBscE4okfzsSIjBjZjOuljXj3T7txJafyxvX6Y0QOCsAPfjwbo8dL36T0qqaiGe++uh05Z0vBsMxtuSO9/xYa6YdHfjYPE2dJ6349UJKzft8xRETUZW9BVtt+5LQdhNHRDo7aoWPcEOYWh9H+CxDvPVpVgquT0YFTLcdwuvko2u1tcFA79IwBEW7RmBo0C+m+o6CRuajezjDicOMZHGg4hWZbG+y8HQZGjyj3MMwNnYLxgSOgY9SdVuiyd2N//UVsqz6DBksbbLwdekaHKI8gLI0cjxkhGTCw6tpPmxxW7Kq+jPUV51FtboWVs0PPahHl7o9VMaOxIGI4PDTKk4EBZ4+bnZX5+KL0Iq53tcLC2aFnNYj08MWa2EzcFZMKL626UxcWhx07KwrxWWE2Sjqa0e1wMsI8vHDP0HSsjEuFr16a90BIVs6BvWUl+CQvG0WtTTDb7dCxGoR4eODuYWlYlZiCQHd1Llo7x+FgaSk+y8nBlYYGmO12aFkWQR4eWJmcjNWpqQj2VHd6xMHzOFZyHZ9fyMHlmnqY7TZoGRb+Hm64Ky0Zq0ekIsxH+Ki5FPE8xcmScqw7k4OsilqYbDZoGAZ+Hm5YnD4Mq0anIdJf+Mi8FFFKca64El+dvIwLJVUwW+1gGAIfdwPmZyZi5YQ0RQbIrYzskhqsP5KDc1crYbLYwDAEXu56zB6ZgBVT0hAbLp53IoVx5VodNh7IwenL12HqtoEA8HQ3YProoVg2YziGDpLmaRFT4bV6bNmdjRPnr91omufhrsek0XG4a34GEuNCVTNKSxuwfUc2jp8ogtFkBaUU7u56jB0zBEsWj0DSsHDVp9Eqrjdh56aLOHYoH12dFlBK4eauR+boWCxeOQqp6dGqGTUVzdj51Xkc3XUZXR3d4Hkebu56pI0ejMVrxyF9TKxqhhLdMUQEZOOMuN61Gy2WfNj4TrBEB4MmAIM8ZyPIMNzlh0UpdTnHxplxtfMwasz5sHJGsIwW7qwPErynIMp9gBi8FVltp1BqLICZM4IlGnhovDDcZwzivVJdejWkMew413IR+Z0FMDpMYAgDL40nMv3Ske47MAw778Cp5lxcbC1Ap90MQgi8te4YG5CCsQHJLgvASWE4eA7HGwtwsrEIHXZn1VlvrTvGBcVjWkiSS8+JFAZHeRyrL8Hh2mK02UzgKYW3zg0TgmMxJyLJZfhGCoOnFCfqyrCnsght1m44eA4+OjeMCx2EhYOGuQzfSGFQSnGmthI7y4rQYjHDznHw0RswMjQCS+OSXHpnpDIu1NZgR3Ehmswm2DkO3noDMkLDsDQxCd4uklmlMnJq67EjvxANRiNsDg5eej1Sw0KwLDXJ5fFdKQwAyK9twLacAjR0dsFid8DLoMewsGDclZEEfxcJs1IZxXXN2HYhH3VtXbDY7PA06DE0LBB3jU5GoLe4Idn7aHfFKatrwfYz+aht7YTZaoeHQYfYsAAsHZeMED9xI08qo7K+DTtP5KO6sR1mix0ebjpEh/pi0aQUhAeJG3lSGXWNHdh5OA9VtW0wddvg7qZDRI
gvFk5PQWSYuJEnldHY1IU9+3NRXtkCs9kGNzctQkN8MG9WKgZFixt5UhmtLUbs3XUZ10sbYTZZoTdoERzigznz0zB4SLDo3/ZyXDHaW004sD0bpYV1MBkt0Bu0CAz2xoxF6Rg6LNwlQ4ruGCK3XsdWicL2L1DWtRs8tfeEC3g4AwcMKDj4aAcjwXc1Yr0XgVHg5eiw1eNC60Zcad8PB7XeCK04gywMeHDw1YZjhP9SDPebD5bI90C021pwtGkXzrYchpW39FzX6Y7r/b/9dUGYFDgXEwJnQcvI90B02Duxu24/Djceh5nrFmD4YXbINMwKmQ4DK98D0Wk3YXP1UeysPYUuh/kmBksYcJSHn9YLiyImYmnEFHho5HsgjHYLviw/ha8rz6LNZrpx3b4MX607lkePwZqYCfDRyQ8dmBw2fF56Hl+UXkCjpatfhrfWgNWDM/Fg3FgEKCigZnHY8VlxFj4puogaUyc0Pdd1Vth1hhI8tTqsjkvHo8NGI8RdvnfAxnFYV3AZH+VfQkVne78MN40WqxNS8UjqSER6yfcOOHge6/Pz8HFOFkrbWm9j8JRCx7JYNiwZj44YiRhf+d4BnlJszruKj89noaipGSzDgOedjN4Qi5ZlsTg5EQ+PyURcoHzvAKUUO3ML8cmZLOTXNoJlCHie/ofR837mpyTgexMzkRAq3ztAKcX+3BJ8diwLlyvqnAxKQSluOlkyK20oHpqWieQoZd6BY7ml+PTgJWRdqxFkTEmLxf0zRyJ9iLLF6UxeOb7YcxHn8yt7GM73x/RkplKeYlxaDO6bPwqZw8RP1wgp60ol1m2/gDNZ1515GdT5XSCEgCHOI8Sj0gZhzeKRGJM+WBHjytUafLXxHE6dLXXmyfRlMADHUQxPjcLqZaMwfmycIkZRYS2+XncWJ44WgsIZpuV52pOr4gzlJKVEYvnq0Zg0NVGRl6OsuB5ff3wSx/dduXHtWxlDk8Jx19pxmDY/TZUn5Y4h0kf15gs4XvdzcNQGCrE6/c5vV7j7BEwM/S00jHTXeI05H5uqfgU7b7ktn+R2BhDlnoalkS9Dz0pfmKrMZfhn6e/RzZlcNs4jIIh2j8Ojsc/CQyN9Yaoy1+APhX9Gp71LIiMSzyb8GL466QtTjbkJz+X+Dc3WdpfHZhkQRLgF4XdpjyPYIH1hqutuw5MXPkK1qUUSI9TNF++N+h6iPKQvTI3dXXjk1Be41tnkksESggC9J/498V7EeUtfmFotZnzvyAbkttS5PBnBEgJfnRs+mbEayf7SF6YOqwXf378F5+udTdzEOCwh8NDq8NHc5cgMkR6bN9lseGL3DpyoLJfE0Gs0eH/hUoyPkpaECABWhwNPb9+D/UXXbsvv6Y+hYRm8d9ciTI2TvjDZHBxe3HYA2y8X9JsXcxODISAgeHPlfMxOlh7/53gev9t8BBvO5EpiUAr85u7ZWDxSepIyz1O8s/UEPj14SRKDpxTPr56OlZOFCxDeKkopPth6Bv/aehZMj7EmxuB4ih+tnoy18zIlL36UUny5/SL++tmxfnNW+qr3NTy8ajweWjlO1gK7bWc23v7bATBEGuPuFaPx/YemyDrVcmBPLt74/U4QgttyVvpjLFw6Ak/+dI6seiAnDuTjj89/DUohWu+EMASUp5i5aDh+/NISaLXKUgPkrN//p0/NNHbn4EjtT+CgFhdGCND76Kozn8Gxup+Dp9I6W9Z3l+DryuckGCG9DIpqcx42Vb0ABy+tzX1ddxXeu/YqzBKMECeFospcir9dew1WTlpzowZLI35z9XVJRsh/GDV4reAN0QZ5fdVkbcczOX9Bs7VDUu0OHhQ13c14JucvaLdJ69DZajXi+2ffR425VTKjwdKBR8/+E42WDkmMDls37jv+CUq7miUxOErRYjVi7bGPUW2S1gXUZLdh7cF1uNJaL+l4JkedzfdW7/8C1zqaJTEsDjse3LsRFxtqer6ZrhlGuw1rd23AleYGSQw7x+H7O7
fiZFWFZIbF4cBD2zbhUl2NJAbH8/jRll04WOzsGC2FYXNweGzjNpwuF+7O3Fc8T/HLLfuw43KB8/93sX/jeAqO5/GT9TtxuFC4k3VfUUrx2qZD+PpMrmQGTyle+HIfdmcJd/2+VW9vcRohUhmUAr/76jA2n8yTzPjX1rP419azToaLQmK9i/tf1h/Hun1Zkhnrd17CXz87dtM1hNT7Gj7ccBoffX1GMmPH7hz8+a8HnIu3RMZXG8/jHx8elcw4vP8KXv/tDvA8FTVC+jJ2bcvCX97cI7nY3Jmjhfjds+vBcbzLomu0h3Fo52W8+eIW8CrbR0jR/1lDxMYZcazuGQA8XD+a/iMKHg3dF5HX+i+Xc+28FZuqXgBHHRKMkJsZdd2FON74ocu5HHXgg7I/wsHbZDF48KizVGFT9Ueu51IebxS9i27OIskI6ctosDTi/bKPXc6llOLVKx+i0y7NmOrLaLZ24A8Fn0qa/8ucr9Bk7ZJVup2jPNrtZjyb9YWkH/YvL21DtblNJoPC6LDiB6e/ksR48fw+FHc0yyruxVEKC2fHQ4c3wCHh4fHbc0dxualeFoOnFHaew4N7N8Ii0Bm2r/589jTOVlfJKnPPUwqOUjy8fYtoJ91evX/2Io5cK5PFcBpFFI9v3I4Ws2tDet35HOzKK5JVs6N37k/X70JdR5fL+dsuXMXGs1fk1wUB8MKX+1De5NrIPZhdgs8OXZJJcOq3Xx5CUZVwi/penckrxwdbpS/2ffXOl8dwudi1AZpbWIN3PzmqiPHhhtM4l3Pd5byS0ga89d4BRYwNmy/g2Mkil/OqK1vw+m93yL4+pcDu7TnYvyfX5dym+g787tn1N/5ODuPo3jzsWH9e9uuTq2/FEPnrX/+KmJgYGAwGjBkzBufPf/Nv7HrXbth5k6zF+z+iKO7YAAcv7k0o6jyGbq5DEYOCIrd9N6ycSXReXsdFtNmbZS3e/2HwuNR2Al128Z3+lY6rqLXUK2Lw4HGxLRtNVvFd+NXOchQbq8ApZGS3F6PCVC86r6SzDpdayxT1j+Eoj/yOalztqBadV2lsxeG6YkXVPznK41pXE842iT8EG81GbCvPV9SjhqMU1aYOHK65Jjqvw2rB+sI8xYzmbjN2Xxd/0Jrtdnyam62obi1PKbqsVmwtKhCdZ+M4fHQ+SxGDUqDbYcfGy/nir4Wn+PDkRQUEpzHi4HmsvyC+YFBK8dGRi4qqpPYaVV+dynE595MDFxWXnicE+PKoa8YXey6qqir7pQSvyFc7LoJVyGAYgq+2u/48N227BKXtlRiGYP0m1+vcts0XFfcGIwT4et1ZlxubXV9fAOfgZRkhfbXxk1PfuFfkGzdE1q9fj6effhovv/wysrKyMHz4cMyZMweNja4ta6WilKKoY72qa9h5EyqNh0TnZLVuVdXp1kHtyO84KDrnRNM+xWXnAecD6lzrEdE5+xuOKC47DziTWA83Hheds6P2BFgVDJYw2Fl7SnTOpspzqroCs4TB1xVnReesv35JVQ8RljD4ovSC6JyvruUovr6TQfBJofiDdmPxFdh5V+FKYTGE4OMr4gvGjuJCmO12xQwA+CQnS/RBe7D4Gtq6uxVfn1Lg80s5ot2LT14rR32ntNBgf+IpxVcXcmFzCN/v7Ou1KGtsVdxsgOMpNp/Lh9kqfL+LqhpxpbxecRNGjqfYfb4AnWbhDVpVQxvO51cq7uvC8RRHs66hqU34fje1GnHi/DWXoRIh8TzF+dwKVNe3C87p7OrGwSNXXYZKxBhXC+twrUx4nes227B352XwChmUAhXlzcjPE9482WwO7Np4QVWfnab6DmSdkRZeVKpv3BB566238Oijj+Khhx5CUlIS/vGPf8Dd3R3//ve/vzFmsyUXRns15IRkbhdBSccmwdEmSxkaraWqO91ebtspONZsrUeZqUChV8cpCopTzcLuxXZbB3La8xR5Q3rFg8ehhmOCC4bJ0Y3jTTmKvCG94iiPffVnYe
f7DwdYOTt21mSp6qbLUR776i7D5Og/HMBTig3lWYp7ofQyDtcVo8Ui7An7oiRbVcdejlKcbqhAtbFdmFGQo/j6gPNe5DbXo6i1SXDOurzLqrrDUgBl7W3Irq8TnPNldp4qwxAA6ruMOF1eJTi+4VKeqkZ8ANDRbcHRojLB8U3n8hTv8HvVbbNj/+ViwfEtp/NVMxw8j93nhfNRdhxXzwAFdp28Kji852i+2kbpYBmCnYeEc14OHilQ1cAOcBYt27VX2BN2/GgBLBZ1hjrLMti9PVtw/PzxInR1KDfUAYBhCXZvVOYRlMz4Ji9us9lw6dIlzJz5n54LDMNg5syZOHPm9hii1WpFZ2fnTf8pUZdd3L0uTVT0Om222gFhdNiFww0ttoHxGrXbW8ALLNBN1mbVxhQAmDgzugUSY5us7aoMhF5ZeTs67P3vlFptRlgFjBQ5clAeTQJJq512C7rsrnMWXIkHRa25vd8xO8+hsVv57ruvKkUMkaqujgH41IGKTmFGRXv7gDAqO4QZ5a1tqoy2G4x2Ycb1pjZVxifg9FJVtQmHSK83tine4fdKwzCobhVmVA4Ag2UYVDe3C45XNbSr/jwIIahuFGbU1LcpasLXVzwFahpEGLVtqjvUchyPmlrhvJ26mjawGvWM6qpWYUZVq7Qy8CLiOYrqCmkJ8Er1jRoizc3N4DgOISE3l/oNCQlBff3tC/Dvf/97+Pj43PgvKkrZuXIHb4ZqkxkAR4UtSTuvzsrslYPaQAUWaaknXqTIJpDvYuXVL6y9sggwLNzAMcwC1zI7pJ1AkiKTwLW+FYZ94BhGgWs5eB72AYr5mkReb7dD3W6vV0abMENt6AdwGgkmMYbImFQRQmCyCl9HbEw6RPw6Jov63yGlgElkF2+22BTnIvSKpxRmi8jnYbFD7deXUnqjYmt/6h6A9wEARpMIo9s+AKsUYBJl2FR7DAHALMIYCH2nTs08//zz6OjouPFfVZWwu1RMGsYd6sIyPdchwrVEtDLqjIgzdCACeQ16Vl0p8b7SMf1fS8+oK4neV25s//dESdEzIXkI3BO1pd2lXMtDo65E/U3X0vbPcJfZW0ZMXgIMDcNAqzQL7xZ5ijTLc9OqaxsgheE+AAyOUnjqhO+7h179Z0IpFb2Op2FgPndRhpv63wghgIdB+J57uOlUlxNnGAJ3kfvhblDWefYmBiHwdBf57hp0qr0uAODlKcJw0w6Ix9BTlKEbEI+hhwhjIPSNGiKBgYFgWRYNDTfXHGhoaEBo6O1Fl/R6Pby9vW/6T4m8ddILIQmJgIG3LkZw3F8fqZoBEPjqhAtDBenU91MAAH9dkGBJ9mBDkKqE2155abxgEDBqgvR+0Lgo1y5FbqwePtr+e5f46zzhprKXDeBsahds6P9756U1wEer3gBlCEGku68gP9xdfTsDAmCQl3ARuBgfvwHZjQ32EWYM8fMfkN3YYD9hRlxgwMAw/EUYwQGq8x44ShETIHKvQtQzHBwv2q9mcKi/+vfB8Rgk0hl4UJif+rAJTzFIpCR7dLj/jVoXikWAqHARRqS/+hwRhiAq0l9wPDI6AJxDfR5K9CDhBoaRMYGKk2F7xbAMomNdl5ZXxfgmL67T6ZCZmYlDh/5z+oTneRw6dAjjxklvwS1XAfoUeGtjoCY8Q8FjqM9ywfFAfQzCDAkqF3GKDL9FgqP++mAM9UxRdaKFgGBi4GzBcR+tN0b6pas8NUMwM2SK4E7IQ2PAtOBMVadmGDCYFzpWsHOvjtVgcWSm6lMz88Mz4C7gEWEIwd2xmapPzcwJHwY/vXBJ+XvjR4BR8b1iCcHk8FiEewgbNPcNS1d8fcB5L0YEhyPOV7ga7drU4ap2YwRAfEAg0oKFu7iuyUhTzYjw8caYQcJh4FUjU1XnVgR4uGFyfIzg+Iqx6hkeeh1mpQlXcb1rfIpqhk7LYu6oRMHxRZNSVJ3QAACGAAsmCneKnTc1eUCMnUUzUgXHZ0xLglajbvPE8R
QL5wpXo504JRHu7uo2TxzHY8GSDMHxURPj4eOnrrklz/GYv2Kkqmu40jcemnn66afxwQcf4JNPPkFBQQEef/xxmEwmPPTQQ98YkxCCBN9VUBOe0TFeiPKcJjonw3+JqkRPDdFjmM900TmTguaqOtHCEAaj/aeKzpkVMk0VgwKYFjRJdM6i8ImqTs3w4LEgfILonGVRo1WfmlkRPUZ0zqrBIyRXMxRirIkV/1GvjEtT9aDlKMV98SNE59w1NBk6F834xMRTigeSxRkLhibASyTk4UoUwAPDM0Rd/dOHxiLQRYM5V7ovM13UuBw7OBqRft6KTUOGEKwZPRxaVnhhS40OxdCwQMWfO8sQrBibCoNIOe4h4YFIjw1XbEizDMHCMUnwEgnxhAf5YHxajGLPC8sQTB8VD39v4c/U39cDU8fGq2KMGzEYoSLN9jw99Jg9IxmswkRPhiFITY5AjIi3wmDQYt6iDMVhJkIIYocEI0GkSZ1Gy2LhqlHKQ1kECI30Q/roWGV/L1HfuCGyevVqvPHGG3jppZeQnp6OnJwc7N2797YE1oFWjNdc6BkfxTU4EnzvBkvEH6LxXpPgqQlQyCBI91sEnYtckyTvDAToQhR5LAgIRvtPddlvJsk7EdFukYoYDAjG+o9EgF7YBQkACd7RSPIe7LJrb/8MBqP9hyHSXdw9GOsVgrGBQxU9aFnCIMMvBok+4j1UItx9MSciSTFjmE8IRgUOEp0XaPDA8tg0RV4RlhAM9vLD1PAhovO8dHrcO2y4osWVJQRhHl6YO1i8h4peo8GD6SMUMRhC4O/mhsXxwrtvwJnv8vCYTAWEnjwBvQ7LUsX7tDAMwSMTRynacjAE0GlYrMwU3n0DzkXl4emjFCVIEjjfy+oJaS7nPjh7pCoP0t1T013OWTtvpPIaH5TinjniBi4A3L1I+fvgeIo1i0e5nLdsSabivSzPU6xZIb6pAYDFyzLBsIwiA5RSitVrXffNmb98JLQ6jbLcHQqsfHCi6rwfV/pWklWffPJJVFRUwGq14ty5cxgzxvUHpFZaxgNTw98GIRrIe5sEEe6TkOzn2mOjYXRYHvVbZ8KpDAYBgyj3NEwKftDlXJaweGzIc9CzBlmGAgGDaPchWBrxgOu5hOCZhCfhqfGQxWDAIMItHA/H3i9p/ovJD8Ff5y0rfMKAQZhbAJ5NvE/S/NeGr0aEm78sBksYBOq98IeMe6QxRizCEK8gWbUlepvS/X3cGkk/6l+PmoXUgFDZDE+tHh9NXw1WQjLqL0ZPweiwSFlGFUsI9KwGn8xdAb0Ej8pTo8dhyqDBshgMIdAyLD5ashweEjwq3xudiXmJ8bIe5gwhYAjB+yuXws/ddd7PqpGpWJ6RLMuoIj3/+701ixHs3X9uU18tGJGIeycLu9mFGcCf7luAqABfl/OnpA3Bo/OUPX9/fd8cxIUL7/B7NSopGk+tFveQCunZ+2cgeUiYy3lJQ8PwzCMzXc7rTz+8bzIyU1znEcbGBOEXz8xXxLh/zThJXXjDI/zwwitLQSlkGyPLVo3G9NkpLucFBHvjpbfWgBB5DEKA2UszMH/5NxuWAb5jp2YGWgGGJMwIfw9axg0EruJ9zlsR7TkdE0N/C0ZicmWQYTBWD3oDetbTpTHSm08y2GMk7op6FSyRlvEfpA/DU3G/hqfGW4LB42QM8RyGx4b8EjpGmms8UB+Al5N/AX+dn+S8l1jPGLww7Gdwk3i6x1/njbfSf4RQQ4AkBgFBjEco3kh/Cl5aae53H5073h/zKGI9gyUzotwD8K+xj8Ff73qxAABPrR6fTrofyb5hIHCdicSAIMzNB+umPIRQiYmoBo0Wn864GyODonpepwsGIQgyeODr2feJJqn2lY5l8e85yzE5MubG6xQTSwh89QZsWLQG8f6uFyTA6bH424JFmBUbd+N1umJ46nT4YtlKpIrkhvQVQwjeWDwXS5OHSWYYNBp8dPcyjIyS1kWYEIJXFs/E6lFp0h
gMgU7D4m9rl2BCnLgHrK9+vmgKHpqaeeMarhgsy+CN+xdgRqr01vOPLxyHHywYK5nBEIJX75+DBWOGSWbcO28kfrR6smQGIcAvHpiB5dOld/i9a046fvboTBAClyGU3tfwxH1TcI8Eb0ivZk9PxvPPzAfDEMmMh+6dgIfumyiZMXFKIl78zTKwLOOydklviGXVPePw2JPSDbHM8XF45Z210Oo0khkLVo7Cj3+1+Bv3hgAAoWoC3t+w5LQRFpPJXo+ijvW41rENDmoCgQYU3I1FnYJDgD4Z8b6rEOM5W/A4rZiM9hZktW3D5bZdsPJGMGB7KqI624Hz4BCsH4IM/yVI9pkp2dDpqy57B04278Op5v0wcf0zQvSRmBw0F6P9p0LDyM8BMDpMONBwBAcajqDD3gkWLHjwTgJx5jiEGkIwJ2Q6pgZPgo6Rf3zS5OjGztpT2FZzAi22DrCEuZF3QQjpYfhjScRkzA8bD4OC0zDdDhs2VZ3DhoozqOtu75cRYvDBqkHjsCxqNDy18o9KWzkHNpRn4fNr51FhaoWGMDfcxQwhcFAeQQZPrI0dhTWxI+Gjk3/ixsZx2Fiai48KL+BaZws0hAHtyUxi4GT4691xb3wG7k8YiQCD/HwJB89jc0k+PrpyCQWtTf0yfPQGrB02HA8mj0CwuzSDra94SrG9qAAfX85GbkN9j8eqh0EIHDwPT50Oa1LS8MDwDIR7yf+9U0qxp7AYn1zIQVZNrXNhoLiJ4abVYuXwZDwwMgPRfr6KGIcLy/DpmSycL6++sfhQip428Tz0Gg2WZiTh/nEjMDhQmlF4q04UXMfnJ7JxpqjixsLAUwqGMOApDw3DYtHIYbhvcgbiQqUZhbfqXGEl1h3Owon86yDEabb/h0HBMgRzMhNw74wRSIxSdmoiu6gaX+7LwvGsUoDgJgalFIQA00fF4545IyR5QvpTfnEdNuy6iCNnip2eBYaA5ykYpqfJGwUmjY7DqgWZSE9Sdtqx5FoDvt56EYePFYDjKJhbGJRSjB01BCuWjkRmhnTDs6+ulzVi8/rzOLT/ChwODgzD9DAIKKXgeYqRo2Nx18pRGD1OuuHZV1Xlzdj2xRns354Nm80BlnUyej9/juMxfNRgLL1nLMZOTVRlhMhZv/+/MER65eAtqDAeQIslHza+CyzRwcAGIMZrNvz08aJ/SykHjtrAEoPoh+PgbSjuOoka8xVYeCNYooE764sE7ykIc0sQZfA9DI1LhgN5HRdQarwKM2cCQxh4arwx3HcMYtzjRf+Wpxwc1A4t0YvO4yiHrLbLuNJRABNnAgMGXlpPjPTLQKKXKwYPO2+HjhGvKcBRHhdbC3ChtQBdDjMICLy17hgbkIJ036Gi+SQ85WHj7dAxWpfzzjeX4mRTIdptZgAUPjp3jA9MwNigoaIhHEopLLwdekYjyqCU4nxzBQ7VFqHNZgZPKXx0bhgfPBhTQ+OhEQmTUErRzdlhYF0zLjXVYG9lIVqsZnC80zgYGzIIs6PioRU4TdSXoWc0oiEbSikuN9VjV1khWrrNsPE8fPR6jA6NxNzB8aKhGEopLJwDOoZ1GRbKb2zAjpIiNJtMsHEcvPR6jAgLx4Kh8TBohA1bSiksDge0LCt6TwGguKkZ2/ML0Wg0wepwwEuvR1pYCBYmJcJdNzCM682t2JZTgMYuI7rtDngb9EgMDcKitER4GoQTOimlsDo4sAwRTWAFgKrmdmy/eBV17V3ottnh5abH0NBALMwcBh93cePZaneAEKdnRky1LZ3Yee4qals6Ybba4Ommx+BQfywckwQ/T3Hj2Wp3OENpLhiNrV3YdeoqqhvbYe62w8NNh+hQPyyYmIQAH/FTHTa7s2KyTiQRFwBa203YczQflXVtMJutcHfTITzEF/OnJiMoQDxPzu7gwPMUep04o73DjP2H8lFR1QKTyQo3gw5hoT6YPSMZoSHCya8AYLdz4KlrRldnNw7uu4LyskYYjVYY3LQIDvHGrL
lpCI8QN2wdDg4cx0PnIifEZLTgyO5clBbVw9Rlgd6gRUCwF2YsTEdUjDLD9lbdMUQGSCZ7Fco7v0Zl13bY+HYAvfVFEhHrczciPGaDFSgUJlVd9nrkt+9EQcdudHNtNxj++sFI9b0LQ72nqy6e1mFvwcWWA7jQegBdjl4GQaA+AuMC5yPddwr0AsXIpDM6cKLpGI43H0WrrfUGI1gfjGnBMzA+YALcNeqOkXXYjThYfxZ76k+i0dIKCupkGPwxP2wSZoaMhbdWHaPTbsae2kvYXHUGNd29DCDY4IslkWOwOGI0/HTyPQJ9ZbRbsKP6Mr66fh7lxhbwPYwggxdWDMrE8kGZgrVMpMrssGF7RT4+K76Eks4mcNTJCDB4YFXscKwZkoFwD/EHpytZHHbsuF6ITwqyUNDaeKMMur/eDSuHpmJtYjqivXxVMaycA3uuleCT3GzkNdbfYPjqDViWmIS1KcMR6yeeKO1Kdo7DgWul+DQrGzm1dXD0lO300uuxJCkR96QPR3yguoezg+dxtLgMn5/PwcWKmhsMD70O85Pjcc+o4RgWqq5WA8fzOFlUjnWnc3CutAr2njoY7jotZqcOxd3jhiM1Sl1tIp6nOFdcia+O5+B0YcWNRn4GnQbTU+OwetJwDB8cpmonTSnFpaJqfH04Bydzr8PaY4jotRpMTBuMldPTkZkQqZqRV1KLTftzcPzCNVhsvcYOizFpMVgxOx0jUwapLpxWUFyHrbuzcexUMbp7KtJqNSxGDI/GsoUjMCojRnUp+WvXGrB9axaOHLkKs9lZkVajYZCaGoWly0Zi3Lg41QwlumOIqFS3owE5Tb9GY/dpELCguLVrJgOAh4Z4It7vEcT5PCD7R2F2tOFow5soN54GAemnsR0BQKElbkj3X42RAffJDhl1O4zYVvNPXOk4DYgydBgXuBAzQ9eAlRkysnDd+KLyc5xrOdPjzu//66QhWkwNmoYVkatkh4wsnA3/KtuEgw3nwFO+XwYBAUMYzAoZi0dil0EvM5xj4x34a/EubKs+Bwd1ft63UpwMgtmhGXg6cYlgvREhOXgOfyk8hHVl52Dr6YtzK6M3T2N2eDJeHL4Q3jILqPGU4t0rJ/BB4Tl0c/aeT/gWBnG6emdGxOO3o+YhwCDPeKOU4u955/C33LMw2m0939+bKSwh4CnFlIhY/HHiHIS4i+9I+2N8nJuNd86fRofVCqbnercyOEoxPjIKf5w+F5EKnhFfXc7FmydOoa27W5QxMjICv58zW7TwmZC25xbg9QPH0Ww037jeTQyGgOMp0iJC8driWYgPlm/07M8rwR92HEVDh1GUkRgehFeXz0JypPxTi8fzy/CHjUdQ09J543r9MYaEBuClu2ciPVb4WKmQzl+txB8+P4jKhnZRRnSIH56/bwZGDZNfvPJyUQ3++K8DuF7dIsoID/LG0w/OwIQR8o+uFl2rx+vv7sO1skawLLmti29vaCc40AtPPjINUyaIe8v70/XrTXjjT7tQWFAnyvD398D3fzAds2a5TmwdSN0xRFSoy3Ydp+oegY1r78cA6V9RnouREfSKZEOh01aHrVU/gcnRLLmzbqznFMwK/xVYIm0R77C34MPSl9Bqa5DMiPcagbWDfgGNxLyPTnsn3ix6HXWWWkl1SAgI4j0T8KOhP4VeYtl3o8OMl/L+imvGKkk1WwgIhnpF49WUJ+ChkbaImx1W/Dz7I1xuL5fEYEAQ6xmKdzIfha9O2iJu5ez48fmvcLrpmqQTgSxxJtF+OOEByd4RO8/hx6e3Yl91kaT5LCEIc/fGF9PWItLTV9LfcDyPn5/cg82l+ZIZgW4eWDd3NYb4CBc/6ytKKV46dgifX7ksmeGjN+DzpSsxLDBIMuNPx0/g/fPSuoqyhMBdp8PHK5dheJj0XIa/HTuLvxy9vcFnf2IIgV6jwftrl2LUIOm5DJ+cyMLrO49JZmhYBu/evxgTE2IkMzaeysVrGw7dyLlxxWAYgj89tA
DT06TnMuw5W4CXP9wLSqnLY8zOEyAEv354LuaNlZ5Ee/RCCV58Zyd4nro8/kt6/tcvHp6FJTNcH43u1fms63jhtS1wcLzk4m5PPDwNq5ZKP52Se7kSzz+3ATabQzLjoe9Nxr33iddiGkjJWb//T5+akSuLoxmn6x6TZYQAQJVxO/Jb/yyNwXVge/UzMDlaJBsIAFBmPI7jDW9LKqZl4cz4uOxVtMkwQgCgpCsbG6v+Itipt6+snBXvlLwl2QgBnKmJxcYi/LPsb5IYdt6O1/LfR6mxWnLhOAqKa11V+O3VD2CX0I3XwXP4Ve7nyJVohADO7rllpgb8PPsjWDnXTdd4yuP5rM0401QquSwBRymqzK34wZnPYHK4bjhFKcULF/Zgv0QjpJdRZ+7E/Ue/RLtVWhPH35w/ItkI6WU0d5tw794NaOo2SfqbN8+dkmyE9DI6rBbct+1r1HRJ69j9/vkLko2QXobJZsODX29GeZtwR9W+WnfhsmQjBHB6s6wOB77/xVYUN0rrdrrt0lXJRkgvw+7g8NSn23GlWrjzd18dvFyC36w/5EzKlMjgOB4/+/cuZJVK64R+Ou86Xv7XXvC8ayMEcCaI8jzFy//aizNXyiUxsguq8at3doLjeEk1SGgP5w//OoCj50skMQqK6/DL17bcyDmRqr9+eAR7D1+RNPf69SY8/9wGWK3SjRAA+Ojfx7Ft6yXJ879N3TFE+uhq6zuwci2yjJBelXZ8hjZLnst5F5o/QZe9QQGDoqBjF2rM2S5nHm/cjCZrjexqqRQUeR2nUNjp+gF9qPEAKs0Vihi5HZdxrvWsy7n76k/jamepbAYPHnkdJThQ73oR2F+fg3MtxeBlVi7iKY+CzmpsrDrlcu7hukIcqLsqm8FRHqVdTfj3tZMu555qKMem67my6y9xlKLa1I53810zshpr8XGB/AcZRykau43406XjLucWtTTjrxfPKWK0Wyz47cmjLudWtXfgjeOu3++t4imF2WbDKwcPu5zbZDTht3uPKGJYHQ68tOOgy7md3Ra8stn1vFtF4exL88v1+1xubLptdrz0xX7ZRemcizjFLz/d63KxdDg4pydEQfUwCoqX/rUHDof485TnKV792x7wvNxfoVOv/WMvLFbxTQelFH94ew84jldUmO7N9/ajy+i64/qbf9oNm82hqMLzX987iNZWo/wX9w3rjiHSIxvXjmrjHkVGCAAQsLje+bXoHDvfjYKO3bK8FLcy8tq3is5x8Hacb92vgsHgbMtu0Tk85XG48aDi8vYEBIcaDojOoZRiR430nd7tDGBH7TGXP9aNlacU9wuioNhYedplWfl118+BVcjgQbGh/KJL786nxRcV99nhKMWGshyYHeKt6D8rzFLF2Fp6FR1W8Qft53k5soq43crYV3YNjSbxB+2Xly8rLnPOUYpT5RWoaGsXnbcx64riNvI8pciprkNRQ5PovG2XrsLuYgEWY5Q2tiKnok503t5LRTBZbIp+6TylqGvrwpmiCtF5R7NL0dbVreh+UQq0dXXjaE6p6LxzueWob+5U/JmYum04eEbc23iloAblVS2Ke+3YHRz2HRb3NpZea0BBQa1iBk8p9uyW7m38tnTHEOlRZdc2xUYI4KxFUm3cAxvXLjinuPMAHNS1m12MUW48CaNd+AGV33EG3Zxyi5eCR6kxF83WWsE5uR2X0W5vV8GgKDdfR6VZ+AF1peMaai1Nijv5UADV3Q242in8gCroqEZRV42qfkGN1g6cay4WHC/rasKFlnJwKhjtNjMO1RUKjteaOnG4tkRVnx2zw46dFVcFx1stZuwoK1TFsPMcNl4Tdj932azYWJh/W6KlXH11VdgzaXU48OXlPFUMhhB8dTlXcNzB8/jiQo6qUuosQ/DlBWEGpRSfn8pRfP0bjDPCixKlFOuOZavqe8QyBF8dzxGds/5QtqpGkgxDsP6QuKd44/5sVZ2HCSHYsDdLdM6WXdnqTqdQYNOOLNHN0/
bt2Yr73wAA5Sm2bs1S3Vl4oHXHEOlRtXEv1DTJAwAKO+rNJwTHSzqPQE1HYCcDuG4UdivndZxW2RHY6RXJ7xAOnVxqu6CqWy/gLN1+sfWC4PjJ5mxVnXQBZ+n2k805guNHG/MGhHGkUXjBOFh3VfEOv1cMCPbVCi/g+2uKoPZ7RQDsrBQ2RA5WlcKhwggBnN/dHWUFguMnKitgcbjO6xETTym2FwsbbeeqqtFlVb4ZAJxeke0Fwozcmno0G83qGDzFrivCjOL6ZlS3dqh6YnE8xb68YsHddU1rJ4prmxV7EXoZJ65ev3E89la1d3Uju6RGldHG8xTZxTVo7+o/z8liteNMznVVnYcppSipaEJdU4fgazh2uljVAk8B1Na3o6xCOD/oyOGrt52OkavWFiMKC4Q3mv8N3TFEemTlWlRfg4CBjWsVHDdzrVBr7DBg0C3idemyt6na4QPO0InJ0f8PDnDWDFHTrRdw7jCMji4RRpeqhxPgfHh02oUZbTb1sVKO8qLXabWaVBuGPCiaLcKMFotJtbFDATRZhJNJW7rNqhkARBNWW7rNKu9Uz3XMwkaA2JgctXULJ/e2mgaG0WW1geP7/521qjR0euXgeJis/YfkWrsGhkEp0GHq/34NFAMA2gSu1WFUFvbpT60d/TNMZiscjoHxMrQLMDiOh8mkzoi+wWgfuPs+ELpjiPRITVjmPyLgqfCOTmxs4BgD8T4AToTBDQCDUnqjXkf/jP7rhchiADcKRwkxVCIAOE/eCI6p9CL85zpi94oOyAJuF3kfHOVVG1ROhvD9cPD8gPS1ELvnQgu7XIldR+w7N1Ach4rd/e3X6p8xUPcKEP7cBzJE4BC4lloPgjTGAL4PAYPm27hX/y3dMUR6pGXkFVzqTxQctKzweWmDyJh0Bg89K/xa3TXq3wcAuLHC1UM9NZ4DEP4h8BCptOqpcVcdNmFA4ClSEMxL46Yq/t3L8BZpyCe3IJmQxOqVeGv1sk/k9Cc/vfBr9dbpVeWH9MpXL1yJ2FuvV+0FA5yvVYwxEPIUuY63SHl3OdKxLHSa/usG+bgNDAMAvARer7ebuqrRN1+rf4aXx8AxhK7l5TFw98pbgOHpOYDvw7P/16vTaaDVyu9R1j9j4F7vQOiOIdKjILexEjr0ulagIVNwLNJ9hITuueKi4BHuJtyhMtYzZQBCARwGeyQLjid4Jar2VnDgEO8p3N8nxSdO9cLHgUeyt3BBpXS/WNUeCx4U6X7ClRdHBsSoZhAQjAqIERwfEzxIdYInQwjGBQszxoZGqzZ1WEIwIVy4Idjo8EjVPheWEEyIEq62mREerioxspcxLjpKcDwlPAQ6Fz1kpDBGDRLuDDw0NBAeevkNJ/uKIQRp0aHQCCRYRgf7uuw140oEQGyoP7wEDJFgX0+EuugDI0VhAd4I9u1/8+TprsfgiADVmw5fbzdEhfr2O6ZhGSQlhKkuCe/upkPsIOGifGnDo1QztFoW8QnqSv0PtO4YIj2K8V6pMjzDIMCQCS+d8KKU7LtI8bFapwgC9XEIcROuJJjpP0OlIULgrwtFrGeq4Iyx/uOhJeoegj5aH6T5pguOTwzMgDurzmr30LhhYlCGMCNomOq+MQZGizlhIwTHxwYNRoS7r6pPhCUEd0ULv4/0gHAk+ASpW8QpsCZOmBHvF4iRwRGqFnGOUtybmC44HuXtg8nRMapyUThKcX+q8PsI8vTA3Pihqhn3jUgXHPc2GLA4bZiqUxocpbh3jPD7cNNpsXxUqioGTynunSDM0LIsVk1MU/WZUwBrp2QIhtwYhmD19AxVRgIhwOoZ6YILNCEEK+dmqMoTYQjB8lnp0Ig091u2cITiY7WA814snJMGg0H42br0rpGqGCzLYPbs1AH14AyE7hgiPfLWDYG/YQSU3xIesd53i87w0oZikMdYFV4RilS/ZaIzPDW+SPEZr+pUy7jABaKxeneNO8YHTlDMICCYHjxTtK+NntVhTqhyBgMG80
InQidSrl7DsFgWOe5Gfxe5YgmD+REj4SHSc4YhDO4ZPEbR9XsZ8yJS4acXDs0QQvBA/CjFHguWEMyIGIowd/HQ4QPDRigOnTCEYFxotMsy7w+kZSj27jAAUoNCkBIs3kflvhHpihkEwGA/P4yOFC/BvmZUmqpTGiFenpgcFyM6Z/VYdQwfNwNmpYiXYF8+TnhDIkVuOi3mj0wUnbNoYrLLjs1i0jAMFk4Q9uACwJyJw2Bw0fXWlRZPF78XUybEqwp58DzFknnponPGjBmCgADlmyeO47F4qfDG6b+lO4ZIHyX5P6XIm0DAwlefglCPqS7njg58CISwkHvckoCFv24whnpNdzl3WsgqsEQr+70wYOCvC0Gmn2vG3NAF0LN6RQxfrS+mBE1zOXdJxDR4atxkGwoMGHhpPbAoYorLucuixiFA7yU7H4UBgTurxz2DJrtmRI9AhLufIoaOYfFovGvGkkHJiPMOlL3TJwA0hMGPUia5nDs3Jh6pASGKGAwIfpbpmjFl0GCMDo9U5rEgBL8Y75oxMiICUwbHKN7p/2LqZJdJtclhIZifHK+Y8fNZk1wuzjFBflgxOkWxN+Gn8yYK5qD0KsTPC/dOU75w/XD+OLjrxRtQ+nq64XsLlBvrDy0YA18XISR3gw7fX6W8z8rq+SMQ7C8eQtJpNXjsQde/1f5ECLBwThoiw8WbKrIsgx887vr53D+DYNq0YYiLk9/w8JvWHUOkjwIMGRgR/Bqcj05pv24CFu6acIwNfReMhHBFkCEec8JfBgEjeREnYOGhCcCiqNehYVwnXgUbInFvzHNgCCvZ+8KAgbvGGw/Fvgw96zouHKQPwo/inoaGaCR7LRgwcGPd8HT8z+GpcW3VB+h98OvUJ6BjdLIYelaHV1N+CH+d6zb3vjoP/HnEI3Bn9ZINBYYw0DIavJHxEMLcXLeg99Qa8P64++GjdZPOAAHLMHh39D0Y7Om6G6tBo8XHU+9GkMFT8iLOgIAlDN6bsAxJfq4fTlqGxUezViDS00cyw/lLInh7ykJkBgvnPNx4TYTggwVLMMTPXyYD+MO02ZgQJZyDcmM+IfjL4oVICg6WbSi8OGMaZsYNkTT390vnICNKfk7KT6dPwMJUcS9Cr361dDomDB0k2xh5dNoorBwjzdvxk8UTMXN4nOwt2prJ6bhPohHz6OKxWDA+SSYBWDghCY8uHitp7t3zM7Fidrqs6xMA00YPxRP3SDMwFs0ZjrUr5RlVhACjMgbjpz+YKWn+9BnJePgR15usmxkEqamRePa5hbL+7tvSne67/ajBfAIXGp4FR3vLUffXdp4FBQd/QwbGhLwNHet60euralMW9ta+BBtvAvpt1v4fRpA+AQsifwd3jetFr68qTUX4rPx3MHNd/bZqB5wLNw8eIYZoPBDzK/jo5LUgrzCV4y8lf0aHo8MlI1gfgp/GP4MgfbBMRh1eyf87mq1tYED6PSHyH4Y/Xkl5HFHu8pKxqs3NeCbr36jubhFhOP89QOeFN0Z8D/Fe8tqc15nb8cNzX+BaV2O/rdr7Mvx07nhvzFqk+U5lZusAALbsSURBVEnvwgoATd1GPHJ8A6601Qsyer9t3lo9/jFpBcYEu168+6rN0o1HD23GxcYalwwPjRbvTVuCaZHyWql3Wq14Ys92nKyuFGUAgJ7V4M+z52PukKGyGGabHT/duQuHSssEGb0cDcvi93NmYWmyvMXS6nDgl9v2Y9eVon5bzveKIQQMIXhx/jSszpTe6RUA7ByHVzcfwuaL+S4ZAPCzBZPxwCR5Xg6O5/Gnzcfw5fEclwwKih/OH49HZ4+WdRyb5yn+tuUkPtl9AaSnhX2/DIaA8hQPzB+FH941UVbyJqUUH285hw82ngIhwoze97hyTgZ+fP9UWaEjSim+3nYJf//oKEAhGM7sZSyYlYqnfzhLNP+kP+3ckY2/vLO/p1GgAIMl4DiKadOT8OwvFkCnMjwlR3LW7zuGiIDsvBHVXbtQ1v
kljPbym8YIGIR5zMRg79UIMIxQXPvAznejpPMQcts2o9V2/RYGQYznBKT6LkWEuxqGFXntp3GmeRdqLWW3jSd4ZWJs4DzEeaaDUXhc1s7bkdV2EYcaD6LMdHtJ9STvZEwPnok0n+GKGQ6ew9mWXOysPYb8fsq2p/jEYWH4FIzxT4WGUXZiwcFzONNciK8rT+FSW3+MaKyImoApISnQMcp+0BzlcarxGr68fh6nGktuM3eSfMKxNnYMZocnw8AqSwjmKcXphnJ8VnIRh2puZyT4BOHB+FFYOCgJ7hpxt7mQKKU411CFTwuysbei+LaH7RAffzyYlIm7hiTBU6vs+CSlFFn1tfgsLwe7rhXfVu8i2tsHDw4fgeWJSfAWORbsSrl19fg8Owc7Cgpvq3cR4e2N+0akY3lKMvzclJ8gKahvxJcXcrH18lXYuJuT4kO8PLF29HAsz0hBgIfwUXBXulbfjK/O5mLLxXxY7DfXAfL3dMOacelYMToFwd7KcwzKG9vw9clcbD6TB/MtTeB8PQxYOXE4VoxPRaif8pMw1U3t2Hw0F5uP5cHYfXMBL083PZZNScWyqWmIDPJVzKhv7sS2Q7nYfPAyOm9pNOdu0GLx9DTcNTMN0WHyNn991dxixM79udi6KxtttxQp0+s1mD8rFUvmpmPwIHmbv75qazNhz+7L2LrlElpabi56qNWxmD07FYuXjPivhGPuGCIDKEopOmxFsHCN4HkrNIwXvHVDYdAIJ91ZHU3osObBwXeCEC30bAB8DZmCoRtKKVqsZTA5muCgVugYD/jrY+ChEf6Cdjva0GS5CivfCQYsDKwfQt2Gg2WEF5ZGSxXabE2w8xboWXcE6SPhK+IB6eaMqDYXwNLjUXHX+CDKPRlakfBQvaUOzdYmWDgL3Fh3hBhCEKgXPo7WzZlRZiyGydHVU1vEC0M8E6AXOTFT292EekszzA4L3DUGhBkCEeYmxrCgsLMUXQ7nD9VL44EEryFw1wgvLLXdragyNcHEWeHO6hHm5o9BHsIMC2dDbnsZOuwmUErhpXVHik8MvERqjNSZ23Hd2Ayjwwo3Vodwdx8M8RL2Flk5O7JaK9BmM4GjPHy07kj1i4SfSI2RenMXrnU2o8tugYHVItzdG/E+QYKGrY3ncKm5Ei0WE+yUg4/WDWn+4Qg0CC9ejWYjitub0WmzwsBqEOLuiST/YEGGg+dxobEKTd0m2HgOPjoD0gJCEeIuvHi1dJtR2NyEDqsVepZFsIcnUoKEGRzP41JDLRpMRlg5B7x1BiQHBiPCS/g50t7djauNTei0WKBlWQR5eCAlNEQwtMJTiuy6OtR3daHb4YC3Xo9hQUGI8hH2jnZZrMiva0BHtxValoG/hztSw0MEd9yUUuTW1qOmoxMWuwOeej3igwMQ4y+cS2Cy2pBf3YCObgtYhoGfuxtSokKgFThSTClFfk0Dqts60W23w1OvQ2yQP4YECz/jum125Fc0oMNsAUMIfDwMSB0UCq3Arp5SiqLaJlQ2taPbZoe7XoeYYD8MDRN+/ljtDuSX1aPT5DQUvD0MSI4NhV4rvAm4VtOMioY2mCw2uOu1iAr2RXyk8Pfd7uBw9VodOowW8JTC28OApCGhMIgcjb5e04Ly2laYum1w02sRHuSNxMEhwt93B4fCknp0dHaD43h4eRmQMCQU7u7Cz+qq2jZcr2yG0WyFQa9BSJAPkoaGCn/fOR5FRXVobzfDYefg5WXA0PjQ/+rpmDuGyH9BlFK0WS6guvNLNJj3A7cc09Uyfoj0vhuRXqtg0Cg7w00pRZMlHwXtm1HWdei248Y6xguJPkuQ4LsEXtowpW8Ftd0luNS6C/kdR2+rsKpj3JDhNxcj/ObBXy8vNNFX1eYKnGw+iPMtJ2CnN++sdIwe4wKmYmLQTIQalDOqzHXYX38cRxpPw8rfXMZax2gxJWgs5oROwSAP17kLwowmbK85jd2152Dmbt69aQmLWaGZWBI5AfFe8kIsfV
VjbsPGygvYWHEBXY6bd28awmBueCpWDRqDVN9IxZ6zOnMH1pdlYV3ZJbTbbi7HzRKCORHDsHbIKIwMjFLMaDQb8dW1y/i0MAvNt5STZ0AwK3ooHkgYgXGhgxQzmrtN2FB4BZ9eyUb9LV14CYBp0bG4PyUDk6OUJ6u2W7qxMT8fn2TnoKar87bxSdGDcH96OqYOHqz4NEiXxYoteVfx6fkcVPbT6XfMoEjcOyodM+KHQKOQYbLasCOnAJ+fyUFZ0+2tKTKiw7B2XAZmJsVBJzNs0Ktumx17soqw7kQ2imtv76GSHBWCeyalY3Z6vKiBISar3YEDF4vx1ZEcXK1ouG08PjIId09Lx5zRCXDTKfMy2h0cjlwowYYD2cgrub1jcUy4P1bPzsCc8cPg4abMy+hwcDh5oRSbdmUhJ7/6tvHIMD8sX5CBuVOT4TmARdq+Kd0xRL5lOXgjLjf8GK2W0zfyOvqX84GREPBLRHuvlcmw4Gjdr1FpOiHKIGBAQTEy8DGk+q2V9UB38HbsrH0b+R3HwIAFL8rgMTHobkwOksfgqAMbKj/G6ZYjN/I6+lPv2PTg+VgSsUZWSIejPD4r34RddYclMWaFTMLDsatFjxPfKkop/l22F59XHARDGPACRctYwoCjPGaFZuLniauglRHSoZTi36XH8V7RQWc8WygO3MOYEZqE36avkB3S+bTkPH53eT9AROLZPYxJIUPwztjlssMt60su45dn94GCumSMCY7C+9OWw0dmuGVbSQF+dmQvOMqLMJy5IMODQvHv+csQ4CYvFHKg9Bp+vHs3rA6H4HHpXkZCYCA+vmsZQjzlhUJOllbgyU070G1zGuj9cXoZMf5++Pc9dyHSV16O2sXr1Xji8+3oslgFMtSc+R48pQj39cYHD96FwUHywhRXKuvxxPtb0WbqBiHot45HLyPI2wN/f+wuxIcLex37U3F1E578yxY0d5huXOt2BsBT5+mcd59aiuQYeRvBirpW/Oj1zahv7gQjkLvSew893fV44+klyEiQt/GobWjHz17dhKraNmFGz2PWoNfitV8swej0GFmMb1t3DJFvUQ7eiAu198JoL8GtXhAxDfH9EWL9HpfIsGBP9Y/RbCmQVRAt1e8ejAr6oaS5HG/HV5Uvo8KUJ6tq6gi/eZgb9kNJxghHOfyr9M+40pkDOU1eRvlNwL0xP5BkjPCUx7slH+Nks3Bn31tFAIz0H45nEr4v6VQLpRRvFn2NXbXnZDAIRvgNxR+GPyI5h+Wtq3vx6fVTkhkMCNL8ovCPMQ9KNkbevXoM7149Lp1BCBJ8grFu6oPwkJhf8kH+Ofz20hHJDJYQxHj5Y/O8+yQbI19cvYwXjh8QXFT7Y4R7emPzXfcgyF04tNVXWwuu4pm9ewEZjEB3d2xecw/CvKTlTOwvvIYfbdoJgEJKiRCWEHi7GfD1g3cj2t9XEuNUSQUe/3QreCpsFN7KcNNp8cVjqzE0RFo+w6XSajz2j81wcMJG4U0MhkCn0eCjJ1ciKUpaPsPV8no8+uZG2BwOSfVUGEKgYRn8/afLkREnzQtaVt2MR179CharXTKDMARvPbMUY1NjJDGq69rwg1+sg9FkkcQghIAQ4DfPLsbkMfIStL9NyVm/7xzfVSFKKS43/Fi2EQIApe1/QZ1xp6S5x+tfk22EAEBe2zoUtm+VNHd33V9lGyEAkNW2B+datkiau6X6C1zpzIbcTnMX2k5hb500xoaqnbKMEPS8mgutl/F5+WZJ87+qPCLLCHEyKLLaSvB20SZJ8zdUnJdlhADOcvO5bVV46bK097G1IleWEQI4PSZFHY348ZmNgpn6fbW3skiWEQI4q4qWd7Xi0SMbJS1ix6vK8avjBwBI/2ZxlKLW2ImHdm+GnXNdUflCTQ1+tm8fqExGs9mMBzZvgsVhdzk/v64BP92yC5RKM0J6GZ3dFjy4bhOMVtedWa81tuCpL7aLeo36Y3Tb7Xjko81oNwt3He5VdUsHnv
xgm2QjBAA4nsJqd+AH/9yMxg7XXbGb2o144i9bYJVohADO766D4/Gjd7eiplm4u3iv2ru68dQfN0k2QnoZPM/j2be343qN647uJrMVP33la8lGCOBcdyilePmNHSgqvT0U9b+oO4aICrVbL6HVchpyjZBelbS+CeqiD0mLpRjlxqOKS8NfavkAHBV/CLZaa5DbflBx/5jjTetg4y2ic9ptrTjetF/R9QHgQMN2mB3CLeQBoMtuxNYa5YzddYfRZhN/QHU7rPjkujIGBcWuunOoMd8eK+8rG+fAe0UHFTF4UOyvu4LiznrReRzl8UbeIWUMSnG8oRTZLbfHsfuKUoo/XDqqqG4tRynON1bjRO11l3NfPyfPmOrLuNLcgAPl11zO/fNpeUZhX8a11lbsLCp2OffdE2edRzEVMGraO7E596rLuR8cvQA7x8kud87xFC1GM9afz3M595Mjl2Cx22VX4eUpRafZii9P5Lic++XhbHR1W2WXO+cphcVmx2cHLrmcu+VwLlo7zLKr11LqzPf4ZMd5l3P3HM5HQ1OnIgbPU3yy4Yysv/uu6o4hokJVnetUNcqzcvVo6T4pOqegfbNKRgcqjOIP6kttu1U147PzFlztEGecaj6s+PqAM6xzrlWccaTxjGCuhhRRAIcaxBecAw1ZsPKud7dCYsBgR634w+NgfT467a53nkJiCYOvK8QfgsfqrqHR4nrnKcb4ovSi6JyzDZUo72pTVXr+06Is0Tm5jfW40tyoivHJlWzROaWtrThbXa24vD0B8HG2+Puo6+jCkeIyVc0LPz2fI+qlajN1Y09ekeKy8DylWHc2Bxwv/BszWWzYej5fFWPD6VzYHA7BOVa7A5tO5CnuucLxFNtP58NksQnOcXA8vj6Yo/gz53iKA2eL0N4l/DumlGLjriy5DuIb4nmKkxeuobG5S9kFvkO6Y4golJVrRqNpv8pGeSyqOr8QYXThWtc+VQwCBgVtwuEAO29FTtt+1c34LrRsFxzlqAMnmpR7XACnN+FY4z7BBy1PeeypO6Kasbf+KDgqfL83V51Q1VyOB4+dNWdh44SNmS/LzyrufwM4vR3bq7NhtAt7qT4rvaCy8RuPPdVX0WoV9lJ9Wpilurnc4eprqDEKe6k+z89RzThXV41rbcJu9C9yL6tiUABXm5pwuV7YS7U+O0/xSaFeRmVbO86WVwnO2XzpiuouzU1dJhwrEvZS7bxUAJtd2IiQoq5uKw5cLhEcP3ipBF1m12EoMVlsDuw5VyA4fiqnDC3t4h5YV+J4ih3HrwiOX8qrRE19u6qu1oQQbN9/WcUVvhu6Y4goVKc1T6URAgAc2izCLsJWawl4F2EVV6Lg0Wi5IriAN1urYOOV7757KY3Wcjj4/ncYLdYmmDj1VnuLrQlGR//X6bB3odnWpprRYe9Cs/X2o4yAMyxTYW5Q9eAAABNnQaW5qd8xnvK40l7Tb2VXObLyDpR0CcePLzVXqV6UHJRHXuvtRxl7da5BPYMCyG6uFRw/U6ueAQCX6oUZ56qrVTMYQnCpVphxoVK5x6VXLEOQVS3MyKqolZTXIyYNwyCrokZwPOd6rapuvTcYZcLvI6e0BhpW3dLFMAQ5pcKMy8XqGZRS5BYLM/IKasCy6u4Vz1NcvioeIv1f0B1DRKHs3O31A5SIo2bBPBEbr9x13lcUPBy0f2PDwg0MA3AWQOtPZs7c778rY/S/SzE5Bo5hFLiW0aHWYHPNMDlsqrw6fSUU3uEoD4uIR2YgGABgsqvbtfaqwybs2emyqWcwhKDDKszosIjnP0lldIow2rvVMwgIOi3C96PdrJ4BOD0WQuo0W1QbbTyloowus/zckNsYPEWnSZhhNFsH5FcoFpoxmqyKmqzeqo6ugflc/5u6Y4golJQGd1LkzP/o/8vIYGAYgPDrZcnA9R4QupZGRn0OpQw5NUBcSSPEUFg6vl+GwLWUFqfqT1oBBtPTcvGbZABQ1dpdKmMg7helVLDqqC
u+HIkyRMYGjjFAn4cYQ8Oq/mYRIv5atRpWccfhG4ye6whJw6p/HwCg04rfq4HQQF3nv6k7hohC6Vjl/QH6Ssv6CsaG3TTiLaElM4g7WAFDxGOAGM4y8/3XY/DSyCu2JCZPTf/1GLy1yntb3CofgWt5a9wHbAH3E2AYGC0MzMAYoP66/gtpEULgq1PeN6WvAvTCxboCDMr7pvRVkEG4zofUGiBiogACRQqbBXt6qP7UHTwvygjx8lAd0uApFe1VE+QtvTOzkCil8PcUZgR4uoNRbRwS+HsJM5xj6t4HwzAI8BZh+LirDmMxDEGgr/D308/XXbVnhyEEgf7Kewd9V3THEFEoX0MGtIzyhkiA0xsS5rlYcDxAHw9PheXg+zJivWcJjvvrwhGoj4aaHzYDFoneE8AIeCV8dH6I8YhTdTKHAYNk73TBHjQeGjcM900Co4pBMMwrDr66/ovvaBgWk4JSJRU9ExIBwRDPcIS79d/HgxCCueFqGUCkuz8SvIW/O4ujU1UvSoEGD2QECFeQvCs2WfXi6q3VY1yYcHfgpUOHqTYSDKwGU6MHC44vTkhU7abXMAxmDhkiOL4gKUF1jgilFHMS4wTH56XGqw6bcJRiXmq84PicjATRUzWSGDyPeRkJwoyR8QPCmD1S+H3MHJOg+ORPr3ieYuYY4fcxdVy86jAsTylmTkpUdY3vgu4YIgrFEC2ivO+BmltIwSHSa7XgOCEMkvxWQI2RQMEh0XepCINgtP9iKD5DBoAHh5EBC0XnTA2aq+pkDg8ek4Nmi86ZFzpVsJy7NAbFvLCponOWRk4Ep+qIMMWyyImiJyRWx4xRxQCANTFjRRl3DxmhalFiQHDvkFGioZE1Q9PVfK3AEoI18ekwsMLhw5UJKapCQCwhWJGQDC+dcMn6JcOGwU2jPITJEoKFCQkIcBfegc8dNhTeBuX9Q1hCMC0+FuE+whUspyQMRpCXcg8SQwhGD44ULfU+Ki4SUYG+ip9YDCEYFhksWl01KSYUiVHBisMzBEB0sC8y44WN6Jhwf4xIjATDKH/2Bvp6YHy6sIEbEuSN8ZlDwKpgeHkaMGXcd7e6qlTdMURUKMJrpeK/JWDhbxgPd2206Lyh3vPBKMzjIGAQZEhCgF78i5rsOxU6RlmXRgIGgfpoRLoNE5033HcUPFhPRclZBAT+ukAkeqeKzkv3S0aAzlcxw1vjiVH+6aLzhvvGIso9SFGIhgBwZ/WYHpIhOm+YTziSfSIUeRMIAC2jwaLIdNF5sV6BGBsUo9grQgjBysHi7yPMwxszo+IUM3hKsTZenOHv5o4lccMUMzhKcW9yuugcT50OK1NSVDHuGz5cdI5Oo8GaEWmKPUgcpbhvZLroHJZhsHZcumIGTynWjhNnEEJw72Txz8wV455J4gwAWDMjXXZRtr66e3qGy+PSq2ZnKA6dEEKwclaGSyN5+YIMxZ4XhiFYMmc4dAqbBX6XdMcQUSGDJhiJAS8q+EsGGsYTwwJfcTlTz3pjYshzyhjEgEkhv3Q5U8cYsCjiadkEAgKWaLA44mmXP2oNo8EDg5+QzQAAhjB4IOaHLnvNsITBj+MfltUgr1cEwE/iH3bZB4YQgheS1kLDsAoMHoJfJt0DA+u6R8uvh98FA6OVzaAAfp12F7y1rnNAXstcAE+NXtHC9ErGPAQZXMemfz16Fvz07ooW8RdGTke0l6/Lec+Pm4wQD2X5Dz8ZOR6JAa4brf103HhE+fgoYjySmYmMMNddpB+bMBpDgwJkMwiA1RmpGD9YfFMDAPePH4GUiBD5DALMT0vAzCTh0E+vVoxLxai4KNnfK4YQTEmOxcKR4psaAJg/Zhgmp8XKZzAEIxOisHyy+KYGAKZkxmH2uATZ9V1YhiA5NhR3z3VtkI0cPgiLZqXJ9u6wDMHg6EDct3yMvD/8juqOIaJSUd53I87vp5LnE7DQMl4YEfoh3LVRkv4mznsOxgb95MYVXDMYaBkDZke+CV99jCRGov
d4LAj/Uc/CJ42hITqsjn4ZYW6uH04AMMw7DQ/EPAEGjKQF1snQ4JHYnyDWUzjWejMjDs8kPAoNkWYoOI0pBj+JfxipvtJirQneUfhd2sPQMRpJnhHS8z/PDluNCUEpkhhxXiH46+j74cZqJS0avTOeT16IeRFpkhjRnv74aPK98NLoZS1MP0+dgdWxIyTNDfPwxhez7oav3k0W40dp4/HwsFGS5ga6eeCLhasQ7O4hi/FwWiZ+nDlO0lwfgwGfLV+BCG9vWYzVKSl4btJkSXM99Tp8uOYuDA7wk7XALkhOwMvzpktaMA1aDf7xwFIkhAXJYkxNiMXvls+WxNBqWLz98CKkDQqVzCAEGBUXhdfvny8p1MYyDH7/6HyMTIiSvIgzhCB1cCjefHyRpFNKDEPw4qNzMDEjVhqghxEXFYS3nlkKg8510jkhBE8/NhMzJkjP82AYgqgIf7z50gq4u0lrPPld1zfWfbe8vBy/+c1vcPjwYdTX1yM8PBz33nsvXnjhBeh00m7e/0L33V7VG3ejpPVNWLhaELD9FDtjAfAIMIxHYuDLko2QvqownsD5pvfQZa/pl9H7b2FuIzAu5Bn46oST/IRUaryEA3X/QoutCsz/Y++84+Mor/39zMw2rXq3LMu25N57xdjGmN4xvUPKTbs3NyG/VHJDcnPDTSC9h4TeTDO9mWKDsY1777Ykq3dpe5t5f3+sZFw0s7M7gkvZLx/xAb1n32eb5j1z3vOeg4J2EqPvd0MyxnHu4K9S6jL/R9qng969PF3/EI3Bo8jIp+R19P1umHsEV1bczLBM/SQ/fUY1/zzyBEf8iRhD+ELVVYzLSX6f9ZC3gd/tf5bdnppjLeyPV9/vhrlL+cboS5hZYM6ZOl5HvK38YtdLbOqsNmQMdRdy+/hzWVSafOLaUV8nP9n6Ku+3HDnWXr4/Rrk7l+9OXsp5Q8YnzWj0e/jxB2/wdv0hpH7atfdxB7mz+e60RVw+wpzDdrzaAn5+/N6bvNHbO0aPUZyRybdmzee68cbbJf2pOxTkzrff4eUD+xH9MPpa0ednZPCN2XO4ZVriLYCT5Q2F+Z83VvHCrn2ovQ3O+mPkuJx8ad5MvjR/VtKRgUAkyt2vrmbF5j3Hmv4dT5GkeD+TTKeDm0+bzlfPmJN0Lk44GuN3L63h6bU7iMT6YfT+f4bDzrULpvD18+cnfYw5qqr89fm1LH9nO8FI9NjzPp4B8WO0l58+mW9eviDprQxV07jvufU8/uoW/KEIssQJTQml3n/ZFIULF07gm9cuIsOV3Mk3TRM8tmIDj67YgM8fRpalU7aFJElCkSXOWjSOb35hCZnu1HOKPg4ls35/ZI7Ia6+9xvLly7n22msZOXIku3bt4ktf+hI33ngj99xzj6k5Pk2OCIAQGh3BtdR5HqUrtAlV+JGwYVfyKMu6iCHZVyfMCUnMEDQFt7C3+xkaA5uJaUEkZBxKNlXZSxmXdxm5DuuM+sAeNna+xBHfFiJaAAkZl5LFuJwFTC84jxLXcMuM2sBh3m1bye6erYTUeOGfDMXNlLxZLCheSoXbGgPgiO8orzevZkPndgK9BcncNhcz8idz7qBFjMweCEYTzzesZXXrdnyxIEII3DYXswvGcOmQBUzMHW6pfDdAta+NJ2s38HrjTjzRIJoQuG0O5hSN4Jrhc5lZYJ1R6+vkiSNbeOHoTrojQWKaRpbdwayiYdwwYibzS5MPhZ+sOl83jx/YzrNHdtERChDTVDLtDmYUl3Pz2BksGlxluf5Is8/LY3t38PT+XbQHAkR7GZOLB3HzpGmcOWyE5fojbX4/T+7axfJdO2nz+4moKm67nfElJdw0ZSpnjxxpuTZIpz/AM9t38+TWXTR7vURiKhl2O6NLirhh5hTOHTcKh4UkWoCeYIjntuzhyQ07aOrxEo7GcNltVBUXcO3cqZw/eQwuizkIvlCYlzbtZfn7O2jo6IkzHDaGFuVz9WmTOX/GWNxOa3f2gVCEVz
fs46nV2zna2k04EsPpsDG4MJcrF03mgrnjyMqwtnCHIlFWrt/P0yu3UdPURTgcxeGwMagwm8vOmMwFCyeQk5larl2fItEYq9Ye4NlXtnLkaDuhUBS7w0ZJYRYXnjWZC86cSJ7BseNPkj4Rjkh/uvvuu/nrX//KkSNHTNl/2hyRkyWEMLU4+CN7aPU9Q1htQtNC2ORs3I5xlGRdkbBeiVlGd/gQNd6X8EebUUUQu5xFjqOKqpxLyLAlZgAJOZ3hOnb3vEFPpImoFsShuClwVDAh71xy7CUDwugIt7Ch8x3aw82EtSBOOYMi5yBmFyyh0DkwjPZwB6vb1tAUbCakBnEqLkqcxSwsPo2yDOPj1OYZ3bzR/AFHA834Y0FcipNSVwFnlc5mWGaZ4WP7OInfKy8vNWzhoLcJXzSES3FQ6srlgvLpjM4ZGEZXOMCKo9vY092ENxrCpdgpcWVz8dDJTMovHxBGTzjIs9W72N7RiCcSxqnYKM7I5JLhE5heVJ7w8WYY3kiYFYf2sLmlAU8kjF1WKM5wc9GIccwZNGRAGIFolOf372VDYz2ecBibLFOY4eb8kaOZXzE0oZNnhhGKxnhl337W19bRHQyhyDIF7gzOGj2ShVXDB4QRicV4fc8h3j9cS08ghCxBnjuDJWNHsGhUZcLS6GYYUVXl7V2HeW9vNT29FWFz3S5OH1fJkokjEjp5ZhgxVeO9XdWs3nmYHn8QTYPcTBfzxg3jzKkjE0ZOzDBUTWP9zhpWbT5ElzeIqmnkZmUwc1wFS2ePTrh9Y/b6/knUJ9YRueOOO3jttdfYtKn/rp3hcJhw+MOyux6Ph4qKik+tI2IkIQQdgZdo9PwLX2RH77aKRjxYGf9DlpApdJ9Pee6XyXQkHw4XQtDgX8X+7kdpD20/gSEhI4iHFcszFzM2/yYKXRNSei3Vvg1s7niKusD23nlFLyOebyIQVGXNZWbhlZS7kw+5Axzw7mBV64sc8O1A7mWIXoaEhIbGmOwpLC6+mFHZqTH2eQ7wctNrbO/eidT7vE9mjM8Zy/ll5zAlL3GyW3/a66nhmfq3Wdu+41jYWOtlyEioaEzMqeKyIWcwv8hcrscpr6OngYer3+Ptll1oQiAdz5AkVKExIXcI1w1fwNJBk1K60B3wtPKvA+/zSn28kdqHjHhysSo0xuUO4qaRc7m4IrXTIIc9Hdy75wNWVO8iqqm9z/1ExujcYm4dO5MrqyanFEk56unm7zs38vSBXYTV2AkMRZKJCY2q3HxumzCDa8ZOTqnKaqPXwz+3bmb5np0EotFjW0THM4bm5HLLlOlcN2kyToPjynpq9fm4b8MWlm/fiS8cOYEhyxKqJijLzuLGmdO4YfoUMuzJF83r9Ad4YN0Wlm/cQU8ojNI7L3Dsv4uzMrl+9hRumDuNrBSiHD2BEI++t5Un1m6nyx/sl5GfmcE186dw/enTyHUnH4HwhyI8vmory1dvp93j75eR63ZxxemTuX7JdPKzki/+FwpHeeqtbTz55jZaOr39MjIzHFy2aBLXnTuDorxPf1Gyk/WJdEQOHTrEjBkzuOeee/jSl77Ur82dd97JT3/601N+/1lzRDQR5UjHHbT6nyLudBjVjIgnXY4q+jVFmRclwVDZ1v5bDvYsT8iIOyiCWSU/oipHv8DayRJC8H7b/WzseKLXATFixMcXl36NaQWXJsV4q3UFrzUv7zfX43j1jZ8/6FrOKLkkqQX2taaVPHrUPOOSwReybEhyjFeb1vLHg08eczj0GRIagkvLF/GlqkuTOgX0WuM2frrzaQDDWiR9jEuGzOR74y9JeFroeL3ZuI9vb3waTQhDRp8zd8GQifxi+iU4klhgVzce4SvvPkNUUw3rnfTlGSwdMoo/nHYJGTbzC+wHTXXc+sazhGLRhAyA0wYP4+9LLyXLZI4bwLbmJm554Vl8kbApxoyycv554aXkuswvsHtb2rjtyWfpCgQT1oaRJBhfUsK/rrrMsArryTrS1s
ltDz1Dm9efkCFLElVFBfzrpsspzTG/wNZ39PDlfzxDQ6cnYXE3WZIoL8jhH19expBC85WbW7t9fPWPz1DT0pWYIUsU52bx129cTuUg88UrOz0B/vPXz7L/aGvCI8aKLJGblcEfvrOM0UMTn976NOkjdUS+//3v88tf/tLQZu/evYwd+2HSXENDA4sWLWLx4sX885//1H3c5yEiIoTgUMd3aPM/h/lqT/HL1JjiP1PoPtcUY0vb3RzyPJX085tdcieVOReYsl3T+i82dixPmrGk9BtMKTDn8LzVsoJXm59ImnF+2XUsKbnElO0bzW/xcO3jSTMuHnwBV1ZcZpKxnt8eSJ5xyeCFfGXkMlO2bzbv5IfbkmNIwIXl07lj4jJTTtXq5oN8bd3jvbEiswyJsweP4zezrzAVGVnfUssNbz2OJswzZEni9LJK/rnoSlP5H1tbG7nqpSeICc10RVNFkphRWs4j511pKmqxt72NZU89RlhVk2KMLy5h+eVXm4paVHd2sezBxwhEjJ2pkxmVhfk8eeM1ZDsT5000dHu44u+P4QmFTNe8UGSJwbk5PPnla8l3J44otHl8XPP7x+nw+pNiFGZnsvyb11GUk7hQW7cvyI13P05TpycpRrbbxaPfvY7BhYnXIF8wzBf++wmONneaZsiyhNtp5/6fXM+wQfmmHvNpUDKOSNKxzNtvv529e/ca/lRVfXiSorGxkTPOOIP58+fzj3/8w3Bup9NJTk7OCT+fNbX4HqPNv4LkSk7GbQ+0fZNQtC6hda3vtZScEICNrT+jJ3w4od1h77qUnBCAt1v+THPwQEK7g95dKTkhAK80PcZh356Edod8R1JyQgBeaHyZbV07EtpV+xv5/YHUXsfzje+yqnVLQrv6QAf/tf3JpCubCODFhi08X9//dunxag16+eYHTyblhMQZgtcb9/DQofUJbbvDQb646mmESO4vRBOCdxuP8Odd7ye0DUQj3PL6M0k5IRAvGrapuYG7N72X0Dasxrj1hWeJJOGE9DF2t7Xys/feSWyraXzxyRVJOSF9jCMdXfz4tTcT2goh+NpjzyflhMSfm6Cx28P3nn3NlP3tD72clBPSx+jw+vn2Qy+Zsr/jwdeSckL6GN5AiG/9/QVTvWfuun8ltUk4IRA/MRMMR/nWb1ZYLl3/aVXSjkhxcTFjx441/Ok7ntvQ0MDixYuZMWMG999//wA0Q/p0SwiNhp6/k1rJdoFAo9n3aAKGYF/XQykyACQO9iR2YjZ1PJly7xgJma2dzya0W932Usq9Y2RkVrclvkC93vSmJcbLTYkvtC82vJdStVeIRxOeqX8rod0zRz9AQ0u5ovrD1e8mvNA+VbOZqKamzLj/0LqEpeufOrKDQCyS0isRwP37NxFWY4Z2zx3eS3c4lFJvFw3BI3u34Y9GDO1eP3yQFr8vpTL6mhA8vXc3XUH9FvIAq4/UcLS7J2XGq/sO0uTxGtptrG1gf0t7StU/VSF492AN1e1dhna761vYWtOYGkMTbK1pZE99i6FdTUsn7++pSZlxoKGNLYcaDO1aOr28ufFAStVYVU1Q39rNuh01ST/2s6CPzDPoc0KGDh3KPffcQ1tbG83NzTQ3N39UyE+8ekJrCav1pN6AQ6XF+ziaCOtadIZ30xM5lDJDoFLjfYmo5tO1aQ/X0BjcnXLvGIHKAc9qArFuXZvOSCv7vFtT7h2jobHXs4WuSLuuTU+0hw2dmywx9nkP0BBs1LXxx4K82bLBMCfESALBIV89B7xHdW1CapTn6jZaappWF+hgS2e17nhUU3msepMFVwdaQ17ebT6oO64JwYP7N1lqA9YTCfHa0f2640II7tu12VKjvGAsxnOHjKNtD2zfaumos6ppPLV3l6HNI5u3WW5c+OT2nYbjj36wzVIvFEWSeGKTcdRw+drt1hiyxPK1xoyn39thnfHudkOb51btSPmGo4/x5JvbUn78p1kfmSOycuVKDh06xFtvvcWQIUMoKy
s79vN5VYtvOfHCZqlLFV46A2/ojh/xPIdkmRHhqHel7vju7tcsMwQae3v0Q8MbO1elHHH5UBIbO/VD3Gva11vufikjs7p1je746tYtxITxHXoiKci83rxOd3xVy278qr5zaoohyayo36A7vqblEJ1hv0WGxPLqzbrjH7QcpcHvscSQJYlHD+pvZe1sb+Fgd4elT10CHt67TXf8cFcnW5ubLDmGAnhkp/7C1+Txsqa61lLjQk0IHtuqv4B3B0Ks3HvIUhdaVQie3ryTmNq/Ix6MRHl5yz5rDE3w0pa9BCPRfsdjqsaKtbssM97aepAef6jfcSEEz76zw9JnrmqC9btqaOk0jlJ9FvWROSK33HILorcq4Mk/n1cFo4fhlIqryUohFNO/O/ZEavup6pqcJBR80Xrd8e5I44AweqL60bH2sPXImQR0RFp1x1tDrSlvy/RJQ6Mt3KY73hhqR5EsOoZoNAT1GfWBDpQU+uucwBAaR/360aNaf2dKjf5OZAhqfB36DJ9xCN+MNCGo8erPU+vptswQwFGv/jxHe6wzABq8Ht3rZV13j0UXOq7OQJBQtH9HubEn8ekVM/JHovQE+1/A2zz+Y1VXrSgSU2n39O8o9/iDBML9OynJSBOC5s7+HeVwJEaX13grzawa23oGZJ5Pkz7fSRsfs1Rh7Y4S4vkVqsG2SUyzzgCIiYDuWHgAGAJBRNNnRLRwyls/fdLQCGv6F4eQGrIcEQEIqPqMoBoeEIY/1v+FPM6PWAoJf8jQj6oEYhHLVVXjDP3cioFiBGL6i44RPxkFY/pRLn/U+qIH8YVPL98lMEAMAH+k//ckoPP7AWWEB5ChM9dAOCEfJyMQGrj35NOitCPyMUqRrBetEQgUWX8eu8FYMrJL+sfhnHLio3KJJCHhkPXrGDhll+WtGRkZp6x/dNCluAZkAXcr+gy34hwQRqZNv66EW3EMiLOTadM/yum2OQbk7jjLpl+DY6AYboNaIpkG/AFjpFAsrD8pkqR7TDjTREM1s9IrPJaZRL2UlBkWS7ufMJdLh6Hz+9QY/f+NuJPsLWPIsFiK/tOotCPyMcptH4XVHBGI4bJV6o7mOKoGIH8jRpZBv5p8R4VlJ0FDJd+hXwK82JW4bXoiCaDYqZ+TVOYalHKiap9kZAa5SnXHh2SUEBPWQs8KMkPd+qXlh2UWJzyNkpAhyVRl6ZfIr8wqspSoGmdIVOXoF22qyim0ND/EC7WNzNWfpyrPep0GGajM1Z+ncgAYEjA0N0+3tsuw/LwBcG+hODMTp06/msF5OZZ78gBku5zkZvTvSBfnZuG0Wb0mgtNuo1ineFpupstynxmId/wdXJDd75jLYacwdwBu0CQYUmK+QNtnRWlH5GNUafa1WM0Rscl5FLiX6o5X5VxqOX9DkTIYmnWW7vjEvHMtb5vIksK43DN1x2flLyb100V9EswqWKw7elrR3KSqlvYnDY3FJafrjp9ePA2nbO1uSUXj3EH67eoXlY4n2yBiYoohNC6rmKM7vqB0BCWu/i/C5hmCaypn6o7PKh7C0CxrC6yG4PpR03XHJxSWMqGgxFK+iwbcOG6a7vjwvHxmDx5i+UTLjZOm6o6VZGWxeGSlJYYsSVw/Xb8DcW6Gi3MnjLZ8aubqGZN0y++77DYumTXB8omWS2eO123Op8gyy06baJlxzozRZBuUlL9iyRRLW4uKLLFgStVnstx7IqUdkY9ROc7ZvdGMVL+sMqVZ1yFL+qHGAtdY8p1jU2ZIKFTlXIzNYEujwFnBEPcUC3VEFMbmLMGl6Besy3MUMj5nhqUaHxNzZ5Fr1y/NnG3PZk7BLGuMnPGUGkRE3DYXZ5XOscCQGJM9jKos/eiRQ7ZxWcXslBdXCRieWczkPP0omCLJXFs109ICXpaRy2klI/SfhyRxyxh9R8WMCpwZnF0x2tDmlonTLUV3Mu0OLh4x1tDmpslTLZ1osSsKy8YZ95e6Ybo1BsBVU4z7Ml03e7Kl0yaaEF
w907gv01XzrDFUTXDVfH2HCmDZAuuMKxcaMy5ZNDH1S3sv44ozp6Y+wadYaUfkY5QkSZTn/hup3elLyJKdQdnXJbQcl39zygyQGJl7RULLmYVXWoiKCKYVJC6Nvqj4Qks1PhYVX5jQ7ryys1LOr9DQOL/snIR2F5cvJNUbJQ3BlRX6kaM+LRs6B5uspFgqD26qWpSwxPsVw6bjVGwpOyNfGDU/4R3jsqpJ5DhcqTPGzk7YmO7iqrEUZ7hTiiZIwC3jpyXsaXN21UjKs3NSZEhcO2ESOU7jKNeCymGMKCxIiSFLEpdMGEtxlvF2wrSKwUwqL00pmiBLEmeOHUFFQZ6h3ZjBxcweWZESQ5El5oysYHSZcQfxiuI8Fk8egZwiY8KwUqZUGpeeKMrL4ty5Y1OKiiiyxPDBBcyZMCzpx34WlHZEPmaVZF5Jadb1ST4q7iCMKf4LTlvi3ImKrKWMzbsxBYZg3qD/JscxPKF1ZdZs5hXdlCQjrrPKvkWJS//OuE9VWeO4eHBqjEsH38LwzDEJ7YZnDuO2ytQYVw65jEl5iTsWV7hL+c6YZD+PuK6qWMppRcZ3YgBlGfncNfVa6O0UnIyuqJjDBYP1txr6VOTK4i/zrkWSkiNIwCUVU7iualZC2xyHi/sWX4kiy0ld0GUkzqkYzVcm6G9h9clls/PguVdgl5XkGJLEgvLhfGvGaQlt7YrCgxdfTobdnpSjIEsSM8rK+MGCRaZs/3nlpeS4nEkxFElibEkxd56d2MGVJIk/XXMxBW53Uo6CIkkML8znrssSO+oA99x4AaW52ckxZInS3GzuvtFcb6yf3XQOw4rzk2bkZ7n5zZcvNtWL6Xs3L2XU0OKkHB5FlshyO/ndty5LyVH6LCjtiHzMkiSJqoI7Kcu+pfc3iRK1FCTsjC3+G/kZZ5jmTC78BuPyb40zE3zMEgoSMvNK/4eKLP38k5M1p+h6TivuYxi/jj7G2WW3MyHP3MUJYGHxBVwy+GaAhNsbfeOXDr6FBcXnmWYsLjmd2ypvQkIyzbi6YhkXDT4/CcZ0vjv2RmRJNs24dug53DI8cVSnT6eXjOOX067HJskJ64r0LVzXDjuN28dfZLqL8NziSv4271qcii0hoy+qccWw6fx8urkLOcD04iE8vOQaMpTEi3ifI3HhsHH8/rRLTDsWEwpLWX7BNWQ7Ei/ifa9j6dAR3HvWpQkjLn0aUVDIk8uuIT8jIyHjWIffIUO5/+JlpprqAQzJy+XxG66mJCsr4WuXen+mlpfx0LXLcJs8eVOak8XjX7ya8twc04yxg4p5+NYrydY5ZXKy8jMzeOgbVzO8ON9U9FCSoLK4gIe+cTX5mYmb6gFkZzj557euZHR58bHnaSRZkhhcmMsDt19Nicm8jQynnT9/9womjSgzzSjKy+LeH17D4OLPX5Jqn5LuvvtxKpnufZ9GdQbeotFzP57wWj50SDQkFAQqsuSgOPNyynJuw21PHEHoT82B9ezvfpzmwNpjzoJAQ0JGoCFjY2j2uYzJu5Y856iUGHX+7WzpfJYjvvW9d+NS79zyMdbonEVML7ic0gzj/Xs91fj3s7rtZXb1bAT6Wsx/+DpAYlLubBYWX8DwzNQYR3w1vNa8kg2d8XLpfW3s++7/BYJpeVM4t+wsxuUkjrb0z2hgRcMqVrVuRhUasiShCYGMhCC+3TMrfzyXDlnE9HzjPAR9RgtP1KzllcatRLUYsiTHGb1XeFVozC4cyTXD5rOgJDVGja+Dhw9/wLO12wirUZRehnQcY2bhMG4cMYezBo817YQcr3pfN/fv38QTh7YRiEWxHWPEP/+Y0JhWVM6tY2Zy4bBxKTGa/V7u372Fx/ZtxxMJ98uYVFTKrROmc+mI8bpJl0ZqC/h5cPtWHt25ne5wCJvcyyB+YxLTNMYUFnHLlGksGzsBu5L8KZLOQJBHt2zj0S3b6QgEjzEgvtjFNI2qgnxumjmNKyZP0D0pYyRPMMTjG3fw6IZttH
r9/TIq8nO5cc5Urpo5WTd51Ej+UISn1u/gsTXbaOr29ssoy8vmugVTuWreZNwpHP8NRWI8+/4OHl+1jfr2nn4ZxbmZXL1oKledPtkwQVVPkWiMF97dxfKVW6lt7kKR5fgWsIh3242pGvk5GVx55lSuOHMqeVnmnKlPk5JZv9OOyCdAwWg1bf7nCMca0UQIm5yN2zGW4sxLscnGr1vTQkiSgiQZ3934og3UeF8hEG0iJoLY5UxyHSMYln0+TsXYE1e1MJIkIydgeKOt7OlZSU+kmYgWxKlkku8Ywvjcs3Db8hIwIiBJKAkYnmgXmzpX0x5pJqQGcSkZFDnLmJm/iBy7MSOmxYsO2RKcYumJeljTvo6mYBNBNYRLcVHiLOL0ovkUOPWTX+OMGAINu2x8gfRE/bzVspGjgWYCagin7KDUlc/S0tmUuoyPscY0FQ0NR4LX4Y0GebVxG4e8zfhiIVyKnVJXLucNnsbQTOM99ZimogoNh2wzXOD90TAv1e9kT3cTnmgYl2Kj1JXNRRWTGWFwVLePEdVUXIrdkBGIRXixZi87OproiYRwKgolGVlcPHwC4/L1jxzHGVovw/h1hGIxXqnez6aWBjyRMHZZpjgjk4tGjGVSkf7RaYj3hQmrMTJsxq8joqq8fvggHzTU0xMOYZcVCjIyuGDUaKaWlhk+Ns5QybAZv46oqvLWwSOsP1pHdzCETZbIz8jg7DEjmTmk3PCxmhCEojEy7MYMVdNYfbCaNYdq6QmGkCWJvAwXZ44dwZzKigFhaJpg7YFa3t1bTU8gXswv1+1i4bhK5o8eZriFIYQgGI3hstkS2m3YX8fqnYfp8cebIOa6XcwbP4wFEyoNnU4hBMFIDJc9MWPbgQbe2XyIHm8QVdPIyXQxc1wFC6eNwDYAR5c/qUo7Ip9hCaHSE3yLNu+D+MLrEcQXV1nKJN99IcXZN+F2GGepJ2ZotAbfp8bzBO2BD9CIV/pTpAzKMs9keM415DknpXQH+iFD0BDYwu7uFdT5P0AVfQwnw7NOY0LeZQzKsM6oDezhg45X2O/ZRLSXYZMcjMqeztzC86jMtM6o9h/mnda32Nq9iYjWx7AxPmciZ5QsZVzOBMvHhA/5jvJK47u8376VkBbuZShMyh3NBYMXMj1/guUy74e8TayoX8ubzdvxq6EPGXnDuKLiNE4rGo/N5LaEno5423iydgMv1m3HG+tjyEzMG8J1lXM4s2wcdjn5O+njVe3t5PFDW3imZgfdkXjVW0WSmZBfyk2jZnF+xTjTWx96qvP28NiBbTx5cAcdoUAvQ2Jsfgk3j5vORZXjEia0JlKTz8vju3fwxJ6dtAX8iF7GiPxCbp40lUtHj7NcdKzV52P5zl08sX0HLT4fgnhUYHh+HjdMncrlE8aT7bRWg6PTH+Dpbbt4YvNOGj0ehIhvrVTk5XLdzClcNmUCeTp1RsyqOxDiuc27Wb5+O3WdPccYZbk5XDV3EstmTqQgS7+Aohl5g2Fe3LiH5e9t52hb97GoVnFuFlecNonL506kOPfzd/RWT2lH5DOqTv/zNHT/nKjaQnwr5+R6IfHfuR2TGVrwK9wO4+N//anJ/za72/+XoNp8bIvoePX9LscxmslFPyHflbzTc9T/Ae+3/A5PtNGQkecYxsLS2ylzJ07WPFnVvl280Pg32sMNyMinnL7p+12Bo4yLBn+ZkdlTk2Yc8R3i4dr7aQjWJ2AUcs3QG5iap1/fQp9Rz58OPsphf10CRi63VV7O6cUzkmbU+Fu4a89T7O45iiLJpxRHk5HQEBQ4svjqyPM5b3DyR2zr/J38ZPtzbOqoQZGkU46d9jHy7Bl8feyZXD18dtKMxoCHH2x4iTUt1YaMbLuTf59wOreNnp20E9oa8PGDta/xdv1hZANGps3BVyfN4WuT5yV9iqIrFOSH76zktSMHkXq37o5XPK0cMmw2vjBlBt+aPT/pLSNvOMx/vfkWL+2LdyrujwHgsCncMHUq/+/0BU
lvGQUiUX7++js8v2MvqtA4eaXpY9gUmSunTeL7Zy1MessoHI1x98vv8vTGeGO9/hYzSYo7opdMH8/3L15sOjemT1FV5Q8vvs8T720j2tsX52RO32d87vQx/OiqJWSZzI35LCvtiHwG1dzzZxp7fmnSWkaWnIwovo9sV+IM/z5V9zzOro67+PBSl4CBwsxBv6XUvdA0Y1/PK7zb/Kve2RMx4umjZw7+MSOyzSfq7uxew9N1v0X0/pOIAHDZkG8wLX+Jacb27q387fCf0IRq+vjvdUNvYnFJ4pMKHzL28/M9fyOmxUzXvbh5+KVcPsR8wvGO7hq+s/VfhLQomsnqrLdWLuULI842zdjT3ciX1z+IPxY2XQH2xqp5fGf8uaYdhQM9bdzwziN0R4Kma2tcXTWVn88837SjUOPp4trXHqc16DPNuKRyHL8+/ULTFUobvB6ufe5JGrwe04ylw0fwl3MvwmHSUWjz+7lh+VMc6eoyVVJfAuYPG8o/Lr0El8ny9d3BELc+8gz7WtpMMWRJYmp5Gfded5luOfiT5QuF+bf7V7DjaLNpxpiyYv75xWXkmcz7CEaifPPeF9hw8OgpjpQeY3hpPvd+/QqKcqxXWv00K5n1O31q5lOgdt9jSTghABqaCHGo7VYCkT2mHtHge6XXCQFzNUg0NGJsbP4WXaHtphg1vvdZ3fyr3oXbDEMgUHmr8b9p8Ou3dj9eh33bearut2hophyEPmfl2fo/sd+z0STjIH87/EdUEUuqBsljRx9iY+cHpmyP+Or4+Z6/EU3CCQF4sOY53mxeZ8q2xt8Sd0LUiGknBOD+6jd58ugaU7b1gS6+vP5BfNFQUmXoHz6yjnsPvmvKtjng4aZVjyblhAAsP7KNe3a8Y8q2IxTgutefSMoJAXihei93fvCmqa7jPeEQN7zwdFJOCMBbNYf57tuvm2IEIlFuffpZqk06IRD/S113tI7/fPkVVC3xZxiOxfi3J55jv0knBOIRmW0NTfz7Uy8SVRNXho6qKt985CXTTkgf40BTG1974DnCOh2Hj5eqafzgoVfZeLDOlBPSx6ht7eJrf1sxoI3wPutKOyKfcEXVDuo6f5zCIwVCRKjt+E5ihuZje9udJF8WMO4obG27I+FFMKaFeafpF0nO3yeNd5r/By1BzxZVqDxd9ztSLQ3/TP0fjiW06kkIwX3V/0hq4T5eD9X8i5Cq30m3j/GHg4/0Jr4m/1r+evgJPFH9Ds19unvvs3EnJAXGnw68SFuoJ6HdXTtfxh8LpcbY/xa1vo6Edv+7/S06w4GUqoz+fd86dnc1J7T79Zb3aAl4k2YI4JH9W9nYWp/Q9s+bPuBoT3dKjOcO7GVVbXVC239t2sT+9vakGZoQrDx0mFcPHExo+/imHWxvaEqJsbb6KM/tSHzz9PzmPaw/dDTpJomqEOyoa+KJ9TsS2q7cdpB3dh5OnqEJDja289A7m5N63OdZaUfkE64O/3ILvWM0gtFdBCLGf3QN3pdQRZjUFnANf7SWjpDxH90R7yoimi8lhkDgj7VT5zeOJuz3bMIX606xUqogqPrY41lvaLXPu4e2cGvK1VjDWpgNncYRi4O+Wqr9DSmXIVeFxtstxu9Vta+F7d3Vlkqdv9BgzGgIdLGm9UDKZcgVSeKpWuMoVXvIzyt1ey0wZB49ZPzd9UTCPHN4l6XX8dBe44heKBbl8T07LDEe3LnV0Caqqjy8bXvKHY5lSeLhrcYMTQge3rg15TZREvDQhm2GNzZCCB55f2vK1YoR8OjarWgJSr4//u62lHvHaELw5JrtxFRrPbk+L0o7Ip9gCaHS5n0QLDWYU2jzPmzAEBzxPGZh/nhyaY3ncUObXV3PWOrYKyGzq+tZQ5sPOl62zFjf/rKhzTutb6bcNybOkHir5Q3DC+0rTe9aOgEjELzUtNowavNc/TpLDA3Bivp1xDR9J/np2k2WTiSpQvDM0c0EYxFdm6eObDMdNu+fobGiZieeiH6Uas
XhXUTUxKF8fYbg1dr9tAb0o1QvHTqAN6L/Os0wVh+toc6jH6V6+/AROgKBlBmaEGxqaORAe7uuzbrqo9R3e1J2bwVwoLWdHY36Uaoddc0cbOlI+XMXQEOXhw8OH9W1OdjYzrbqxpSdNoAOb4DVu46k/PjPk9KOyCdYgcguomqTxVlUugIv6o76ojX4ozVY6XQrUGn2v4Um+r9Y+6NttIX3W+rYK9CoD2wkovV/IQ2qfo74d1pm1AX344129jse02Js796acv+bOEPQFGqkNdzS/7gQrGnbklQ+RX9qC3dS7W/QHV/Zss0yozvqZ2dPje74qw07LV3IAfyxMBs69LccXjy621JUByCiqaxqOqzPqN5raX6IL+Jv1h3SHX/p4H5LnVshXhzttcP6WyevHDhgmaFIEq/uP6A7/tqeAykVfTteNlnm1T36jNd3WGcosszrO/Xfqze3H7TUrRfihctWbtN/HWl9qLQj8glWTNO/80hGmgigiXC/YxE18R68GQk0opq337Gg2jUgDIBQrP87voDO71ORP+bp//eqL+UtmZPljfbPCGsRojoOXbLqifb/eWhCwxsNDgijK+JPaSwZdYb152kPWWfISHQYMFqDfsufuiLJdIb0oxGtAZ9lp02RpGM1TfpTm89vmSFJEp1B/e9Ohz9gKqHVSEIIOv36jE5/AEthMEDTNDp9+u9VpzdgKZoXZwjaPQPzN/BZV9oR+QRLJEjOTG6u/he31PNPzDMSJZkmI43+GeoAMlSd9yTVBNV+GTrP12qU4njpbZvEzywNjEMVM3jfrbaoP8Yw2P4ZEIYUr8Cqy7C4sPYpajCPET8ZGeUkDBTD+HVY/zwExs81pln/9gogavBeqZpmJUh8TEaMtD5U2hH5BEtJUN49iZmQpf6rCtrk7AFigF3n+TqVgWM4dZ5vhjJwFQ0zlP7P/7t1fp+K3Lb+58pQnEl3z9VTlq3/z1yRZFwJStCbVbZNv0dGlm1gijrlOPQZOXbrDE0Ich36dSVyndaqfh5jGMxT4LLea0QTglyXPiM/I2NAvlm5BpVWczOS6wTcn2RJIsegIFiOyzkgW0y5bn1Gdob175UE5GVa/+58HpR2RD7BcjsmIklWv8gKWU79CpLZ9hED4IzI5DrGocj9//Fm2weRoRj3aDGjXHsFLp2+OFm2PAocg0j+CPKJyrEVkGfvv0eKU3EyJGOoZUchU8lkkKus3zFZkhmXU3Ws42uqcsoOKrOG6I5PK6hCsfjnb5MUxudU6I7PKaqyXHZeRmJq/lDd8dMGVVpe+ABmFeszFpQNt8zQEMwu1X+v5g0ZanlxVYVg9mD9z3xOhf6YWcU0jdkG88weNsRylCqmacweps+YVTXEcnRHFYKZlfqMmSMrrEeQpPg8aSVW2hH5BEuRsyjMvJIPO/OmIpWS7FsNGE6GZV9h6bQJaFTmXq87Kks2JuZfZnkBn5i/TNehkiSJuYXnW5pfQmJO4fnIkv77vaT0LEvbGjIyC4uXYDdoWHfh4EWWEjBlZJaWziND0b+rWzbkNFQLSbeKJHPWoKnkOvSjRNdUzra01aRIMmcMGktphn5k8PoRMywtfIokMb9kOJXZ+o7y9WOmWmLIksSUojImFpbq2lwz3lp/KAkYkVfA7LJyXZtlE1Pr7Hu8yrKzWVRZqTt+wYSxuE1WX9VTvjuDpWNH6o6fOWGk6cqoenI77FwwVb/z9PxxwyjNsxZltSkKl8xJvs3G51FpR+QTruKsmzi1p4x52eQicjOMS34Pz7nS0uJqk7IYnHmOoc3Y3Auw8nVTJAejc4zLik/NX4Iipd7MTEJmeoFxCfZZ+XNwyalfBAWChcWLDW3mFEwhR2frxow0NM4rO93QZnbhKEpdeSkzVKFx+ZD5hjZT84dSlVWcsvupCo1rKucY2ozLL2VqweCUI0iqENw02rh3zrCcfE4fnHpURBOCW8cZ9wAqzczinKqRKTMEcOuU6YYJlrkuFxePG5syQ5
Ykbpo21TBy43bYuWLaREuMa2dMNixX77ApXD13SsoRJEWWWDZrIhkGPWcUWebahVNTTlhVZIkLZ44lx6LD9HlR2hH5hCvDMYYC9+Wk+lGV5/0QKcHi7LYPYVj2VaS6rTGu4D90t2WOMWyFTCm4JqX5AWYU3oIjQY5GhpLJGSVXpcw4vfgysmx5hjZOxcml5VekzDijZCmFziJDG5uscHPlpSnNLyGxpGQOFW7j1vWyJPP1URemzFhUPJFxucZhZ0mSuH28sYOqJxmJecUjmF2of/fdp+9OWZLSV1eRJKYXDuGMslEJbW+fdjqSlHxMT5EkJhSUcN7wMQltvzlrHjZZSYlRlZfP5WMS331/dc7seOv6JBdYRZIoy87mmsmJIze3zZ1Bdgp5HIokUZCZwQ2zpia0vX7+VAqyMpI+YitLElkuJzefnrg55LJ5kxiUl50Sw2m3cdvS5Bs3fl6VdkQ+BRpa+EuynHNJ9uMqy/02hVnmFs2JRd+jxH06yV7RR+TezPBccw7G7KIvMiLbfGO5Po3LvYipBdeZsl1YvIwZ+eabvvVpcu5ClpRea8r2jJKlLC1NfoGdkjuNKyvMMZaWzuPqivOSml9CYnLuaL420hxjSelkvjbqgqQZ43Mr+K+J5hinl47mh5Mu7H2sOclIjM4p5TczrzF1RzqnZBi/mn0RUhIMRZIYnlXAvadfZaoh3dTiwfxx4cVIkmQ6+qJIEmWZOdy/9EqcSuJI3djCYv5+3sUosmx6EVckiSK3m4cvvsLUlsjw/HzuvexS7Ekycl0uHrxyGTkGybB9KsvN5t5rL8Nps5mOjCiSRIbDzn3XLaMws/8k6+NVmOXm3i8sI8NhN+0oKHLcQfj7rZdRlpc4Ly7H7eLvX7uc7AynaYYsSdgUmT99+VKGFueZekxa6e67nxppIkxtx3fpCqwgnjOit10jAxIV+XdSnH1zkowYu9rvotb7FBKKwdHe+IV7XMF/MCL31qTCl5pQ+aDtb+zoehIJWbcAWXxMMKPwZmYU3pIUQwjB262Ps6r1KRMMjQVFl3LWoBuRk0isFELwWvPLPNfwNCDpMmRkNDQWFS/hmqE3oBjkn/SnlxpX8a8jzxp2Eu5jnFEyh6+PvBa7nNz21EsNG7ln37PxVu06DEWSUYXGGSWTuGPCNTiV5PIAXm3YyY+3PUtU67+N+vGMBSWjuGfGVbiTPHXzZsMB/nPdc4TUaELG3JJh/PW0K8gxOC3Tn95rrOar7zyHLxrR7VGtSBKqEEwrHsw/z1xGoSvxwnq8NjTW8+VXnqc7HEJG6jdfqI8xrqiYBy68nNLM5PIZdjQ188UVz9ERCCBLUr/1RfoYIwoKuG/ZZQzJ7T9RXE/7W9r50uMraPH6EjKG5OXwz+sup7IwPylGTVsXX77vWRq6PLqMvt+X5mTxt9suY/Qg44jkyWro6OHrf1tBdWsXiiyh9nNEuY9RkOXmT/92CROGGkckPw9KZv1OOyKfMgUie2n3PUyH/ynESUXKbHIJxdk3U5R1NXalJGWGN3KEWs+THPWuQBUnFhZyyPkMz7maoTnLyLDpJ98lUk+knj3dL7C350WiJ1VLdcrZjM+7hHF5F5FtT/0PuivSysbO19nY8Toh7cTCQk7ZzcyCs5hdcA4Fzv5PsJhjdPJu2ypWt72FL3ZiCW+n7GRB0SIWFp9BWcZgCwwPK5vX8nLTu3SfVAjNIds5s3Qu5w06nWGZVhg+Xm7cyDN1a2kLn1gczi4pnFM2ncuGzGNMTuonL3oiAV6o28aj1etpDHafMGaTZM4rn8Q1w+cwMa885b15byTEitqdPHhgIzW+EwvpKZLEeUPGceOomcwoGpIywx+N8NyRPTywZxMHe04sCCgjcfawUdw0djrzBg1NmRGMRnnx4D7u37mVve1tJ4xJwJLhVdw8aRoLKoalnCsRjsV49cABHtyylR3NJ1b6lYAFw4dx07RpLKocnnIl04iq8ua+Qzy8YRtb6htPGZ
8zvIIbZ03ljNFVpiJT/Smmaryz9zCPrd3GhiOnNhecNmww18+fypkTRuKwpZasq2oaa/bU8MR721i3r/YU13Di0FKuXTiNs6aOwmlPPU/ts6S0I/I5kKp5CUR2EtN6kFCwKYVkOqbo5oMIIQhFthFT69FEAFnKxmkfg8M+QpcR0wJ0h3cT7WU4lDzynBORpf7vhoUQ+CJ7CMbqULUAipxFpr2KTId+BnxMC9Ma2ktY9QASLiWHEtc4FIM6F53hQ3gjdURFALvkJttRQYHTiBGlPniQQMwLCNy2bMozRmI3yGtpC9XSEakjogVxyBnk2csodVXpLiwxLUaN/wi+mA+BhtuWxXB3JU6DkystoUaaQ/WE1CAO2UmBo5ihbn2GKlQOemvpifrQhEamzc3IrArcBrU8moOtHA00EFCDOGUHhc4CRmVVGjA09nnq6Yr4iGkq2fYMRmeXk203YnRyyNeAPxbCIdsocuYyIXe4boRJExp7e5poD/uIqDGy7S7G5paR59CPHLQEe9jracQbDcYZrmym5g/TPR4shGB3VzNtIR8hNUaO3cWYvBKKXPp5Rq1BLzu7GumJhnDICkXOLGYUVWCX+1+8hBDs7WqjJeAlGIuS43AxOq+IErd+dKI96GdbeyM9kRA2WaHI5WZWSYVhcub+jnYavR4CsSg5Ticj8wspy9LfWugKBdnS0kh3OIRNkinIyGDWoCG4bPoL5KGODup7PASiUbKdDqryCyjP1b/mekIhNjc10h0KIUkSBa4MZpWXk2GwPVTd0UVdVw/+SIQsp4PhBXlU5Ofp2vvCETbXNdAdjPcBystwMaOinCyn/rWhrqOb2o5u/KEIbqeDoYW5DCvSj7IEI1E21zbQ5Q8ihCDXncG0oWXkZOhHyho7PdS0duELhnE77QwuyKFqUKGu/edVaUckrWNSNS8e/9N0+f5FNHZqA6YMx1zys28lK+NcJB0HI5Fimp8W/0vUex7GHz21f0OOcwpDsm+kJPMcZCm1QloxLUy1dyX7ep6mM3xq/4YC5xjG5V3J8KwzsSVInNVnRNjnWcOmzhdpCu0/ZbzYOYyZBZcwPncxjhRPzsS0GDt7NvJu2+sc8Z/KKHGWsbD4XGYVLMClJBfS75MqVDZ37uC15lXs9pzKKHUWc27ZYhYVzyNTp+hZYobGxo59PNewho2d+04ZL3HmcemQBZw7aDa5jtSOQWpC44P2wyyvXc97rftPuQstdmZz1bA5XFoxk0JnagwhBB+01fLI4Y282bT/lNB+odPNdVUzuapyOqUZqdXbEUKwua2Bh/Zv5uXafaccBc5zuLhh9HSuHT2F8szktj+OZ2xva+bh3dt4/tDeU2pgZDucXDduMtePn8LQnLyUGAC7W1t5ZMc2VuzdS0Q9ces2027nqomTuH7SZKoKUq8bdKC1nce27ODZ7bsJxU6spOyy27h88gSumz6Z0SXJbbEcr+q2Tp7YsINnNu8iEImeMOawKVwydRzXzJnCuLLUI8ufd6UdkbQA8IfW0Nh+G5ro25bod0cbULHbhjOk+DEctuFJMXpCW9je+hViWg/o7prLgIZTKWNq6b/IdOhHYfpTR2gfbzbeTkjtMmDEf+9SCjir/DcUOEcnxWgL1bL86B14Yx1ISDq5EnFGhpLNlRU/pdytX4egX0a4mb8euouOSKth7gqAS87gi1W3Myp7QlKM9nAn/7PnDzSGmo/ljvQnCXDIDr41+stMy5+YFKMj7OGHO+7lkK8BWZJ1S99LSNgkhR+Ov56FJVOSYnRF/Hxr0yPs6K47ltehx1Akmf+afCkXlk9LiuGNhvj6uqdY31ZjyJCRkCT4ydTzuKYq8WmL4xWIRvjGe8/zdsNhQ4bSm2PwoxlL+MK4WUlt6YRiMW5/51VePrL/WM6FEeNbM0/j36fPTYoRUVXueOtNnt6zOyFDFYJ/mzmL/3fagqS2jVRN43/eWMUjm7fr5mIcz7hhxhR+dPbipLaNNE3wu5Vr+O
d7m4wZvWOXTZ/AnZecabn+yudRaUckLXzBlTS030Z80TZTVEpBlrIZVvqi4XbN8eoMrmN7yxd7F1RzDEVyMaPsMbIc5hbx1uBO3mj4dzQRM9VZV0JGluycM+RPFLvMLeItocM8XPP/iGmRJBgyVw/9OcMyJ5tkNPDbAz8hrAZNde+Vev/5YtV3mJBrboFtDbVzx65f4o36TTMAvjn6i8wrNLfAtod7+Mbm39ER9ppkxL+B/2/sNZxbZu44Y1fYz83r/k5TsDupgmjfG38hVw+fa8rWEwlx7eoHOOxtT6oR3HcmLuHLY04zZRuIRrhm5WPs6mxJivGNifP5zrSFpmzDaowbX36aTU0NSRXBu2XiNH4yf4kpZySqqnz5hed5t7YmqWpDV4yfwC/POtsUQ9U0vrXiFV7fd9A0QwLOGTuK311+gSmHRwjBHSveYMWWPSYJccbpo4fzp+svwaakD5kmo2TW7/Q7+xlUKLKHxvYvg2kHAUBFE17q2q5B1RJ3sg1Eq9nR+tUknJA4QxUhtjV/wVTXX1+0iTcbbzfthEC8C7AmorzZ8G380ZaE9v5YF0/U3mHaCfmQofJU3Z10Rk5NwDtZgZiPvxz6hWknJM4QaAjur/4tjcGjCe1Daoif7/m9aSekjyEQ/PHgfRzyVie0j2gxvr/973RGzDkhcUZcv963nG1dhxLaxzSV/9j0cNJOCMAv97zEmtZTt6JOeU5C8I31TyXthADcs+ttXqnfbYrxH2teSNoJAfjTrrU8eWiHKdvvrXqdjU31SVfifWDXVh7YtdWU7c9Wr0raCQF4es9u/rJxgynb36x6n9eScEIg/t16bd9Bfv3OGlP2f1+9ISknpI/x3oEafvHyO0k9Lq3klHZEPoPq8PwWQYzk20eqxNRGenyPJ7Ss7b4XISKYd0I+ZES0Thq8iRl7upcT04KmHYQ+CTSimp893U8mtN3U+SJB1ZsCQxDTInzQ/kxC23Udb9MT7TK9eB9PUYXKyubnElq+2/YBLeG2FBjxRfPp+pcT2q1u3Ua1vznlsu33V7+a0GZN2wF299SnxJCQ+MP+N0gU5F3fVsP6tpqkHYQ+3bPz7YSP3d7RxJv1h1Jm/Grr6oS9Tg52tfPcob0p10T+zaY1hGJRQ5v6nh4e27E9ZcafPliPNxw2tOnwB7hv/eYUCXD/B1vo8AcMbbyhMH9f9UFK8wtg+YYdNHR5EtqmlZrSjshnTDG1GV/wVVIvCy/o8t2HMFgIomoPzf4XDOqMJJJGvedRNKF/EYxqQQ72vJgyQ6Bx0PM8MS2ka6OKKFu7Xk7aCTmesbPnTUKqT9dGExrvtr2Rcgl9DY1t3R/giXbrPw8heLUp9Ts2DY2t3btoDbUb2q2oX5NyKXUNwa6eamr8zYZ2j9esS5khEBzytrCr59QjnMfr4cMbLTXjqw90s67VOIL08P4tlhrltYf8vFl/auL38Xpk93ZLDG8kwstHTk38Pl6P79qZ8hFkiOeWrNi319DmqW27LDSYiCdOP719l6HNC9v2Eoml3ipDkiSe2rQz5cenZay0I/IZU7fvMctzxNR6AqF3dcebfCt6Iy6pK6p10h54S3e82ruS2Ek1TJJnBKj2vqk7vt+zlqDqtcRQRYyd3fqvY69nO93RxNtQRhII1nes0h3f5z1EY8h4gU8kGZk3W97THT/kbWC/96ilZnyKJPNCw/u640f97WzsOGKZ8WSt/p1vc9DD240HLDbjk3j08Ebd8a5wkOdr9lhulPfgPv0ogT8a4cn9Oy0zHti5RXc8oqo8tmN7ylGdPj24datulErVNB7ZtM0SQwh4ZNN2VJ0IkhCCR9aZ24bSkyYET2zYbsmZSUtfaUfkM6Zg+AOS3y45WTaCEf293Z6w/sXLrCRshvO0BncgWeo6DBIKbSH9u5j64F5kywxoCOrvO1f7D1hmCARHfKceke3TPu9hZIt/yhoaezz6d8e7eo5Y7J0cv3Pd3nVYd3x7V+JcGDOMzR
360YrtnckldfbPEGxs13+uuzqaLbeQ13qP/Oppf2c7wZi1mwFNCHa2t+g+15ruLnoSbKskkgCqu7t0t2davD5aff5+x5KR0TyeUJjajm6Lnzp4gmGOdnZbnCWt/vSxOCLhcJipU+OdDLdt2/ZxID+3UrWuxEYJJRkmrEbVbpLPPzlZgqimH42IaD4LWz99BI2IASOs+ix1HY4zhGFUJaj6LS/gAH4DRiAWsBQ+75Mvpr/P7ouFkiqBryevAcMbDaW8LXMiQ387zhu1trD2yReL6I55BogR0VTCav/Ohseig3DiXP2/XwPJ0HNoPKEBZAT7fx3e4AC+VzqMtKzpY3FEvvvd7zJ4cOrlp9MyLynFgmGnzIP+PHqVVZMlyOjPI0s2Uu0G/CEhMSP5fqqnSjF4PxRJsfoyALAZMGwJuiubZhj0qLFJCgNx0N+mU6UUwC4rlqMVEH+u+oyBueTZDJyygWIYcQaSoVcjY2AZ/c81kEdi9V7Hx8FIy5o+ckfk1Vdf5Y033uCee+75qFFpATalFOsfq4qi6FctdCjFlrdNQOBQ9KsvZigFSJZfh4TLpl/eOVPJszg/yChkGjCybbmW99glZHLsebrjufZs3aJi5hkS+Xb9s/75juyUTuScrEKHPqMgxeqopzAM5ikaIEaBU78irVEZ+WSU63DpFusqcqdWEfdkOWSFLHv/Nx1F7oF5HbIkke/qv0VA4QC9DoACd/+MPHdGyv14TlZh1sA937Q+1EfqiLS0tPClL32Jhx9+GLeJL1w4HMbj8Zzwk1ZyynZfgvUcEUG2+0Ld0dLM8wZg20SlJPN83fHh2WcOCGN41pm64+NyT0ezyNBQGZejX3xqav7clE/l9EmgMT1/vu747MLkKor2zxAsKNIvODavaLxhpMGMJODM0ukGjJFkJNnV91SGxPmD9au4zioaSq5Dv2+OGcmSxMVDJ+mOTyksY1CGNYdHkSQurRyvOz46v4jK3HxLwTZFkrho5Fjdbb3ynBwmlZRaWsQVSWJpVRVOnT43+e4M5g6rsHT6R5Ek5g6vIF/HEXHZbSweW4Uip86QJYmJ5aUMzksX1vwo9JE5IkIIbrnlFr7yla8wc+ZMU4+56667yM3NPfZTUVHxUT29z6yyM85FkVPv8wAKbtdiHLZhuhYFGafjVKy0uZbJdU4ny6Ffhr3ENZlc+3BS39eQyHOMoNilX7681DWCMtcYS9szufZSKjOn6o4XOwcxOnuSpWTSLFsOE3P1K58WOPKYVTDVEiNDcRlWV82xZ3Jm6XRLx17tso2lg/SvBW6bk0srZlpiyJLEpRX6r8Oh2LiuaoalxVUIwTWV+g6VIsvcNHaGpXwXVQhuGK3PkCSJWyfqj5tl3DhhqqHNzVOnWYroqUJw4xRjR/mGmVMtnf5RheDGmVMNba6bM0W3nLsZaUJww1xjRlqpK+m/+O9///tIkmT4s2/fPv74xz/i9Xr5wQ9+YHruH/zgB/T09Bz7qaurS/bpfe4lSQ7ysm4mdR9TJT/r1gQMmYqcm0jdSdAYknNjAobEuPyrSD0pVjAu78qESZyzCi+2kLAqMbPgYqQEC+ei4nNS3taQkDi96Ox4romBzh20OGWGjMzSktNxKMb5RZeUL0j52KuMzNmDZpFl0CkY4Iqhs1NmKJLM0kETE27xXF05PeWvlSJJLBo0kvLMPGPGyCkp34ErksSskiGMyjNu6nbZ6PE4bbaU/goVSWJcYTFTio1vKC4YPZocpzMlhixJDM3NZX6CG8olo6soznSn5BzKkkRxViZnjKoytJtbNZSKgtyUGBKQ43JyzsTk+lelZV5Jr1a33347e/fuNfypqqri7bffZt26dTidTmw2GyNHxtu0z5w5k5tvvrnfuZ1OJzk5OSf8pJW8CrK/jtM+AZLO45DIcV9Npkt/O6NPQ3JuJNc5PQWGTLH7XErc5ya0HJVzIYMzZiedKyIhU+6ey8gc/a2fPo3PWcTo7PlJR0UkZCrcE5
ier7+F1acJOdOZlX960gwZmSHuSpaUJmaMzxnN2aWLkpq/jzE4o5TLhyR+r8bkVHDt0CVJMxRkSl353FaVmFGZVczXRy9NniHJFDqy+Pa48xLaDnbn8sMpZ6fAkMixZ/CTqYkZhS43/zMn8Xf8ZMmSRKbNwS/nJX6vsh1Ofr34vKR9KlmScCo2fnPG+QkddafNxm/PPS/pU1kSYJNlfn9eYoZNlvnNZecjkdytjUT8tfzm0vOwJUislWWJX10Zt0vWFZEkiV9eeR5O+8Akhad1qj6ypndHjx49IcejsbGRc845h6effpo5c+YwZMiQhHOkm96lrpjaTn3b1YSj+zGbM5KVcSGDC/+MZPJUTFTtYVvLF/BGdptkSBRmLGRi8R9RZKc5hubnzYbv0BragbnbWIlBGdNYMvhX2GVziWVRLcyzdT/niH+LKYaERJlrNFcP+29cirlcgJgW48GaP7CjR78Q1okMmTLXEL4+8kdkGSSRHi9NaPzl0IO8126ulLWMzCBXMT8e/58UOPUTbk9m/OHAs7zYuNY0o8SVxz1Tv0pZRqGpxwgh+MP+N3jwiH6BteOlSDIFjkz+Puc2hmcVm3oMwJ/3vsvv96w2yYg7IQ+cfj3j8sxvS/5zzwZ+vvlt3Z7RJzMybQ4eWno1U4vMnzJ8bM92fvTeSjDJcCo27jvvcuYONr/1/dzePXznjdcRInH8UJYk7LLM3y66mEXDK00z3th3iP9c8TKaEAm3g2RJQpYkfnfZBZw9dqRpxnsHavj3x14gpmoJGZIU/1u/a9k5XDR1nGlGWnF9Irvv1tTUUFlZydatW5k6daqpx6QdEWvSNB8t3Xfi8T8F/faekQENWcqhIOerFGT/e8JthpOlaiEOdf2KJu9TaPSVbD+eE2coUiYVOTcxPO8bvUdzk2FE2NLxN/b3rEAVffUbjmfEL/OK5GJs7uVMK/o3wyO1/UkTKu+1PcLGjueJitCxOU9kgCLZmJp/HmeU3IrdpDP1IUNjZctzvN3yEiEtiIR0yraQhIQsKcwuOJ3Lym/CqbiSYggheLFxJc81vIZfDSAjnXIkNs6QmF84i1srrybTltxJACEEzzes4aGalfREff0y+u47FxZP4d9HX06eI/nkzefqNvGXA2/RHvYi97awP5khgEWlY/nBhIsodiV/jXjx6E7u2fU2TUFPv+3tZUlCCMHC0pHcOe28hFsy/em1o/v5xeZ3OOrr7pfR97vTBg3j53POoTIn+Ryvd44e4b/XvsORni4UST5le6uPMWtQOT8/fSljCsw7bH1aW3eUn616hwMdHYavY+qgMn62ZAkTS0qTZmypb+S/X3+H3c2tKLJ0Sl5H3+8mDCrhx+ecwfQhyZeF2N3Qws9ffJvt9c2GjFGlhfzg/MXMHTE0aUZaaUckrZOkqp30+JfT7X+UWKwBQQRJysBpH0Ne1q1kuy9ElpJb8E5WVPXQ7H+OBu/jhGKNaCKMLLlw2ysZkn0dpZkXosjWTitENT+HPa+zv2cF3mgDqgijSE5y7OWMyb2cqpyzscvWjhxGtBB7elaxpfNlOiP1REUYm+Qg1z6I6QXnMzH3TFyKVUaEbV3reK99Jc2heiJaGLvkIM9RwPzCM5lTuJhMm7VTF1EtyobObbzW9A5HAw2EtDB2yU6BI5clpQs4o2Q+uSYjLXqKaSrvt+/iuYY1HPTWE1LD2GQbBY5szh00mwsGz6PQOQCMtgM8UbOeXd31BNQINjkeAbloyHQur5jJoIw8SwxNCN5rOcwjhzayuaMOfyzOyHe4uWToJK6pmk5FprmIkZ6EELzfXMuD+zezvvko/lgERZLIc2RwSeV4rh89LSUH5GTGhqZ6Hty9lTX1tfiiEWRJItfh5MIRY7lhwhRG5RvnnZhhbG1q4uEd21hVXY0vEr8xyHG6OHfUKG6YPIVxxck7OSdrZ2Mzj27ezlsHjuDrLYaW5XRy5ugqrp8xhUmDrSTLx7WvqY0nNmznjd0H8Y
bCCAFZTgcLx1Ry3ZwpTKkoG5BCgZ9XfSIdkVSUdkQ+GgkhEv6BCREiEHyRUOg9NK0bSZKR5UIyXOfici1BSpA8aYahiQhdgdfpDq4mpnUDEnY5nzz3meRnnImUIHJijhGl0f8uTYE1hNVuAJxKLoPc8ynPPCNhcTZzDJUa3waO+NYSVHsQQsOlZDM0cwYjsxdik42TQM0xNA56t7Oz5wP8MQ+qUHHbshiRNZEpefNxJIjOmPvMBXu9+9jQsRFPzEtMi5FpczM6exTzCufiShCdMcvY7TnC6tbNdEU8REWMLJubsdnDWVI6i8wEyaymGT11vNa0jfaQh4gWI8vuYnzOEM4vn0GO3ToDYE93I88f3U5LyENQjZJjdzE2t4zLhk6lwGnsrJpl7Otu5ZkjO2j0xxlZdidj8oq5qmoKxQmOCPdd2hNxDnZ38NSBndT7eghEo2TZHYzKL+Sq0ZMpy8weEEZ1dxdP79lFbU83/miULIeDyrx8rho/kSE5uQPCqO/p4ekdu6nu7MIXiZDpcDA0L5crJk9keH7egDDSMq+0I5JWylLVNry+v+PzP4wQHuLJqH21NmxADEUuIyvrNrIyb0M2mYdxvKJqF82ef9Hqe7TXATmeEf9vu1xEafaNlObcik02vhj2z/BysOdxDnmeIqx2IqEcq0vS998OOY8RuVcwOvc6HIrxxbA/RbQg27tWsL3refyxjn4ZTjmLSXkXMq1gGW6Dwme6r0MLs7b9Dd5vf5XuaBsyyrHaJzIyGhpOOYPZhWeysPgicu3J31XHtBjvtK7mjZaVtIbbjs17IsPJwuIFnDvobIqcyd9Vq0Ll9aZ1PN+wmvpgC4oko4n4hk4fwyHbObN0NpcPWcLgjOTvqjWh8UrjFh6veZ/DvuaTGPFtMJukcM7gqVw/fCGVWSVJM4QQvFS/k4cOr2N3d1M/jPgWxXnlE7lt1HzG5CZ/5y6E4NW6ffxr3wa2tjecyJCk+I6hBOdVjOVL4+YyubAsaQbAm0cP8Y+dG/iguR5FktBEvJ7M8SdLzho6ki9Pms3M0vKUGKtra/jn1o2sqTuKIsXfH03EGVLvfy8aVsm/TZ/F3CGplWtYf7SOez/YxLtHauJbaScxVCE4bdhQvjB7BgurhqfESCt5pR2RtFJSJLqPtvZr0LR2SFjoS8ZuH0dx4WMoivkLeihazd6WG4mozaYYLlslY0sfwmkzvxfsjzbxbtPX8EXrSZREKyHjtg1i4eC/kGU3fyH0xzpYUfd9OsM1CY//xhn5XFbxKwqd+vVZTmV4uO/IXdQHDydkyMhkKFl8seoOyt3mEwT9sQC/P/hHDnjjLeeNODIyLsXJ7aP/k5HZ5hMEg2qYu/bcx+auvQmTNhVJxi7Z+a8JX2JKvvnjkmE1yk92LGdV6+5+c29OZtgkmbumXs/84rGmGREtxn9tfYEX6nb0mxdzMkMC7pl5BWeX6xcmO1mqpnHnptd59NDWfvNiTmTEF91fzbmQy6v0C6ydLE0I7tq4in/s3NhvrsfJDE0Ifj7/LG4YZ75wnhCC329Yx+83rDPFUIXgB6ct5EvTZpqOSggh+NeGzfzvqvdMM/7jtLn8+2lz05GPj0HJrN/p7rtpARCL1dDadplJJwRAIxrdR2v75WgGDfKOVzjWxJ7mq0w6IXFGKFbDnuariKodphihWCerGr+IP9qAmZM8Ao1ArIV3Gr5AMNZqjqF6ebr223SGj5qqQRJndPF07X/SE2kyxQirQf5++Kc0BI+YYmhoBFQffzv8X7SE6k0xIlqEX+//LQe9hxC9/yRiBNUQv9z/a2r9taYYMU3lZ7v+wdauePfgRK9EFRphLcJ/7fore3qOmGKoQuNH2x9jdeueXoYxRRUaES3Gd7Y8xMaOQ6YYmtD44ZbneLFuR/z/TTBUofGtjU/ydpN+5+TjJYTgxxtf47FDW3uZiRjx0yXfWf8iL9TsNsUA+MWGuBPSN0cihgB+tHYlj+/bbprxh14nxC
wD4K733+Vf2zabZty3cQv/u+q9pBh/eH89f3x/vWlGWh+P0o5IWgih0dZxI0J4Mecg9EklFquhs+vbJhiCg21fIap1Jc2IqM0cav+mKev1LT8gGGtLqjy8QCWsdrO2+f9hJkC4sulueqJNSTI0wlqAF+rvMMVY0fBPWkL1SRUpE2hEtQj3HfkFqkj83J44+iRH/NVJMgQxLcavD/yOiBZNaP9I7cvs7DmUVDM7gUATGj/d/XcCsWBC+4erV/N+276kCtOJ3p/vbn2Yrogvof1jRzbySv2upGp29Nl+e+NTNAUSO+vPVO/kicPbkq4LIgHfWfciRzyJnfVXqvdz7y5zR8hP1g/XvsHujpaEdqtra/hdrxOSrH6xZjWbGhsS2m2ub+Cud95NifGH99fz7pGalB6b1kejtCOSFqHwamKxQyTnIPRJJRh6lVjMuAquL7wFf2RHygxP6H0CkYOGVt3hg7SFNqXUo0ag0hneRVfY+M6yO9LIEd/alPrHCFQ6I7XUBbYa2nmiXWztWpMSQ0OjK9rGXo/xnaU/5md123spVZXV0OiJetjYabyghdQwLzakyhD4YkHebt1kaBfVYjxRsyalQqkCQUiN8mK98XulCY37Dr2fAiHujMSExpM1xq9DCME/9qxLqYJp3KkSPHIgcTTh7zs3pFzeXgIe2L0lod0/t25MuXeMLEncZyIqct/GLZYq19630XzkJa2PXmlHJC18/vtJvkLq8ZLx+R82tGjxPmyRodDqe8TQ4rDnKUtdgSUUDvU8aWizq/slS12BJRS2dz1vaLOh8y1SL20fz0l5v/1VQ5s17WtNRU30GRIrW94ytFnduoWQFrbAgBcaVhtGkFa37qE7GkiZIRA8fXSdYVn5Na2HaQ6m3oBTE4InqjcR0WK6Npva6jnk6Uj5U1eFYPmR7QRiEV2b3R0tbGtrSrl3jCoEKw7vpicc0rWp6e5iTd3RlHvHqELw+pFDtPj0o1QtXh8rDx5OuXeMKgRrao5S29Wd0uPTGnilHZHPuVS1lVDoTVKLVBybpfeUTf8XhpjmoTPwsmVGm+8pNNH/hVbVwtR4X7LUsVegctT3OlHN3/+40NjZ/bKlbroClWrfWgKxLl2b9e1vWOh/E9+iOezbRWdEP+fl7dZ3LDIE1f4a6gP6+SivNq2x1FBQAA3BVvZ5a3RtnqvbYKm5HEBruIdNBrkiT9VsttQdFqAnGmRV0wHd8eWHt1lq9gcQjEV55ah+PsryAzstM2KaxorDe3THn9qzy/J7BfDMPv3I5LO79PlmpUgST+3YZXmetAZGaUfkc66YWoeVu+8+CdGNEP3fxURizQj07wbNShNBYmpnv2MhtRNNpH733SdBTDdpNaz5iOg4KckxBJ5o/3vtqojhMXBSklFnWH8/vy3cPiCM1nCb7lhjsN2Ss9OnpqD+c631tyWVf6Kn+kD/3yuAam+7pe6wEF/46gL6n+sRT0fKzf76ZJNk6nz6jOqeTusMWeaoR59R09ONhSa3QPwYdG1Pt+54bVc3Vn0dARztNpdkn9ZHr7Qj8jmXGICFtU+ajiOiiYFjqDpzxUTq4flT5tL6nyuiJU6cNKuozlxhVT/snazCWv9zqUK1tC1zvIIGzzes6W8TDBQjpFpnyJJEQNV3Yo22O8xKQiIQ02f4ogPBMJ7HG7HOEMKY4Y9ELDufGgK/wXMNRKNYLTqhCXGsYmta//dKOyKfc0kWS6IfL1nqv/CYLA0cQ5H6ryZpl5IvrKYnvTLxjhSKt+lJb65ke8sYyan0X0FUkRRsSfb70VOGDgPApRhXlDUrt8F7kmFLrt9Pf9KEINOmz8i0W2cIBG6D55o9IAzIsuu/5zkO6wxJMmZkORwpJ8P2SZYkMh36DLfdPiCMbKf19yOtgVHaEfmcy6YMYyC+BrJciKTjcDhsZUgk14SuX4aUiU3pv3Ko01aIIlnrZQMgYyfD1n+BNqeciSuFKq8nS0Imx95/xU1FspFnt9YPpE
9FDv2qnqXO5KuK9qdBLv3GZkMySi3liPSp3K3/XCszSyzniAAMdeu/5yOziy3nPahCMDxLv/vwqNwi6/kbQqMyW58xMq9gQHJEqnL1q/dW5RdY/jQ0IajKN2AU5qeccNsnCagssNY/KK2BU9oR+ZxLUYrIcJ2L1VMzWZk361YrtMnZFGZebJGhUJJ1jW5vGEVyUJlzseVTM8OyL8CmE62QJJlJ+RdZPjUzMvt0Mmz6JeXnFZ5taQGXkBmdPZU8h/7iuqT0jJTnh/g+/sisEQzO0C8vfv7gBZbC9BISw9xljMrS7356WcUcSzkiElDmymd6gX412iuHz7CcI1LgzGRh6Sjd8WtGTrWcv5Flc3BuxRh9xpgplhkOxcbFI/QrxV41fuKAOAnLxk7QHb984vgBcXaunDzR4ixpDZTSjkhaZGXdirUTLZCZeb3heGn2jRYZKiXZ1xlajMi5wvKpmRG5VxjaTMy7wPKpmcn5FxvazCpYYvG0icb8onMNbU4rmoc9QcM/I2kIlpaeaWhzevE0w22VRBIILipfaFiOe0HxWAoc1joVXzFsHrJBpGBucSXl7ryUPxEZiWsrZ2GX9Z3kKYWDGZNbnDJDkSSuGTkNl03/Mx2dX8TMkvKUtzUUSWLZyAmGWzxDcnJZOGx4yhEkRZI4f+Roitz626BFmZmcO2aUJcbCquGU56bbhnxSlHZE0sLpOA27bTypRSxkMjIuxqYY94LJck4hyzkjRYZCnusMMuxVhlY5jipKM+alFBWRUChyTSPfOc6YYS9lVPailKIiEgrFzhGUZ0w2tMuy5zKjYHFKzoiMTJGjjLHZUw3tMpQMlpSkzihw5DMzf7qhnUO2c0n54qTnjzMkcuyZLC6ZaWhnkxWuH356yoxMm4sLBxu/DlmS+eKoBSnFXWQkHIqNK4cZMyRJ4isT5qfEkHqf4w2jjRkAX50yx1LE4pbxiRlfnjYr5QiSJgS3TZ2R0O62WTMs1UP54mzj71VaH6/SjkhaSJJEUeGDyHIeyTkKCnbbGAry7jZlPar4rziUkqQZLlsFI4p+a8p6bun/kGkbnJQzIqHgUoqYV/orU/ZLB91OgWNYUs6IhEKGksNFQ35uquHWpeW3MSRjRFIMGRmnksEXqn6ILCV+/VdVXMGY7NFJOSMyMnbZzu2jv4VdThxRuXbYOczMH58kQ8Im2/jZxK+SoSROKLx2+ALOLJ2UFENCQpZk7pl+M7mOxMnUVw2fweVDpyXltkm9//rjnKspyUh8933J8AncOmZWEgSOPZ8/nHYpQ7MS5zwsHTqS/5g6LylGn+5ZeD5jChJ3RZ5fMZTvn7YwJcZ/L17K1EGJuwlPGTyIn569JCXG9xafzrxhqXX6TeujUdoRSQsAm20IJcUvoChlJP5aSICEwz6V4qKnkWVzoXGHUsz4QU/isg01zXDbRzNu0JPYFP2cihMZuSwuv5ccRxWYWjYksuwVLBlyHy6bfoLciQw3y4beQ4lr1LHnaUyQyLYXc+Ww35FtN9fe3i47+eKIO6jM7OsOm4ghk2XL42sj/5tCp7nW8zbZxrdG/weTcicee55GkpHJtGXyo3HfY4jbXFt4RVL44fjbmFc42TTDbXPxi8nfYFS2fm7ICY+RZO6cfBXnDZ7aO4cxQ5FkXIqd38+4lan5w00xJEnizqkXctXwmaYZDtnGn+dcy2kl5jsV/2j6Ur48bm7vHIkZiiTzxwWXcY5BbsjJ+vb0BXxr2mkmGRKyJPGbhedz+Uj9vI2T9eVpM/lBrzNihiEBP1+8lOsnTTHNuG7aFH569hIkM4zecvDfX3w6X5ydOOKS1scrSZjpwPV/pGTaCKc1MFK1Lny++/H5H0DT2gAb8S62fQtuDJutiqzML5CVeR2SlHwOQEzz0Op9jGbvA0TVFiRsx+VdyEAMp1JBac7NlGRdhyInfxompgU57HmaQz3LCcSakFCOJU7G28SrZCiljMy9mhG5y7CbdKZOZETY1f0y27
ueozvagHwSQ0PFrRQwJf8SJudfjEtJ/sRNTIuyqWsVa9pepjV8PEMgIaOhkqnkMK/obOYXnUuWQRKsnlSh8n77Wt5ofpO6YD0K8rH+JbIkowqVTMXNGSWLOav0TPIceUkzNKGxqnUzLzau5oD3KIokI0QfQ0IVGm7Fxbll87l48CKKXcmfaBBC8FbLTp6sXcuO7tr4CRERz2fpY7gUBxeXz+TqYadR7jbneJ7MeLt5Pw8fXs+G9ppjp1CE+JDhlG1cOmwqN42Yy/Cs1E5ArWo8zP37N7Km6UhvToeEJkRvO3sNu6xwaeVEbhszm9F55pzbk7WmsZb7dm3i7brDx/JGNBFf1DUhkGWJi6vG8YUJM5lYpH86ykgbGuq5b9tmVlYfBuJ/3WofA4EEnD9yNLdNnWEqEtKftjU28cCmLby67yACjn0OsiQdqzeydNQIbp05jVkVQ1JipJW8klm/045IWv1KiBjB0BuEw++had2AjCwXkJFxHk7HPMPtBSE0ECGQMhLYqXQHV9MTWkVM7QEkbEoe+RlLyXHNRzJIIBRCQxMhZMmV0K4l+AFNgfeIqD0IBE45j0Hu+Qxyz0My2MIQQqCKIEpChqAhsJ3DvrUE1R4EGi45h6GZ06nMmmu4TSKEICZC2CRnQkZtYD87u9fjVz29C3cmI7ImMiF3FopBXRAhBFERxibZEz6XI/5qNnRuxBP1oooYblsmY7JGMbNghuFWTJwR6WUYR7sO++pY3bqF7oiXqIiRactgbPZwTi+ehtOg9ogQgogWxSYrKAm2ng57m3m9aRvtYS9hLUq2LYNxuUM4e9AUMmzGjLAWxSYp2AySSyFecfWFuu20hLyE1CjZdhfjcgdx4ZBJZNn1HfQ4I4YiyYYJrAC13i6erd5JY8BDMBYhx+5iTF4xl1ZOJNdh7KCH1RgSEg7FmFHv7eGZQ7up8/YQiEXItjsZmVfIslETKHAZ184JqbHePBhjRrPPyzN793DU040vEiHL4aAyL59l4yZQ7DbeGgur8arMTsW49k2738+zu/ZQ3dmNLxwm0+FgaH4el08cz6BsawnNaSWvtCOS1scuTW0jFHicsP8RNK2JeHklGVkZhivzZpzuK5Hl5O/Wj1dM7aTTv5wO7yNEjpWml3AoFRRm30hB5tXYFGu1AaJqD/W+F6j1PE4g9iHDpZQyNOcqKrKX4VT0azWYUUT1c8j7Onu6n6UnUtcbDZJwK4WMzbuYMbkXkWmzVkskooXY2f0uGzpepjV89FjEKcuWx7T8s5iRfw55jtTupD9kRNjUuZ5VbSupDxxFO8bIZl7h6SwsPpNii/VKolqUte1beaVpNYd8xzPcnFE8h3PKTqc8I7W79T7FNJX32nbxTN377PbUHjvimmVzcdag6VxaPo/KLHPbXUaMd1v383j1B2zprCHWy8i0OTl38CSuGjabsbmpRQT6pGoa7zYf5uFDm1jXWkNUi58gc9vsnDtkHDeMnMnkAuOk8kTShOD9phoe3LeF9xqrCatxRoZi4+yho7lp7DSmF5ebyoPSkxCC9Y11PLRzG2/XHjnBEVkyrIqbJk1l7uAKS4y0PnqlHZG0PjYJzYev5w4iwRXEF+2Tj7b2XSwcuDJvxJ3zQyQpuYqbmhakoetOuvxP9fas6e8rKyFhIz/zasrz/ws5ye0cVUTY1/Fr6rxPoh3ri3MyR0ZCYnDWhUwo/JFuvRHd1yFibGq/l93dT6OKaL+voy85tTJrMaeVfgdnkts5mtBY3bqcte0riIow8ff/RI6EjEAwNns2F5V/ncwkt3OEELze8hKvNb1ISAv2bnWdyJCR0dCYmDOFG4Z9gTxHcg6iEIKXmlbxZN0r+GIBZKRT6oX0MSbljuYbI2+gxJW8g/hiwwf84/Cr9ET9/TIUSUYVGpNzK/ne+CupcCfvvL1Uv43f7n2d9rAPuXfboz/GpLwh/GTypYzKSd6xeq1+Lz/f+gbNQW/v9k3/jPF5pfxi5oVMLEje6X
m7/jA/+WAldb4eHUb8d6Pzirhr3jnMKEl+G+T9+lruWP0m1T1dhozK3Hz+Z9FZzB9iLo8orY9faUckrY9FmtqOp+Ma1NhBzNUIkbA55pFT8ACSyUVc1Xo43Ho9wchOTnVy+pOM2zGFqpJHUGRz35mYFmBj81fpCm/BXANAmWz7KOaU/ROHyQhMTAvzZuMPqQ9sNMWIV18dwvkVvzcdHVFFjKfr7mGvZ50pewmZXHsRN1f+nHyHucVPExoP1dzL+s41puxlZLLtOXxr9A8Z5DK3+Akh+MeR5bzW/J5pRpbNzU8n/gfDM80l0Qoh+NvhV3i8dpVphtvm4J6pX2J8rvnF7+8H3uEvB942x5AknLKNP8++iRmFw00z7tv/Ab/YvtIcAwmbLPO3BVexcNAI04zHDmzjR+teBxJ/e2UkZFniz4su4Zyho00znjuwh2+/9eqx3CEjScQTiH9z5nlcOlq/wFpa/3dKZv1On5pJKyUJLYin86YknBAAQSyyHm/XVxEmmq5pIkx1220EI7sw54QAaAQiO6hu+yKaSNzkSxMxtrR+m67wVsx3IdbwRQ+xseVrqDqN5Y6XEBqrmv/btBMC8aJknmgDr9XfTkSnCd+JDMGLDX9mr2e9qfk/ZLTzcM1PCMS8ph7zVP2jpp0QAA0Nb9TD7w78L56ouW6njx19ybQT0sfwxQL8ZNcfaAvrd9E9gVG7yrQT0scIxMJ8Z9u91AX0Ow4fr+U1H5h2QiC+7RFWY3x9w0Mc9Oh3Tj5eK2p2mHZCIJ64G9VUvrLmSXZ2Npp6zGu1+/nRutd7E5jNMVRN4+urnmdDS50pxqqj1Xz7rVfRhDBVjVcQf7++/darrD5abYqR1idXaUckrZQU8v8LNbqL5KulakTDbxEJPpfQstP3OP7wxhQYKv7wOjp9TyS0bPS9THtwDeYdnbgEKj3hXdR4HktoW+N7jxrfasw7Oh8yuiM17OhMzDji38627reTZmhodEVaWN22PDHDd4h3Wt9Iav4+hifazXMNTya0rfU38nT9aykxfLEA91c/k9C2MdjJPw6/kgJDEFQj/G7/cwlt20Nefrk7NUZYi/GzHc8ntPVEQtyxKXmGIN6X5v9teIFEAfFgLMp33k+NoQnBt957KWHhsaiqcvtbryR8Lv1yhODbb71CVB2YbtJp/d8o7YiklbSEUAn67yfZxftDyb2PN2II2rz3pTg/gES79/6EF7caz6Ok/mcgqPU8ljC6s6f7mZT70wg09vY815tToq8NHS8jW2Bs7VpJJEF0Z1XbypQZGhobOtfij/kN7V5rftcS44OO7XRGjCMvzzesS7mEvio0NnYeoCHQbmj3bN3mlBZWiC/gO7rrOOBpNmbU7CCixQxtjBiHPO1s7ag3tHuxei++aCSliq8agga/h/cajSMWb1QfoiMYTIkhgI5gkJU1h1J4dFqfFKUdkbSSVjT8NkIzFzruXxpqdBux6C5dC394PZFYNcne4X8oQTh2CH94g65Fd3gXnsgeUneoIKQ20xZ8X58RqaUpuNVSf5qw2kOtT3+roifSxn7vxmMnSlJRRAuxq1uf4Yt52dT5gSWGKlTWd+gzArEgb7eut8QQwJsta3XHw2qUFxvWW2qUJyPzQoP+FlhMU3mi5gNLDEWSebJW/7srhODBg/rj5hgSjxzabMi4f+8mSw3mFEnioX1bDG0e2Lkl5f43fYwHdmxN+fFp/d8r7YiklbTCwZew1kkXQCESfFl3tDvwEvFialZkoyegz2j2r7TUrRfipdub/PrbFdXe1Za69cYZMke8+rkGe73rLXcjBYldPfpOwo7urWgWGyMKBBs79RNpt3XvJaIZR37MMN5t26g7vr37CL5Y4rweI2lovNmyTXd8V3cDHWGfJYYqNF5t2Kk7vr+nlTp/twVXJ95z5dW6PbpbJ/W+HvZ2tVlmvF1/mFCs/8+1MxhgY1ODpf43qhBsaKqnM5g4lyqtT6bSjkhaSUtobVjt1gsSmt
ahOxpTOwaAoREzYERU/TGzEqhEVP0EyZDaZdkREWgEY/oMf6zHMgMEvliX7qg35kl5y+R4eWL62yY9UWuLt5l5uiIDxdDfYuocIIYvFjpW0+RkdYSNt7jMKio0/NFwv2PtoYFZ2AXQFQ72O9YxgM5DZ7B/RlqffKUdkbSSlhCp7UufNAsYzKNfLyQ5htFzFaiWCQCaQf6GZuJ0kBkZ5YgMHEP/vVIHjKE/jyrUlHM3zDNS3/YxO4+qDQzDaK6YNnBVF6I6r2Wg3iuA2MfxOgbwfU/r41XaEUkraclyPuYayhlJQjKotKrIuQzE9o9iwLDLOQOw8Mk4DBryOZVsU8cRE8ml5BmMZQ4II0PRP+vvVtwDwnAr+uW8M20Dw8hU9IvZZdmS743Un9wGXYGz7cn3RupPdlnBoVPWPNcxMK8DIEenHP2AMpw6DGfi7spmNZBzpfXxKu2IpJW0bM65AzBLDLtjju5olnMu1rdmYmS69BkFrpm9kRcr0ihwzdQdHZQxFTEA21hl7qm6o8MzJ1lKho0TZKqyJuuOj84eZ9lJkJEZlzNRd3x8jvkCW0aMKXljdccn5A5L2Dk3kRRJZnq+fkfd8XmDE/aQScyQmFkwXHd8TG4JmQY9c8xIliSmFpRjk/tfBobn5FPgTK568MmSgFG5heTY+3cSBmVlU55lvVhleXYOg7KSbyqZ1idDaUckraTlzFgGWLsISnIJdtdS3fE894XIkrULiyLlkOe+QHe8xH0GDjn5DqwnMlyUZ12kO17unkG2rQwrESQZhTG5+q9jSMZoSpzDLDFAMKPgHN3RwRlDGJE5ylIuiobGwuIzdcdLXUVMyxtvKRdFQ+O8soW644XOHBaVTDrWNTcVqUJjWcVpuuM59gwuLJ9qkSG4tnKe7niGzc5VVdNQLJw20YTg5tGzdMftssINY6daOtEigFvHz9TtCyNLEjdPmmbpmysBt0yaZul5pvV/q7QjklbSkuUcnO4rSX3rRMaVeSuSQcdYWc6gMOs6CwyFwuwbkSX98LIs2RiWcy2p/hlIKAzJugybrL/dIEky4/OXpTR/H6Mq+0zDrRlJkphTeCGp5tRIyIzOnk2u3biU/BklZ6cceZGQGJM9LmGZ9wvKFqV8fFdCYkTmUEZkGZdgv3zIaSnnP0hAhbuIKXlVhnZXD59tKceixJXDgpJRhjbXjZh+Si+WZJTnyODscv3oEcC1o6ZYStVy2+xcUmlcgv3KcRN1ozJmZJNlrhirH2lL65OvtCOSVkrKyPoakpRJ8l8hBVkuxZV5Q0LL4uwvppgromCT8ynKvjWh5bCca3AqRSkc45VR5Ewqc29JaDkm50Ky7WUpMCQUyc7UwpsSWk7OW0SxsyKFiIWEIiksLrkmoeW0/JkMdVemFLGQkLlk8JUmGOMZnzMy5ajITcMvSWgzOa+SuYVjU9qiEcBXR16YsPPruNzBnFM2MeUcpG+POydhRKUyu5Crq1KPJnxn0hk4dXJQ+lSWmcMXxutvPSbSt6cuINNuHD3Nd2Xw9Rmpb/d+fcZc8l0Dk5eT1v+N0o5IWilJsQ0lu+BB4ls0ZhdYBUnKJqfw8d6EV2PZbYOoKnmkN6phniFLGVSVPIJdSdzIzaHkM3vQvShypmlHQUJBlhzMKv0LbnviJmsOJZPzyn+LU8lJgiEjSzbOLv9f8hyJm6zZZSc3DL+TbFu+aWdEilO4suJ7lGUY3+EDKJKNb4y8nUJnkWlHQer957bKr1KVZXyHDyBLMj8c92+Uu0uTYMT19ZHXM9kgP+SYvSRx58QbGJVdnrQz8s3Rl7CgeIIp2/+eejnTCoYmzfiPsUs5r1w/X+d43Tn9XE4fNCJpZ+QrY+dzzYjppmy/P2Mx5w0dnTTj5rHT+cJ4/a2f4/XNmfNYNsbc+3q8rhgzgW/O1N/CSuvTobQjklbKsjtnkVu0Aknua7+u93WKL76yMpTc4pdR7PqJfifL7ZjEqNLnj3Mq9Bby+O/tShmjBr
1AhsP8RS3bMYLTBj9Bhi3uVOgv5PHfO5QC5pc9Qr5rqmlGjmMwlw6995hToc+IX+6dSg4XDvkjg90zTDNy7UV8acSvGeSqNMeQ3dxU+TPG5JhbLABy7Ll8b+ydVGbGE0sTOQsO2cnXR97OzAL9pOGTlWlzc9ek25mUO9qQIfX+2GU73x37Jc4sNb8guW1O/jDjq8wrGgdgGH2QkLBLCneMv4ZlFQtMM5yKnb/NuZmzB09MyJCRsEkyP550MV8Yucg0wy4r/H3BVSyrnNLL0HcXZCRkSeKHU87iO5OXmGYossyfFl3CzWNnJGZI8RjQ7VNP587ZSxNGjvokSRJ3LzmXr06bjZSAofQyvjptNr9acq5pRlqfXEki1YYIH4OSaSOc1v+dhAgTCb5C0H8/avTUcs525+m4Mm/F7jwTSUot50OIKD3B12n33t9v2fZM51yKsm8lN+MsJMmeEkMTMdoC71LjeZSO0AenjOc5pzA853pKM5eiSKkl62pCpcG/gd3dz1AfOJVR5BzDhPwrqMw6A5uc2nFETWhU+3ewoeNl9ntP7fhb4hzG3MKLmJh3Og45tSOaQggO+vaxqvVNtnZvOiV3ZJCrjDNKzmZOwWm4DI7TJmLs91bzatNq3u/YckrOxSBXEReULeaMkjlk2lI/3bHXU8eKurW82bKV2Ek1SEpd+SwbchrnDZ5Jrl0/FyiR9vU08WTtBl6s33ZKf5gSVw7XDJ/DpRXTKXRmpcw40NPGY4c380z1doLqiXVnCp1ubhg5k6uqplGakXoS+JGeTh49sJUnDuzAHzuxu3W+M4Mbxkzl2tFTGZyZ+vX6aE83j+7ZzuO7d+CJnFhsLcfh5NoJk7l+/BSG5ualzEjro1cy63faEUlrQKXGDqPGjiKEH0nKRrFVodgqdO2F5kGLbEZoXcRrixQgO2YhyfoLSzhaTThWiyZ8yFIWTttwnPbhuvaa5icY2YDaWwFVkfNxOWehyPoX5EC0Hn+0lpjwYZMyybCVk+Wo1H/dWpCe8CaiahegYZNzyXFOx25QY8QXbaE7UktU82OTM8iylZLvNGKEaQluI6x2o6HilHMoyphomMjqiXbQFq4jpPqxy05y7UWUOIfp3kWqIkp9YDf+WDeaiOFSsijLGEOmTX8rrSfaTWOwnoAawCE7yLPnMyRjqAFDpdq3D0+sh5iI4lYyqXBXkWvXP8HUE/VS62/AFwtgl+0UOHKpyqzQZWhC44D3MF2RbiIiSqbiZnhmBUXOwn7t4+9VgIPeBryxIHbJRoEzmzHZ5cg6kQxNaOz11NIe7iakRsmyZVCVNZiyDH2GNxpiT08j3mgQm6xQ4MhkQl65brRECMGunnqagn0MJyOzSxmaqZ9Y7IuG2dXVRE8khCLJFDgzmFSgf6RYCMGu7ibq/d0E1QhZNicjsosYkVOsywjGomxvb6I7HEKWJPKdGUwpKsOh6DP2drVS4+0iEIuQaXdQlVPImDx9RigWY3trE92heEn+PJeLKSVluGxWWz+k9XEo7Yik9YmXFt1DLPAoauAZ4KQS01IGSsbV2DJvQLalXlsiHD1Aj+9BPIEnEOLE8s+S5CLHfRW5WTfjtI9LmRGIVtPofZxm71Oo4sSy2xIOSrMuZnD29WQ7k9//7pM32sjBnhUc7HmOiOY9YUzGxvDssxidu4wi14SUw9SeaBvbu15lW9fLBFXPSa9DZkzOAqblX8QQd+qMnmgXH3S8zZr2N/CeVOpdQmJi7kxOLzqHkVlWGB5Wtb7P683v0BXtPoUxNW8iZw86g8m543UdjETyRP283ryB5+vfoyV8aln8GfljuKR8AbMLx6d8hNcbDfFyw1aeqF1LXeDU8v4zCiq5Ztg8FpaMxZZizRJfNMyLdTt5+PAGDntP7SY8rWAIN46YzVnl43CkyAjGorxQs4cH9m1iX3frKeOTC8u4ecwMLhg2LmHibFqfLn1iHJGXX36Zn/3sZ+zYsQOXy8WiRYt47rnnTD8+7Y
h89iRElGjPf6EGnyCe16FX7Cs+Zsv8IrbsHyAlcUEXQqW952d0++41xcjJvJGSvP8xPE58KkNQ0/07jvb81ZAhoSBQKcm8hDFF/4OcxJaOEIJdXQ+xreNvSEi6R2f7GEMzF3PaoDuxJbndsrnjed5u+TuYYFRmzuDiIT/EqSS3FbK+422eqvsnovef/iQjo6FRlTmOL1TejtuW3DbF++0b+NvhB1CFmpAxInM43x377+TYk9umWNu+k//Z8zBRLZaQUZlZxi8m/xtFTv2oWH9a13aQ7259jKAa3/rojyIjoSEYmlnEn2fewmB34uTv47WxvZavrluONxpC0mNIEpoQDHbn8q/Trqcq2/h498na3t7Ibe88SWc4qM/ofR0lGVk8sORqxuWXJMVI65OrT4Qj8swzz/ClL32JX/ziFyxZsoRYLMauXbu46qqrTM+RdkQ+WxIiRqTrK2jht0mmOIHsuhRH3q9NOSNCaDR3/ju+4IoknplEpuscygrvNZXDIoTgQMePafY9mRQjzzWPSaX3IpvMYdnc9gf2dD+WBEGmyDWRpeV/MO2MvN/2KO+3PZIUo9hVyXXD78Yhm8v9eKf1JV5oNM+QkSlyDuKbo35m2hl5s2U1/6p+NClGobOAn034PnkOc9eWN5s38ct9j+ouqqcwJJl8exZ/nP4til15phhvN+/me1sfB0AzQVEkmWybiwfmfYWKTP0toeO1puUw/7b2cTQhTDIk3IqDxxffyqgcc47ChtY6bnrzcaJCM9VZV5EknIqNJ866nkmFxrVm0vp0KJn1+yM5NROLxfjmN7/J3XffzVe+8hVGjx7N+PHjk3JC0vrsKer5n6SdEAAt9Bwx3x9M2XZ67knSCQEQ+EOv097z36as6zz/TNIJiTO6Q+s42HGnKev93c8k5YTECRptoV2sbTH3OnZ3v5WUE/Iho5rn636BmXuYHd0bknJCIF4dtT3czL+q70EzURRsR/du7qtO7r3S0OgId/Kr/X8gpiUu87+r+wh374szzH57NaHRHfHx/R1/I6xGEtrv7Wngh9uWIzDnIEC8yqs3FuJrG+/HFw0ltD/kaePr65ejCi0JhiCgRrh1zSN0hRN3y63zdfOFd5407YT0MUJqjJvfXk5LwJv4AWl9pvSROCJbtmyhoaEBWZaZNm0aZWVlnHfeeezatcvwceFwGI/Hc8JPWp8NCbUZNfAQqZZpjPn+itD0W8gDqGonnd4/pzQ/CLp9/yKmthgzND+13X9KmdHse5pg9GgCRoRtHX9LkaFR63uLrvBBYyuhsrr1vpQIAo1q/yYag3uN7YTgxabkHIQ+aWgc8e/jgHdnQtsn6pJ1PD9kVPuPsrlre0LbB2peTYmhonE00MKq1m0Jbe899DaaMOseHMcQGk3BLl5q2JrQ9u/71xDVUmEIOkJ+lldvTmj7jz0fEIxFTTshfdKEwBMJ8eD+xIy0Plv6SByRI0eOAHDnnXdyxx138NJLL5Gfn8/ixYvp7Dw18apPd911F7m5ucd+Kir0T1uk9elSLPC4xRmiqMFnDC08geVgqYmdoMdvvHC2+F9EOynxNTnJNHqfMLQ46nvnlKTUZCShcKDnWUObI76N+GL6f4uJGTJbOl80tDns20N7uDllhozMe+2vJ2DUUO0/mnJDPhmJ15rfNrQ56m9he/ch0xGEkyUhsaLhXUOb5mA377buR7VQT/2J2rWGUarOcIBX6nenXHpeQ/DIkY2Gj/dFwzx9eEfKpedVIXj04BbCqtVmlGl9mpSUI/L9738fSZIMf/bt24emxb+oP/rRj1i2bBkzZszg/vvvR5IknnrqKd35f/CDH9DT03Psp66uztqrS+sTISGixPyPgMUOsTH/A7oXWiE0un33YakxBho9vgcQQv8i2OB5CGvN5VSavE+gaWFdi33dT1lqLidQOex5mYjq17XZ0vmiRYbGfs97BGLdujbvtb9huYHdXs9WuiKnnujo05stqy0yBPu8B2kINu
navNS41hJDIDjsa2C/Rz8S9mzdRostC6Eu0MnGziO6Ns/UbrXUmwagLeRjVZN+tO256t2WnQhPJMyrR/dZmiOtT5eS+uu6/fbb2bt3r+FPVVUVZWXxZKPx4z9sduR0OqmqquLoUf0/RqfTSU5Ozgk/aX36JdQ6EKnffffOEp9H638eVWsjpjZYZBjPo2p+AtFDWHN2QBU+ArH+FwwhNNrDu1NuLvchI0J3RH/BqA9YZ2ioNAf1GUd8e1NuYNcngaDGr8/Y49lvmQFwwHtYd2xH9yHLDAmJ3T3VuuNbO2tSjrj0SZFktnfV6o5vaa9LOXLUJ5sks7lD/xq+qbXechdcmySzqbXe0hxpfbqU1MHt4uJiiov1C9D0acaMGTidTvbv38+CBfGSyNFolJqaGoYNG5baM03r0ytt4HJ9hOhB4tTTAdoAMlSth/7OtcQsbJecMpfa//ONagGsOjp9Cqv9P19NqMSEfkQmGYU0n8GYlS2sDxU0iOwEVOsMGRl/TD8J0xsbAIYk4TOYpyeaOAk0kSTi9Uf01B2xzgDwRvW/O55IyHLURSOeK5LW50cfSQWZnJwcvvKVr/CTn/yEiooKhg0bxt133w3AlVcm7sCZ1mdMKZZD7186cyVRAySR9ErEJ1NnJFWGPIAMRe91xLuOWI6IxBn6z1eW5AHxqYwYSootA46XQGAzmMdoLBkZFR7Tq3qarIzmscsD890yZCiK6ePNepJ650nr86OPrJTd3Xffjc1m48YbbyQYDDJnzhzefvtt8vOTK7yT1qdfHzbFszwTkk7XXkUuBMuXwL65+i/cZJNzMS6QZl4OpX+GIjlRJCfqAEQsXLb+S6ZLkoRLySaoGp9CMiO3ov/3nGXLpTNyajXNZJVt1y8IlmvPoSdqLRomEOTa9beBC505NIXaLX2zVKGRZ1A8rdiZwwGaLW3PqEJQ4NCvu1KckYUiSZYiFgJBgVO/mF2RKxNFkomlmBAbl0ShM/W+Pml9+vSRdd+12+3cc889tLS04PF4WLlyJRMmpF7mOq1PrySlFMk+DWtfNwXZuRhJ7v8Cpcg5uJ0L0e/Oa0YyLsccbEr/24+yZKfYfZZlRqZ9LC6d/juSJDE8+2wkSwyJLFs5+Y5Ruhbjc8+wlKwKkKnkU+7WL48/M3+BZYZLdjMqS/+6saBoDpKlNE9wyHam5E3UHT+jZLpl91aRZOYX6TPOLptsOUcEYMkg/ffq/PIJlrdNVCE4f4g+48Jh4yw6IXGn7aLhqbddSOvTp4/MEUkrreNly7wVa6dmVGzumw0tcrNuw1q0QiMv6zZDi8E5N1hmlOfcZNhLZUzuMoTFqMvYvKsMGVPzL7C0NSMhMa3gImSDbYt5hWdiJUIlIzO/6Ezssv7W3qLi+Sn3jOljLCyej9umXyX2zNIZOOXUOjpD3AlZXDKNPINoxdJBE8m2pdYJuY+xsGQMZRl5ujaLy0ZR4kq9u68sScwuGmZY6n1u6VCGZeen7BrKSEwsKE1XV/2cKe2IpPWxSHGdA1I+qR19lUEuR3aebmiV6VqCTSkjta+1jCIXkpVxrqFVrnMWGbbKFBkSipRFSeYFhlaFrrEUOselGE2QUCQ7I3LON2Y4hzDUPcVCxEJmcv45hhZ5jkIm5s5M+eirQDCvcKmhTY49m/mFs1JmaGicVbrI0MZtc3Fu2ZyUGarQuHjwAkMbh2LjiqFzkFNcwlWhcdWweYY2iiRzw4jZKTM0IbhxxGxDG0mSuHXszJTmh3ii6i1jUn98Wp9OpR2RtD4WSZIDR/7vSN4RkQAZR/7vEvaakSSFQQV/Ib51kgxHAiQGFfxFN4n0Q4bEuOJfI2NLkhHX2OK7UUz0aJlf+mMUyZkCQzCv9A4cSuJmbucM/g+csjslZ+Tssq+TpZODcrwuL7+FTFt2Sov4xYNvoMhZmtDu+mFXkO/IS4mxbMhFDHUPSWh38/DzKMsoTKmb7hVDFjM+d3hCu1tHLKIquyRphgRcXjGLOY
WJO1XfPHIOk/IHoyR5xFZC4oIhEzhr8NiEtteOnMbc0mFJM2RJ4szykVxaqb+FldZnU2lHJK2PTYpzIfa83xN3FMx89WTAjiP/bygOc3dJGc45lBX+g3getllG3IFxu4wjLn3Kdk5kYunfkSWnSUbcmRpTdBdF7jNNMfKcVZxZ/ltsUoZJRyF+0Z9d/B0qs882xch3DOaqYb9I2hlZVHIbU/LPM2Wb5yjkqyPuwG3LSspROLv0chYVG0d1+pRrz+FH475FniM3Kcb5g5ayrPxCU7bZdje/nPIVSp35STHOK5vLl0ZcZMo20+bkTzNvYVhmUVJRi7PLJvO98RcZbsX1yaXY+fv8axmTU5oU44yyUfzvjEtMMRyKwt8XLWNq0WDTDAmYVzqMP55+KYqcXpY+b/rIuu8OhNLddz+bUsPriXp+iojto/9TKPHfSfYpOHJ+iuyYkjQjFN5Ca/cPCUd3GDIc9gmU5P2cDOecpBm+8B4Odv4UT3grEsopeR19v3PbRzKi4EcUZJyWNKMnUs0HrXfTEtxiyMi2VzCj6D+oyDLnTB2vrkgjK5v+TI1/S7/HemUUNFRy7KUsLv0CY3NSYbTzdP197PHoMWQ0NHLtBVxQdg2zChYmzeiOeLiv+lE2dW1D4tTutX2MPHsOVwy5mDNLk2d4on7+dPBZVrdtQwhxSoGwvrb2ufZMrht2FpeVLzS1eB8vbzTEr/e+zCuN29AMGNk2FzdXLeTmqtOTzpMJxCL8cudKnq3dRlSLf6eOp0hICASZNge3jpzL18YtTDpSE1Zj/HLrOzx2cBuR3mqrJzLi/59hs3PzmBl8e8rCATvGnNb/vZJZv9OOSFr/JxJCIKLbiPofjnfkFb2FsaQcFNc52DJvQLZbP2UViuygx/cgvtBrx4qeyVI2mRlnkZd1Ky7HVMsMX2Q/Td7HafO/RkzzINCwyVnku06nPOcGcpzTk16MTlZPpIYDPc9S411JWPUAApvspixjJmPyrqQ0wzqjK9LItq5X2NPzNiHViypUnLKbIe6JTC+4iOGZ0xJujyVSZ7iVdR1vsbHrPXwxD5pQccguKjNHs6DoHMblTLWUfArQGenirZb3WN32Pj1RLzERw6W4GJE5nHMGncH0/MmW6490hj282vwBrzWtpyPiIabFcClORmaVc0n5Ak4rmmxYN8SMusJ+XmjYzIq6TbSGeohoMVyKg5HZpVw1dC5LB03EoVirwNATCbKidjtPVG+mKdhDWI3hUuxUZRdxfdUsLqiYgEtJPVEXwBsJs6J6F48c2EK9r5uQGsNlszM8O58bRk/nkuETyLQPZK2htD4JSjsiaX3q1Pc1TLSYithRRPAZUGtAC4DsBmU4UsYVSDpHYpNlxGL1BAKPE4sdRmg+JDkTmzIMt/tqbHbjfXizjEismXb/kwSjB1E1L7LkxmkbQlHmFWQ4Rhs+to+TiBGKdVDrfZ6eyEGiqhebnEGGrZSh2ReR5xwzIIxgrIe9Pa/RFj5IRPVhk11k2goZk3MWpRmJ8wnMMAIxH1u6V1MfOEhQDWCT7OTY85mat4Ch7jGJvzNmXocaZF3HOg56DxJQA9gkG7n2XGYXzGZM9sAwQmqY1a2b2OU5jD8aQJEVcu3ZLCiayuS80QkdMHOMKG8172Bz52E80QCKJJPnyGRRyQTmFg0MI6LGeKNpD2tbD9MTCSJLEnkON2cMGsPC0lEJHTAzjLQ+/Uo7Iml95iTC7yP8/4LI+8TzMgTx48Ay9AbicSxAyvwiktP49ICewuH1+Hx/IRx6S4eh4nAuICvrK7hcS1Ji+MJbaPb8g67gG8f9VqMvjwRUspyzGJT9RfLdxqdS9NQV3svBrgdp8L95XFg/zohvi6jkOycyKu8GyjPPTmlR6AhXs6XjCQ563kFD7Q3ln8godo5iSsEyxuQsTSmS0hZq4N22F9jSvRpVnMiIb7OolLoqOK3wAmYWnGF4lFhPraFWXmt+jffb3yciIse2b+DDrZ
xBrkGcVXoWC4sWYkuhOmlbuIvn6t/mjeZ1hLTwCQxFklGFxiBXIRcNXsx5ZadhT+GocHvYw+M1a3i+4QP8sfCxeY9nlLpyuaJiPlcMnYdLST4C0Rn289DhdSyv2YQnGuqXUezM4trK2dxQNYdMuzNpRlqfHaUdkbQ+MxJCgP9vCN9vSVzVtDe3JOt2yPxyUgusz3cvnp476XMGEjGysv+T7Oz/lxSjzfc4NZ0/os+p0ZcMaJRm30ZF3h1JLeJ13lfZ1PpjgAS1SOKM4dmXMbX4h0mVlj/iXcNrjT/rzZEwYsSzAEZlL2Fp2XdRDOqBnKwD3m08XPMrVBFL0HAuzhiXM4trh/4nDtn84rffu5/fHfgdES1iqqndhJwJfH3k18lQEp966tMBbw0/2fVXArFQQoaExLicSn48/t/IsutXLz1ZB72N/Ofm++iJBo45BkaM0dmD+c30Wylwmq8pcsTbxhfXPkxb2IuWYMmQkajMLuLeeTdSmpG+bn9elcz6nU5PTuuTrWNOCCQuJNabdOf7NfjvNY3w+f6Fp+cnxCMg5hg+7+/wen9lmtHme5Kazh8Qj0wkYsQXkxbv/Rzt+plpRr1vJRtbf4hANVEQLc6o8T7H1rafY/Z+pMa3nlcbfoImYiYY8TkPet/hjaa7ECYrbh727eKB6l8QE1ETDkKcsc+ziUdr70EV5grBHfYd5p799xDWwqY76+717OV3B35HVIuasq/2N/DDHX8kEAuaYggE+zw1/HjXnwipEVOMo/42vrrxH3RH/AmdkD7GIV8T39j0D/wxc43lGgPd3LTmftrDvoROCMSThGt8Hdy05v4Ba7SX1mdbaUckrU+sRHjdcU5Iko/13YOIbEhoF4lswdPzXykxfN7fEwq9mdAuENlHTef3UyAIWn0P0OF/IfFzidaxseWHpFJ3pNb7PDXeFSYY7bzacGcKtVIFh72r2db1TELLQMzLQzW/RHDqaRFjgmC/dyvvtCZmhNUwvz3wWzShJcXQ0DjoO8gz9YkZUS3Knbv+SlSLJVW6XUPjsK+efx5JzFCFxre33E9IjSTFUIVGrb+N/939bEJbIQRf/+BxPNGgKUfneEZTsJvvb07MSCuttCOS1idWwn8fqfd1UXofbyyf715S/zNQ8Hn/mtCq1fcgqVWUBZBp9vwjoVV1z1PEowOp7LRKHOh+MGFUZE/Py2giliIDtnU+iZYgYrGp6x0iWigpB+F4vd/+MrEEEYt1Hevwq37TkZDjJRC80/YOIdU4mrC2fTudkZ6UGW+2fIAn6je0W9e+n4ZgZ1IOQp80BG+37KQ1ZNz4cFNHLQc8LSn1qFGF4L3WQ9T42pN+bFqfL6UdkbQ+kRKxeoi8S+p9XVQIv4NQG/Ut1DZCwZcsMSKRdUSjB3UtYpqHdt+zFhgagegu/OEd+s9CC1HtedZCfxqBP3qU9tBmfYaIsbPreUv9afyxDmp9H+iOa0JjbfurKTshAEHVz66e9brjQghWtqy01CgvokVY17HO0OalxtUpl1KH+HvxZov+6wB4+ujalCq9Hq/n642jho9Vb7DEUCSJ5TWbUn58Wp8PpR2RtD6REsFnsP71lCCoH+IOBp7GSlO2uJT/3969B0dR7XkA/57uyTwzk5BJJg/IE7jJKq8QIEiweMUgAhrh4rpGl9dCSQWEVQsirItVgggLWmu0AKUWULBARUUUdSPyiBQskQglYAhciEICJDzyIJNkMjNn/wiJ5F47M5nusZPh96nKH5nT6d/v5DH9y+k+58Bu/1Cy9Wb9HnB4d7+/oxhV9TskWyvq98PJO/7v2RMGERdrpb9Xv90+hgZXtcwYAk5V75Fsv1h/BtXNVbJjHL3xv5LtZfYyVDRWyCp2GBj2V+6XbL9sv4aSujJZu+lycOy9UijZXtlYg/+7cc6n0ZBWbnB8ekm62Kl22PHdlV9kxXBxjl2/FsPplreJIwlsVIiQrsn1G+QXCQzceUmy1eksg/w/ATdczt8kW5
ucv4JB3qJTgAuNzWWSrbebf5Mdg8OF245fJdtrmstlbJDXGsONasdlyfYbTVdlnb81xnXHFcn2ysZKBWJwVDVJF0xXG5W5FVHZeFPydlm5/YYiMaqb69Ho+uNbWVfsNV49nOpJvdOBmuYG2echgYsKEdI1cTsg4zZACzfQwUgB5/WQX+zwthVb/4iL18v677vtPB3EcHJl3uSbW1e3/aM2d6Os2xm/n0c6V4e7UXax03oeKU3uJtnn93SeBpcyMTg4HBLPuzR4OavGG3bnH+drVzBGvVO5c5HAQ4UI6ZqYCfJ/PYU755EIIQTD94dI284CQZCeIy+yYEUu4GIHMTTM+3UtOhLEpNeVCBL0ihRUWkF6fQytoJf1DEornaDvoE2ZRbY6Oo9BVCaGAAFaicXNjBrlFgszSZzLqFFu2fVgBfMlgYcKEdIlMU2iAmfhHZ5Ho+kN+aMuAjSaJMlWfVASOLxbd0KaCENQH8lWszYBHE5ZERhEmLXS/QjVxsouEhhE9NDFS7ZH6GNknb8lhoAIXS/J9mhDtAIxGKL0UZLtMQab7BgAEGWwSi6Y18toVaTAtWrN0EnsJRNjCIVG5sOwAGDW6BCiVaZYJoGJChHSNRmmKnSeKdJNhinwfXpwKxeMpqckW8OMEyHIHrFwISL4ScnWaNMYBAlmWRE4XEiy/FWyPc40FCaNVXaMfqGTJdsTjP+EMG0k5IxScbgx3Jol2R5njEOcMU7WRZyDY4xtjGR7jCEC91t6y5o1wwBMjJbeHThcZ8GI8GRZM1oEMEyNGy7ZHqI1YHzM/fJiMIZpCUNkz+4hgY1+O0iXxMQoQDcGctYRgS4TTIyUPkK0wmB4VFYMne5BaDoYdRGFYISb/iojhgCTdhCM2vukYzAtEi1TZTxfwWAOSkSYfqB0FkxE/9DHZF3AgzU2xJmGSGfBGDLCH/H5/ABgFM24P2RYh8dk2jJl3WbSC3qkh6V3eMykmFGyZs1omAZjIzvux9S4B2TNaAGAR3t2HOPJxKGyYnDO8URCms9fT+4NVIiQLouZZsH3WyfuO1/fMVPwHPj+wKoLpuB5Ho+ymaffuYD7chF3I8ryrMejkizTwJjGxxgcfwmd4XHfnPtCH4HIdD4XI4Ot/+xx35zBPUbDIBp9LqoejJgM0cO+OenWdFg0Fgg+xGBgGBc5DjoPz4EMtw6ATRfmc4zxUSMQrOl4v5l0a18kmGw+jTYIYHg4JhVWXccjaalhsegXGgPRh40RBTCMjUpBrCms019L7i1UiJAui2mHgplf8u1rzcvAtIM9HqfVDkBIqPd7xtzNbMmDXj/a43GGoD5ItL4BXwqeKMs8hBkf9nicMSgG6ZGt/ejcRSPJ8gTizNK3TNpiaMIwqdcK/L4bsbcYUixZ6B+a7fFIg2jCjISlEJjQqWKEgeF+yzCMivAcQyto8ULyC9AImk4VCgwM91nuQ3aM5xgaQcQr/eZBL2o7FUMAQ4olEbOSPMcQmIA3Bs9AsEbfqWJEAEMfczReTPEcgzGG/GFPIkxr6lQxIjKG+GArVg72HIMQKkRIl8ZMM8DMrTvWerq9IQJgYOb/ADP9q9cxTKanEBL6X2j5c/AmBmC2LEVw8AKvY1hNjyLJ+t93vt67GDGW59ArZLHXMaJNozA8ah0EaMA8xGht7xOSg4Hhi73eRbiXaTAm9XoNGqb1IkbL28t9IY9gbLT3OxXHm5IxO/E/oRU8X8RbYwwIGYF/ift3CF5ekOOMcViSvAQG0eBFjJa8B4UOwoI+C6ARvFuzJdYYhdcHLoIlyOTxeZHWGANDk/HK/fMQJDFb5u9FG8KwYdizCNeZvYjR8tE/NB75Q/4NBi9nxdgMFmx7cDZiDKFex0i2RGHryBkwB0nPYCKkFePebrupgs5sI0wCG3cUg9dvBpoK7rzC0HLbRkDbSINufEvhok31KYbDcRK3b7+LxoY9d53bjd//+3dDp89CcPBc6HQP+BTD7jiDq7X/g5v23X
dmuohoWf699WLoRoh+NCLNsxBieNCnGLWOv+F89Yf47faXcPNmMIjgcLdd7DhcsBnS0TskB9Em32JUOy7j5M1d+KXmGzi5405B0Po9a4kRYxiAgWFTkBT8oNdFyN1uOipx+PpXKLq5Dw53I4R2/WBww4U441+QEf4IBoRk+BTjluMWCq4V4EDVATS4GiBChPuu75UbbiQYE5AZmYkHrA94Xei0j1GLLysO4esrhahz2iEyAW7OwcDAWMsGcfHGGEzuOQrjbOnQCJ1/nqjaUY9dl45g16UjuOWoh+ZODKBlVMPF3Yg3RWBa7AhM7jUUWi+LqbvVNjdg58Ufsf3iMVQ11rWLITAGJ3cj1tgDOUnpmJaQBr3EbBxyb+jM9ZsKEdKtcFcl0PAZuOvXlsXKmAlMTAAMj4OJEYrEcLmuo8H+MZzO83Dz2xCYCaImHkbjNIii/CmmAOB03cL1+k/R0HwOLl4HkRmh1fREuGkqdJpYRWI4XHW4dPsr1DSdQ7P7NkRBD6MmEnHBExGslZ5G27kYdpTW7kNV0zk0uW5Dw3QwBYUj2ZKJsA6m6nYqhrsRJ6sP47L9PBpc9dAIQTBremBQ6EhEGxIUidHsbkbRzSKcu30OdpcdGqZBSFAIhoUNQ4JJqRhOHLl+EqdqzuO20w6NICIkKBgZ4alINif4VEj9PafbhcKqM/jx5t9Q19wAkQkICTJidGQ/DAxVJoaLu3Ho2jkcrjyPGkcDBMYQqjViTFQy0sMTFYlBuj8qRAghhBCims5cv+kZEUIIIYSohgoRQgghhKiGChFCCCGEqIYKEUIIIYSohgoRQgghhKim85PJ/0StE3pqa2tVzoQQQggh3mq9bnszMbdLFyJ1dXUAgNhYZdZVIIQQQsifp66uDiEhIR0e06XXEXG73aioqIDZbFZ8kZza2lrExsbi0qVLAblGCfWv+wv0PgZ6/4DA7yP1r/vzVx8556irq0NMTAwEoeOnQLr0iIggCOjVq5dfY1gsloD9BQOof4Eg0PsY6P0DAr+P1L/uzx999DQS0ooeViWEEEKIaqgQIYQQQohq7tlCRKfTYfny5dDpdGqn4hfUv+4v0PsY6P0DAr+P1L/uryv0sUs/rEoIIYSQwHbPjogQQgghRH1UiBBCCCFENVSIEEIIIUQ1VIgQQgghRDVUiAAoLS3FY489hvDwcFgsFowcORL79+9XOy1FffXVV0hPT4fBYECPHj2QnZ2tdkp+0dTUhEGDBoExhhMnTqidjiLKysowe/ZsJCYmwmAwoHfv3li+fDkcDofaqcnyzjvvICEhAXq9Hunp6Th27JjaKSli1apVGDp0KMxmM2w2G7Kzs3H27Fm10/Kb119/HYwxLFq0SO1UFFVeXo6nn34aVqsVBoMB/fv3x48//qh2WopwuVx4+eWX272nvPrqq17tC+MPVIgAmDRpEpxOJ77//nscP34cAwcOxKRJk3D16lW1U1PErl278Mwzz2DmzJk4efIkDh8+jKeeekrttPxi8eLFiImJUTsNRZWUlMDtdmPjxo04ffo03nzzTWzYsAFLly5VOzWf7dy5E88//zyWL1+O4uJiDBw4EOPHj0dlZaXaqcl28OBB5Obm4ujRoygoKEBzczOysrJQX1+vdmqKKyoqwsaNGzFgwAC1U1HUrVu3kJGRgaCgIHz99dc4c+YM1q1bhx49eqidmiJWr16N9evX4+2338Yvv/yC1atXY82aNcjPz1cnIX6Pq6qq4gD4oUOH2l6rra3lAHhBQYGKmSmjubmZ9+zZk2/atEntVPxu7969PCUlhZ8+fZoD4D/99JPaKfnNmjVreGJiotpp+GzYsGE8Nze37XOXy8VjYmL4qlWrVMzKPyorKzkAfvDgQbVTUVRdXR3v27cvLygo4KNGjeILFy5UOyXFLFmyhI8cOVLtNPxm4sSJfNasWe1emzJlCs/JyVEln3t+RMRqtSI5ORnvv/8+6uvr4XQ6sXHjRthsNqSlpamdnm
zFxcUoLy+HIAhITU1FdHQ0JkyYgFOnTqmdmqKuXbuGOXPm4IMPPoDRaFQ7Hb+rqalBWFiY2mn4xOFw4Pjx48jMzGx7TRAEZGZm4siRIypm5h81NTUA0G1/XlJyc3MxceLEdj/HQPHFF19gyJAhmDZtGmw2G1JTU/Hee++pnZZiRowYgX379qG0tBQAcPLkSfzwww+YMGGCKvl06U3v/gyMMXz33XfIzs6G2WyGIAiw2Wz45ptvAmIY7sKFCwCAV155BW+88QYSEhKwbt06jB49GqWlpQHx5sg5x4wZM/Dss89iyJAhKCsrUzslvzp//jzy8/Oxdu1atVPxyfXr1+FyuRAZGdnu9cjISJSUlKiUlX+43W4sWrQIGRkZ6Nevn9rpKGbHjh0oLi5GUVGR2qn4xYULF7B+/Xo8//zzWLp0KYqKivDcc89Bq9Vi+vTpaqcnW15eHmpra5GSkgJRFOFyubBy5Urk5OSokk/Ajojk5eWBMdbhR0lJCTjnyM3Nhc1mQ2FhIY4dO4bs7GxMnjwZV65cUbsbkrztn9vtBgAsW7YMU6dORVpaGjZv3gzGGD7++GOVe9Exb/uYn5+Puro6vPTSS2qn3Cne9u9u5eXlePjhhzFt2jTMmTNHpcyJt3Jzc3Hq1Cns2LFD7VQUc+nSJSxcuBDbt2+HXq9XOx2/cLvdGDx4MF577TWkpqZi7ty5mDNnDjZs2KB2aor46KOPsH37dnz44YcoLi7G1q1bsXbtWmzdulWVfAJ2ifeqqircuHGjw2OSkpJQWFiIrKws3Lp1q90WyH379sXs2bORl5fn71R94m3/Dh8+jLFjx6KwsBAjR45sa0tPT0dmZiZWrlzp71R95m0fn3jiCezZsweMsbbXXS4XRFFETk6Oan9cnnjbP61WCwCoqKjA6NGjMXz4cGzZsgWC0D3/j3A4HDAajfjkk0/azd6aPn06qqursXv3bvWSU9D8+fOxe/duHDp0CImJiWqno5jPP/8cjz/+OERRbHvN5XKBMQZBENDU1NSurTuKj4/HQw89hE2bNrW9tn79eqxYsQLl5eUqZqaM2NhY5OXlITc3t+21FStWYNu2baqMSgbsrZmIiAhERER4PM5utwPAP7ypC4LQNprQFXnbv7S0NOh0Opw9e7atEGlubkZZWRni4+P9naYs3vbxrbfewooVK9o+r6iowPjx47Fz506kp6f7M0VZvO0f0DISMmbMmLYRre5ahACAVqtFWloa9u3b11aIuN1u7Nu3D/Pnz1c3OQVwzrFgwQJ89tlnOHDgQEAVIQAwbtw4/Pzzz+1emzlzJlJSUrBkyZJuX4QAQEZGxj9MuS4tLe3y75nestvt//AeIoqietc8VR6R7UKqqqq41WrlU6ZM4SdOnOBnz57lL774Ig8KCuInTpxQOz1FLFy4kPfs2ZN/++23vKSkhM+ePZvbbDZ+8+ZNtVPzi4sXLwbUrJnLly/zPn368HHjxvHLly/zK1eutH10Vzt27OA6nY5v2bKFnzlzhs+dO5eHhobyq1evqp2abPPmzeMhISH8wIED7X5Wdrtd7dT8JtBmzRw7doxrNBq+cuVKfu7cOb59+3ZuNBr5tm3b1E5NEdOnT+c9e/bkX375Jb948SL/9NNPeXh4OF+8eLEq+dzzhQjnnBcVFfGsrCweFhbGzWYzHz58ON+7d6/aaSnG4XDwF154gdtsNm42m3lmZiY/deqU2mn5TaAVIps3b+YA/vCjO8vPz+dxcXFcq9XyYcOG8aNHj6qdkiKkflabN29WOzW/CbRChHPO9+zZw/v168d1Oh1PSUnh7777rtopKaa2tpYvXLiQx8XFcb1ez5OSkviyZct4U1OTKvkE7DMihBBCCOn6uu+NZkIIIYR0e1SIEEIIIUQ1VIgQQgghRDVUiBBCCCFENVSIEEIIIUQ1VIgQQgghRDVUiBBCCCFENVSIEEIIIUQ1VIgQQgghRDVUiBBCCCFENVSIEEIIIUQ1VIgQQg
ghRDX/D5jrWwVsUUF3AAAAAElFTkSuQmCC\",\n      \"text/plain\": [\n       \"<Figure size 640x480 with 1 Axes>\"\n      ]\n     },\n     \"metadata\": {},\n     \"output_type\": \"display_data\"\n    }\n   ],\n   \"source\": [\n    \"plotCompPinPow(fuelPin)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"8b58b2a1\",\n   \"metadata\": {},\n   \"source\": [\n    \"And one more 60 degree CCW rotation for good measure.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 22,\n   \"id\": \"36fa1751\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"fuelBlock.rotate(math.pi / 3)\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 23,\n   \"id\": \"ca93a8a3\",\n   \"metadata\": {},\n   \"outputs\": [\n    {\n     \"data\": {\n      \"text/plain\": [\n       \"<matplotlib.collections.PathCollection at 0x1baf95f79d0>\"\n      ]\n     },\n     \"execution_count\": 23,\n     \"metadata\": {},\n     \"output_type\": \"execute_result\"\n    },\n    {\n     \"data\": {\n      \"image/png\": 
\"iVBORw0KGgoAAAANSUhEUgAAAiIAAAGdCAYAAAAvwBgXAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjAsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvlHJYcgAAAAlwSFlzAAAPYQAAD2EBqD+naQABAABJREFUeJzsnXV4FdfWxt8958Q9IQkhSpBAIEpw9yAJ7lBq1O3WXW791nvbr06BFnd3d+IQCAQJJMTdjs/s749DuEhGzkzaUjq/58l9epl15j0jZ/aatddei1BKKVRUVFRUVFRU/gKYv/oLqKioqKioqPxzUR0RFRUVFRUVlb8M1RFRUVFRUVFR+ctQHREVFRUVFRWVvwzVEVFRUVFRUVH5y1AdERUVFRUVFZW/DNURUVFRUVFRUfnLUB0RFRUVFRUVlb8M7V/9BYTgOA5FRUVwc3MDIeSv/joqKioqKioqEqCUor6+Hm3atAHDCMc87mhHpKioCMHBwX/111BRUVFRUVGRQUFBAYKCggRt7mhHxM3NDYD1QNzd3f/ib6OioqKioqIihbq6OgQHB18fx4W4ox2RpukYd3d31RFRUVFRUVH5myElrUJNVlVRUVFRUVH5y1AdERUVFRUVFZW/DNURUVFRUVFRUfnLUB0RFRUVFRUVlb+MOzpZVUXlVoovlWLb/D0ovFAMfb0BTm6OCGwfgMQHhiCgrX+LaJTlV2Dbr/tQkFsEXb0eTi6OCAj3x8i5AxHUMaBFNCqKqrB90UHkny1EY50ejs4O8A9theGz+iEsUnipm1Sqy+qwY8lh5J2+isZaHRycHeAb6IXhM/ogvGvLLIuvrWzAzuVHcfFUARrq9HBwtIdPaw8MmdwDEXFhLaJRX6PD7tUncC4zH411etg7aOHt54FB47uhc7ewFqkx1FhvwN51aTiTloeGOj3s7LXwbOWGgWNjEdWzXYtoGHQm7N2YgewUq4ZGq4Gnjwv6J0Yjpnc70VoLUjAazDiw7RSyjl9CXa0OGg0DDy8X9BkWiYR+HVpEw2Sy4PCuM0g7egH1tXowDIGbpzN6D4xAj/4dodFqFGuo/LMglFL6V38JPurq6uDh4YHa2lp11cw/nPRdJ7Hy0w1I3ZkFhmFAKQXlKAhDQAgBx3FIGBmLqc8nI25IlCyNkwdzsPLzTTixJROEITdpMAwBa+EQO7gLJv9rDHokxsrSyDlxAau+2orDG9PQNLRxt2h07dMRk55MRJ+kbrI0zmddwar/bsfB9WmglII0aRACRmPViOjWFhMfHY4BExJkDbKXc4qw6v92YP+6NLAWDoTcrtEuKhjj5w3GkMk9ZA2AVy+WYtX3e7BnTSosZgsIQ8CxFIQAjIYBa+EQGhGA8Q8MxPCpPaHR2K5RnF+J1T/txc5VKTAZzWAIuXYc/9MICvfFuPsGIHFaL2jtbB9ky4trsHr+AWxfmQKDzgRGYz0OEEBzTaN1sDfG3dMXo2f0gr297e+HleV1WLPwMLauTIGuwQhGw4BjOasGw4BlOfi29kDyrN5ImtETjk72NmvUVDVi7e9HsHlVqtWR0lj3C+D6f3u3ckXStJ4YN7MXnF0cbNZQuXuwZfxWHRGVOxpKKZZ8sAYL3lj2v4crD03bH/hgJqa9NN6mAXbN11vx/Qu/gWGkacx8ZTzmvj3FJo0tv+7D108vuO5w8GtYB6oJj4/AQx/OsGkQ37vqOD55dD4IgbAGYx1wE+/pjyc/nWXTW+yRrZn48KFfQDl6fSBqDsIQUI5i4IQEPPvlHNg72EnWSNuXg3fn/QKzmRW8HoQAlAK9RnTFS9/MtWmAPXX8It564GeYDGbh47h2iWP7dsTr390LZ1dHyRrnsgrw+oO/QNdgFDwOEIAAiIwPw1vfz4Wbh7NkjUtni/HaQwtQW6MT1oB1KWW7zgF47/u58PRxlaxRkFeOVx5ZiKryenCc8JBBGILgsFb44Lu5aOWvPrf/qdgyfqs5Iip3NEs/XIsFby
wDANGHbNP2X15dguX/WS9ZY9232/H9878BVLrGkg/XYcFbKyVrbP/tAL568lfr4C3gIFg1rA/6tf+3Az+8tESyxoF1qfj4oZ/BsZy4xrXBZPtvh/DVv36D1PeRE7uy8d4DP8FiYQUHbwCg1zQOrEvDp08sAMcJ2zdx8uh5vHnvjzCZLKLXo+lrn9h1Gu8/PB+shZWkcTbjCl675wcY9Cbx46DWv6yj5/HW/T/DZLRI0sg7W4yX7vkBunqD6HHgmkZO5hW8dt/PMOhNkjSuXq7A83N/Qm1No7gGrI79pXMlePG+X9DYYJCkUVpUg+fu+wVVFQ2iTghgve6FVyrx/AO/oK5GJ0lD5Z+N6oio3LFk7DmFX19fKuuzv7yyGFn7T4va5Zy4gP97dqEsjaUfrcPxLRmidnnZBfjyiV9tF6DAuu92Yt/KY6KmRXll+M/DPwM2zrJQSrFj8WFsW3RQ1LaypAbvP/gTQK3fzRaNgxszsO7HvaK29dWNePv+n65Pi0mF4yhS9+Vg2X93iNoadEa8ef9PYC2sbRosxem0PCz8dIuorclkwRsPzofZZJE0eN+ocfFMEX54f6OoLctyeOORhTDozdedV2kaHK7mlePrt9aJ2lJK8fbTi9EgxZm65buVFdfgk9dWS/6Myj8X1RFRuWNZ9flGMDLm/QFAo2Ww6nPxh/nar7fKyi0ArNM0Kz/fJGq34YddkJvrSBiClV+JD3yb5++3DnhyJloJsPK/20WjItt+PwyLySI5enIrq7/fJRp92LHiOAyNJpschOtQYN0vB0QjFnvWpaO+RmeTg3BdgqPYvPgw9I1GQbsjO7JRWVZnk4PQBMdR7FqThrrqRkG71IO5KC6osslBuFHjwI5slJfUCtqdSruMvPOl8jRYipTD53H1SoXNn1X5Z6E6Iip3JCWXy3Bia4asByBgzY84vikdZfnlvDbVpbU4sPq46DQGHxzL4eSBHOTnFPLaNNbqsHPJYdkalKO4kHkFuel5vDZGvQlbFx2Qfa5AgaJLZTh5OJfXxGJmsWnBAVmDdxNVJbVI2Z3Nu53jOGz49YBsRwcAGmp1OLwlk3c7pRTrFxyQ7RgCgFFvxt71aYI26xcdBsPIF2FZDjvWpArabFhyVLajDliDZ9tWpQjabFx+QrajDljznTavFNZQUVEdEZU7ku2/7lW81JAwBNt/3ce7fdfig/9LMpCJRstg2wJ+jX2rjsNikpZTIKSxdcF+3u1HNmdAVy9tvl9QY+EB3u2pe0+jpqJekQajYbBl0SHe7aeOXURZYbUyDYZg8++HebefP3UV+edLFV12QoBNvx/h3X71UjnOZuYrctoopdi8hH9KrrykFmlHLsh3PmGNimxafoJ3e32tDod3nxGNYglqsBTb1qRJzt1R+WeiOiIqdyRFF0sU74MAKM4rFdAoBVHwtgdY31yLL5Xxa1wqhUarUMPCCZ6Poktlims3sBYOVy/waxTnlSt6wwesEaTCi/zXo7gFQvgcR1GUxx8FK85XrkEpUJJfybu9SGCbLZQVVvNGh0quVsmbhruF2qpGGA3mZreVFtcqcqaa0OtMqK/TK96Pyt2L6oio3JHoG2xLjmsOluOgq+d/AOobDPJyEW6EAo11/CsD9A2Glhgv0FArcByNRkVTDU0IRVX0jUaQFiiGJZRboW80KnZ2AGvhMN5tIrkdUjEKrGox6FpGg+Mob76LXuAYbYXvmrTUcQCArrHlvq/K3YfqiKjckTi7OSma/wasRZac3ZwENBxBFA58hAAu7vw1H5zdnGxdyNIsrh78x+Hk4qAor6IJZzf++hhOLg6gEpffCmoI1OBwcnFokTdwJ4FCWo4tVGTLQaBeiaNzy2gwGgJ7h+aLm7VksTBn1+b35dRCxwG07PdVuftQHRGVO5Kgjm0U74NSILADf0n2oI5tFEddCMMgqENrfo0OrWExK5sf12gYBEfwn4+g9q1lJ8PeqBHaiV8jsJ2/YieB0TAI7ihwrs
L9FO0fsOaIBLfnL/XfEhqEEAS29eXXaNtKsQYIEBDsw1swLyDEu0XKznv7uvEWmvNr46koUbUJF1dHuAk40ioqqiOickcy8r7Bit/yKaUYed9g3u1DZ/ZV/KDlWA6J9/NrDJjYAw7OtpfTvhGW5TDqvkG823uPiYWrDZU4+TRGzx3Iu73b4Eh4+3so0uBYDmPu6c+7vUuPcASEtlI0zcRxFGPm9OXd3i4yEO0iAxVFwiilGCug0Sa0Fbp2b6tomokAGDurN+92H1939BjQUdmqGYZg7PSevNvd3J0wYEQXZatmGILRkxNaxKFRuXtR7w6VOxLfIB/0GtsNjMxET42WQd/xPdCqjTevjUcrdwyc0lt2MimjYRA/tCsC2/O/5Tu7OWHErP6yBwyGIYhICEe7qBBeG3sHO4y+d4BsDUIIgjsGILJnO14bjYbB2PsGKBrAfQO9ED+os+D3GHf/ANn7BwB3bxf0HhktaJN8b39FuUFOLg4YmBQnrDG7j6IIktZOg2EThHsNJc3opSiiRwAkTkoQtBk7tYeyVTOUYvRkYQ0VFdURUbljmfJcsvw6IiyHyc8midpNfHqU7AGDYzlMfnasqF3yI8OsYXQZYzjHUUz91xhRuzH3D4JGy8iKJlBKMfXpRNFQf+KsvnBwtJPtjEx+bLjokuyhk7rDxd1JdjRh4rzBoo3pBibFwquVGxiN7RqEAMlz+4n2tOk9rAv8Aj1lOYeEEIya1hOu7sLTGfF92yMk3FeWBsMQDEmKhbevm6BdZGwIOnYJlK3RZ3BnBATxvwyoqACqI6JyBxPVvzMe/exeWZ99/Mv70aVPhKhdh7i2eOa7B2Vp3PfvqUgYLvz2DQAhEW3w4k8PyVpuOe25Meg3TvyN0j/YB6/9+ggAYlvuAAGSHhiEYdP5pwGa8PJ1x1uLHgUhxCZnhBBg6NSeSLqff+qnCVcPZ/x74cNgNIxNzghhCPokRmPKY0NFbR0c7fHuwodgZ6e1SYNhCOL6dcTsZxJFbbV2Grz38wNwdLKzyeFhGILOcSF44CVx55NhGPz7+7lwdXO0yVFgNARtIwLwxOvJoraEELz1xQx4ernYrBEY6oPn3pkg+TMq/1xUR0TljmbiM2Pw6Bf3WtuZi0yhaLQMQIDHv7of458cJVlj1H2D8cx3D4IwRJoGgAfen47pL42TrDFoSi+8NP8RaDSMqEbTA3/mS+Nw39tTJGv0SozBG4sehcZOusaER4bhkY9mSHZeYvtF4N+/PwZ7e63ovH/TADxyZl/86/PZkjU6d2uLD5Y8Bgcne9HBr8mRGJAUh5e+mSu5CF67yEB8vPQxOEsYxJucrp5Du+CNH+4Xjbg0EdzOD58ueRTuXi6iDk/TuYnt3R7v/vwA7O2bXy1zK60DvfDZbw/Bx9dNgob1r3NMCD765X44Ssxd8vFzx+cLH4R/gKe443ZNI7xja3zyy/1wEViJpaLSBKEtse7vD8KWNsIqdzenj5zD6i824fA6ayVIQgg4jgPDMNZlpYSg/8SemPjMGET2Fo+ENEdu2iWs/moLDqw6fm3fBBxHwTDE2oGVo+idFI+JT41G9AD+XAchLp7Kx9pvtmPvyqNgLdbvz3EcGEJAYS1z3n1ENCY8PhLdhnSVpXHlbBHWfb8Lu5YfhcVkAaPRXD8eeq3DcNygzhj/yDD0HCEe0WmOwktlWP/TXuxYdhRGgwkaDQOOo9ZoCaxTY117tcf4eYPRZ3SsrBUepQWVWPfLfmxbehQGnQka7Q0axFqErVN8GMbdPxADk+NkaVSU1GD9goPYuuQoGusNN2tcO44OUUFIntsfg8d3k5V0WV1Rjw2/HcHmJcdQX6u7QcN6H7MWDmEdW2PcPX0xbEI3yY7OjdRWN2Lj0mPYtPQ4aqoaodEy1/NgCGPVCG7ri+RZvTByUoJkR+dGGur02LQyBRuXHUdlef3NGoSAZTkEBHlh3IxeGDUpAQ6Oza/GUflnYMv4rToiKncEJqMZhB
DYiTwgK4ursXPhPhReKIGuXg9nNycEdgjAiLkD4d3aS1QDAO9yxSaqy2qx6/eDKDhXBF2dHk5uTgho64fhc/rDN8hH8LNmkwWUo7AXeQjXVTZg17LDuJJTCF2dHo4uDvAP9sGwWf3QOpR/aShg7fvCsZyoRkOtDntWHEPe6atorNPDwckevoHeGDqtFwLb8S9xbdJgWQ72DlrBAV7XYMC+NSm4cKoAjbU62DvawyfAA0Mm9UBIR/6l0wDAWliYzaw170RAw6AzYv+GdORm5aOhRg87Ry28/dwxeHw3tO0c2CIaJqMZBzdn4UxaHhrq9LCz08LL1w0DxsaiQ1SwsAbLwWyyiGqYTRYc3pGNUyl5aKjVwc5OCw9vF/QbFYVOMSGCn2VZDiaTBY4iGhYzi2N7c5B5/BLqa/XQaBl4eDqj7/Au6BIfKvhZjqMwGs2iGizLIeVQLtKOXEB9rR6EIXD3cEbvwZ0Q071ti2io/P1RHRGVOx5KKTIPnMXGn/YgZVc2zNecBAcnO/QaFYekBwejS6/2ih5WlFJkH7uAjfP34/iOkzBdK2Vt72iHhCFdkHT/QMT0i1CscTb9MjYtPIjDW09er7hpZ69F3IAIJN87AHEDIhT3zTl/qgCbfjuEA5syr1cO1dppENO7PZLm9kfCoM6Kl0jmnS3GpsVHsG9TFnQN1iqrWq0Gkd1CkTynL3oNjVRcSj7/Yhk2LzuOPRsz0FBn1dBoGUREBSN5dm/0GRop6oyKUXilEptXnsCuDRmou1aRVqNh0K5zAJKn98KAEV1EnVExSoqqsWVNGravz0DNtS65DEPQtoM/xk3tgYEju8LRUdmy7fKyOmxZl4at6zNQXdUASq0aIWGtkDylB4aOjIKTwqXhlRX12LopE5vXZ6Cyov66RmCQN5IndsPwxCi4CBShk0JNjQ5bt2Rh44YMlJXVglLr9E1AgBeSx8VjZGIU3EUSc1X+fqiOiModTequbHz74mJr/xINc9vKGI2GActyCIkIwFOfz0HXPh1t1jh5OBf/fXEJrl4ovb6/G2nSbdPWF49/PAPxA22faslJy8PXLy3H5bNFghp+QV549N+T0WtElM0aF08X4suXluFC9lVBDZ/WHpj32jjRZaXNkX+hFF+8shJnM/ObvR5NU1SerVxx/wujMXyi7csxi/Ir8eUba3AqJU9Qw93TGXOeGoax03vZrFFeUosv3l6L9KMXwWgIOPbmRxthCChH4eLmiJkPDcLEOX1sdkKrKurx5fsbcfxQLhhCbltx1aTh5GyPqXP7Yfp9/Wx2Qutqdfjyo804vC/n2hTkLRrEWqzPwdEOE6f3xD3zBtnshDY2GPD1Z9uwb/cZAOBdOWZvr0XyxG544JHB0NrohOr1Jnzz353YuSMbHEebrQtECKDRaDBmTAweeWyorCkjlTsT1RFRuWPZsfgQvnhqIUAhWrCMMAQMQ/DSTw9hwHjpg9/+dan4z2O/glIqWi+iaVntv76cg+HTxFeONHFs5ym8/9B8sCwnXpPi2lj3+HtTMHYuf0GvW8k4nIu3H/gZFpNF8hLjB15JwuSHh0jWOJ12GW888AuMBrPkpdIznxiGOU+PkKxx/nQhXn1gPnSNRskaE+b2xbwXR0t2FC5fKMXL835Ffa1ect2LUZO64cnXkyU7CoUFlXjx4YWoqmyQfByDE6PwwtvjJUeSykpq8cLji1BaUnObI8VH7/4d8foHU2AnMbekqrIBLzy9GFfzKyXdV4QAcQlt8e+PpsBBYiSprk6PF59fiosXyyRqEER2aYOPPp4G5xYsLa/y12HL+K2umlH50zi6JRNfPLkQlOft6FYoR8FaOHz04I/IPJAjSSNjfw7+89h8cFIcBOC6s/LF07/h+I5TkjTOpF7C+w/Nh8XCSiuMRa1/3762Evs3pEvSuJB9FW/f/xPMRulOCAD88uFGbF9xXJJt/oVSvPHALzDoTTbVa1nyzS6sW3BIkm3J1Sq8+uB86GxsYr
h24WEs+2GfJNuK0jq88vAC1NXqbCq+tXV1GhZ8vUuSbU11I15+dBGqKuttOo6920/h/z7dKul+b6g34OWnfkdZSa1kJwQAjh06j8/f3yBJQ6834dXnluFqgTQnBLBGXzLTLuODt9dJOr8mkwWvvbpSshNi1aDIOVOEt99cA4tFWUsElb8fqiOi8qdgMpjx2WPzIaeYBqUUnzzyi+hDkLWw+OSJBZAb4/vsqYXXE1qFvsunT/9uHYxs1SHAl88vEexA26TxxQtLYTGzssrcf/PaStRdy1sQ4r9vrIHRYJJVZfTHDzeioqRW1O7/3t8IXYNRVtG4RV/vROHlClG7n7/YhtpqnU2DdxMrfj2ICzlFonYLv9uDivI62zUosGlVKrIz80VNlyw4iKKrVTZXMqWUYve2U0g5ekHUdtWy48i7VGbzcXAcxZGDuTi476yo7Yb16cg5U2jzNec4irS0y9i+XdoLgcrdg+qIqPwpHFiXioZanSwngXIUlcU1SN0p/IA6vuMUqsvqZA2slFLUVzfiyOZMQbvMw7kovlIhrxortbao37s2VdAsNysfl3KKZFd8ZS0cdq46IWhz5XwpslPzZA3egHW2aZtI5KW0sBopB87Jro7LaAi2LBc+jprKBhzYcVq2hkbDYPPKFEGbxgYDdm3Okn2uNBoGG1cIH4fRYMaWdemyrzmjIVgvchwWC4sNa1LlazAE61cLa3Acxdo1qbJfBgjBtc/fsRkDKn8AqiOi8qew4cfdivqUMBoGG37aI6zxyz5ZZbuvazAE63/ZK2izccFBZY3GCLB+/n7BB+3G3w4pWgFDKcWGBQfBcfyD8+alRxUdB8dRbFp8VLCz8NYVJ8AoWJHEsRTbVqXAcG0lUnNsW5emqG8My3LYtTETDXV6Xptdm7NgMlkUaRzck4Oqinpem/27z0AnEikTgmMpUo5eQHFRNa/N0cPnUVOtk6/BUWSfvIrLl8p5bdLTL6NEQqSMD0qBvEvlOCshSqVy96A6Iip/OBVF1cjNuKxowOBYDml7TkNXb2h2e0OtDlmHzsl+awWsD9qzqXmoKm3+QWo2WXB8xylFjcYoBfJzS1CU1/zDnFKKA5syFTUaA4CywmpcOlPIu33fxkxFxwEAtVWNOJN+mXf73s1Zihq/AYCu0Yis45d4t+/bckrx27PZZEHK4fP8GjuyZZXnvxGO43DswDne7ft3n1ZcV4NhCA7v5Z86ObAnR1FHYADQaAgO7OXP1zqw76ziZeQaDYN9EqaAVO4eVEdE5Q+nRuBN0FbqqprfV21lQ4tp1PJ83/oaneKBtYkanu9r1JtgNsp/+75Jo6J5DY7j0FDLHwGwhdoq/lwUKXkqUuA7VwBQU6X8uhNCBI+jmuc82oKGYQSjEVUV9YodKoYhqK3hP46qqgbl9y8hqKnhP47qmkbFTjSlFLUCGip3H3+4I1JYWIjZs2fDx8cHTk5OiIqKQmqq8By5yt0F24JZ8HxTAS2qwbOvltRgeY9D2UP8RviOg0pYOi1ZQ2BqhlUQnbppPwLnXemgB1iny4RWarSEBojIcbTQdbcI7KdFNOifdBysunLmn8Qf6ohUV1ejb9++sLOzw9atW3HmzBl89tln8PISLsWtcnfh6uHccvvydOH59xbU4Pm+bi2pwbMvJ1eHFit9zXccGg0j2sZeugZ/RUwX15apB+EqUHXT1U15RU6Oo4L7cRM4RskaLBU8DvcWuLc4SuEq0GTOvYV+h4Lnyt1R8fQPIQRuCqu5qvy9+EMdkY8//hjBwcH49ddf0aNHD7Rt2xYjRoxAu3bt/khZlTuM1qGt4OWnsCAdAQLb+cPDx7XZzV6+7ggI84XSMdwnwBN+wc33k3F0dkB4ZKCipFvA6tAEtfNrdhvDMOiS0FZR0i0AODrbo10kfx+WqJ7hipJVAWuJ+Yho/j4sMb3aKc4XIAxBZHwo7/a4FtAAgKhuYfwaPcIVD66UUkTF8R9HbLcwxRocK6wRExei+P
fBshyi40J4t0fHhCie/mFZDtEx/Boqdx9/qCOyYcMGJCQkYMqUKfDz80NcXBx++uknXnuj0Yi6urqb/lT+/mi0GiTNG6J4AB/38FDeaAEhBMkPDFK0f8IQJN8vXC47+f4BipJuGQ3B6Dn9BHudJN/bX1HSrUbDYMTUnnBy4Y9IJM/pqyhZVaNhMCgpFu5ezUeoACBpZm9F0xoaDYPeQzqjlb8Hr83Yqd0VaTAagtie4QgM5W9mOGZSgqLBlTAEEV0C0b4TfxPAUcnxsvcPWKeXgkN9EBXLP4CPGBVtc5n2W/H1c0f3nvwvkkOGRMLJSVkfHw8PJ/TrL6+Dtsrfkz/UEbl06RK+++47dOjQAdu3b8ejjz6Kp556CgsXLmzW/sMPP4SHh8f1v+Bg4a6XKn8fEuf0VzTlYO9gh6EiJdiHTesFrZ38XhUMQzBihrDGwHHdBAd4MSgHjJrVR9Cm94gouHvzD/BisCyHMbP7CtrE9+sA3zaeijTGihxHZFwIQtr5yX4LZ1kOSTOFr0d4RAA6RQXJdnI5lmLcDOG+Nm2CvBHfU35UhHIU46f3FLTx8XVD34GdZEfCKAUmTOsp+Btzc3fCkBFdZUeQCCEYPzlB8Dw4Odlj1OgY2cfBMARJyfGSy9Wr3B38oY4Ix3GIj4/HBx98gLi4ODz00EOYN28evv/++2btX3nlFdTW1l7/Kygo+CO/nsqfiLe/ByY/OVL252e9mAQXkQ6drh7OmPncaNkaU54YAU9f4SkkRyd7zH1prDwBAiTd1x/+Qd6CZlo7DR54OUmeBCEYNrk7Qtr7C9oxDIN5L8s7DsIQ9B3RVXBapum7PPjCKFkaDEMQ36c9YnqGi9re/8wIyBn2GA1BZGwwevQXb6o499EhYBhis1PFaBi0i2iNfkMjRW1nPzAAWq3GZg2NhkFQqA+GjooWtZ0xpw/sHbQ2O1WMhsDP3x2jJTRVnDK1J1xcHGzXYAg8PZ0xfkI3mz6n8vfnD3VEAgICEBl58w+wc+fOyM9vvtyxg4MD3N3db/pTuXu4940JGDixO2wdNUbdOwBTn5E2oE17OhEjZwlHA5pj8MTumPOStME/+b4BmDBvkG0CBOg1PAoPvTlBkvmIqT0x8ynpjeUA68Af06c9nvpgqiT7/qOi8cBLY2zTYAgiooPxwmczJNl3HxCBx95IvvZhaRoMQ9C2Y2u89tUsSVG06IS2ePbfE0EIJA/iDEMQGOKDd76eLakhXaeuQXj5/ckghEiOvjAaAl8/d7z31SxJXWXbtvfHWx9NBcMwkgdxjYaBp5cLPvpqNpwkJCAHBnnj3Y+nQqO1TcPN1QkffTFTMBm2CT8/d3z00TTY20t3eDQaAicne3z8yXR4CUz3qdyd/KGOSN++fXHu3M1FfHJzcxEayp9QpXL3wjAMXvxxHiY+Ntz6/wVCxIyGASEEs15KwlOfz5E8rUMIwdOfzcKMZ0cDRFwDACY/PhzPf3uv5C6shBDMe3MC7ns56XqHYDGNMXP64fUf75fchRUA5jw7Co+8PQGMRnjwawq1D52YgH//+hDsbGilPvnBgXjmgynQ2mkEz3HTcfQd0RUf/fYwHByl5wGMnd4LL306DXZ21972eWSajqNbvw745LeH4GzDFNiwpFi8+eVMa+6NBI2obmH4YtFDcLNhJUn/oZF47+v/Dfh8p6tJo2PnQHy9aB68W7lJ1ujeuz3+8+2c6yuO+K570/UIDffFN78+CL/W/Hk0txITF4ovvr3n+ioavvu3aXqlTZAXvvn5PgQFC0fybqRT5zb477f3wNvbVVjj2r/7+rnj2+/mIjy8+SRulbsbQv/Aov4pKSno06cP3nnnHUydOhUnTpzAvHnz8OOPP2LWrFmin7eljbDK34vCi6XYsmA/ti48cFu1VFdPZ4y9fxBGzR0I/xD+JEIxSvMrsWXRQWxZdOC2Al7Obo4YNacfRs8dgDZhvrI1youqsXXxEWz+7RDqbimK5ejigJHTe2
HMnH4IFpkqEaKqrA7blx/DxkWHUF1+c7E1e0c7jJjSA2Nm90VYBH8ypBg1lQ3YsSoFG38/clszOzt7DYaMi8fYWb3RvkuQbI36Gh12rU/H+t+PoLSw5qZtWi2DgWNikDSjFzpGBcnOJ2qsN2D3pkysW3IMRfmVN21jNAz6D++CpGk90SUuRLaGXmfEnq2nsG75ceTfUu6cMAR9B3VC0tQeiOkWJlvDYDBj385srFtxApfOl96sQYCefTsieUp3xHeXn7tiMlpwYF8O1q1Kwbmc4tu2J/QIx7hJCeiuYGWS2czi0MFzWLs2Faezb6/0GxsXigkTuqF3nw4tsvpJ5c7BlvH7D3VEAGDTpk145ZVXcP78ebRt2xbPPvss5s2bJ+mzqiPy94JSiovZV1GcXwmDzghnV0cEtfNHaMfWvJ8x6k04l56H+upGEELg7uWCjt3aCq4qyTtbhMLLFdA3GuHk4oDAsFZo26kNr73JaEZuxmXUVTWCAlaN2FA4CISyr5wvRUFeOfSNRjg62yMg2AftOgfwDiwWM4tzmVdQd616pZunMzrGhMDRmf+tviCvHFculkHXaISjkz3823iiY5dAXg3WwiI3qwC1VQ1gWQ6u7k5oHxUMF4FweWFBJS6fL0NjgwEOjnbwC/BAp678Az3Lcjh/6ipqKhtgMbNw9XBC+y6BgjUwSopqcDG3BI0NRtg7aNHK1w2R0cG8AyTHcbhwpgjVFQ0wmyxwcXNEu85tBGtplJfV4fy5YjTUG2Bnr4VPK1d0iQrmHbwopbhwthhV5fUwGc1wcXNCeAd/ePIs/waAysoGnDtbhPp6A+zsNPDydkFUVDDvShNKKS6dL0VlWR2MBjNc3BwR2s4PPgIRkOrqRuScLUJDgwEajQZens6IigoWTM7Mu1iG8tJaGPRmuLg6IKStL3wFlsPX1ulxOqcQ9fUG69SNhzOiugbBQSBSduVyBUqLa6C/phEc4gN/gShLfYMBp88WobZOD4Yh8HB3QnRkEBwFImUFBZUoLq6BXmeCs7MDAoO80KYNf00pnd6Ek2euovbai4qHmyOiI4Pg3EI1cFT+WO4oR0QJqiPy90DfaMTedWnY8OsBXMktuW17p7hQJN83AH1HxcDeQd6qFqPBjP2bMrFh0WFcbKaHSvsugUie2w8DRsfYNG1wIyajGYe2Z2PD4qM4d/L2ROnQDv4YN7sPBo+NhaOzvIeh2WzB0T05WL/kGE5nXLlte1BYK4yb2RtDxsbARWZRJ9bC4tiBXKxffhxZqXm3bQ8I8sK4aT0xPClWdkEwluWQevQC1q9IQeqxi7dt9/N3R/LUHhiZFAsPmcW6OI4iPeUS1q9OxfEj52/r6OrTyhXJExMwKikWXt78DoYQlFJkZl7B+rVpOHwo97Zlup6ezhg3vhtGj41FKxumWG7VOH26EOs2pGH/gbO3VZx1c3NEclI8xo6Jgb+f9CmWWzXO5pZg3aZ07N6Xc1uFVRcXBySNikHy6Fi0CfCUpQEAuRdLsX5LJnbsPQ3TLVV1nZzsMHZ4NJJHxSJEJCFbiEtXyrFuWxa27MmG8ZZmg44OWowa3BXjE2MQHio/kqnyx6M6Iip/GmdS8/D2/T+hvkYHQtBs+2+GIeA4Ct82nnjvt0cQ0oE/QtIc57Ov4s0HfkFNZQMIQ5qt49H0716tXPHu/AcFi3k1x5XzpXht3nxUltbxaxACSq3Rjne+m4vOAjUbmqMovxKvPrIAJVerr5+T2zWs/dWcnR3w1lezENNDfNXIjZQV1+DVJ35DweUKXg0QawqFvYMd3vjPNHTv28EmjcqKerz+zFJczC0BoyG8NU8IIdBqGbz87kT0H9LZJo3aGh3eeGk5crILwWgY3ponhBBoNATPvjIWwxPFV43cSEODAW+9sRqZGVeg0RDekvRNkZ2nnhmJJBvrfej1Jrz7/nocO34RGg3DW/OEYaz31iMPDcHkSd1tmtIxmiz48NPN2HfwnKgGx1HcP6cf5s
zobZOG2czi0293YNvubEENDUPAchQzJ/XAvHsG2DRtxLIcvv5lD9Zszby+HyGNiaNi8dQDQ9QpnTsU1RFR+VPIPJyLN+75ARzLSSr4xGgYODja4bM1T6NtZ/6plBvJSb+Ml+f8AIuZlaxhZ6fBR78/gk4SHYWLOUV4fvYPMBktkop8MQwBo2Hw/s/3I1qio1CQV45/zfkBukaTJA3CEBBC8PbXs9BDYnGnksJqPH3vT6ir1UvTINb/efWDKRgwvIskjcryejx53y+oqqyXVHStyTl9/s1kjBgbK0mjproRTz+8ACUlNTYVdnvi2USMm5Qgybah3oCnn1yE/PxKm4qVPfjQYMwQqW3ShF5vwrPPL8H5C6U2acya2QcP3DdAkq3JZMHzr63AqdOFNvUPmpgcjycf4S8QeCMWC4tX31uLE+l5zb5o8DFqWFe89FSiJA2W5fD2Z5uw/2iu5EbHBMDA3h3xzvNJiqvSqrQ8tozfqiupIourl8rwzgM/g5XohAAAx3IwGsx4bfZ3kjrylhZW440HfpHshDRpmE0WvPHAzygvqhG1r66ox2sPzofJaJZcaZTjKFiWw1uPLkTRlQpR+/o6PV556FfJTghgLYJFOQ7vPbsUec1Md92KXmfEy48tlOyEAP9rfvfx66twNvuqqL3JZMErTy1GdWWDZAehaeD6/L2NyEq7LGrPWji8/sJym50QAPjm8204fuSChO9E8dabq212QgDg5x/3Yt/eM5I03v9wg81OCAAsXnIEW7edlGT7ny+32eyEAMCaDelYsyFdku3XP+6x2QkBgK27svH7imOSbH9afAj7bHBCAGvkcN/RXPz4+0HbvpjKHYfqiKjIYuX/7YbJaLG53DnHcqitbMDm3w6L2q799QD0OpPND3KOo9A1GLFuofgDauPio6iv0dk86FGOwmS0YNV8cY1tq1JRWVZvc0l1Sq3dVJf9vF/UdtfmLBRfrba9bDu1nq/ff9wnanpg9xlcvlgmu6T6gu/3itocP3oe53KKZJW4JwT45fs9ooNyZsYVZGZckV22/acf94p+9uy5Yhw5ekG2xs/z94ue58tXKrBr7xnZnZTnLzoIo9EsaFNcWov1WzNtdkKaWLTiGBp1RkGb6ppGLFufIk8AwLINqaiuaRQ3VLljUR0RFZupr9Fhz9pU2b1KOI5i46JDgi3kDToTtq84IV+D5bBt+XEY9CZeG7PJgs3LjsseLDiWw6516Wio0/PasCyHDcuOyR4sOJbDwZ2nUV3ZwGtDKcX6Zcdll1LnOIqUI+dRUlQtaLd++Qn5pdQ5itNZBbh8sUzQbu3KFPml1Kl1hcnZM0XCGmtSFeUVlBTXIiP9sqDN+g3p0ChoXFhd3YgjR88La2y25lLIpVFnwr6D5wRtNm7PUjTtYTJZsGPvaUGbTbuzoaRPHsdx2Lw7W/4OVP5yVEdExWZ2rToB1iLPQWiitrIBR3ec4t2+b2MGDDp+J0IKugYjDmzO4t1+ZNcZ1NXoFGlYzBbsXs8f4k47ch7lt9TlsBVKKbavSeXdnp2Zj4LLFbLfWgGAIQRbVvNrXMwtwbkzRQob/jHYtCaNd/vVgipkpl1W1GBOo2GwQeBclZfX48jh88oa5TEE69fxH0dtnR579p7hTX6VqrF2Pb+GTm/Clh0neRM6pUAIwZoN/BpmM4sNW7MUd9NdvSGd1xFnWQ5rtmTIdtQBqwO6ZkuGomuq8teiOiIqNnM65fYlobai0TI408zS0ibOpF9W3KZeo2FwRiAv4UzGZWi0Cn8ChOBM+u3LcP+nka9Yg3IU2QIapzOuyG4y1gTHUZwUOFfZmfmKW8hzLCeYJ3LmlPLeUizLIauZZdFNnM2xPZ/iVjiO4mRW820qAOD8+ZLbls/K0Th9+vZl6k3kXa6A0Wjh3S4FSinOnS+FhWcAv1pUjfoGQ7PbbKGgqBoNjc1Pz1RUNaCiij/aJ5XyqgZUVCvfj8pfg+qIqNhMfU2j4oc5KNAoMK
XRUCc96ZIPjuMEp00a6wyKj4NyFPW1AsdRr/xBDkAwctPQYACj1EuANamWV6PeILkEvhBC16OhwSh76udGGhv4cxIaBLbZgk4gWtdSGmYzC5OpeWejobFl7ivAev/8VRr1PA6KHOpb6Lyr/PmojoiKzdjSy4QXYu0yy4e1C6myQYkQAjt7fg2NVgMiq2/rzQhpaLVMCygIn3M7rcam1QZ8CF0POzsNaAuoiGoonAYArOecf1vLtJcXyjGxs2u5xyqfjlbTMscBWO+fZjVa6FwB/Ndd24I1QOyURjdV/jLUK6diM16+boqnTSgFPAUqVXq2clM83UAYAg+Bkt5ereRV47wRjYaBp4/Acfi4Kp5jZxgCb18RDQX5CIDVaRNqzubprVwDgLBGC3Vd9fLm34/QNlsQqhbr5dkyGm6ujryOiJeXvGq1t2Jnp4EzT5VgL5kVcW+FIQTuPBV8vWxoOiiGp3vL7Uvlz0V1RFRsZkBSnPJpE5bDgKQ4fo0xMYoTYlkLh4GjY3i390+MUpzgxrIcBoyO4t0+YERXxY4Ix1EMTOTX6DekM2+3WalQSjFEoDJpr/4dBaMZUiAEGDKyK+/2hB7hgr1KpGkQDB3Bf66io4Ph5i6vdH4TDEMwbDj/cUREBKCVQidXwxAMHRrJu71taCsEBXopytvRaBgMGdCJN/LY2s8DEe39FUUmNQxB357tePvceLg7IT6Kvy+RFBiGID4qBB4C/ZBU7mxUR0TFZroN7ARfBf0qGIYgMqGtYLfYLt3CENzOT/aDlhCCsIjW6BQXymvTrnMbREQHy89LIIB/oBfierfnNWkT4oO4Xu0UPWg9vV3Qe1An3u0+vu7oO6iToiiVs4uDYHVVdw8nDBnZVdGyVzs7LYaN5nd2nJztkZgUqygSxjBAYlIs73Z7ey2Sk+MVXQ+Oo0gScKI1Ggbjx3VTNICzHEVyEn85eUIIJo3rpmilFMtyGC+gAQATk+IV5VGxHMWEscIak0bHK3LWOY5i0hj+66Fy56M6Iio2wzAMku8fIPtBy3EUyff2F7QhhGD8vf1kZyVQSjHunn6i33HcnD6y8xIIgHGz+4gmcY6b2Vv2g5YwBEnTe0IjMl+fPK2n7CgVwxCMnthNtFlg8uTusiNIjIZg+BjxRn5JE7rJngJiNAQDBkfCS2SKZ4yAEyGqwRD07NVOsDMtAIxOjJFdR4RhCKKighAW2krQbsSQLnCQ2USSYQjahfuhk0BnbAAY3K8TXF0dZL0QMAxBm9aeiI8WbrXQp3s7eHu6yEq4ZgiBj5cL+iS0s/0LqtwxqI6IiiyS7x2AyIS2Nr+FE4ag/5gY9JfQd2TklB6I79vR5rdXhiFIGBiB4RL6jgwcHY0+w7rYHBVhNARdurXFmJm9RG17DozAsKRYmx/mjIZB+05tMGluP1Hb6G5hSJrSXZZGUFgrzHxwoKhtx8g2mDa3r20CsJ4r/9aeuO/RwaK2IaGtcN9Dg2RpeHu74uEnh4na+vt74LEnxO1u02AI3Nwc8fQziaK2np7O+NfT4nbNaTg52eP5Z0eL2rq4OOCV58TtboUQAnt7LV59frSoo+5gr8Ubz42FrXN/hFgjQ2++MFZUQ6th8PZzY0CIbSoE1ufJW8+OadGkV5U/H/XqqcjC3kGLt355EO27Bkp3FAiQMLATnv9itqSloBqtBq99MweR3cIkR18IIejaPRyvfj1HNIoAWKM7L34yDfF92ksexAlD0KFLEN76dg7sJawgIoTg6bfHo/cQ/jn/278XQWg7P7z7f/fA0an5ZMJbNR59YTSGjOLPiblNQ0PQJsgbH357j2ikoon7Hh2CsZO62aTh6+eBj76ZDXeJyY8z7umLqbOkNZZr0vDycsF/vpoNH4Fk2BuZMLE77r1fWmM54H9OyCefzRSNhjQxKjEajz4yxCYNJyd7/OejaQgO8pb0mUH9O+G5J0dYB3EJ9y/DEDg6aPHRO5PQrq2fJI1eCeF49V+jwDBEso
adnRbvvzYBnTvyT7/eSFzXEPz7hWRoNIyk37q16zKDfz+fhLiutnXBVrnzULvvqijCaDDhl/c3YNuyY7CYLNaplBvuKMIQUI7CycUB4x8YiFnPjJTkINyIyWjBws+2YtOSozAZzSDATXPjTR1eHZzsMHZWH8x9NtHmJcashcVv3+zC+t+OwKAzgRBy09x407NRa6fFqKndcf9zo0SnMm6F4zgs/Wk/Vi88BN21mhm3TgsRQqDRMhiWHIeHXxgFJ2cHmzQopVj122Es+/UgGuoM11u/36TBEDCEYFBiFB57YRRceVY0CGlsWJmC3385gNpqXbMaTc5p/yGd8fgLo2StiNm6MQMLftqPqsoGXg1KKfr0j8ATzyailcDKIj527zqNn3/ci7KyOl4NjqPo0bMdnvlXomQn5EYOHDyHH37ai+LiGmg05LaKq00a8XGheOapkQiS6ITcyLETF/HtT3tRcLUKGg1z2xSahiFgOYrorkF45vHhCA/ztVkjPesKvv5pD/KuVEDDMGC5WzSuHVtkRAD+9chwdGzvb7PGqbOF+PKn3ci9VNbsuWo6to7h/nhm3hBEdQq0WUPlz8GW8Vt1RFRahIZaHXavTsHm34+g9GoVTEYzHB3tERjui6S5/TFwXLykN3shdA0G7Fmfjs1LjqL4SiWMRjMcHOzQJrQVxszqjcHJcXCW+GbPh0Fnwr7NWdi09BgKL5fDYDDD3sEOrQO9MHp6TwwbFw8XN2UaRoMZB7afwsblx5F/sRxGgwl29lr4tvbA6MndMWJ8PNwULms0mSw4vCcHG5YfR96FUhj0Vg0fXzeMGt8NI8fFwdNb2coOi4XFkf3nsGFlCs6fLYZBb4LWTgNvH1eMTIrFqPHxkiMUfLAWDsePXsD61Sk4e7oQer0ZWi0DTy8XjBgVjdHj4uDnb7tzcCMcR5GScgnr1qQiO7sAep0ZGg0DD08nDB8ehbHJcQhQkJwNWJ239IwrWLc+DZlZ+dDrTdeiLE4YNiQSSWPjZDkgt2qczL6KtRvTkZp+GTq91aF2c3XE4AGdMG5MrGjeiRSN0+eKsG5zJo6lXkSjzgRCAFcXRwzs0xHjRseivcRIixBnL5Rg7dYMHDxx8XrTPBdnB/Tv0Q4TRsWhU3vh3BaVvx7VEVFRjMloxoEd2cg8dhH1tXowDIG7lzP6DIlEQr+OoqsnKKWiIVaz2YLDu3OQeuQC6mp1IITA3dMZvQZEoNeAjqKREykaFguLowfO4cSh86irtVYndXN3RkLvdug7uDPsRJakStFgWQ4njl7AkYO5qK3RgXIUbu6OiO8ejgGDO8NeJKFQigbHUaSmXMLB/edQV6uDheXg5uaI2LhQDBoSKbrsVYoGpRQZmVewf99ZVNfoYLGwcHV1RFTXIAwb1gVOIo6kVI1Tp69i974cVFU1wmy2wNXVEZGd22DE0K5wdRGOAEnVOHOuGDv3nkFlVQNMJgtcXR3QqUMAEod2hZuIIylFAwDOXSzF9r2nUVZZD6PRDFcXR3QI98OoIV1F62NI1bh4pRxb9mWjtLweeqMZrs4OCA9phbFDouAjEmVqerSL6VwurMSmfdkoLq+D3miGi5M9wgJ9kDSoK/wEauTYopFfUo1NB06jsKwGOoMZzo72CAnwRNKArmjjK+xIStUoKq/FxoPZyC+pQaPBBGdHOwT5eSKpfxcE+3sJflal5VEdERXZVFc0YM2iQ9iyKgWN9QYwGub6aoymsGgrf3ckz+iN5Bm94MhTDEmIuhod1iw+ik0rU6xOTjMaXj4uSJraA+Nn9pKcv3AjjQ0GrF16HBtXpqCmuvGmcHXTf7t7OmPspARMmNEL7h621yDQ60xYtyoF61eloLKioVkNVzdHjB0fj4nTesoqpmU0mrFhXTrWrU5BaWndTRpNIX1nZ3uMHhuLydN6opWMCITZzGLjpgysWZOKoqKaZjUcHe0walQ0pkzpgdYyIhAWlsPmbVlYvS4V+QXW6QOO40Dp/z
Ts7bUYOawrpk3ugaA2tg8cHEexbXc2Vq5LxaXL5bdpUEqh1WowfFAkpk3sgbAQH5s1KKXYeSAHKzak4tzF0ps1CAGFVWtov06YMaE72ofZHh2glGLv0Vws35SK7Nzi244DFAABBvXsiJnjuqOzzOjAwbSLWLIpFZlnr0LDEHDUqs00ZY1SoH+3dpiZlIDojvKmQI6dvIzFW1NxIju/WQ2Oo+gTE4bZY7qjW+dgWRppOQX4bWsKjp68bE06pwB3zdFjiHUJcY8uIZiVmIDeUWGyNFRsR3VEVGRx+XwpXn34V9RUNYguoSQMQdsOrfHe93MFq2XeSuGVSrz86EJUlNaJLmllGILAUB988H/3wM+GufnS4hq88sTvKL5aJUnDr7UHPvhmDgKDpYfGKyvq8cq/luJyXrno8l+GIfDydsHHX81CaFvpc/O1NTq8+vJy5J4tFq0XwTAEbu5O+OiT6eggsiTzRhoaDHj9jdU4da3hnJAOwxA4O9njww+noksX6QOTTm/C2++vw4nUvOv5PHxoNAT2dlq8//YkxMfy14C5FaPJgvc+2YQDR3IlaWg0Grz76jj06i592afZzOLjb7dh+74zt+UQ3abBEBBC8NZzYzGoT4RkDZbl8Pkvu7FuR1azOSu3alAArz6WiFGD+GvA3ArHUXy79ACWbEqVpMFRiufvG4aJw6UnQlNK8fPao/h57TFJGixH8eT0AZg1Wnr9FUopFm9Lw9fLD1zfBx9N32He+N54cFwvxe0jVMRRHREVmykqqMRT07+DrtEouR4Fo2HQJtgbXy5+BK4SqhqWl9biyVk/oLZGekM7RsOglZ8b/vv7w/CUEFGoqWrEk3N/QkV5vWQNjYbA3dMF3yyah1Z+4vdZfZ0eT877FSVF1ZJbvTMaAhcXR3w7/34ESHjb1+tMeOrxhbhyuUJyDRKGIXBwtMM3392L0DDxXACj0Yxnn1uKc+eKbdKws9Pg669mo0MHcYfHYmHxwmsrkHWqQLKGdUUEwZf/mYGukUGi9izL4fX31uJoyiXJxbesq0wIPn13KrpJcHg4juLdLzZh96GzkouINY11H7wyAf168Be9a4JSiv/8sBMbd520uX7O28+MwfB+nSXZfv37PizdnGajAvDyg8Mxbih/Qbob+XnNUfy09qjNGk/PHIiZo6Stylq8LQ1fLdtvs8a88b0xb7z0VVkq8rBl/FaX76qA4zi89fhv0NvghADWMu1F+ZX4/M01oraUUvz72WWos8EJadKoKKvHR6+ukmT/waurbHJCAIBlKWprGvHvF1dIGsg+eX8jigulOyEAwLEUukYD3nh+uSSNr77YZpMTAlgHS6PRjFdfWi6pPP73P+zF2bPSnZAmDbOZxcuvrOTtDHsj8xcdQuZJ6U4IYL1XOI7i5TdXo1FCd9Ylq47jaMpFmyqAUmr9e/XdNaip5e9s3MSaLRnYdVC6E9KkAQBvfrIBpeV1ovZb9p3GBhlOCAHw7n+3Ir+oStR27/FcWU4IAPznl13IvVwmanfs5GVZTggAfLVkP7JyC0Xtss4XynJCAOCndUdx9NRlWZ9V+WNQHREVpB+9iIK8cllVMzmO4sieMygprBa0O5NVgNwzRfI0WA4Zxy/hykXhh+Cl86XISrssq8Iox1KcO12Ic2eKBO2Krlbh6MFcWZVSWZbiyuUKZKRdFrSrrGzAnl2nZWlwLEVpSS2OHT0vaFdfb8CWLVmyyndzHEV1dSP27T8raKc3mLB2Q5psjcYGA3buOS1oZzazWLkuVVapc0opjEYLtuw4Jfpdlq47YbsArM4Iy3LYsCNL9LssWXdCVgVTek1ozbZMUdvfN6XIqmAKWCM8K7eni9ot3poqu4S+hiFYtk1cY8m2dGhkajAMwZJtqbI+q/LHoDoiKti49KiiPiUMIdi6KkVYY8UJRX1KNBoGm1YKa2xanaJYY6OIxub1GYr6lGg0DDasEn4Ibt2UKXv/gPVBu26NsMb2HadgsbCKNN
auFdbYsy8HeoNZtgYIsHq9sCNz8Nh51NbpZUtQSrFmU7qgg3wiIw9lFfWyNTiOYt3WTJjN/Of75NlCXC6skt07huUoNu4+Bb3BxGuTe7kMZy6UgJMpwnIU2w/loK7BwGtTUFqNE9n5slsasBzFvrQLKK9u4LUpr27A/vQLgjkhQnAcxfHT+bhaViPr8yotj+qI/MOpqqjHiQO5irrpchzF5hUneAeMxnoDDuw4rajTLcty2L4+A2Zz89MBJqMFOzdlKdbYt/0UdDzTARxHsXl9uqIGXSzL4cihXFRXNfLabFSowXEUGelXUFJcw2uzYUOGooZpHEdx7lwJ8vLK+TU2ZyrqDkspUHC1Cmdy+KNUG7dmKnIMAaC8oh7pWVd4t2+4ljiqhLoGAw6nXODdvnHXKdlv+E0YjGbsOZrLr7FXuYaF5bD9cA6/xv7TijUAYPPBM/zbDvNvk4qGIdhwIFvxflRaBtUR+YdTWlitqLtmEw11et4BvLysTpGD0ITRYEZtdfPz+dVVDTAZxXMWxLBYOFSUN//229BgQGODeM6CGJSjKCup5dFnUVnJ/zZoC8UCjkhJCf82Wygq4p+SKyyqVuTsXN+PwHEUSFgZpVQjv1C5BsMQFJU2f80B4EpRlew3/Ca0GgZFpTW82wtKahRraBgGhQIaV0trZEdcmiAgKBSIVhSUVitycAGAo1AjIncQqiPyD0ev4w/l2rwvHkfE0IIafM7O3+44dDzHoW9Jjeb3xbIcLBKSWZVoAIChBRxDQPic6I0Kpn6uwTAEOiENJdNLTRpEWENomy3o9PzftVHfAk40BXQC0z86g0mx88lRKqihN5qh1PeklKKxBX9rKspQHZF/OE4yCpLxwVdeXU7RMz5ceCpvOotU5LQFvn216Lni01BYBv9GXHi+r0bDQGtjvx8+hM67o6O8FvW3aQicdydH5eeL4yjvuQIA55bQoBTOAtdWSN8WnJ34K+y62ti3qDkIET4fLk72imt0MAwR1HBysJOdcHtdgxC4OrXcM0NFGaoj8g8nIMjbWo1QIR5eLrwDta+/O7QipdSl4ORsDw+estae3i5wFHgIS8XOTsNbS8TF1RFuEuqliMEwBK15epdotRr4SahlIoU2Ar1LggJbpuS1UJfYkCCfFikcFRzIrxEW4qM4f0OKhtK8B46jCBaoH9M2SLmGheUQ0ob/OELbeCvWYDkOIQEC1zzAS/m0CUcREsB/rkIDvJVPJxMgpLWnsn2otBiqI/IPx9PHFX0Gd1a02oRhCMZO68E76Li4OmJwYpQyDQ3BqAndeB0ae3stRibHKV41M3R0NK9DxTAEY8fHK141039QJ3h48vciSRofr2gAZxiC7j3CBR2a5OQ42ftv0ugSGYgQgTLp48bGKRowCAHahrVChECl2ORRsYryNwiA1n7uiI3ibyU/bmSM4twKLw9n9O4Wzrs9eZhyDWcnewzq2YFfY3CUYg07rQYj+nbi3Z40oKvyfBoCjOnHXyl2TN9Ixc4O5SiSB0Qp24lKi6E6IipImtFLUTIpBZA4qbuwxrQeijQ4lmLMlARBmzETExSvmhk7Wfg4xoyLU7xqJmmS8HEkjo4Bo+CXyXEU4yYIV6ccPrwr7O3lT51wHMV4EY1BAzrBRcF0AKXAxGThkt99eraHl4BTJwoBJiYJO5fxUaEI8POA3LGPYQgmjIoVnA6L7NAa7UJayR5gGYZg3LBoODjwRwXDg1shumMbRTU+Rg3oIjjF08bXA72jw2RHXjQMwZAeHeEt0DTQx8MFQxI6KNLoHR2GgFZqte47BdURUUFMj3C07dhaVjSBYQgGjIyCr0gvmIgugYiMCZZVr4TREPTo1wFBIi3MQ8N9kdCrnawHLaNh0DUuBB06BQja+Qd4YsCQzrI0NBqCdh38ER3L//YNAF5eLhiRGC0rKqLREAQGeaF7T+EeKi4uDkhOipM18GkYAl9fN/Tv11HQzsFei8kTuskawBmGwMPDCUMHRw
raaTUMpk/sIUPBmifg7OSAxGHCb8YMQzBrUg+bK54C1jLydloNkkYI92khhGD2hJ6yEj0Jsa5mmZAYK2o7O7mHIkd6ykjxSNrs0Qnya3xQihmJ8aJ2MxO7KaqHMnuU8MuAyp+L6oiogBCCd76ZA1d3J5scBUZDENrOD0+/NV6S/RufToN3K1ebHB5GwyAg0Asvvj9Jkv1L701C60AvGzUIfFq54o2Ppkqyf+6VsQgJa2WTM6LRELh7OOPdT6ZJcjCeeHoEOka0tkmD0RA4Ozvgw/9Ml3T88+YNQnRUsG0aDIG9gxYffTRVUkRlzsy+6NE93KbkQoYh0Go1+M97UwUTPJuYOqE7BvWPsMlxI4SA0RB89PYkeEjI+0keEYMxQ6Nscqqs/WyAD14Zj1berqL2I/p3xtQx0vqs3KhBKfDOv8Yi0N9T1L5/t3a4b2IvmzSaeP2RRLQLFu9hlNAlBE9M7y9L48W5Q9GlnfDLAAB0CQ/Ai3OGytJ4cmp/JHQWfhlQ+XNRHREVAIBfgCc+/+0htPJ3Fx+Yrj1gO3YJwsfzH5S8YsW7lRs+n/8AWgd5iSfIXtMIa+eHT3+5X3KSqLuHEz798V6EhPtKetsnhCAw2Aef/3y/pKZ6gHWlyKffzEGHTgHXBxtBDYbA198DX3w/F74SE1EdHOzw8acz0DU6+Nr3FLZnGAJvL1d8+c0ctJGYiGpnp8EHH0xBQkLbaxrCIgxD4O7uhC+/mIW2YdK6CGs1DP79+nj062PNXRBzSBiGwNnZHl98NB0REprqNX3m9efGYsS16InY/athCBwdtPjk31MQ3UW8qR5gPTfPPzYC4xJjJGvY2Wnx0asT0SOurSQNAHhy7iDMHNddsoaGYfDec0kYKJAbcivzJvfBg5N6X9+HmAbDELzxaCIS+wtHp25k9ugEPDl9gGQNQoCX7h2KiUOld/idNCQGL94z9FpESFwDAJ6aNkCNhtyBqN13VW6ivlaHDUuPYeOy46ipbIBGy1xvc08YAtbCITDUB+Nm9kbipATYC8xJ89FYb8CmVSlYv+w4Ksvqm9VoHeiJcdN7YvSkBDjKWNJq0JuwaXUqNqw4gdLiWmg0zPXESUIIWJaDr787kqf2wJiJCXBxtT2XwWS0YMuGDKxbmYLCq1XNanj7uCJ5UgKSJ3aTteLGbGaxfWsW1qxKQf6VymY1PDydkTwuHuMmdoOnpzRn6kZYlsOOHaewek0aLl0qa1bDzc0RSWPjMGFCN/j4iL/d3wrHUezaewZr1qfhbG7xdQ1KrQMuy3JwcbbH2FGxmDS+G/x8bf+9U0qx99A5rF6fhuycQmtUiFJwN2g4OtphzIhoTE7uhjY8K5fENA6duICVG9OQkV1wfYBr0uBYDvb2Wowa0hVTkrohRGA1jhBH0y9hxeZ0nMi6DIYhICDgKL3Wzp6DVqNB4sBITB3bDeESohTNkZJ9Bcu3puNIxiUQQkCajoNYtTQMwbA+nTB9VDwi2vrL0sg4dxXLtqXjQNpF68sFrNMvDLFef0KAIT06YkZivKRISHNkXyzG0h3p2JOSC0qt9yxHOTCEXOvDAwyIb4cZI+IRFyHN8VRRji3jt+qI/MNgWQ4mkwWOjnaCb8CshcWx/WeRcfQi6uv01jl7Lxf0GRKJqIQwwc82dYEV1WA5pB65gJTD51FfqwchgLuHM3oNjEBsj7ZgBDI2mzQcHOwE3xw5jiLjxCUcP5SLuho9KCjcPZzQvU8HdOvVTnAKg1IKg9EMB3thDUopTmZcweEDuair0YGjFG7uTohPaItefTtAo20ZjdPZV3Fw/znU1ujAshxc3RwRGxeKvv07CiZCNmnY22lFj/fsuWLs23cWNTWNsJitGtFRQejfP0JwKoZSCqPJAjutRnRa6PyFUuzZn4Oq6kaYzBa4ujiiS+c2GDygk2CyZZOGVquBVkTj0uVy7Np3BpVVjTCazHB1cUSnjq0xdGBnwdojtmjkF1Zh+77TKK9sgNFohq
uLA9q39cOIgZGCSbqUUhjNFmgZ8XouV0tqsG3/aZRW1MFgsB5HeEgrjBwQCXeeuj1NGM0WMNdyVIQoLq/D1oOnUVRWC73BDFdnB4QFemNU/0h4ugsnAkvVKKuqx5ZDZ3C1tAY6gxkuTvYIae2F0f0j4eMh7DybrrV1sLcTngqsrG3E5sNnkF9SjUa9Cc6Odgjy88SYfpHw83IT/KzZwoKjFA4iGirSUR0RlZuoKKvDlg0Z2Lo+HVWVDdffQoNCfDBuSncMHRmluCBYdVUDtmzKxOYNGSgvr7uu0aaNF5IndMOIxGi4ugk/OMWordVh69aT2LQxAyUlNdfefoDWrT2RnByPkYlR8BDItpdCfb0B23eewrqNGSgqrr6u4dvKHUljYjFmVDS8ZEQdbqRRZ8SO3aexdlM6CgqrwXEUBICPjyuSEmMwNjEarXyEH5xi6A0m7DqQgzWbM5CXX3Fdw8vTBWOGRyF5ZAz8ZUQdbsRoNGP34XNYtTUdFy6XX0+C9HBzwpghXTFuRAwCFdZqMJkt2HvsPFZtz8DZiyXXkyDdXR0xakAkJgyPEaydIQWLhcX+tItYuSMDpy4UX1955ersgMS+nTBxaAzCg+RFHa5rsBwOZV7Cip0ZyMwthOWahoujPYb3jMDkoTHoGOqnSIPlOBw9dRnL92Qi9Ww+zNeq5zo52GFotw6YMiQWXdpKm+7ig+MoTpzNx/J9mTh6+gpM1xonOtprMTi2PaYOikF0eICi5eeUUqSdv4rl+7Nw8PQlGK81C3Sw06B/l3BMGxiDbh2CFGtkXS7G8oNZ2HPyAgxNzo5Wgz6dQjG9fyx6dgxpkRo1/1TuSEfko48+wiuvvIKnn34aX375paTPqI6IMurr9Pj6P1twYG+ONSR6SyZ7U6Kbg4MW46f2wL0PDRZ8e28Onc6I/36xHXt2ZYPjcHvdiGvhWK2dBknjumHeI0NgZ2NxM4PBjP/7dhe2bz8JluWaXVlAiLVGR+KoGDz22FDBN+vmMJks+OHnfdi4JRMWC8ujQcAQYNjQLnj68eE2V0G1sBx+XngAqzekw2Rqvvw5wxCAAoP6R+DZJ0bATeSt91Y4jmLB8iNYtjYFBqP5+jW+VYNSin492uOFx0fC00bnjVKK39eewG9rjkOnN4EQctt1t04hUPSKb4uXHx0pKVnzVo0VWzMwf9UR1Dcar08X3IiGIWA5im5dg/HqI4kIkOFYrdt7Et+vOIyaer2gRmxEIF59cIRgoS0+th4+g6+XH0Blre76eWlOo0t4a7z+wAi0k+H07E7NxWfL9qGsukFQIyLYF6/fOwKdw2yfajl46hI+XroXRZV11/fXnEa7Nj54ffYwxLRrY7PG8bP5+GDZbuSX1whqhPp54tXpQ9Ejwvak04xLhXh3+W5cLKkU1Aj0dsfLkwdjQBf++i8q/NxxjkhKSgqmTp0Kd3d3DB48WHVE/gQqyurwwhO/ofjaG7coBOjeqz3e+miK5PoS1dWNePGZxbhypUKSBiEE0bEheO+jqZIH8YYGA158YRnOny+RrBHRKQAffzwNrhIHcb3ehJffWIVT2QWSlk8yDEHb0Fb47OPpkiMwRpMFr7+7FinpeZI1Att44csPp0mOjlgsLN7+dCMOHD0vyZ5hCPxaueGr96YjwF94+XUTLMvhw2+3Ydt+aR1QNQyBl4cLvn5nquR8CUopPpu/G2t2ZEnWcHNxxNdvTEH7UGlJtJRSfLv8IH7flCpZw8nRHl+9NNGmXIZf1h/DD2uOSLJlGAIHOy2+eHYC4jtJz2VYvCMNXyzfL02DEGi1DD57Yhx6dw2TrLH6wEl8sGQ3QCG6jJkh1gTXjx8ag8Gx7SVrbDmRgzcWbQcFFf2NEGJtjvfuPSMxukdnyRq7sy7gxYWbwXFUdPkvufY/b0wdhkl91OJntmLL+P2Hr5ppaGjArFmz8NNPP8HLq2XKSqsI09hoxCvPLEZJkUQnBAAokHrsIj7593pJnzEYzHjtxe
W4ki/NCQGsD/9TWfl47+21kgqPmUwWvP7aKslOSJNG7rlivPnmapivhXSFYFkOb7+3Dtmnr0qu4cBxFHlXKvDyG6tglNB0jeMo3v9kE1LSL9ukUVRUjedfX8nbIO9GKKX45NsdOHhMmhPSpFFeUY9n31qBunq9pM/8d8E+yU4IYK3ZUF3biKffWYGqmkZJn/lx+WHJTkiTRl2jAU+9txIlFXWSPvPbphTJTkiThk5vwtMfr0F+CX/H4RtZtTtTshMCWK+HwWTBvz5fi4tXKyR9ZtORM5KdEMCaKGq2sHjuv+tx5nKJpM/sTj+P9xfvBpXghDRpsCyHF3/YjPTzVyVpHD59GW8s2g6OijshgDXKx1GKNxZtx5EzlyVppF24ihcWbAbLcpJqkNBrOv9evgu7sqT/rlRs5w93RB5//HGMGTMGw4YNE7U1Go2oq6u76U/FdpYvOoyCK5VgWduCXZRS7N99BscO5Yrarll1AudzS8DZqMFxFMePXsDe3adFbbdszkR2doHNBZg4jiIrMx9bt4gPZjv3nMaJ1DxZGudyS7Bmfbqo7aGj57H/cK7N5c5ZjuJKQSWWrjohapuadQVb92TbXBCL5SiKS2uxYPlRUdvs3CKs2iJ+vM1pVFU34ofFB0VtL+ZXYOHa4zZrcBxFfYMB/120T9S2qKwW3y0/ZLsGpdAbTPh0wR5R24qaRnz2+16bNZoSWT/4daeobb3OgA8WidvdrmGdJnz7l+2i96TeZMbbC3fYXJTOOohTvDF/m+hvy8yyeGPRNlAZJeMoKF5fuA1mVvilg+MoXvt9GzjK2axCALy5ZAf0JuVdmFWa5w91RJYtW4b09HR8+OGHkuw//PBDeHh4XP8LDg7+I7/eXYnJZMGmtWmyqycyDMH6lSmCNizLYf2aVNl9RBiGYO1qYQ1KKdaskf7GeiuEAGvWin/HNevSZCe9UUqxdkOaaHRn9cY02UlvHEexfkumaHRnzeZ02SWvOY5i886T0Au0XgeAtVszZWuwHMWOAzmoazAI2q3ZoUxjf8oFVFQ3CGvsyZLd6JHlKE5kX8HV0hpBu/X7T8mqkgpYr8epC8U4n18uaLfp8BlJUb9mNSjFpaJKnLxYLGi3PeUcGg0mWVVlOUpRXFWPYzlXBO32ZV1EdYNe1vmiFKhu0GNf1kVBu6PnrqC4ul6eBoBGgwnb08Vf0FTk8Yc5IgUFBXj66aexePFiODpKm6t/5ZVXUFtbe/2voKDgj/p6dy2H9uagoV74YS8Ex1FkpOahsKCS1+bEsQuorBB+2ItpnMspxvlc/tBwVlY+CgurZT/MKQUK8itx6hR/aPhcbjHOXyhV1JitvLweKWl5vNuvFFQi86TtUZ0bqa3T46BA3kdpeR2OpFxU1NBMbzBj98GzvNtr6nTYdfisIg0Ly2LbPv5IWKPOiC37TytrzEaBjXtO8W42mixYt+eUouvBMARr95zk3W5hOazanSm7BDlgzUlZvYc/okcpxbLdGbL336Sxck+moMbSPRmKGsxpGILle/k1AGDpvkybKu/eCkMIlu0X0Tgg38G1agBLDig73yr8/GGOSFpaGsrKyhAfHw+tVgutVov9+/fj66+/hlarBdtMKM3BwQHu7u43/anYxoE9ObLf9ppgGIKDe/kHpf37zipe1qbRMDi4P4dfY/9ZRZ10mzQOCGkcPNciGvsP8J+rA4dzFZ8rhiHYK+AkWPNClGkQAkFH5HDqRUUNBQGrc7jzIP/1OH7yCow8q4mkwlGKHYf5jyPj7FU0SMi5EdTgKHYc4dc4c6kElbU6RRosR7Hj2Dne7ReuVqCwvFZWpOJGjV2pubxOWVFlHc5frZD9MtCkcSg7Dwae61rdoEfGxUJFThtHKdIvFKK6ofk8J73JjEM5eYocXI4C5wrLUVhZK3sfKvz8YdVbhg4dilOnbn4zue+++9CpUye89NJL0GhsW8KpIo3KivrrVUrlwjAENdX8iYXVVQ2KW30DQE0N/8
O6ploHjlM68FFBjeoaHaSl3/HDshyqBQad6prml2zaAnctx4JXo1YHDUNgsTFf50YohSQNpW3khRJWq2t1IFB6RSB8PeqUOQhN1PIMegBQ1UIaDXojWI6DppnCflX1LaNhYTnoDKZmO+q2lAalQG2jHo72t6/+aikNAKhu0MHL9fbqxbWNBkXO1I1UNegR6CNthZmKdP4wR8TNzQ1du3a96d9cXFzg4+Nz27+rtBysRdngLWU/lj9Bg69eiC1QSq8XjvqjNADAYhbWUDyyArAI5AIojVRc17AIaVyr7NYCjhvvNo5Ds4VPWlKjhc6V6HG0ECzbvCPSUscBgPc38qdotOC54tOwtKiGvJwcFWHUpnd3GR6eyiqLAtZxQKgKqoeHk6Kqhk0Iabi5OSqeNmEYRrAgmJubo6K5acBat8TdXUDD1bEl/BB4ePD3qXF1cVQU2r6uIdALx9XFQXGECoDw9XB2UJSv00Rzb/dSttmCixP/ftxaSMNOq+Eta+7uoqxK8Y3wnRN355bTcP9TNJrfl7vAtWopDRVl/KmOyL59+yQXM1ORR3R8qGIngWU5RMeF8mvEhCgeMFiWQ3QMf1XE6JhgxW9kLMshOpp/5VVMVLBgxEQKlFJERwlrKD0OQghio/jPVVxUsOKpMoYhiI/mv+ZxXYIVR480DEGCgEZMZ+UNyTQMQXeBcxXVoY3y/CaGoFsk/zXvHOYv2ntFDIYhgkXN2ge2grOj7Q0nb9IgBF3DA3h76gT7ezY71WELhADhAd5w5XEG/DxcEeCtrJ0BAAR4u8HPo/nqvW5ODgj391aYRQV4uTohxNdT4V5UmkONiNxlJCbFKUpWJQQICPRCbEIYr82wkVGSq6/y4e3jil69+VuXDxrUGS4K+9+4ujpg4CD+qot9e3eAp8IIkoODFiOGduHd3i02FAH+HooeghqGYPQI/sqOkR0DEB7aStHqBkqB5JExvNvDQ1ohqpOyQZzlKCaMjOXd3sbPA71iwhRrTBoZx7vdx9MFg7t3ULSCguUopoyI5d3u5uKI0X06K9LgOIqpw/mPw9HBDuP7RynToBTTh/Fr2Gk0mDwwWlHUkFJgxpA43pcjhiGYPjBW0b1LCDB9YBzvfUMIwcyBcYoikwwhmNYvBnZqbuMfguqI3GV4ebtgwJDO0Gjk/7LHT+0hGFVxdXXEiMQo2VMnhCEYPzFBsK+Ng4MdxoyJlT0oMQxBUlK8oMOk0TAYnxQvW0OjIRg1IhrOAqF4hiGYmBwve1GLhiEYMrCzYD8YQggmj+0mO2LBMAR9e7SDXyvhN9PJo+MV1aeJ7xosWuZ98qg42RqEAJ3C/RERLtxHZcrwWNlJtwRASGsvxImUYJ80NEZRYq+vlyv6RIcJawxSpuHu4ogh8cIl2Cf2V1ba3MneDqN6dBK0Se7Vpdk8GKloGQbJvSIFbcYkdIKjws66E3urZd7/KFRH5C5k9v39obXT2jxFw2gYBAR6YeQY/jfjJqbN7ANHJ+HW9c2h0RC08nHF2HHxoraTJne35nHYqMEwBO7uTpgwMUHUdnxSHLy9XGx+s2QYAicne0yb3EPUdszIaAT4e9isQQiBnb0Wc6b3ErUdPrAzwoJ9ZGgAWg2D+6b3EbUd2LMDItr5264B67E8NLO/qG2vmLaI7RwkyzkkIHhMgkZMRCD6xITJetOnAJ6YMUD0t9UpzB/De0bIniZ9atoA0cE5tLUXJgyIkh1NeHJSP94clCb8vdwwa5j4b5WPR5N7w9lRuK+Up6sTHhwp/jvi44GRPeApMoXk7GCPx8eI3+N8zB4UD39P2xo3qkhHdUTuQkLCfPHOx1Oh0TKSH+gaDQMPDyd8+NUsOEuYEglo44n3P54GrVZjgwaBs4sjPv58JtwFEiObaNXKDR99PB0ODlrJGgxD4Ohoh4//Mx0+PuIPDg8PZ3zy4TQ4OdtLjiIxDIGdnQYfvTsFrVuLL+VzcXbAZ+9Pg7u7k00aWi2DD9+ciJAgH1
F7Bwc7fPr2FHh7uUp2FKzNyRj8+6Vx6CASRQAArVaDT1+biNZ+HpKvB7F6IXjz6dHoGiHejZVhCD5+YRxC23jbpgHg5YdHICGKPwflf/YE7z05Fh3D/Gx2eJ69ZzAGdGsnyfbNB0cipkMbm52Rxyb3w8jewlGEJl6aNQS9uoTZ7IzcN7oHJgyMlmT71MR+GBrX3uag3rTBsZKdmIdG90JST+nN65pI6hWJh0aLO+oAMGdQPKb3F3/JuhECYFhMezyT3M/m76YiHdURuUuJ7xGOT7+95/rKFN452msDY3CoD7759UEEtJHemLBrVDC++vae63kWfA/1pn9vHeCJb3+8DyGh0tucd+zYGt98Mxc+16YNxDR8fd3wzbdz0b699DbnYaGt8P3Xc9H6WgdaMQ0vTxd888VsdIkMlKzRJsATP3x5z3WnQmg+G7CuLvn64xmIjxUfWJvwa+WGHz+bjXZt/QCA1yFpuhWcne3xxb+nok93aQMrAHh5uODHj2ahS8cASRqODnb4z6sTMLSvtIEVsOZY/PDuDHTrEiyqQQhgb6fFB88mY+xg6WUBnB3t8d1rU9E3tq2gRpOOnVaDtx5JxNQR/DkVt+Jgr8V/X5iE4T07StAg0GoYvHLvMNybJD06oNVq8MWT45DUt6uoRlNX3H9NG4jHJ0kfWDUMg48eGoNpg2MlaRACPJbcBy9OGyTZCSOE4O3ZI3HfiO4g4P994No2AuC+Ed3x9qwRNmm8PGkwHh/dB4RAMCLWdIzTB8TiP/eOUTR1pCIOoS2xXu4PwpY2wirNYzSYsX/3GaxbeQIXzt1eUr1Hn/YYN7k7uvVsJztXwmSy4OD+s1i3OhU5Zwpv2x6f0BbjJyagZ+/2svNKLBYWhw/lYu3aNJw6dXvp/+iYEEyY0A19+nSAVuaKBZblcPT4Raxdn4b0zNv7Y0R2boOJ47qhf9+OspN1WZZDSnoe1mxMx/HU20vDR7T3x8Rx3TC4XwQcHOStiuA4irSTV7BmczqOpFy8LXckPLQVJid1w9D+neAkEjbng1KKzDNXsWZrBvYfP39bXkdooDcmj47HyIGRcHaSr5GdW4TV2zOx+1jubauPAv09MGVUPEYNiISbguWsZy6WYPWuTOw4ehbmW2rbtG7ljikjYjG2fxd4uMlfQZJ7pQyr92Rh8+EzMN1SE8bXyxVTh8UiaUBXeLvLT56+WFiBVfuysPHQ6dsqmXq7OWPKkBiM7x8FXy/5UwxXSquxav9JrD10CrpbOk97uDhiysAYTOwfhdYKVsJcrajB6kOnsOrQKTTob66C6+rkgMn9ojCpXxSCWnnK1iiuqsPqo6ew4tBJ1Opubonh7GCHib2jMKVvNML81I7xcrFl/FYdkb8pNTU65JwtQl29ARoNAy9PZ0R1DRIcIK/klaO0pBYGvQnOLg4ICWsFP3/+qYX6egOycwpRX28AIYCnhzOiuwYJDpAF+ZUoLq6BXmeCi4sDgoK90TrAk9e+odGIUzmFqKvXgwDwcHdGdJdAwQGysLAKRUU10DUa4ezigMA2XmgTyP/A0OlNOJlTiNprre7d3RwR3SkQLgJJpsUlNSi4WgWdzgRnJ3u0bu2BkGD+KRKD0YzMaxocpXB3dURUxzZwF6ibUVpWh/yrlWjUmeDoYIfW/u4IC+GPFhlNFmSeu4qaOj1YjoO7iyO6dgiApxv/4FVeWY/LBZVobDTCwcEOfq3crq2wad7pNFtYZOUWorJWB5bl4ObigMjw1vDxcOHVqKxuxKX8CjToDHCw18LX2w3tw3x5NSwsh6zzhaisbYTJwsLN2RGRYf6CA2R1nQ4Xr5SjrtEIBzsNfLxcEdHWj1eD5TicvFiE8ppGGM0WuDk7IiLYFwE+/M+R2gY9ci+Xo15ngJ1WAx8PF3Rq68/roHMcxcm8YpTV1MNgssDNyQEdg3wR2Ir/N9WgMyLncinqG60anm5OiAxvzfvGTSnFqSslKK6qg8FsgaujA9oH+CBUYI
Bs1JuQc6UUdY3WZ4OnqxO6hPnzOuiUUpwpKEVhVR30JjNcHO0R7u+NcH/++11vMuPM5VLUNhrAEAIPV0d0DWvNu2yZUopzReXIr6iBzmSGi4M9wny90CFA4H43W5B9uQR11xwFd2erhoNAXsv5kgrklVej0WiCs4MdQnw80SmA/140W1iculKCWp0BHEfh4eKILiH+cLLnf8ZdLKvEpfIqNBhNcLK3Q5CXO7q08W+R2kp3E6ojcpdCKcWZnCKs25COvfvP3vaG6ObqiKQxsUgaEyspd4GPs7nFWLcpA7v2nrmtiqqzsz3GJsYgeUwsgmyYxrmVC3llWLc1E9v2nIbplrc3Rwc7jBkRhfGjYhEqIT+Cj7yCCqzbloXNe07BYLxZw8Fei1GDu2BCYizahfrK1sgvqsLanSexcc8p6PQ3d6+102owsn9nTBoZK7qSQ4iislqs3ZOFdbtPov6WPilaDYPhvTth0rAYdGkfIPthWFpZj7V7T2LNnizU3tIhV8MQDO7eAZOHxSK2Y6BsjYqaBqw7kI2VezJvK4POEIKBce0wZWgsEjoFy9aoqtNh/eFsLN+bhfKamxszEgL07doW0wbHoldkqOwIYG2jAeuPnsayfRkorqq/bXuvziGYPjAW/bq2lR3Sr9cbsfHEGSzZn4mCiprbtie0D8KMgbEY1LUdbx0QMRoNJmxOy8GSg5m4VFp12/bYsADM6B+HYdHtZddF0ZvM2JpxDosPZSC3uOK27V2C/TGrXyxGRHcUdDCEMJot2HYyF0uOZiL7ault2yMCfDGrTyxGx0QIOhhCmCwsdp25gN+PZiCz4PaOxeG+3pjdOxZJMZ3h4iAvAni3oToidyEGgxnvfbgRh4+eh0bD8BbJYhgCSinm3T8Q06f2tOmBbjJZ8PEXW7F7Xw40GmIt682jwXEU98zojfvm9LNJw2Jh8cX3u7Bxx0lBjaa+JtPGJ+DRewfZNGiwLIdvF+7Hik1pgv1RmraNGxGDf80batMDnVKKH5cfxsI1xyVpJA7ojFceGWnTA51SikUbTuD7FYdABPrVNGkM6t4Bbz82Co42PmyX70jHl4v3AwSiGr2iwvDBE2PhYuN0y/oDp/Dhol2gFLxVYJs04joG4dMnk22uHrrtxFm8vWA7LCzlLbjXpNElzB9fPTkeXgLRpObYm3URr8zfApPFwrtcukmjfZtW+PaJCfCzcbXFkZwreG7+RuivTX00J9OkEernhe8enWBz/5PUi1fx9C8bUK838vb3YQgBRynaeLnju0cmoK2f8NLrW8nOL8Fjv6xDdaOet3J/k4avuwu+e3ACItrY9lJwrrgcD89fi/L6xuv7ul3D2rTOy8UJ3907HlHBrW3SyCuvwryFa1FUU8er0XQO3Rwd8O3sZCSEKS/O93dHdUTuMgwGM557cRnO5hbbVGNh+tSeePjBQZJszWYWL72xEhknC2yqmpo8Jhb/eny4JGfEwnJ4/YN1OJp6e96CEMMHReK1Z0ZLckY4juLfX23GLoFOsrdCCNCve3u890KypBwWSik+/nEnNuzmbzffnEZC1xB89spEyTksXy/ehyVb0iRrMISga4cAfP3KZMnOyM9rj+KntUelazAE7YNa4YfXp4kuy2zi922p+GrFAckaGoYg2N8Lv7w6XbIzsnr/SXyweLfkpnkahsDf2w0LXp4OH3f+aacb2Xw8B28s3AbYoOHt5ozfXpwBfy9pORO7sy7g+fmbAFBI+alrGAJ3J0f89ux0BEus+nnk7BU88dM6cJRKag2gYQic7O2w6KlpaC8wlXIjaZeu4qEf18DCctI0CIG9nRa/PjYFXYKkRQ+zr5bg3h9XwWS2gJWgwVxLCP75wUnoFiYt0fx8aQVm/bgcepNZsgZDCL6bMx59O0hPNL8bsWX8VlOB/wZ8+J9NNjshALBsxXFs2JQpyfaz/25Hxsl8m0u3b9iciRVrUiTZ/t/8vTjaTPKkGDv3ncGCZUck2f6y7LBNTg
hgfVM7dOIC/m/Rfkn2izek2OSENGmkZufjk593S7JfvSvTJicEsEYass8X493vt0my33L4jE1OCGB19C5crcCr32ySdK/sTTtvkxMCWCuX5pdW4/n/rpd0zx89fQUfLrGeV6m3FstRlFbV4+mv18Es0OyviYwLhXhz0XZQGzWq6nV49L9rbksebY6cglK8tGAzKJXmhDRp1OkNePj/Vt+W2NkcF0sq8cz8DWCpNAehSUNvMuPh79egppG/63ATVytr8fgv6yU7IQDAUgqj2YJHflyDstoGUfuyugY8PH+tZCcEsP4+LCyHR39dh6tVtaL21Y16PLhgDfRmaU5IkwZLOTy5ZAMullVK+oyK6ojc8Zy/UIoDh3JlV5ucv+AAzAKdWwHgamEVtu3Mll2Zc8Hvh6E3mARtyivrsWZThuwyy4tXH0f9LbkLt1Jbr8fitSdk7Z8CWLk5HRVVwg9BncGE+atsG7yva1Bg455TuFpSI2hnMlvw/YrDsjQ4SrH7eC7O55cL2rEch2+XH5SnwVEcPXkZpy7cPld+I5RS/HeVfI2M3EIcP3P76qVb+WbtIVkaLEeRk1+G/VkXRW3/b6M0R7g5jbySKuxIOydq+/3WY+Aotfk3wnIURZV12HDijKjtL7tSYGZZm3/rLEdRWa/DyiPiDviC/WkwmM02N2LkKEWd3oglhzNFbX8/nIE6vVGyg3CjhsFsxoKD4k7+8pSTqGzQ2Vy9llLAzLL46YC0FzQV1RG541m3IV1RufbaOj0OHckVtFm/OVNRfw+9wYw9+4WjEBu3Z8kucw5Yc0u27s4WtNm8O1tZW3EKbNol/KDdcTDntsRXW2AYgvW7sgRt9p44j/pGYadLCA1DsEZE40hWHipqGhVprNqVKWiTfu4qCkprFGms2C2sceZyCc7ml8kvb08Ilu8R1sgrqULq+auyOxwTAizZmyFoU1Jdj/2nLykq2b5kf6ZglKq6QY9tGedka3CUYumhTMHfWKPBhHUppxVprDh6EiYL/2/MaLZgxfFTsq8Hy1GsTT2NRiP/y5OF5bD0eJYijS0nz6FaQgRJRXVE7mgaGgzYufs0b0KnFBiGYO36dN7tRqMZm7adVNS9lRBg9Xr+NwyLhcXaLZnKOsRSYPWmdN4HLcdRrN6SrqhDLEcp1mzLEOzIu2JrhqIGXRxHsW7XSRgFQvUrd2QoajTGchSbD5xGo44/VL9ypzLnk+Uodp3IRfUtq19uZMXuTMXN5Q6fvISSyjpem5X7Typu/JZ+vhB5xfxh9FUHlWlQCpy7Wo7Tl2+v43Nd48gpEAWeOgVQUFGDE+dvr7HTxNrj2TZHEG6loq4RB87cXv+miU3pOTCZ5TvqgHXF0I6T53m378g+j3qD+DSUEAazBZsycni378/NQ3m9fEcdsE43rUk/rWgf/xRUR+QO5sLFMtFpFTE4juL0mULeAfxyfiX0euFpFTEoBS7llfMOrsWltaitU/ZmQEX2U1XTiNKK25dS2kpVjQ6l5c0PfDqDCZevVipydgCgUWdCftHtyyWBa9frYrHsN7EmTGYLzhfwT89k5RYqcwxhXZ2Uk3f7cskmMnKvKnrDB6z31qmL/FNAaeeUawBAloBG6nnlGgwhyLxUxLs9/YL8iEsTGkZYIzOvyOYcsFvRMgwyLt1etLCJjMtFipzo6xp5AufqciG0CiudMoQg44qAxhXlGqAUGVf4z5XK/1AdkTuYBpGcCKlwHIXBYG52W0ODsjeLm/fV/Petb2w5Db48kQaBt3+bNXimRRpa8jh49qUzmBQ7OmIaLMdJSp6UpsF/jzaK5A1J1hC4tlISNMVgGHK9aFZzCG2zRUPoOG6t7ikHQgjqBc5HjYLpvhsR0qjTGRRHXThKBTXq9UbFThtHKWoFrke93ig7n60JCqC6Ba7rPwHVEbmD0drJKyLU7L54loza2bXcLcCrof3jNeQWdWoOvlofLavR/L7+jONoWmLYEggtRW6p/hxC50Ruy4
AboZTCTsN/HHYtosF/PQBAK6BvC4LH0RK/QyJ8HHZajZJUMKsEET7ndpoW0ABg/wcfB0Q0VP6H6ojcwXh5SqtvIIaTkz3seJyaltLQaBi48nTt9RIoD24L1hLwzff78HR3bpEHBwB4eTRf5Mrd1VFRXsXNGs2fEwd7LRxk9rG5Fb6+JYQQwdLzNmnwnCsANhcL48NHQMNHQW+WJigV3k8rd1fF9xbLcfAS6FXj5+Gi2DnkOApvgXPu6y69MzMflFJ4uwpcD1dnMIodUCKs4eYMRYlaABiGgY+Qhouz4mksDSHwdWuZZ9/djuqI3MF0aO9/vSOsXDQagmFDInm3BwV6ISy0laLftUbDYFC/CN6301Y+rugS0UbRIK5hCHolhPM2UHN1cUD32DBFD1qGIYjpHAhvHudMq9VgYI8OijQIIegQ6otAnutKCMGI3p2UaQAI9PNAB4HS9Yl9OiselHw8nBHVvg3v9lG9OykeXF2dHJDQOYR3++ienZWOSXCw06JP1zDe7aO6RygO02sYBoOi+bscJ8ZHKJ5uoKAYFtOed/vIuI6Kc11YjmJkXEfe7YmxEcpWrsHqtCXGRfBrRHdsGY1ogeOI6qh4iomlFKOi+DVU/ofqiNzBMAzBhHHxih60LEuRPJa/dTkhBJPHdVOUl8CyHMYnCbdHn5wUryg5kuUoJo6JF7SZNDpO0YOW4ygmiWhMToxVpEEpxZRRcYKVaCcNV6YBAkwdGS+oMXFItCINhhBMHhYrOG0yYWC07P0D1vt/4qAowR4kSX26KJoC0jAESX0i4erE3wBxdI/OcFQQpdIwBIkJEYLRiuGxHeAm8B2kaAzsEo4Ab/4KlgMi28JXYhXZ5mAIQUL7IMFS793bBSHEx1N2BIkhBJ0D/QSrq3YNao3ObfxkPxcJgFAfT3QP5y/DHu7rje5hQYocaT83Fwzo2Fb25/9JqI7IHU7iiCjZbe0ZhqBzpzZo385P0G7o4M5wcpLXDIphCMJCfNA1Urhk8oDeHeHh5iSrmRnDEPj7uqN7XJigXa+4tvD1cZX18CCEwNPdCQN68L9RAkBs5yCEtPGSFd0hAFyc7DGsbydBu05t/dE5nL/rq5iGnVaD0f35o2AAEBrgjYTIYNlRKkKAcQOjBG38vd0wIDZcduSFUirqzHi5OWFk9wjZGixHMXlgjKCNi6M9xvXuKvtcsRzFVBENezstpvaLlj3wsRzF9AGxgjYahsGM/rGyNThKMbO/sAYhBLP6C7+UiGnM6iesAQCz+8Yqenma1Vf4ZQAAZveOlR2lYgjBzF6xLZYndbejnqU7HHd3J7zwr1E2f44QAkcHO7z0vPhnnRzt8epzY2RpaLUavPrCGNEftZ2dBm88P8bmNyVCrD/qN58bIzoQaDQM3n52rM0DBrmm8/azY0WdPkII3n5qDLQaxvY3MgK8+eQoODqIO31vPJwIB3utzRoUwOsPjYSbhB4tr9w3HK5O9rIG2BfnDoWPhPyi52cNgYerkyyNp6cORJCfp6jdM5MHwNdTXv7Dw0m90CFIvH/KY0m9EdTKQ5bGnKHdEN02QNTugeE90C7Ax2YNAmBynyj0iuCfwmpi9oB4dAnxt12DAKPiIjA0SthRB4ApvaLQvX2wzQ4PQwgGRYZjbLfOorZJcZ0xqHO4LI0e7YIxtaewEw0AQzu3x+joCJtfnjSEICqoNe7pIxxdVfkfqiPyN2D4sC548rFhAKTlaDEMgZOTHT7+YApCQ6Q1qerftyNefCYRhBDJGg72Gnz0ziR0bC+tm2WPuLZ44/mx0DBE0o+bYayOznuvjkdUpLRuljGdg/DeC8nQahlJgx9DCJhrDkxCtLQmVZ3C/fHJSxNgb6eVpEGurVJ57dFE9E8Qf5ADQHhQK3zx4kQ4OUhzFJosnr93KEb0EX+QA0CQvye+fnEyXJ3sbRqYnpjWH+MHS5t28fd2w7fPT4aHi5NNGg8k9cTMEdIe5N7uzvjuX5Pg4+Fik8asYf
GYN7aXJFt3Z0d89+QkBHi726QxoW9XPDOhvyRbF0d7fPfoBIT6edk0wCZ2i8ArU4ZI+k052mvx7bzx6NjG1yaNgZHheHfmCEkadloNvro3CdGhrSVrEAL0aB+M/8weLSmKoGEYfDpjNHq0C5bsrDOEIDqkNb6ekyS4uui6PUPwwcQRGBQhfXqFIQQRrX3x3ZxxcBSYUlS5GbX77t+IQ0fO4/sf96CwqAYaDQP2lgqg19uox4TgmSdHICTEx2aNE2l5+PaHPbhSUAmNhtxW1bXp37pGBuKZx4ejfbjwtE9zZGYX4Ksfd+Pi5fLmNa4dR+cOrfHMI8PQuYP42+StnM4txuc/7cK5i6XX99ecRvswX/zrwaGIkejo3Eju5TJ89vNunMotEtQIC/LBM/cOQo/oMJs18gor8cmvu5GeUyCoEdzaE0/NGoT+8fwJkXxcLa3BfxbuxvHsK2AYclsuT5NGQCt3PDl9AIb2sD0Br6SqHh//thuHsy6BCGj4ebni8Un9MLqP8NRSc1TWNeKjxXuwL9PaO+bWsHrTsfm4O+OR5N6YOMD2HJbaRgM+XrEHO9JyQWkzGtfaxHu6OmFeYg/MGCw+BXAr9XojPlm9D5tTz4Kl9LbVG00abk4OuG9YAu4b2t3miJPOaMZnG/Zj3YkzsLAsQG9u5keIdTWRi4M95gyKx8Mjeto8zWA0W/DllkNYefQkTNcaC96kce3/O9nbYUbfGDyR2EeSg3AjZpbFf3ccwZKjWdCbzLd1X246K/ZaDab0jMZzo/rBXmubg8ByHL7bexwLj6Sj0WgCQ3BTU8ImDTuNBhPiI/HCqIFwltj9+m7GlvFbdUT+ZlBKkZGZj3Ub0pGeeQV6vQkMQ+Dm6oghgzsjeWwcQoJtd0Bu1Th1uhBrN6YjJS0POr0JhFg1BvWPQPKYWISH8a/IkKqRk1uMtVsycCTlEhp1RhAArq6OGNCrA8aNikXHdtLagQtx7mIp1mzLwMETF9DYaC1S5OLsgL4J7TBpVKwsJ+dWLuaXY82OLOw9lov6RiMoR+HibI+eMWGYnBiHqIg2snJjbuRyUSXW7MrCzqPnUNdoAOUonJ3skdAlBFNGxCG+c5BijYLSaqzdcxLbjpxFXaMeFpaDi6M94joFYfKwWPToEqp4+XJRRS3W7j+FLUfOoLpeBwvLwcnBHjHt22Dq0Fj0jgpTPK9eVt2ANQdPYsPhM6iq18FiYeHkaI8uYf6YNjgW/aPDFddrqahtxLoj2Vh7JBsVtY0wW1g4OdghIsgP0wfFYHBse5sH1Vupqtdh/fHTWHM0G6U19TCZWTja26FDm1aY3j8Gw2M7wF7hW3edzoD1J85g5dGTKK6uh8lsgaO9Fm39vDG9XywS4yIUJeoCQIPBiI1pOVh+5CQKq2phvKYR0soL0/pEY0xcJzg7NL8aTiqNRhM2Z57FsmNZuFJRA6PZAgc7LYK8PDCtdzSS4zrD1VF+MjBgLQu/5eQ5LD2eiUvlVTCYLXDQahHg6Yap3aMxPj4SHk4tsyz+bkB1RP5mnM8rw9a9p1FWUQej0QJXFwe0D/PF6CFdRet8UEolDUCX8iuwZV82SsrroTea4epsj7bBrTB2SFe08nIV1QAgqnOlqAqb92ejsKwWeoMZLk72CG3jjbGDuqJ1K+HrJ1WjsKwGG/efRkFpNXQGM5wd7RDs74XkgV3Rxk94qbNUjZLKOqw/kI0rJdVo1Jvg7GiHQF8PJPXvitDWXi2iUVbdgPWHs5FXVIUGvRGODnZo4+OOpD6RaBcoPp0m5bpX1jViw9HTOH+1AvV6I5wc7ODv5YakXpHoGCTuSErRqG7QY8Px08gpKLMeh70Wvh6uGNO9M7qGik/ZSdGo0xmwMeUMsvNLUa83wl6rQSt3F4zu1gkxYQGin5ei0WAwYlPaWWReLkLdNQ0fV2ckxkUgITywRTR0JjM2Z5xFWt5V1OmN0GoYeLs4Y2R0B/RsFyLq5EnRMJgt2HbqHI5fKk
CNzgANw8DbxQlDI9ujf4ewFtEwWSzYfuYCDl+8glqdAQwBPJ2dMKRTOwzs0FbUyZOiYWZZ7Dp3EQcu5KFGb61O6unkiAHt22JYRDtRJ0+KhoXjsO98Hvacv4hqvR4ctWr0axuKkZ3ai0ZOpGiwHIeDeVewI/cCqvR6sBwHTydH9A4JxuhOEXf19I3qiPwNoJRi96FzWLExFTnnS6DRMOA4DpRaQ68U1lDykL4RmDG+Ozq0tX0KhFKKAycuYOnGVJw6Z50+4Kj136/P3RJgQI/2mJXcHZEyowNHMi5hyeZUpJ0puF2DWP+7X3w7zBqTgJhOtk+BAMCJ7Cv4fUsqjp+yTh9Q+r8HASHWpbe9osMwZ3QCErqIJ+01R/q5q/htayoOn7xkfcBcC703JcyyHEX3zsGYlZiAvtHyluWduliMRTtSsS/jf63nb9WI6xCIWcPjMThOWj7JreTkl2LRzlTsSj8PjlpDx7dqdA1rjVlD4zGiW0dZkZQLRRVYsCsV29LPgeU4EJAbNBiwHIdOQX6YNSgOY7p3lhVJuVxahQV707ApNQcWCwtybUqCwPrbYDmK9gE+mDUgDuN6ylvGW1BZg4X70rAu5QxMZsstGtbjCPP1wuwBcZjYs6usKEdxTT0WHEjDmpRs6ExmaAgBe4tGkLcHZveNxbRe0TZPHQBAeV0DFhxOx8qUU2gwmq5Pdd14rlp7uGJW7zjM7BkDJxlTB1WNOiw4mo7lKSdRazDeNFXY9N++ri6Y1SMGs3vFwVVGlKNWb8CiExlYkpqFKp2+WQ1vZyfMTIjBPT3iZEUgGowm/JaSgd/TslDe0NishoejI2bER+PenvHwduYvRMeH3mzG7+lZWJSWgeL6ButzkaOgwPXr7+Zgj2kxUbi/ezz8XIVfBv+OqI7IHY7FwuLT73di8+7s6/O9fGgYAhCC158ejWH9hZd93gjLcvjvwn1YuTWj2Xn/WzUoBV56ZDjGDhHPJm+CUorvlx/Cog0nJGmwHMWzcwdjykjp2eSUUizYcALfrzrcbI5EcxqPTe2He8Z2t2mAXbIjHV8s3Seq0XSc9yf1xCMT+tiksebAKXz4+24QAmGNa/fEzGFx+NeUgTYN4ltPnMWbC7cBkKYxoW9XvDJjqE1TFXuyLuClXzeDo1RQoynPILFbBP49a4RN0wiHcy7j2fkbYWZZYQ1YcwIGdQ3HR/eMtmmATb14FU/8sg4Gs0VUAwB6dQzBF3OT4OIofYA9WVCCh39Zg0ajSZJGXFgbfDN3HDycpQ+wZ4vLMW/BGtTo9KK1YQgBOgf44Ye5EwQri97KpfIq3L9oNcrrG0ULfTGEILyVN365ZyL83aUPsAXVtbh/8WpcrakTXTbLEIIgT3fMnzUJwV7Siz6W1DXg/qWrcamyWlRDQwj83Fwxf8ZEtGvFXzvlViobdXhg5VqcLisTXWKsIQRezk74depEdPZTNt19p2HL+K2umvmToZTio2+3Y8uebAC3J7vdCstRcCyHdz7fhP1HcyVrfPnrHqzcmmHVEHk4sRwFRyk+/G4Htu6X3rb6u2tOiFQNAPh84V6s3pkpWWPhRqsTcuM+xDT+b8Uh/LY5RbLG8l0Z+GLpPkkaTcc5f+NxfL/miGSNDYdP4/3fdokO3sD/7okluzLw2Yp9kjV2puXitV+3guWka6w7ko33Fu+SXM764Ok8PP/LJlhYTlSjaZfb08/htUXbJBe0SzlfgCd/WgejRdhBAP6XmHjgdB6e/9X6vaRw8koxHv5xDfQmaRoUwPHzBXjs53UwWaQ1DDxXXI77fliJBoOwE3KjRtaVYsz7eTX0puabVN7K5Ypq3PPTCtQ0ijshgPWanCspx72/rESDQVrDwMKaOsyavwLlDeJOCGC9t/IqqzB7/gpU66R13S6rb8CshStQKMEJadIorKnDrIXW7yWFap0es35bgTwJTghgrYxaVt+Amb+tQGFt8x25b6XeaMSspSuRU1Yuqc4JSymqdXrMXL
ICeVXVkjTuRlRH5E9m/fYsbN93xqZiPBTWN5l3Pt+EotIaUfsdh85izfYsWd/vg//bjksFFaJ2B9Mu4LdrToitfLZgN3IulYjapZ7Ox3crD8vS+Hb5IaTnFIjaZV8sxmeL98rSmL/pOA5lXRK1O3+1Au8u2ilLY9nuTGw/cU7UrqC8Bq/9utXmOi2UAhuOnsbaw9mitmW1DXj+l02goDaVPKcU2Jl5Hov3pYva1jYa8PTP669NvUnX4CjF4ZzL+HnncVFbndGMx35eBwvL2VSwiqMUGZeL8PUWcQfUZLHgkflrYbawNmmwlCKnqBwfbdwnbstxeHjhWujNZpvKkbMcRV55Nd5at0vUllKKx5asR53BYFMlXpajKKqpw0trtkmyf2b1ZlRIdHSua1CKioZGPL1qkyT7FzZsQ1Ftnc0adXoDHl2xQZKz/vq2XbhUVW2zhs5kxoMr1ykuXf93RXVE/kQ4jmLJ2hRZ5Y8ptf6414s4GJRSLF53QlH54zXbMkXtft+UKnsFBUMIlm8VH5QWb02VXTFTwxAs2Zomard0Z7r842AIftuaKmq3Ym+m7JLXhBAs2i6userASVBqm4NwXQPAop2pog/aNYdPwWxhZVe0XLQnTfRBu+7EaehMZlkVLSmAxfszRCMWm9NzUKszyNOgFMuPZEFnNAna7cy+gLI62wbWJjhKsS7tDGoahaMJB3Mvo6CqVlapfo5SbM8+j5LaekG7lCuFOFdaIUuDpRQHzl9GXoXwm352cSnSCopknSuWUqQVFOF0camg3aXKKhy4eFm2xtmycqQWFAraFdfVY8vZXFn3FUsprtTU4EDeZZs/ezegOiJ/Iumn8lFcViu7gRbHUWzYfhJGE/+D9syFElzMr5A9WLAcxZZ9p9Go4w/bXrpagZPnCmX3jmE5il3HzqG6TsdrU1ReiyNZl2X3Q2E5ikOZl1BSwR9SraxtxO6UXNkaHEeRfu4q8ooqeW3qdUZsOnJGtgalFGfzy3DmMn8EyWCyYM2hU/I1AOSX1SDt/FVeGzPLYsXBLEWN2cprG3HodB7vdo6jWHogQ1Hp7jq9EbuyLvBup5Ri8cEMRd10DWYLNqWfFbRZfDhTUZ8SluOwNlV4mnTx0UzFjQtXppwS1jiuTENDCJalnhS0WZqapUyDIVgqorEs/SQ0Cq6HhiH4PVX4JXB51ilFS+g1hOC3tEzZn/87ozoifyIbdpxU/OBo0Blx8Dj/g3bj7lOKNUwmC3Yf4Z8O2Lg3W7EGx1FsO3iGd/umA6cV16wghGDTAf6H+ZYjOYoGPcD6gFp/gH9aY0fKOZivFXNSorHuIL/G3swLaDQIv6FL0VhzkH9QOnLmMqoapM3388EwBKsO82ukXryK4mrhN3RRDUKw8jD/oHTmaikullYp6qZLAKw4wj8o5ZVXISu/WJHTRimw7Bj/cZTU1uPIhSvKmjxSiuUn+DVqdAbszLmgSIOlFKvSTvHm7ujNZmw4dVaZBkex/lQO9Obm82osHIeVmdmKuumyHMX2s+evLyW+FUoplmSeVHTNWUpxMO8KiuuU/Qb+jqiOyJ9IfmGV4jbcGg2DopIafo2iltEoFNC4WlqtXIMhKCyrFdCogdLe6wRAYbmARlmNYmeH5aigRkGZtQquUo38shp+jfIaxQW6WI7iShl/CD2/vFbRGz5gdT6FNK5W1CjaP2AdXK+U82sUVPBfK6lQAAWVAtdcYJstFFXX8U6XXa2SH1m9kapGPQzm5iOsRbXSEkfFaDSZUcszgJfVN8LEKnPUAcDEsiivbz5ptUanR6PE5F8hOEpRxJO0arBYUCUxMVcICuBqbcvcP38nVEfkT0Sn8K0VsA6uOj3/fhp1yjUA4e/a0AIalFJBDZ1RXp7AjbCciIbB1CIP2gY9/zSWzqj8AQhYp3j4NVrmmjcI3Fd6k0mxIwJAMHKjM5pbRENoxYmuBQYkwDodxqvRQteDoxRGnmhaSwys1/fF8311ppY5DgBo5N
nXn6LBEylpUY2WvB4tuK+/C6oj8ifi4qSsjDFg9Zidnfn34+qsrIxxE85O/PtpCQ1CCJwF6jG4ONopjlZoGBENJ3vFAx8B4CpwrlwcW6bnhJvAOXd2sFccPQIAV4H708nevkWcNleB6+HsYNciGkLlwluqB4hQvRKl5cqbYAiBA083aBcJHZylwld4zMW+ZY7j7tJo/nf4ZxzH3YzqiPyJhAXb3uL7VliWQ3Ab/jLjYUHKNSwshxABjdA2XsqPg+MQEsCvERIgvYAQH5QCIQIl2UNaeyueYmIYIqgR6u8tubYFHxqGoK3A+Qj194JF4bI/DUMQHsDfoyjM30uxk6BhCNq2FtDwEy6fLwWGELT14z9XLaFBCBDq68m7PbRVC2gACPbx4E1+DPX2lL0y7kZ83VzgwFNoro2nO7QK+/4AgJujA28FVD83V15nyxYctVr48lQn9XRyhBuPA2ELWoZBoIdb8/p2Wvi6CLfjkAIBEOIpvUDb3YLqiPyJJI+IUTzwubs6ol93/tLfycOiFGs4OthhSO8I3u1Jg5RraDUMRvbjb1c/dkCXFnnLHzugC++20b07t4BDRTF+QFfe7cO7d1TcNIzlKCb05694OzimHdwEojJSNSYJaPTpHApfD2UPWpajmNKPv+NtXHgggnw8FK1o4SjFVAGNToF+6BToCyWXnVJgep8Y3u2hrTyR0DZQcbRtRm9+DV93Vwzs2FbR/csQghk9+TU8nByR2KWj4lUz07pF8Zbfd7TTYkJMF8WrZibERPL2bdEwDKbFdVW8amZ0ZEe4O/JXvJ0VF63ommsIweD24XdluXcx/lBH5MMPP0T37t3h5uYGPz8/jB8/HufOiRdnuluJ7RIkGM0Qg2EIxo2MgZ0d/xtERLg/IsL9ZC8j0zAEY4d0hZPAlEJoG2/ERwbLnjrRMAQj+naGhyt/Dwd/bzf0iwtXVEdkYLf28BVo6Ofp5oThPSIUafSIDEGwP/81dXG0R1If+Q9ahhB0bdsaHYP5yz/b22kxqX+U7OtBCNC2tTdiwtvw2mgYBlP7xyh60Lb2ckPvTqEC34Ng5oA42fsHAE8XJwyOaidoM7NfHJT40c4OdkiM43fUAWBmn1hFESQ7jQbjukUK2szoFav4hWBSAr8TDQAze0QrXpkzLUG4bcSMbso0WI5iRjd+hwoApsdHK141M1NEY2qM8LkU1aAUs+OENe5W/lBHZP/+/Xj88cdx7Ngx7Ny5E2azGSNGjEBjo7SSvHcbhBDMmtBD5mcBrVaDcYniN+rs8T0kl+y+VYMQgokjY8U1xnaXXUeEApgqod/MzNHdFNURmTmqm7jGiHhFNVfmjEoQtZs2JBYAkfWmz1GKuYniGpMHxEDLMLKL5d07IkHUeZ3Yuysc7LSynZF7hyaIOktJ3TvDzclBtsY9g+JFG9ONiouAj5uzLA0CYGa/WNGeNkO6tEMbT3dZDighwOSeUXAXaejWt30own29ZWkwhCApthN83YSjXHHBbRAV6C9bY2indgj29hS06+Tvi15hwbIiFhpC0DssGBH+wh2rQ7w8MaxjO1nXXEMIogP8ERco3BTUz9UVyZGdZGu09/FGv7b8jvrdzB/qiGzbtg333nsvunTpgpiYGCxYsAD5+flISxOveHm3MnpoV4wfaZvXa21iS/Dei8nwbyXe/G9I7wjMTBYfvG7SgHVAeuvp0QgNFM/P6B3bFvMm97FJo4lXHhyBjmHi3YTjIoLwzKxBsjSemzMY0R353/Cb6BTmj1fvHSZL47FJfdGra5ioXdsAb7z7QKKsmaZ7R3XHkPgOonZtfNzx8bwxgLVHok1MHRiDsb2E374BwMfdBV89lHzNYZW+fwIgqUdnTBsgft+7Ozvim4fGQ8MQmx7ohBAMiW6H+4Z2F7V1tNPi+3kTYK/V2KTBEILeHUPw2MjeorZ2Gg1+eGACHO3sbBpgGUIQG9IGL4zpL27LEPwwdzzcHB1schQ0hCAiwBdvJA8VtS
WE4JvpyfB2drZZI8zHCx9OGCnJ/otJY9Da3c2mc6UhBK3d3fD5pDGS7D9OGokwby+bNbxdnPHtlGRJUeZ/jxiKzn6+Nmu4Ozrg5ynjW2TV2N+RPzVHpPba+mhv7+YHOqPRiLq6upv+7jYIIXhm3lBMHmONCIi9IWoYAq1Wg/dfHofe3cIl6zw6awDumdBTsgajYfDOM2MEc0Nu5b4JvfDw1H7X9yGqwRC89tBIjB0kPYQ5IzEe/5o9SLIGYHVCpo6QHuIfNyAKr947DAwhkjWemNwP946RHt0a2SMC7z04ChpGusaDY3viiQl9JWsMjG6HTx9KgpZhJGvMGhqPF6YOkjyV1yMiBP99ZDwctFpRjaaH6oQ+XfHWzBGSNWLatsEPj02Co710jcS4jvj4ntGSp6c6Bfph/mNT4OpoL1ljYJdwfHlfsmjEpYlwP2/89uhUeLo4iQ4wTZt7tQ/GDw9MgL1WWl5RoJcHfn9oGnzdXMU1rv3FhARg/v2TJK8g8nd3xdIHpyHQw12yRqfWvvjtvilwc5SWu+Tt7ISl905DWx8vSU4uIUB4K28svXcavJ35p3hvxM3RAYvnTEEnf9/r31MIa4dfDyybOw3+btLyNpzt7bBo+iTEtgmQrOHn6opls6YhyOOfl6TaBKFyYvgy4DgOycnJqKmpwaFDh5q1efvtt/HOO+/c9u9S2gj/HTmcchErNqYh/VT+9YchR5tazXOws9Ni1KAumJIUj9Ag/tUGQhzPuowVm9JwLPNysxoaDYMR/Tpj2thuaB8qrw11+pkCLNuahkPpF0GIdQqCo/9rNc8wBMN6RWD6qG7oFO4vS+NkbhGWbkvDvjRrVVmrBgVDiHVqhQCDEzpgRmI8ojqIR0Ka40xeCZbsSMeuE7nX981RzqoBa+2TAbHtMGNEPLp1CpalkVtQjiW70rHt+FlYOA4MYW7W4Cj6dA3DzOHx6BUpL0x7qbgSi/ekY/OxHJgtLDQMA47S6w94lqPo2SkEM4fEoX+UdOf2Rq6UVWPJvgysP34aRpOlWY34doGYNSgOQ2Lay8pZKqysxZIDGVhzNBs6kxnamzQIWI5DdFgAZg6IQ2JcR1kapbUNWHwwA6uOnkK9wdisRmSQH2b1j8OY+E68SZdCVNQ3YvGRTCw/dhK1OsP/NACAWDU6tm6FWX1jMa5bpGRH50aqG/VYciwTS49noapRf10D/8/eW8fXdVzr38/sQ2JmZmaZmSF2zAzBJmlTTuE2pbRNuSnepmkaNDPHjh2wHTOJJYuZWTqSDp897x9H8rVlbTh7K236e/18PrnNzR7t79k0s2bNmrVg+w4tLItIX09sm5KJtROSRRs690urN2Df7ULsuZWPjoGhMRmhnu7YPjkDGyakcQaP8mnQaMKB3ELsupWPVu3AmIxAN1dsn5SBTdlpkrbNGswWHMgrxK7b+Wjo6x+T4efijG0TMrAlO403QJVLRosFhwpLsDMnDzU9vWMyvJ0csS0rA9uy0uHpKM6Y+m+SVquFu7u7qPH732aIfOUrX8GHH36IK1euICQkZMw2RqMRRuP/JW7SarUIDQ39rzREKKUwmi1QMgyUAtvTGlt6ce5iCTq6B2A0WuDspEFMhC8WzU6CizP/jMIw3EELMVra+3H2UgnaOrXQG8xwdtIgKtQHS2Ylws2V/yMwmi1gCIFKgNHercWZS3fR0tkPnd4EFycNwgI98disZHi6OfH+rclsARHB6OobxOnLd9HU3ochvQnOjmqEBnhg2YxkeHvwr3ebhjNIqgU6yB6tDqev3kV9aw+GDCY4OagQ7OuBZdOT4O819va9EY1UW+XaEjmivkE9ztwoRXVLN4b0JjhqVAj0csXyaUkI8uGfGZmtVrCsMGNAZ8DpW2WobO7EoN4EB7USAZ6uWDY5EWEC21jNViusLIVGqeAd4IcMJnx4pwylTR0Y0BmhUSnh5+GCZRMTEMWzVRewbRM3W61wUCl5GTqjGWfzylHS0Aatzg
iNSgEfN2c8lp2AuCB+41ksw2i24KOCCuTVtWBAb4RKoYCPqxMWZ8QjOZTfeLayLIwWKxwFGCaLFZ8UV+FObRO0OgNUSgU8nR2xODUOaWEBvH8rlmG2WnGhtAY3axrRrzdAwRB4OjliQXIMssODef+WpRQGs0WQYWVZfFZZiytV9ejXG8AQAg9HB8xPiMbkyNBxYbCU4kp1PT6rqr2XVt3D0QGzYyIxIzqc1zNDKYXeYoGDkj+eiVKK63WNOF9ZjT69rQiiu4MDZkaFY3ZMJK/RaQ/jTlMzzlVUoVevh5W1MaaGh2J+TBSv0WljmKFRKCUZwP9pfeEMka997Ws4ceIELl26hMjISNF/Z8+FfBFksVhxKbcahz7KR1Fly738Ec6OaiyaloC1C9IRw7P7QYysLIurBbU4+Eke8sqbYLbYGE4OKiyYGI9189ORECHN6zAilqW4XlKHQ+fzcbO04V6tFEeNCnOzYrBhbgaSI/k7TiFRSnGrvBEHLubjWnEdTMMMjUqJOelR2DAnAxnRQbIZudXNOHCpAJ8V1cA4bIhoVApMT4rEplnpmBjH33GKYRTVt2H/5QJ8Wlh5L+OmWqnAlPgwbJqZganx4bKTs5U0tuPA1QKcy6+4l8VRpWAwMSYUW2ZkYEZihOzOqry1E/uvFeDDgnIMDmdAVSoYZIYHYcv0DMxNipadSr66oxv7bxbiVF4ptAbbpEPJMEgNDcC2qRmYnxQDtcy8EnXdvThwuxDH8+7eG8QUDEFSoB+2Ts7AkuQ4QSNOSE19/ThwpwhH8ovRM1wllyEE8f4+2DYpA4+lxMNRJS/pWJt2AAdyi3Awvxhdg0OgsMUTRPl4YfuEDDyemiA7kVbH4CAOFhTjQH4R2gcGQYevI8LTA1uz0rE6JUn08gqXunU6HCoqxt6CQrRotRh2YCLMwwNbM9KxNjkJHjI9An0GPQ7fvYs9BQVo6O+7xwhydcOWtDRsSE6BtxP/ZEhIWqMRR8tKsLuwALX9vfe8Wv7OLtickoZNyanwc5a39XbQZMLxirvYWZyPqt7uewxfJ2dsSkrF5qR0BLrwT4a+KPrCGCKUUnz961/HsWPHcPHiRcTGCgfd3a//JkPk1GfF+MeBy+jV6oeXPR68rQqGwMpSpMYG4UfPLUJEkP0Ju87dKMNf93+Grr4hXkZihD9+9MwixIXZb/RczKvCa/suoK1n4N75xmLEhvjgR08sREoUfyT5WLpaUoff7T+Pps5+XkZkgBd+tHU+smLH9qDx6XZFI3514FPUtffyMkJ9PfDDDfMwNdH+ZZDCulb8Yv8nqGzt4mUEerriB2vnYo7AttKxVNbcgZ8e+BilTR28DD93F3xvxSzBbaVjqbq9Gz899DEKGlrHZIwssXm7OOGlx2Zg5QTu3Cxcaujuw0+OfozbtU28DA8nB3xjwTRsmmL/NsbW/gH8+PhHuFbdAAUhD23XHGG4Omjw4uzJeHJalt1GaOfgEH566mNcrKgFGT7fgwzbsqSzWoXnpk/E8zMn2R2A2KvT46dnPsFHZVVjMghsO88cVUo8NSkL35g91W4jdMBgxM8+Oo8PSm3pFMZiADaDeltWBr4ze7rdS0Y6kxm/OH8Bx+7ehZVlHwrWHmEoGQYb01LxwzmzobFzychoseDXlz7D/uIiWMZgjHAUDIM1iUn46Zy5cLLTQDRbrfj99cvYVZh/ry7OaM7IM348Nh6vzl0AV7V9xpuVZfHn29fwTsEd6C2We894LMbiyBj8es4ieDp8sZdzvjCGyIsvvoi9e/fixIkTiI//vw7S3d0djiIs4P8GQ4RSijcPX8P7J26Kas8wBI4aFf7y/TV2xTLs+OAWXj88dmzNWAy1UoE/fmsVJiaFiWYcOJ+HP+y9II5BCBQKBn948XHMSBMfZ3DiWgle3fUxKKjgtllCAIYw+PWzS7EwO04041xOOX6440OwVByDgOBnWxdixRTxA+zF4mp8990PYGWpYL6IkU7lh+vmYe
NM8QPsjYoGfP2dEzANL/eI0UvLZ+LpeeJ3TOXWNeMr7xyDwWwRvVX6ywsm42uLxO+YKmlux7PvHsGQ0SSa8eT0LHz/sVmiDYXKji48/d5h9OkNohnrs1Pws8cXiPZW1ff04ckdh9A5OCSasTwlHr9dvUR0htKWfi227zqMln6t6LwX8+Ki8Le1y6EWaSh0Dg5h+77DqO3pFfVeEQDTIsLwz7UrRcd99On1eOLQEZR2dopiMIQgMzAQ765bAxeRXp4BoxFPHz+G/DZxlY4ZQpDo44tda9fCQ+Qgrjeb8dzp47jW2CBq1xtDCKI9vbBn1XrRmVZNVitePHcSn9RVi2qvIAQhru7Yt3IDgly/mOMiYN/4/bkuPL3xxhvo7+/HnDlzEBgYeO+fAwcOfJ7Yf6v2fZgr2ggBbMseeoMZ3/r9UdS39Ij6m+MXC0UbISMMo9mCl/5yHBUNnaL+5uzNMtFGCGCbQVksVnzv9ZMoqm4R9TefFVTjF7s+EmUgALbtxFaWxQ/fOYNbZQ2iGDfK6vHy+x/CyopnsJTilT0f4VJRjShGfm0LvvPuB7BYWVEd4EiLXx8+j7O54hL6lTZ14GtvH4fRYrErMdafPriMYzeLRbWtbu/GV945Br1JvBECAP/85CZ2XckV1bappx9fevcIBg3ijRAA2HE1F29evCWqbbt2EM+8fwR9OvFGCAAcyinGnz8R9131DOnw1M7D6BwQb4QAwOnicvzqwwui8vpoDQY8vecoWrTijRAAuFBZgx+e+kgUQ2cy49mDx1An0ggBbO/v9fpGfPvkGVhFlBIwWiz40rHjKBNphAC2bzCvtRVfPXESZhHVeM1WK1784JRoI2SEUdbViWdP2L4rIVlZFt88dxrXmxpFb71nKUVNbw+eOnlEVLE9Sim+f+EsPhVphAC2xGdNA/3YduoQ+o1jVzX+b9PnaohQSsf856mnnvo8sf82tXcP4O/7Ltn9dyyl0BvN+N17nwi27dXq8Ptd5+1mUGoLnPzlO+cE2w7qjfjljo/sToZFYdsd8cq75wQ7QaPZgp/uOCcpbTulwE/fPyfYCVqsLH688yyoxNzwP9l17l5AK/dvofjR7rNgWfspBMDP9n0sWJ2VUoqf7P8IZisrKdnaq4c/Rd+QcEnyXxz9FAaTfYbOiP5w6hLa+wcF2/3q1AUMGqVVOf7bx9dQ19Ur/FvOXUKvTi8pc+bbV+7gbmuHYLu/XriGDu2g3QwKYN+dQuQ0NAu2fePKLTT09tmdxI9S4GRxGT6rrhNs+86tHJR3dtl9HSyl+KSyGmfLKwXb7skvQH5LqyTGlfoGHC25K9j2aOldXG1ssPu9slKK/NZW7C4sEGz7YVUFPq6tlsQo6+7C23l3BNtebKjF8YpSu/sSK6Wo7+/D6zk37PzLL6b++0Jxv0A6fr5QeKM4h1iWIre0SdArcvJysahZCBejrL4DpXXtvO0+vFEKo8kiafhmKUVDey9yK5p4232cU4EBnVEyo6NvEFeL63jbXSquQZdWJ2nwphTo1xnwaUEVb7tbFY1o6uqXNLBS2ErUn8kp421X3NCO8hbxs8nRsrAsTtzm78yr27uRU9ssK+314ZtFvMebe/txqbxWcnZcBSE4cKuQt033oA5nSyqkMxiC/bf4B6UBgxHHC+5KvlcKhmDPbX6GbUtpkXQGIdh9O5+3jdlqxZ7cfMnvFUMIduXwM1hKsSMvT9L5AVt3uiM3j3diQynFe3m5smoS7cjPE7wP7xfmSU4wxlKKXYX5gsUo3y/MlVwDx0op9t0thMEi7Hn5ouuRISJRZosVRz8tkJzmHLB1UMfOc3e0VpbFoU/yJacgH2Ec+ZS7E6SUYv+n0juOEcbB8/m8bfZfyJdXEIohOHBRgPGZPAZDCPZ9xs/YdzlfVoEuAmDPZ/wd7f6r8hiUAnsu5/G+mweuF8pisJRi//UCXjf6wVtFsnYkWSnF4dvF0Ju4O9ojucWyvg8rS3GioBRaPbeL+0
Rh6b1dXVIZH5VWonOQu7TFmbvlGBTwlPEyKMXl6jo09vZztjlfVYNunbCnjEsspchpakFlZxdnm2v1DWjq10quV0kBlHd1oaCtjbNNflsrKrq7ZTGatFpca+Re7i3v7kJOa4usekFdeh0+reVecmno78OlxjpZk4EBkwmnqyok//0XRY8MEYkqKG9G/6C89TkrS3HuGvfsuLyuAx29wu5vIcbHt7jjEurbelHf1iur0K2VpbiQV8VZ7r6jdxB369tlfdRWluL63XoMGcburLU6A25VNMpisJSisLYVnRxLDmaLFZ8V18gq0EUB1LT1oKGzb+zjlOJsvvQZ/ohaewdQ1sK95HAmv0w2o3dIj/y6Vs7jpwvKZD0PABgymnCzppHz+AeF8hkmixWXK+s4j58pll+ok2UpzpdzD0of3q2Qnd6bEIKPyrk9eh+WVcpmKAjBh2XcyzNnKipkbyNXMgzOlHEPrmcqK0UH//IxPqzgZnxYVSGrWi9gu1dneIyEc7WVsgx1wDZ5+qCK38P636BHhohE9Wp143Ie7aCec3bcM04MvdHMGfvQMzA+DJalGNQZxzw2XgzAlghsLPVy/Hcp4jpXv84ge9AbUc/g2PdEb7LImn0/wBgY+zpYlqKfxwNgF2OI+9n2iohTEaNujnsFAN08fLEihPCep3M4h4ccKRiG9350DA7JfrcYQtCj47mOIfkMQgh69NzX0T2kk7yUPCJKKbr1PM9cp5NU1PN+WVmWn6HXyTYSrJSig6fAa5deJ9vYYSlFh+6/v4jsI0NEorhm//aK5dndIfeDvl9cv9c6TtdhY4w9gP5bruMR42EGO/bzELN1WjSD5/2R43IWzZDp1QFsy2X8jPF5Jmae8wjFEogV33WMV5/F9Z0DgIXKZ1Dw3w+ufCH2Msx892rcnjn3vfp3vFf/LXpkiEiUq7P99QfGkqODmjOPgauTvIyGI1IM5y75PBkA9z1xdRw/hhvH73VzGp/nwcdwFSjLbh9j7HM5a9R2V8/lZHD8XgXDwFEtL7Po/zG4n62LRl7WTzEMN5lZPwHbrJLrXgGA+zg8d1v6cO7f6imycBuf6HCKcm6Gg6wAzxHx1V7xcHCQPctnCIG7hpvhptGMyxIT371y08h/rwgAL558JW4ah3HxsHpJqIXzRdMjQ0SiUmICZae7VjAE2UncxdPiw/2hkTlgMAxBRhx3jYnIIG+4yDQUCCFICPfjTJsd5OMOb4FaM4IMAGF+HvBwGfvD9nZ1QoiPu+yO1s/dBYFeYyffcdKoEB/sC5nZ2uHu5IBI/7FrvTAMQWZksOyO1lGtQkIwd2bdiVGhsoJVAVuK+dTQAM7jU6LDZDMYQpAVHsx5fNo4MABgQgQ3Y2pk2Li40CeEc2cInhIeKvuZWynFxDDu65gcKq1Q4/2ysCwmhXFfx6SQENmeMAvLYlIoz70KCZXtsbBSiskcNc8AYEqwfAYATA7mvueTg+TfK4YQTA0Wn7Tyi6pHhohEebg6YsGUeFmdoJWlWL8og/O4s6May2cky9vdwFJsWJjJeVyjUmLN7FRZtVAopdg0P4vzuFLBYOOcDNkd7eZ5mZwGFSEEm2dnyDo/Qwg2zU7nDbbbPCsDclYDGEKwfnoab/XTLTMyZM2UFAzB6knJcOLxSGyZniFrWUPBEDyWkQAPZ+4Z35ap6bIZ8xKj4e/OXb9j0ySZDEIwNSoMEd7cRQA3TUiTNWAwhCA1yB9JgX6cbdZnpkg+P2Az1KO8PTGBxxBZkyatsu/9CnR1wayoCM7jjyck2J1CfbQ8HR2xKDaG8/iimBh4yvQCOKlUWBGfwHl8VliE7JouKoUC6xK5szVPCgxBlIen7MnT5qQ0mWf4z+uRISJD6xbK6wSD/NwxQSAF+7p58hhebk6YmcFf42Tt7HRQGQwXRzUWTuRPwb5qeoqsJQe1SoFlkxN52zw+OUmwCjGfCCFYNZV/QFiSFQ8njmUuMaKgWDstlbfNvNRoePIM8E
KyshQbp/N3TtNiwxHoIb2jtbIUm6bxp6vPDAtCtJ+X5I7WylJsmcrPSAjwRXpIgGQj10optk7O4G0T5uWB6VFhsnJKPDGZezIAAP6uLlgYHyPZ80IBPDmJ21AHAHcHB6xISpDMIIRg+4RM3vvgpFZhQ2qKZAZDCLamp/Gmq1crFNiali75eSgIwcaUVN6ihAqGwRNpGWAkvr0KQrAqPpF3+YcQgqfTsiWdf4SxJCpWdqG9L4IeGSIylBwdiFnZ0ZI/iK9vniXoiYgO8cHSaYmSI7i/vnGW4BJSsK871s1Nl2wofHXNDMFqpj7uznhiofgaKKP1/LIpgktIbk4OeGHpFMmMpxdOgJcr/xKSo1qFry+fLun8BMCmmRkI4lj6GZFKocC3H58pjUGAlROTEOXvzduOYQi+u3yWJAZDCBakxPAuy9h+C8H3lkpnTIsJw+Qo4eWElxZKu1cKQpAZGojZccIVwb85bzoYQuwelhQMQWKALxYnCRf8/NqsKVAq7B/6FAxBpLcnVqYmCbb98rRJcFDxl64fk0EIglxdsTFd2HPz7IQJcJUQx6EgBN5OjtieyW+0AcATGZnwdnS02+BhCIGrRoNns4QNgC0paQh0dZXEcFCq8JXsSYJt18YnIdLD024GAaBkFPjGhKl2/d0XVY8MERkihOAXLz6GxCh/uz+6b2yZjbkTxVUj/tHTC5GVEGK3MfL86qlYNl24cwKA72yai+mpkXZ3gtsXZ2P93AxRbb+6cjoWTRBfvG5Ea2ak4qnFE0W1fXbRRKyeZr+be+mEBLy4TFwht80zM7BtDvdS1FgiAGanRuG7q2aLar96UjK+vGiyfQwCTIoJxSvrF4hqvzgtDt9ZZt8gzhCC1NAA/HbzUlHtZ8VH4icr5tl+nx2MuAAf/HXr46Le+UmRIfj16kUgdjAUhCDc2wP/2LpKVKxXWnAA/rj2MRBCRMcIKQhBgJsr/rV1Ne9S3Iji/Xzw+voVUDCM6P5EwRB4OznhvS1r4KQW9tSFe3rgzXWroLSHQQjcHBzw/qY1vIGqIwpyc8W7a1dDo1SKHmAVhMBJpcKOdWvh4ywcT+bj5IQda9bCSaWyi+GgVOK9VWsQ5CrsDXTTOGDXqnVw02hEMxhCoGIYvPP4akR4cC/3jchJpcaux9fD29HJLoaCYfDm0pVI8La/wvoXUZ9r9V25+m+ovgsABqMZP3vjQ1y8UzVmifMREUKgYAh+8OxCLJ9lXyl1k9mCX777Mc5eL+VlMISAEOClLXOxfkGGXQyLlcUf9p7Hkc8KBRmAzRPyxJIJdhlIVpbFX49exu5PcvkZDAGlFM89NgUvLJ9iF4NSijdOX8dbZ2+CMIQzw+gI/8kF2fjmipl2xclQSvHuJ7fx99PXADxcRn00Y8OMNPzPmrl2BzjvuZyH3x//TBRjxYRE/GzDQqjsXJ46ersYvzj6qa1UO0dvMMJYlBqLX29aIroK64jOFJTjh4fP3cvEOhZmhDErLgJ/3LwMznbuujlfVo3vHjoDw3DOHD7G5MhQ/G3Tct7dMmPpanU9vnnoAwwaTWOWar+fkR4cgDc2r4SXiIH1ft1uaMKLB0+h32AAQzBmTNIII8HfF29tWgV/V/vc84WtbXj+0Al063RgCBnz3VIQAiuliPL2xDvrVyPEw90uRnlnJ545cgxtg4OCjFB3N7y3di0ivYQH7/tV09uLp48dQaNWy8kY+e8BLi54d9VqJPjYN3g3afvx1MmjqO7tufd7uRjejk549/HVSPPn9xaOVvvQIJ764AhKuzs5GQQEFBQeGge89dgqTAzkDrb9Isie8fuRITKOKqttx+GP83HuWinMlgcjrv29XLFuUQYen50CD1fp6/+VjZ04cr4Ap6+UwGh+cI+6t7sz1s/PwIpZKfDxEFeCeizVtnTj8MUCnLxS8lB6bU9XR6ybk47Vs1Lh5yk9xqChvRdHLhfh6JWih7KlujlpsG5WOtbMTEWQt/Tn3tzdjyNXin
DoSiEG9A8mW3NxUGPN9FSsn5GGUF8PyYz2vgEcvlaEg1cKHyo056RWYfXUFKyfnoZIfy/JjC7tEI7cLMb+K/noGpUcTqNSYtWkZGyclobYQB/JjJ5BHY7dLsHea/kPFbNTKxR4PCsRG6emISnEXzKjT2fAidy72H0tD8192geOKRkGy9LjsXlKBlJD/CUvRQ4YjDhZUIpd1/NQ39P3wDEFQ7A4KRZbJmcgKyxIMmPIZMKpwjLsvpWHqs4Ha0UxhGBBQjS2TMzA5Aj7vZgj0pvNOF1Sjp2381HW/mAFbQJgbmwUtk3IwDQZsStGiwVnyyqxMycPha0P1qMiAGZEhmNbdgZmR0VIzpZqslrxUWUVdubmIafl4SrdU0JD8URWBuZHR0vOlmphWXxSU42d+Xm40fRwzavsoCA8mZ6JRTExvLEnfLKyLC7W12JnYR4uN9Q/ZICm+wfgybRMPBYTB40I79dYYinF5cY67CzKw/n6mocYid6+eDotC4/HJPDGt3xR9MgQGUdZWRaFNa3o6B2EyWKBi6MG8aF+vAOkdsiAiroOaIcMUCkV8HRzQmKUP+fHzLIURXWtaO8dgMFkY8QG+/AOkIN6I8rq2jEwZIRCwcDD1RFJkQGcM25KKYob2tHao4XeZIazgxoxgT6I8OOegegMJtyta4d2yACGIfBwcURKZABnQCilFKXNHWjq7ofeaGNE+nkhOoA7XsFgsqCkrg39QwYQArg7OyAlIgBqnhl3eUsnGrr6MGQ0wVmjRpiPB+KDuGc5JrMFxfVt6BuyZRN1d3ZAclgAHHi2Rle1d6GmsxdDRhOc1CqEeXsgIdCXc2AxW633GCxL4e7sgKRQf97A1trOHlR2dGPIaIKjWoVgDzekBHMPwhYri+LGNvQO6mFlWbg5OiAp1A8uPPkp6rv7UNHRhQGDEY4qJQLd3ZAeEsDJsLIsSpra0TOog9nKws1Rg8RgP17PQVNfP8raOjFgNEKjVMLP1QVZoUGcAyTLUtxtaUfXoA4mixWuDhokBvnCgyeXRqt2ACVtHdAaDFArlfBzcUZ2SBDnN0Upxd3WDnQODMFoscDVQYN4f194u3B7JzoGB1HY1g6twQCVQgEfZydMCA7m3GlCKUV5exfaBwahN5vh5qBBjK83/Hi8E106HQpaW9FvNELJMPB2csLE4GDeAbKiowst2gHoTTZGtI8XAty4JwG9ej1y21rQZzBAyTDwcnTEpKAQ3gGyqqsbzf1a6MxmuGg0iPLyRLA7dx/XbzQgt7UFvUYDGBB4OjpiUmAw7wBZ29OLhv4+DJnMcFGrEeHpgTAPD872gyYTbrc1oc9g+249HBwwMSAELmpuT1lDfx/qevswaDLBWa1CmLsHIj25+zi9xYxbbU3oMdiKZXo4OCDbL5g3j0mTVouavh4MGI1wVqkR4uaGGC+ePs5qwe32RvQY9LBQFh5qG8NDw/O+Dw6gqrcbWqMRTioVglxcEc+zDGOyWnG7oxHdhiGYKQt3tQMyfILg4yB9QipXjwyRcVDvoB4nrhbjwIV8tI+q90IATEuOwMa5GZiaHC55tqDVGXDyxl3sv5iH5m7tQ8cnx4dh4+wMzEyJlJyzZFBvxAe3S7H3szzUj1HfJDs6GJtmZWBuWrTkrX06owlncsuw53I+qtu6HzqeFh6AzTMysTA9RtRa+VgymC34ML8ce6/mo7T54RoqScF+2DojA4vT4+1eNhiR0WzBR8WV2HM9H0WNDxfdivX3xtZpmViWkSBqPX4smSxWfFpahd038pHb8PAMMdLHE9umZGJFegKvgcEni5XFhYoa7L6Zj5t1D9dpCfV0x/bJGViVnmT30sSIrCyLy9V12H07H1eqH54hBrq5YtvEDKzJSIaXxGRdLKW4WtuA3Tn5uFD18AzRz8UZ27IzsD49GT7O0jpcSiluNDZid14BPq6sesi17+3kiK0ZGdiYngp/F2m7EyilyGlpwa6CfJypqHjI7e7h4ICtaenYlJ
qKYIn9HKUUBe1t2F2UjxMVZQ/lwHBVa7A5JRVbU9IR5u4hiQEAxZ3t2F1cgGPld2EclWHVWaXCxqRUbE1OR7SndA9geU8ndpcU4FB5MQzWB0tTOCqVWBeXgm3J6Yj3kh4fUd3Xjd1l+dhfXgjdqOq1GoUCa2KSsT0xE8ne0j2A9dpe7K3Ix96KAgyYH/TIqhgGKyOTsD0hC+k+gZIZzUP92FuZj70VuegzPVi2QUEIloUnYntcNrJ9uXNJfV56ZIjI1Mc5FfjJu2dhtloF18wTw/zwt6+vgrebfR3h5eIafP/t0zBaLADlX2uOCvDC619bgwA7l0JuVjTg22+dgm64qudYjJG1zVAfd7zxlTV2L1Pk17bga+8ch1Zn5Fwzv7dG6+GKf76wWnBHx2iVNLXjxXeOo3tQx7lmfm+N1sUJb35pNRKCuXM2jKWq9i48/+4xtGu517MJsVW2dXdywBtPrkJ6mH0dSH13H57bcRSNvf3cjOH/ddao8fctK0TtGrlfLX1afGn3UdR09fKsNdvkoFLirxuWY1as8K6R+9UxMIjn9x1HaTv3ejZgu18qRoHXVi/F4kRxgdkj6tHp8cLhE8hvbuVlMIRAQQh+/dhCrBKxa+R+DRiN+Mrxk7jR0CjIIAB+tmA+NmfYl7NBZzbjG6c/wPnaWkEGpRQvz5qNZ7Oy7Bo0jBYLvvPxhzg9XKiNi6EYfue+NXkavj7Rvrgrk9WKH178GIfLSgQZVkrx5ayJ+P6UmXYtG1lZFj+/dh47S/JFMZ5IzsAr0+bZNRFkKcXv71zCG4U3BRgMrJTFhthU/HrGIqgY8ZM0Sin+t/Aa/px/BYyI61gekYDXZiyDg0L8BIpSindKb+PXuZ8KMGzXMS84Bv87cyWclOOT7ViMHhkiMnT8ajF+sfNjzkF1tBQMga+HC3b8YBN8eRIv3a9zOeV4+b0zACCq5oeCIfBwdsTO720WHTPxWXENvv32SVDKHeQ4muHsoMaulzbzLtfcr5uVDfjKv46BZalohoNKiR1f34g4nqWU+5Vf14Jn3zwMs5UVzVApFHj3y+uQJtJQKG3pwBNvHoTRYhGVs4UZDjr+1zNrMEmkoVDT2YPN/9qPIZNJNIMQ4PWtK0VtLwWApt5+bHx7H/r0BlEMAlsA9Z/WPYYlyeJ2M7UPDGLDu/vQOTAkKsnXyDD06xWLsCZdXIB2t06HDTv2o7lfa1cisVcWzcW27AxRbbUGAzbuPYCanh67GN+bNQMvTBbelgnYjJAthw6iuKPDrgR1X500Gd+ZLm6LuNFqwRPHj+BOa7NdjKfSMvHTWXNFGSNmqxVfOnMclxrq7Krxsj4hGb+ft1gUw8qy+PonH+DD2grRDAJgaWQc/r7wcVEGD6UU37v8IQ5VFosk2BhzQqLw9sI1omJYKKX4+a1P8X5ZjmgGA4KJ/iHYuXADNCKNkdfyP8PrxdfEMwhBilcA9i3c8m8zRuwZvx9t371Pt8oa8MtdnwAQZ4QAtqRLnX2D+NrfjsEsompqYW0rfvT+WZsXRCTEylL0Denx4t+PQm80C7avaO7Ed9/9QLSBMMIYMpjw5dePQKsTrsxa19GLb7xz0m6GwWzBC28eRbeIirwtvVp85Z3joo2QEYbJYsWX3z6G1r4BwfZdA0N4/t2jMJjFGSGAzbCzshRf3XEC9V19gu379QY8+/4R0UbICINlKb657xQq2roE2w8ZTXhm5xH06cQZIYDtHaeU4rtHPkRh08NLUaNlsljw7J6j6BwUZ4TcYwD40amPx1wmGi0Ly+L5g8ftNkIA4BcfXcDFqlrh30QpXjxxym4jBAD+cOkKTpeVi2J888xpu40QAHj91k0cKhE3WP7g049wu6XJbsb7hXl4vzBPVNufX7lgtxECAIfKSvB6zk1Rbf9w+wrO2GGEALb36kxtBX5/67Ko9n8vuGGXETLCuNhUg1eufyKq/ftlOXYZIQDAguJWexP+59qHotofqi60ywgBbP1JcU
8bvnnlpF1/9+/SI0PkPr1+/Kqkv7OyFJVNXfg0t1Kw7RsfXLNVP5XAqO/oxZnbpYJt3zx707YVUwKjrW8Ax26UCLZ99/xtmCwWuztAK0vRO6jHwWsFgm13XsqFzmSym8FSiiGjCbsvC3e0e68XoE9nkMQwWix47/IdwbaH7hShfWDI7gy5I1VI37wk3JmfLCxFQ2+/3QMrhe1a/v7ZdcG2H96tRGVnt+RMv3+9KNx5XqyqRWFru+SU6q9dvCJYIv5GQyNuNDRKZvzh0mXB96WwvQ2f1tRITtX/hytXBGudVPZ043h5qeRKtH++cRUGC//EplHbjz3FBZIZ/3vnJgZMRt42XfohvFVwWyIBeLvgDrr0Q7xttCYj/jdf+B0fSxTAnrJ8NA3087YzWMz4U544o+hhBsXxmruo6uOfdJhZK36Xd0ESg6UUnzRVoqDr4di0/7QeGSLDqmjsRFFtm+SOgyEE+y/k87Zp7OzDjbIGzrwWQiIE2Hshj7ej7egfxPnCKsmDBaXAvs/yeH+jVmfA6ZxSyQyWUuy/WnAvp8RY0pnMOHqrWDLDylIcvln00Pbj+2WyWHHgZoHkZ25lKU7k3oVWz+1BsrIs9tzIFxwc+RjnSirRNcjd0VJKsetmnuRU6iyluFxZh6Ze/o521+08WWnOcxpbUNnB39HuvJMnK815eWcXClr4vTs7c6UzAKCpX4vr9Q28bXblF8hidOl0+LSmmrfN7qJ8WYwBkwmnKyt42+wrKZQV5GiyWnCs/C5vmwNlxaK9w2PJSlkcLOP3dBytKoFpVOCrPSKEYG85/+Tpg7oyDJhNvG34pCAEu8vzedt82lSFboOwN5mPsasiV/Lff156ZIgM69ClAnnF5ShFYU0rKpo6Odscvlwoi0EpUNPWg4LaVs42R6/Z53ocS629A7heXs95/MTtu7IrU/YO6nGhmLujPZNXBh2PESFGQ0YTzhZwd7Sf3q1Cn4hlKD6ZLVaczOX2Ul2pqkebdpDzuBhRChzJ4fZS5TQ0o6arV/KsFbB1tAdyijiPl7Z1oKilXV4xPkKwL6eQ83hdTy+u10v3VAC2GKE9udwDRtvAAD6trpHHIAS78vI5j/fq9ThZXia7UN7OfG7GkMmEQ3eLZTN28CzPmKxW7CmRbqiP6L1C7smTlWWxszgP9vtv/08UwM6SPFg5+iRKKd4vsW+5ZLRYSrG7NB8mnsnT+6U5kmvTALa6RwerCjHEY8zsKL8jq3iolVKcrLuLPqNeuPG/UY8MkWHdKW+SVVxuRIXV3G6vnCr5DIYQFPAw8mrsC1obSwqGQX4ND6O2Wdb5AVsCq/xabkZ+XYvs8u4KhiCvjuc66lokJ1EaESFAXj0Po14+g6UUOfUPJ2oaUU5Dy7iUqb9dx8NobJFdJdRKKW7yXEduk3yXsZWluNnAzShole71vMegFLfHSJw1ouKODtmGOju85ZdLFT1d0Fukz/BHGEUd7Zy/ta6vF/1G/mUVIVEAtX290HIsz7QNDaJdJ89QFzqP1mRErVaeoQ4A/SYD6rS9Yx4zs1YU97TLMqgAQGcxo4JneSanU37/bvutwjFh/049MkSGNaCT98EBtoFPy3Me7ZC82TdgS33OF0zaNw4MQgCtnvs6+nUGWa5UwLYmOjrb6f3S6o2yjTaWpRjgWTYZMBglL5ncY1BbMCqXtAb57xUA9PI88wGDcVxyBPQJXIfUfDkPnIePYTTKmu2NaIDnnmtlDqwjGjRyz1oHxolhslpt2/vH0Hhdh+1cYz8TLuNBGmPsc40ng8to6jfJ7xOFziUUBzMeDKPVAjMrvCFCjMbzvo+HHhkiw5KaMOx+UQreOh9KiQnDHoTwM1TjcB1C55Ga+Ox+ERDe86gUjORqwPcYhJ+hVDDiq6TxiC8r5ni8V0KM8XgewgxGttFmOw//dcid7QH891w1DsaUIGOcnjkATm+aXC/b/eJ6JuPK4DjXeD0Pfsb4fB8AoOY4l/LfwSDjea/G7/
eOhx4ZIsPycZefCpellLeMvK+7s+zB1cqyAgwX2bNKlqXw5mH4uDnLXjah4L9X3q7OYGR+eIQQePGk9OZL9y1WCobw3ytnJ9mDK0MI/Fy5309vZydYqbylAIYQ+Apch5x4BMBm8/m6cF+HmKqrYsR3HqkZWEfLy5GH4TQ+1+Gu0XB6oXycxuc61AoFXFRj55XwcRwfBkMIPBzGzq7rzXMf7RXXuTw1DuPiaQO474mrSj1ug7uPw9jXoWAYuKmkZVsWy/hP6ZEhMqylkxJkGwkqpQKz0riTTy2ZmCB7SYMQgnnpMdyMrDjZAx+lFAszuDNhLs6Ik71sYmUpFmdwJ9Fakh7HGXwmnsFiSTo3Y3Hq+FzHktR4bkaK/OfBUoqlPIyFiTEgMl07LKVYnprAeXxuXPS4zPQf52HMiAyXXcyLIQQrkxM5j08MCYaHiFL2gowkbkaaf4DkdPAjUhCClYncjDgvb0R6eMp66gpCsDw2nnNZL8TNDam+/rIGcQUhWBARDQeO0g6eDo6YGhQqK8ZJQQimBYXBk8PYcVCqsCA0WhaDIQRpPgEIdhk7MRchBI9HJMhiEABRbl6I9eAuXrkyMll2PFiAkyvSvKWnlf889MgQGdbjU5NkLZ0oGIJlkxPh6sTdyS2ZEA9HniJoYhjzM2N5vTfz0mLg4Sy9uq+CIZiWGI4QHw/ONtPjIxDgIb2jZQhBRkQQb7XYzIggRPl5Se5oCQFiA3yQHs79wSUG+SE1NEByR0sABHu6YWpMGGebcG8PTIsOk9V5eDs7Yl5CNOdxfzcXLEiQ19G6aNRYmsxt7Hg4OuDxlARZnjC1UoGVqdyDq7NajfXp8jpaAmB9egrncY1SiS0Z6bIGV0opNqdzp3pXMAyeSM+Qvbtha1o653FCCJ5Kz5R8/hHG9tQM3jZPpWXKMqStlOJJAcaTKVmyvG1WSvFkCv+9eDJJHoOlFE8nZfO22Z4gjwEATydm88Z7bYuTxyAgeDI+e1zivcZTX6xf8x+Um7MDlk1OlNzRWlmKDXO4Ow4AcFSrsGZ6KhgZjI2z+BkqpQIbZ6ZJ7gStLMWmWRm8bRiGYMvMTMkeJJZSbJnJzyCEYNuMTMkx6JQCW2dkCAZxbpsmr6PdOi1T8Hlum5IhufNgCMHmyRmCsSZbJ8ljbMhOFSwWuGVCumQPkoIQrEpLgqtAIb8tWemSr0NBCJYmxsFbYIlnU3qqpPOPMGZHRfJWpgWADSkpkg0qBSGYGByMWG/+mkyrE5KgUSolGesKQpDo44t0/wDedsti4uGu0UhiMIQg3M0d00K4DXUAWBAeDV9HZ0l9FkMI/JycMT+c21AHgGlB4Qh39ZDEIADc1Ro8FsltqANAhk8gEj39JD13ApvnZlUUfxmEOA9fTPQLkfxuKRmC9dH8Y8h/Qo8Mkfv09TUz4O/pKskYeXbpJCSECRdae+GxKQj39ZTE2DwnA9mxIYLtnpo/EfHBvnYzCICVk5MxM0m4tsmWmRnIiAiym8EQgoXpsVjEs2QyojWTUjAtLszuzoMhBDPiI7BqgnBtk8fS4rEgOdpuhoIQZEcGY/MU4SJoc+KjsDIj0e7OXEEIkgL98Mx0/pkYAEyKCMGWien2MxiCKB9PvDhrsmDb1KAAPD9top0E23UEubvh23OF66dEe3vhpdni6qyMZvi4OOPl+bME2wa5ueHH8+ZIYrg5OODnC+YLtvV2csIvFyywm8EQAie1Gr9ZuEiwratag9cWLLHbWGcIgUahxB8XLhU01B2USvxlwWN278oisAW7/nXRMsG/VTIM/rZgma32kZ0MBgR/nb9MMLCWIQR/nbMcSmJ/pg8Cgr/MWc65vHSvHSH404xlUDMKSflE/jj9MbiqhWNAfjvlMTgp1ZKMql9PXgrvL1h8CPDIEHlAni6O+Oe318LXw8WuAXbjnHS8uHKaqLaujhq88fU1CPZ2t4uxYkoSvrN2tqi2ThoVXv/KakQFeNv1si7IiMVPNs0X1e
molUr877MrkRDsJ5pBAExPCMevtywR5RVSKhj8efvjyIwIEu19IQTIjgrGn7YvE7VjhWEIfrfxMUyLCRPddTCEIDnEH/+7fQXUAp2T7TcR/GLlQsxP5J+1jWbE+HvjzSdWwVEtvJxHCMGPls7B42ncSx+jpSAEYZ4eeGf7WrgIeCpG9O1507E5W3wFWgUhCHBzxXvb1sLTSdyS4ZenTsRzUybYxfB2dsLOzWvhJzI244msTHxrurhvdoTh5uCAnRvWCXpDRrQ+OQU/mm37ZsW8WwpC4KJWY+eaNYjyFFd4cllsPH41d6HoQVxBCByUSryzYjUSfcQVnpwbEYU/zl9yrwqxkBhCoFYo8a+lK5HhLy4WYWpQGF5fuAIKhhHVn9gKTzL4+8LHMTWI3+Myoky/ILy1cDXUCoUoBhnm/Gn2Y5gXKu7bTfTyw3sL1sNBqRTltRh5br+auhiPRXDHT92vKDdv7Ji/ES5KtWgGAPw4ez7WRdtXPfrfpUfVd8dQ74AOv9134V7tmNGue4YhYFkKL1dHPL98KtbPTrN7xqDVGfC7gxdwLqd8zAq5I2Xi3Z0d8OziSdg2z77S4AAwZDDhD8c+w6lbd221Z0Y96RGGq6MGT87LxrMLJ9m9bGQwWfCnU5dw9GbxvaJ/92MYYsu14aRRYdusLHx50RS7t7SaLBb89cOrOHC9EEaz5SEGGf7/HVRKbJqWjm8umc67xXksWawsXv/kOnZfy4POZAYhDxYlHLkrKqUC6yem4qWlMwWXMkaLZSn++dlNvHc1B4NG0737f78Isc0QV2Uk4X+Wzoazxr5KmZRSvHstB/+6fAv9BuOYDIYQMIRgWUo8frR0Dtwc7QvepJRiz50CvH7pBnp0ek4GACxOjMVPl8yFl4QdMYcKivHnS9fQOTg0Zsl2hhBQSrEgLhqvLJoHf1f745ZO3i3Fa5euoGVggJcxKzICv1i4QLQRcr/OVlbiN5cvobG/f0zGyH+bHhaGX8ybj0iRRsj9ulBXg19evoiavl5exsSgYLw6ZwHivbnjs7h0rakBP798AeU9XbyMTP9AvDp7PlJ8/e1m5LQ145Wrn6K4q4ODYStrn+Ljj59Pn4fsgGC7GUVdbfjJtY+R19nKy4j39MErU+ZjelC43Yzy3k78+MZHuN3RxMuIcvPCTybOw9wQ8ZOUEdVqe/CTW+dwta2O93mEuXjg5ax5WBLGv7Q03rJn/H5kiPCos28QRy8X4cS1EvRodTBbrHByUCMxzA8b52ZgVnqU7BwOPQM6HLtWjOPXitHRNwizxQpHjQpxwb7YODsD8zNi7B5UR6tvSI8TN0pw5FoR2voGYDJb4aBWITrQG5tmpmNRZhw0dg6qo6XVG3DqdikOXitES68WJrMFDmolwn09sWl6BpZmxYua2fNpyGDCqdxSHLheiMbuPhjNFmhUSoT5eGDj1DQsz0yEs4O8Etc6kxlnCsqw/0YB6jp7YRhmBHu6YePkNKzIEo5zEJLBbMHZ4nLsvVmA6s4e6E1maJRKBLi7YMPENKzOTIYHT9CzGJksFnxUWoU9N/NR3tEFvckMtVIBP1cXrM9KwdrMFNnbl81WKz4tr8aeOwUobm2H3mSGSqGAr4sz1mQkY0NmCvwkGAf3y8Ky+Ky6Frvu5CO/pQ06kwkqhQJeTo5Yk5qMTZmpCHRzlcVgKcXl2jrszM1DTnMLhkwmKBkGXk6OWJmUhM3paQj1cJfFoJTiakMDdhXk40ZjI4bMZigIgbuDA1YmJGJLWpokA2Q041ZLM3YW5uFKQz0GzTZj112jwbLYeGxLzUCsF3/ciRhGbnsrdhXl40J9DQZNtsRubhoHLI2OxbaUdCT5CC9RC6mwsw27ivPwcX31vWRhrmoNFoZHY3tKJtJ8+WNbxOhudwd2l+XhTG0FtMNJxFxUaswNjcYTiZnI8guSnSiwsq8Lu8vz8EFdGfqNBrCgcFGpMSMwAk8mZGOSf4hsRq22B3sqcnGi7i76TX
pYKYWzUo0p/mF4Mn4CpgWEj0vCQ3v1yBDh0IDeiFO376KooQ1anRFqlQI+rs5YmhWPzEjhl45SKthmyGDC6dxS5Na12BhKBbxcHLE4PQ6TYkLHhaE3mXGmoBx3aprQrzdAyTDwdHHEwpRYTIsJF/RqiGEYzRacK67A9eoG9OuNYBgCTydHzE+Mxsy4CMGoazEMk8WKj0urcLmqDv06AwgBPJwcMTc+CnPjogQ9J2IYZqsV5ytqcLGyFn16W30FD0cHzIyOwIKEGN4kXmIZ1uHB8pOKGvTqdGAp4O6owfTIcCxJiIVGYPlGDMM2WNbjXEUlevV6WFgW7g4OmBIWimUJcYLbXsUwKKW43tiIMxUV6NbpYLZa4e7ggOzgYKxMSICzmt/IE8u43dKMUxVl6NQNwWy1wk3jgMyAQKxKSIKbht/IE8vIb2/F8fJSdAwNwWi1wE2jQZpfANYmJMNdYPuuGAYAFHe240hFCdqGBqC3WOCm1iDZxw/r4lME82OIZZT1dOJQRTFahrTQm81wVWsQ7+WLDXEp8HPiN/JGunYhTlVfFw5VFaFpsB9DZjNcVGrEevhgQ2wqAp35+12xjFptDw5VF6F+oBdDZhNcVBpEuXlifUwaQl08xoXRNNiHQzUFqNX2YNBigrNSjTAXT6yPTkOEq9e4MFp1WhyuzUeVtguDZiOclGqEOHtgbWQ6Ytz4vUxiGZ2GQRypy0OFtgMDZiMclSoEObpjdXg64t2FvUxi3q0e4xCON+SjtL8VA2YDHBQq+Du6YWVoOpI8ggQZYvTIEBml+s5e7LiQg5O378JssYIZdlkRAAzDwMqyiPL3wrZZmVg1OUVSNszmnn68fzEHx2+XwGC23Fu+uZ8R5u2BrTMzsX5KqiQvR1v/AHZczsXh20XQGc33XG/3M4I83bB1WgY2T0mX5OXoGhzCjqu5OHi7CAMGIxQMubdTQjHM8HdzwdYpGdgyJQNOErwcvTo9dlzPxb7bhejXG0YxbP/u7eyErZPS8cSUTNHxC/drwGDEjlt52HsnH91D+jEZnk6O2JydhicnZ8HDzuUJwFZ4bNedfOy+k4+OwaEHGcPPxs1Bg02ZaXh6Upbgbo6xZDBbsDuvALty89CsHYCCYcCytooWIwwXtRob0lLw7MRsScsTJqsV+woL8X5uLhr6+6Ecfs73MxxVKqxPTsaXJkxAsIRv0cKyOFBShPfzc1Hd2wPlsGt6hMFSCrVCgTWJyXguawIiPOz3DrCU4khpCd4pyEF5dxcUhAE7zBhZYlEpFFgZl4DnMyciRoJ3gFKKE5WleKcoB0Wd7Q9cB0MIQG3/+3hMAp7PmIhEb3FxGKMZZ2or8HbxHeR2tDx0HSN6LCIOL6RNkuwd+KSxCm8V38LN9sbhZ2BLNHiPQYEFYTF4IWUSsv2Eg+TH0mctNXir5BauDC8fUAqwoGBAQIjtmc0JisLzyZMxNcD+JRAAuNFej3+V3sBnLdXDz/lBhpVSzAiIwJcSpmBWUJQkRm5XI/5Vdh3nWyrvrQWzoCAgw+MJi0m+YXg2fgrmBwkH4o+lot4WvFNxDedaSu+tPdsYADP8nmV6heCp2ClYHJQoyctR3t+Gd6uu4mxzyb2xYzQj2SMQT0RNxbKQVFmelEeGyH26UdGAb75zEiaLhXfr4UicwcykSPzhiWVwsiPfR15tM1585zj0JrMgAwAmxoTiL08+DldH8QNsSVM7nn/3KAYM/DVYyPD/SQsNxD+eXGlXTpHK9i586f2j6BnUCW6hZAhBnL8P3nxyNXx5sn6OVl13L57ZcRRt2gHBbbMMIYjw9sQ7T6xBoLt493tznxbP7DmKht4+UYwgd1e8u3Utwr08RDM6Bgfx7L5jqOzqFmSMBFO+v3ktYnzFD349Oj2+dOQYilrbBXdGKAiBu6MD3lu/Bsn+4l3jWoMBL5w8ea+IGx9HQQic1Wq8s3o1soLEz5qGTCZ89cwpXG6oE8XQKJX41/JVmBYqLggRAI
wWC7710Rmcram89y3zMZSMAm8sfRxzI8QPTCarFT/47ByOVtwdMy5mNIMQgv9dsBxLo8QPTFaWxU+vfYLdZQWiGBTAa7OWYG0sd+6U0WIpxW/vXMS/Sm6NGVswmsFSilenLMK2BPF5Syil+EvhFfy18KoohpVS/DBrLp5LmiR68KOU4u2ym/hN3nnRjG+mzsQ3UmbYNcDuqcrBz3I/vDdQc2nkeT0XPxXfS5tn10aB4/UFeDn3JAiIKMbmyGz8JGMpFHZknz7XUoLv3zkCCvAzQMCCYkVoOn6e8TjUjLRl+0eGyLBya5rxpX8cHjNQk0sMIZgYE4p/vLBKVPxHSVM7nvz7AZitrOh8FApCkBIWgHe+vE6U16KqvQubX98Pg9kinsEQxPj7YNeXN4gKeGzo7sOGf+7FkNEkOleEgiEI8/LAvhc2iQp4bOsfwNo396JPr7eL4e/qgsMvbBEV8Ng9pMPat/eiY3BQPIPYlp2Ofmkr/N2EPQr9egPWv78PjX39onNeKAiBq0aDI89sERVvMGQyYcOeA6jq6raL4aBS4sj2zYgRyEEBAAazGVsPH0ZRW5tohm1HhAIHN21Csp+wwWO2WvHUiSO42dwk+t1lCIGCEOxduwHZgcLBiFaWxQtnTuBCfa1oBoHNRb5zxVpMDxWeibOU4lufnsapqjLR22VHhqF/LVmFhRHc2ZBHRCnFy1c+wv7yQru35P5t7nKsjBa3Y+pXt8/jrZLbdhKA30xbjM1xGaLa/qXgCv5SeMVuxo+z5+FLSZNEtX279CZ+nfep3Yxvps7EN1Nnimq7vzoXP845Yzfj2bgpeDlD3PbtU41F+O7tY3adnwBYH5GFX2QKb48GgPOtZfjGrf0A+I300YylwSn4XfYaSeU27Bm//5/dvjugN+Jrbx0Hy1K70qqzlOJWVQP+ee6GYFuD2YIX3z5mlxEC2FyFRQ1t+PPpy4JtzVYrvvzecRgt4o0QwJaYrLKtC78+eUGwLctSfGXXcbuMkBFGQ3cffnzsY8G2lFJ8bf8pu4yQEUb7wCC+c/hDUe2/feQ0OgbEGyGA7Xn06vT42qFTogq7/eCDc3YZISOMAaMRLxw8LorxykfnUWmHETLCMJgtePbQcVFl6H9z6RIK7TBCANv3YbZa8czRo5yVYe/Xn29cw42mRrveXZZSWCnFsyePiapk+8/c2zhfV2MXg8K2K+r50yfQrdcJtt9ZnIeTdhghIwwA+OrHp9AyqBVsf7iyBPskGCEEwEsXz6Cmv0ew7Yd15ZKMEAD40bWPUNLdLtjus5YaSUYIAPwq5zzudDQJtrvT2SjJCAGAvxZdxqWWGsF2d3vb8NMccf3OaL1TcQNnm0oF29UOdOMHd07YfX4K4GBdLo41FAi2bdX34zt3Dt37O3sYZ5qLsa9W2vtij/4thsjrr7+OiIgIODg4YPLkybh169bnzjx1+y6GDCZJWTMpBfZdzofBxN/Rns0vR8+gXhKDpRSHbxRhQM/f0Z6/W43WvgFJGS1ZSnEqrxTdg/wd7bXqetR29UpiWCnFp3er0Nzbz9sur7EVxS3t0hgsxfWaBlR1dPO2K2vvxM36JkmZOa2UorClDUUt/B1tQ28fzlfWSGZUdfXgel0jb7uOwUGcLC2T9F5ZKUWzVosL1fwdbb/BgIPFxZIZXTodzlRU8LbTmc3YWZgnKTsuO2y4HS/n78xNViveyc+RxKCg0FssOHS3WPC3vJkvrTOmsMXH7L1byN+OUvyz8KakDKYUtmvZeTdPsO2bxTellzQgBDtKcwTbvTW85CNFDCF4p1T4Xr9bdsuuZYn7pSAEb5fdFGy3o/KW5BgJhhC8XSY8md1TcxtUYv5oAuCdimuCE5uDtXdgGY4xkqL3qq6BlVlUU0ifuyFy4MABvPTSS3jllVeQm5uL9PR0LF68GB0dHZ8bk1KKPZfzZZ1j0GDCufxy3jZ7r+RDThFak8WKUzn8He3ea/ky62IAR2/zd7R7b+TLqi
FCCMHB20X8jFsFshgKhmDfbf7OfF9OoWzGnjv5vG325xXKLgK2O4efcbCQ/3mJYewUYBy9exdmq1UygyEEO/L4B75TFWXQmc2SGQCwIz+Xt6P9qKYKvQa95PNTUOwozOMtsPhZYy1ahwYkM1hKsbskHyae+32nvRlVfT2SBwsrpThQXgSd2cTZpqS7HfldrZJLGlgpi2M1Jeg3Gjjb1Gl7caWtTnKafiulONdYgXYd9/1u1w3go8YKyRWnrZTiSlst6gd6Odv0GfU42VAimcFSivyeZpT2cU9shiwmHK7Lk3yvKICqgS7k9nBPbExWCw7U3ZFVxqJV34+rHdWS/16MPndD5E9/+hOee+45PP3000hKSsI///lPODk54d133/3cmPm1LWjs6pP8UQO2jvbgNe6Br7ylE6XNHZBZvBUHrnO71hq6+3CntlnWS8RSiv03uBmdA0P4rKJWVhVallIcuF3IOWAMGIw4W1Ihi2FlKY7mFcNkGbszN1osOFZQIpvxQXE5Bo1jd+YspTiQVyy7QNf5yhp0D3F7qfbmF8ouNHa9oRFN/dxeqj0Fwi5dPrGUoqi9HeVdXZxt9hYVyKoOSwHU9PUir62Vs82+kgLZJd7bhgZxtamBm3G3UHbF0z6jAZ/Wc3fm+8rlM3QWM07XcnupDlYWSvYijMjCsjheU8LNqJZ/HQBwuJrbGD9SW2RfLvgxpCAEB6u5v4GTDcWwsNINdRuDwcEabmP9XPNd6K3yDHUFYXCwNpfz+MX2CvSbpRvqNgbBoTphT5gcfa6GiMlkQk5ODhbcV3OBYRgsWLAA169ff6i90WiEVqt94B8paujqk/qT74mlFI2d3Odp7JbPoACau7kHi6Ye/uUOsWrrH+Sc8bX0ae2KoeGSVm/EEMcA3qYdEBWzICS92YJe3dgfVdegDkYOI8UeWVgW7QODYx7TGoyiYhaExFKK5v6x322z1YqOwSHZDABo7ON5t/r7ZRnqI2ro6+M8Vt8nbzJwj9HPzajp7ZVltAG2MY2X0dcju6qqghA0aD9fhpJh0DDAzajV9kqe4d/PqOdh1A/0yp6cMSBoGOT2VtQP9Eqq5XK/KAUvo2GwV7bRZqUs6gUYynFg1A1yxwY1DvXINgytlKJuiH9ZXK4+V0Okq6sLVqsV/v4PJmHx9/dHW1vbQ+1/85vfwN3d/d4/oaGhkrj64RTdcqU3cVurOqM8S3ZERosVLMeXO14MgPtauIwHKRriYOh47qPdDI7fqzON53X8BxkylzLu1yDHfbewLMzjYBgC3NcBAHrL+FzLIA9DNw4MhhAM8ixpDI3DMyGEYIiHwccXzQB4GQNm+UY0pfyMQbNJcszDiFhKee+HziIt9u8BBigGee7HkGV8vnWtmXsZS2c1yXXsAAAG+BgWE8RVCOLX0Di8O3z6Qu2aefnll9Hf33/vn8ZG/qA+LjmqVeMyy3fkySViT54RPmlUSs5MqOPFIACcOLJi2lvLhE8uHOeSkvSMS1y/d1yvg+teCWQWtYvBda8EMqTaxeD4vUqGgUogM65oBk8mVKFsr6IZPBVJnVXynwlLKW/VU677aI8opby/dVwY4L8fbiIquwqJEH6Gi0ot21vBEAIXHobUyrMPMEDgquJ5r5Tj8627q7jTGjgp1OPiMXTjYyjVsg1DAHDhuVfjoc/VEPHx8YFCoUB7+4MBO+3t7QgIeDgboEajgZub2wP/SFGEH386XzFiCEEkz3kifOXVhQBsBkK4jwfn8XBv7mP2KMjTjdPYCfF0l/1RA4CXsyOnwRHo5gqVhGy1o+WsVsGLI0Gbj7PTuAx8aoWCM5eIq4MGHgLpwcWIIQTB7mPnElEpFAh0lVc7BRh+tzw9OI9HeHqOy2yMrz5KtKfXuLxbfIyYcWBQAJE8mVxjPb3Hxb0d6c7dn8R5+MhmWFgWkW48z8Pde1xiRKLcuK8j2s1LtjeapRRRbtx5cKLcvOQvxxEgkuc6Il29ZS9jKQiDSFfu64
h09YZlHBjRrtyp5SNdfMZhWZFBlKv9GYLt0edqiKjVamRnZ+PTT/9vvzfLsvj0008xderUz42bFh6AKH8vWR0tSyk2TE/nPB4T4IPUsAB5O1oAbJzGzQj2cseUmDBZHRQhwOap3AxvFyfMS4yWtduEIQQbJ3FXIHZx0GB5aoK8HS2EYF12KmeSObVSiXUZybJ3zaxMTeT0fDCEYFNWmrxdMwzB4oRYeDlxZ7zdmimTQQhmRoYjiKcY3NZ07ndCjBhCkBUYiGgv7s58a2q6rAGDAIjz9kGaH3d9ja0p8hkhrm6YEsy9DLwlKV12Z+7t6IS5YZGcxzcnpMlmuKjUeCySO4vrptg02YOrWqHEiqgkzuPrY9LGxUhYG8WdKXZNZOq4GDsbojM4j68IS4aKkVds1EpZbIzizka7KChRtufFSllsjMzmPD7TPxZeannFLa2UxcaICbLOIaTPfWnmpZdewltvvYUdO3agtLQUX/nKVzA0NISnn376c2MSQrB5ZoYsh5SbowYL0/mzIW6eniHro3NQKbE8K4G3zdapGbI6KAVhsDo7mbfNlsnpsnabAMD6Can8jEnyGFZKsWlCGm+bTdlpsnfNbBZgbMxMFZWQjI+xNYvfCFifmiLLiLZSim2ZGbxtViUmChb84xNLKbZn8qf8XhYbD1cZSw4UwJPpmby5HOZHRsNHoMCckJ5Iy+Q1/KYFhyHU1V3yM2EIwfbkDN5MzRm+gUjw9JHcISsIweaENDgoub2CcZ6+mOAXLNnIVRCCNdHJvEs8oS4emB0UJXnypCAMHgtLgI8jd9kIX0cXLAlNkJVHZHZQNIKduTMcu6odsDoiTTKDAcEEn1DEunN7EhyVKmyIyJKecwUE8W7+SPXkLregYhTYFDlR8nIZARDi5InJPtxG9HjoczdENm7ciNdeew0//elPkZGRgfz8fJw9e/ahANbx1vLsRHg4O0j+6LbNzoJaoGrqorRY+Lk5S5qFEwJsmpYOJ4HYhlkJkQj1cpfEYAjB6gnJgvVmJkeFIj7ARzJjSUocAgRqwaQGByArNEgSQ0EIZsdGItKHfzksxtcbM6PDJX3YCkIwISwYyYH872WwuxuWJMRKeq8UhCDR3xcTw/jTlns7O2FNarJkRoSnB2ZHRfC2c9VosDU9XVL3pCAEAS4uWBzDb6hrlEo8lZElicEQAi9HR6yI4zfUlQyD5zKlzdYYQuCiVmNdAr+hzhCCL2dOkjSxYWBLib85kd/AJYTgK+mTIcVfMVKw7IlE4VowX06dImvy9FQi9+x7RM8nTZI8eWIpi2eTJgq2+1LCZMlJtqyU4rnEyYLtnowV/h1cYkHxXIKw139L1AQwRJqZwILiufhpgknX1odnQ61QSk6W92zsdFnF78To3xKs+rWvfQ319fUwGo24efMmJk8WfgnkytlBjX88vxpKBWNXh84QgtnJUXhuoXC9A41KiTeeWwO1Umk3Y2J0KL6xdLpgW6WCwZvPrIaTWm3XIM4QgpQQf/zg8TmCbQkheH3bSrg7OtjFUBCCGD9v/HyVuJoKf924HL4u9hluCoYgxNMdv1+7RFT711Y/hmAPd7uMEQUh8HN1wd/WLRfV/lfLFiHGx8tuhqeTI95cv1LUR/3K/LlICfCzm+GiUeOddauhEBGM+r2ZMzEpJMSud3ekKN17a9ZAI2CoA8DXJ03F7PBIu78PFaPAeyvXigoQ/lLmBCyLibOro2WIrWrqO8tXw9NRuDDklsQ0bEiwz1M1UoDyX4tXwd9ZuIbRqpgkPJsiPNA/xADw93nLEebmIdh+QWgMvpE+zS7GiF6bsQzxnsKxAtMCI/By1lxJjFcnL0aGj3BBxXSfIPxiorg+YbR+kDEPU/0jBNvFufvhd5Mel8T4WtIMUVV4w1y88OeJawHYnxrlyZjJeDyU3xMNAH6ObvjbpI0gxL79MwTAmrBMrA+3752Uoi/UrpnxVkpYAN76ylo4aVSCg99IR7kgPRavPblMVE
cOAHGBPnjvxfVwc9SIYNj+d0ZCBP736ZVQKcW5xsN9PLHryxvg5ewk2KGPHJ0QGYJ/PbsGDiKK6gFAkIcbdj+3Ef5urqIYBEBKSADef3ad6B0rvq7O2PPsRoR6eohmxPr5YPczG+AuoqgeAHg4OmDPk+sR4+st6qNjCEG4lyf2PbUR3iKK6gG2HS87t65HcoDfvd8pxAh0c8W+7RsRwBO3cb8cVEq8v34NskNs3hMxDB9nZ+zfspE3SPV+qRUKvLVqFWaG24q+Cb3xCkLg7uCA/Rs3Is6HO0DufikZBv9Y9jgWRsXc+51CDBe1GnvWrEcqT2zI/WIIwZ8WPobVCUmiGQ5KJXauWIuJQeLK2xNC8OtZi7A1KV00Q61Q4p0lqzErNEIUAwB+PHkuXkideO8cQgwlw+D1+SuwJEJ8hd9vZ8zAtzKmi2YwhOCPM5ZhdTS/5+h+PZ80CT8cNkaEljcUwx6BX05ejG1xwl6dEW2NzcIvJi4GEckAgJcz54nyhoxoVXgq/jBpxXAhRiGG7V5+M3kWvpk8WzRjYXAC/jJ5HRSEEc34Utw0/CB1kWjGdL8YvD55M9SMUpAx4p/ZGDEBr6Qv/9y9IcD/49V3R9Taq8WeS3k4cr0YQ0YTlAwDltLhgCcCK8siNSwAW2ZlYGlmAucOEz519A9i79V8HLxeiAG9cRTDFhuQEOSLLTMy8Xh2IpQSdpF0D+qw91o+9t8oQJ/O8H8MACC264j288K26ZlYlZ0MtUhD53716QzYdzMfe28WoHtQd48B2DpfC8siwtsDW6dmYl12iqjqwaM1YDBi/+1C7L6Zj/aBwTEZIR5u2DY5ExsnpMJRwvZfncmM/TmF2H07H8392jEZAW4u2DYxA5uz03i3oXLJaLHgQF4Rdt3JR31v35gMXxdnbMtOx5asdNHG1P0yWa04UlSC93PyUN3dAyXDgFLbhrwRhpejI7ZkpmF7Vga8neyPl7CwLI7dvYv3c3NR1tU1JsNdo8GW9HQ8kZEBPxfh2f1osZTiZHkp3i/IQ2F723Bn+CDDRa3G5pQ0PJmeiSBX+793SilOV1Xg/cJc5LS23Ou0KWydq4WycFKqsCEpBU+nZyHM3UMS4+O6arxblIMbLY0PMoa/QY1CiXXxyXgmLRtRHtJ28F1orMG7xTm43Fw3bPTYyr/bytmzUDEKrIlJwrOpExDnKc4oHK2rLXV49+4dnG+qvmdYsdQ22LGUgmEIVkQm4pmkiUjxlraUfqu9Ee+U3sbHjZUAsT0H6/B1jPSPj4Ul4NmkiaI8IWMpv6sZ75bdxoeNpaB0+DmMMEABCiwMicPTCRMxyS9MEqOktw3vVdzEBw0lw797+B4RAoCCpRRzAmPxdNwkTPOXFk9R0d+BHVU3cLKxCGbWCgVhYB3FmO4XjSdiJmF2QKwkRu1AF3bV3MCJhnwYWQsUhLl3PYAtMHWSTwS2R03B3IB4WUaIPeP3/y8MkRHpTWacy6tAUX0rBgxGqBQK+Lg5Y2lWPBKC+UuaW1kWJosVDiol78MxWSz4qLASebUt0OoNUCkU8HJxwpL0OKSEPbxleTTDaLHCUZBhxfm7VbhV0wSt3gglQ+Dp7IRFqbHICAvk/VuxDIuVxcXyGlyvbkC/zgCGsS0tLEiKwYSIYN6/ZYerwAoxrCyLy1V1uFRZh369AQQEHk4OmBcfhSmRYbwGIUspDBYLHASWxVhKca2mARcra9CrNwCUwsPJEbOiIzAjOpzX80WprSiaEINSilsNTfikohq9OlsRRHdHB0yLCMPc2Cgox4mR29yCsxVV6NHpYGUp3Bw0mBIWioWx0byBkDaGGRqFUvB6C9vacLqiAt06HczDBsjE4GAsjo3lXYqhlMJgtUDNKAS9iSUd7ThVWY6uoSGYrFa4ajTICgzCstg43mDLEYaKUfDeUwAo7+7CifJSdOiGYLCY4aZxQJqfP1bEJfLmarGHUd3Xg2MVd9E2NAC9xQI3tQZJPn5YFZvIm5
eEUgqj1QIFwwjuzKjX9uJIZQlaBgegs5jhptYg3ssXa2KS4K7hN2yNVgvIcIwKn5oG+3G0uhiNA/3QWUxwVWkQ4+GNNdEp8HLgN2yNVgsIIVALXEebbgBHqotQP9CHIYsJLio1It28sDYqFb48gamArV4KYNuxw6dO/SCO1hahdqAHg2YjnJUahLl6YG1kGgKc+D2RZtZW3Vyj4J/09Bh1OFZXiGptFwbMRjgr1Qhx9sDqiFQEO3uMC6PfpMeJhkJUajuhNRvgpFAh0Mkdq8LSEObCb9haWCsslIWG4e97B80GfNBUhLL+NgyYDXBQqODn6IoVIemI5NkObI8eGSLjpIaePhy4XYhjeXfvpRZnCEFioC+2Tc7AkpR40UsfXGru0+JgbhEO5RXfqz/CEII4Px9sm5iOZSkJshOCtWkHcTC/CAfzitA5OHRv9hbl7YltEzKwIiWRM8GWWHUNDeFgQTH2FRShTTtwjxHm4Y5tWelYk5IEN5k5OHp0ehwqLsa+wsJ7KcoJgBB3d2xNT8e6lGRR6/186jcYcOTuXewuzL+XopwACHR1xebUNGxMSYWPBK/D/RowGXGs7C52Feajpq/3nlfLz9kFm5NTsSk5Df4SvA73S2c24URlGXYU56GypwvWYYaPoxM2JqZhc1IagiV4He6XwWLGqZpy7CjORWlPx70ARS8HR6yPS8HWxAxRcQt8Mlot+LCuHDtKc1HY1XaP4aF2wJqYZGxLyEQUT34OMTKzVnzUUIn3y+4gr7PlXm4HV5UGq6OSsS0+E3Ee8vIoWFgW55ursKP8Dm53Nt7LbOuiVGN5RBK2x2YhyUteAL+VZXGprRo7K+/gekc9zMO1UpyUKiwNScS2mGykeUvzOoyIpRTXO2qxq/o2rrRXwzTMcFSosCAoHlujJyDTK0TWTJpSittdddhbewuftVXCyNoMEQ2jxOyAWGyJnISJPhGyGQW9jThQdxPn20thGK75omaUmOYbg00RkzHZJwqMzLwrJX1NONRwA5+0lUBvtWVrVREFJnpHY2P4FEz1jZWd26VC24KjjTfwcVsBdFZbBlQlUSDDMwLrwqZhuk8ClDK3IkvRI0NEptq1g/jx8Y9wpap+2BX64C1iht1yLho1vjx7Mp6Znm33R9E9pMNPPvgE58ur77n57pfNGWfLSvrs1Al4cdZku3dR9OsNeOXDT3G2rBIAOBkOSiWemJSJb82eJjgLHK1Bowk///g8Tt4tA+VgALZEXVsy0/D9OTPt3jaqN5vx6oWLOFJSAis7djlrAkDBMNiQkoIfzZkNBzuTmxktFvz2yiXsKyq6V5V2NIcZDvZalZCIn82dZ3emVQvL4rXrV/B+Qd69Wd5YDAB4LCYOv5q7AG4Cs97RYinFX+9cw1v5d6CzmO8949EMSikWRsTgN3MWwdvO7a+UUrxRcAv/yL+BQbMthfTo7I0jrvfZoZH43cwlooI1RzPeu5uDv+ZfQ7/JcO+bG82wUoppgWH4/YylCHHh3o7JpX0V+fhD3mfoMep5GRP9QvC7aY/xJvPi0rHaYvwm9zw6DUNj9icj/y3DOxC/nbIM8RKMnrONZXg17yO06Qc4GAyslEWShz9+PXEZUr0C7WZcaK3Eq/ln0aTr42XEuvni1axlyPK2v0TH9c4a/CL/A9QP9dw731iMCBdv/DR9Gab4RtnNyOupx6uFJ1E92MHLCHb0xA9SlmGWf7zdjNL+ZrxadAzlA61jM0BgBYW/gzu+k/gY5gdw503hUvVAG35dcgR3tY1jMhjYlqa81a74WtxjWBIkPv5mPPTIEJGhms4ePPneIfTq9KJzUqzOSMIvVy0SHVvS2NuP7TsOoWNgUPQ2t8WJsfjjmqW8Lvj71aYdxBO7D6Gxr18UgwCYFR2B19c9LrhteUTdQzps338YVd09orYEEgCTwkLw1tpVor08WoMBTx4+guKODlEMhhCk+vtjx7q1cBUZ9zFkMuHZE8dxu7lJ1BZNhhDEeX
tj99r18BLpgTFaLHjh9AlcaqgTxVAQgnB3D+xdvUG0d8RsteLrH3+As7WVotorCEGgiyv2rdiIUDdxg7iVZfG9S2dxtJK7Autoho+jM/Yu24hokfESlFL85MbH2F2WL5rhrnbAniUbkejFv8R6P+O3uRfxZslN0QxnpRo7F260K5bhb0VX8OfCy6IZGoUS787ZgMn+4mMZ3i2/iV/lfyKqLQMCFcPgnzPWY1ZgtGjG/ppcvJJ3GsDDhu1YDIYQ/HXyWiwM5t9+fb9ONRbi5dxjoBQPGbajRWALIP5N1mo8Hsq/Nfp+fdp6F9/PPQCWUow9pXmQAQA/SV2JteHit4hf66zEd3J3w8JaBRkjeinhMWyNFN5FOaK8nhq8lPc+TFazaMbzMQvxdNR80Qy5smf8/n9614y96hwYwjPvH0HvkHgjBACO5d/FHz66JKptr06Pp3cfQcegeCMEAD4qrcTPz5wXlUxr0GjEM/uOiDZCAFvncqmmDt8/eU7UgK83m/Hs4WOoFmmEjDBuNzbjmydPc1YDvl9GiwXPHz+BEpFGCGDzCBS3t+PLJ07AZBWuxmthWXz19Ae409IsOk8ESykqu7vx7PFjMIgousZSim9/dAaXG+tFM6yUor6/D0+ePMJb9G1ElFK8/NlHOCfSCBlhtA4OYPsHh9BnEFcq/NUbF0QbISOMLv0Qtp05iE6duKrCf8y9ItoIGWH0mwzYdu4gmgfFVez+Z8lN0UbICGPQYsITHx9ArZa72un92lWRI9oIGWEYLBY8c/Egyvs6Rf3N0dpC0UYIYMs9YWKteOHKIRT2tIj6m3PNpXgl7zQohI2QEYaVsvjmzSO43dUginG5vRIv5xwDS6mo2igj3teXc47hSnuVKEZOdx2+l3sAVsqKGrxHrvcXRSfwSau4d76krwkv5e6yxYPYkXnmT2Vn8EFznqi21QNteCnvfRjtMEIA4F9VH+NIw8NV778IemSI3Kc/fXwZXUNDkpLxvH8tF4VND1cUHq2/f3YDLX1auzOAUgCH8opxo064EOCb126jprvX7uugFDhTWoHzFTWCbd+/k4e77Z12M1hKcaG6FqdKywXbHigqwp3mZrsZVkpxo7EJh4qKBdueKCvFpfo6u5M8WSlFYXs7duTnC7b9qLoKH1ZXSmJU9nTjzdzbgm2vNNXjcHmJ3Um3rJSiUduPv94R7qBy21vwfkmunQQbo0M3iD/cFjbWy3s78fdC+ztLK6XoM+rxy1vnBds2DvTh97kX7WawlGLIYsJPb34k2LZTP4if3xFvINxjwBbI+vLNDwXbak0G/PiOcLvRogAslMX3bp4SnNjoLWa8fOeUJAZLKb53+7jge29mrXg555ik4mwUFD/IOXovHoZLLGXxo/wjw4aOfSIAflpw7F6MB+dvoRQ/KzrCuXwspF8VH8eAWXhC8Ju7R2CymiXdrz+Xn0K3cUDCr/t89cgQGVavTo8PCsslpwhXMAT7bhXwthkymXA4v1hy1kEFQ7Dndj5vG5PFgv25hZKzJyoIwe47/Awry2JXbr5kBkMIdubwW/+UUuzIFTdDGEsEwI68PMGOdkd+nuTsuxQUO/PzBb07OwrzJKdxZinFnqICQe/OjmLpDCulOFBWBJ1AKfpdd/MkB9ZZKcXxqlL0G7lLlgPArjJ513GuoRIdukHedrsrpD9zK6W40lqH+oFe3nb7qwoklwKwUoq8rmaU9nbwtjtSVwjTcCCnvWIpRZW2C7ndzbztTjeVYNBilDSwsqBo0fXjajv/xObT1jL0mHSSGBRAj0mH861lvO2ud1ajVd8n0dgBhixGnGvhn9gU9DWgZrDDLi/F/TKzFkGvSOVAC0r6GyUzKKU41Sw8sfl365EhMqxjuSWyCkJZWYrTRWX3dteMpVNFZTCYpXUcI4xPy2vQruXuaM+VVaHfYJTOoBTX6hpQ18Pd0V6sqUXHoDg3+1hiKUVhaztK2rk72ptNTagb3rUiRRRAdU8P7jRzd7
SF7W2iY0+41Do4gEv1dZzHq3u6caO5UVa9oF6DHh/VcLufWwa1+LSuWhZDZzbjVBW3l6rHoMOp6jJZ34iZteJwBXdnPmAy4nCldEN9RPsrCjmPGawW7KvIl8VgCMHeinzO4xaWxa6KHMmDBWCbEOyp5PY+UUqxo0LegKIgBLsr7/Azqm7KqnukIAS7q/l/556am5JroQC2mJQ9Nbd42+yruyFrdwoDgr21/J66g/XyGACwv/46rwF7tPGmLAYLisMN12ER8CD9u/XIEBnW6aJyyA3bNVtZfFZRy3n8TEm57NLrlFJ8Us49KH1YWiG7LDpDCM6VcccafFhWKbtkuYIhOFvOwyivsHsHz2gpGQZnKiq4GZWVsjsOBSE4U8lzHdXy7xVDCE5XchsJ52qqYH+C6AdFAJysLOU8/kl9teyS5RTAqWrumevlljoYrNINdcBm5J6ouct5/GZbA7Rm6YY6YDPWT9RyxwwUdLeg0yDdUB9hnKzjvo7y/g40Dkk31EcYHzaWchriTbo+lPd3yGZcbKu6tz12tHqNQ8jpbpBltLGguNNdj17j2PdcbzXhSkelLCOaBUW5tg3NurEnaCxl8WmbvMksBdCk60HVYDtnm4/b8mVXUO42DeCutknWOcZbjwyRYXXJmOGPiCEEPYM6zuOdA0OyPmrAtkV1JN/IWOoYHJJdhpshBN1D3J4dqXE094uAoEfHfR3dep2ogFY+sZSih8dD1a3TQVz4HbeslApeh9wUySylvIGe3XqdpGKC94sC6NTzX4dcgwoAOvX81zEeyaS7DTzXwXPMHvXyBPeOF2PAbOT8BsaLYaYshjgMsx7j+DAobPE7Y6mbw3iQoh7T2L+336SXtCQzJoPj9w5ZjLDQ8fEycBlUFtaKIYs8I3pEfSb+5ct/tx4ZIsOSUz5+RITY3LJc4jtmjyxWHgbPMbsYn/N1UFD+65AY8PUAg1Le3yp3ZjEiE4+bc7yeuZknRsRC2XEZwC08DCtlYV/JrLHFF1RoGScG77s7Ts/cwmOIjxeD71zjyTBzMcbp3bUxxn7u4/UNAuBcbhjf5/GfY8jvEe9jjOOzHQ89MkSG5eZof62R0bKylPc8HjKzfgK22bEbT80STyd52UtH5O7Afx1yJ8cEhDfTqrvGYRyWfxi48VyHu8ZBtreCIQSeDtzPVSgNt1h58SQdc1NrZHvBAMCD5zrc1JpxGTQ8eO6Hm1ozLp2tG096db5j9shFxZ3Mzk01Pgw1o4CGI625u3p83isAcFONfa7xZLirxn633Dj+uxRxnYvr+saT4aocP4YrB0PNKKEi45MhlYvxn9IjQ2RY06LDZbu3AWBiBHc1z6mRobLjN1hKMSmcmzE5XD7DwrKYGMbNmBQaIndFAxaWxaTQYF6G3OUfC8tiUgjPdYSEyJ4ZsJRiYjD3dUwOls9gQDA5mPs6pgSFyr5XDCGYHsKdRGtKYJj8ZUVCMD04gvP4JP8Q2f4QBSGYEcTNyPINlv19KAiDaQHhnMfTvAMFa68IMwhvUrN4dz84K+WVZWAIQYZ3MGcsVriLF7zU8soZEAAxrj5w5TDO/B1dEeRof0bc0QpydIe/49j1ZFyVDohy8ZX9bnmqnRHm7D3mMSWjQIp7qKygWwBwVmgQ48qd6j/TS37aeTWjRIIbd5/1n9AjQ2RYmyamyVqeYQjBhPBgRPuO/aICwMbsNMlb+mwMIDHAF2nB3MXz1mekyPoUCIAwD3dMjeBOz7w6JVFSZd/75evsjLkx3OmZl8XFwcXOFOqj5abR4LE47vLoC6Ki7U5vPlqOSiVWJyZxHp8eGo4QNzdZz4RhCDYkcaeAzvQPRLyXj+yOdstwifuxFOflgwn+8gZxK6XYlpjBeTzU1QOzgiNlecKslOKJBO5U1n6OLlgaFi+TweLJhGzO425qB6yOTJEVCG2lFE/GcTMclSpsjMqQdR0spXgydiLncRWjwObobFnPnAJ4ImYyp+eRIQy2RE2StSRHQL
A1ajLnAE0IwZbIqbIMaQYEG8Mn8RYp3BQxVfZOqVWhE+Co4O731oVOBSvDM6kgDJYGZj3yiHxRFePnjexw6R0tSym2TcngbRPs4YbZsdI7WpYC2yfx1wvwdnbC0sQ4WR3UE5MyeZcsXDUarElJksxgCMH27AzeXTEOKhU2paVKZigIwZb0NN6KsUqGwfb0dMnPXEEI1iUn8xpMDCF4Ki1L0vlHGI/HJvAuzRBC8HRqluQuUEEI5odHI9CFv0Lpk8lZsnLHTA0KE0zz/lRilmTvDgOCVG9/pPjwV7l+MiFbMoMAiHTzwmR//joq2+KyZC1l+Tu6YE4Qfwr2LTHS7xVgKxy4OIS/jsrGiCxZ3k9HhQqPh/HXUVkTnimrv1ISBqvDM3jbLAtOg4NA1VshrQnjT/M+3z9Z1lKTlVKsC5vE22aabwJ8NPzfKT+DxZrQKZL//vPSI0PkPr20YLqk2AcFIUgN9se8BOHaDd+YMxUKxn4HnoIhiPPzxrJk4QJML86cDLVSYfe1KAhBmKcH1qQlC7Z9fvJEOKnVdg/iCkLg5+KMLRnC9SGeyc6Gu4ODJIaHoyOezBQu8rQtPQO+zs52d4QMIXBWq/FclnANig3JKQh1c5fE0CiU+OrEyYJtV8UlItbT224Ggc0g+9bEaYJtl0TGItXHXxKDAcF3J8wQbDs7JAqT/UMkD0w/mDBHsM1EvxDMDY6SbID+MHuuYGxRilcAlocnSnbV/zBrHhQC29cjXb2xMSpDsi/he2nzOGNQRhTg5IanYoXfPy59M2mO4BKSh9oJL8TPksx4Pn4mPASWkJyUGnw1XnqdlW1R0+DvyF8vRa1Q4uvxiyWdnwBYHTIBYc4+vO0UhMHX45ZJZBAs8E9DnJu8Csyfhx4ZIvcpKzwYv1uzxFZQSeTfKAhBsKc73ty2WlRBuuRAf/xl3TIwDBHdESoYAj8XF7y9dQ0cVMIF6WJ8vPGP9SugZBjxDELg6eSId7esgYtGeEkk1MMdb69bCZWCET1oKAiBq0aDHRvXwoMn4HZE/i4ueG/tGjgolXYxHFUqvL92DfxEFIvzcnTEztVr4axW28VQKxR4d+VqhLgLr2+7qjXYtWodPBwc7WIoGQZvLV+FaE/hYnEOShV2Ll8HPyfxRhVDCBSEwT8WrUCyj3CxOBWjwHtL1iLEVbxRNVKc7C9zlyHbX3hdmiEE/5q/BtHu4o2qke/1tzOWYHoQd+zGvfaE4O+zViHZy99uY+SViQuwMDRWVNvXpi5Htm+I3cbI99JnY0WE8GQAAH6evQQzA6LsNka+kjgNm6LFVWP9Xup8LA5OsJuxPXoinhZpxLwYPxsrQ7mXBrm0KiwDL8bPFvd7IqdhU7h9RhUBsCAgCd9KXCSq/ZrQiXg6StzvuZ8xxScW/5O8QlT7RYEZ+HKMfQYPAUG6RwR+nLLerr/7d+lR9d0x9FlFLb598DQMJlsSnrFukIIhsLIU2eHB+PvmFfCwc7fK9doGfP3gKQwYTWOWar+fkRrkj39uWgkfF2e7GHlNLfjywRPo1Y9dRh34vxLkcb7eeHvTagS42ef2K25rx3OHj6NzSCfIiPD0wLsbViPMw8MuRkVXF545egytAwOCjGA3N7y7ZjVivLljdcZSXV8vnj52DPX9Y5c4B3CP7efsjHdXrkaSn7hKryNqHtDimZNHUdHTLcjwcnDE24+vRmaAfeXaO3RDeOb0URR3tXMyRt43N7UG/1q6ClOC7CvX3mvQ47mPjuFOe7Mgw1mlwt/nr8DcUPvKtWtNRrx44TiutNTzMgBAo1DiL7OWY0kEdzzQWNKZTfjG5ZP4pKmKkzHCUTIK/H7aUqyOsq9cu9Fqwfeun8ap+ru8jJGKtT+fuAhbYu0r125mrfjJnQ9xqLZAkEEI8IP0+Xgm3r4B2UpZ/LrgI+yqvs3PIASUUnwzaQ6+kjDDrl1pLGXxl7vn8U7lFTA8DMXwN/Js7Ax8K2meXcGblFK8VfUZ/lF+Hg
TcW2IVhIGVstgcMQXfS15qV7wPpRR7667hL2W2OkBCjJUh2Xg5eSVv/MlYOt50E6+VnuAtFDjCWBiQjh8lr4NG5vKUPbJn/H5kiHBo0GDEycIy7L6Rh9quB7PpMYRgUVIstkxOx4TwYMlbQHUmMz4oLsPOm3mo7Ox+iDEvLgrbJmZgSmSoZIbBbMGZ0nLsup2PkrYHU6oTALNjIrFtQgZmRIVLdlUbLRacq6jCzpw85Lc8XPhvengYtmdnYG50pKC7mUtmqxUfV1VjZ14ebo+Rtn1ySAieyMzA/OhoUZ6psWRhWVyorcGO/Dxca3y4uGBmQCCezMjE4pgY3tgTPllZFpca6rCjMA+X6use6j5S/fzxZFomlsXGwUEprdNgKcXVpnrsLM7DJ3XVDzESvHzwdFo2Ho+JhxPPNlQ+UUpxs60JO0tycbbu4YJ+0e5eeColC6tj+GNohBi5HS3YWZaL07XlD+VqCHf1wFNJ2VgTnSxrm3RBVyt2lefgRG3pQ3lOgp3d8FRCNtZFp/Fu0xbS3Z527K7MxdGaIhhHMfwdXfBk/ASsj06Dj4N9k437VdHfib1VOThcWwj9qEym3honbI+dgA1RGZy7S8SodqAb+2pycKguD0OWB2sTeagdsSUqGxsjsxDoJH0nTONQDw7W5eBg7R0MjErg5arUYEPkBGyIyEaos7CnkEut+j4cqb+Dg/W30D+q0JyTQo01YROwPnwiIlz4l0r41GnQ4ljjHRxquIEe04NJyhwYFVaGZmNt6CRE8+ySEVKPcRCnmm/jcOM1dI0qZqdmlFgalIU1IVP+I8sxjwyRcRSlFKVtnejQDsJoscDVQYM4fx9e70Tn0BAKWtugNRqhZBj4ODthYnAw5wBJKUV5RxfatYPQm81w1WgQ4+cNf1fupYVunQ75bW3oNxigYAi8HW0MvgGyqrMbzf1a6M1muGg0iPbxQiCPB6TfYEBOawv6jDaPipejIyYFBfMOkDXdPWjs12LIZIKrRoNILw/e5Qut0Yictmb0GvQgIPB0dMSkwGDeAbKutxcN/f0YNJrgolEj3MMD4TxeliGzCbfbmtE7nI3SQ+OICQHBcOXJKdHY34/a3l4MmkxwVqsQ6uaOKC/ujk9vMeNWWxN6jDpQassfMsEvmHeAbB7Qorq3B4MmE5yUKgS7uSHWi9uTY7BacLu9ET0GPSyUhYfGAZk+QfBy4F4fbxscQGVvNwZMRjgqVQh0cbXtsOEwOk1WK+50NqJLPwQzZeGudkC6dxB8Hbnf9w7dICp6u6A1GuGgVMLf2QVJXn6cDAvL4k5nIzoNgzCxVrirHZDqFcg7QHYbdCjr6US/yQCNQgE/RxekePtzMqyURW5XEzoMAzBaLXBTOyDJIwBBPANkn1GPkp52aE0GqBgFfB1dkOodwGmgs5SioKcJbXotDFYzXFUOSHD3R4izJydDazKguKcN/cMMbwcnpHkFchrolFIU9TWhVd8Pg9UMF6Vte2e4C/d7Mmg2ori3Df0mPRSEgafGxuCacVNKUdrfgmZ9L/RWE5yVGkS6+CLKhdvjp10uh8YAAK5NSURBVLeYUdTbgj6THgwh8FA7Is0rmHPbMqUUlQOtaNJ1Q2c1wUmhQbizD6JduQOLjVYzinqb0WeyGQoeakekegbzzuprBtvQoOuA3mKEo0KDYCdvxLgEcb4nZtaCor5m9Jt0YCmFu9oRyR7BvDtX6ofa0aBrh85igINCjQAHL8S5ck8WzawVd/ub0GfSwUpZuKkckegeDGcld//Tou9A/VArhqx6aBg1/DReiHMN533fS/ub0GsahIVa4ap0RIJbCFzGMYeKvXpkiPwHRCnFraYm7M4vwNmKh2eIno6O2Jqehk3paQh0lTYjoZQir7UVuwrycbqi4qH8FO4aDTanpWFLapqo2AUuFba3YVdRPk6Ul8I8iuGiVmNTciq2pqQjwoO7wxXS3a4O7C4uwJHyEhhH1RdxUqqwITEV21LSEeNp3x
LL/ars7cLuu/k4WF4EveVBhoNCibVxydienIkEL1/JjJr+Huwuy8eBikIMjqpeq2YUWB2dhO2JmUgV2MnBp8aBPuypyMfeynxoTQ/OEJUMgxURidgWn4lMH+4OV0gtQ1rsq8zDnso89I5Kx60gBEvDErA9PhsTfUMkMzr0gzhQnY/dVTnoGlWLhQHBgpBYbI+dgKl+3B2ukLoNQzhcl4/dVbfRbnhwhkgAzA6IxbaYCZjhHy3ZA9hn0uNYfR52V99Ci77/oePT/aKwNWoSZgXESt7CO2A24FRTPvbU3ECjrueh4xO8I7Alcgrm+MdDKTFnyZDFiA+bC7Cv/jpqBzsfOp7uEYZNEVMwLyAJKkaaB9BgNeGj1gIcrL+GqsGHvaWJbsHYED4N8/xTJS8bGK1mXOgowNHGqygbeLiGSoxLINaEzsAC/ww48BgYfDKzFlzpLMTx5su4q6176HiYkz9WBc/EfP9sOElMbmZhrbjZU4QPWj5Dcf/D9cSCHHyxPGg25vlPgrPyi7X9diw9MkT+zRowGvG1k6dwpb7hXlzHWBrp+H4ydw6eyLJvHVhvNuNbH57Bx9XVvGu0I+un35sxAy9MmGhXh260WvD9T87hZEWZIMNKKb4+cQq+PXmaXQyz1YqfXv4U++4WimI8lz4BL0+bbdegYWVZ/OrGRbxbnCOKsTUxHT+fvsCuInuUUryWewV/L7guirEmOgm/m7EUajuWjSil+EfxDbyWd0lwzdxKKZaExeEvM5bbvaTzXtlt/PLOpwAB5/bckbXm2UFR+PvMVXCxM3vowep8/PjOWVBKBdfMJ/mG4p8z18FdbV9n+0FDMf7nzklYWZaHYbtXaZ5B+NeMTfDS2LcU8mlLGb5z+whMrJVnXX447srND29N2wY/O5dCrnVU4aU7+6G32gzbMePHQGAFRbizN/455QkEO9k3KcjpqcVLd/ZgwGLgjFFjQMCCItDRA69PfBIRLvYZ7Hf7G/FSzvvoM+tAQMa8XyMMH40r/pz9NGJd7YuJqh5owXfz30G3SXvvXKM1wnZXOeP3Gc8i0c2+mKhGXQdeLvgn2o29PIzhmCilI15NeRapHsI7KO9Xm6ELPyt+A836DjBgwOLhrd/3YqIYDV5OehZZnol2Mf7demSI/Bs1YDRi0/4DqOjqtivHwrenT8PXporbz603m7H18GEUtrfZxXh+wgT8YKa4bXEmqxVPnzyC601NdhWI2pqSjlfnzBdljFhYFi+cPY7zdTV2pSZYFZeIP81/TJQxwlKKb50/jZPV3JVkR4sAWBgRgzcWrBQVw0IpxctXz2EfT7n5sRjTg8Lx/qJ1ooPSfnXnPN66K77UO0MIsnyCsHvhRtHGyF8LL+MvhVfsYiR4+OHgom1wFhlf8nbZTfwm/1PRDAUhiHD1wqEFT4g2RvbX5OCnuWc4B9WxGIGO7jg472n4OAjvrgKAkw2F+EHOMUA0g4G3xhkH5nwJAQJbP0f0aetdfOfOAQDiaosoCAM3lQN2z3hedMzE9c5KfOPOLl6j8EEGgaNCjfemPi86niG/txbfuPMuLKxVFIMBgUahwhsTn0eCu7isn2XaRnwj958wWc2iGUpGgT9lPo80j0hRjLqhVnwr92/QW01jGgdjMQgh+GXqc5jglSCK0aLvxPfy/4hBi14Ugwynf/tB4rOY6mP/TqN/l+wZvx9t35UhSim+dvKU3UYIAPz56jWcLBU3WH7n7Fm7jRAA+NedO9hbKG6w/NGFj3G9qdHuKpV7igvwdl6OqLa/unrRbiMEAI5XlOKvt6+JavvnO1ftMkIA26DycV0Vfn3zoqj2bxbdsssIGWFcbanHj699LKr9rvI8u4wQwGaE5Xa24DtXz4hqf7SmyC4jZIRR1teBr14+JipL8LmmcruMEMCW2KluoAcvXD4s6p2/0laNV3Jt1yz23bJSilZ9P567sp+3EN+Icroa8HLucVC7GCy6jUP40tVdMIwKHh1Ld/ta8P2cQ6
AQZyCMMLRmA56/sQODZoNg+5qBDryUsxesSCPExqDQW834yq330cdR4fZ+teh68J2cHaKNEMBmdBmtZnwz5110GrSC7buM/fhe/tuijZARhoW14vv576BF//By12j1mwbxPwX/FG2EjDBYSvGz4ndRP9Qu2F5n0eMnRX8XbYQAtoKhFBS/L3sPVYMPB9X/N+qRISJDd5qbcaW+QXK2yd9fuiL4tyUdHThb9XDMiVj98dpVmHiqqgJAbV8vDpeWSE6g+Jdb16Az83e0bYMD2FGcJ5nxRu4t9Bv4O9pegx5vFNyUdH4K4L3iXHTo+MtjD5lN+Ev+VcmMAxWFqNf28rYzWi14Le+SJAYLitP1ZSjt7eBtZ2VZ/D7vojQGpfispQa5XQ/vXrpflFL8Pv+8pIRbVkpxu7MRV9pqBdv+sfi8BIKNUdLXik9bygXb/q30gkQGi+qBLnzYVCLY9p8VF8C9qMTPaNH14mRTvmDbd6s/g4VyLyvxMXqMgzjScEuw7Z66yzCw4g2EEbGgGDDrcahBeNJxuPEqBsx6SQwja8aBhs8E237Qeh29pgHRBsKIKCjMrBX7Gz4RbPtJ+010GnskMGzf4f76s3b93RdVjwwRGdqVly8rNXHrwAAu1dbxMwrkMXr1enxc9XDg0/3aU1Qgi6Ezm3Gqooy3zd679nkQRsvMWnG4nL8zP1ReBKvMAnP7y/h/5/Hquw8FvtojhhDsKS/gbXO2vgL9JuHZLZcUhGB3eR5vmwst1WjX8xtd/AwGu8pzedvc7GhA3WCvrNTzuyrv8LYp6mlBSV+bZAYDgl1V/J6nmoEu3OqqkzwZICDYVc1vILfp+/FZe7mslO17am7weql6TUM411okOfU8C4oD9Td5/37IYsQHzTmyGMcab8LEcn9jRqsZJ5tvSK7rYqUszrTchs7C/Y1ZWStONl+x22AbEQsWFzvy0G/i/sYopTjV8pnkd5cFi1s9Regy8k9s/hv0yBCRqK6hIZytqJTVcSgIwa487gFDazDgeGmpLAZDCHbkczMMFjP2lxTKYhAA7xdwD0pmqxW7i/Nll6p/vzCXs6NlKcX7JbmyCluxlGJnSR5vtdz37ubIKi5npRR7y/Jh4DFm3i/LkV1c7nB1MQZG7bC5XzvLc2QXfjtdX4puA7erflelXAbFhZYqtAw9vDNlRHtr7sgqLseC4nZXA6q0D+8aGdH+2juyroOCorS/DUW93B6kw/V3ZBV+owAadT241c3tQTrRmCv7G+wyDuBKB7cH6WxLHkys8DIUnwYsBpxvK+I8/llHIQYtes7jYmRkzfi4jbtfvNlzFz0m4SUiPlkpi3Nt3B6kwr4KtBm6ZDEIgLOt4patv8h6ZIhIVEFbm+zS61ZKcbu5hfP43c5OwWUVIbHDW365BvDKnh4MCSyrCIkCKOvugpFjcG0c6EePQV7HQQXO06UfQsvgwJjH7FGXXoeWwbE7oCGzCZV93bKMHQAYMJtQ0z/2GjVLKfK7WmQPGEarBWW93IPrnY5G2e+vhbIo6m7lPH6rs0E2gwLI6+YewG911ssqLjeivO6Ht32O6HZXnezrYECQ2829np/TXSercitg81Ll9zRwHi/orZc8wx+RkjDI663nPF7YVy/LoAJs11HQx8Por5NlfAK251HUX8d5vLi/Fgoir8I4BUVxP7dheFdbI/s6WFCUaPk93v8NemSISJTWwD3btEc6k4lz0NEax4dhpZQzhkNrlL4EMFr9HOcar+v4jzN4PAx2MziWXgbNJtmGjhDDyrLQW6UvL4lhALbEWp83QzsODIYQaHkYcpbJ7mcM8ASTjs7uKUUE4GWICTQVIz6GlLiN0aKUYpDnfgya9aICpfnEgkJr5r4fNo+L/C9Rax7iPDZk1UN8VTNuDfAw/lv0yBCRKKlpxEeLrxKvSjF+j4fr99pb34CXwXGu8bpXfAx7coAIMjh+77gyuO7VuDLGPhdDxBdcFGZwP9vxul9c2ToB2wxdri
jlv47xeCYUAvdK5ux7RPzX8d/BICAiGPLfX74kbeP3PD5/hpJISzb3RdIjQ0SifJz4y06LlaejI2cODh8n6XUn7pezSsWZSGu8rkPJMHDTjJ3kysdxfBgEtmq5Y8nL0WkcuiabvDlSpntoHGTFCtwvX4574qBQwkGgNLtY+XCkZSfDKbk/TwYAeNuZLIzzPDz1V3xF5gDhEwUVYLjKfreslOVNnubn4Gp3ld7RYimFl5r/OhQyGRT8DC+1i+zlBhDAU839XD3VrpD7GSoIAy8BhtwMWwwYeKu582d4qF3BylxWJCDw1kjPov1F0SNDRKKyg4Pg7SSvM1cQgpWJ3Elvkv38ECIzkZuCEKxI4M7AF+nhiTgvb1ndk4IQPBYdx5kMzN/ZBVn+gbJm4QpCMCc8ijOJlptag5khEbIMBYYQTAwIgS+HAahiFFgcHiePAYIkLz+EuXqMeZwQghWRibIYBECYiweSPLlrhayKTJZtVPk6OCPLhzv51MqIZNmeF1eVBtP8IziPrwhLkW0kOCiUmB0Qw3l8eWiqbCe9kjCYHxjPeXxpcKr8JQ1QLAhM4jy+KDAVVpkMK6VYFJjKeXxBQJrsmJ2RarFcmuefPi6Mef4ZnMdn+2bYvaV2tFiwmO3HzZjmzX1MrCgoZvpmyz7Pf1qPDBGJUikU2JaRIXt3w5YM7g+OIQRPZGTK3qWxLZ2bQQjBU+lZsronK6XYnpbB2+aptGxZAZhWSvFkKn9a/CeTs2QFFbKU4qmULH5GYqY8BiieSsrizUT7RLy86wCApxKzeRlbYuVdBwOC7fHZvMsvm6IzZS2zKwjB5uhMaHg8RGsiMmTNwBWEwZrwdN6U9Y+HpMJBRvl0BWHwWEgKr0dkYWAyXGUUKFMQBrP94xHo5MHZZoZfPHw10ivvMiCY4BXJm+o9yysKIU7SJzYEBPGuQbzZVRPcQhHrGiwrKDbE0QcZHlGcx8Oc/ZHmHi3LS+WtdsMkb27D0M/BCxO9ksHIGIZdlE6Y/gXOripWjwwRGdqYlir5NVUQghnhYbxVYwFgXXKS5BgLBSHICAhEoi9/jYiV8YlwVqkkXQtDCGK9vJEdyF9meklULDwdHCV1HgwhCHZ1w6zQCN52c0IjEegszcVNQODt4IhFEdwzYwCYHBCKKHcvSQYoAeCiUmNFFH+NiBTvAKR5B0jyWBAAaoUCa6NTeNtFu3tjmn+4ZK8IIQSbYvg7wEAnN8wPjpXMYCnF5hh+49NL44TlYSmSGVbKYnM0/4zSWaXB2vAMeYyoibxt1AolNoRPlDzwWSmLzZGTedsoCION4VMkM1hQbIzgL0tBCMHGsGmSzg/YZvgbwoX/fl3oDMk7gAiAtaEzBMtSrAqZJdlLRUCwMnimoJG8PGi2ZM8LA4KlgdOhYqQbyV8UPTJEZMjfxQU/mz/P7r9jCIGrRoNXFy4QbOvh4IjfLlwoieGgVOL3ixYJtnVSqfDHhUvtZowElf1p4VLBj1qtUOCvC5bZvbZLYLuWvy5YJjj4KxgGf5u/HAxjn7lDABAC/G3+44LBdoQQ/G32cqh4goz59JfZy+Aoog7Ma9Mfg0ahtHvQoAB+P+0xuKuFZ9e/nrIULiqNJKPq1UmL4esoHJ/xSvYieGqcJA3iL2fMR5iLcDG3/0ldAH8HV0mMryfNQry7cP2UryfORYiTpyTvy9MxU5HhFSLY7kuxsxDt6mc3gwBYFzYBU3yEC61tiZyGJPdgu+8VAcHiwDTM8+ee4Y9oVegkZHlF2f3uMiCY4ZuAJUHCBUEXBWRhmk+SJEamZwxWBgvX+Zruk4K5fpl2T54YMEhwC8OaEOE6Xxke8VgcMM3uvoQBgzDnIKwPXWznX34x9cgQkaktGen47swZotsrCIGbRoMd69ciTMAbMqJViUl4Zc5c24ApkuGkUuH9NWsR4+0tirE4Oha/nb8IDBH32SkIgYNSgXcfX40UP3GFsGaFReCvC5ZBQYioF48hNkPnzSUrMS
FQXCGsiQEheGPBSigZRtQAy4BAQRj87/zHMT04XBQj1ScA7y5YazMURDAICBgQvDbzMSwMixXFiPPwxY75G+CoVIkaNEbejVcnLcTKSOHBAgDCXT2xa/4muKo0dg1M/5M5F5tjM0S1DXRyw845m+GhdrSL8bXk6XgmfpKott4Oznh/1jZbMKYdjKdiJ+NrieKKQrqrHfHOjO0IdnK3i7E+PAvfTRE3kXBWavDPKU8gwtnbLuNwSVAqfpi6TFThSQeFCn+b+ARiXQPsGsRn+cXj52lrRDFUjBK/z9yOZI9Q0YM4AUG2VxR+mb5ZlCGmIAx+lrIVmZ4xdjGS3MPwq7QnoRSxu4chDL6bsAVTeJZXHvobEES5BOGXqc9BoxAuCkkIwVdiNtgV58GAIMTJD79IeRGOCvuqYH9R9blV362rq8Orr76K8+fPo62tDUFBQdi2bRt+9KMfQa0WV7Xzv6H67og+KCvH7y9dRrNWO2ZZeAUhYCnFjIhw/GLBfNFGyP36uLoKv750CfV9fZwMK6WYGhqKX8ybj2gvcdU479dn9XX45eULqOrt4WVkBwbhF7PnI8mXOyCSSzeaG/GzK+dR1t3Jy0j3C8AvZs5Hur99pcEBIK+jBT+98gmKutp5GUnevvjZtAWYFCg8Yx2tku52/OT6x8jpaLlXwv5Bhu2/xXp445XJ8zEzOMJuRlVfF3588yPcaG/kvY4IV0/8eMI8LAjlX1oaS/UDvfjJrXO43FrLwbBdR4izO36QNRfLwu0vP94ypMUrOWdxoaUKZPhbGOs6Ahxd8d20OVgdyR0QyaUuwyB+lvchPmkuBwg4GT4OzvhG0hxsiuKPBxpLfSY9fllwBmeb745ZvXakTLyn2glfjp+J7dGTRQ3e92vAbMDvS87gdJMt4/HoJYgRhqvKAc9Ez8DTMTPA2OlF0VtM+FPphzjZnAvLcNG/+ykEBBQUzkoNtkZMw3Oxc+321BitZvyj4iyONd2CmbU8tMgxUinZQaHG+rCpeCFmoSgD4X5ZWCverjmHo41XYWBND1VfHrnzKkaJFcFT8OWYZVDzbKkdS1bKYk/dRzjSdBE6q/HevXnwWgiURIHFAZPwfMwKuw0ElrI40vQJjjR9giGL/t4zHs1gCIM5vhPwfPRaOCnHZ+fb5yV7xu/PzRA5e/YsDhw4gM2bNyMmJgbFxcV47rnnsH37drz22muizvHfZIgAto7vSl09duXl41ZTE4ZMJigZBh6OjliZmIAtGemCMSFCopTiemMjdhXk41pDA4bMZigIgbuDA5bHx2NbWjqiJBggoxl3WpuxszAflxrqMGgygSEE7hoNlsbEYVtqBuK9fWQz8jvasKsoD+fra+6lI3fTOGBJVCy2JqcjxVecp4VPxV3t2FmSh4/qKu8xXNUazA+LxhPJmUj3s9/IGa2ynk7sKsvDmbpyaI3G4UFCjdkhUXgiMRMT/ILtHoxGq6q/G7vL8/BBXSn6jAawoHBWqjE9MBxPJmRjin+obEbdQA/2VuTjeG0x+kwGWFgrXFQaTPILxRPx2ZgRGCl7F0zTYB/2V+fjWF0Ruo06WFgWTkoVsn1C8ETcBMwKiOLcfSVWbXotDtTk4mhdAbqNQzCzVjgp1Uj1DML2mImYGxgnO8dJp2EQR+pzcbguD52GQZhZCxyVaiS6B2Br1CQsCEqQnVOjxziE4425ONqQg3a9FibWAgeFGrFuftgcMRkLA5OhlrnVW2vW42RTLo423Earvm+YoUKEsy82hE/G4qA0WYG6ADBkMeDDljwcabiBVn0vjKwZGoUKoU4+WBs6BYsC0+GklDez11mM+KQ9D8ebrqFR1wUTa4aaUSHQ0QurgqdicWA2nJXSg4EBwGg14UJHHk42X0GjrgNG1gQ1o4KfxhPLgqZiUcAkuKrkpSows2Zc7crHBy2XUD/UAsMww1vtgcUB07AgYArcVfK3rP879IUwRMbSH/7wB7zxxhuoqakR1f6/zRAZLUqpqM
GhpKsDRypK0Dqohd5igZtGg0RvX6yPT+HN02APo7ynE4cqi9E8qIXeYoarWo1YDx9sjE+DvxP/iz3yighxqvu7caiyCI2DfRgym+GiUiPa3RsbYtMQ7ML//MQyGgb6cLCqELUDPRgym+CsUiPS1QsbY9IQyrEl1l5G81A/DtYUoFbbjUGzCU5KFcJcPLEuKg1RbvxLXWIZbXotDtcWoErbiUGzEY5KNYKd3LE2Ih2x7vzBxSMcIUaXYRBH6/NR3t+BAbMBjkoVAhzdsDosHQkeAePC6DUO4URjPu72t2LQbICDQgVfB1c8HpKOFE/h5TQxDK1Zj1NN+Sjua8KA2QCNQgkfjSuWBqUh3VPYABPDGLQYcLYlH4W9DRiw6KFiFPBSu2BRYBoyPSPHhaG3mvBxWx4KemsxYNFBQRTwVLtgrl8asr2iBb0aYhhGqwkXOwqQ11eJAfMQGKKAh8oZM3xTMdErYVwYtsGyALm9ZRgw62wxb0pnTPZOwSTvZMHU6GIYFtaKmz0FyO0tuZc51FXljCzPZEz2Shf0nIhhWKkVeb1FyOktwIBlEJRSuCidkeaRhEleWYJBoGIYLGVR1F+E3N5cDFgGwFIWzkpnJLolYpLXJKgZ/tUBsf37F1FfWEPkxz/+Mc6ePYs7d8auqGk0GmG8L1W3VqtFaGjof60hwidKKU5Vl+Odwjso6GyDctj9TYF7s04GBMui4/F8+kQk+9i/BEIpxbn6SrxVdBt3OpqhIAzYYVfv/TPbJeGxeCFtMjJ8pXkHLjRV41/Ft3C9rWF4CQoPMCilWBAag+dTJmOiv/1LIABwpbUO/yq5icuttWAIAaW2KH4GBGTYDT8rKBIvJE3GtMAISYxbHQ34V+kNXGipeoBhc4nathBP8w/HlxKmYE6QcGDgWMrrbsI75TfwcfP/FQ77PwaBlbKY4BOKZ+KmYGEwd94JPpX0tuKdyms411x6zzCyMWzr3lbKIs0zGE/FTsHS4CRJHV2lth3vVV3Fh81FsFLbuUczEt0DsS1qCpaHpNm9dAAAdYOd2FFzFR8058PCWkGG3dX3M2Jc/LAlcipWhmZJCiRt1vVgd+1lfNCcCxNrHpMR7uyDjeHTsCpkot1LBwDQbujD/vpL+KDlFvRW0wNu95FlryBHL6wLnYFVIVPsXjoAgG6jFocbL+J0yw0MWQ1gCHMvWdYIw1fjgdUhM7AyeAYcRMQvjFa/aQDHmy/iTOtVDFp0DyxHjvy7p9oNy4NmYkXQbDhJ8EAMWnT4oOUCPmy9BK1lEAowsA7vKhn5dzelC5YGzsLyoLlwUdrvgdBbDTjb9ik+aruIPnM/GDD3dq6M/LuL0hkL/GZhaeACuKns3/JstBpxvuM8Pun4BD2mnjEZjgpHzPadjcX+i+Gh9rCb8UXXF9IQqaqqQnZ2Nl577TU899xzY7b52c9+hp///OcP/ff/1wwRs9WKH13+GAfLi8GMsV5+vxSEgIDgT/Mew4oY7uRno2VlWbx68wLeu5sjikEB/HbGYmyMSxPNoJTiD7mX8I+iG2PGFoxmsJTilf+PvfMOk6M69vbbPTluTtpVzllCCSUkQOQMJgdjbGycMdgYcMC+DhhwxnwYB2wTDJiMySJJAiGUc87Srjbv7OTUfb4/ZlcW0nZPz/QCQszvPvtco1PTb/eEPtV16lRNm8e1I40nZgkhuG/9+/x69UJDDEUIbpk4h6+OPj6nCfbBzUv5xao3upyB7IxvjJ7Jd8aekBPjiZ0r+fGKV5C6HA4tdU9U1w6dym3jT8lpOeS/+9Zx6/LnAQwxLh4wkTsmnJXTUsWbBzbxveVPoiJ0Gd1r6WfUjuXnE87PaRlhccs2blr+GCmhZGFkcgLmVo3gzokX48phgl3ZvoubVjxEQk1lZQBMKRvCXROvxJPDMsLGzr3cvOrvRJWEIcbYogH8asK1+HMI8e8I1/P91X+hMxXJuhVUQmKIt5Y7x19Pid34BLsv2sQP195HezJoiN
HXXcXPxn6NckexYUZTvJWfbLiX5nhb1m2zMhKVzjJ+MvqbVDmNLxO3Jzu4c9PvqY81Zt3+KyNTYi/itpE3Uusy/pAWTAX57dbfsje61xDDa/Vy8/Cb6efuZ5jxadBH6ojceuut3HXXXbo2mzZtYsSI/02a9fX1zJkzh7lz5/K3v/1N83WfhYiIEIKb336FZ7dtNLxDvfsmdf8p53L6oGGGGD9+/w0e2qTd5lpLvz3hTC4aql+Dolt3rVjA/euW5Mz42fGncPUIY4mC961bzD2rF+bM+P7EuXx1TPYtegD/2rqMn66YnzPj66NncPO4uYZsn9q1mtuWv5jT8SXg6iFT+NFEY1v0Xt2/kRuXPpUz44L+E/jFcecYcqoWNm3lmx/8myNTKPUYEqfUjOSeyRcbiowsa9vFDR/882D0zohkJKZXDOEPk680FLVYH9jHV5b+BUVVDdeKkJEYV9KfP025zlDUYnvoADcs+xNJNZ0TY6ivlvsm32AoarEv2szXl/+emJI0XI9ClmT6uiq4d9K3DeVNNMfb+faqewinY4ZLkluQqXCW8LuJ38Vvy17qvz3ZyffW3E3AgKPTLRmZYrufX4+/hRJ79jLnoVSYH67/Ja2J9pwYHqubX4z9ARWO7DsQY0qMn2/8OY3xxpwYDouDH4/6MdXO7Eumnxbl4ojkHMu8+eab2bRpk+7foEH/q1jX0NDAiSeeyIwZM/jLX/6ie2yHw4Hf7//Q37GmRzet4ZkcnBD4Xxb4N998kX3BQFb753ZszMsJAfjeolfY2tGa1W7+3m15OSEAP14yn7Wt2u3ju7X4wO68nBCAu1a9w5Im7Zbo3VrdWs//5eGEANy3YTFv1Wdvwb2ls5kfrHgp5+ML4KHty3hx74astnvD7Xx32TM51yMQwDN7VvPk7uzfl+Z4kJuWPZGTE5JhCF4/sJGHd2b/vnQmo9y47BFEDk4IZJaFFrds42/bF2S1jaWTfGfFv3JyQroZazr2cP/W17PaJtU0313995yckG7GtlA9f9jyQlZbRajcvvavxHNwQiCTt7Av2sxvt/wnq60Qgp9u+AvhlHEnBEBBpTnewW82P2TI/teb/5aTEwKZEuqBZJB7Nms/3B6q/7fjwZyckG5GJB3l11vuM9Tx95+7/pmTE9LNSCgJfrf1d6Z7z3xalbMjUlFRwYgRI3T/urfn1tfXM3fuXCZNmsQ//vEP5F7sLPpplCoED6xellchLNH1+kc2rtG3E4I/r11qokCMxL82rsxq9cD6D/LeQSFLMg9u7DlP6FD9dePSvKtZWiSZv25cmtXuwS3L8spfgEwuz183Z59cH96+LO9y1DISf9vyfla7x3Ytz9lB6JYE/H3r4qw32qf3rCCl5t+t5F/bF2ftEfL8/lVElVReFS0F8O/dS0gqaV27Vw+spjMVzZMheHrvB0TTCV27Bc3raE0E82KoCF45sILOpH5796Vtm2iItR3MociVsaB5DS3xgK7d+s7t7I405FX9U0Vleccm6qPNunY7wnvZFNqZN2NTaCc7wvoPHQ2xRlYH1ufN2Bvdz+bQNl279mQ7yzqW5c1oTjSzrnNdzq89FvSReQbdTki/fv349a9/TUtLC42NjTQ2Nn5UyKNei+v3si/UmfeNXBGCxzatJZ7WvtGubjnA5o6WvNs1KULlqW3rD2517UlbO1pY3lyfd+8YRai8uGszbfGops2+cIB3Gnbm3Q9FESpv7d9OfaRT06YlFuaVfZvybqClCsEHzXvZ3qkdQQql4jy7e23+DAQbAo2sa2/QtIkrKf6za2Xe75UA9kTaWdq6R9MmpSo8vmuZqcZsLYkQi5q0b+aqUHls95K8S3dDZofNG43aESQhBI/vWWyqf1NcTfFqw2pdm6f2vWeqT4kiVF46oO+sP7f/3bydaMg4oC8d0HekX2xYZKoXiozMywfe1bV55cBC04xXDyzStXmjaYFpxuuN7+jaLGjJHo3Lxnij6Q1Tx/i06iNzRO
bPn8/27dt58803qauro6am5uDfZ1WPbVpruuNpMJng9d3aywGPbzHPSChpXty5WXP8iW3mGaoQPLtjveb4kzvWma5ZIUkST27XfsJ4bvd6062+LZLEf3ZqR6le3LuBVFfBqPwZMv/ZtVpzfH7DZiLpZC8wtCNh7zVvpz3LE3o2yZLEk7uXaY6vaNvNgVjAHAOJJ/doMzYF69kVbjbV5FECnt73geb43kgzGzr3mnLaBILn9mtHwlriAZZ3bDEVylcRvFC/WHM8lIqwuHWNqS60KiqvNb6PInr+DSSUJAtb8osiHMpY0LKUhNLzb0ARCm83v2easbR9JeF0z78BIQRvN79tyolWUVkfXE97sj3vY3xa9ZE5Itdee21mnbeHv8+qdgTaTXdVtUgye3XyRHZ2mmdYZZk9IW3G7mCHaYYsS+wNaUcr9gQ7TLdel4C94YDm+O5wh2lnRxWCveEOzfE94Q6sJp5aIfN0vDusfXPaG27vJUabNiPSZuoJHzLv1e6INmNf1PwNWEWwN6rNqO8FhshynPqYNj8XNcY6NO+XB+K9w+hMhTUn8OZEuylnqlsxJUEo1XP0sz0ZICX0l9KMKCXStCd7vp+EUhHiatw0Q0WlNdHz555Uk4TSIdMMgNZE9hy9Y02f7aSNj1mRlLmnVgBZgrDOcUIp/bVrIxLoM/TGDDOE0D1OJJ3Me+mnW6oQuu95NJU0faMV6L/n0XSyF27lmSUePUZvlDwKZ7kOs05b93E0x7pqbJhVLAujNxRXUh85Q0WQVHuepGOK+d95t6Iax4r10nVkjtXz9zfei9cR12AkesEJyc7oveuIKbFeO9anRQVH5GOU12CPHT0JAT6b9nH8dvNNkCT0Gb7eYEgSXh2GN8dGbD1JliQ8OgyPzW564su8V9rvh0dnLBf5bNrbLN1We684O16dc3Vb7aYdQ0C3BofbYu+VJ3C3Vfszd+dRyKsn6W2t7S2GjKS5TdhlMVeu/FB5NI7Vmw3V3BoMZy9eh0uj90qvMj6O67Ac3T1kPgoVHJGPUcNKykxPrmmhMrBYuzX60OJy04yUqjKwSLtfzeCiUtMMRVUZ6Ne+jkH+UtNTkug6jibDV2Z6cpUliYE+7foCg3ylpE1uybNIEkN82kWbBvrKe4EhM8SnXVp+gLfctJNgkWQG6VzHAK+5/kWQmbwHerWvo7/HPENCop/Ocfq6s5foz86AWneZZm2XWld53juxDlWp3Y9do5dMpaM0r2q1h8tjceHVqCVSai/CnqWUuhHZZRsltp7LPfisXty9MLlbJJlyjVoidtlOkS17LZNskpCodOReRfvTroIj8jHq8pHjTedWFDuczOuv3WH1suHjTDPcVhtnD9QuL37JUPMMqyxzweDRmuMXDx5Lb3giFw/W7uJ6/sAxppcbFCG4dPB4zfEz6kaZbhqmCMElgyZqjs+rGY5fJ2JijKFyqU432pmVQ6hw5F7q+nDGJQOmaI5PLOlPnbvU1PSqIri4/1TN8WH+Pgzz1ZiKhAkEn+s3TXO8zl3OhOKBpqNtF9TN0Bwrc/iZVjbS1E4QCYnzamdqjvtsbmaXT8RicrfJ6TUzNB0ah8XOiZXTTO9oOanyeBwakShZkjmpcrZpxvGlU/DolJQ/qfIkU86hjMz4ovHHZLn3bCo4Ih+jptXUMbCoJO+vqixJXDlqPHaLduXIseXVjCmryvsmaJEkLhk2FrfOksbgojKOr+6X9yRukWTOHzSaYof2U0qNx8/JdYNN1BGROLXfMKrc2pNnqcPN2f1H5f3UZ5EkZlUNYIBPO+ritTm4aMC4vK9DRmJ8aR9GFmt3IrZbrFw68Li8Pw8JGOwrZ2JpX00biyRz2cAppibXGlcRMyq0e/RIksQVA4xVw9VSsd3NiVUjdW0u7T/DVHTHbbFzao1+K4QL+840xbBKFs6o0W+FcF7tTFM7QSTgzBpthwrgrD6z86pT0i0VlTNqtJ0dgNOrTzC9o+X06tm6NvOqzD
NOrZ6ra3NC+QmmHBEVlZOqTsr79Z9mFRyRj1GSJPHVCVPzLjplky1cOUr76btbXxs/La+bYKbRl8Q1I7Wfvrv1lTFT817WEAhD/WauHzUtb4YqBNeP1H767tZ1w6fmvZNLEYLrR2afOK8eMgUpz1uUiuD64dpPxt26bNBkrJIl72J5Xxo2M2uJ9wv7TcJhsebtjFw7ZGbWuhdn103AZ3Pmzbh64AxsWUq8n1IzjlK7Ny+GBFzSf3rW8usnVIym2lmSl5MrIXFu7TR8Nv3lhMmlw+nnrsrrSV9GYl71JEod+tWrR/oHMszXL2/G9LJx1Lj0l8MGeGoZWzQsT4bMuKLh9Pfod3quclYyuWRC3ozBngEM9Q7StSu2F3N82fF5/dJlZGqcNYz2a0eJj2UVHJGPWRcPH8NVBpyJQyWRcWL+fOq59PFmL3t/1sARfGWsdnhaiyGAP8w9m8HF2XsqnFg3mJsmzsqJ0a1fzTid0WXaT/jdmlrVlx9OPjkvxh1T5jGpMnun3zGl1fxi6hl5MW4eN4fZNfo3J4DB/nLumXpeXg7oV0bM4LS67M0Oa93F/H7aRQA53wavGDSZ8/tlb3ZY7vRy79QrkKTc3CoJOLduPJcPyP6d9Ntc3DvlaiySnJOjICFxUtVIvjBY/8kYwGmx8cfJ12KTc3OqZCSmlg3hy0PmZbW1yhZ+M/GLOGRbzowxRf35+rCzs9tKMneOux6vzZWTwyMjM9jbh28NuyirrSRJ/HDU9RTbvTlN4jIyfVyVfGf4lYbsvzv8i5Q7SnJmlDtKuHn4dYbsvzr4WqqdlTkz/DYfNw3/mqFeTNf0v4Z+7twcNxkZt9XNTcNuMlWg7tOsz+ZVf4KSJImfzjyZa8dk1uOzhewtkoRNtvDAqedxYr/sk163bp0yh6+PP94wwyLJ3Dv3HM4aaLzD7zfHzeB7x51gmCFLEnfPPINLhhrv8PvFkVP48eSTkSDrzdYiZW75P5kyj2tHTDbMuHTwBH455QxkJEPXAXDL+BP52qjskYpund1vNL+Zdj4WSTbM+PrIWdw85kTDjJNqhnPv8ZdglS0G3qsM4/NDpvGD8acb7iI8rWIQ9027EocBRvcEfGG/4/jphPMMM8aX9OPP067FabEZZpzWZwy/mniJ4Rv5MH8f/jz1S3htzqyfR7fTNbtyBPccd5WhpnoA/T2V/L/JX6PY7snqjHQzJpUO4TcTjTXVA6h2lfKHid+g1O43wMj8jSrqzz0Tvmp4V0yZo4h7xn+HKmepIadKAgZ5a7lr/LfwaOxkOVx+m5dfjr2JWlelISdXQqLWXcUvx96E3+Y1xHBb3fx49Hfp76kzzKhwlPPT0d+n1GDehsPi4HvDv8dgr/YS5OGMYnsxt4+4nXKH+UTqT6ty7r77cSqX7n2fRr2xewcPrlvB4oa9B2+GqgCLLKGoKnaLlYuGjea6sccxpCR7lKInLdy/i79vWM6C/bu6cggkVCG62tmrWGWZ8weP4otjJjOyNL9s7fcP7OXBjct4Y9/2rqflruuQMixZkjh74Ai+OGoKY8vz6y65onk/f9u0jNf2bQUyE5DSdR3dy1Bn9BvOdSOnMKlCP0yrpbVtB3hwy1Je3rvp4HkrQiB33bNUIZhXO4zrRkxhWmX/vBibAk38c+sH/HffBtKq2vU5fJhxQvVgrh02jVlVxh3PQ7U92MK/tn/A83vXkFIVLJKMKkSXIyBQhGBG5UCuGTyNuTXZuzn3pD3hNh7ZuYTn9q0ioaQOY2QSUyeV9eeqgcdzcs1Iw07IoaqPdvDv3e/z7N4VRJUk1h4Y44r7csXA4zmtZmxejOZ4J0/seZ9n9y0lnI5jleTM5wHQ9RsZ4a/l0v7TOb3PhLyWWtoTIZ7a9x7P7V9CMB3FIskHlwOlLsZgbzWf6zuTM2omG3Z0DlVnMszz9e/xfP17BFLhHhl93ZVcUDebM2qmGXZ0DlU4HeXlhnf5b8Mi2pOdPTJqnOWcU3sCp1fP0E
we1VMsHee1pnd5qeEdWpMdB79XQNfvUaXcXsJZfeZyWvWsvLYxJ9UkbzYt4rXGt2hKtPR4HSW2Ik6tPolTquboJqhqKaWmWNS6iPlN82mMNx6MkAgEsiSjCAWf1cfJlSdzctXJeK3GnKlPk3KZvwuOyFGgXZ0dPLt1Aw3hELF0Gr/dwciyCs4fOgq/Q/+pJZ5OYZHlrOvie0MBntm2gf3hILF0Eq/NwfCSci4cqp80CpmS7xKSbpIsQEMkyNPb17MvHCCSSuKzORhUVMpFQ8ZS5tT/MSeVNEgS9izX0RwN89TOdewJdRBOJfHa7AzwlXLR4DFUuvR/zN3N0OwW/ZtwazzCs7vWsSPYRjiVwGNz0M9bzIUDx1Lj1v8eptQ0qhA4suyU6UhEeW7POrYHWwinkrisNmrdRVwwYBx1nmLd16ZVBUWoWRnBZJwX9q1lS2cz4VQcp8VGjdvPuf3GMcCr79h2M+yyVXeCj6QTvLR/LZs6DxBKxXFYbFQ5fZxdN55BOtuBARRVISUUHLJNlxFLJ3m1YR0bOusJpmLYZSsVTh9n9BnHML++Y6sIhZSanZFQUrzZuJ41gT2EUzGsspUyh5dTqscxskjfsVWESkpN4ZDtuoyUmmZB83pWd+wklI5hlSwU2z2cWDWO0f5+uq9VhUrSACOtKixuXc+qwHZCqYzTU2TzMKtiHGOLBmZlpNQU9iwMRagsb9/Ayo7NhFJRJEnCb/VwfNlYxhUPNcBIYpcdWe1WBzaxomMD4VSmrLrX5mFSyWgmFI/UjXwJIUiqCWyyPavdhuBmVnSsIZSOIITAa3Uzrng0E4vHZn1tSiSwStkZ28LbWNGxglA6hCpUPFYPI30jmVA8AWseDuGnRQVH5BiWoqq83bCdh7Yu54PmfQf7mHisds7qN5Krhk1iTGl+UYduqUKwqHEHD29fzuKmXSS7GC6LjdPqRnDVkMmML+2T1xNot4QQLGnZxSM7lrGwacfBCpJOi5WTa4ZzxaApTCrra5qxom0Pj+1axjuNW4h3MRyyldlVQ7h84FSmlevfnI0w1gX28/juD3izcePBipt22cLx5UO4bMA0plcMNr32u6mznif3LOGNxnUHK17aJAuTygZxSf/pzKgYZrrmw/bQAZ7Z9z5vNK4m0lXx0ipZGFvcn8/1ncmsipF5Pa0fqj2RJp6vX8z8xhWE05kKkhZJZqS/HxfUzWJ2xVhsJm/O9bFmXm54j/lNywh19QaxSDKDvXWc2+cEZldMMF27oinexmuN7/JG0/t0psJAZq1/gKcPZ9XMYXbFpLwiAoeqLdHBm02LeLP5XQKp4EFGraua02rmMrt8qulCWoFkgAUti3i7eQEdqUyrAgmJamcVJ1eexMzy6bjziAgcqlAqyHttC1jU8hbtyTYEAgmJckcFcyrmMb1sNh6TEYFIOsyy9gW81/o6bcnmg4wSWznTy+dxfNmJeK3m5pC4EmFFxwKWtL1Ka+LAwb4yflsp00pPYUrpPPw27dpInzUVHJFjVC/s3sAvV71Jcyx8MKR/qCySjCJUxpZW86tpZzGyJHtC6OGaX7+Fn616jYZoUJcxoqiSX0w+i/FluS+DLGzczs/WvMLeSMfB4/XEGOwr5/8mnsXk8tyXQZa27uL/1rzErnCrBiNzbf08pfxo3FnMqDS2pnuo1nbs4//WPs+2UJPuddS4irl19JnMrdbfVtqTtgQb+Pm6Z9gcbNC9jgqHnxtHnpl1W2lP2h1u4s6NT7G+c2+PDJnM8lep3ctXh57JmX2y73g6XPXRVu7Z/ARrAjuRJfmIZm3dDL/NzXUDz+A8nRoaWmqJd/D7rY+xMrAFGfmI7ZoSEgKBx+Liiv6ncUHt3Jyd0I5kkPu2/5tl7esPnnNPDJfFwYV1p/C5ulNzdkJDqTB/3fkoS9tXZ5Y5NRgO2c6ZNSdzSd9zcmZE01Ee2vMoH7QtRXT9X0+ySTZOrjqRi+suyvnpPaHEeWLfwyxpew+B2iNDQk
KWLMwqn8vn6i7HJufmvKXUJM/XP8KS9rdQhaLJkJCZUnoCF9R+HkeOzpsi0rx64FHeb3uV9MG+OEd+JiAxvngm59dej9Niznk7FlRwRI5B3b9hMfeseceQrSxJOGQrf51zMTOqBxhmPLRtGT9b9RqQvZaYLElYJZn7ZnyOE/sMNcx4evdqfrjyvzq3vkMYZBJcfz3lQs6oG2WY8Ur9er6/4hmEEFm3MWduHxI/m3ge5/ebYJjxTtNmvrv8cRShGmII4PYxZ3PpAP26DYdqaet2blrxcGa5x+Cem28NP52rB51gmLE2sJubVz5IXE0Z7uT6hUEn86XBpxpmbAnu43urHyCqJI5wcrT0ub4n8LUh5xp2FPZEDnDr2j8RTEUN14s4vXo63xxqPMH1QKyFH6z7Ax3JoGHGCRWTuXHY1VgkY5Gk1kQ7P93wW1oT7YYZk0vG8Z1hXzbsKASSndy1+R4a402GGBISo/wjuXHYN7EbdBQi6TC/3/or9sf2GupIKyExyDOEbw79Hk6DVVDjSpQHdvyKPdFthhl9XP356uAfGo7AJNUED+3+FTvC6w0yZCocfbh+0B34PuPRkVzm78KumU+BHt++yrATApmllbiS5ksL/sOmjiZDr/nv3vX836rXEBgraKoKQUpV+Nrip1jVtt8Q482GLfxg5Qsaz0Y9MBCkhcrNy57h/eZdhhjvN+/glhVPG3IQIHOtKoIfrnqOdxq3GGKsbt/LzcsfIy0UwwyAX65/kVcb1hlibOls4KYVD5HMwQkB+OOWV3lh33JDtrvDTRknREnm1E7+Hzvf5D973zVkeyDWxvdWP0AkHTfshAA8tW8hj+5505BtayLAbWvvy8kJAXi18X3+uftFQ7adqRA/Wv/HnJwQgIUty/nrjqcM1aqJpKP8fOMfcnJCAFZ0rOPPOx4yxEgoCX6z9XeGnRDIJFhuDG7i/u0PGPqepNQk923/DfWxfQZ/6RnGzsgOHtjxRxQD3XgVkebBXb9lT3R7TowDsb38beddpNTsDf1UofD43t8bdkIyDJXWRAMP7voFyV5stnesq+CIHOVqi0e4Y/lrOb9OIEiqCt9f8lJW21Aqwe3LXsq5/oQgk7NyywcvZL0JJpQ0t654PkdCF0cIvr/8uawTWVpVuHXls+Qb47t95XMHE1r1zuUHq59GFUZvTf+TBPxkzXNE0/qdOoUQ/HTd06TUnkPN2fSrDc8TSPbcdv1Q3b3pmYwTkgfj3i0v0hLvue36ofrj1meJphN5Mf6+8xX2R1uy2v1t5/N0piJ5Vc58ct+bbA9nd6Qf3fMibYnOvBivNC5iY3BHVrtn979CU7wlZ4ZAsKh1KasD6w2cy2vsi+7Pi7EysJpl7dmd3AUtb7IrsiMPhsqm0Hreb8vu5C5rX8i28HpEjgwVlT3R7bzXOj+r7brOJWwMLsv5N6ii0hjfw8KW/+b0us+yCo7IUa4nd64lreZfXXR9RyPr2g/o2j2/Zx1xJZVXwS0Vwa5wO0tb9uravVK/kWAqnjejKR5iYeN2XbsFTVtpTYTzmrwF0JmKMf/AJl27pW072R9tz2tiFWRaq79cv1bXbmPnfraFDuRdIjwtVF6sX6FrsyvcxJrAblNlyF+oX6o73hhrZ0nbprxLhMvIvFD/vq5NIBliUcvqvMt3W5B5qUF/4oukY7zdvDR/hiTz8oGFujZJJckbzYvyZsjIvNr4jq5NWk3zZtNbef0+ILO0Mb9JP0qlCpW3m+ebYrzd/Lrug40QggUtr+RdTj3juL2aNbqzuPVlpDynSIFgSdurKELJ6/WfNRUckaNYiqry0Nblef+oIXMTfHTrSs1xIQQPbVuW9/G7GY9s139SemT7UlN9SiySxCM79M/z0Z1LTXUFlpF4dOcHujaP7frA1O4UCXh01/u6N9on9ywxxRAIntj9vu6N9tn95hgqgmf2vU9a1b7R/rdhianPXEXlpYYlxBXtMPprjUtM/T4UVN5sWkY4rR1Beq
d5KUk1lT9DqCxuXU1HMqhp837bCmJK/qF8FZXVgQ00x1s1bVYH1hBMh/JmCATbwtvZH63XtNkc2kBbMnsUS49RH9vH7qh2BGlPdDuNcePLPj2pPdnCtvAGzfHG+F72RLfkHHE5VOF0J5uCxpZJP+sqOCJHsdZ3NNIYzf/GAZmb4It7N2qO7wy1sTPUZuInnWG8Xr+ZtNrzj7YpFmRdoMHU07ciBO827yCc6nlZI5iK8UHrLlNdgVUEazr20xLv+T1PqWkWNG3OKdfhcAlgZ7iFvZH2nseFYH7jOlMMgMZ4gK1B7UjY/MbVphmBVIR1gT2a4282rTT1mQNElQSrOrQjYe80rzA1IQGkRJrl7dqRsIUt+tElI1JRWdquHQlb3LbCVMM0yEQTPmhfpTm+tH25qQ60kIm86C3PrOxY2gsMCyvataNtawJLkDG3jVzGwprAEs3xdYH3TV+HhMy6Tv2IXkEZFRyRo1ht8ezr/EYUTadIaOQ+tCUivcJQhCCY6vmJrrcYAB0auQ8did55rwDaNc63MxkzPbEeZCTDPf57XEkdrKlintHzdahCJZSK9QqjI9XzdQB0avBzZiS1nfFAypyjDpkJXO843TU8zMgiyQfrjfTISHaadqhkSSak93mk8stxOVSSJBHSiaqEUrkl8/YsQSit/Z6H0ubfKxWFsA4jogTJvWvThyVQCXXVZilIXwVH5CiW2SfWQ6UVrVA0/j0/Rs9hei12foyej5XuzfdK41gfy+fRq4yeP4/Mzqjecaj0lmbMT0hdDJ33pDfeLwn972jvrPNLKDrvVW/lEqR1jvNpYQiE7nEyS47mv79pHYdf7aX3ysgOoIIKjshRLb/NWFOqbLJIMm5rz5UkfXZzlRkPVZG95/3//l5l9Hwsv633GFrHytaWPSeGxnW4rXbTIfqDDI3ztUhy1jb2RqX3nrhNVv08yNBpnOY12FRNTyoiC8NjniFUvDoVSn0GG7fpSQih2xelt/qZ6DE81ty69PYkCRm3znvusrjzTiI1ynBazH/mAB6T1Vw/Kyo4IkexRpdW48zSFyWbZEliSkWdZmGoof4KfCYdHhmJ0SXVODTOtc5dQrnD3E1QAgZ4Symx93wTLHd46esuMT2FVzp99HEX9zjmttoZ7q82lYAJUGRzMdDTcx8WWZIZX9LfNMNlsTPMX6M5flzJICwmf/5WycIof19tRulQ02XnJSTGFA/UHJ9YMtz0dQCMKdaurDu+eLjpyVUgGF00RHN8tH+46c9cRWWkX7u44Aj/cNNOriIURviGa44P9Y0wHQlTURjm1e4CPsQ7ChVzEQuByhCvdpHEQZ7RphkgMdBjvBDjZ1kFR+Qoltfm4KJB40ztBFGF4PPDp2iOOyxWLh90nDkGgmuHTtUct8oyVw6ebPpGe83gaZoOlSRJXDnIeNXSniQjccXAqbqT5+UDjjeVJyIjcXH/KbqN9y7tP90UwyLJnFs3CbdV28G8qO+MvLfVdjNOrZ5AkV37yfH82pmmlk4sksys8jFUOIo0bc6qmWXqOmRkJhQPo9al3Xn69OpZpiZXCYmh3v4M8mo7bSdVzcz7+N3q46xmpE/b2ZldPtNwhVctldpLGVs0RnN8SsnxOGRzDzZeq48JxZM1x8cWTcVtMfdgY5edHFei/Z4P842nyJZfx/NuWSQLk0pONHWMz4oKjshRrquGHmdqJ0i508PJtfol2C8ffNzBVtv5yGdzcGZffc//4gHHmWouZ5etnNdPv4/Kef3GZ+1CrCdZkriw/0Rdm9Nrx+I2sawhEFzUT9sxBJhbNYpiW/69KhShclE/fadsatlQqpzFphgX9p2uazOmaAD93VV5u5+KUDm/Tn+CHuStZYSvf95P+ioq5/bRL4lf46pgQvGIvKMiAsHZfebq2pTai5lSOsFU5OXMmpN0f2Meq4fpZcfnzZCQOKXqZN2S+A6Lk5nlc/NmyMicUHGybrl6q2xlZvkpeS/PyMgcX3Yidh2HSZ
YszCg7I+/vlYzMxOITcPfSctixroIjcpRreHElFwwYk3c04dYJJ2GV9T/mvt4Srhw8Ke8J4+axJ2ouy3Srwunli0P1Jy49fWPkCXizLCH5bS6+OnxO3ozrhs6kLMsSksti55sjTsnr+BJw2YDjNZd+umWVLXxzxBl5MiTOrj2OgV7tJ3zILAF9Y9hZeTFkJOZUjmFkkfYTPmSiVF8deg757D6QkZhcMoyJJdpP+N36wsBz8/ruysiM8g9kaln28PmV/c9GknKflmRkBnnqmFk+Iavt5+rOxiJZcp78ZGT6OKuYXZE9InhOnzOxy7nnIcnIlNpLmVORvY/RvKozcFlcOTsjMjJeq4+5FfOy2s4uPx2v1Z8zQ0LGaXEztyL7d39q2SkU2crzYEjYZDtzKy/I6XWfZRUckU+BfjntTKZW9cv55nHj2NlcOGisIdsfTjyNuTVDcr7Rfmn48Vw1RDuMeqi+M/okzqwbnSMBLh1wHNcPMxa6vn7obC7qf1zOjLNqx/LNEcbCqJcPmMZVA3PrDisBc6pG8N1RpxuyP7duEl8aclKODInJZYO4fcz5huxPqhrH14eemRNDRmJUUV/uGHOZIftpZSP59rALc2YM8vbhp2M/byiKNq54CDcNv7Kry6pRhkytq5yfjL7e0HLFMN8Avjv82i6GMYqMTLmjmB+P/ho2uedk8UPVz1PLd4ffgCzlxiiy+fjBqG/jtGRfEqlyVvGdYd/CKlkNP9zIyHisbm4ZcbNuomq3Su1lfGPo97DKNsOTuIyMw+Lg28O+j9+mvRTXLZ+tiBsG345dduTEsMk2vjL4Nkrs5VntXRYPXxr0Y5wWj2GGhIxFsvL5AbdT7tDO0Srowyp03/2UKKGkue2Dl3hu94YeW7V3S0ZCkiR+NGke1wwz5iB0K62q/HTlqzy2c+XB9vJaDIDvjjuR64dPz2nJRREq96x7g39sX6LPkCSEEHx9xAl8Y+ScnBhCCP60+W0e2LoQSZI0l526+V8YMoObRs3LqZW6EIIHdyziT5vfAI5s1X4445L+U/n+6DOx5rh09MTuxfx280sg9BiZ78NZtRP5wZgLsOXYrv3F+mXcs+lZFKHdjrCbcWLlWH405lIcluwT66F6q2kVv9r4OGmNVu2HMqaVjuCOMdfg0slx6UlL2tbxq00PkdBpaNbNGF80lB+Ovk53J0tPWt2xibs2/52oEkdC6vFaZGRUVIb5BvDDUV+hyObLibEpuI1fb7mfcDqaldHfXcetI79Bqb04J8bO8C5+t/WPBNPBrIwaZzU3D/8OFY7sk/ehqo/t495t9xBIdWRllNkr+NbQ71HlzG3ybo438MDOX9GebEZC7rESave/F9lK+fKgW+nj6pcToz3ZzD92/YKWRP3B8z2Skbk+r7WIawfcRp07eyTvWFcu83fBEfmUaVNHM49uW8HTu9YdUaSswunlmmGTuGTweCpc+a9Nbg+28u/tK3hy12piyodLW5c63Fw5eBKXDppItTv/z2R3uI0ndq3giV0riaQ/PHEU2ZxcPmgylww8jtosyxh6qo928J/dK/jP7uVHFFvzWh1cPGASlwyYTD9Pad6MplgnT+1dzn/2LD2i0ZzbYueCfpO4uP8UBnp73iVjRK2JEM/vW8aTe5bQdlghNIds45y647io3zSG+KrzZnQkw7xUv5yn9i2mJfHhZnY2ycLpNcdxQd/pDPfX5s0IpqK8dmAZz+xfRGP8w4WerJKFk6omcH7dLEb4+uadTxRJx3izaRnP1y+kIf7hUuMyMrMrJnB2n1mM9g/KmxFTEixoXsaLDe+wL9b4oTEJiePLxnNmzQmMLRqaNyOhJFnctoxXDrzNnuiHm/JJSBxXMpbTqucytmhETg70oUqqKZa1L2d+05vsihzZ3XqMfzSnVJ3MuOKxeTPSaprVgeW83fw6OyLbjhgf7hvJ3IpTGVc8Me9EWkUobOhcwaLWV9kePrKK9EDPcGaXn8bYoqm6uSd6Uo
XCltAqFre+wrbwmiPG61xDmFl+JmOKjscm987W+E+7Co7IZ0ChVIL17QfoTMaxSjKlDjfjyvpo5oMIIVgfqOdALEBMSeGxOhjiq2SAV/spJ5JKsq6jgc5kHFmSMozSPpoJoUIINgcbqI92EFWSeK0OBngqGOTTzleIKynWttfTmYwjSVBsdzGupFZ3V8n20AH2RVuJphO4rQ76ussZ4tN+kkoqadYF6gkkowiRYYwpqcWp81S/K9zI3mgz0XQCl8VOrbucId4+mhNLSlVYH9hPIBlFFYIiu4tRRbW4rdo3pX3RJvZEGokqcZwWB1WOUobpTMJpVWFjZz0dyQiKUPDbXIzw1+LVqaHSEGthT6SBiBLDIdupcJQw3DdAk6EIlc3B/XQkw6TUDGOYr1a3XkhzvI3dkX1ElRg22UapvZjhvkGak5cqVLaF6mlPhkiqabxWJ0N8tRTZtHfgtCfa2RXZQ1SJYpVtlNiKGeYboskQQrAjvJ/2ZJCEmsJrdTHQ04diu3Z0IpAMsDu6k2g6ilWy4LcVM8Q7VHPyEkKwO1JPW7KThJrEY3XRz11DqV17aSGU6mRPdDvRdASLZMFrK2KwZ4TuBLk3Wk9bop24msRjcVHrqqHMUaJpH0mH2BPdSiwdRpZkPFY/Az0jdSfIhlgDLYk2Emocl8VFjbOacp0ISEwJsy+6mVg6hCRJuC1++nlG6yaANsUP0JpoIa7GcMouKp3VVDi07w0JJUp9dAMxJVPJ1WXxUesejcOiHcVqTTTRmmgkrsZwyE7KHVVU6CyRpNQ4DdF1xJVOBCpOi58a1xgcOrtyOpIttCTqiStRHLKTEnsllc46TfvPqgqOSEEHFU7FeXH/Gh7dtYQ9kbYjxieV9ufygdM4sXpk3jtOoukErzas4fE977Mj3HzE+Jiivlw2YDonV43WdTD0FFdSvNG4hif3LmZrqOGI8eG+PlzcbyYnV4/TdTD0lFBSLGhey7P732NT8MhuwgM91VzUdxYnV0/EZWA9viel1DSLW9fxfP1CNgSPfAqtc1Vyfu0JnFQ1GY81v4JgilBY2rae/zYsYF3nkU+h1c5yzukzh5Oqpua8NPE/hsrqjg282vgOqwNHPoWW20s4vWYuJ1bOwJ9nsS5VqGzo3MT8prdYHVh7RGi/2FbMKVUnMqdyFkUG8gp6khCCLaHNvNP8BqsDK48Iu/usfuZWnsTs8rkU27Un/2yMXZGtLGp9ndUdS45guC1eZpXPY3r5yZQayF3QYuyLbef91tdYHXj3iMqkTtnNtLJ5HF92KmWOqrwYAAdiO1nW/jJrAu+giA9HS+2yi+NK5jG59HTKHflPzC3x3azueIl1gddJiw9HS62Sg7HFpzCh5CwqnAPyZnQk9rI+8AIbA6+QEh9ud2CRbAz3n8rYkvOocBaWWPJVwREpCIAPWndy47J/E+1a+ujpg5aRUBH0dZfy5+OvoW+OyxRrOvZw44qHCaZiSFkYVc4i7ptybdYdHYdrc3A/N638Jx3JsOZac/e/l9q9/O646xjm75MTY1e4ke+t/iutic6D56vF8Fvd3Dnhi4wu6p8Toz7Wwg/W/pkD8TYdRuY9dFuc3DH6i0wo0d96fbia4+3csf7/sT/WpLme3c2xy3ZuHXkdk0tzSyDuSHbyy41/Ynd0fxaGhFWy8K1hX+D4stwSiEOpEL/dei/bwzuzMmRJ5ksDr2VWRW67sqLpKPfv+CNbQpuyMiQkLu9/DXMqcqsLkVDi/Gv3H9kQXJWFIQOC82qvZG7FmTkt6aTUJE/s/RNrO9/XZXSPnVp9KSdXXpQTI62meLHh/7E68BYyFs1iX935GDPLL+TkqqtzWtJRhcKbjQ+wquO/SFgQWRgTS87h5OqvIOewpCOEyvstf2Nl++OaOSUZRoY/suh05lbfhEUyV1jys6iCI1IQC5q2cOOyxxBCGCqOZZFkPFYHj8y6Xne55lAtbdvBN5f9E9UwQ8Ih23nw+C8z1G8sn2FdYA
/fXP5XUqpiiCFLMjbJwn2Tr2d0sbGktG2her614j4SStpQ4Sq5a/K7Z+L1hraXAuyNNvGdVb8nmk4YYkhdScc/Hf0lQ9tLARrjrXx39W8JpSKGGQC3jPgCsyr066d0qz0Z4Pa1d9GRNNbcrNux+tqQazix0pijEEyF+OmGX9KaaMupkNg1/a/glGpjO42i6Qh3b/4FjfEDOTEurL2E02uMbXtOKHHu3fYz9sd259RO/tSq8zmrz6WGbNNqir/u/Bm7I5tz6h80s+wMzq39giFnRBFpHtvzC7aHV5FLj5cJxSdzXu03DTFUofDC/l+xNfSu4eMDDPPN4ry625AMODxCCN5svIfNna/mQJDo75nKWXU/z8nhKSi3+buwffcY1NZgIzcvfwJVqIYrdCpCJZJO8JUl/yJooDPrnkgrN614BMWgE5JhCOJKkq8v+wftCe0uod06EOvgppX/MOyEQCacn1LTfGflP2iKB7LatydCfG/VX0goKcMTkopAFSq3r3mQ/dHWrPahVJTb1t5v2AmBTBEsIVR+tvFBdoWPXIo6XDElwY/W3WfYCTnIQPDrLf9ia2h3VvuUmuLnG+817IRkGBn9efvDbOjcmtVeEQq/2fKHnJ0QgIf2/JvVgbXZz0kI7t9xb85OCMAz9f9hefsHhhgP7f4T+2O7cnJCAF5veo4lbe8Ysn1y3/05OyEA77W9wnutrxiyfaXhrzk7IQCrA2+yqOVJQ7aLmv+VsxMCsDX0Lgub/2nIdkXbozk6IQCCPZGlLGy6N+dzK8i4Co7IMagHtr6DIpSci4QrQqUx1smze1dmtf3njgUk1XTON0AVQUcywlN7s9/MH9/zLjElmXO5cxVBRInzxJ73sto+u/89gqloXoykmuaJPe9ktX3lwPu0JXJvwS7IbKl+bO/8rLZvNy2lMd6aVylyIQSP7ck+Kb3ftpJ90Yb8GMDje1/Iare6Yy07I7vzYkhI/Gfv02QL8m4JbWJLaFPeZduf2f9kVwdYbe2N7mB9cEXeXY5fbHg8ayfbpvg+VgUW5c14vfEJUmpC16Yj2cTyjlfJt9vtwpYniStRXZtIOsDStqfzOj7AsrZniKQDujYJJcyytkfyJAjWB/5LMNWY3bSgvFRwRI4xNceDvHlgU95l4QXw711LdG+0wVSMVxrW5N1HREXwn70fkNJpix5LJ/lv/bL8GULw/P6lxBXtmhIpNc3z9Yvz7uuiCJXXGpcT0okgKULlhYb8JwsVlUWta+hIhjRthBD8t2FB3pVxVVSWd2ykKX5kMvOheuXA23mXvBYINod2sC+qH915velNU6XU98Xq2dHDVtRD9VbzfFOl1FuTLWwObdK1WdRijhFKd7K+U/+B4P3W100x4mqUtYElujYr2l831ek2LZKsDbyta7Mu8Br5OjqQaWC3LvC6rs2W4HwUoX0vyCYJiQ2Bl/J+fUH6Kjgix5ie2bvC9DEOxDp5v2WH5vh/968kneVpLZs6khEWNGnfzOc3riam40QYUVRJ8Eajdqh+Uct6gin9p7VsSqlpXm9crjm+on0zLYmAKYYQglcPaE8YG4M72B9rMnErzyQyvnpAO4K0K7KP7eE9eTtU3YzXGhdqjjfGm9gY3GyqwZyMzBuN2hNfR7KdNYFVphlvN72hOR5Jh1gZeM80Y1HLa5rjCSXGsva3TTfje7f1Zc3xtJpiefurOS8tHa4P2l7UjFKpQmFl+wumvlcCwcr2F1A17klCCNa0P5P38TMMlfUdLxyxU6ig3lHBETnGtLJtj6nOrZBJXF3dfuT21W6t6dhDPv1DDpVVklkT2KM5vjawx3QLeYskszawW3N8fWA3VpMMkFivw9gQ3GX6OgSC9Z3ajuHG4E5TT8aQiYr0tNW3W5uDO0x+4hnGRp08kW2h7SYJGcbm0BbN8V2RnaYmvW7GtrA2Y190V9ZlFSOMXRFtRmN8Hymhv6ySTQJBfWyn5rm2Jw8QV7PncmVTW7KBuBrpcSycbi
OcbjfN0DtOQg3Tmao3zUioITqT5o9T0JH6WByRRCLBhAkTkCSJ1atXfxzIz6wCyeyJptkkwRGVSD/ESEVN38wFENJhhNIxUy3kIZO4GtZhhNMxU12HIXMz14uqRNIxzDptAMG0PkM20dm4W2EdRjQdzbu6pmGGEst76efDx9FjmIuAdSuuaP/OYr3ESIs0abXnJ/C40vPEno+0jvXpY/TsNCU1/j0fJXrxWAX9Tx+LI3LLLbfQp09udR0Kyk/5FiXL5Tj2XmBIWRjWPLqQHsmQdHu7WCVL3iW4D5VebxeLlG/f5MMYOlsHrZLFpFvYfRz91uu9sdFf7/OwSBbTDm7mODrX0UtbMPXKkeu9j7lKa8uo3IsMrferN7erajHyLeueC6M336vePFZB/9NH7oi88sorvP766/z617/+qFEFAZVOn+GumlpShaDUoV1uu8zh64XlBiixazNK7V7TT/mSJFFi167qqTdmVBZJplSnbHiJ3Zd1h0U2yUiUOrQrhxb3AkNCotSuvde/yOYzlY/QrRKdCqhFtt6pFaR3HL81vwqsh8tr1f7MfXlWeT1cLotHMwrl66XrsEpWHHLP5fu91uJeYUhIuCw9v19uS+8wMsfq+T1xWvymEm4/xLDmV123IH19pI5IU1MT119/PQ8//DBud/ZS0olEgmAw+KG/gnLT6bVjTeeICASn1mhX2zy1ZqzpZRNFqJxaM05zfF71+F5hzKvWZsyt6h3GiVXjNcdnV0ww/XmoCObqFBybXjbB1PEh85nPqdTu1jypZJzpaIIEzKqYqjk+tmg0DpMNwyQkZpQdrzk+1Dccj0XbATbGkDm+bIbmeD/3YIps5iYsGZnJJTM1x6ucfU23mZeRGVc8UzMqWGyvpMY52FRkUkZmuG+qZp8bl9VPP/d4U46ChEw/93hc1p4dUKvsYIB3umlGpXM4Plv+5fEL0tZH5ogIIbj22mu54YYbmDzZWDv6O++8k6KiooN/ffv2/ahO75jVSdUjKLbn1z8EwILEjIoh1OmUep9ePpQqZ/5PZDIS44v7McSn/aMeV9yfAZ7KvG+BEhKDvdWMKdKurjrUV8tIfz9TEaQaZymTSrXLsNe6KphYPMxUMmmxzcv08jGa42WOIqaXjTPFcFuczCrXdnZ8Ng+zyqeaYlhlGydUTNMcd1qczKmYbYohITGncpbmuE22MafyJFMMgcoJOqXeZUlmdvlppiZwFZVZ5adojkuSxKzyM/M+fjdjRvlpujbTys42tVymojK1TL8S7XGl55jamSNQmVR6rq7NuJLzTTPGlVyQ9+sL0lfOv8Zbb70VSZJ0/zZv3sy9995LKBTitttuM3zs2267jc7OzoN/+/bty/X0PvOyyVYuHTA178lVQXD5QO3JAjI32sv6T8/7RqsiuGyAfrlvSZK4pN/MvG+BAsEl/WZkzQG5qO+svCMWEnBh31lZkzjPrz0h72UNCYlz+szKupZ+dp85eTNkZE6rnonDoh+NOL3GHGNuxfF4rNpdfAFOrpprijGtbHLWJZ7Z5SfmPbnKyIwtGk+ZTmdagOllJ+ad3CsjM8gzgmqXfuO440pOwCblF0GSkKlx9qevS79FweiiWThlD/kkXEvIlNirGejRjkoCDPEdj8daktf9RELCYy1lsE//nlXnnojf1ifPqIiEQ/YyxDc3j9cWZEQ5fyo333wzmzZt0v0bNGgQb731Fu+//z4OhwOr1cqQIZkv/OTJk/n85z/f47EdDgd+v/9DfwXlri8MnsVwf3XOeRwScH7ficyuHJbV9rL+0xlf0i/nPA4ZiXnVY5hXrf2E361zaicztWxozk6VjMT08uGc2WdSVtuTqiYwu2JMHgyZccWDOK9OO0TfrWllo5lXNSXnG62MzBBvHZ/rm71/ypiiIZxZMzun43cz6tyVXNZP/8kYYLC3P+fXZrfriVHhKOXyfvpPrQB9XDVcXJf7k6eMTJHNzxX9LslqW+Yo49K+V+bFcFs9XN
Hvmqy2XpufS/p+KWeGhIzD4uTyfl/Oauu0uLm03zfyYEjYZBuX9cveB8Ym27mw7035uSGShYvqbs7KkCUL59TeSr7Ozjm138+aWCtJMqf2+UGXXW4cCTilz+1YTS4bFqStj6zp3d69ez+U49HQ0MBpp53GU089xbRp06iry94mutD0Ln+1JcJ8+f1/sSPUbPiJ/5Sa0fzquM8Z3nkTTMX4+rJ/sLmzwRBDAmaUD+Oe467AYbEZYkTSCb678h+sCRgrpiUhMbFkIPdM/Dxuq8MQI6Gk+NHaf7KsfYuhd0pGYri/L3dPuB6fTf8Jv1spNc2dGx/ivbbsvVC6Gf09Nfxq3NcoNphUqwiVP2x9hLeblxlkyNS4yvnF2G9S5ig29BpVqPx95+O83rTIMKPMUcIdo2+kymmsmaIQgif2Pc1LB4z1Bel2Qm4b+V1qXMaaKQK82PA8LzQYK3TV7YR8Z9gt9HUba6YI8HbzyzxX/7BhhsPi5KuDb6O/x3j7+SVt83l2/1+7vrv632AJGZts5wsDb2Ww13jX5TWBd3hu/x+6foPZGRbJyqX9bmOoz3jX5a3B93hh/68QKFl/65l+yBbOrbuVYX7tXJrDtSe8lJfrf4wq0gaWajKUeTW3MrxonmFGQRkdld13d+/ezcCBA1m1ahUTJkww9JqCI2JOkXSCe9a/wgv7V6MIccSPu7sVvc/q5PODZ/KlobNzDifHlRR/2PwKz+1fQUpNAx++TXUz3BY7lw+YwZeHnKS7hbMnJdU0f972Ks/s+4BkV12FQxndHV6dso0L+07nhqGn6m6p7UlpVeGfu17n6X2LiClJJKQPvV/dz1A22crZfY7nK0POMuxMdUsVmb4xT+17m6gSP4KR4UhYJJlTqqbwlSEX4LIYc6a6JYTg2fo3eXLffMLp6MH3/3CGLEmcUDGJLw/+HF5rbjlFQghebVzAU/teIpgOazIk4Piy47hu0KUU2bR3mWjpneZFPL3/OQKpzh7b23dHmI4rmcDnB1xJib04Z8YHbe/zbP2TtCfbemR0/9sY/ziu7P/5rEsyPWlNYCnP1z9KW7JZlzHMO4aL+15HpTP3JNRNwZW82PAvWhINuoyBnpFcUPslql3Gnalu7Qyv5dUDf6U5sVeXUecazpl9vkIf1+CcGfXRjbzReD9N8e1IWBB8uNiajAUVhSrnEOZVf5Vat7HO1IeqOb6VBY1/oCm+qUdG97+V2gdyQtXXqfMYd6YK+p8KjkhBH1IgGeW5fSt5es8KGmOdJNU0ToudIb4KLhswjVP7jM55Uj1coVSMF+tX8dTepRyIBUiqKZwWG/085VzS73hO6zMOV5YchGyKpBO8dmAVz+xbQn20jYSawiHbqHWXcWHf4zmtZiIeg1EQLcWUBG82rub5+sXsi7SQUJPYZRvVzlLOq5vOqTWT8GbJc8imhJJkYctqXmh4l72RRhJqEptso8JRzJk1Mzi1eip+m7mdHSk1xeLWNbzYsJDd0QbiSgK7bKPUXsSp1dM5pWo6xTrbjo0orSosa1/Dq43vsDO8l7iawCZZKbb7ObFyBvOqZlFiN7fNVBEKawLrmN/4FtvDO0moCSySBb/Nx+zymZxYeQJlDu3EaiNShcqG4DrebnqD7eFtJNQ4smTBZ/VyfNlMTqg4kXJHhSmGEIKtofUsan2dbaGNJNQYsiTjtniZVDKTmeXz8nJADmfsimxicesrbA2vJaHEkJBxWTyML57B9PJTqXKa2wAghGB/bAtL215mW2g5CTWGBDgtXkb6pzOl9AyqXQNNMQAOxLayqv1FtoeWkOiqyuqQPQzxHc/E0rOpcWVfPs6m1vgO1gVeYEdoYVehMoFddtPfezxjS86j2jmqV+oMfVZ1VDoi+ajgiHw0EkJk/YEllRQLWlazqmMroXQMGQm/zcPM8rFMKRuZNf/ECCOlplncuoYVHZsIpaJIgN/mYWrZGKaVjcmaoGmEkVYVlnWsZWXHeoKpTFVEn9XDxJ
JRTC2dkDVyYoShCpXVgXWs6FhNKBVCReC1ehhTNIqppZOwy/pOnlHGxuB6VnQsJ5wOowgFj9XDcN9IppRMxZElcmKEIYRga3gjqzqWEEoFUUQat9XDIM9wppTOxGFx9gpjV2QzqwLvEUoHSKtp3BYP/TxDmVRyAi6LfnTGKGN/bCtrAgsJpdpJixRO2UOtewgTS07EZdFf6jLCAGiMbWdd55uEUq2k1QQOi4cq52DGFZ+CO0udD6OM1vhONna+TijdTFqNY5c9lDkGMrr4dDxWfQes+9aejdOR2M3W4MuEU42k1Bh2i5ti+wCG+8/CY6vsFUYwuZcdwZcIpfaTUqPYZA9+ez+G+M/Ga9MvdmmUEUk1sCf0HOHUHlJKBKvswWOrY4D/PLw2/QiQUUZBxlVwRArKWx3JEE/te4eXGhYTUeIfCsFaJBlFqJQ7ijivdjbn1c7KeekAIJiK8Nz+t3n5wLuE0tEeGcU2H2f3mc15tXNw5xGBiKRjvHTgbV5tXEBnKvQhRvf/9lm9nFY9m7NrTsKXRwQirsR5rfEt5je9TUcq0CPDbXFzcuUczqiZl1fBrqSa5O3mN3mr+Q3aDls+6P7fTtnJ7Io5nFZ1OsX23OtXpNU077W+yTvNr9KabD4Y/j6UYZcdTC+bw0mVZ1KaR3RAEQoftL3Fu60v05xo+BBDQkagYpVsTC6dw9yKcyh3GM/16JYqVFZ3vM3i1v/SlNjTxVAB0cUQWCQL44vmMLPiPCrziA4IIdgQfIdlbc/RGN92BKP7/48sOoHjyz5HpTP36IAQgu2hhaxsf5rG+MaupYIPM0BiiG82k0ovoco1PGcGwJ7we6zreIzG2JoeGACCfp5ZjCu9nCrX2LwYDZElbOh4lMbYsg8x6EoPF6j0cc9gdMmVVLuzJ5f3pJbYcrYG/kVT9L2u5U4AtYshIVCodE1jaPE1VLmzJ5cX1DsqOCIF5aVd4QPcuvbPBJLhrFsoJSQGeWr45bivUOow/tnUx5r54dr/R2uiI2uCq4RErauSn4/9GhVO4xNsc7yNn228l8Z4S1aGjES5o5Qfj/omNS79p79D1ZEMcNfm37M/1pA1sa47mfLWEd+hzm281UEoFeKP23/H7sguQwyP1cN3hn2Xfu7+hhnRdIS/7vwtO8LdibranP8lU97CQK927ZTDlVDiPLTnt2wJrclqKyNjle18YcD3GOrLvrOqWyk1yVP7fsfG4JIec28OZ8iSlcv73cIwv/HJTxEpXm74A+s738rKkMi0KDiv7hZG+LXrmhwuVSi80/Qn1gX+e9BB02OA4JSa7zKy6FTDDCFUlrbez7qOxw0xBCozK29iZPH5OTAEa9v/xtr2vxtkKBxX/k1GFV9hOCohhGBb58Osb/tdj7kePTFGltzAiJIvFyIfH4Nymb8L3XcLAqAh1srNq/9kyAmBTJ2OXdFGvrv6PsIpY432WhMd3LL6D7QmAoZ22QgEDbEWblnzezqTIUOMzmSIH63/LY3xVkMMFUFrooMfrv8tbYmAIUYkHeHnm+6hPnbA0E4eFZXOVJCfbbqb5niLIUZcifObrXexJ7LbMCOSjnD35jtpiDUYYiTVJPdvv5sd4a2GdkOoqMSVGPdu+yX7orsNMRSR5h+772ZraJ0hexWVlJrgb7t+yW6d7rMfeo1QeGLvPWwKfgCQ9f1SUUmLFI/s+SU7wsZ2MQmh8mL9b1nf+bYhhkBBReHZ/XeyLbTEIEPwduMfWRd4sesY+r/DzO4SldcP3M2W4FuGGMBBJ8QoAwTvNf+GzYEXDDPWtv+dte1/z4EBK1vvZVPgMcOM7Z2PsL7tdx86RjbGpo4/s7njL4YZBX08KjgiBaEKlR+u/SvRdDynYlKqUKmPtfDrLY9ntRVC8PMNfyeYiuTGQKU10cndm/9lyP63W/9Oe7IzZ0YwFeaeLX/BSIDwgR3/oDnemjMjmo7x6633GmI8uuch6mMNOTOSapI/bPutoTb0z+3/N3uiO3
KqOCkQKCLN/dvvJqUms9q/1vgfdoQ35sxQhcrfd91F3EAn20Utz7IltDzHImUZx+vfe+4kku7Mar2i40U2BheQzVk7kgHP7r+TYCq7A7qp83XWd76UIyOj1xvuoiO5P6vdrtA7B52QXPVe829oi2/LatcQWcLa9r/lxVjR+keaY9kjZ22x1axr+21ejE0df6Ypujiv1xb00ajgiBTEio6t7Is1o+RR0VJF8F7rWhpj7bp2m4K72Bbem1fVTBWV1YGt7I0c0LXbHdnP+uDWvBnbwrvZHt6ja9cUb2ZFYE3ejPrYATYEN+vaBZIBPmhfkldJahWVtmQrawOrde2i6Qjvt72dV4VRFZVQupNVHR/o2iWUOO+2vpYXQyCIKRFWdOjXK0mrKRa3/jfn43czUmqCFR1v6tsJlSWtT+XFAIEqFFZ1vJyFIVje/gT5FPXKUARrO57Pare24zETPVckNgSyvw8bOh7NmyFhYVMgu6O0rfORrqWp/BjbAsZquxT08ajgiBTEC/WLTPXekJF46cD7ujYvNSzCYoYhybx04D1dm9cazV6HzCuNC3Rt3mxeaJoxv0k/jL6odUHeJci7GW82v6Fr80H7QkNREy1JSCxoeV3XZnVgMUk1njcD4N3WV3UjSJuCS4kq+TfHFAg+aHsZVee92BleQSjdaoKhsrLjZRSR0rRpiK2nI7mXfKIh3Yz1gVdIqdrLpG3xbbTEc4tOfZihsD34OglFe5k0mNxHY2yZKca+8AKiae0IUizdTEPk7azLMXqM5tgSwqlCC5GjRQVH5DOu9kSQD9o25fWE3y0VwUsNizUnjEg6xqLWVXlFXA4yhMr8xiUHi6YdrqSa4p2WJSavQ+Xd1uXElJ4nT1WovN280DRjRccaOlPak+eClvwiFYcyNoc20ZrQvpm/2/KmKYZAsDe6k4aY9s38/bb5phq/AbQkGtgT1V4OWN7+mokn/IyCqTZ2hrVzWFYHXjXNiCshtoW0I0gbAi/n/YTfrbSIsy24UHN8S/BF0wy1yxnR0o5eYGSOox1B2hvKLwJ2qCQs7A4+Z/o4BfWOCo7IZ1yN8XZTE1K3QukoUSXR41hrIoAi8p+8u5VQk3R21QI5XIFk8GDVVTNShKKZtBpVYkQVY4m5ehIIWhNtPY6l1TSBVM/8XNWi44i0JZt7hdGaaNIZa+yV71ZbUo/RkPfT96FqTzZq8xP7TTMkZAI6jI7kvryf8LslY6Ezpb182Znc3yuMUKpeczyUMv9egUQ4pZ3vEk7tM+3gCgQRHUZBH68KjshnXHEN5yEfaUUSYr3K6PlYcbX3GHGN69D6995kJD6G61CEYmpZ5lAldN6TlMiezGqMoe38mV36gYyTkNBZ0kjqjBlnSLrH6Q0GSCRV7eTeVFeFUjMSCAMMc86nQCWlw0irsV5wcFXSas8PNQV9/Co4Ip9xOfMoSKYlt0blzXyKnuXO0K/6mYtcGgXUepVh6ZnhkHvvvdJiWCQLFim3XjxacmowAOy9dC26jF74TAQqDlnvOnLrxdMzQ2D/iBnZjmOXjTVP1JOEpMuwyR7yTbj9H0PGpsOwyi7TERGQsWWpsFvQx6eCI/IZV42rrBd+1FBk82o6HBWOYqxZyrUbkcvioMjW882j2ObD0Qttuq2SlVKN/iguiwuv1VwPGMgkk1Y4ynrmy1ZK7eZ6p3Sr0qldoK3CUdUrjAqd/iiVjj698t0qd+gx+prO38gwtAvNVTj6mWYIVEodtZrjpY7+vZC/kabErt3VvNjer1dyRIrs2uXS/fb+vbBsouK3axfl89kGmI6ISIDXZrzwX0EfrQqOyGdcJXYfM8vHZO0doycZiXP6zNCsVui2uphbOcncrhlkTquertm51ybbOLlyhukdLXMqpmpGPmRJ5uTKOaYZU0qPw6fTjXZuxUmmbuYyMmP8Yyi19+zsAMyuMNfWXEJmoGco1U7tCXx62SmmJgwJiWpnX/rqdHGdUnqa6ZyEYlslAzzaVV
wnlJxhmuG2FDPYO0VzfGzxWabzN+yymyG+2Zrjw4vOMc2wSDYG+7S/O0P8Z/dKPs0g35ma4/185/RKjsgA3/mmjlFQ76ngiBTEubWzTSWTCuDMmum6Nmf1mW1u1wwqZ9bol8o+tXq26R0tp1efoGtzYuUJphmnVM3VtZlVfoKpG62KyomV+o7GlNJZ2KT8Oy4LVE6o0C8rPr54Ok6d5YjsDMGs8tN1y3EP90/BYy3OmwESx5ediazjiA/wjKfIln8ESULmuNKzdJfDqpwjKHMMJN9lDQmZMcVnYdVZDitxDKTKOdZUjY+h/tOx6yxpeG196OOebqrGRz/vSbh0Gvo5rWXUek42xahyz8SdpdleQR+fCo5IQUwoHsIgT5+8oiIyEnMqJlDhLNa1G+brz0j/wLyiCTIyk0tGUevW7wXT113DhOJReTNG+YcwyKvfpbPCUca00kl5M/q7+zLCp9/C3G/zM6N8Vl7OiIxMlaOKsUXjdO1cFjezKublzSi2lTK+WPsJH8Am25ldof1kqycJGY/Fx8RifefTIlmYVX5engwJh+xiYsmJ+naSzPTyi/NmWCQbE4pPz8KQmFx6GfklekpIksy44nOyWo4rvdJUxGJU8UXZbUquNFHjQ2VUyWVZ7YYUX22qVsmw4mvyem1BH40KjkhBSJLEz8Z+Ca/VndMEKyPT31PNd4ZfYsj+9lFfpNTuz2mJRkam2lnGd0cYu3HcOPQLVDrLcr6OEnsR3x12vSH76wddS62rJmeGz+bl5mHfMNRw64p+V9HfPSBnhtPi4ttDb9Z9wu/WuX0uY7B3eE7OiIyMTbbztSHfxyZnj6jMq7qIEb6JOTEkZCyShesH3Y7DQDLqjPJzGV00I0eGhITM1QN+gNuavWnjhOIzGFd0KrlFLCRA4qK+P8Jn014m69aIopOZUHJhDsfvZgjO6PNDiuzZn/D7e2cysfTzOTIyOqH6Nkodg7La1bgnc1z5N/JiTKv4HuXO0VntSp1jmFB+W16MMaU3UuHSd6IL+nhVcEQKAqDSWcLvJ36LCkdRV4NufUnAcH9ffj3h67itxnYulNr93DPhRqpd5YYmDQmJAZ4a7prwbXw2Y7sKfDYPPx9zE33dNYYZNa5Kfjn2Zors2nkbh8plcfKDkTcz0NOva0LT50hIlDlK+fGo71PmMJaIapft3DTsuwzp6nKbnSHjtxVx64jbdZNUD5VVtnLD4O8x0j/OEENGxm318u1hP6LGpZ0UeagskoXPD7iJMUVTDDOcFhc3DP4xde7skx5kcnc+V3cj44vnGGbYZAefH3gH/T2jDDEkSeKMPt9kYvEZXQz9W6eMBatk43N972CQ9zhDDIATKm9gUuklhhgSFmQsnNnnxwzxGe/we1zZFzmu7LqDx8jGkJCZU/0DhvpPM8wYVXwlx5V/0zADJKZV3MKwYuOO2KCii7ucEckgA8aUfYehhWjIUSdJGOnA9QkplzbCBfWOgqkIL9S/xwv179KRCmGR5IMVUyVJQhEqda4KzqudzZk1x2O35J5nEEnHePnAu/y3fiFtyc4eGVWOMs6pPYEzambitOS+GyauJHi9cREvN75DS6L9IEMAchejzF7CmTVzOLVqNm6NLbt6Sqop3m5eyOtNb9EYb8YiWboYAhkZBYViWxGnVJ3IKVVz8eSx4yatpnmvbRFvNL3OgfgB5K7W7wKBLMkoQsFn9TG34iROqjwZny3334kiFJa2LWJBy2vUx/Z+iCEho6LgtniYWX4ycypPpchWkjNDFSqrAu/xbssr7Ivt6JHhlF1MK5vH7PLTKbaX58wQQrC+czFL2l5kb3Rzjwyb5GBS6Tyml59Nqb06L8a28BKWtT3P3ujaQ5yF/zGskp2xxacwpfQ8yhzGHLbDtTu8lFXtz7A3uryLISFQkZFRUbFIVkb45zGx9CLKHAPyYtRHl7O+40n2Rd7vct4yDAm5i2VhkO9kxpRcQrlTfzlRS02x1WzqeJx9kYVd7u
GhDIGERD/vSYwqucxQJKQntcfXsT3wKPWRNw4es5vR3Se5j2cuQ4qupNxl3CksyJxymb8LjkhBPUpRFd5v28DKjq2E0lEsyPhtHmZWjGVc0WDd5QVVqKTUFHbZrmunCJUV7ZtY0bGRUCqKRCaiMa1sLOOLh+ouL2QYSWyyPavd2s7NrGhfTygdQSDwWT1MLBnNhOJRunkxQgiSaiIrQwjBptBWVnSsIpQKoyLwWj2M8Y9kYsk4LDpbl4UQpEQCq5SdsT28nZWB5YRSQVRU3BYPI3wjmFB8HFZZOxHyfwwbcpZz2RPdyaqOJYTSQRSRxm3xMNg7nPHFU3WXYoQQpEUCSxYGQH10F6sCiwmlAygihdPiob97GBOKp2PT2YL9P4YVOUsdlKb4HtYEFhJOdZASSZwWD7WuIYwrno1d1o7gCSFQRALZAKMtsZ/1nW8SSrWRFgkcspcq5yBGF52Iw6IdwRNCoIoEkmRBzpIwHEg2sKlzPqF0M2k1hkP2UuYYyIiieTgt+hE8RU0gSXJWRijVyPbgq4RSB0ipMeyyh2JHf4b6T8dpKdZ9raomQJKQJf2HhWi6mR3Blwml6kmrUWyyG7+tH4P8Z+Ky6i9bqSJT5E+W9OvSxNNt7A39l1BqD2k1glV247H1pb/vHFxWY1HCgnpPBUekoI9dnalOFrYs5J2Wd2hPZjrxSkhUOio5qfIkZpXPwm01V7QpnA6xuHUBi1rfpD3ZevDpp8xezuyKeUwvOwGv1djyipai6TDL2hfybuvrtCWbDm4/LbaVMaN8HseXnoTP1nOdEaOKK1FWdyxgSdsrtCbqDzL81lIml53ClNJT8NvM1RJJqnE2BN5hefuLtCT2Hkzs81hLmFB8KhNLTqPIbu7mnFYTbAkuYHXHc7TEdx5kuCx+RhWdxriSsym2a9cAMSJFTbIzvID1Hc/QEt9yMAnSIfsYVnQ6o4rPo9je1xRDFWn2hheyKfAkLfH1Bxk22csg32kML7qAEgO5EdkYB6KL2B54gpb4KgSZnklWyUNf76kMKbqYYsdwUwwhFFpi77I7+BhtsQ8QZFoeWCQ31Z5T6O+/jGLHWJMMlY74+zQEH6Yj9i4qmeq5suSi3D2PPr4r8TsmGsqD0mYIQoklNIceIhB/C9HliEiSg2LnSVT6rsHnON4Uo6CPXgVHpKCPTTElxiN7HmFJ25KuIHjPXyerZOWkypO4uO5i3af3npRUEzy57xGWtC9EFWqPDAkJWZKZXjaHz9VdhT3H4mZpNcULDY/yftsbpIVCT7sXuvNBJpXM4qK66wwlUh4qRSjMb3yU91tfJq1R/rw7nDymaAbn1d2Ay5Lbco4QKotaHmNJ67OkRJzuZMbDGQLBMN80zuzzTTzW3BwrIQTL2p5gWdvjXeW+tRgqAzxTOKXmJrwGkjUPZ6wPPM2K1n+SUEMHj9cTo4/7OOZWfx+fLfellq2dz7Oy9QESakCDYUGgUOkcz4yq23SLeWlpT+hl1rb9nrjSpssodYxhcsWPKHIMyZlxIDKfTW2/Iq40ARY4bNdKN8NvH8HY8p9S5Mh9GaQt+jbb239GPL2/R0b3v7ltQxlW9jOKnLkvgwTj77G7/Yck0rt0GQ7rQAaU/gK/c0bOjII+HhUckYI+FgVTQe7ecjcHYgcM1daQkBjuG86NQ2/EYbDsezQd4d7td7E3ustQcSwJif7uQXxz6Pdx6YTHD1VCifOXnXexK7LZIEOmxlnHV4f8EK+BHRcAKTXJo7vvYlt4NUa2aErIlDlq+OKgnxqOjigizbP77mZLaLEh+0yCazlXDbiTYruxOhmqUHi94TdsCr5hmOG2lvC5fndT6jAWuRBC8G7T79nY+ZxBhgWHxcvZdb+jzKld/Oxwxsq2/8f6jkcNM6yyi1Nqf0+F01iCK8DG9r+yoePPBhkysuRgds0fqcghl2FX50Nsar/boLWMLFmZVHkvFe6ZhhkNocfZ1nZH139l+/7KSMiMqvgD5Z5TDD
PaIs+xs+2mruNnY2RyWgaV/ZYyz/mGGQV9fMpl/i7smikoLyWUBL/b+jvDTghk0sa2hLZw/477UQ0UUEupKf6847fsi+42XKEz055+Fw/s+B1pNZ3VXhEK/9z9O3ZFtuTAUGmM7+evO+8iqWZv7KYKlSf3/YHtBp2QbkZ74gD/3PV/uk3fDtoLwcsN97Il9L6h43czgqlW/r3nh8TSIUOvWdD0gGEnpJsRTXfw9N7vE0l3GHrNsta/G3ZCMgyFhBLixf03EUppd+o9VOs7HjHshHQz0mqU+fU3EkzuM/Sa7Z3/MeyEZBgqioiz6MC36ExsN/Sa/aHnc3BCAFRUkWJF8zcJJNYbekVL5LUuJ8SIg5BhCBQ2tnybQHyZIUZn7J0uJ0Q1yBCAys62m+iMLTDEKOjoVcERKSgvzW+az57onpyrjAoEazrXsKRtSVbb91rfZntkS84MFZWt4U0sbnsnq+2KjnfZHFqTc3EkFZV90Z0sank1q+2m4FI2dL6fc7lzFZXm+H4Wtjyb1XZXZDVrA2+Sa0EsgUog2cSilsey2h6IbWJ1x3M5Hb+bEUl38F7zg1lt2xM7WdX+cF6MhBLi/eb7stqGUg2sbDPuIBzKSKsxPmj+TVbbeLqV1a2/zpkBmUTWFS0/z2qZUoKsb/tpnow0a1t+QLaAuKLG2NKaT70OgUBlc8v3EFkeOlSRYmfbzeRXzE2ws+0mVJHK47UFHS0qOCIF5SxVqLzZ/GbefUQkJN5o1n+qFkLwdsvrmOnk+Xbz61lvtItaXs2ruihkbrXvtr6WNbrzfutLWWtCaDNUlra9RlrVv9Eub3sx7x44ApXVgddJqnFduzXtL+RdVlugsDn4FnFFP/KyIfCcKcbu8CIi6TZdu62dz5r4zBUaYksJJvfr2u0MPZf370Og0pZYRyCxTdduf/h5ExOwSji1g0Bita5Vc+QlFBEmPydBJaE00BF/T9cqEHudtNqWJ0OQVtsIxObn8dqCjhYVHJGCctbazrUEUoG8Xy8Q7IrsYk9kj6bNtvAmWhKN5Hdzyqgp0cCOyBbN8b3RHeyPGcs90VIg1cbm0BrN8Zb4fnZFNuQccTlUUSXEpuBSzfFgqoVt4aWmeuCk1DgbOxdqjsfSnWwJvoOZpmmKSLOxU3vCSCoRtnS+aooBsDnwovY5qAm2dD5v6vOQkNkafF5zXBVpdnT+B0wxLOwIPqk5LoRgd9D40pIWY0/wcV1GffBfmHkYAAv1wUd0LZpC/8TcVGTpOkZBn1YVHJGCctay9mWmOtBCprrl8o7lmuMrO5Z2FaQyw7CwskN7Al8T+KAXGDJrAtrLTOs7l+QdDemWhMy6gPZT5Zbg+6amim7KBh1HZGd4CapJBwEEWzrf1hzdH12G0rVVM3+CyvaQdrStMbaKlBo2zdgVfF1zvD2xgbiiH5XJzlDYF35NczyU2kosvR8zjrpA4UDkNc2lk3h6P5HUFlMMUGiPvYOiEW1LKe2EE8sw47SBQjixlJTSbuIYBX2SKjgiBeWszlSnqadvyFRQDekkSIbSQVNPrZCZMMLpoOZ4ZszcpjEVlVBKmxFJd5qudyBQCaUD+gzTP2Whm0waTQfyXjI5VHqMWLoDc0/f3ccJaI7FFWMJs9kUVzs1xxK9NCGm1DBC9Oz8JXuJIUiTFhENfm9N7IK0GuhxJLMk0ztK99r5FvRxq+CIFJSzFI2bYy7KVLDUPo4qFFNLJt3KxugNKUJ7d46KYtbXMcbohQk8G6M3ykfpRVXMR1wMMHrpM9dyELKN5Sqta+lNhtD43LX+PR9p5bL0JkMUElY/tSo4IgXlLK/Vm3eyX7ckJNw6dT7cVo/p5Z8MQ7sgWKZYmPnr8Fi9OgztsVzk0akY65Q9pqNHAG6L9l5/h+w1HQUDcMra74dD9tIbXptD1v7M7VnKohuVTdb+7trk3mHI2LBolE63yb1XV8mqcb5W2VwF4UNl0ziWpRcZvXmsgj5eFRyRgnLWcN9w09
EKBYXhPu2S1kO9I0xPfCoqQ7wjNMeHeEeZfgoXCAZ5R2qOD/SYZ0hIDPSM0Rzv7xlr2hGRkOnvGac5XucZh1knQUKmn0e7UFe1a7yp42cYFmrdkzXHK51jeiFnx0KNDqPEMQqZ3JtBHs6ocE3SHPfZh2KRzLVMAJlixzjNnjouW39ssrlWAyDhtg3BouHs2C3V2C21Jhlgt9Rit+ReWbego0MFR6SgnDWjbAa2LI20sqnIVsT4Yu2J57iS43HKuXfEPVQui5vjSqZqjo8ummS4MqqWbJKdySWzNccHecdRYjPX00VCZlLpyZrjfVzDqXAMwFx0R3Bc6Rmao+WOAfRxjTY1iQtUxpWcrTnut9fQ1z3VJENhdMkFmuMuaxn9vHNN5bsIFEYUfU5z3G7x0d93lmnGkKJLNcctsou+vs+ZzNtRGeC/SnNUlmz08V2OuWlCUOu/RjNPSpJkKn2fx9x3V6LKdy2STtPIgo5uFT65gnKW2+pmZvnMvJdOJCTmVc7T7Uprl+3MKj8xb4aMzOzyk3W7uVokC7PKT8t74pORmVY2F6dF22GSJZnp5WeR741WRmZc8Sw8Og6TJElMKTuHfCMWEjJDfdPw28p17SaUnpd35EVCpq97fNYy72NKLjIR3ZEodwynIkvL+pHFnzOxRVjCb+tLlWuCrtXgIjMMcFkqqMlSgr2f/xJTDJtcRJVnnq5NjU/bGTIiWXJR5TlH16bCczESufWfOlQSVso92o5hQUe/Co5IQXnpzJozcVgcOeeKyMgU24qZWzk3q+1JlWfgtnhydhRkZDxWLydWnprVdmb5KfhtxTk7PBIyDouLEyu0n/C7Nan0ZErslXkwJKyyjbmV2W+yY4rmUu7ol4dTJSFLFmZXXJHVcohvFpXOoXk5bhISMyq+kNWur2cqNa7xeTJgWsVXstpVOsdT656epwMqmFT+jaw7oUocI+nrOZV8HdBxZTci6TjqAF7bAPp6P5c3Y3jJdzRzULrlsFZT5782r+MDDCj+NhadnB0Aq6WEmqKv582oKfo6VktJ3q8v6JNXwREpKC9VOCr4ztDvYJWshidYGRmXxcX3hn8Pr06CZ7eK7SV8Y+gt2GV7Tgy77OAbQ75PkS37zclr9XPD4NtxWFw5MaySlS8P+j6ljuzLLk6Lm+sG3oHL4jPMyDRAs3DVgNupcGZfQ7fJDi7v/394raWGJ1gpQ+GivrdR7cre5t4iWbmg78/x26pzmMQzHYtPr72VPu7szeIkSea02l9QbO+fEwPghOpbqPNo51X8jyExp+ZnlDqG5eyMTK34Dv282ktxh2pK5U8od04gV0dhTOnX6ec73ZDt6PIfUO6akTNjcNGX6Oc3FkUYVPI9yt2n5czo47uaOn925xOgj//blLkvyun4AGWez9HH/+2cX1fQ0aWCI1JQ3hrqG8rtI2/HZ8skomlFR7on3wpHBT8e9WNqXDWGGf3cA/ne8J8cdCq0Jo5uRrG9lO8N/wl93f0NM6qddXxn2C8otVd86FiHq5vttfr59tD/Y4BHfwngUJU6qvna0Lspd9TqXkf3e+iyeLl+8M8Z7B1rmOG3lXPdoN9R5Ryoy+ieUOyymysG/JyhPu08msPltpZw+YA/UuMa2XUkraf2DMMmOTmv788Y7p9jmOGw+Div35/o455ogCFhkeyc0udnjCg60zDDJrs5re4+6jwzsjAyozI2ZlX9mJHFFxtmWGQHJ9TcR1/vqYYYElYmVfyAkSXXGWbIko3JVX+iznu+AYYMyIwsvYXhpTcaZkiShVEVv6ePrzufJBtDYkDxjQwp/aHhGjqSJDGw7B6q/V8l87nqMSyARLX/qwwsvdt0nZ6CPnlJIlszjk9QubQRLuiTU0pNsbxjOW82vcmOyI4jxkf5RzGvch7ji8cj55lQpog0awIreLv59R7Ltg/1jmBuxamMKz4Oi8YugOwMhY3BVSxqeZVt4SM7kw5wD2V2xemMK5qKVc4vWVcVCttCq1
nS9gpbQyuPGK91DWZ6+ZmMKZqBTXbkxRBCZVdkDcvbX2RbaCmH545UOgYwpexcRhWdgF125skQ1EfXsbrjBbaH3j0ir6PE3peJpecz0n8ydp1t2tkYTfENbOh4lh2ht4/Ih/Db+jCm5CKG+U/DYWJbbmt8I5sDz7Ar9DoqH65r4bFWM7L4cwz2n4XTkv/20EBiCzuCT7E79CKq+HDHZpelgiFFlzLAdx5Oa/67VELJ7ewNPsH+8LMo4sOVTO1yKf39l9PXdxFOa/7J09HULhpCj9EYehLlsEJoVrmYPr4rqPFditNq/GHjcMXTe2kJPUpL+DEU8eFigRbJT4X3cip8V+K09subUdBHr1zm74IjUlCv6kDsAK3JVuJKHJfFRbWzmnKHdhJkXImwL7op04ZeytSy6OcZrTtBNscbaU02E1diOC0uKhxVVDiqNO2Taoz66HpiSuam5rT4qXWNwqFTY6Qt0UxL4gBxNYZDdlJmr6TS2UfTPqXGORBbS1zpRAiBw+Kj2jUGp84EGUi20JKoJ6FEsclOSuwVVDq1kznTaoKm2FriSgcqCg7ZT6VrNE5LseZrgqlWWhP7SCgRbLIDv62CCkd/zadIRaRoja0mrrSjijR2i48yx2ic1jJNRiTdTmtiNwkljFVy4LWVU+EYpMlQRZr2+CoSSiuqSGKT/RQ7RutOkLF0gLbEDpJqCItkx20to9wxTJMhhEJnYhUJpRlVJLDKPnz2UTit2p9hQgnSnthKUgkhSzZc1lLKHCM0d2MIoRJOrCKpHEAVcSyyH7dtJE6b9meYUkJ0JDaRVDMMh6WEUscozXwQIQTR5GpS6f2oIoZF9uG0jcBhG6jJSKsROhMbSKmdSFixW4opcoxB1tjpJoQgkVpLKr0XIaLIsg+7dSh221BNhqLGCCXXkVYCIMnY5GJ8jnHIGjknQgjSqQ2kld0INYIke7FaB2OzaW+vV0WcSGLNwaqsVrkYj2M8spSf81zQx6uCI1LQUa/G2C5WtL/CmsBbpA97QrRJDiaWnsrk0jMod9TlzWhN7GFNx4usC7xG+rAeJlbJzuiiU5hQcg4VTu2bejYFkvvYEHiBjZ0vk1KjHxqTJRvDfKcwtuS8rDs59BRKNbA58DxbOp8neVifFBkrA30nM7L4Aiqco/MOU0dTjewIPsv2zqdJHla+XMJCnfckhhZdTLlzQt6MeLqFvaFn2B18nMQRvVhkqt0nMrDoMsqcU/NmJJU2GkJPsz/0CEml6bBRiTLXHOp8V1LqmpX3ds+0EqAl8iSNwX+RVOqPGC9yzqbKdw3FrhOzJpxqSVGDtEeepjX0IMn07iPGPY7plPu+QJHrFKQ8I4CqGiYYfYZA+O+k0kd2+nXaJ1PsvQ6v60ykLEmt2owosdhzRMIPkk5vPGLcZhuPx/tFXK6zkQoOxjGlo8YReemll/i///s/1q5di9PpZM6cOTz33HOGX19wRI49KSLNKw1/ZmXH68hYNIt9ScgIVI4vO59TqnOrEaAKhQXNf2VF+7MHj9Mzw4JAYXzxWZxc/XXkHCYNIQRLWx9kRfsjhhjD/KdwYvX3sORQf0UIwdqOR1jR+hckpKyM/t45zKn+MdYcl3S2Bh5nVevvkCAro9o1nRk1d2LLshPicO0NPsPa1p91FcLTZ5Q6JzGl6g/YdSq99qTG8Itsar21awlHawuwBVDw28cxruoB7JbclkLao6+zvfXbCJFAe7t0huGyDWdE5b+wW7WjdT0pFFvA7tavoIpux7YnTobhsA5iUOUj2K3626IPVyyxhIbWa1FFELo++SMlAypWSx215f/GbhuSEyOZXE1721WoavvBY2kxZLmKsvJHsdmyJzQX9OnQUeGIPP3001x//fX88pe/5KSTTiKdTrN+/XouueQSw8coOCLHllSh8J+9v2RraDm51LwYWzSX8+tuNOSMCKHyUsPdbA5qd3k9UhJDvNM5t+6HhpwRIQQLmn7Dxs6XcmLUuY/jrLpfGc
5hWdryJ9Z3aLdpP5IgU+Eczel1vzfsjKxv/ysb2v+SE6PIPoST6v6qW+b8UO0I/IuN7b/JgWHBY+vHzD4PG3ZG6kOPs6XtDrQn1cNlwWmtYXLNE9gt+vVTutUafpYdbTd3/Zcxhs1SzujqZ3EYzJnojL7C7tYbuv7LSD0VCxa5iKFVz+OwDTDEiMTfoaH1mq7jG2PIkpu6yhdw2LSrIR+qROID2lovA9JgqNaJBUlyUFb+DHa7doXfgj49ymX+/kh2zaTTab797W9zzz33cMMNNzBs2DBGjRqVkxNS0LGn1w88yNbQMnItvLWu8x0WNBubkN9reThHJwRAsD38Pgua/mbIenX74zk6IRnG/uhKFjb93pD1psCzOTkhGYJKS3wDixp/Ych+d/DlnJyQbkZncjuLG2/DyDPMgcibOTkhGYZCJLWXZU3f0mxRf6jaYu+ype0nB19tTAqJ9AHWNH1ZsyHboQrFl7Gj7btdxzfOSCmtbG6+BlWNZ7WOJtexp/XrXcc3WtRNQVE72dl8BYqq3c26W4nUVg60XYdxJyTDUEWU+pbLUAx0/U2n99LedjWQwpgTkmEIkaCt7XIUpdHgawo6VvSROCIrV66kvr4eWZaZOHEiNTU1nHHGGaxff+ROhEOVSCQIBoMf+ivo2FAw1cay9hfzfv17rU8TU8K6NrF0kKVt/8mTIFjZ8RzhlH5b8pQaY1nbv/JmbOp8ic7kkXkFh0pRk6xozc1B+B9BZVf4LdoT23XtVKGwpu3evBmN0cW0xdfq2wnBprbfkU/BLYFCe3wlLbH3s9ru7PhtzsfvZoSSG2iNvpnVdl8gPwYoxFPbaYtm/+43df6ua2ks1yC1QlLZT3vkqayWHcF7u7rU5lq9VkFRW+iMPJLVMhy+HyFieTGEGiQSfjDH1xX0addH4ojs3LkTgJ/85Cf88Ic/5MUXX6SkpIS5c+fS3q7tUd95550UFRUd/OvbN7d1z4KOXq1sfw0z/SQUkWZNh/6Esb7zNdMN5tYFXtUd3xqcf0Tiay6SkNkQ+K+uze7wOyQNPN1qMyxsDjyna3Mg+h5xpdUUY1un/sTXFl9OJL2X/EvPW9gd1I8KBRPrCCU35M0Amf1B/ck1ltpBKLGE3CfWbkk0Bv+ha5FMNxCMvYHxCMKRag09qBulUpQ2QrHnTTBUAuEHEUL79aoaJhp9wgRDIRJ5uCsHp6DPinJyRG699VYkSdL927x5M6qa+cH+4Ac/4KKLLmLSpEn84x//QJIknnzySc3j33bbbXR2dh7827dvn7mrK+iokCLSLG9/2WSHWMHSthc1b7RCqKxsfwEzHWIFKqs6/ouqc6Nd2/EMZhwqgcrGzhdJq0lNm42Bp003ftsWfJmkEtG02Rb4j+mmbPvC84krHZo2u4OPm2Y0RRcSTR/QtKkPPma68VsgsYxI8sj6N91qDj2KfoGtbBJEUxsJJ9ZoWrSFH8Vs08JkejfhxGJNi87o4+TvTGWkqM1E4m9ojseiT4NJJ0KITmKxXJc+C/o0K6d9XzfffDPXXnutrs2gQYM4cCBz4xg16n8Z0A6Hg0GDBrF3717N1zocDhyO/Io4FXT0KpBsIqqYX2YLpDLH8ViPLCwVSXcQSjebZkSVDoKpZortRyYXptQYHck9phlJNUIguZdy55G7EIRQaYlvxIxDBaCIJO3J7VS7eu5w3BpfY6phGmQchY74Rmo8PTdna4stN80AQSC+Fre352TPjsQHvcCAzsRKPPbBPY4F40swE6nISCaUWIHX0fPnEUksxayTABaiiWX4nD1/HvHEsl5gWIknluJ1ndbjaDK5jMzzrZn3y0oyuRS3+0ITxyjo06ScHJGKigoqKiqy2k2aNAmHw8GWLVuYNWsWAKlUit27d9O/v/HS2wUdG4rrPJnnfqxwj45IQu1dRk9KZMlRyUUJtedjZWqR9M5GtqTS8/KOKpQjKm/mzdBZQkqL3vlMkqq2E5s2sYT1P8mkdRmdmmNGJS
Gj6DK0I0vGpc9Q1OyJpkZ0eLXTQ6WqnZh32lREL7znBX16lH/vZR35/X5uuOEG7rjjDvr27Uv//v255557ALj4YuO9Ggo6NpRvyfVcjpVLDZCjgSFrMnrvvdKqpCl19Rwx/3Sszchweuf90mf0xvslkPQYOdR+0SboH0fCPEMC3cJj+RYlO5KT7b0yuoVamwC9c64FfTr0kTgiAPfccw9Wq5Wrr76aWCzGtGnTeOuttygpKbRr/qyppwhGvnJr1JVw65Q5z5lh7flYDotft3hZTgyNtuUWyYFFcqD0QrKeS6NYlyRJOGQ/ia7S2Wbk1CkI5rCUEk3r7xAyIodFu7y8w1JBStXf6ZRdQreWiN1SScJE0m1GaWyy9nXYLFXEUxsx4xwKFKw6DKtcRXchtPwlsOi8V7Jc0cVIa9pkl4Ss85kXdOzpI+u+a7PZ+PWvf01TUxPBYJD58+czevTojwpX0FEsn62MOtdwUwmYEjJDvZOxW1w9jjssHgZ4Jplm1LrG4LFqOQlWBnlnm3zSlyhzDMZv67nniSRJDPLNM83w2WopdWhXwuzvO8N0xMJpKaPMqd0duNZ7NmZvMVbZR7lTuztwlfcczCV5giw5KXOdoDle5jkPs8tlElZK3Kdojpd4zsN8hEpQ5NbuQOx1n4v5ZRMFn+tczVGX+1zMOSEAaVyu800eo6BPkz4yR6Sggg7V1LJzTEUSBCpTys7StZlYcq5pxnGl5+najCk532RypGBcyYW6vVRGFl9oOgFzVPFFuozBRWYZMkOKLtZdSurvv8jE8TOOYX/f57DoVImt8V5oyqGSsFDjvQCr7NW0Kfechyz17AAbk4VS99nYdJ7yi9xnYpHMVI+24HfNw26t1bTwOOdhkXMrN384w2WfoVvq3W6fgcUygPydQxmbbWyhuupnTAVHpKCPRSP903FZfEh53KAkZIpsFQz2TtS1G+idgtdanidDwmUpYohvhq5dH9d4im1984y8SNhkN0N8J+lalTuHU+4YkTfDItkY4j9D18pvH0Cla0rek7iExCC/vtPmslZT7Z6bN0Mg6O/XzymzW0qp8pxNvttrBQq1vst1bSyylwrvxXkzQKHKf42uhSw5KPNdTf63ZIVy77W6FpJkodh7nSlGse+6LAwJr/dLeR4fQMXj+aKJ1xf0aVTBESnoY5FFtnFh3XfzeKWEhMQFdTdn7TUjSxbOrr0tz4lP4uza27Im1kqSxLw+P+iKBOTu8MyruR2bnL3L6Ozq27FIjjwYgllVt+Gw+LJaTqm8HZvsycvhmVTxfVzW7D1axpTfit1SnNdnMqr0Zjy27N2Xh5TegsNSST6OwsDib+K1Z++fUld0E05rv7wY1f4v4XPoO9EAlf5v4LQNy4MhUeq5Eq9zdlbLYt+XcNjG58Xwus7H49R3cAHcnquw22fkwZBxOE/B5TYXSSvo06eCI1LQx6bBvolc2Pe7yMiGohYSMhbJyiX9bqefx1hXzjr3mK7mdVZDE6yUoXB27W3092SfLAAqncM5s/aXWCW7wUk840ydVH0LA70913g4XCWOQZxa92tsksswA2B65U0M9mvnIhwqr62OOX3+hE325uQojCv7JoOLLjBk67JWc3zNX7DJRTkxhhZ/hUFFVxuytVvKmFj9TxyW7kRJY+rrv5YBRV83ZGu1FDGi6iEc1rqcGBWey+hXfJshW4vsZVDlIzisg3NiFLvPpa7057pLcd2SJRe15Q/jsI0il9u/x3kKVaW/M8SQJDulZQ9is0/MgSFhd8ykpOR+pF7cnVbQp0MfWffd3lCh++6xqd2R9bzW8BeaEruRsRxRll1GRkWlj2soZ9TcQK17aM6MA7HNvNH4J5ri2w62lz9U3f9W4RjEydVfo86tnXSppdb4dhY2/Z7G+AZdRom9P7Mqv0Ffz+ScGYHEbhY3/4bG2Cpdht9Wx9SKb9DPOytnRji1n+XNv6Ip9kGPu4K6GR5rDePLv0Vf77ycGbF0I+taf0
FTdCEZp6lnhtNSxYjSb9HXd07OjKTSypa2n9IS7a78eXi+UGbHiN1SzsDib1HruzRnRloJsLv9DtqiL9Fzc7rMtmirXEpt0Teo8l1raPI+VIoapL7jJ3REnqXn5nQZhkUqosL/VSr9XzXUmfpQqWqUls6fEoo8gaC76d+hU0GGIUteir1fptT/nZwdBCHiBDt/SSTyMNBdSfhQRmabryS58Xi+gM9/S69slS7o6FAu83fBESnoE5EQgvrYVpa3vczW8DISShQJcFi8jPQfz6TSM6hx9VzpMhc1xbaxquO/bA8vJqFkioU5ZA+DvdOYUHouNS5jbc311JbYyfrA8+wILSCphBEI7LKbvp6pjC0+n2rXmJwno8MVSO5hc+BZdobeJKEEAYFVdtHHPZlRxRdR7ZpomhFK7mNH8Bn2hF4hqQRRSWOTPVQ4JzK0+BKqXFNznvAOVzRVz57QU+wP/ZeE0oEgjVVyU+KcwMCiy6l0zTT9RJxIN1EfeoID4WdIKm0IUlgkN37HWOp8V1Pmnmu6XktSaaEl/AQtoSdIKi0IksiSG499FFW+ayhxn6Zb/8SI0kob7ZH/0Bb+NynlAEIkkSUXTtsIyn3XUuQ+E1kyV4laUQMEI/+hM/IIaWU/QiSQJBd26xCKvNfic5tN1AVVDRGLPkUk8hCKshch4kiSC4tlAB7vtbhcFyDLHlOMgo4+FRyRgj516v4aZptMw6l6dgT/Syi1j7QaxSq78dn6Mth/Ll6NLbG5MqLpA+wNPks4tYeUGsEqu/DY+tLPex5eu35lYKOMeLqJhtDThFM7SKthLJILl62WPt7z8dqzR4CEEFkZSaWF5vBTRJObSashLLILu6WGSu9FeOwje4WRVtppizxJNLkeRQ0iS05slipKPRfh0ShnnisjM1k+RSy5ClUNIkkOrJZK/O4LcNonZ329EYaqhohEnyaZXIaqdiJJdmRLBW7XeTjs03uFIdQoidizpJIfZCqHSlZkuRy762xs9plZnTxDDBFHib2EmlyMUAOABUkuxeI8Ddkxp5cYSYi/hki8C2oAJBnkEiTHSeCYi5TFyTPCKOjTr4IjUtAxp8boUjZ1PMKBg8sHgq56lUhICFRq3Mczsvhqqt25L4EAtMZWsL3zXzRFF3UdEzJhcamLqVDunMqQ4muocue+BAIQiK9mT+eDNH+o9fyHGcWOSfQvupZKT+5LIADhxHrqg3+hLfoK/wuFf5jhtY+nj/+LlLnPymtSiCW30BS8n47oC13LRd3LLRLdBa1ctjFU+q6j1HNhXpGUZGo7baE/E4o81bV80N3D5H8Mu3UEJb4vUuS5LK9ISjq9h2D4fqLRJ7o6vh7JsFoH4/Ncj8dzRV5LB4rSQDz8APHoYyCi/K+o2P8YsqUfTs91OD1XI+UR5RBKM+nI30hHHwcR6pGBXIPV83msnmuQ8ohyCLUdEfkHRB8H0cmHi6N1/W+5Asl9FbivRtLZEl3Qsa+CI1LQMSMhBBs6/sna9j/3mCNxqLrHx5d9jVHF1+Q0we7ofIT1bb85OFFrMzI5FMOKr2dEyddyYuwP/odNbT/tcnL0anhk1uf7+a9hWOn3c5rEWyLPs621e3dSdkal51IGl/0s61PsoQpEX2NX69e6ckn0GJkcgBL3efQv+3VOywiR2DvUt12HEClDDI/zNPqU3Ycsuw0z4on3aW27BiFiBhjgcMymvPTvyDlMsKnkKkJtVyNEyBDDapuCr+xBZLnYMENNbSTR/nlQO7IwMhzJOhpH6T+QdCqkHi6R3oFo/wKoLQYYMlgHIZU8iGSpNswo6NhSLvN3YddMQUe1Nnb8i7XtfwbIWoCre3xN2/9jU+Bhw4ydnY+xvu3XZCpXZGNkEge3Bv7K5o77DDPqQ8+wqe0OQDVQSCzD2Bt8iC3tvzLMaI28zLbW75CZKIwxmiP/YXvb7Rh9HumMvcXO1q8gSBtgZI7ZEX2B3a03IoSxYnPR+GL2t16dWQIwyIjE51
Pf9mWEMFbVM5FcSUvrZQgRNcgQJBLv0dJ2VVfkJLvSqY0E2y5BiKBhRjq1gmDbFQg1ZoihpneSaLvMoBOS4Yj0JhLtVyAMNgwUSj2i7QqDTgiACuldiPYrEb3SzK+gY10FR6Sgo1aN0eWsab8/r9eubruPptjKrHbt8XWsa7s7L8bWwN9ojC7MahdKbmVj64/yYuwLPkxj+KWsdvHUni4nJPe6Iy2Rp2gOP5HVMpluZGfrDXRPmrkwArGXaA79PaulonRQ33otPe9I0ZNKNP42bcE/ZLdUo7S2XkWmFHkuDIVkchmdwezOoRAJgm1Xg0jmzFBS64kE7zDAUEi2X9e13JNLlVwFkd5BsvN2AwyB6LgBDDlTH2agNCAC38vhNQV9VlVwRAo6arU58KiJyp8WNnf8O6vdzs5HTPSnkdke+FdWq33BR/Oq9trN2N35YFarxvCjXdGafFZaJeqDf8kaFWmLPNYVpchvNbc59BeE0J/MOiNPoIoI+fVdEXSE/oaaJWIRjT2NKjryZKiEI/9CVSO6VsnYKwi1ifx6u6gkok+iZokmqIl3EMqevBlq/GWEckDfLLUM0lvyZCiQXIhI78rjtQV9llRwRAo6KhVONdAQXZx3PxSBQn30XSKpRk2beLqNhsgbJnquqLTFVxBK7tS0SKkhDoSfM8UIJTfSmVivaaGocZpCj5N/QzNBPL2bYOIDbQuRoiX0EGYas6WUJjpjb+kwVDrCD2KmwZwqOglHtSNIQghC4b9hplGeEDGisad1beKRf2Du9pomEdWPUqUjD5F/2fmuY0Qf1x0XkUdMMiyI6GMmXl/QZ0EFR6Sgo1I7gy+a6qQLmT0iO0Mvao7vD7/YtfvGDMPCntBzmuNN4ZdRRUpz3CijPvSU5nh77DUUETbFAEuXM9OzOmPvkFbbTDNaw49qjsYS75NW9ptkyATC2vlBqdQa0umtmOumKxEOa0fClPQO0qkVmOumK4hHHtIeVQ6gJhdhrpuuSjr6iDZDDUDidZMMBWJPGs7dKeizqYIjUtBRqVBqv0kXAUAinKrXHI2k9pl2dgQq0ZT25BlN7zXVHTbDUIimdmuOx1N7kDBXoAsUYmntyE4ivQfztwuFhA4jmd5t8vgAKkkdRjq9pxcYgrSifRylV64DVGW/5nKZmt6LOWeq+0DtCBHveUxpwJwz1SURAbXT/HEKOmZVcEQKOiqVFlHM3gQFCik1qss4vJx5PpSUqh2NULLkEhhVWmeHgyKimFlqOHgcnevI5G2Yv13ovR+qiPYKI3McrbHe+TwyW361xrT5uUkFNJyEXmMAWp97L71XvX6sgo45FRyRgo5K2aT8usIeKgkLNp26EtZeYICETaeuhKWXSlfbZO19+BbJTW88HVt0rkOWPPTG07E+w90rjMy55j6WiyRJ+3ulN5abLIBGp+beLIkua3Rq7qX3KsMoFDcrSFsFR6Sgo1I+e79eOIrAZ9M+jtfe30QSaUYSMh6bdtl3j21gV80NMwwLHtsgzXGXbZBpBlhw27RLyzttgzDvJFhw6jDstiEmjw8gY9dhWK3m+xeBjNWq/XlYdMaMS0K29NMsmCdbBtAbUTDkSu1KrpZaML3kB0g+kIrMH6egY1YFR6Sgo1KD/WebfsYXwCD/2Zrjdd6zeiV/o7//As3xKs8ZyJLGU20OjFrfxZrjpe5TsOhETIxJocp7ueao3zkHm6XKNKPce5XmqMs+FZulP+YmWJUS7zWao3b7GGy2MZi79al4PddqjlqsA7Haj8fsjhan5/OaY5KlEtlxokmGjNWt/XlIchE4zzDJsIA7v/L7BX12VHBECjoq5bZWUeuZZaqOSF/PHNzWCk0bh6WEWu9pphgVzuPx6kRdrLKHPt4LTDg8Mn7HOHyOEdoWkoNq7+XkP2FIuGxD8DkmaVtIFsq912DmlmGz9MHvPEGHIVHi+2LexwewyKV4Xafr2ng9X8RMdEeSPLhd5+naOD3XYm63iQ2HW9v5BL
C6rzHJAKv7Mt1xyX2FSYaK5LrUxOsL+iyo4IgUdNRqZPEVpuqIjCi+Iqvd4KKr8t7CK1AYUqz99N2tfv6ryDzl5/OkrzKgKPvkXOW7qmvnTD4MQa3/K1n75pR7L+uK7uR326jy35C1b47fczGy5M+bUeL7StbGdG73echyBfk5bhJez3VZe9rYnachW+ryZMg43Fciy/rLGbJjNpJlSN4Mi+sCJIu2ow6A7TiwjsubgWMekrU3llkLOpZVcEQKOmpV6ZrIceU35vXaSeU3U+Eal9Wu2DGSCeX5lV8fWfINKt0zstp57IMYU/Er8kkoHVB0PVWeU7PaOa21DK/4U9d/5eKMSFR5r6bCc2FWS5ulgsEVf+86fi63DolS90VUeLWXGrplkYuoq3iYzMSXC0PG6zqDUt/Xs1tKLirK/t3lsOTGcDhOoMifvWy5JNnwlz7S1eU2l0ncgtU2CU/RDw0wZOyl/wDJnzNDso7E5v8/AwwJqeQ+kEtzZmAZgFRkvFdSQZ9dFRyRgo5qjSi+nOPKvwOQdXmje3xS+c0ML77EMKO//wLGl/8IkA0zRpV+m6HFxpcRqr1nMabiHiQsBpZpMuMDi7/KkJLvGGaUuk9meMX9XZERY4wa3xcYVPpjw12Efc6ZDKn4J5JkN8wo81xG/7J7DDNcjsn0rXiiaxdNNkbmFuZznUtN2f8z3KnYbh9DRfkzSIYm8cwxnc5TKS99MGvEpVsW2xD85c8gGZrEMwybYyb+soe1E0gPf5W1Dkf5kyBXkv12nonKSbaJOMoeRTLYqViyVCGVPt6VvGqMgXUEUumjSFo7cgoq6BBJwmjbzU9AubQRLujYVktsLZsDj7E/8k7Xv0gI1K7tt5mvcF/viQwvupwK19i8GB2JDewIPEJD5HUEAukwhkBQ7Z7D4KKrKHdNzosRSmxmT/BfNIZfQpBGwoJAQULuugqVctds+hVdQ5lrZl6MaHIbB0L/oDn8DIJUF0Plf5EShSLnLGp811LqPikvRjy1i5bQg7RF/oMq4mQm2g8zvI5pVPiuo9h1umEn5FCl0vvoCP2NQOTfCBEhs4OjmyEBaZz2SZT4vojPdV5ejLRygHD474QjD3d1yT2SYbONw+f9Im7XRXklXapKC/HIP4hHHkKIQI8Mi3UETs91ONwXG3Z0DpVQ20lHHiYdfRjUth4ZkmUwVs/nsbgvMezofJgRhOi/EdFHQG0+hAEZByUNln5I7qvBfSmSySTtgj7dymX+LjgiBX2qFEu3sjP0EuHUflJqBJvswWfry0DfWbisZb3CSCjt7Av9l1BqN2k1jFX24LbW0s93Li6r2Z0jGSWVDg6EXyCS3E5ahLFIbpzWPvTxno/LVtsrjLQapCX8HNHUZtJqCFly4rD2ocJzPi7bwF5hKGqY9shzxFLrSatBZMmJzVJNqecCXDrbaHORqkYJRp8jnlyDqgaQJCdWSyU+9wU47aN6hSFEnGjsvyQSy1BFEAkbFkslbte52O3je4mRJBl/hVRiCULtBMmGLJdhd52F1XZcXo7UkYwUavwNlOTiTDVTyQJyKRbnaci2Kb3EUCCxAJFc1FUxVQa5GMlxMtiP7xVGQZ9+FRyRggoqqKCCCiroE1Mu83chR6SgggoqqKCCCvrEVHBECiqooIIKKqigT0wFR6SgggoqqKCCCvrEVHBECiqooIIKKqigT0wFR6SgggoqqKCCCvrE1AutFT86dW/oCQaDn/CZFFRQQQUVVFBBRtU9bxvZmHtUOyKhUAiAvn37fsJnUlBBBRVUUEEF5apQKERRkX7fpKO6joiqqjQ0NODz+Xq9SE4wGKRv377s27fvmKxRUri+T7+O9Ws81q8Pjv1rLFzfp18f1TUKIQiFQvTp0wdZ1s8COaojIrIsU1dX95Ey/H7/MfsFg8L1HQs61q/xWL8+OPavsXB9n359FNeYLRLSrUKyakEFFVRQQQUV9Imp4IgUVFBBBRVUUEGfmD6zjojD4eCOO+
7A4ci9C+WnQYXr+/TrWL/GY/364Ni/xsL1ffp1NFzjUZ2sWlBBBRVUUEEFHdv6zEZECiqooIIKKqigT14FR6SgggoqqKCCCvrEVHBECiqooIIKKqigT0wFR6SgggoqqKCCCvrEVHBEgK1bt3LeeedRXl6O3+9n1qxZvP3225/0afWqXnrpJaZNm4bL5aKkpITzzz//kz6lj0SJRIIJEyYgSRKrV6/+pE+nV7R7926++MUvMnDgQFwuF4MHD+aOO+4gmUx+0qdmSvfddx8DBgzA6XQybdo0li5d+kmfUq/ozjvvZMqUKfh8PiorKzn//PPZsmXLJ31aH5l+9atfIUkSN9544yd9Kr2q+vp6rrrqKsrKynC5XIwdO5bly5d/0qfVK1IUhR/96Ecfuqf87Gc/M9QX5qNQwREBzj77bNLpNG+99RYrVqxg/PjxnH322TQ2Nn7Sp9Yrevrpp7n66qv5whe+wJo1a3jvvfe44oorPunT+kh0yy230KdPn0/6NHpVmzdvRlVVHnjgATZs2MDvfvc7/vznP3P77bd/0qeWt5544gluuukm7rjjDlauXMn48eM57bTTaG5u/qRPzbQWLFjA17/+dZYsWcL8+fNJpVKceuqpRCKRT/rUel3Lli3jgQceYNy4cZ/0qfSqOjo6mDlzJjabjVdeeYWNGzfym9/8hpKSkk/61HpFd911F/fffz9/+tOf2LRpE3fddRd3330399577ydzQuIzrpaWFgGIhQsXHvy3YDAoADF//vxP8Mx6R6lUStTW1oq//e1vn/SpfOR6+eWXxYgRI8SGDRsEIFatWvVJn9JHprvvvlsMHDjwkz6NvDV16lTx9a9//eB/K4oi+vTpI+68885P8Kw+GjU3NwtALFiw4JM+lV5VKBQSQ4cOFfPnzxdz5swR3/72tz/pU+o1ff/73xezZs36pE/jI9NZZ50lrrvuug/924UXXiiuvPLKT+R8PvMRkbKyMoYPH85DDz1EJBIhnU7zwAMPUFlZyaRJkz7p0zOtlStXUl9fjyzLTJz4/9u7v5Cm+jAO4N9zVpsMRmF5JCQtKdiFN2uSkl1YLksisMJuvFghk2LWoqJW3nSxdWVdtAujgim5WH+ozIiCBtKKwpEYjZgrVMg/hf3TYOBie7p4YbzSW+3Vs/fn9j4fOBf+tovvw8Hj1992NhNWrFiBuro6hMNh0dFU9eHDB9hsNly5cgV6vV50nIybmppCfn6+6BhzEo/H8eLFC1gsltSaLMuwWCx49uyZwGSZMTU1BQBZe75+xW63Y/v27bPOY664e/cuysvL0dDQAEVRYDKZcOnSJdGxVLNhwwYEAgFEo1EAwMuXL/HkyRPU1dUJybOgv/TuvyBJEh49eoT6+noYDAbIsgxFUfDgwYOc2IYbGhoCAJw+fRrnzp3DqlWrcPbsWVRXVyMajebExZGIsHfvXuzfvx/l5eUYGRkRHSmj3r59C4/Hg7a2NtFR5uTjx49IJBIoLCyctV5YWIhIJCIoVWYkk0kcPnwYVVVVKCsrEx1HNX6/H/39/QiFQqKjZMTQ0BDa29tx5MgRnDp1CqFQCIcOHYJWq4XVahUdb96cTiemp6dhNBqh0WiQSCTgdrvR2NgoJE/O7og4nU5IkvTbIxKJgIhgt9uhKAqCwSD6+vpQX1+PHTt2YGJiQvQYv5TufMlkEgDQ2tqK3bt3w2w2w+v1QpIk3LhxQ/AUv5fujB6PB9++fcPJkydFR/5X0p3v78bGxrBt2zY0NDTAZrMJSs7SZbfbEQ6H4ff7RUdRzbt37+BwOODz+ZCXlyc6TkYkk0msW7cOZ86cgclkQnNzM2w2Gy5cuCA6miquX78On8+Hq1evor+/H52dnWhra0NnZ6eQPDn7Ee+Tk5P49OnTb59TWlqKYDCI2tpafPnyZdZXIK9duxZNTU1wOp2Zjjon6c739OlTbN68GcFgEBs3bkw9VlFRAYvFArfbnemoc5bujHv27EFPTw8kSU
qtJxIJaDQaNDY2Cvvl+pN059NqtQCA8fFxVFdXo7KyEh0dHZDl7Pw/Ih6PQ6/X4+bNm7Pu3rJarfj69Su6u7vFhVNRS0sLuru78fjxY6xevVp0HNXcuXMHO3fuhEajSa0lEglIkgRZljEzMzPrsWxUUlKCLVu24PLly6m19vZ2uFwujI2NCUymjpUrV8LpdMJut6fWXC4Xurq6hOxK5uxLMwUFBSgoKPjj82KxGAD8dFGXZTm1m7AQpTuf2WyGTqfD4OBgqoh8//4dIyMjKCkpyXTMeUl3xvPnz8PlcqV+Hh8fx9atW3Ht2jVUVFRkMuK8pDsf8NdOyKZNm1I7WtlaQgBAq9XCbDYjEAikikgymUQgEEBLS4vYcCogIhw8eBC3b99Gb29vTpUQAKipqcGrV69mre3btw9GoxEnTpzI+hICAFVVVT/dch2NRhf8NTNdsVjsp2uIRqMR9zdPyFtkF5DJyUlatmwZ7dq1iwYGBmhwcJCOHTtGixcvpoGBAdHxVOFwOKioqIgePnxIkUiEmpqaSFEU+vz5s+hoGTE8PJxTd82Mjo7SmjVrqKamhkZHR2liYiJ1ZCu/3086nY46Ojro9evX1NzcTEuXLqX379+LjjZvBw4coCVLllBvb++scxWLxURHy5hcu2umr6+PFi1aRG63m968eUM+n4/0ej11dXWJjqYKq9VKRUVFdO/ePRoeHqZbt27R8uXL6fjx40Ly/O+LCBFRKBSi2tpays/PJ4PBQJWVlXT//n3RsVQTj8fp6NGjpCgKGQwGslgsFA6HRcfKmFwrIl6vlwD845HNPB4PFRcXk1arpfXr19Pz589FR1LFr86V1+sVHS1jcq2IEBH19PRQWVkZ6XQ6MhqNdPHiRdGRVDM9PU0Oh4OKi4spLy+PSktLqbW1lWZmZoTkydn3iDDGGGNs4cveF5oZY4wxlvW4iDDGGGNMGC4ijDHGGBOGiwhjjDHGhOEiwhhjjDFhuIgwxhhjTBguIowxxhgThosIY4wxxoThIsIYY4wxYbiIMMYYY0wYLiKMMcYYE4aLCGOMMcaE+QGSVvAy2mt4ewAAAABJRU5ErkJggg==\",\n      \"text/plain\": [\n       \"<Figure size 640x480 with 1 Axes>\"\n      ]\n     },\n     \"metadata\": {},\n     \"output_type\": \"display_data\"\n    }\n   ],\n   \"source\": [\n    \"plotCompPinPow(fuelPin)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"ab592f78\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Multi-pin example\\n\",\n    \"\\n\",\n    \"For the last example, we'll define a semi-covoluted but demonstrative block that has two fuel pin types existing on the same lattice grid. The use of yaml anchors `&`, aliases `*`, and merge keys `<<`. 
This helps use similar fuel and clad definitions (e.g., material, dimension) but overwrite things like `latticeIDs` and `flags` that we want to be specific to each fuel pin type.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 24,\n   \"id\": \"686d9504\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"BP_STR = \\\"\\\"\\\"\\n\",\n    \"blocks:\\n\",\n    \"    fuel: &fuel_block\\n\",\n    \"        grid name: fuel grid\\n\",\n    \"        fuel 1: &fuel_def\\n\",\n    \"            shape: Circle\\n\",\n    \"            # Use void material because we don't need nuclides, just components with flags\\n\",\n    \"            material: Void\\n\",\n    \"            od: 0.68\\n\",\n    \"            Tinput: 25\\n\",\n    \"            Thot: 600\\n\",\n    \"            latticeIDs: [1]\\n\",\n    \"            flags: primary fuel\\n\",\n    \"        clad 1: &clad_def\\n\",\n    \"            shape: Circle\\n\",\n    \"            material: Void\\n\",\n    \"            id: 0.7\\n\",\n    \"            od: 0.71\\n\",\n    \"            Tinput: 600\\n\",\n    \"            Thot: 450\\n\",\n    \"            latticeIDs: [1]\\n\",\n    \"        fuel 2:\\n\",\n    \"            <<: *fuel_def\\n\",\n    \"            latticeIDs: [2]\\n\",\n    \"            flags: secondary fuel\\n\",\n    \"        clad 2:\\n\",\n    \"            <<: *clad_def\\n\",\n    \"            latticeIDs: [2]\\n\",\n    \"        duct:\\n\",\n    \"            shape: Hexagon\\n\",\n    \"            material: Void\\n\",\n    \"            Tinput: 25\\n\",\n    \"            Thot: 450\\n\",\n    \"            ip: 15.3\\n\",\n    \"            op: 16\\n\",\n    \"grids:\\n\",\n    \"    fuel grid:\\n\",\n    \"        geom: hex_corners_up\\n\",\n    \"        symmetry: full\\n\",\n    \"        # Kind of a convoluted map but helps test a lot of edge conditions\\n\",\n    \"        lattice map: |\\n\",\n    \"            - - -  1 1 1 1\\n\",\n    \"       
       - - 1 1 1 1 1\\n\",\n    \"               - 1 1 2 2 1 1\\n\",\n    \"                1 1 2 1 2 1 1\\n\",\n    \"                 1 1 2 2 1 1\\n\",\n    \"                  1 1 1 1 1\\n\",\n    \"                   1 2 1 1\\n\",\n    \"# Stuff that isn't germane to this example, but necessary to make the blueprints build correctly\\n\",\n    \"assemblies:\\n\",\n    \"    fuel:\\n\",\n    \"        specifier: F\\n\",\n    \"        blocks: [*fuel_block]\\n\",\n    \"        height: [10]\\n\",\n    \"        axial mesh points: [1]\\n\",\n    \"        xs types: [A]\\n\",\n    \"nuclide flags:\\n\",\n    \"\\\"\\\"\\\"\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 25,\n   \"id\": \"1749933b\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"from armi.reactor.blueprints import Blueprints\\n\",\n    \"from armi.settings import Settings\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 26,\n   \"id\": \"f2aa9d37\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"def buildMultiPinBlock() -> HexBlock:\\n\",\n    \"    cs = Settings()\\n\",\n    \"    bp = Blueprints.load(BP_STR)\\n\",\n    \"    bp._prepConstruction(cs)\\n\",\n    \"    block = bp.blockDesigns[\\\"fuel\\\"].construct(cs, bp, 0, 2, 10, \\\"A\\\", {})\\n\",\n    \"    block.assignPinIndices()\\n\",\n    \"    setPinPow(block)\\n\",\n    \"    return block\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 27,\n   \"id\": \"e6c09f49\",\n   \"metadata\": {},\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"[info] Will expand HE, NA, AL, SI, V, CR, MN, FE, CO, NI, ZR, NB, MO, W elementals to have natural isotopics\\n\",\n      \"[info] Constructing assembly `fuel`\\n\",\n      \"[info] Block design <fuel block-bol-000 at ExCore XS: A ENV GP: A> is too complicated to verify dimensions. 
Make sure they are correct!\\n\",\n      \"=========== Verifying Assembly Configurations ===========\\n\",\n      \"[info] Block design <fuel block-bol-000 at ExCore XS: A ENV GP: A> is too complicated to verify dimensions. Make sure they are correct!\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"multiPinBlock = buildMultiPinBlock()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"701e0de0\",\n   \"metadata\": {},\n   \"source\": [\n    \"Plotting our block-level pin power shows a similar profile to before.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 28,\n   \"id\": \"46ab1a77\",\n   \"metadata\": {},\n   \"outputs\": [\n    {\n     \"data\": {\n      \"text/plain\": [\n       \"<matplotlib.collections.PathCollection at 0x1bafa69ca50>\"\n      ]\n     },\n     \"execution_count\": 28,\n     \"metadata\": {},\n     \"output_type\": \"execute_result\"\n    },\n    {\n     \"data\": {\n      \"image/png\": \"iVBORw0KGgoAAAANSUhEUgAAAiIAAAGdCAYAAAAvwBgXAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjAsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvlHJYcgAAAAlwSFlzAAAPYQAAD2EBqD+naQAAV7tJREFUeJzt3Xl8VOWhPvDnPWeSSSbLZCEkhAQIO8gWQFZRQEQRUVQQlyoutdWrtta2t9rear1dbK3Xen/WW+0irqiIAm5sKrIIyBrZQdZAAiQhySSZycxkznl/f4SkLpBkZs7knMw8Xz+5F8jkzJPTSd7nvOc9Z4SUUoKIiIjIBIrZAYiIiCh2sYgQERGRaVhEiIiIyDQsIkRERGQaFhEiIiIyDYsIERERmYZFhIiIiEzDIkJERESmsZkdoCW6rqO0tBQpKSkQQpgdh4iIiNpASona2lrk5uZCUVqe87B0ESktLUV+fr7ZMYiIiCgEx48fR15eXouPsXQRSUlJAdD4jaSmppqchoiIiNqipqYG+fn5zeN4SyxdRJpOx6SmprKIEBERdTBtWVbBxapERERkGhYRIiIiMg2LCBEREZmGRYSIiIhMY+nFqkTfdtxdiXeLt6O4rhLugA9JNju6JWfg+m7DkZeUbnY8S3E1uLCuYh2Oe47Do3lgV+zIiM/A+E7j0c3Rzex4ltKg16G49iNUer+EX6+BKuJhVzOQl3w5OiWM4H2MvkZKP+BdDulbB+jVgFAAJR3CPhmwT4QQHFYoOEJKKc0OcT41NTVwOp1wuVy8aibGbSg7hHkH12N9+SEoQkBKQIeEAgEhAF1KjO/cG3f0HocxWT3Njmuqo+6jWHZqGTZXboZE44+3hIQ4+58OHT2TemJq9lSMyhgV04Osu+EEDlS/guLa96BJPwQEJHQAAgIKJDQkx/VAb+dN6JF6LRQRZ3Zk00i9EtI9D/C8CUgXABWAdvazZ/+sZEE4vgc4boVQks0LS6YLZvxmESFLk1Li7wfW4v/t+xSqENBaeLk2f
f7BAZfi+30uiskBdsOZDfjn4X8CAHTo531c44ArcXGni3Fbj9ugCrW9IlpGRf1WrD/5I2jSB9k8oJ5L4+uoc+JojM55CnFKUvsEtBAZOARZeQeglwMt7isAUABbT4j0FyHUnPaIRxYUzPjNNSJkaf/4qrGEAGixhHz988/s/QT/Ovh5xLNZzebKzfj74b9DP/tfS5pmStZWrMVLR1+ChY9HIqLSuxPrSu9FQHpbKSEAIAFIlNdvOltc/O0R0TKkVgJ55uY2lhAA0IHAEcjKWyD1qkjHoyjAIkKWtbH8MP5376chfe1f9nyMzRVHjQ1kYWXeMrxw+IWgv05CYl3FOqypWBOBVNYU0Oux/uQD0KEBrRS2r5PQcca7HXvO/DVy4SxGSglZdQ8ga9C2EtJEA7RSyOqfRyoaRREWEbKslw9ugBri6RVVKHjp4HqDE1nXp2WfhjWrsfTk0piZFTle9xH8ugvBlJB/kzhc8zYCusfoWNbUsBkI7EdwJaSJBvjXQAaOGJ2KogyLCFlSiacKa8u+avV0zPloUsfq0wdQ6qk2NpgF+XU/VpevbvV0TEtO+05jf+1+A1NZk5QSB6vno2ndRyg06UVx7UfGhbIw6X4NjQtRQ6VCet4wKg5FKRYRsqRFxUVQwlxsKoTA4uIiYwJZ2NaqrfDq3rC2oUDBZ+WfGRPIwqp9e1DbcBhAOLM/Akdq3jYqkmVJvRrwrUBosyFNNKD+bUgZMCgVRSMWEbKkYnclwj1TIAAc90T/YrkybxnUsI5aG6+wOeU9ZVAi63IHThiwFQl3gxHbsTitFKGdvvoW6QZ0V/jboajFIkKW5An4oYd11Np4bxFPwGdQIuvy6b5wzjQ0q9fqw9+IxRm1tiMgw5uB6hCk25rboqjDIkKWlGSLhxLm6KoIBQ6b3aBE1mVXjPkeE9VEQ7ZjZTbFYcx2RPTvKwgD75fCm5tRC1hEyJJ6JHcK+yhfSokeSZnGBLKwnIQcaDKc8/iNa0S6JnY1KJF1Jcf1MGArAslxMXCLfLUrDHkXEJECCGf426GoxSJClnRtt2HhrSf8+nai3PD04XCo4R3p69AxMWuiMYEsLM3eD874fgjvV59ET+cNRkWyLKE4gYRpCPeqGThuhIjBO/dS27GIkCXlJDpxSU7fsO4jcmmX/uicGP1vDRCnxGFi1kQoIf44Cwh0SeiC3sm9DU5mTb2cNyGcRZg24UBe8uXGBbIw4bgZ4V01o0MkzjEqDkUpFhGyrDt6j4Me4qUzutQxt/dYgxNZ16TOk0J+vxgJiSu7XBkz782Tn3w57GomREi//gR6Om+ETYmBNSIAEDccsA1BaLMiCmCfAmGLgdNYFBYWEbKsEZnd8Z+DQjvyfGTwNBRmxM4vwE72TviP3v/R/A67wZicNRnjM8dHKJn1qEoCxnd5DoqIR3C/AhV0ThyNgRn3RCqa5QghINKfA5QMBFdGVEDtAeH8Y6SiURRhESFLu63XWDw86AoINJ5uaYkqGq+z+eXgabil5+h2yWclw9KG4f7e90MVaqunaZo+PzV7Km7pfkvMzIY0SbP3w4TcfyBOSYZodYBt3FddHBdjTM7TUERc5ANaiFCzITLePLt4tbUhQzR+2PpDZLwOoaS0Q0Lq6IS08BtMBPM2whTdtlcW45WDG/DxqX2AbDxS06WEIkTje6QI4LIuA3Bbr7EYlpFvdlxTldSXYOXplVhfsR4NsgGqUKFLvXmmRIeOgakDMTV7KoamDTU5rbnqA2U45HoDR2oWokGvg4ANEv/eVxIa0uIHolfajeiWfGVML7qUeg3gmQ/peQ3Qy9B4RU3TWhsFQABQu0E4bgUccyBEgnlhyXTBjN8sItShlHtrsbi4CMfdlagL+JBss6NbciauyR+KrAQefX2dJ+DB+jPrcaL+BDwBD+yqHRnxGRibORY5CTlmx7MUTfehxL0SZ7xFaNBroSAOdlsn5CVPRbp9oNnxLEVKDfCthvSvPXvHVAVQ0iDsl
wLxY2Judo3OjUWEiIiITBPM+M01IkRERGQaFhEiIiIyDYsIERERmYZFhIiIiEzDIkJERESmYREhIiIi07CIEBERkWlYRIiIiMg0LCJERERkGhYRIiIiMg2LCBEREZmGRYSIiIhMwyJCREREpmERISIiItOwiBAREZFpWESIiIjINCwiREREZBoWESIiIjINiwgRERGZhkWEiIiITMMiQkRERKZhESEiIiLTsIgQERGRaVhEiIiIyDQRLSJPPPEELrzwQqSkpKBz586YOXMm9u/fH8mnJCIiog4kokVk9erVuO+++7Bx40asXLkSDQ0NmDp1KtxudySfloiIiDoIIaWU7fVk5eXl6Ny5M1avXo2LL7641cfX1NTA6XTC5XIhNTW1HRISERFRuIIZv23tlAkA4HK5AAAZGRnn/LzP54PP52v+e01NTbvkIiIiInO022JVXdfx4IMPYvz48Rg0aNA5H/PEE0/A6XQ2f+Tn57dXPCIiIjJBu52auffee7F06VKsW7cOeXl553zMuWZE8vPzeWqGiIioA7HcqZn7778fH3zwAdasWXPeEgIAdrsddru9PSIRERGRBUS0iEgp8cADD2DRokX47LPPUFBQEMmnIyIiog4mokXkvvvuw/z587FkyRKkpKTg1KlTAACn04nExMRIPjURERF1ABFdIyKEOOe/z5s3D7fffnurX8/Ld4mIiDoey6wRacdblBAREVEHxPeaISIiItO06w3NqOPwBQL4aN8BrD9WDFe9D4oikJ6YiMv69MIlPXtAVdhhm2i6js9KD2FlyQFUej2QkHDGJ+KinAJM69YfdpU/Zk2klNhdcxiry7aiyl+DBhlAss2B/ik9MDn7QiTZuHbs6yq8h7HHtQK1gTIEdC/ilSRk2gtwQdoVSLKd+8aQsSoQOA635w00BA5D6rUQSjJsanckJd2EOBsvlLCydr3Fe7C4RqT9VbjdeHHzNrxRtBO1Ph9UIaCdfYmoigJN15GTkoxbhw/DrcOHwREfZ3Ji87gb/Hj5wBa8emALTtfXQRUKNKkDQPN+S41LwM19CnFn/1HolJBkcmLzaFLD8pMbsKRkNU7Un4YqFOhShwSgQIEOHfFKHC7NHoXr8iYjNzHL7MimkVLiYO0abKt8B6e8eyCgQkIHICGgAJAABHqnTMCIjBuQndjP5MTm8vrWo7b2/+D1fQo07x/97J8FAA12+8VISb4HiQmTzIwaU4IZv1lEqNmB8grc/ta7OOPxNJeP81GEQL+sTnhx9rXISo69Abasvg5zV72Br6or0Dicnp8qBDolJOGVyTehjzP2Bth6zYcn9ryIrVV7IYAW95YqFMSJODx6wd0Ymt63vSJahi41fHb6r9hZ/T4ElLMF5NwEVAASl3X5GQY4p7ZfSIuQUqK27nm4av4bgApAa+HRjZ9PTfkZUlMeOu+FFGScYMZvzq8TAOBYVTVumr+gTSUEAHQpcaC8Are88TZqvN52SGgdLn89bvz4VRx0tV5CAECTEhVeN+asfBXH66ojH9BCArqG/971d2yv2geg5RICAJrU4dP9eHTX37DHdTjyAS1ESolVp/4fdlZ/0Pj3FkpI4+c1SOhYcfJJ7K/5tD0iWkpd3QtnSwjQcgn59+drap9CTe3TEc1FwWMRIehS4u6Fi1Hn87ephDTRpMSxqmo8vHRlBNNZz883fIDjddVB76vaBh/uWr0gpq4me+3Yh9jpOtimwtZEQkKXOh7f/QI8gfoIprOWva4V2OX6EK3Xte9aUfonVPlPGB/Kony+TaiueTykr62pfQr13lUGJ6JwsIgQ1h05hsOVVUENrE00KbHywEGcOPvOytHuWG0VPin5KuR9ddBVgfWnjxofzIK8mg/vl6yFDGFg1SFRF6jHp2VbIpDMeqSU2FL5FhrXNITw9ZDYUbXE2FAWVlv3dzSebgmFitq6F4yMQ2FiESG8uq0IahjnTBUh8GbRTgMTWdcbB7dDCWNfqULg1QNbDUxkX
avLtsGr+1p/4HkIAO+VrI6JGaTS+l2o8hcjlNkQoPE0zq7qpWjQo38GSdNOod67FK2fjjnvFuDzrUYgcNTAVBQOFpEYV17nxmeHjoR0hN9EkxJvFO2I+gFDlxJvHNwe9r76+MRXqPC6DUxmTUtProMI8QgfaBySS+rLsK/2qGGZrGp39UdnF5+GLiC9+KpmjUGJrMvtWWDAVlTUuecbsB0yAotIjDvhqgnxGOybXF4f6vx+A7ZkXTV+L2obQj/Cb6JDosQd/aeySusrQjot820n6ysMSGNtVf7jkCEf4TdSoMLVcNKgRNbVOJMR7tAlEdCOGZCGjMAiEuM8DcaVB7e/wbBtWZE7YOC+MnC/W5VPN+Z7rNei/6osvyGnVAT8useA7VibLt1AK1cUtWErkHqtEXHIACwiMc4RF2/YtpLjjduWFSUbua/i7IZty6oSVGP2l0NNMGQ7VhavOCy1HStTRBLCH7oUCIX3prIKFpEY1y3NGdbiyyYZjkQkRfldVlPiEpAWH/6gqAiBvCSnAYmsLS8xO6w1Ik26OjobkMbaMuzdw14joiOA9Pg8gxJZl83WC+HPiAjE2XoaEYcMwCIS4zKTHJjSpxdUJbyrZm4eNiTq71aoCIGb+wwP86oZBdPy+yMjIfqPXK/MvSisNSICAt0dXdAnuZuBqaxpcNr0sNeIxCsO9E6ZYFAi60py3IBQL3P+Nx1JjpuMiEMGYBEh3Dp8KDQ9vEWFc4YONiiNtd3UuzCsq4M0qeN7fYYbmMi6JmQVhnVaRUJiRteLo77gAkB2Qn9k2gsQ6gAroGBQ2nTYlOg/5aeqWUhMvArh3EckwT4ZNlu+kbEoDCwihDHd8tE/q1NI9xJRhMCV/fuiS2pKBJJZT9ckJ6Z16x/SrIgqBAamZ2NU5+g/wgeAeCUO13SdGNLXKhBIjUvCxM4jjQ1lUUIIjMy4EaHdR0RACAVD0mYYHcuyUpJ/iNBPz2hISbnXyDgUJhYRghACf591DZyJCUGVEVUI9OmUid9dPiWC6aznj6Ono3dqcMVNFQLpdgf+ccnsmDjCb3JT98sxMn1gUGtFFAjYFBv+e9C9SFSj/wi/SX/npRiWfl2QX9X4NoLTcv8LzvjcSMSyJHt8IdLTngjpa52pv0aCfbzBiSgcLCIEAMhNTcWCW+YgJyWl1aN9cfZjcJccvHbTLCTbo/tqmW9LjrNj/qW3YFBGTvO+aIkCgVxHKhZcdiu6OGJrpb4qVPxy4J0YmzkEAFotJAoUOGwJ+MOQ+9EnJTZmjr7u4s73YETGDQAaT7e0RECFAhVX5j6K3ikXtUc8S0lOmot05x/R+BPY2mmaxs87Ux9FSjJnQ6xGSAvfDjOYtxEmY1TXe/HatiK8tu1LVHg8sCkK9LMvEUUIBHQdBelpuHVEIeYMHQS7zWZyYvP4tADePLgdLx/YgqO1VbAJpfnN3RQIBKSOrIRk3NZ3BL7Xdzic8YkmJzaPLnV8VrYV75euxoHaYqhCgZSN60AUIaBJHQ41AVd0GYercy9BVkK62ZFNdbRuE7ZXvotiz5azhURAQocCBTp0qMKG/qlTUJhxPTLtPcyOayqffxtq6/6O+voP0HhqS0HjaRvl7N8lEhOuQEry3bDbx5gZNaYEM36ziNA5BXQdnx48jM+PFsPl9UIRAhmJibisb2+Myu8aU6cXWiOlxBdlxVhx4gCqfB7oUiItPhHjc3pgctc+sCmcePy6Q3XHsbpsG6r9tWiQASTZEtE/pQcmZBXCbtC9R6JFtb8Ue10rURsoQ0Cvh11JRqa9AP2dU5Cgxsa6rLbStHK4PQsQCByGLmuhiGTYbN3hcNwAm9rF7Hgxh0WEiIiITBPM+M1DNSIiIjINiwgRERGZhkWEiIiITMMiQkRERKZhESEiIiLTsIgQERGRaVhEiIiIyDQsIkRERGQaFhEiIiIyDYsIERERmYZFhIiIiEzDIkJERESmYREhIiIi07CIEBERkWlYRIiIiMg0L
CJERERkGhYRIiIiMg2LCBEREZmGRYSIiIhMwyJCREREpmERISIiItOwiBAREZFpWESIiIjINCwiREREZBoWESIiIjINiwgRERGZhkWEiIiITMMiQkRERKZhESEiIiLTsIgQERGRaVhEiIiIyDQsIkRERGQaFhEiIiIyTUSLyJo1azBjxgzk5uZCCIHFixdH8umIiIiog4loEXG73Rg6dCiee+65SD4NERERdVC2SG582rRpmDZtWiSfgoiIiDqwiBaRYPl8Pvh8vua/19TUmJim/Rw4VoYPPt+DsspaeP0BJDvi0Sc/CzMmDEJGqsPseJZyvLIa72zfjWNnquH2+5EUH4/umWmYNXwQ8tKdZsezlHKPG29/tQt7K8tR6/MiMS4OXZJSMKvPIAzM7Gx2PEupC3ixrLQIO6qKURuoR5yiIiM+GVO7DEFhegGEEGZHtIyA3oAdri9woHYHPIE6CCGQZEvBwNQRGJA6HKpQzY5oGVJqqPauwRnPCgS0SkgZgE1NhzNhLDId06EqCWZHtAQhpZTt8kRCYNGiRZg5c+Z5H/Ob3/wGjz/++Hf+3eVyITU1NYLp2p+UEiu/2I/5y7dhz5FTUBUBXUpICShCQAJQFIEpF/bFrdNGom/32B441h86hhc/34rPDx1r3j+6lFCEgDj754t6d8dd40diTM9uZsc11c6K03hhxyZ8dGQ/pAQgGvePAKAIBZrUMSyrC74/aCSu6tkvpgfZEk8lXjuyFh+UbINfb4CAgI5v7qvuSZ0wp/s4zMy7EDYldgfZukAN1pR/iA1nVqJec0OBAh06AECBCh0aUmxpGN/pclzUaRoS1ESTE5tH0+txqvZVnKx9GX7tJBqP+TUAEoAKQIMqUpCdciNyU+5CvC36fr/X1NTA6XS2afy2VBE514xIfn5+1BWRQEDDEy9/gvfX7oIiGgvI+aiKAITA43dfgalj+rdjSmuQUuKFNZvwzKfroSoCmt7CvhICmpR4aMp43H3RhTE5wC4+uAcPrf4IgIAm9fM+rul1d1O/Ifjd+MtgU2LvArptlUfw0NZX4NMbWtxXTa+iCzN740+FtyDJZm+fgBZS5i3BC4d/h5qGakicf18BgIBAZ3tX/KDXr+CMy2inhNbh1yqwt+xOuP270Vg8WqIiTknHwOyXkRQ/oD3itZtgioilfvvY7XakpqZ+4yPaSCnxu3kr8MG6XQDQYgkBAE2X0DUd//X8R1i15av2iGgpL6zdjGc+XQ8ALZYQANDO7sunP/4c/1y3JeLZrObDw/vx488+hCZliwMr8O/X3Zv7d+CRdcvRTscjlrGr+jge2PIivJq/1X0lz35sOXMIP9n6Mvx6oF0yWkWlvxzPHXwMtW0oIQAgIVHuK8VzBx+DO1DbDgmtI6DXYvfpW+D270XrJQQANDToVdh16kbUNxyOdDzLslQRiQXvrtqBjz7fi2B+70sAQgD/9fyHKCl3RSyb1Ww8XIxnPvk8pK/9n4/XYdPREwYnsq5jNVX48WcfINg5IAlgwYFdeHP/jkjEsqT6gB8/2foyNF2H3qbBopEOiS+rjuFvB1ZEMJ21SCkx78iTqNfczadh2kKHjmp/Bd4o/msE01nPoTO/Qn3DITSehmkrDZr0YG/ZXZAymK+LHhEtInV1dSgqKkJRUREA4MiRIygqKkJxcXEkn9aydF3i1Y82Bz1YAICUjTMC76760vBcVvXi+q1QQzy9oioC8z7fanAi63p1b1HjGqMQvlYAeGHH5piZFVl2sgiuBk9QJaSJhMQ7xV/AE/C1/uAocNi9Fye9xUGVkCY6dOyrLUK5rzQCyazHFziJM54PgRD2FaDBGziGqvrVRsfqECJaRLZs2YLCwkIUFhYCAB566CEUFhbi0UcfjeTTWtaWvcUoragJabAAGovMolU74fNH/9TwiSoX1n51tPl0S7A0XeKzA4dRWh39V155Aw14Y9+OkPeVBHCkpgobTx43NpgFSSnx5rH1IR0MNPHqD
VhWWmRUJEv7vGI5lDCGCQUKNlR8bGAi6zpd9yYQ1itLxanaV4yK06FEtIhMnDgRUsrvfLz00kuRfFrLWrx6Z+Pi0zDU1fuwettBgxJZ17vbd0MJc7GpEALvbt9tUCLrWnb0K9Q1+MPahioE3oiB0zN7a0pwpK4s5IMBoHGoeef4F0ZFsixPoA47XZtCmg1pokPHF5WfQIvyUw5SSpyqfR2hzYY00VDtXQtf4KRRsToMrhFpR0dPVra64LI1qiJwoiz614kUV7rCGiyAxnU1x6uif18dq6mGTYT3o6xJiUPVlQYlsq4ST/jfozRoO1ZX5S9v0+LU1vh0LzyBOgMSWZcuvQjoRrwmJHyB6J+Z/DYWkXbk8TaEvQ0hBDze8I5+OwKP39/qFUWt0XUJjz/8fW517gY/jLhSua4h+tc9eDRjfna8WvS/rny618Bt1Ru2LSvSpNuS2+ooWETaUVJCfNjbkBJISgx/O1aXZI8P+9SMogg44uMMSmRdSXHxQV2FdT4p8dF/fwyHaszPToJB27Eyu4E3JEtQo/sO0apItuS2OgoWkXbUs2tm2GtENF1Ht5x0gxJZV0Fm+N+jlMZsx+p6OjMQaOVeGK1RhUCftEyDEllX96ROYW9DQKCbAduxuvS4TlAQ/p1kExQHHGp0D66qkoA4NcuALQkk2LobsJ2OhUWkHV07cXDYa0RSkxJwcWEvgxJZ13WFFxiynWsN2o6VTe3RG6lhzmZoUuLm/kMNSmRdfVNz0TelC5Qwrm6QkJjVbbSBqazJYUvG0LQxYV01I6BgTOYUKGGuYeoIcpK/h/CGVBXpiZOj8nbvrYn+V4eFFPbLC2s2Q1EErps0BHG26H+/ixxnCib2LQh5BklVBKb074Xs1Og+EgMAu2rDLf2HhnzPFQGgd1oGRmZ3NTaYRc3pPi6ke4g0cajxmNpliIGJrGtcp6lhXTUjoWNs5hQDE1lXdvKcMLegISflVkOydDQsIu1ICIHbrrwwxK8F4lQV10+K/qPWJneOHwE9xBkkXZe4fdwIgxNZ1/cGDINNCe04XwK4d8jomHlvnsu6DEFGfHJIsyICwA3dx8bEGhEA6OHoh/zEXiHNiggoGJR6ITLt2RFIZj3xts7ISroGoQ2rKhLjeiMt4SKjY3UILCLtbMaEC3D9pOCOpgQaz0v/8f6rkJ2ZEplgFjSyex4evuKSkL72l9MmYni3XIMTWVdeihP/N/lqAMHdUkkAuG3AMFzfJ/pPYTVJUOPw/0bejjjFFlQZUSAwKrM3ftA7No7wgcaDp9sLfoZkW2pQZUSBgix7DuZ0+48IprOenhm/RVLcACCotTUqbEoqBnR+ESIGTmGdS2x+1yYSQuBnt07GnMsa7zartHLqQVUEbDYVT/7oaowf2rM9IlrK3LHD8csrLoEAWj1NoyoCAsCvpk3ErWMK2yWflUzp3hsvTJkJm6K0epqm6fN3DhqB34y9NGZmQ5r0Tc3F86O+j+S4hFb3lThbViZ07o8/D/8ebEr0nxr9OmdcBu7r/Vukx2c174uWCeQmdse9vX6DxCi/WubbVMWBC7JfQ4p9GJoOIVumIF7tjEE5C5Bgy4t8QIsS0sJvMBHM2wh3RGu3H8IbK7Zhy97jzYOsLiUURYGu64izqZg+/gLcNHU4euTG3ttpf9224lK8vGEbVu5tvKusEGf3lRDNl65OHdgHc8cWojA/dmZCzuVAVQVe3LUV7x7cDb+mQRUKdCnPlg0JTUpclNsddw4agUu7Rf/C55aUeV1469gGLDq+CXUBL2xCgSZl4xGaENCkjv6pXTGn+1hckTsMaowesQJAvebG+ooV+LxiOWoCVVCgoundjQQEdGjIjM/GRZ2uwJjMKYhTYuP01bno0ofTdQtwsuZleAOHIWBrvjmcgAKJAOKUTOSk3IqclFsRp0bf1X3BjN8sIhZQfKoKS9fvwenKOnj9DUhOtKNPfhamjRuAZEf039shGGW1dVhctAfFlS7U+XxIt
tvRPSMNM4cNRFZKktnxLMXl82LRwT3YV1mOWr8PibY4dElOwXW9L0CBM/p+8YXDpzXgk1O78GX1MdQ11MOm2JBpT8ZlOUMwwBkbi3jbSpc69tZsx/7aL1Gv1UFAwGFLwQWpI9E7+YKYm11riZQStb7NOONZjoBeBSkDsClpcCaMRbpjChQRvfc5YhEhIiIi0wQzfsfuPCMRERGZjkWEiIiITMMiQkRERKZhESEiIiLTsIgQERGRaVhEiIiIyDQsIkRERGQaFhEiIiIyDYsIERERmYZFhIiIiEzDIkJERESmYREhIiIi07CIEBERkWlYRIiIiMg0LCJERERkGhYRIiIiMg2LCBEREZmGRYSIiIhMwyJCREREpmERISIiItOwiBAREZFpWESIiIjINCwiREREZBoWESIiIjINiwgRERGZhkWEiIiITMMiQkRERKZhESEiIiLTsIgQERGRaVhEiIiIyDQsIkRERGQaFhEiIiIyDYsIERERmYZFhIiIiEzDIkJERESmYREhIiIi09jMDtCe3LVefPxBEfbtPIG6mnrE2+OQnpmMidMG44Jh3SCEMDuiZfj8AazasB+bdxxDbZ0XiqLAmZqIiy/sjTGFBVBVdtgmmq5j7d4jWLXrMKrcHugScDrsGNO3O6YO6QN7XEz9mLVISonNpSV4/8A+lHvcaNA0pNoTUJjTBTP7D0Sq3W52REvZ7zqNRce+xMl6F+oDDUiOs6Nvamdc36MQWQnJZsezlHJfBVaXf46T9adRr9UjQU1AdkIWLsm6CDkJnc2OZylerQqHaz5Ale8AGvQ6qMIOh60zClKuREZC/3bPI6SUst2ftY1qamrgdDrhcrmQmpoa8nZKjp3Bwlc+x8fvFyHQEIAQArouIQSgKAo0TUe3nlmYefNYXD6zEKpNNfC76Fgqq9148/0tWLJyB+o8PqiKgKY3vkRUtXFfZWUmY9YVhbh+WiESE+JNTmwej8+P+euKMH9dEcpr3N/cV2f/nJJoxw1jh+DWi4cjM8VhcmLzBHQdb+3eiZeKtuFQVSVsQoEmdUgAqhDQpUS8quK6ARfg7uEj0SMt3ezIppFSYnnJXrx0cCOKKk9AFQr0s/tKwdmDJQFcnjsAd/Ydh8HpuabmNduemv34oHQZvnTtggIF8ux/4ux/OnQMSh2A6V2mYkjaILPjmqrK9xX2Vr2GY3UrIaEDEMDZ/y+gQEJDhr0/+qXdhB7Jl4d1cB7M+B31RWT7F4fw+IPz0eDXoGn6eR8nBCAlMGpCX/zyyRuQkBh7A+zh4nI8+NuFqHJ5oOstvyyEEOjdvRP+51ezkJme1E4JraO8pg4//PsiHDp1BnorP0KqEMhIceAfP7wevXIy2ymhdbj9ftz30ftYW3wUANDS3lKFgN1mw9+vmolx+d3aJZ+VaFLHb4uW4s0jW6GcLWjno4rGQfcPw6/GzO5D2zGlNUgp8dGpFZhfvBAKFOg4/+/3ps9f13UGrus6IyZnv4vrPsHnpx4FICGhtfBIBYCOgpQrMarzL6GKuJCeL5jxO6rn13dtP4Zf3/cqfN5AiyUEaCwhALDl86/wmwfnI9DQ0v9Q0efEqWr8x6/falMJARp/CRwursD9j72JWre3HRJah8vjxR3PvY3Dp1svIQCgSYnKOg/mPrcAJ8642iGhdTRoGn7wwWKsO34MEi2XEKBxX3kDAdyx5B1sPVnSHhEtQ0qJx7d/hLeObAWAVl9bmtShS4mHty7BB8d3tUdES1l6aiXmFy8EgBZLyNc//27J+3i35P2IZ7OaE+41WHfqV5DQWikhAM7uqyO1S7Hx9H9Dypb3rRGitoi4a7147IHXoOsSwUz66LrEl5uO4LUXVkUwnbXousR/PvEuPPW+NpWQJpouceJUNZ74v+URTGc9v35zOU5UuppPw7SFpkvUeX24/1+Lg3o9dnR/2bgeG08cb1Nha6JLCU1K3PXeItT6fBFMZy2Lir/EgqPbWi1r3yYAPLxlMY7Un
olELEvaX3sQrxe/HdLXvlvyPnZUx05xczecxrpTvzz7t2BeXRLH6lbggGthJGJ9Q7sUkeeeew49evRAQkICRo8ejU2bNkX8OT/+oAged3ADaxMpJd578wv4vA0RSGY9m3ccxbGSyqAG1ia6LrH6i69wsiw2jvSPV1Tjs92HQ9pXmi5x6HQlvvjqeASSWY+noQGv7Nge9MAKNJaRWp8Pi/fvNTyXFUkp8a8D6xHKCYPGmSaJ+Yc3Gx3LspaeXAklxOFLgYKPTq00OJF1Hax5F1JqCK6E/Nve6tciPisS8SLy1ltv4aGHHsJjjz2Gbdu2YejQobj88stRVlYWseeUUmLJ/I1hbcNT58OaFbHRmt9Zuh2qEvo5U0URWLJyh4GJrOvtDTughLGvVEXgjc+LjAtkYe8f2AdPQ3hl/uWibTExg7TtzHEcqq0IcahoPKW18Oh2eAJ+Q3NZUZW/Gluqtrd6OuZ8dOjY6dqD097IjUFWoUk/vnK9e3Zhamg8gdM46fnCwFTfFfEi8vTTT+Puu+/GHXfcgYEDB+L555+Hw+HAiy++GLHn3FNUjNLjlQjn95dQBD58O/qPMM5UubF+W2hH+E10XWLxii+jfsDQdYmFG3eFta80XeKz3YdxptZjYDJrmr/zy5CO8JtIAIerq7D91EmjIlnW20e3QRXh/Tqu1xqwrGSPQYmsa015aDNHX6dAwWfl6wzJY2Ul7nXw6zVhbUNAxcGaRQYlOreIFhG/34+tW7diypQp/35CRcGUKVOwYcOG7zze5/OhpqbmGx+hKD1eGXLmJlKXhmzH6k6Wu8IqbE1q3V546qP7aKzW60OtN/w1C7qUOFkV3i+HjuBYdXXIR/hfV+yqNmAr1na49gy0MKe/bULBCXeVQYms67SvDCLMKiIhcdpbblAi66prKIFAeLejkNBQ4y82KNG5RbSIVFRUQNM0ZGdnf+Pfs7OzcerUqe88/oknnoDT6Wz+yM/PD+l56z1+GHF1ljfKB1YAqDdwHUy0FxGPz7jvz23gtqyqPmDMa6vOH/37yh0wZlGuOwZOzXg1H/QwK66ERL0W/Vf7BXQPEPb8ERCQ7vDDtMBSV8088sgjcLlczR/Hj4e2qC/REW/IUX4s3EskMSG0a8TPxeGI7rtiOuzGvR6SY+BGcIlxxry2kuOj+3UFAMlxCYZsJ8kW/a+rBNUe9oyIgIBDTTQokXXZFAcQxvqQJnFKZO8VFdEi0qlTJ6iqitOnT3/j30+fPo2cnJzvPN5utyM1NfUbH6HI69EppK/7OkURyDdgO1bXNdsJxYDpo7TURDgMLDVWlJJgh9MR/oChCIHcdKcBiaytV3qGIa+tgvTov8tq75ROYa8RCUgdPZKj/4Z5XRJyIMOcEREQ6JKY3foDO7jUuG5hLVQFGteIpMYVGJTo3CJaROLj4zFixAh88sknzf+m6zo++eQTjB07NmLP239wHvILssI6PaPrElfdMMq4UBaV7kzChFG9w75q5trLh0X93QoVReCGsUPCGlxVReCyIX2Qnhz9R2O3DB4a1P1Dvk0A6JvZCUM6R/+AcUPB8LDXiCTZ4jG16wCDElnXxVnjwp4R0aHjkqyLDEpkXblJ42FXwyvyEhr6OK8zKNG5RfzUzEMPPYR//OMfePnll7F3717ce++9cLvduOOOOyL2nEIIXHPT6LBOzySnJmL8lIHGhbKw668oDOtKEEjg6ilDjAtkYbPGDA7r6iBNl5gzPjZuxz29Tz+kxId+qkACmDu0MOoLLgAMSe+KvqmdQx5eVSFwQ8FwJKjRPSsJAM64VIzKGBHWfUSGOgchyx79s0eKsKFP6vUQIQ/1Asm2rshOHGlorm+LeBGZM2cOnnrqKTz66KMYNmwYioqKsGzZsu8sYDXa5OlDkZrmCPmeD9feMgbx8bHxrqnDB+WjV/eskGZFFEVg8rh+6JyZEoFk1pObkYrLhvYJaVZEVQT652ZhZM+uEUhmPXabDbcPGx7S4KoIg
YzERFzdt/3fCdQMQgjc3W98SCccBABFKLi554VGx7KsK7tcFvLpGR06pne53OBE1tXbOROKiENoi1YlBqR/L+IHA+2yWPX+++/HsWPH4PP58MUXX2D06NERf05Hkh2//eutUG1qUGVEKAKjL+mHG79/SQTTWYsQAk8+fC1SkhOCKiOKIlCQl4n/vGdqBNNZz3/fMBW9sjOC2leqIpCWlIhn77omJo7wmzwwaiwu6V4QVHFThECcomLeNdcjKYwZlY5mRv5gzO0d3O/Gpr369IXXIz8p+tfSNOmVXIDbe9wc0tfelD8LFzhjo+ACgMOWhYu7PHn2dFYwv3sEeqbMQO/UayMVrZmlrpoxWr9BXfHHF+YiwREPRW35WxVnB5UJlw7Er568AWorj482OVmpeP73NyMrM6VNg4YAMKB3Dp59fA6SYuDqoq9LSojHv+6djQF5ndv0o60IgZy0FLxy/xzkpMXGzFETm6Lg/6bPwGU9ewNAq68tVQgkx8fj9etmY3AMrA35tl8Mnoq7+jSun1Nb3VcKVKHgL6Nn4bKusTOwNpmSPRF39Ljl7BvYt/z7uunzN3ebheldYuvACQC6OMbg4i5PQRXxrd5XpOk0Tp/U6zCq88PtcuAkpIVvhxnM2wi3pOxkNRbP34hl726Fx+2DalOg6xJCNK6e1jQd/Qbl4ZqbRmPitMFQlNgqIV9XU1uPd5YV4d1l21Hp8kBVleY1EUI07qv8LumYdWUhZlw6BPYYOX11Lr6GABZu3In564pQXFENm6I0L85UhEBA19EpNQk3jR+KOeOGGnLFTUelS4n39u/FS19ux47Tp85eIdI4ud60r5Lj43HToCGYO7QQuSmh/7xHgzWnvsIrBzfh87JDZ8ubgC4lVCGgSR1xioqr84dgbp/R6JPa2ey4pjpYdxjLTn6MLyq3QkJCQEBCQoFofh+ekenDcEXOFPRP7Wt2XFPV+I9hf/VbOFz7ATTph4ACCb25fEhoyE4cgX7OOeiadHFYJSSY8TsmikgTb70fa5bvwr5dJ+Cu9SIuzob0TsmYeMVg9OrfxYDE0SOg6fh8yyFs2XEMNXVeKIqAMzURl4zqg2ED82Lq9EJrpJTYcugEPt11CFXueuhSwulIwJg+3XDJwJ6wxdjsWmt2l53G+1/tR4XbDb+mIcVux/AuuZjepy8SbNG/2DIYxXWVWFy8Ayc9LtRrDUiJS0BfZ2dcnT8Yzvjov/IqGK6GGqwpX49T3tOo1+qRoCYg256FCVnjkBEfO6et2qJBr8PR2uWo8h2AX6+DTdiRaOuMgpRpSI3vbshzsIgQERGRaYIZv3moRkRERKZhESEiIiLTsIgQERGRaVhEiIiIyDQsIkRERGQaFhEiIiIyDYsIERERmYZFhIiIiEzDIkJERESmYREhIiIi07CIEBERkWlYRIiIiMg0LCJERERkGhYRIiIiMg2LCBEREZmGRYSIiIhMwyJCREREpmERISIiItOwiBAREZFpWESIiIjINCwiREREZBoWESIiIjINiwgRERGZhkWEiIiITMMiQkRERKZhESEiIiLTsIgQERGRaVhEiIiIyDQsIkRERGQaFhEiIiIyDYsIERERmYZFhIiIiEzDIkJERESmYREhIiIi07CIEBERkWlYRIiIiMg0NrMDEHBoTwk+fmczykur4fP6kZyaiIIBuZg6axTSOqWYHc9SSk9W46PlO1BSWgVPvR+OxHh0zU3H9CuGoEtOmtnxLOVMjRvvbdiNr05UoLbeh0R7HLLTUzBjzED0zcsyO56l1Pl8WLJrH7afKIXL60O8qqJTkgPTB/bDhd26QghhdkTL8GkBLCvejzWlR1Dt80IRAun2REzJ743JXXvDpvD4tokmdWyp3IfPK3bC5a+DJnWkxjkwLL0PLskaBrsab3ZESxBSSml2iPOpqamB0+mEy+VCamqq2XEMJaXE6g+KsOhfq3Hgy2KoNgW6JiGlhKIISACKIjBh+jDM+sEk9BrY1
ezIptqy7SjeemcTNm890rh/ZOM+FEJACEDXJUaNLMCNs0Zj+LDuZsc11d7i03hl5RZ8vO0r6BIQAHQpIQSgCAFNlxjUIwe3XDocU0f0jelBtriqGi9+sRXv7tgDXyAAIUTjvgKgKAo0XUdBRjrmXliI2cMGIU5VzY5smjNeD/61dzNeP7AdLr8XqlCgSR0Amv/cOTEJt/Ubgdv7j0BynN3kxObxan68V7IOi0vWotxX/Y19pUCBDh1JagKuzB2L6/MuQabdaXJi4wUzfrOImCDQoOHZ/3obKxZsglAEpH7+/wlUVQEE8LOnb8HEGYXtmNIapJR47c0N+NfLa6EoAnoL+6rp83ffcQluvmF0TA6wSzftw6MvLwMAaC3tq7MD7rXjB+GRmy6FTY29o9jNxSfwgwWL4W0IQGvh12DTq2hcQTc8e90MJNtj7yj2oOsMbv34TZTV17W4rwBAgUAvZyZenTIHOY7Ym9Gt8tfiVzv+joN1JZBobV8pcMYl4YmhP0Sv5Og62Axm/I693z4mk1LimYffwsq3NzX+vYXBAgA0TYem6fjTj1/F58t2tEdES3n9rY3418trAaDFEvL1z/9j3mq88fYXEc9mNSu3HsCv5i2FpssWSwjQOEMCAIvX78LvXv8YFj4eiYgvS07ijjfeRX0rJQQA5NmPDUeP4wcLFsMfCLRLRqs4UefCDctfa1MJAQAdEodrzuCG5a+jylffDgmtwx3w4udFz+FQXWmrJQQAdOhwNbjx0+1/xQlPWTsktCYWkXb20fwN+OTdLQjq9/7Z6fU//uhVnDp+JlLRLGdb0TH886U1IX3t319cjaIdxQYnsq7j5dX41bylCHYOSErgvQ27sejzXRHJZUUefwPuXrAYAV1vLmRtoUuJbSdK8fTq9RFMZy1SSnx/1UK4/N42lZAmmpQocbvwk3XvRzCd9TyzfwGOe8qgQ2/z1+jQUa/58aud/2g+fRNrWETaka7rePuFTxH0aIHGAUPXdXz4euz8EnzrnU1QlNBOr6iqggXvbDY4kXUtXLMDUrblGOy7BIBXVm6JmVmR93bvRXW9N6gS0kSXEvO3fQm33x+BZNbzRdlx7KsuD6qENNGkxGelh3G4pjICyayn3FuN1eVF0EP4KdSho7S+Alsq90YgmfWxiLSjL9cfxOnjlQhptACgaxJL52+A39dgbDALOnnKhS82H271dMz5aJqODZsO4nRZjcHJrMfrD+DddTtbPR1zPhJAcVk1tn51wthgFiSlxCubt4dyLNDM2xDA+7v2GZbJyl7ZtxWqCH2YUIXA6we2G5jIuj48uSGs15UCBYtPrDMsT0fCItKOlr65EUqYiwLdtV6sXxH90+jLVu4IeTakiRACS1dE/7qaVUUH4faGd4SuKgLvrt1pUCLr2nXqNA5WVIZ6LACgcQZp/rYvjYpkWdW+eiw7fiCs0wWalHjzqy8R0KP7lIOUEh+Urg9pNqSJDh1bqvah3FttXLAOgkWkHZ04dBq6Ft4PpKoqOHmswqBE1lVSWh32NgQaZ1ai3fHy6rCvetF0iWNlVQYlsq7iqvBfDxJAcXX0v65OuF0hnb76NnfAj+ooX7Tq0xvgaqgzZFunvLGzDrAJi0g7qnf7wt6GUIQh27G6+np/yKdlmmi6hMcT/efyPT5jvse6+hjYV35jTmt6G6L/yhlPg3GngN2B6H5t1WvG/U72GLitjoJFpB05khPC3oaU0pDtWF1iYnzYp2ZUVcDhiP57Pjjs8SGvO/q65MQY2FfxcYZsJzHOmO1YWVKcca+HaL+5mUM17vtLUqP/9/u3sYi0o259csJeI6IFdHQtiP7bc3fLywh7G1ICeV3TDUhjbd2z08M+B68qAj27ZBqUyLoKMsN/PQgAPTLSwt6O1eUlOWELY6Fqk9Q4O9Lio3twtavxyIgP/6abAgJdEqP/5/DbWETa0bSbxoS9RiQlzYExUy4wKJF1XTF1cHD3WjkHKYFpUwcbE8jCJg3th
ZTE8I7INF3i+gnRv68GZnfGgOwshDPZJgHcMmKoYZmsymlPwPTu/aGGcYdiVQjc1HcY1Bh4/5kZueOhhHHdjAIFozMHRuXt3lsT/a8OCxk8ulfjbEaIr1VFEbjy5nGIi4/+9yrsnJWKcaN7hXEfEYEJ4/qgU2b032I6Ps6G6ycMDnlfCQEU5GRgaM9cg5NZ020jCxHO8qOk+DhMH9jPuEAWdmu/4SHdQ6SJLiVu6RMbb00xrcvosL5eh46ru15kUJqOJWJF5Pe//z3GjRsHh8OBtLS0SD1NhyKEwOx7Jod0Pl8IwBZnw/RbxhofzKLmzBoVxn1EJG64/kKDE1nXrIuHwqaEdjwmJXD71JEx89480wf2Q6ckR0hH+gLA90YOi4k1IgAwIqsrhmZ2CWlfKUJgan4fdEtJMz6YBWXanZicPSKkWREFCro5sjEivW8EkllfxIqI3+/H7Nmzce+990bqKTqkqbNHYfot44L6GiEa/8+v/jYXWbnRv+ahyZBB+bjvh5ND+tof3TsFgwbmGZzIunIzU/Gnu6cD4uzrJQg3XDIUV40ZGJlgFpQQZ8M/b7wWcaoKJYidpQiBcQXd8KMJsXMwIITA3ydeh8yEpKDKiCoEClIy8Odx0yOYznp+1HcWeibnBlVGVKEg2ZaI3w/+ARQD1uR0RBH7rh9//HH85Cc/weDB0X/eORhCCNz7+HW45vYJAABFbfkFq6oKbHE2PPr8HRg1KXYGiyazr70Q999zKYDG0y0tafr8j+6dguuuGRHxbFZzyZBeeOoHM2BTFKitnKZp+vwtlw7Hz2+YGDOzIU0GZnfGq9+bjRR7fKsDbFNZmdynJ/5v1tWIU9X2iGgZ2Y4UvHP595CX5Gy1uImzHwPSO+OtqTcjNcoXqX5bomrHk0P/AwNSezTvi5YoEMiIT8VfCh9ATmL4C/Q7KiEj/AYTL730Eh588EFUV1e3+lifzwef79/XUNfU1CA/P79NbyPcEW38eDcWz1uDL9d/1Xw1jdQlFFVA13TExdsw5foLMfPOi5HfK9vktObatecEFry7BevWHwDQWOh0XUJRRPN7pFx8UT/MnjkSFwyMrrfTDtbhk2fw+qfb8OHGvWgIaFAVBbqUzTMlmi4xun833Dy5EBMG9zQ3rMlO1dbh1c3b8eb2naj1+WBr2lcAIAQ0XcegnM649cJCXH1B/5hYdHk+Lr8Xr+3fjlf2b8Xp+jrYhNJ8J1EFAgGpo1tyGm7vPwI39xmGBFtsnL46F78ewLKTG7HoxFqcqC+DKpTm31NCCGhSR1pcMq7uehGu6XoRUuOSTE5svJqaGjidzjaN35YqIr/5zW/w+OOPf+ffo7WINCk5Uo5PFm1Bxclq+Oob4EhJQM8BuZg8cwSSUhPNjmcpZ87UYdnHu1BSWgWPxweHw468rum4fMogZGYkmx3PUmo9Xny4aR++KilHXb0fCfE25KSnYProAejWOXZO8bWFLxDA0r0HsO1EKWq8PsSpKrKSHbhyQD8M6hLbBwHfpuk6VpUcwpqTR1Dt80IRAmn2BEzN64OxOd1jbnatJVJK7HIdxrqKHXA1uKFJHSk2BwrT+2Bs5iDYlOidXYtYEXn44Yfxpz/9qcXH7N27F/3792/+O2dEiIiIYkswRSSo60B/+tOf4vbbb2/xMT17hj7Va7fbYbdH9x34iIiI6N+CKiJZWVnIyor+u3oSERFR+4jYnbGKi4tRWVmJ4uJiaJqGoqIiAEDv3r2RnMxz+URERBTBIvLoo4/i5Zdfbv57YWHj3fVWrVqFiRMnRuppiYiIqAOJ+FUz4QhmsQsRERFZQzDjd+xeFE9ERESmYxEhIiIi07CIEBERkWlYRIiIiMg0LCJERERkGhYRIiIiMg2LCBEREZmGRYSIiIhMwyJCREREpmERISIiItOwiBAREZFpWESIiIjINCwiREREZBoWESIiIjINiwgRERGZhkWEiIiITMMiQkRERKZhESEiIiLTsIgQERGRa
VhEiIiIyDQsIkRERGQaFhEiIiIyDYsIERERmYZFhIiIiEzDIkJERESmYREhIiIi07CIEBERkWlYRIiIiMg0LCJERERkGhYRIiIiMg2LCBEREZnGZnYAsia/twFrlmxB0eq9qK12Q1EUpGYkY9z0YRg5ZTBUlR22iabp2LThINavPQBXtQdSl0hJTcDwC3vi4kkDEG/nj1kTKSWKDpRg5Yb9OONyw9+gITXJjkG9u2Da+IFIdtjNjmgpB05VYNH23ThZXYt6fwNSEuzok90J1424AFkpSWbHs5TjtS68vW8nDruqUOf3IykuHt1T03BD/0Ho4Uw3Ox61QEgppdkhzqempgZOpxMulwupqalmx4kJVWU1ePf/VuCjl9bAXVMPRVWgazoAQLUp0AI6OuWm4+q7J+Hq709GQlLsDhz1Hj8WL9yMJQs340xFHVRVgda0r87+OTklAVfNHI7r5oxGekbsDhwBTcd7n+3Em8u349jJSqiKgK5LSACKIiB1ibg4FdMnXIBbrhyB/OzYHTiklFi++yu8/Pk2fHn8ZOO+khJSAooQzY+bekEf3DlhBAZ1zTExrfk2lBbj70Wb8dnxI1CEgASgSwlFCAgAmpS4qGt33D10JC7JLzA7bswIZvxmEaFmR/eU4JfX/wXVFbXN5eN8hCJQMDAPv3v7x8jIdrZTQus4U1GLR37yBo4eKYfUW/4RUhSB9Iwk/Ol/b0H3gqx2SmgdHq8fjzz7ATbuOAoBoKW9pSoC8XE2PPWTazDygm7tFdEyNF3H7z5Yhbc27YAiGgvI+aiKgJTA76+bimsKB7ZjSmuQUuIfO7bgDxtXQxUCWkv76uznHxwxDj8eMRbia4WOIiOY8Zvz6wQAKD1Shp9d9WSbSggASF3i6N4S/OfVT6HO5WmHhNZRW1OPn973KoqPtl5CAEDXJaqq3PjJva/gZGlVOyS0jkBAw8+eXoJNO48BaLmEAICmS3j9Afz4z+9ix4HSyAe0ECkl/vu9T7Bg0w4AaLGEAI37SpcSj7yzHB9+ua89IlrKv3ZuxR82rgaAFkvI1z//zNb1+N+tGyKejYLDIkLQdR2P3fgs6mu9bSohzV+n6Sg9XIanH3gpcuEs6M+/fx8nS6qgaW2fTNQ1CY/bi1//7C1YeBLScC+8sx7b9h1vdVD9OikldF3iof9ZhLp6XwTTWcvi7Xvw9pZdrZa1bxMAHnlnOY5WxE7J3XKqBL/b8FlIX/vM1vVYffyIsYEoLCwihG2r9uD4V6ea1zcEQ9d0rP9wO04VV0QgmfWUnqjEhrUHoLdhJuTbNE3i2NEKbN961PhgFlTvbcDbK4sQSu/SpUSdx4dln+81PpgFSSnxr7VbEMoJAwlAQmL+F0UGp7Kuf+7YAjXE0yuqEPjnji0GJ6JwsIgQ3v/nKihhXAWjKAqWvrzGwETW9eGS7VCU0M8vq6qC9xbGxi/BlRv3od7XENY2FqzYHhMzSNuOleJweWXQsyFNNF3inS274fGHt787gtPuOqw4erDV0zHno0mJtSeO4Zir2thgFDIWkRhXedqFTSt2BnVK5tt0TceH81ZH/YCh6xIfLtkW0mxIE03TsX7dAVRVug1MZk3vfroD4awJlACOnazCroMnDctkVQu37oQaRsEFgPqGBizfdcCgRNa18MDusLehCoG39u00IA0ZgUUkxp0urjCkQNRVe+Cp9RqQyLrq6rxw14W/ZkHqEmWnXAYksrbjp6tDOi3zbSfKqsPfiMUdKa+CFkbBBQCbouBEZfS/ro65qsIeuCSAYzXVBqQhI7CIxLh6t3GLAevroruIeD1+w7bl8UT/IkyfQacJPPXRf7rB7TPmteX2G/catSp3QwNCn79tpEuJuobo/xnsKFhEYlyigTckc6QkGrYtK0p0xBu2LUcM3AguIT7OkO0kJRq3360qOcGY7zEpPvr3VVJcHJSQlvX+myIEUuKj/2ewo2ARiXFdemRBhHluGgCcnVKQmBzdP9hJyQlIS
Q2/bCmKQE6XtPADWVz33AxDbhzVrUv032W1d+fMsNeIBHQd3TtF/77qmZYBPeRlvY0EgJ687btlsIjEuLSsVIy7chhUWzhXzQhcdcclUX+3QkURuGrm8LCvmpkwsT+caQ4Dk1nT9ZcODWv9kRBAr7xMDCjINjCVNc0aOTjsNSJJ9nhMvaCPQYms6/q+F4Q5H9J4auaG/oMNyUPhYxEhzPj+JGiB0M+6SgBX3DbBuEAWNv2awrCvmplx/UgDE1nXpaP6hnVaRUpg9mWFUV9wAWBIXg76ZncK+SojVQjMHjkYCXHR/waLWY4kTOvZN6z7iEzML0BeSuy9NYVVsYgQhk7oj4IL8kJ6R11FEbh45khkdc2IQDLrye6ShosnDwhpVkRVBXr1ycaQYbHxHir2eBtuvHx4SEeviiKQlpKIy8f1NzyXFQkh8P2LLwzpKiOBxv110+ghhueyqu8PGRnU3Xq/TpMSdw+90OBEFA4WEYIQAo+/cT+S05OCurGZoiro3j8XP/7LbRFMZz0/feQqdOvRKagyoqoCqU4HfvvnOTFxhN/kzpljMHZoj2+8a2xrFCEQp6p45ufXwWHQIs6O4Kqh/XHbuMKgvqZpr/7PnOnIz0gzPJNVDevcBb+dMCWkr31k9MUY1zU2DgY6ChYRAgB0zsvE00t/gU656a0PsKLx/H3fwh7403s/gyMloX1CWoQjyY6n/nor+vTvAnF2X7REKAJZ2U785fm5yOocW+8ibVMVPPGjGbhkRC8AaLWQqIpAUmI8nntkVkysDfm2/7ziEtx50QgAaPXUg6oIqIqCp2+cjikDe7dHPEv53sBh+O1FUyDQhn119vO/HHMJfsDZEMsR0sK3wwzmbYTJGLVVbrz3z1V4/5+rUF1eA9WmQuo6IASEALSAjq69snHNDybjilsnID7BmEs0OyK/L4CP3tuOxW9vRsmJSqiq0rw4UwgBTdORkZmMq68fiauvG2HIFTcdla5LLN+wFwtWbMeew6ehKgoACV02nlbQNB1JifGYOWkw5kwdjuzMFLMjm2rNgSN4df12rD94rLG8icYFlopQoOs64lQVM4YNwG3jCtEnu5PZcU21/fRJvLhzCz46fAASgAIBTUqoAs33G7msR2/cNXgERnXJMzNqTAlm/GYRoXPSAho2LvsS21fvRW2VG4qqwJmRjHFXFWLwuL4xdXqhNVJK7Nh+DJ+vOYCaag90KZGSmojhIwswZnyfsK5Iikb7j5Zh5cZ9OOPywN+gISXJjsG9u+DS0X0Nu/dItCg+U40lRXtwsroW9Q0NSE2wo092J8wYNgDOxNiaiWxNuceNdw7sxhFXFer8PiTFxaN7ahqu73cBcpJiu9iagUWEiIiITBPM+M1DNSIiIjINiwgRERGZhkWEiIiITBOxInL06FHcddddKCgoQGJiInr16oXHHnsM/hh4d0giIiJqm4jdD3jfvn3QdR0vvPACevfujV27duHuu++G2+3GU089FamnJSIiog6kXa+a+fOf/4y//e1vOHz4cJsez6tmiIiIOp5gxu92fYckl8uFjIzzvyeJz+eDz+dr/ntNTU17xCIiIiKTtNti1YMHD+LZZ5/FD3/4w/M+5oknnoDT6Wz+yM/Pb694REREZIKgi8jDDz8MIUSLH/v27fvG15SUlOCKK67A7Nmzcffdd59324888ghcLlfzx/Hjx4P/joiIiKjDCHqNSHl5Oc6cOdPiY3r27In4+MZ3zSwtLcXEiRMxZswYvPTSS1CUtncfrhEhIiLqeCK6RiQrKwtZWVltemxJSQkmTZqEESNGYN68eUGVECIiIop+EVusWlJSgokTJ6J79+546qmnUF5e3vy5nJycSD0tERERdSARKyIrV67EwYMHcfDgQeTlffOtly38PntERETUjiJ2ruT222+HlPKcH0REREQA32uGiIiITMQiQkRERKZhESEiIiLTsIgQERGRaVhEiIiIyDQsIkRERGQaFhEiIiIyDYsIE
RERmYZFhIiIiEzDIkJERESmYREhIiIi07CIEBERkWlYRIiIiMg0LCJERERkGhYRIiIiMg2LCBEREZmGRYSIiIhMwyJCREREpmERISIiItOwiBAREZFpWESIiIjINCwiREREZBqb2QGIgnHy8Gkse/FTlBw8ifpaLxJTEtC1dxdccddkdCnINjuepVRV1GHF4q04cuAU3HVe2BPikZXjxGXXFKJnvy5mx7MUt8eHFav3YOe+UtS5fYizqUhPc+DSi/pj2AV5EEKYHdEy/IEAVn55EBv2H4PL44UigLSkREy8oBcmDCyATeXxLQVHSCml2SHOp6amBk6nEy6XC6mpqWbHIRNt+3gH3n7qPWxZ+SUURYGUElKXEIqAEAK6rmPk5cNww8+uRuHkwWbHNdVXe0qwcN5arF25G1JKCAC6LiGEgKIIaJqOfoPzcN1t43Hx5YNjepAtPVWNN5ZsxtJPd8PvD0Ao4uy+AhRFgabp6NY1HbOuGoEZUwbDZlPNjmyayjoPXl29DW9/vgM19T6oioCmNw4fTX/ulJqEGy8ailsmFCIpId7kxGSmYMZvFhGyNCkl5v/hXbz06zehqAp0TT/vY5s+f9cfbsacX8yMyQF21Ydf4s+/WggBQGtpX50dcK+4fiQe+K+rocbgAFu0+zh+8ftF8PkamgfUc2l6GY0Y0h2//8U1cCTG3gB75HQlfvD8Oyh3uaG3MmQoQqCgcwaev+c6ZKclt1NCsppgxm/OoZGlvfHEIrz06zcBoMUS8vXP/+uX8/HWk0sins1q1izfiT89vAC6prdYQoDGGRIAWP7uFvzv44th4eORiNh94CQeenwhvN6WSwgASNn4sW1nMf7zd+/A3xBop5TWUFpZg9ufXYCKmtZLCADoUuJoeSXu+OsCVLvr2yEhdXQsImRZ2z/diXn/9UZIX/uvR17Hl6t3G5zIukqPn8GTDy8AgpwEkhJYsXgblr2zJTLBLKje68d//u4daJrepoG1ia5L7Nhbin+8vi6C6axFSokf/WsJauq9rRa2r9N0iZNVNfjl68simI6iBYsIWdbCp9+HEuLCN9WmYOHT7xucyLo+fGtT46AaysSGAN6etzZmZkVWrtmLmlpv86xQMKSUWLysCJ56fwSSWc/WQyU4UFoRVAlpoukS6/YexdGyqggko2jCIkKWdOpoGTYt3d7q6Zjz0QI6vvhgG8qKyw1OZj0+bwOWLtwMXQuxSEigtPgMdmw5YmwwC5JS4u0PtiGc5UNeXwAr1+w1LpSFvbGuCKoS+s5SFYG31+8wMBFFIxYRsqTl81ZBUcJ7eQpFYPm8z4wJZGHrP9kDj9sX1jZUVcHShZsNSmRd+w+dxtHjZxDO5I8QwOJlRYZlsiqX24tPdhwMaTakiaZLvLNxJwIhHlBQbGARIUsqPXQq7G0IACePnA4/jMWVFp+BagvvR1nTdJw4WmFQIusqOVUd9jakNGY7VldaVRPUGprz8fga4PJ4DUhE0YpFhCypvs4b8mmZJpquw1Mb/av26+v9EMGuUj0HT114syodQb23wZDt+HzRf+WMx2fcOhgjt0XRh0WELMmRkhjyQtUmqqrAkZJoUCLrSkyMhwxpleo3OZLtBqSxtsSEOEO2k2DQdqzMyBuS8eZm1BIWEbKkvL65YW9DSqBrn+i/lXleQSdogfBmj1RVQfdenQ1KZF3dumaEvQ0hgPzcdAPSWFtueipsYa7TAoCUBDucjgQDElG0YhEhS7r8jklhX04qpcTld0wyKJF1jZ08EMkp4f2i1zQdV84eZVAi6+pT0Bl9CjqHddddKYFrrxhmXCiLSnUkYOqwvmFdNaMIgVnjBkM1oNBQ9OKrgywpKy8TY64aASXERZiqTcH4maPQKTf8I2Cri4+34crZo6CEOGAIAeT3zMLAYd0MTmZN108vDKvkOhLicOmE/gYmsq4544eEddWMlBKzx8b2ez9R61hEyLJm//Tq0O8joumY9dAMgxNZ1/Q5o6Da1
JDujyElcMOdF8fMe/NMuag/MtIcIRU3IYDrpg9Hgj3614gAwLCCXAzqlh3SrIgiBCYN7oW8TmnGB6OowiJCljV4wgDc+z+3h/S19z1zJy4Y18/YQBaWnZuOX/3PjQBE0IVixo2jMeXqwsgEsyC7PQ5PPToLcTY1qDKiCIGRQ7vjrhvHRTCdtQgh8MydVyMj2RFUGVEVge5Z6fjtTZdHMB1FCxYRsrTrHpyOe/9yOyDQ6r0yVJsCCOC+/70TMx+Y1j4BLWTMxAH49V9uhmpToLZyxZGiNg4q1946Hvc8fFXMzIY06VPQGf/vt3OQ5LC3Wkaa9s34C3vhDw/PhC3G3qm4szMZL/9oDnLTU6G08joRZz/65mbhxftnIyUx+q/EovAJaeE3mAjmbYQpuu1evx/v/OUDfL54E4DGwUHXdSiKAqnrgBCYcN1oXPfgdAwcGzszIedy7FAZFr/6OT5+vwiBhgAUVYGuSyhCQKLxXYoLx/TCzFvHYfTFsbHW4XzKz9Ri4Yfb8N7yHajz+KCe3VdCNL7GNE1Hv17ZmDV9OC67eECrBS+a1dR7seDzHXhjbRHKa9ywKUrzDc8UIRDQdeRlOnHzhGGYNXYIEuJtJicmMwUzfrOIUIdy5mQVVr78GUoOnoKnth6OlER07dMFU+degoyc6L+kMhh1NfX49IMiHDlwCu46H+wJccjKceLSGcPQtXsns+NZis8fwKr1+7FrbylqPV7E2VRkpCXh0ov6oV+vHLPjWYqm61i75wjW7z8Gl8cLRQg4HQmYNLgXRvXOj7nZNTo3FhEiIiIyTTDjd+zOMxIREZHpWESIiIjINCwiREREZBoWESIiIjINiwgRERGZxtIXejdd0FNTU2NyEiIiImqrpnG7LRfmWrqI1NbWAgDy8/NNTkJERETBqq2thdPpbPExlr6PiK7rKC0tRUpKiuE3yampqUF+fj6OHz/Oe5S0gvuq7biv2o77qu24r4LD/dV2kdpXUkrU1tYiNzcXitLyKhBLz4goioK8vLyIPkdqaipfqG3EfdV23Fdtx33VdtxXweH+artI7KvWZkKacLEqERERmYZFhIiIiEwTs0XEbrfjscceg93Ot6luDfdV23FftR33VdtxXwWH+6vtrLCvLL1YlYiIiKJbzM6IEBERkflYRIiIiMg0LCJERERkGhYRIiIiMg2LCICrr74a3bp1Q0JCArp06YJbb70VpaWlZseynKNHj+Kuu+5CQUEBEhMT0atXLzz22GPw+/1mR7Ok3//+9xg3bhwcDgfS0tLMjmM5zz33HHr06IGEhASMHj0amzZtMjuS5axZswYzZsxAbm4uhBBYvHix2ZEs64knnsCFF16IlJQUdO7cGTNnzsT+/fvNjmVJf/vb3zBkyJDmm5iNHTsWS5cuNS0PiwiASZMmYcGCBdi/fz/eeecdHDp0CLNmzTI7luXs27cPuq7jhRdewO7du/GXv/wFzz//PH75y1+aHc2S/H4/Zs+ejXvvvdfsKJbz1ltv4aGHHsJjjz2Gbdu2YejQobj88stRVlZmdjRLcbvdGDp0KJ577jmzo1je6tWrcd9992Hjxo1YuXIlGhoaMHXqVLjdbrOjWU5eXh7++Mc/YuvWrdiyZQsmT56Ma665Brt37zYnkKTvWLJkiRRCSL/fb3YUy3vyySdlQUGB2TEsbd68edLpdJodw1JGjRol77vvvua/a5omc3Nz5RNPPGFiKmsDIBctWmR2jA6jrKxMApCrV682O0qHkJ6eLv/5z3+a8tycEfmWyspKvP766xg3bhzi4uLMjmN5LpcLGRkZZsegDsTv92Pr1q2YMmVK878pioIpU6Zgw4YNJiajaOJyuQCAv59aoWka3nzzTbjdbowdO9aUDCwiZ/3iF79AUlISMjMzUVxcjCVLlpgdyfIOHjyIZ599Fj/84Q/NjkIdSEVFBTRNQ3Z29jf+PTs7G6dOnTIpFUUTXdfx4IMPYvz48Rg0aJDZcSxp586dS
E5Oht1uxz333INFixZh4MCBpmSJ2iLy8MMPQwjR4se+ffuaH//zn/8c27dvx4oVK6CqKm677TbIGLnpbLD7CgBKSkpwxRVXYPbs2bj77rtNSt7+QtlXRNS+7rvvPuzatQtvvvmm2VEsq1+/figqKsIXX3yBe++9F3PnzsWePXtMyRK1t3gvLy/HmTNnWnxMz549ER8f/51/P3HiBPLz87F+/XrTpqraU7D7qrS0FBMnTsSYMWPw0ksvQVGits9+Ryivq5deegkPPvggqqurI5yuY/D7/XA4HFi4cCFmzpzZ/O9z585FdXU1ZyPPQwiBRYsWfWOf0Xfdf//9WLJkCdasWYOCggKz43QYU6ZMQa9evfDCCy+0+3Pb2v0Z20lWVhaysrJC+lpd1wEAPp/PyEiWFcy+KikpwaRJkzBixAjMmzcvpkoIEN7rihrFx8djxIgR+OSTT5oHVV3X8cknn+D+++83Nxx1WFJKPPDAA1i0aBE+++wzlpAg6bpu2pgXtUWkrb744gts3rwZF110EdLT03Ho0CH8+te/Rq9evWJiNiQYJSUlmDhxIrp3746nnnoK5eXlzZ/LyckxMZk1FRcXo7KyEsXFxdA0DUVFRQCA3r17Izk52dxwJnvooYcwd+5cjBw5EqNGjcIzzzwDt9uNO+64w+xollJXV4eDBw82//3IkSMoKipCRkYGunXrZmIy67nvvvswf/58LFmyBCkpKc3rjZxOJxITE01OZy2PPPIIpk2bhm7duqG2thbz58/HZ599huXLl5sTyJRrdSxkx44dctKkSTIjI0Pa7XbZo0cPec8998gTJ06YHc1y5s2bJwGc84O+a+7cuefcV6tWrTI7miU8++yzslu3bjI+Pl6OGjVKbty40exIlrNq1apzvobmzp1rdjTLOd/vpnnz5pkdzXLuvPNO2b17dxkfHy+zsrLkpZdeKlesWGFanqhdI0JERETWF1sn+ImIiMhSWESIiIjINCwiREREZBoWESIiIjINiwgRERGZhkWEiIiITMMiQkRERKZhESEiIiLTsIgQERGRaVhEiIiIyDQsIkRERGQaFhEiIiIyzf8HSkww4J2r1zoAAAAASUVORK5CYII=\",\n      \"text/plain\": [\n       \"<Figure size 640x480 with 1 Axes>\"\n      ]\n     },\n     \"metadata\": {},\n     \"output_type\": \"display_data\"\n    }\n   ],\n   \"source\": [\n    \"plotPinPow(multiPinBlock)\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 29,\n   \"id\": \"aec706b3\",\n   \"metadata\": {},\n   \"outputs\": [\n    {\n     \"data\": {\n      \"text/plain\": [\n       \"array([ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,\\n\",\n       \"       17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29], dtype=uint16)\"\n      ]\n     },\n     \"execution_count\": 29,\n     \"metadata\": {},\n     \"output_type\": \"execute_result\"\n    }\n   ],\n   \"source\": [\n    \"primaryFuel: Circle = multiPinBlock.getComponent(Flags.PRIMARY)\\n\",\n    \"primaryFuel.getPinIndices()\"\n   ]\n  },\n  {\n   
\"cell_type\": \"markdown\",\n   \"id\": \"5c98dcb0\",\n   \"metadata\": {},\n   \"source\": [\n    \"The ordering is worth discussing. `primaryFuel.getPinIndices()` being sequential `[0, 29]` would imply, at first, that all the `primaryFuel` pins reside in some sequence adjacent to each other. However, the lattice map has `primaryFuel` in the center of the block, and then in the second and third full rings. This ordering is still consistent with `Block.getPinLocations` and is a side-effect of\\n\",\n    \"\\n\",\n    \"1. How the hexagonal ascii maps are processed,\\n\",\n    \"2. How pin locations are discovered within a block,\\n\",\n    \"   - For each clad component, extend its spatial locators\\n\",\n    \"\\n\",\n    \"We can see the first \\\"pin\\\" location in our block is not the center, but the north west pin in the block.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 30,\n   \"id\": \"34f9f9ce\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"assert multiPinBlock.getPinLocations()[0].getRingPos() == (4, 4)\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 31,\n   \"id\": \"388c2c64\",\n   \"metadata\": {},\n   \"outputs\": [\n    {\n     \"data\": {\n      \"text/plain\": [\n       \"array([30, 31, 32, 33, 34, 35, 36], dtype=uint16)\"\n      ]\n     },\n     \"execution_count\": 31,\n     \"metadata\": {},\n     \"output_type\": \"execute_result\"\n    }\n   ],\n   \"source\": [\n    \"secondaryFuel: Circle = multiPinBlock.getComponent(Flags.SECONDARY)\\n\",\n    \"secondaryFuel.getPinIndices()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"99bff8cd\",\n   \"metadata\": {},\n   \"source\": [\n    \"The component level pin plotter shows that we can still collect the same power profile by connecting\\n\",\n    \"\\n\",\n    \"1. `Block.getPinLocations`\\n\",\n    \"2. `Block.p.linPowByPin`\\n\",\n    \"3. 
`Circle.getPinIndices`\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 32,\n   \"id\": \"125a9793\",\n   \"metadata\": {},\n   \"outputs\": [\n    {\n     \"data\": {\n      \"text/plain\": [\n       \"<matplotlib.legend.Legend at 0x1baf72d34d0>\"\n      ]\n     },\n     \"execution_count\": 32,\n     \"metadata\": {},\n     \"output_type\": \"execute_result\"\n    },\n    {\n     \"data\": {\n      \"image/png\": \"iVBORw0KGgoAAAANSUhEUgAAAiIAAAGdCAYAAAAvwBgXAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjAsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvlHJYcgAAAAlwSFlzAAAPYQAAD2EBqD+naQAAOQNJREFUeJzt3Xl4VOXd//HPmUkme8IWCIEAYVFAFimKIlVRKC51QSpYWisguP3ASuljC60bj1ZsxaXlse4GulC1VMANRSloKwqyRERwARMSwhZISCB7Zs7vDyQaEUgyZ3KfzLxf1zVXneTM93xzd8J8cp/7nGPZtm0LAADAAI/pBgAAQOQiiAAAAGMIIgAAwBiCCAAAMIYgAgAAjCGIAAAAYwgiAADAGIIIAAAwJsp0AycSCAS0a9cuJSUlybIs0+0AAIAGsG1bhw4dUnp6ujyeE895uDqI7Nq1SxkZGabbAAAATZCfn6/OnTufcBtXB5GkpCRJR36Q5ORkw90AAICGKC0tVUZGRt3n+Im4OogcPRyTnJxMEAEAoIVpyLIKFqsCAABjCCIAAMAYgggAADDG1WtEAAAtl23bqq2tld/vN90KQiA6OlperzfoOgQRuNaOwwdUVlsVVI2EqBh1TWzrUEfutadyjyr9lUHViPXGKi02zaGO3Otw9Q7V2OVB1Yi24pXo6+pQR+5l1+ZKdlmTXltdY2v33lpVVHgkK9rZxuAKlmWpc+fOSkxMDKoOQQSutOPwAV26Yp4jtV4fcWtYh5E9lXs06+NZjtSa039OWIeRw9U7tDx/tCO1RmUsCeswYtfmyt4/qkmvDdhe5Rz4X0X5MpSeFqfo2HR5PDEOdwiTbNtWYWGhdu7cqV69egU1M0IQgSsFOxMSqlpuFOxMSKhquVGwMyGhquVKTZwJkaRqf6oCaqWOaXGKj/NIXp8sT6yDzcENUlNTlZubq5qaGoIIAMBFbI8kS54G3pqDw7Atk1O3XiGIAACM4TAsOH0XAGBMOByGzc3NlWVZys7ONrL/lo4ZEQAAgpCRkaHdu3erXbt2pltpkZgRAQCgiaqrq+X1epWWlqaoqOb/276mpqbZ9+k0gggAAF8ZPny4pk2bpmnTpiklJUXt2rXTnXfeKdu2JUndunXTvffeq+uuu07Jycm68cYbjzk0s2rVKlmWpTfffFODBg1SXFycLrzwQu3bt0/Lli1Tnz59lJycrJ/85CcqL//67Ks33nhD3//+99WqVSu1bdtWl112mbZv3173/aP7eeGFF3T++ecrNjZWTz31lJKTk7Vo0aJ6P8eSJUuUkJCgQ4cOhX7QgkQQAQDgGxYsWKCoqCitXbtWf/zjH/Xwww/rmWeeqfv+3LlzNXDgQG3cuFF33nnncevcc889+r//+z+tXr1a+fn5GjdunB599FEtXLhQr732mpYvX655875eqFtWVqYZM2Zo3bp1WrFihTwej6666ioFAoF6dWfOnKnbbrtNW7du1ZgxY/TjH/9YWVlZ9bbJysrS1VdfraSkJIdGJXRYIwIAwDdkZGTokUcekWVZOvX
UU/Xxxx/rkUce0Q033CBJuvDCC/XLX/6ybvvc3NzvrHPfffdp2LBhkqTJkydr1qxZ2r59u7p37y5Juvrqq7Vy5Ur9+te/liT96Ec/qvf65557TqmpqdqyZYv69etX9/Xp06drzJgxdc+nTJmic845R7t371bHjh21b98+vf7663r77beDH4xmwIwIAADfcPbZZ9e7RsbQoUP1xRdf1N0z54wzzmhQnQEDBtT9d4cOHRQfH18XQo5+bd++fXXPv/jiC40fP17du3dXcnKyunXrJknKy8urV/fb+x8yZIhOO+00LViwQJL0t7/9TV27dtV5553XoD5NI4gAANAICQkJDdouOvrre+xYllXv+dGvffOwy+WXX66ioiI9/fTTWrNmjdasWSPpyILYk+1/ypQpmj9/vqQjh2UmTZrk2AXHQo0gAgDANxwNAEd98MEHQd9P5WQOHDigzz77THfccYdGjBihPn36qLi4uMGvv/baa7Vjxw796U9/0pYtWzRhwoSQ9eo01ogAAPANeXl5mjFjhm666SZt2LBB8+bN00MPPRTSfbZu3Vpt27bVU089pY4dOyovL08zZ85s1OvHjBmj22+/XaNGjVLnzp1D2K2zmBEBAOAbrrvuOlVUVGjIkCGaOnWqbrvtNt14440h3afH49Hzzz+v9evXq1+/fvrFL36hBx98sFE1Jk+erOrqal1//fUh6jI0mBEBAOAboqOj9eijj+rxxx8/5nvfdYZMt27d6q4zIh25Fsk3n0vSxIkTNXHixHpfu+eee3TPPffUPR85cqS2bNlSb5tv1vn2fr6toKBAbdu21ZVXXnncbdyIIAIAQAtWXl6u3bt364EHHtBNN90kn89nuqVG4dAMXCkhKsaVtdwo1hvrylpuFG3Fu7KWK1kNOzOkYbX4qAmlP/zhD+rdu7fS0tI0a9Ys0+00mmWfaJ7HsNLSUqWkpKikpETJycmm20Ez23H4QNB300yIiomI24LvqdyjSn9lUDVivbFKi01zqCP3Oly9QzV2+ck3PIFoK16Jvq4OdeRedm2uZJc1+nWVlQHl5tvq1q2LYuPiZFnH/2Ngy8FdGvvOU0F0+bV/nn+j+rZKd6QWTq6yslI5OTnKzMxUbGz9P2Ia8/nNoRm4ViQECKdEQoBwSiQECKdYUd2a9jp/pWTlyPLEnjCEABKHZgAABnEYFsyIAACM6ZrYVq+PuJXDsBGMIAIAMIoAEdkIIgAA1zlYWaFFX3yij/bv0aGqSsVERalDfKKu6tlXp6d2bDH3UcHJEUQAAK7xefF+PbXpQy3ZvkW1gcCRG8PZtixJXsujBVs2qk+bVF3fb7Cu7tVPHgJJi8diVQCAK6zI267LlvxFL237RDWBgGxJga+uMGFLqrWP3Kn2s6JC3f7uG5q64mVV1taaa9gFVq1aJcuydPDgQdOtNBlBBABg3Ls7czTlrcWq9vvlP8nlrQJf/e8buV9o2r9flj8QOOH2cDeCCADAqP0VZbrx7SWSbasxV9gMyNZbedv1xKa1oWotIlRXVxvdP0EEAGDUC599rKpav5o6r/Hs5nWqCfgd6WXRokXq37+/4uLi1LZtW40cOVJlZUeuLvvMM8+oT58+io2NVe/evfXnP/+53mt37typ8ePHq02bNkpISNAZZ5yhNWvW1H3/8ccfV48ePeTz+XTqqafqr3/9a73XW5alZ555RldddZXi4+PVq1cvvfzyy/W2ef3113XKKacoLi5OF1xwwTE34Ttw4IDGjx+vTp06KT4+Xv3799c//vGPetsMHz5c06ZN0/Tp09WuXTtddNFFuv7663XZZZfV266mpkbt27fXs88+26SxbCiCCADAGH8goAVbNirQqLmQ+g5UVmj5jm1B97J7926NHz9e119/vbZu3apVq1ZpzJgxsm1bf//733XXXXfpd7/7nbZu3ar7779fd955pxYsWCBJOnz4sM4//3wVFBTo5Zdf1kcffaRf/epXCnx12Gjx4sW67bb
b9Mtf/lKbN2/WTTfdpEmTJmnlypX1epg9e7bGjRunTZs26dJLL9VPf/pTFRUVSZLy8/M1ZswYXX755crOztaUKVM0c+bMeq+vrKzU4MGD9dprr2nz5s268cYb9bOf/Uxr19afNVqwYIF8Pp/ee+89PfHEE5oyZYreeOMN7d69u26bV199VeXl5brmmmuCHtsT4V4zAABHnegeJN/2zs4cXffGoqD257Usnd0xQwsvDe4Dc8OGDRo8eLByc3PVtWv9WwH07NlT9957r8aPH1/3tfvuu0+vv/66Vq9eraeeekr/8z//o9zcXLVp0+aY2sOGDdNpp52mp576+r4648aNU1lZmV577TVJR2ZE7rjjDt17772SpLKyMiUmJmrZsmW6+OKL9Zvf/EZLly7VJ598Uldj5syZ+v3vf6/i4mK1atXqO3+uyy67TL1799bcuXMlHZkRKS0t1YYNG+ptd9ppp2nChAn61a9+JUm64oor1LZtW2VlZX1nXafuNcOMCADAmNzSYgV7Aq7ftpVTUhx0LwMHDtSIESPUv39/jR07Vk8//bSKi4tVVlam7du3a/LkyUpMTKx73Hfffdq+fbskKTs7W4MGDfrOECJJW7du1bBhw+p9bdiwYdq6dWu9rw0YMKDuvxMSEpScnKx9+/bV1TjrrLPqbT906NB6z/1+v+699171799fbdq0UWJiot58803l5eXV227w4MHH9DhlypS60LF3714tW7ZM119//XHHyylcRwQAYEx5TY08lnXSM2VOpqy2JuhevF6v3nrrLa1evVrLly/XvHnz9Nvf/lavvPKKJOnpp58+Jgh4vV5JUlxcXND7l6To6Oh6zy3Lqju80xAPPvig/vjHP+rRRx9V//79lZCQoOnTpx+zIDUhIeGY11533XWaOXOm3n//fa1evVqZmZk699xzm/aDNAIzIgAAYxKifXXXCglGYrTPgW6OfPAPGzZMs2fP1saNG+vWUaSnp+vLL79Uz5496z0yMzMlHZnJyM7OrlvP8W19+vTRe++9V+9r7733nvr27dvg3vr06XPMWo8PPvjgmJpXXnmlrr32Wg0cOFDdu3fX559/3qD6bdu21ejRo5WVlaX58+dr0qRJDe4tGMyIAACM6Z7SJohlqkd4LUs9W333IZHGWLNmjVasWKFRo0apffv2WrNmjQoLC9WnTx/Nnj1bP//5z5WSkqKLL75YVVVVWrdunYqLizVjxgyNHz9e999/v0aPHq05c+aoY8eO2rhxo9LT0zV06FDdfvvtGjdunAYNGqSRI0fqlVde0UsvvaS33367wf3dfPPNeuihh3T77bdrypQpWr9+vebPn19vm169emnRokVavXq1WrdurYcfflh79+5tcOCZMmWKLrvsMvn9fk2YMKExw9dkzIgAAIw5J72L0hOSgqrht21d2+f0oHtJTk7Wu+++q0svvVSnnHKK7rjjDj300EO65JJLNGXKFD3zzDPKyspS//79df7552v+/Pl1MyI+n0/Lly9X+/btdemll6p///564IEH6g7djB49Wn/84x81d+5cnXbaaXryySeVlZWl4cOHN7i/Ll266F//+peWLFmigQMH6oknntD9999fb5s77rhD3/ve93TRRRdp+PDhSktL0+jRoxu8j5EjR6pjx4666KKLlJ6e3uDXBYOzZgAAjmrMWTOS9OSmtZqz9p0mz4y0j0vQ++NvVpSHv62DdfjwYXXq1ElZWVkaM2bMCbflrBkAQFgYd0p/JUb7mnwDuxsHnEkICVIgENC+fft07733qlWrVrriiiuabd/8PwcAMKp1bJyeu+hH8siSpxEn81qSLu/eW5P7nRG65iJEXl6eOnTooIULF+q5555TVFTzLSFlsSoAwLghaZ31l0uu1pTli1Xlrz3h6bwey1LAtnV1r36ac+6oJs+k4GvdunWTqZUazIgAAFxhWHpXvTlmoq7rO0jxUUeupxFleeSxLHm/ekjS4Pbp+vOFV+jB8y5
WtMdrsmU4gBkRAIBrdElupXuGjtDtZ5yrl7dv1ab9e1RaVaXYqCh1iE/U6J59dUrrdqbbhIMIIgCAkAhmqj8h2qfxvQdqvAY62BGc5NShnJAempkzZ47OPPNMJSUlqX379ho9erQ+++yzUO4SAGDY0cuUl5eXG+4EoXT0svFHr5XSVCGdEXnnnXc0depUnXnmmaqtrdVvfvMbjRo1Slu2bPnO69wDAFo+r9erVq1a1d2sLT4+XhYLSsNKIBBQYWGh4uPjgz7DplkvaFZYWKj27dvrnXfe0XnnnXfS7bmgGQC0TLZta8+ePTp48KDpVhAiHo9HmZmZ8vmOvc9PYz6/m3WNSElJiSQd9zbJVVVVqqqqqnteWlraLH0BAJxlWZY6duyo9u3bq6Ym+Dvjwn18Pp88DlxIrtmCSCAQ0PTp0zVs2DD169fvO7eZM2eOZs+e3VwtAQBCzOv1Br2GAOGt2Q7N3HLLLVq2bJn++9//qnPnzt+5zXfNiGRkZHBoBgCAFsR1h2amTZumV199Ve++++5xQ4gkxcTEKCYmpjlaAgAALhDSIGLbtm699VYtXrxYq1atqrtdMgAAgBTiIDJ16lQtXLhQS5cuVVJSkvbs2SNJSklJUVxcXCh3DQAAWoCQrhE53nnjWVlZmjhx4klfz+m7AAC0PK5ZI2LqTn4AAKBl4O67AADAGG56B+UWFevwV/cMaKpEn0/d2rR2qCN3yyktUlltcOOVEOVTZvJ3X9gvnBRU7FNFbdXJNzyBuKgYdYpr71BH7lVcvVM1gYqgakR74tTad/wzE8NFTe2XsgOHg6pheRIVHdXdoY4QDIJIhMstKtbIp+c7UuvtGyaGfRjJKS3SiFefcKTWistuDuswUlCxTzd+eJ8jtZ46846wDiPF1Tv1ly8nOlLruu7zwzqM1NR+qT17hzlSK63De4QRF+DQTIQLdiYkVLXcKtiZkFDVcqNgZ0JCVcuNgp0JCVUtNwp2JiRUtdB0BBEAAGAMQQQAABhDEAEAAMYQRAAAgDEEEQAAYAxBBAAAGEMQAQAAxhBEAACAMQQRAABgDEEEAAAYQxABAADGEEQAAIAxBBEAAGAMQQQAABhDEAEAAMYQRCJcos/nylpulRDl3M/oZC03iouKcWUtN4r2xLmylhtZnkRX1kLTWbZt26abOJ7S0lKlpKSopKREycnJptsJW7lFxTpcXR1UjUSfT93atHaoI3fLKS1SWW1w45UQ5VNmchuHOnKvgop9qqitCqpGXFSMOsW1d6gj9yqu3qmaQEVQNaI9cWrt6+xQR+5VU/ul7MDhoGpYnkRFR3V3qCN8W2M+v6OaqSe4WKQECKdEQoBwSiQECKdEQoBwCgEivHBoBgAAGEMQAQAAxhBEAACAMQQRAABgDEEEAAAYQxABAADGEEQAAIAxBBEAAGAMQQQAABhDEAEAAMYQRAAAgDEEEQAAYAxBBAAAGEMQAQAAxhBEAACAMQQRAABgDEEEAAAYQxABAADGEEQAAIAxBBEAAGAMQQQAABhDEAEAAMYQRAAAgDEEEQAAYAxBBAAAGEMQAQAAxhBEAACAMQQRAABgDEEEAAAYQxABAADGEEQAAIAxBBEAAGBMSIPIu+++q8svv1zp6emyLEtLliwJ5e4AAEALE9IgUlZWpoEDB+qxxx4L5W4AAEALFRXK4pdccokuueSSUO4CAAC0YCENIo1VVVWlqqqquuelpaUGuwmNvD3FKq+sDqpGfKxPXdJaO9SRe+UeKFZZVXBjlRDjU7e24T9W36WwvEz//GKzthYV6lBVpeKio9UxIUlX9+qnvm3bm27PVQ7XVuqNXdnaVJynQ7UVivZ41caXqFEdB2hQ60xZlmW6RSMKq3aryl8RVI0Yb5xSYzo61JG7VdTkyB8oC6qG15OguOhMhzpqGVwVRObMmaPZs2ebbiNk8vYU6+qZWY7UWvTApLAOI7kHinXxn+Y7UuuNn0+MqDDy8f69enLTWr2e85lsW5I
lBWxbliSP5dGzm9fr9NSOmtLvDF3W/dSI/ZCVpILyIv0t5z96tWCDqgM1smQpoK/H6qX8teqa0E7XdD1HozufqSiP13TLzaaward+/+l0R2r9uvejYR9GKmpytHHXCEdqDUpfEVFhxFVnzcyaNUslJSV1j/z8fNMtOSrYmZBQ1XKjYGdCQlXL7ZZs26Irl/5Vr+d8Lr9tKyBbAduWJNmS/HZAkrRp/x5NW/mKZv13uWoDAYMdm7OhKEc/fW+eluz8UFWBGtmSAjp2rPLK9uvBLS9r+voFKqutOn7BMBPsTEioarlVsDMhoarVErgqiMTExCg5ObneA0DDvPblZ7pt1Wvy23bdh+jxHA0nz3+2SbP++6bsr55His0H83XruudU6a8+6VjZXz3WHdiuX6xfoOpAbbP0CEQKVwURAE2zo7RYt616VY09yGJLevHzzXr+s02haMuVKmqr9Yv1C+QPBOpmQBoiIFsfFe/Q458vD2F3QOQJaRA5fPiwsrOzlZ2dLUnKyclRdna28vLyQrlbIOL8dWu2ArbdiI/Vr1mSntz0YcTMiryxO1slNeWNCiFH2bL1r7w1Ko+gQzRAqIU0iKxbt06DBg3SoEGDJEkzZszQoEGDdNddd4Vyt0BEqayt0T8+3SR/E4OELSmntFgf7A6vNVnfxbZtPb9jdaNnjr6pMlCjN3ZlO9USEPFCetbM8OHDI+avLMCUN3K/0OGa4Bbkei1L//hsk4amd3GoK3faWlqgnMP7gqphSfpX/hqN6XKWM00BEY41IkALt6P0oKKs4H6V/bat7QeLHOrIvQrKg/8ZbYfqADiCIAK0cGU11XLiUiCHa8J/3UO535lTuSv9NY7UAUAQAVq8hGifnDgCmuSLCb6Iy8V7fY7UiXWoDgCCCNDidU9po9qTXAvjZLyWpV6t2jrUkXt1TWgXdA1Llro4UAfAEQQRoIUb1a2nkoOczfDbtn7Se6BDHbnXKcnpOiWpozxBnDdjy9bVLFQFHEMQAVq4GG+Uftp7oLxNXChiSerZqo3O6NDJ2cZc6pqu5zTpGiJHxXt9GtVxgIMdAZGNIAKEgWv7nK4oT9P+zrcl3TLgrIi5+d0POg5QG19ik2ZFLEnjug5ljQjgIIIIEAY6J6XozxdeIUmN+ni1JF3X53T9qNdpIenLjWK90frTGRMV7YlqVBjxyNKQtj11Y8+RIewOiDwEESBMjOzaU0+OHK0oj+ekh2mOfv/6foN1z9ARETMbctQpyel6YsgUJUbHnnSsrK/Cyrnte+vB712rKI+3OVoEIkZIr6wKoHld1K2XXr9qgp7bvF4vbftE1X6/vJZHAdv+KmzY8tu2hnbsouv7DdaILj1Mt2zMaa0ytHDYz/XCjve1OH+tDtdWKsryyG/bR/5Csyz57YBOTU7XNV2H6uL00+UN8sJxAI5FEGlG8bHOHVd2spYbJcQ49/M5WaslOKV1Oz1w7kWaNeR8Ld62RZ8WFepQdZXioqLVMTFJY3qepsyU1qbbdIX2sSm69dSLdWPPEVqxZ7M+OrhDh2sqFOWJUtuYRP0gbYD6pETGIt5vi/HGubKWW3k9Ca6s1RJYtotvBlNaWqqUlBSVlJQoOTnZdDuOyNtTrPLK4K7uGB/rU5e08P8gyT1QrLKq4MYqIcanbm3Df6yAUCis2q0qf0VQNWK8cUqN6ehQR+5WUZMjf6AsqBpeT4LiojMd6sicxnx+MyPSzCIhQDiFAAGYFSkBwinhECBM4IAnAAAwhiACAACMIYgAAABjCCIAAMAYgggAADCGIAIAAIwhiAAAAGMIIgAAwBiCCAAAMIYgAgAAjCGIAAAAYwgiAADAGIIIAAAwhiACAACMIYgAAABjCCIAAMAYgggAADCGIAIAAIwhiAAAAGMIIgAAwBiCCAAAMIYgAgAAjCGIAAAAYwgiAADAGIIIAAAwhiACAACMIYgAAABjCCIAAMAYgggAADCGIAIAAIwhiAAAAGMIIgAAwBiCCAA
AMIYgAgAAjCGIAAAAY6JMNxAqBTsOqLy8Kqga8fEx6tS1rUMduVf+7mKVV1QHVSM+zqeMjq0d6qhl8QcC+s/WHK3c/KWKy8oVsKWU+BidfUpXjRrQSzHRYftrdlI5B4tVVh3ceyvB51Nmq8h8b31WsleLd3yk3RUlqqitUWJ0jE5Jbq8fdRuk1NhE0+0Zs6dyryr8lUHViPPGKi22g0MduVdpdZ5qA+VB1YjyxCvZ18Whjo5l2bZth6x6kEpLS5WSkqKSkhIlJyc3+HUFOw5o8pV/dKSHZ5feFtZhJH93sX5867OO1Hp+3uSICiPlVdVa+N9sLfxvtgpLy+T1WPIHjvw6Hf3vpLgYjRs6QD8773tqmxRvuOPmlXOwWCP+8pwjtVZcd33EhBHbtvVmwVbN3/aBsot2ymt5FLADsiV5ZB3ZyJIuSu+j6085R/1bpxvtt7ntqdyrX350hyO1Hhp4X1iHkdLqPL2aN9aRWpd1+WejwkhjPr/D8k+1YGdCQlXLjYKdCQlVLbcrLD2sm55arO17DijwVZY/GkK++d+HKqo0f+U6vbxui56+6UfqkRa+ofbbgp0JCVUtN/PbAd2bvUzP56yXx7LqvnbUkTgiyZaW7/pUb+7aqvu/d4VGdx1ool0jgp0JCVUtNwp2JiRUtb6NNSJAI5WUV2rSY//Ul3u/DiEn4rdtFR0u14THXtTOAyXN0CFaItu2NXvj63ohZ70knfS95bcDCti2Zq5fqlfzNzdHi0BIEESARrrz+Te1s6ik3gzIyfgDtg5XVmnas0vk4qOhMGhx3kd6MXeDGvvusCTNXLdEOYcOhKItIOSaJYg89thj6tatm2JjY3XWWWdp7dq1zbFbwHH5+w9q1SdfNiqEHOUP2Nq+t0hrvsgPQWdoyWzb1rOfrz66AqRxr5Vky9bCLz90ui2gWYQ8iLzwwguaMWOG7r77bm3YsEEDBw7URRddpH379oV614Dj/vn+Jnk8Tfm4OMLrsfSP97KdawhhYcOBfG0/tL/RsyFH+W1bi3I3qrw2MtbSILyEPIg8/PDDuuGGGzRp0iT17dtXTzzxhOLj4/Xcc86spgeaSyBga9EHm5s0G3KUP2Br1Sdf6sCh0C38Qsvzz9wN8lrB/XNc4a/RGwVbHOoIaD4hDSLV1dVav369Ro4c+fUOPR6NHDlS77///jHbV1VVqbS0tN4DcItDlVU6VBn8WVQB29buYt7b+NqXhw7UOzumKaIsj3aWFTvUEdB8QhpE9u/fL7/frw4d6p+n3aFDB+3Zs+eY7efMmaOUlJS6R0ZGRijbAxqlvMrB01EdrIWWr6zWmcsElHFoBi2Qq86amTVrlkpKSuoe+fks6oN7xMf4HKuVGOtcLbR8idGxjtRJiOJ9hZYnpBc0a9eunbxer/bu3Vvv63v37lVaWtox28fExCgmJiaULQFNlhQbo5T4WJWUB3cRJI9lKb11ikNdIRz0TGqnzcW7gjo8U2sH1C0xci6Yh/AR0hkRn8+nwYMHa8WKFXVfCwQCWrFihYYOHRrKXQOO83gsjRs6oO6Kl03h9Vj6wYBeap0Y52BnaOnGZX4v6DUiCVE+jerUx6GOgOYT8kMzM2bM0NNPP60FCxZo69atuuWWW1RWVqZJkyaFeteA464+u39QFyTzB2xdMyxyLseNhhnQupNOSW7fpOuISJLXsjQu83uK9UY72hfQHEIeRK655hrNnTtXd911l04//XRlZ2frjTfeOGYBK9ASpLdJ1g8G9mrSrIjXY6l3eqrO6N4pBJ2hJbMsSzecOqxJ1xGxJHksj37S/Uyn2wKaRbMsVp02bZp27NihqqoqrVmzRmeddVZz7BYIif8dN0o9OrSRtxEXNvN6LLVKiNO8yVfKCuLQDsLX5Rn9NaFn4/5tPPpOevjMHykjITLuTozw46qzZoCWICHWp2dvGas+nY9MpZ8sVngsS2mtkvSXadcorVVSc7SIFurX/Udpcq8j6+e8JwmsXssjr+X
RI2ddrR906t0c7QEhEdKzZoBw1ToxTvP/3zgt+uBjLfxvtvL2H1SUx1N3x1SPZak2EFC75ASNHzZQ15wzUCnxzpyiifDlsSzd3v8HOiu1m/6yba3e27f9q8OAlgK2La9lyW8HFO3x6oqMAZrQ6yz1Sm5vum0gKGEZROLjnTsF2MlabhQf59x1B5ys1RLEREfpp+cO0k++f7rWbd+pf2/eruKyCgVsWynxsTq7Vxed37e7oryROfGY4HPu/eBkrZbgvLReOi+tl/IOF2lJ3ibtLi9Rhb9GSdGxOiWlva7I6K8UX2SeeRXndS7QO1nLjaI88a6s9W2W7eJ7kpeWliolJUUlJSVKTk5u1GsLdhxQeXlwVyuMj49Rp67hf15+/u5ilVcEd0XG+DifMjpyjBr15RwsVll1cO+tBJ9Pma14b+Freyr3qsIf3PV84ryxSosN/5MmSqvzVBsI7t5WUZ54Jfu6NG6/jfj8DssZEUkRESCcQoBAqBAgEAqRECCc0tgAYUJkzhkDAABXIIgAAABjCCIAAMAYgggAADCGIAIAAIwhiAAAAGMIIgAAwBiCCAAAMIYgAgAAjCGIAAAAYwgiAADAGIIIAAAwhiACAACMIYgAAABjCCIAAMAYgggAADCGIAIAAIwhiAAAAGMIIgAAwBiCCAAAMIYgAgAAjCGIAAAAYwgiAADAGIIIAAAwhiACAACMIYgAAABjCCIAAMAYgggAADCGIAIAAIwhiAAAAGMIIgAAwBiCCAAAMIYgAgAAjCGIAAAAYwgiAADAmCjTDUSagpxCVZRVBVUjLiFGnTJTHerIvXYWFKm8vDqoGvHxPnXu1MahjlqWA6Vlevn9T/TFzv06VFGluJhodWidpMvP7qtTOof/+6cxDldVaenmT7Vx5y6VVFbJ5/WqXUK8ftj3VJ3ZpZMsyzLdohE5pUUqqwnudzAh2qfM5Mj4HdxZXqgKf2VQNeK8seocH1m/n5Zt27bpJo6ntLRUKSkpKikpUXJysul2glaQU6gpF85xpNYz/54V1mFkZ0GRrp38tCO1/vbsDREVRrbm7dVf3lqntzd8oYAtWZICti3LkjyWJX/AVr9uafrpiO9p1OBTIvZDVpLyig/quTXr9dKmLaqqrZVlWUfGSpLH45E/EFBmm9aacOYgjT29n6K9XtMtN5uc0iJdsPQpR2qtvPLGsA8jO8sLNWnt/Y7UyhrymxYfRhrz+c2MSDMKdiYkVLXcKNiZkFDVcrtlaz/VXQvekCT5A0f+xjj6l4ZtS/6v/u7YsmOvZj37utZ+mqdZ40coyht5R2k/zNupG19cosqa2rpxOfp3mS3JHwhIknKLijX7zX/rrc+3ad6Yy5UY4zPVcrMKdiYkVLXcKtiZkFDVagki718fIEy9tf5z/TZrmfwBuy6EHE/gqw/cJas3676/vy0XT4yGxEcFuzXpHy+p4hsh5Hjsrx7v5+brxheXqLq2tll6BCIFQQQIA/mFB/XbrGVq7EEW25Zefv8TLX5vc0j6cqPy6hrd8OIS1QYCdYGsIQK2rQ07d+nhd1aHsDsg8hBEgDCw6N1Nsm1bTZnXsCT95a11ETMr8vInW3WworJRIeSogG1r4YaPVFYd/ocagOZCEAFauMrqWr30349PejjmeGxJefsOav0XO51tzIVs29ZfPtzY6Jmjb6qsqdUrmz91rCcg0hFEgBZuZfY2lVUG9xe612Pppf987FBH7rV5z15t21/UpJmjoyxJCzd85FRLQMQjiAAtXH7hwaDPevEHbO3YV+xQR+6VV1wSdA1bUt7B4OsAOIIgArRw5VXOrFc4XBH+6x7Kq2scqVNZw5kzgFMIIkALFx/jU1DHGr6SGBf+18eI90U7Uicu2pk6AAgiQIvXtUNr1X518a2m8nosde/Y1qGO3Cuzbeuga1iSurVpFXQdAEcQRIAW7oKBPZQUFxNUDX/A1o/O7e9QR+7Vt0N79emQKk8Qp83Ykn46eKBjPQGRjiACtHC
+6Cj96Nz+8jTx09WypMy0NhrYPd3hztzpujMGqYlnOkuSEnzR+mHfU51rCIhwIQsiv/vd73TOOecoPj5erVq1CtVuAEi6+ryBivJ4mnR9DNuWJo46I2JufvfDvqeqXUK8vE34eS1J155xOmtEAAeFLIhUV1dr7NixuuWWW0K1CwBfSW+brN/f8EPJOjLD0Rjjzh+oy87uG5rGXCg2OkrP/PgqRXu98jRisDyWpXMyu+jn5w4NYXdA5AlZEJk9e7Z+8YtfqH//8D/uDLjB+QN6aO6NlyvK45H3JIdpjn7/pyO+p9vHDY+Y2ZCj+nZor79eO1ZJMb6TzowcDSsX9uquP199haK93uZoEYgYUaYb+KaqqipVVX19e/vS0lKD3QAtzwWn99Q/fnut/v7vDXrtg62qqfXL6/EoYNt1MyX+gK0zTsnQTy4cpHP7dzfbsEED09P0yg3X6a8fbtTzGz/WoaoqRR0dK0myLPkDAfXtkKqfnTlIV5zWW14Py+oAp7kqiMyZM0ezZ8823UbIxCUEd2ZDqGq5UXy8c9e0cLJWS9C9Y1vd+dMfaPpV5+q1tZ/qi4JCHa6oVqwvSmmtk/TDs/qoS/vgT2MNB2lJibr9wnP18/OGatnWz7Vh5y6VVlYp2utVamK8Lu1zqvp17GC6TSMSop37vXGyllvFeWNdWaslsOxG3HJz5syZ+v3vf3/CbbZu3arevXvXPZ8/f76mT5+ugwcPnrT+d82IZGRkqKSkRMnJyQ1t09UKcgpVUVZ18g1PIC4hRp0yUx3qyL12FhSpvDy4q33Gx/vUuVMbhzoCIktOaZHKaoL7HUyI9ikzOTJ+B3eWF6rCXxlUjThvrDrHt/x/30tLS5WSktKgz+9GzYj88pe/1MSJE0+4TffuTZ/qjYmJUUxMeP+lHwkBwikECMCsSAkQTgmHAGFCo4JIamqqUlMZaAAA4IyQrRHJy8tTUVGR8vLy5Pf7lZ2dLUnq2bOnEhMTQ7VbAADQgoQsiNx1111asGBB3fNBgwZJklauXKnhw4eHarcAAKAFadRi1ebWmMUuAADAHRrz+c1J8QAAwBiCCAAAMIYgAgAAjCGIAAAAYwgiAADAGIIIAAAwhiACAACMIYgAAABjCCIAAMAYgggAADCGIAIAAIwhiAAAAGMIIgAAwBiCCAAAMIYgAgAAjCGIAAAAYwgiAADAGIIIAAAwhiACAACMIYgAAABjCCIAAMAYgggAADCGIAIAAIwhiAAAAGMIIgAAwBiCCAAAMIYgAgAAjCGIAAAAYwgiAADAGIIIAAAwhiACAACMiTLdAMwr2L5X5Ycrg6oRnxirTj06ONSRu+3ML1JFeVVQNeLiY9Q5o41DHblX3p5ilVdWB1UjPtanLmmtHerIvXL3F6u8Osix8vnUrV34j1VOSbHKghyrBJ9PmSnhP1YtAUEkwhVs36vJZ97hSK1nP7wv7MPIzvwiTbrmz47Uynrh/4V1GMnbU6yxt2c5UuufD04K6zCSu79Ylz4635Far0+fGNZhJKekWBc8/6wjtVb+eDJhxAU4NBPhgp0JCVUttwp2JiRUtdwo2JmQUNVyo2BnQkJVy42CnQkJVS00HUEEAAAYQxABAADGEEQAAIAxBBEAAGAMQQQAABhDEAEAAMYQRAAAgDEEEQAAYAxBBAAAGEMQAQAAxhBEAACAMQQRAABgDEEEAAAYQxABAADGEEQAAIAxBJEIF58Y68pabhUXH+PKWm4UH+tzZS03ivc5OFYO1nKjBAd/Pidroeks27Zt000cT2lpqVJSUlRSUqLk5GTT7YStgu17VX64Mqga8Ymx6tSjg0MdudvO/CJVlFcFVSMuPkadM9o41JF75e0pVnlldVA14mN96pLW2qGO3Ct3f7HKq4McK59P3dqF/1jllBSrLMixSvD5lJkS/mNlSmM+vwkiAADAUY35/ObQDAAAMIYgAgAAjAlZEMnNzdXkyZOVmZmpuLg49ejRQ3fffbeqgzyuBwA
AwkdUqAp/+umnCgQCevLJJ9WzZ09t3rxZN9xwg8rKyjR37txQ7RYAALQgzbpY9cEHH9Tjjz+uL7/8skHbs1gVAICWpzGf3yGbEfkuJSUlatPm+KcsVlVVqarq69MiS0tLm6MtAABgSLMtVt22bZvmzZunm2666bjbzJkzRykpKXWPjIyM5moPAAAY0OggMnPmTFmWdcLHp59+Wu81BQUFuvjiizV27FjdcMMNx609a9YslZSU1D3y8/Mb/xMBAIAWo9FrRAoLC3XgwIETbtO9e3f5vrp07q5duzR8+HCdffbZmj9/vjyehmcf1ogAANDyhHSNSGpqqlJTUxu0bUFBgS644AINHjxYWVlZjQohAAAg/IVssWpBQYGGDx+url27au7cuSosLKz7XlpaWqh2CwAAWpCQBZG33npL27Zt07Zt29S5c+d633Px7W0AAEAzCtmxkokTJ8q27e98AAAASNxrBgAAGEQQAQAAxhBEAACAMQQRAABgDEEEAAAYQxABAADGEEQAAIAxBBEAAGAMQQQAABhDEAEAAMYQRAAAgDEEEQAAYAxBBAAAGEMQAQAAxhBEAACAMQQRAABgDEEEAAAYQxABAADGEEQAAIAxBBEAAGAMQQQAABhDEAEAAMZEmW4AOJ6dX+xWxaGKoGrEJcWpc6+ODnXUshTvP6zlS9Yr5/M9KjtcqZhYn1LTUvSDKwep+6mROSaSlL+rWOUV1UHViI/zKSO9tUMdudeOwmKVVQY3VgmxPnVNDf+xQtMRROBKO7/YrUmn/tyRWlmf/SmiwsgXWwq0KOs/+s9bn8i2bVmSAgFblmXJ47G0+K/v6dT+nTXmumE676L+sizLdMvNJn9XsX4y9VlHai18bHJYh5EdhcW6/P75jtR65TcTCSM4LoIIXCnYmZBQ1XK7la99pAd/u+hI+PAHJEn2V9+zbVt+/5FnX3xSoDm3v6CNH2zXrXdcIW+U10zDzSzYmZBQ1XKjYGdCQlUL4YcgAoSJd9/8WL+f+WKDtg0EjgSSN19ap4A/oF/875iImhkB4B4sVgXCwK78A/rDzBelRmYJ25aWL9mgN/61LjSNAcBJEESAMPDaC2sVsO2vj8M0hiX9M+s/su2mvBgAgkMQAVq4qsoaLVv0oQL+JgYJW9qVd0Cb1uU42xgANABBBGjhVq/YovKyqqBqeL0eLVv0oUMdAUDDEUSAFm5X3gF5o4L7Vfb7A9qZu9+hjgCg4QgiQAtXUVEtq7GrVL9D+eHgZlUAoCkIIkALFxfnk92kVar1xSfGONANADQOQQRo4TpntpO/NhBUDa/Xo6492jvUEQA0HEEEaOGGXthXiUmxQdXw+wO6dOwQhzoCgIYjiAAtnM8XpUvHDpHH07R1IpYlZXRPVd/TuzjcGQCcHEEECAM/vGaIvFFeNeUq7bYtjbv+PC7xDsAIgggQBjqkt9ZvH/qxJKvRgeLyH5+lkVcMCk1jAHASBBEgTJw9vI/ufOQn8kZ55PWe+Ffb4z0SVq762TDdPPMyZkMAGMPdd4Ewcs6Ivnrsn9O05K/v6e1XslVbUyuP16NAwJbHsmRLCvgDGnhmd43+2Tk667zeplsGEOEIInCluKQ4V9ZqCbr2aK/b7rlKk2dcrH+/mq2cz/eo7HCVYmKjlZqWohGXn65OXduZbtOI+DifK2u5UUKscz+fk7UQfizbxbfcLC0tVUpKikpKSpScnGy6HTSznV/sVsWhiqBqxCXFqXOvjg51hHCQv6tY5RXVQdWIj/MpI721Qx25147CYpVVBjdWCbE+dU0N/7FCfY35/GZGBK5FgEAoREKAcAoBAs2BxaoAAMAYgggAADCGIAIAAIwhiAAAAGNcvVj16Ak9paWlhjsBAAANdfRzuyEn5ro6iBw6dEiSlJGRYbgTAADQWIcOHVJKSsoJt3H1dUQCgYB27dqlpKQkxy9BXVpaqoyMDOXn53ONkpNgrBqOsWo4xqrhGKvGYbwaLlRjZdu2Dh06pPT
0dHk8J14F4uoZEY/Ho86dO4d0H8nJybxRG4ixajjGquEYq4ZjrBqH8Wq4UIzVyWZCjmKxKgAAMIYgAgAAjInYIBITE6O7775bMTExpltxPcaq4RirhmOsGo6xahzGq+HcMFauXqwKAADCW8TOiAAAAPMIIgAAwBiCCAAAMIYgAgAAjCGISLriiivUpUsXxcbGqmPHjvrZz36mXbt2mW7LdXJzczV58mRlZmYqLi5OPXr00N13363q6mrTrbnS7373O51zzjmKj49Xq1atTLfjOo899pi6deum2NhYnXXWWVq7dq3pllzn3Xff1eWXX6709HRZlqUlS5aYbsm15syZozPPPFNJSUlq3769Ro8erc8++8x0W670+OOPa8CAAXUXMRs6dKiWLVtmrB+CiKQLLrhAL774oj777DP961//0vbt23X11Vebbst1Pv30UwUCAT355JP65JNP9Mgjj+iJJ57Qb37zG9OtuVJ1dbXGjh2rW265xXQrrvPCCy9oxowZuvvuu7VhwwYNHDhQF110kfbt22e6NVcpKyvTwIED9dhjj5luxfXeeecdTZ06VR988IHeeust1dTUaNSoUSorKzPdmut07txZDzzwgNavX69169bpwgsv1JVXXqlPPvnETEM2jrF06VLbsiy7urradCuu94c//MHOzMw03YarZWVl2SkpKabbcJUhQ4bYU6dOrXvu9/vt9PR0e86cOQa7cjdJ9uLFi0230WLs27fPlmS/8847pltpEVq3bm0/88wzRvbNjMi3FBUV6e9//7vOOeccRUdHm27H9UpKStSmTRvTbaAFqa6u1vr16zVy5Mi6r3k8Ho0cOVLvv/++wc4QTkpKSiSJf59Owu/36/nnn1dZWZmGDh1qpAeCyFd+/etfKyEhQW3btlVeXp6WLl1quiXX27Ztm+bNm6ebbrrJdCtoQfbv3y+/368OHTrU+3qHDh20Z88eQ10hnAQCAU2fPl3Dhg1Tv379TLfjSh9//LESExMVExOjm2++WYsXL1bfvn2N9BK2QWTmzJmyLOuEj08//bRu+9tvv10bN27U8uXL5fV6dd1118mOkIvONnasJKmgoEAXX3yxxo4dqxtuuMFQ582vKWMFoHlNnTpVmzdv1vPPP2+6Fdc69dRTlZ2drTVr1uiWW27RhAkTtGXLFiO9hO0l3gsLC3XgwIETbtO9e3f5fL5jvr5z505lZGRo9erVxqaqmlNjx2rXrl0aPny4zj77bM2fP18eT9jm2WM05X01f/58TZ8+XQcPHgxxdy1DdXW14uPjtWjRIo0ePbru6xMmTNDBgweZjTwOy7K0ePHiemOGY02bNk1Lly7Vu+++q8zMTNPttBgjR45Ujx499OSTTzb7vqOafY/NJDU1VampqU16bSAQkCRVVVU52ZJrNWasCgoKdMEFF2jw4MHKysqKqBAiBfe+whE+n0+DBw/WihUr6j5UA4GAVqxYoWnTppltDi2Wbdu69dZbtXjxYq1atYoQ0kiBQMDYZ17YBpGGWrNmjT788EN9//vfV+vWrbV9+3bdeeed6tGjR0TMhjRGQUGBhg8frq5du2ru3LkqLCys+15aWprBztwpLy9PRUVFysvLk9/vV3Z2tiSpZ8+eSkxMNNucYTNmzNCECRN0xhlnaMiQIXr00UdVVlamSZMmmW7NVQ4fPqxt27bVPc/JyVF2drbatGmjLl26GOzMfaZOnaqFCxdq6dKlSkpKqltvlJKSori4OMPducusWbN0ySWXqEuXLjp06JAWLlyoVatW6c033zTTkJFzdVxk06ZN9gUXXGC3adPGjomJsbt162bffPPN9s6dO0235jpZWVm2pO984FgTJkz4zrFauXKl6dZcYd68eXaXLl1sn89nDxkyxP7ggw9Mt+Q6K1eu/M730IQJE0y35jrH+7cpKyvLdGuuc/3119tdu3a1fT6fnZqaao8YMcJevny5sX7Cdo0IAABwv8g6wA8AAFyFIAIAAIwhiAA
AAGMIIgAAwBiCCAAAMIYgAgAAjCGIAAAAYwgiAADAGIIIAAAwhiACAACMIYgAAABjCCIAAMCY/w+CptFf/M5eDgAAAABJRU5ErkJggg==\",\n      \"text/plain\": [\n       \"<Figure size 640x480 with 1 Axes>\"\n      ]\n     },\n     \"metadata\": {},\n     \"output_type\": \"display_data\"\n    }\n   ],\n   \"source\": [\n    \"plotCompPinPow(primaryFuel, marker=\\\"s\\\", label=\\\"primary\\\")\\n\",\n    \"plotCompPinPow(secondaryFuel, marker=\\\"o\\\", label=\\\"secondary\\\")\\n\",\n    \"pyplot.legend()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"eb6f7e6a\",\n   \"metadata\": {},\n   \"source\": [\n    \"Rotate 60 degrees CCW or pi/3\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 33,\n   \"id\": \"00afb1b7\",\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"multiPinBlock.rotate(math.pi / 3)\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 34,\n   \"id\": \"f89722ec\",\n   \"metadata\": {},\n   \"outputs\": [\n    {\n     \"data\": {\n      \"text/plain\": [\n       \"<matplotlib.legend.Legend at 0x1baf73611d0>\"\n      ]\n     },\n     \"execution_count\": 34,\n     \"metadata\": {},\n     \"output_type\": \"execute_result\"\n    },\n    {\n     \"data\": {\n      \"image/png\": 
\"iVBORw0KGgoAAAANSUhEUgAAAiIAAAGdCAYAAAAvwBgXAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjAsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvlHJYcgAAAAlwSFlzAAAPYQAAD2EBqD+naQAAORRJREFUeJzt3Xl8VOXd9/HvmUkmKwkQwhIJu7KLFEURF1AeUSuKVrS0yiK4PWCleNuCtxuPVmzFqvW2rjVQ69aq4FJRFFHvioIscWFRQCALW4CQkD2ZOc8fSDSyJTlncp3JfN6v17xqJmd+55erE+ab65xzHcu2bVsAAAAG+Ew3AAAAohdBBAAAGEMQAQAAxhBEAACAMQQRAABgDEEEAAAYQxABAADGEEQAAIAxMaYbOJpQKKRt27apRYsWsizLdDsAAKAebNvW/v37lZGRIZ/v6HMeng4i27ZtU2Zmpuk2AABAI+Tm5qpjx45H3cbTQaRFixaSDvwgKSkphrsBAAD1UVxcrMzMzNrP8aPxdBA5eDgmJSWFIAIAQISpz2kVnKwKAACMIYgAAABjCCIAAMAYT58jAgCIXLZtq6amRsFg0HQrCIPY2Fj5/X7HdQgi8Cy7ZotklzorYiXJiuniRjueVl3znexQiaMali9ZsTHdXOrIu8qrNysYcva+8vuSlBDb1aWOvKu4Kkc1obJGvbamOqR9BdWqrvDJZ/FR0xxZlqWOHTsqOTnZUR3eHfAku2aL7N3nuVOszaJmHUaqa77Tjp1DXanVvt0nzTqMlFdv1upt57pSa2DG4mYdRoqrcvRWzpjGvdj2q0vZrWoRn6FWbZPUMiFDMf44dxuEUbZtq6CgQHl5eTr++OMdzYwQROBNTmdCwlXLg5zOhISrlhc5nQkJVy0vauxMiCTFhlor1kpVWttkxSX4FB8IKMYX72J38IL09HRt2bJF1dXVBBEAgHdY8smSZNXzcoitJXtUWlPpaJ9JMXHqnJzmqAYaxq1brxBEAADGbC3ZowsXP+pKrbfPvYkwEoG4fBcAYIzTmZBw1WqILVu2yLIsZWdnG9l/pGNGBAAABzIzM7V9+3a1adPGdCsRiRkRAAAaqaqqSn6/X+3bt1dMTNP/bV9dXd3k+3QbQQQAgO8NGzZMU6dO1dSpU5Wamqo2bdrojjvukG3bkqQuXbronnvu0bhx45SSkqLrrrvukEMzH374oSzL0rvvvquBAwcqISFB55xzjnbt2qWFCxeqd+/eSklJ0a9+9SuVlf1wddI777yjM844Qy1btlRaWpouuugibdq0qfb7B/fz8ssv6+yzz1Z8fLyeeuoppaSk6JVXXqnzcyxYsEBJSUnav39/+AfNIYIIAAA/Mm/ePMXExGj58uV65JFH9Oc//1nPPPNM7ffnzJmjAQMGaPXq1brjjjuOWOfuu+/W//zP/2jp0qXKzc3VFVdcoYcfflgvvPCC/v3vf2vRokV69NEfTtQtLS3V9OnTtWLFCi1evFg+n0+XXnqpQqFQnbozZszQzTffrHXr1umyyy7TL3/5S2VlZdXZJisrS5dffrlatGjh0qiED+eIAADwI5mZmXrooYdkWZZ69uypr776Sg899JCuvfZaSdI555yjW265pXb7LVu2HLbOvffeq6FDDyw2OGnSJM2cOVObNm1St24HFg28/PLLtWTJEv3+97+XJP3iF7+o8/pnn31W6enpWrt2rfr161f7/LRp03TZZZfVfj158mSdfvrp2r59uzp06KBdu3bp7bff1vvvv+98MJoAMyIAAPzIaaedVmeNjCFDhmjDhg2198w5+eST61XnxBNPrP3vdu3aKTExsTaEHHxu165dtV9v2LBBY8eOVbdu3ZSSkqIuXbpIknJycurU/en+Bw8erL59+2revHmSpH/84x/q3LmzzjrrrHr1aRpBBACABkhKSqrXdrGxsbX/bVlWna8PPvfjwy6jRo3S3r179fTTT2vZsmVatmyZpAMnxB5r/5
MnT9bcuXMlHTgsM3HiRNcWHAs3gggAAD9yMAAc9Nlnnzm+n8qx7NmzR998841uv/12nXvuuerdu7cKCwvr/fqrrrpKW7du1V/+8hetXbtW48ePD1uvbuMcEQAAfiQnJ0fTp0/X9ddfr1WrVunRRx/Vgw8+GNZ9tmrVSmlpaXrqqafUoUMH5eTkaMaMGQ16/WWXXaZbb71V5513njp27BjGbt3FjAgAAD8ybtw4lZeXa/DgwZoyZYpuvvlmXXfddWHdp8/n00svvaSVK1eqX79++u1vf6sHHnigQTUmTZqkqqoqXXPNNWHqMjyYEQEA4EdiY2P18MMP6/HHHz/ke4e7QqZLly6164xIB9Yi+fHXkjRhwgRNmDChznN333237r777tqvR4wYobVr19bZ5sd1frqfn8rPz1daWpouueSSI27jRQQRAAAiWFlZmbZv3677779f119/vQKBgOmWGoRDM/Amq35npTd5LQ+yfMmerOVFfp977wU3a3lRjC/RtVoWHzVh9ac//Um9evVS+/btNXPmTNPtNJhlH22ex7Di4mKlpqaqqKhIKSkppttBE7Nrtkh2qbMiVpKsmC5utONp1TXfyQ6VOKph+ZIVG9Pt2BtGuPLqzQqGnL2v/L4kJcR2dakj7yquylFNqOzYG/5EVWVIe/Jr1LlLJyXEJ8jvizvitmv3bdOYj55y0matf519nfq0zHClFo6toqJCmzdvVteuXRUfH1/new35/ObQDDwrGgKEW6IhQLglGgKEW1ICnRr1ugpVqNDarBhf/FFDCCBxaAYAYFBSjHtBxc1aaDrMiAAAjOmcnKa3z71JpTWVjuokxcSpc3KaS12hKRFEAABGESCiG0EEAOA5+yrK9cqGNfpi9w7tr6xQXEyM2iUm69IefXRSeoeIuY8Kjo0gAgDwjG8Ld+upLz/Xgk1rVRMKHbgxnG3LkuS3fJq3drV6t07XNf0G6fLj+8lHIIl4nKwKAPCExTmbdNGCv+u1jWtUHQrJlhT6foUJW1KNfeBOtd/sLdCtH7+jKYvfUEVNjbmGPeDDDz+UZVnat2+f6VYajSACADDu47zNmvzefFUFgwoeY3mr0Pf/+86WDZr6wRsKhkJH3R7eRhABABi1u7xU172/QLJtNWSFzZBsvZezSU98uTxcrUWFqqoqo/sniAAAjHr5m69UWRNUY+c1/vb1ClWHgq708sorr6h///5KSEhQWlqaRowYodLSAyvxPvPMM+rdu7fi4+PVq1cv/fWvf63z2ry8PI0dO1atW7dWUlKSTj75ZC1btqz2+48//ri6d++uQCCgnj176rnnnqvzesuy9Mwzz+jSSy9VYmKijj/+eL3xxht1tnn77bd1wgknKCEhQcOHDz/kJnx79uzR2LFjddxxxykxMVH9+/fXiy++WGebYcOGaerUqZo2bZratGmjkSNH6pprrtFFF11UZ7vq6mq1bdtWf/vb3xo1lvVFEAEAGBMMhTRv7WqFGjQXUteeinIt2rrRcS/bt2/X2LFjdc0112jdunX68MMPddlll8m2bT3//PO688479Yc//EHr1q3TfffdpzvuuEPz5s2TJJWUlOjss89Wfn6+3njjDX3xxRf63e9+p9D3h43mz5+vm2++Wbfccou+/vprXX/99Zo4caKWLFlSp4dZs2bpiiuu0JdffqkLL7xQv/71r7V3715JUm5uri677DKNGjVK2dnZmjx5smbMmFHn9RUVFRo0aJD+/e9/6+uvv9Z1112nq6++WsuX1501mjdvngKBgD755BM98cQTmjx5st555x1t3769dpu33npLZWVluvLKKx2P7dFwrxkAgKuOdg+Sn/oob7PGvfOKo/35LUundcjUCxc6+8BctWqVBg0apC1btqhz5851vtejRw/dc889Gjt2bO1z9957r95++20tXbpUTz31lP7rv/5LW7ZsUevWrQ+pPXToUPXt21dPPfXDfXWuuOIKlZaW6t///rekAzMit99+u+
655x5JUmlpqZKTk7Vw4UKdf/75uu222/T6669rzZo1tTVmzJihP/7xjyosLFTLli0P+3NddNFF6tWrl+bMmSPpwIxIcXGxVq1aVWe7vn37avz48frd734nSbr44ouVlpamrKysw9Z1614zzIgAAIzZUlwopxfgBm1bm4sKHfcyYMAAnXvuuerfv7/GjBmjp59+WoWFhSotLdWmTZs0adIkJScn1z7uvfdebdq0SZKUnZ2tgQMHHjaESNK6des0dOjQOs8NHTpU69atq/PciSeeWPvfSUlJSklJ0a5du2prnHrqqXW2HzJkSJ2vg8Gg7rnnHvXv31+tW7dWcnKy3n33XeXk5NTZbtCgQYf0OHny5NrQsXPnTi1cuFDXXHPNEcfLLawjAgAwpqy6Wj7LOuaVMsdSWlPtuBe/36/33ntPS5cu1aJFi/Too4/qv//7v/Xmm29Kkp5++ulDgoDf75ckJSQkON6/JMXGxtb52rKs2sM79fHAAw/okUce0cMPP6z+/fsrKSlJ06ZNO+SE1KSkpENeO27cOM2YMUOffvqpli5dqq5du+rMM89s3A/SAMyIAACMSYoN1K4V4kRybMCFbg588A8dOlSzZs3S6tWra8+jyMjI0HfffacePXrUeXTteuBuzieeeKKys7Nrz+f4qd69e+uTTz6p89wnn3yiPn361Lu33r17H3Kux2effXZIzUsuuURXXXWVBgwYoG7duunbb7+tV/20tDSNHj1aWVlZmjt3riZOnFjv3pxgRgQAYEy31NYOTlM9wG9Z6tHy8IdEGmLZsmVavHixzjvvPLVt21bLli1TQUGBevfurVmzZuk3v/mNUlNTdf7556uyslIrVqxQYWGhpk+frrFjx+q+++7T6NGjNXv2bHXo0EGrV69WRkaGhgwZoltvvVVXXHGFBg4cqBEjRujNN9/Ua6+9pvfff7/e/d1www168MEHdeutt2ry5MlauXKl5s6dW2eb448/Xq+88oqWLl2qVq1a6c9//rN27txZ78AzefJkXXTRRQoGgxo/fnxDhq/RmBEBABhzekYnZSS1cFQjaNu6qvdJjntJSUnRxx9/rAsvvFAnnHCCbr/9dj344IO64IILNHnyZD3zzDPKyspS//79dfbZZ2vu3Lm1MyKBQECLFi1S27ZtdeGFF6p///66//77aw/djB49Wo888ojmzJmjvn376sknn1RWVpaGDRtW7/46deqkV199VQsWLNCAAQP0xBNP6L777quzze23366f/exnGjlypIYNG6b27dtr9OjR9d7HiBEj1KFDB40cOVIZGRn1fp0TXDUDAHBVQ66akaQnv1yu2cs/avTMSNuEJH069gbF+Pjb2qmSkhIdd9xxysrK0mWXXXbUbblqBgDQLFxxQn8lxwYafQO76048hRDiUCgU0q5du3TPPfeoZcuWuvjii5ts3/w/BwAwqlV8gp4d+Qv5ZMnXgIt5LUmjuvXSpH4nh6+5KJGTk6N27drphRde0LPPPquYmKY7hZSTVQEAxg1u31F/v+ByTV40X5XBmqNezuuzLIVsW5cf30+zzzyv0TMp+EGXLl1k6kwNZkQAAJ4wNKOz3r1sgsb1GajEmAPracRYPvksS/7vH5I0qG2G/nrOxXrgrPMV6/ObbBkuYEYEAOAZnVJa6u4h5+rWk8/UG5vW6cvdO1RcWan4mBi1S0zW6B59dEKrNqbbhIsIIgCAsHAy1Z8UG9DYXgM0VgNc7AhucutQTlgPzcyePVunnHKKWrRoobZt22r06NH65ptvwrlLAIBhB5cpLysrM9wJwungsvEH10pprLDOiHz00UeaMmWKTjnlFNXU1Oi2227Teeedp7Vr1x52nXsAQOTz+/1q2bJl7c3aEhMTZXFCabMSCoVUUFCgxMREx1fYNOmCZgUFBWrbtq0++ugjnXXWWcfcngXNACAy2batHTt2aN++faZbQZj4fD517dpVgcCh9/lpyOd3k54jUlRUJElHvE1yZW
WlKisra78uLi5ukr4AAO6yLEsdOnRQ27ZtVV3t/M648J5AICCfCwvJNVkQCYVCmjZtmoYOHap+/foddpvZs2dr1qxZTdUSACDM/H6/43MI0Lw12aGZG2+8UQsXLtR//vMfdezY8bDbHG5GJDMzk0MzAABEEM8dmpk6dareeustffzxx0cMIZIUFxenuLi4pmgJAAB4QFiDiG3buummmzR//nx9+OGHtbdLBgAAkMIcRKZMmaIXXnhBr7/+ulq0aKEdO3ZIklJTU5WQkBDOXQMAgAgQ1nNEjnTdeFZWliZMmHDM13P5LgAAkccz54iYupMfAACIDNx9FwAAGMNN76CSqq2qtp3dEyLWSlRyoLNLHXlbYVWeqkPljmrE+hLUKnDkK8iai4LK7aoMOhurOH+C0uM6uNSRd+2o2KnyYIWjGgn+eLWPb+dSR96VV1bgylh1TEx3qSM4QRCJciVVW7Uod7Qrtc7LXNDsw0hhVZ7+/t0EV2qN6za3WYeRgsrt+uP6aa7U+n2vh5t1GNlRsVO3fHG7K7UeHHBvsw4jeWUFmrj8PldqZQ2+jTDiARyaiXJOZ0LCVcurnM6EhKuWFzmdCQlXLS9y+td9uGp5EWPV/BBEAACAMQQRAABgDEEEAAAYQxABAADGEEQAAIAxBBEAAGAMQQQAABhDEAEAAMYQRAAAgDEEEQAAYAxBBAAAGEMQAQAAxhBEAACAMQQRAABgDEEEAAAYQxCJcrFWoidreVWsL8GTtbwozu/ez+dmLS9K8Md7spYXMVbNj2Xbtm26iSMpLi5WamqqioqKlJKSYrqdZqukaquq7TJHNWKtRCUHOrvUkbcVVuWpOlTuqEasL0GtAh1d6si7Ciq3qzLobKzi/AlKj+vgUkfetaNip8qDFY5qJPjj1T6+nUsdeVdeWYErY9UxMd2ljvBTDfn8jmminuBh0RIg3BINAcIt0RAg3BINAcItBIjmhUMzAADAGIIIAAAwhiACAACMIYgAAABjCCIAAMAYgggAADCGIAIAAIwhiAAAAGMIIgAAwBiCCAAAMIYgAgAAjCGIAAAAYwgiAADAGIIIAAAwhiACAACMIYgAAABjCCIAAMAYgggAADCGIAIAAIwhiAAAAGMIIgAAwBiCCAAAMIYgAgAAjCGIAAAAYwgiAADAGIIIAAAwhiACAACMIYgAAABjCCIAAMAYgggAADCGIAIAAIwhiAAAAGPCGkQ+/vhjjRo1ShkZGbIsSwsWLAjn7gAAQIQJaxApLS3VgAED9Nhjj4VzNwAAIELFhLP4BRdcoAsuuCCcuwAAABEsrEGkoSorK1VZWVn7dXFxscFuwmNHxQ5VBCsc1Yj3x6t9fHuXOvKu/PJdKq+pPPaGR5EQE6fjEtq61FFk2VO5X2/lr9KG/dtVUl2heH9A7eJT9fPjfqYTUjqYbs9TSqor9UbOl1q9N0/FVRUK+P1Ki0vShR376pQ2nWVZlukWjdhcvFel1VWOaiTFBtQ1pbVLHXnb5qJClVY5HK9AQF1TW7nUUWTwVBCZPXu2Zs2aZbqNsNlRsUMzv5rpSq3Z/Wc36zCSX75L131+ryu1njrl9qgKI+uL8vXc5v/VBzu/Vsi2ZUkKyZYlSz7L0otbP1Hf1I76VZczNKJ9/6j9kJWk3NJCPfvtp5qfk63KYI0sy6odM5/l00ubV6prcprG9ThVl3cZqFif33TLTWZz8V4Nf/0pV2otueS6Zh9GNhcVavhLf3Ol1pJfToqqMOKpq2ZmzpypoqKi2kdubq7pllzldCYkXLW8yOlMSLhqed0727I18bPH9cHOrxW0Q7JlKyRbkmTLVtAOSZLWFeXrv794Sfetma+aUNBky8Z8vnurRi9+Uv/cskoVwRrZkkL2wbFS7VhtKdmj/5f9tq5f+qJKqqPnveR0JiRctbzK6UxIuGpFAk8Fkbi4OKWkpNR5AKif93d8pTu//KeCdqj2Q/RIDoaTN/JWaPaa+bK//wCOFl/szdOk//
xD5TXVxxwr+/vHZwWbdcPSF1UVrGmSHoFo4akgAqBx8sr26M4v/qmGHmSxJb2Zv0qv560IR1ueVFZTpeuXvqiaUKg2kNVHyLa1ck+uHlr7QRi7A6JPWINISUmJsrOzlZ2dLUnavHmzsrOzlZOTE87dAlHn1ZxlatjHal3Pbf44amZF3sz9Svuqyhs1WrZsvfjdSpXWRNfUORBOYQ0iK1as0MCBAzVw4EBJ0vTp0zVw4EDdeeed4dwtEFUqgtVakPt57fkNjZFbtker9m52sStvsm1bz21c3uCZox+rCFbrzZyvXOsJiHZhvWpm2LBhUfNXFmDKhzvXqDTo7CRKv+XT/LzlGpTWzaWuvOnrfdu1cX+BoxqWpJc2r9Avuw1ypykgynGOCBDh8sr2yG85+1UO2iHllO52qSPvyi0tdFzDlpTjQh0ABxBEgAhXFqyS5ehgwwGlUXCZc5lL53ZU1FS7UgcAQQSIeIn+gOxGn6b6g6SYOBe68bbEmIArdRJiYl2pA4AgAkS8zknpx1wL41j8lk/dkpv/6rNdk9Mc17AkdXGhDoADCCJAhDu7XR+1iIl3VCNoh3Rp5qkudeRdvVu2V+/U9vI5OJRlS/pVt5PdawqIcgQRIMIFfDG6NHNwoz9cLUldktJ1YstO7jbmUVd1H+xgxRUpKSagCzv2c7EjILoRRIBm4BedTlWMz9+oKGJLGtft7Ki5+d3PM/uqTVySfI34eS1Jv+52CueIAC4iiADNQIeEVpp90lhJVoOvoLk881T9PGNgeBrzoHh/rJ4e+msFfP4GzSL5ZOn0tt10U59h4WsOiEIEEaCZOLNtb/1x4K8VY/mOua6I//vZgLGdh+qWPqOiZjbkoN4t2+vvZ45Xi9i42rE4koNhZXiHE/Q/p12pWJ+/KVoEokZYV1YF0LSGteuj54ZO1UtblurtbatVHaqRz/IpZNu1hyKCdkiDWnfXLzufrjPa9jLcsTkntj5Ob4y4Qc9tWq6XN6/U/upKxXw/VgeGylLQDql3y/a6uvtgjerU3/HCcQAORRBpQvF+Z1c2hKuWFyW4uKaFm7UiQbfkdrqt36W6qef5WrgtWxv371BJTYXi/bFqF5+qCzIGqlNSG9NtekK7hBT9V78Ruqn3ML2Tv1ar9uRqf3WFYn1+tYlL0gUd+6pfqwzTbRqRFOvOmitu1/KqpICL4+VirUhg2R6+GUxxcbFSU1NVVFSklJQU0+24YkfFDlUEKxzViPfHq318e5c68q788l0qd7jaZ0JMnI5LaP7rYwDhsLl4r0qrna1GmxQbUNeU1i515G2biwpVWuVwvAIBdU1t5VJH5jTk85sZkSYWDQHCLQQIwKxoCRBuaQ4BwgQOeAIAAGMIIgAAwBiCCAAAMIYgAgAAjCGIAAAAYwgiAADAGIIIAAAwhiACAACMIYgAAABjCCIAAMAYgggAADCGIAIAAIwhiAAAAGMIIgAAwBiCCAAAMIYgAgAAjCGIAAAAYwgiAADAGIIIAAAwhiACAACMIYgAAABjCCIAAMAYgggAADCGIAIAAIwhiAAAAGMIIgAAwBiCCAAAMIYgAgAAjCGIAAAAYwgiAADAGIIIAAAwhiACAACMIYgAAABjCCIAAMAYgggAADAmxnQD4bK1ZI9Kayod1UiKiVPn5DSXOvKuzcV7VVpT5ahGUkxAXVNau9RRZAmGQlqS+50Wbd2ovRXlsmUrNS5eZ2R01oVdeyo+ptn+mh3T5n2FKq1y+N4KBNS1ZSuXOoos3+zarde+XKPtxftVXl2t5Lg4nZDeRpcP6Kv05CTT7RmzZXehyhy+rxIDAXVp0/zfV1sLClVa4fB3MD6gzunhGyvLtm07bNUdKi4uVmpqqoqKipSSklLv120t2aMLFz/qSg9vn3tTsw4jm4v36ty3nnCl1uKLboiqMFJaXaW5a1Zp3trV2llWIr/lU9AOSZL8lqWgbSslEKerep+kSf0GqU1CdH1wbN5XqHP//qwrtRaPuy
Zqwoht23pn/QZlLV+l7Pzt8vsshUK2bEk+y6rdbmSv4zX5tEHq36G9uWYN2LK7UBc+PNeVWm9Pm9Csw8jWgkKNum+uK7XevG1Cg8JIQz6/m+Wfak5nQsJVy4uczoSEq5bX7Swr0biFr+jbfbsV+j7LHwwhB/77wHPFVZV68svlenXDGv3jgjE6oVUbI/2a4HQmJFy1vCwYCun/vbtEL67+sjZ0BEM//K0Y+tHfjYvWb9C76zdo9kXn6dL+fZq8V1OczoSEq5YXOZ0JCVetn+IcEaCBiiordOVbL2nDj0LI0QRtW7vLSzXmrReVs39f+BtERLJtW3e/s1gvrf5Sko753gratkK2rd+/+a7eWrO+KVoEwoIgAjTQLR8tVM7+fbWzHvURtG3tr6rUNe++Jg8fDYVB879aq5ezv1ZD3x2WpN+9+a427ykMR1tA2DVJEHnsscfUpUsXxcfH69RTT9Xy5cubYreA67YWF+r9nI0NCiEHBW1bG/bt0SfbcsLQGSKZbdt6+tMVso696aGv/f71z6/MdrkroGmEPYi8/PLLmj59uu666y6tWrVKAwYM0MiRI7Vr165w7xpw3fPrvqhzwmBD+S1Lf1+7ysWO0ByszNumTXv2Nng25KCgbetfX6xRWVW1q30BTSHsQeTPf/6zrr32Wk2cOFF9+vTRE088ocTERD37rDtn0wNNJWTbevGbLxs1G3JQ0Lb1Xs4m7S4vdbEzRLp/ZX8lv6/xAVeSyqur9c76b13qCGg6YQ0iVVVVWrlypUaMGPHDDn0+jRgxQp9++ukh21dWVqq4uLjOA/CK4soKFVc5v4oqZNvKL+G9jR98t6ewztUxjRHj8ym3sMiljoCmE9Ygsnv3bgWDQbVr167O8+3atdOOHTsO2X727NlKTU2tfWRmZoazPaBBSmvcm/YuqW7elw2iYdy6PDlaLnNG8+Kpq2ZmzpypoqKi2kdubq7ploBaybEB12q1iI1zrRYiX3KcO++tpIB771GgqYR1QbM2bdrI7/dr586ddZ7fuXOn2rc/dDXAuLg4xcXxDzS8qUUgTi3j4rWvssJRHZ9lqWOL+q8UjOavR5s0fbV9p6PDMzWhkLqkNd9VQtF8hXVGJBAIaNCgQVq8eHHtc6FQSIsXL9aQIUPCuWvAdT7L0q97nSS/w6tmLuxyglrHJ7rYGSLdlSf1d3yOSHIgoJE9j3epI6DphP3QzPTp0/X0009r3rx5WrdunW688UaVlpZq4sSJ4d414Lpf9TqxXqupHknQtnV1n4EudoTm4MSM9johvU2j1hGRDgTcKwb2V3xss7xrB5q5sAeRK6+8UnPmzNGdd96pk046SdnZ2XrnnXcOOYEViAQdW6Tqwq49G7WWiN+y1Kd1W53avmMYOkMksyxL159+SqPWEbEk+XyWfvWzE91uC2gSTXKy6tSpU7V161ZVVlZq2bJlOvXUU5tit0BY/OnM83V8y7QGHaLxW5ZaxSfo2ZGXyXJwaAfN16i+vTThlIbNlh18Jz00+ufq1Kql6z0BTcFTV80AkSA5ENBLP79S/dq0kyUdczrdZ1nKSE7Rqxf9Sh2SWjRFi4hQM0acrcmnDZKkYwZdv8+S3+fTI5f+XOf17NEU7QFhwQFFoBFaxyfqnz8fqxe/+VLz1qzS5uJCxVg+2bJlS/LJUo0dUtuEJI3rM1Dj+gxUaly86bbhcT7L0u/OOUunds7UvM9X65Pvth44DGgdWAjPb/kUDIUU6/frkn69NWHwQB2f3sZ024AjzTKIJMW4dwmwm7W8KCnGvXUH3KwVCeJjYjSx7880oc9AfbY9V4u2btTeijKFJLWMi9cZGZ11bqfuivFF58Sjm2taRNv6GGd376qzu3dVTuE+zf9qrbYX71d5VbVaxMfphPQ2uqRfb6UmRGewTXTxveBmLS9Kinfxd9DFWj9l2R6+J3lxcbFSU1NVVFSklJSGrbuwtWSPSmucLcedFBOnzslpjmpEgs3Fe1Va42xFxq
SYgLqmtHapIzQXm/cVOl7tMykQUNeWrI+BH2zZXagyh++rxEBAXdo0//fV1oJClVY4/B2MD6hzesPGqiGf381yRkRSVAQItxAgEC4ECIRDNAQItzQ0QJgQnXPGAADAEwgiAADAGIIIAAAwhiACAACMIYgAAABjCCIAAMAYgggAADCGIAIAAIwhiAAAAGMIIgAAwBiCCAAAMIYgAgAAjCGIAAAAYwgiAADAGIIIAAAwhiACAACMIYgAAABjCCIAAMAYgggAADCGIAIAAIwhiAAAAGMIIgAAwBiCCAAAMIYgAgAAjCGIAAAAYwgiAADAGIIIAAAwhiACAACMIYgAAABjCCIAAMAYgggAADCGIAIAAIwhiAAAAGMIIgAAwBiCCAAAMCbGdAPRZsveQpVUVTmqkRwIqEvrVi515F1b9hSqtNLZWCXFBdQlrfmP1eHs3l+qBcvX6Nvtu1VSXqn4QKzat2yhS07po54Z6abb85SS8kq9vXy9vvhum/aXVSo2xq+0FokaeXJP/ez442RZlukWjcjZUaiyCme/g4nxAXVqHx2/g7nbClVW7nC8EgLKzIiO8TrIsm3bNt3EkRQXFys1NVVFRUVKSUkx3Y5jW/YWasTTc12p9f61E5p1GNmyp1Dn/2WuK7Xe+c2EqAoja/N2KmvJCr335QYd/O0O2bYsS/JZloIhW/07tde4s36mkSedELUfspKUV7BPz72/Um98ulZV1TWyfJZCIVuWJJ/Pp2AopM7tWulXwwdq9Bn9FOv3m265yeTsKNSYW7NcqfWvByY2+zCSu61Qv5ryN1dqvfDYpIgPIw35/GZGpAk5nQkJVy0vcjoTEq5aXvfvVev13y++I0kKhur+jWHbUvD7ZLImd6du/cfb+mxDjm7/xbmK8UffUdqVG/J082MLVFldUztW9sH/lRQMhSRJOTsLdf9LH+iD7I2ac/0oJcUHTLXcpJzOhISrllc5nQkJV61IEH3/+gDN1LtffKsZzy9UMGQfEkJ+KvR9IHlt2dea9a/35eGJ0bD4avN2/d+/vKaKqppjjpX9/ePzb3P1m8cWqKq6pkl6BKIFQQRoBnJ379OM5xeqoQdZbEkLPl+jV5d9HY62PKm8slq/eWyBgsFQbSCrj1DIVvambXrsjaVh7A6IPgQRoBl4eemXsm1bjZnXsCRlLVkRNbMiby9fp6LSigaFkINs29a/Pv4iKg41AE2FIAJEuIrqGr3y2VfHPMRwJLaknN37tGJTnruNeZBt23pxyWo5OT+3oqpGb3++3r2mgChHEAEi3OKvNjo+Idfvs/TKZ1+51JF3rcvZqe+275WTyR/Lkl756Av3mgKiHEEEiHA5u/cpxufsVzkYsrWloNCljrwrt6DIcQ3blvJ2O68D4ACCCBDhyiur1OCzVA+jJArOeyivrHalTkUVV84AbiGIABEuIS6gRp2l+hPJUbA+RkJcrKfqACCIABGva3or1Xy/+FZj+X2WurdLc6kj7+rSzvlqlZYldWrb0nkzACQRRICId07/7mqREOeoRjBka8yQ/i515F09M9uqZ8d0+RwcyrJtaczZA9xrCohyBBEgwgViYjTmtP7yNfKaVEtSt7atdVKXDHcb86hfDh+oRl7pLElKjIvVyJN7utcQEOXCFkT+8Ic/6PTTT1diYqJatmwZrt0AkHTl6QMU4/c16pxVW9I155wcNTe/G3lyT6WlJMrXiGkRS9KVw05SQoBzRAC3hC2IVFVVacyYMbrxxhvDtQsA38tonaIHx/1cstTgxbp+efoAXXxyn/A05kHxgRj9z9RLFev3N2gWyWdZOrV3J90wakgYuwOiT9iCyKxZs/Tb3/5W/fs3/+POgBcM69tdD40fpRifT/5j/LXv//4D+OqzfqYZlw6LmtmQg3pmttXT08coOSFwzLE6GFbOOrGbHrzhYsX6/U3RIhA1Ykw38GOVlZWqrKys/bq4uNhgN0DkObd/D/1r+lV67n9X6c0V61RdE5Tf51
PItmtnSoIhW6ccn6mrzxyos/p0M9uwQf26tNc/7xinF5es1qv/+5VKyisVU2esLAVDIfXMTNfY4QN1weBe8jtcOA7AoTwVRGbPnq1Zs2aZbiNskgPurdPgZi0vSopz7+dzs1Yk6N4+TXeP+T+aftGZemvlen27vUAl5VWKD8SofcsWGjWotzqnO7+MtTlo2zJZN196pm64aIjeW/Wtvti0TfvLKhUb41daSqLOG9RTfTq3M92mEYkurivjZi2vSkxwcbxcrBUJLLsBt9ycMWOG/vjHPx51m3Xr1qlXr161X8+dO1fTpk3Tvn37jln/cDMimZmZKioqUkpKSn3b9LQtewtVUuVsBcvkQEBdWjf/D5Itewod30MlKS6gLmnNf6yAcMjZUej4TsOJ8QF1ah8dv4O52wpVVu5wvBICysyI/PEqLi5WampqvT6/GzQjcsstt2jChAlH3aZbt8ZP9cbFxSkuztl6CF4XDQHCLQQIwKxoCRBuaQ4BwoQGBZH09HSlp6eHqxcAABBlwnaOSE5Ojvbu3aucnBwFg0FlZ2dLknr06KHk5ORw7RYAAESQsAWRO++8U/Pmzav9euDAgZKkJUuWaNiwYeHaLQAAiCANOlm1qTXkZBcAAOANDfn85qJ4AABgDEEEAAAYQxABAADGEEQAAIAxBBEAAGAMQQQAABhDEAEAAMYQRAAAgDEEEQAAYAxBBAAAGEMQAQAAxhBEAACAMQQRAABgDEEEAAAYQxABAADGEEQAAIAxBBEAAGAMQQQAABhDEAEAAMYQRAAAgDEEEQAAYAxBBAAAGEMQAQAAxhBEAACAMQQRAABgDEEEAAAYQxABAADGEEQAAIAxBBEAAGAMQQQAABhDEAEAAMbEmG4A5uXsKFRZRZWjGonxAXVq38qljrwtd3uhysodjldCQJkdmv945eXvVVmZw7FKDKjjca1d6si78nL3qrys0lGNhMQ4dcxs/mN1ODvyC7Vo/krlb92tstJKJSTGqUNma428dJAyOqWZbg9HQRCJcjk7CnX5jCxXar1y/8RmH0Zytxfqlzf9zZVaLz06qVmHkbz8vbpq0tOu1PrH365t1mEkL3evJl75V1dqZb38f6MqjHzx+Xd6Jet/9fl/vpXPZ8m2JTtky/JZsixLLz/zkQae1l2/mHCmTh56vOl2cRgEkSjndCYkXLW8yulMSLhqeZHTmZBw1fIipzMh4arlZbZt69V5/9EzD74jn98n2VIoaP/w/ZAtWwe+/uLz77T6s0266v+eo1/fcI4syzLVNg6Dc0QAABHntb9/omcefEeSFAqGjrrtwYDyj79+oOef+CDsvaFhCCIAgIiyZvVWPT1nYaNe+4+/fqAVn2xwuSM4QRABAESU1/7+ifz+xn18+fyWXpv3H5c7ghMEEQBAxNizq1hLP1ir4DEOxxxJKGhr1acbtS13j8udobEIIgCAiPHeG6vl9FRTn9+nd19b6Uo/cI4gAgCIGNty9ji+6sW2bW3P3etSR3CKIAIAiBgVZVUK2faxNzwKO2SrrKTCpY7gFEEEABAx4hMD8jmcEbF8lpJaxLvUEZwiiAAAIkZmlzaOZ0QsScd1buNOQ3CMIAIAiBgjLv6Z43NEQratkZcNcqkjOEUQAQBEjFZtknXG/+nrYB0Rn0454wS1y2i+93mKNAQRAEBE+cW4oQqFGnd4JhQM6fIJZ7rcEZwgiAAAIkrP/pmaevuoRr120vTzNWBwN5c7ghMEEQBAxPn5Fadq6u0Xy7KsYx6mOfj9ybecr8snnNEU7aEBYkw3AABAY1x05anq0TtD85/7RP/73hrZti2fZSkUsuXzWbJtW7ak04b10qVXD1W/QV1Mt4zDIIhEucT4gCdreVVigovj5WItL0pMdHGsXKzlRQmJcZ6sFQl6nZipmQ/8UjfsLtH7b6xS3tbdKiupVEJSnDIyW2vExQPVpl2q6TZxFJZtO7wgO4yKi4uVmpqqoqIipaSkmG6n2crZUaiyii
pHNRLjA+rUPjrOQs/dXqiycofjlRBQZofmP155+XtVVuZwrBID6nhca5c68q683L0qL6t0VCMhMU4dM5v/WMH7GvL5zYwIoiZAuCUaAoRboiFAuIUAgWjFyaoAAMAYgggAADAmbEFky5YtmjRpkrp27aqEhAR1795dd911l6qqnB0vBgAAzUfYzhFZv369QqGQnnzySfXo0UNff/21rr32WpWWlmrOnDnh2i0AAIggTXrVzAMPPKDHH39c3333Xb2256oZAAAij2evmikqKlLr1kc+M7yyslKVlT9cvlZcXNwUbQEAAEOa7GTVjRs36tFHH9X1119/xG1mz56t1NTU2kdmZmZTtQcAAAxocBCZMWOGLMs66mP9+vV1XpOfn6/zzz9fY8aM0bXXXnvE2jNnzlRRUVHtIzc3t+E/EQAAiBgNPkekoKBAe/bsOeo23bp1UyBwYEnmbdu2adiwYTrttNM0d+5c+Xz1zz6cIwIAQOQJ6zki6enpSk9Pr9e2+fn5Gj58uAYNGqSsrKwGhRAAAND8he1k1fz8fA0bNkydO3fWnDlzVFBQUPu99u3bh2u3AAAggoQtiLz33nvauHGjNm7cqI4dO9b5nofvswcAAJpQ2I6VTJgwQbZtH/YBAAAgca8ZAABgEEEEAAAYQxABAADGEEQAAIAxBBEAAGAMQQQAABhDEAEAAMYQRAAAgDEEEQAAYAxBBAAAGEMQAQAAxhBEAACAMQQRAABgDEEEAAAYQxABAADGEEQAAIAxBBEAAGAMQQQAABhDEAEAAMYQRAAAgDEEEQAAYAxBBAAAGBNjugHgSPK37lFZWaWjGomJcTquc5pLHXlX/uYClZc6G6uEpDgd1zXdpY68K3/TTpWVVDiqkZgcr+O6t3OpI+/K27Bd5fvLHdVIaJGgjsd3cKkjNEcEEXhS/tY9mnTJI67U+tvrNzfrMJK/uUCTz5ntSq1nPpjZrMNI/qadmnTK7a7U+tvn9zbrMJK3Ybsm9vyNK7WyvvkLYQRHxKEZeJLTmZBw1fIipzMh4arlRU5nQsJVy4uczoSEqxaaH4IIAAAwhiACAACMIYgAAABjCCIAAMAYgggAADCGIAIAAIwhiAAAAGMIIgAAwBiCCAAAMIYgAgAAjCGIAAAAYwgiAADAGIIIAAAwhiACAACMIYgAAABjCCLwpMTEOE/W8qKEJPd+PjdreVFicrwna3lRQosET9ZC82PZtm2bbuJIiouLlZqaqqKiIqWkpJhuB00sf+selZVVOqqRmBin4zqnudSRd+VvLlB5qbOxSkiK03Fd013qyLvyN+1UWUmFoxqJyfE6rns7lzryrrwN21W+v9xRjYQWCep4fAeXOkKkaMjnd0wT9QQ0WDQECLdEQ4BwSzQECLcQINAUODQDAACMIYgAAABjCCIAAMAYgggAADDG0yerHrygp7i42HAnAACgvg5+btfnwlxPB5H9+/dLkjIzMw13AgAAGmr//v1KTU096jaeXkckFApp27ZtatGihSzLcrV2cXGxMjMzlZubyxolx8BY1R9jVX+MVf0xVg3DeNVfuMbKtm3t379fGRkZ8vmOfhaIp2dEfD6fOnbsGNZ9pKSk8EatJ8aq/hir+mOs6o+xahjGq/7CMVbHmgk5iJNVAQCAMQQRAABgTNQGkbi4ON11112Ki2veN/lyA2NVf4xV/TFW9cdYNQzjVX9eGCtPn6wKAACat6idEQEAAOYRRAAAgDEEEQAAYAxBBAAAGEMQkXTxxRerU6dOio+PV4cOHXT11Vdr27ZtptvynC1btmjSpEnq2rWrEhIS1L17d911112qqqoy3Zon/eEPf9Dpp5+uxMREtWzZ0nQ7nvPYY4+pS5cuio+P16mnnqrly5ebbslzPv74Y40aNUoZGRmyLEsLFiww3ZJnzZ49W6eccopatGihtm3bavTo0frmm29Mt+VJjz/+uE488cTaRcyGDBmihQsXGuuHICJp+PDh+uc//6lvvvlGr7
76qjZt2qTLL7/cdFues379eoVCIT355JNas2aNHnroIT3xxBO67bbbTLfmSVVVVRozZoxuvPFG0614zssvv6zp06frrrvu0qpVqzRgwACNHDlSu3btMt2ap5SWlmrAgAF67LHHTLfieR999JGmTJmizz77TO+9956qq6t13nnnqbS01HRrntOxY0fdf//9WrlypVasWKFzzjlHl1xyidasWWOmIRuHeP31123LsuyqqirTrXjen/70J7tr166m2/C0rKwsOzU11XQbnjJ48GB7ypQptV8Hg0E7IyPDnj17tsGuvE2SPX/+fNNtRIxdu3bZkuyPPvrIdCsRoVWrVvYzzzxjZN/MiPzE3r179fzzz+v0009XbGys6XY8r6ioSK1btzbdBiJIVVWVVq5cqREjRtQ+5/P5NGLECH366acGO0NzUlRUJEn8+3QMwWBQL730kkpLSzVkyBAjPRBEvvf73/9eSUlJSktLU05Ojl5//XXTLXnexo0b9eijj+r666833QoiyO7duxUMBtWuXbs6z7dr1047duww1BWak1AopGnTpmno0KHq16+f6XY86auvvlJycrLi4uJ0ww03aP78+erTp4+RXpptEJkxY4YsyzrqY/369bXb33rrrVq9erUWLVokv9+vcePGyY6SRWcbOlaSlJ+fr/PPP19jxozRtddea6jzpteYsQLQtKZMmaKvv/5aL730kulWPKtnz57Kzs7WsmXLdOONN2r8+PFau3atkV6a7RLvBQUF2rNnz1G36datmwKBwCHP5+XlKTMzU0uXLjU2VdWUGjpW27Zt07Bhw3Taaadp7ty58vmabZ49RGPeV3PnztW0adO0b9++MHcXGaqqqpSYmKhXXnlFo0ePrn1+/Pjx2rdvH7ORR2BZlubPn19nzHCoqVOn6vXXX9fHH3+srl27mm4nYowYMULdu3fXk08+2eT7jmnyPTaR9PR0paenN+q1oVBIklRZWelmS57VkLHKz8/X8OHDNWjQIGVlZUVVCJGcva9wQCAQ0KBBg7R48eLaD9VQKKTFixdr6tSpZptDxLJtWzfddJPmz5+vDz/8kBDSQKFQyNhnXrMNIvW1bNkyff755zrjjDPUqlUrbdq0SXfccYe6d+8eFbMhDZGfn69hw4apc+fOmjNnjgoKCmq/1759e4OdeVNOTo727t2rnJwcBYNBZWdnS5J69Oih5ORks80ZNn36dI0fP14nn3yyBg8erIcfflilpaWaOHGi6dY8paSkRBs3bqz9evPmzcrOzlbr1q3VqVMng515z5QpU/TCCy/o9ddfV4sWLWrPN0pNTVVCQoLh7rxl5syZuuCCC9SpUyft379fL7zwgj788EO9++67Zhoycq2Oh3z55Zf28OHD7datW9txcXF2ly5d7BtuuMHOy8sz3ZrnZGVl2ZIO+8Chxo8ff9ixWrJkienWPOHRRx+1O3XqZAcCAXvw4MH2Z599Zrolz1myZMlh30Pjx4833ZrnHOnfpqysLNOtec4111xjd+7c2Q4EAnZ6erp97rnn2osWLTLWT7M9RwQAAHhfdB3gBwAAnkIQAQAAxhBEAACAMQQRAABgDEEEAAAYQxABAADGEEQAAIAxBBEAAGAMQQQAABhDEAEAAMYQRAAAgDEEEQAAYMz/BwKyucymvqv/AAAAAElFTkSuQmCC\",\n      \"text/plain\": [\n       \"<Figure size 640x480 with 1 Axes>\"\n      ]\n     },\n     \"metadata\": {},\n     \"output_type\": \"display_data\"\n    }\n   ],\n   \"source\": [\n    \"plotCompPinPow(primaryFuel, marker=\\\"s\\\", label=\\\"primary\\\")\\n\",\n    \"plotCompPinPow(secondaryFuel, 
marker=\\\"o\\\", label=\\\"secondary\\\")\\n\",\n    \"pyplot.legend()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"id\": \"0b146c61\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Bringing it all together.\\n\",\n    \"\\n\",\n    \"Pin-like parameters are ordered by a pin-index, not strictly a spatial ordering. Therefore they are invariant of rotation; `Block.p.linPowByPin[i]` is the linear power for pin `i`, wherever it may be in the block. \\n\",\n    \"\\n\",\n    \"Without looking into the components, pin `i` is located at `Block.getPinLocations()[i]`. If the block is rotated, the locator `Block.getPinLocations()[i]` will indicate a new location, but it still represents pin `i`.\"\n   ]\n  }\n ],\n \"metadata\": {\n  \"kernelspec\": {\n   \"display_name\": \"armi\",\n   \"language\": \"python\",\n   \"name\": \"python3\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 3\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython3\",\n   \"version\": \"3.13.3\"\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 5\n}\n"
  },
  {
    "path": "armi/tests/zpprTest.yaml",
    "content": "metadata:\n  version: uncontrolled\nsettings:\n# global\n  Tin: 20.0\n  Tout: 20.0\n  buGroups:\n    - 100\n  burnSteps: 0\n  comment: ZPPR test case\n  cycleLength: 365.25\n  loadingFile: zpprTestGeom.yaml\n  nTasks: 12\n  outputFileExtension: pdf\n  power: 75000000.0\n  sortReactor: false # zpprs dont sor the right way. need better component sorting for slab...\n  verbosity: extra\n\n# cross section\n  crossSectionControl:\n    AA:\n      geometry: 1D slab\n      externalDriver: true\n      useHomogenizedBlockComposition: false\n      numInternalRings: 1\n      numExternalRings: 1\n      meshSubdivisionsPerCm: 10\n    AC:\n      geometry: 1D slab\n      externalDriver: true\n      useHomogenizedBlockComposition: false\n      numInternalRings: 1\n      numExternalRings: 1\n      meshSubdivisionsPerCm: 10\n    AZ:\n      geometry: 1D slab\n      externalDriver: true\n      useHomogenizedBlockComposition: false\n      numInternalRings: 1\n      numExternalRings: 1\n      meshSubdivisionsPerCm: 10\n\n# neutronics\n  epsEig: 1e-10\n  genXS: Neutron\n  xsBlockRepresentation: ComponentAverage1DSlab\n"
  },
  {
    "path": "armi/tests/zpprTestGeom.yaml",
    "content": "!include 1DslabXSByCompTest.yaml\nsystems:\n    core:\n        grid name: core\n        origin:\n            x: 0.0\n            y: 0.0\n            z: 0.0\ngrids:\n    core:\n        geom: cartesian\n        symmetry: full\n        grid contents:\n            [0, 0]: D2\n            [1, 0]: D1\n            [2, 0]: D1\n            [3, 0]: D1\n            [0, 1]: D2\n            [1, 1]: D2\n            [2, 1]: D2\n            [3, 1]: D2\n            [0, 2]: D2\n            [1, 2]: D2\n            [2, 2]: D2\n            [3, 2]: D2\n"
  },
  {
    "path": "armi/utils/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Generic ARMI utilities.\"\"\"\n\n# ruff: noqa: F405\nimport collections\nimport getpass\nimport hashlib\nimport math\nimport os\nimport pickle\nimport re\nimport shutil\nimport sys\nimport time\n\nfrom armi import runLog\nfrom armi.utils import iterables\nfrom armi.utils.flags import Flag  # noqa: F401\nfrom armi.utils.mathematics import *  # noqa: F403\n\n# Read in file 1 MB at a time to reduce memory burden of reading entire file at once\n_HASH_BUFFER_SIZE = 1024 * 1024\n\n\ndef getFileSHA1Hash(filePath, digits=40):\n    \"\"\"\n    Generate a SHA-1 hash of input files.\n\n    Parameters\n    ----------\n    filePath : str\n        Path to file or directory to obtain the SHA-1 hash\n    digits : int, optional\n        Number of digits to include in the hash (40 digit maximum for SHA-1)\n    \"\"\"\n    sha1 = hashlib.sha1()\n    filesToHash = []\n    if os.path.isdir(filePath):\n        for root, _, files in os.walk(filePath):\n            for file in sorted(files):\n                filesToHash.append(os.path.join(root, file))\n    else:\n        filesToHash.append(filePath)\n\n    for file in filesToHash:\n        with open(file, \"rb\") as f:\n            while True:\n                data = f.read(_HASH_BUFFER_SIZE)\n                if not data:\n                    break\n                sha1.update(data)\n\n    return 
sha1.hexdigest()[:digits]\n\n\ndef getPowerFractions(cs):\n    \"\"\"\n    Return the power fractions for each cycle.\n\n    Parameters\n    ----------\n    cs : case settings object\n\n    Returns\n    -------\n    powerFractions : 2-list\n        A list with nCycles elements, where each element is itself a list of the power fractions at each step of the\n        cycle.\n\n    Notes\n    -----\n    This is stored outside of the Operator class so that it can be easily called to resolve case settings objects in\n    other contexts (i.e. in the preparation of restart runs).\n    \"\"\"\n    if cs[\"cycles\"] != []:\n        return [\n            expandRepeatedFloats(\n                (cycle[\"power fractions\"]) if \"power fractions\" in cycle.keys() else [1] * getBurnSteps(cs)[cycleIdx]\n            )\n            for (cycleIdx, cycle) in enumerate(cs[\"cycles\"])\n        ]\n    else:\n        valuePerCycle = (\n            expandRepeatedFloats(cs[\"powerFractions\"])\n            if cs[\"powerFractions\"] not in [None, []]\n            else [1.0] * cs[\"nCycles\"]\n        )\n\n        return [[value] * (cs[\"burnSteps\"] if cs[\"burnSteps\"] is not None else 0) for value in valuePerCycle]\n\n\ndef getCycleNames(cs):\n    \"\"\"\n    Return the names of each cycle. If a name is omitted, it is `None`.\n\n    Parameters\n    ----------\n    cs : case settings object\n\n    Returns\n    -------\n    cycleNames : list\n        A list of the availability factors.\n\n    Notes\n    -----\n    This is stored outside of the Operator class so that it can be easily called to resolve case settings objects in\n    other contexts (i.e. 
in the preparation of restart runs).\n    \"\"\"\n    if cs[\"cycles\"] != []:\n        return [(cycle[\"name\"] if \"name\" in cycle.keys() else None) for cycle in cs[\"cycles\"]]\n    else:\n        return [None] * cs[\"nCycles\"]\n\n\ndef getAvailabilityFactors(cs):\n    \"\"\"\n    Return the availability factors for each cycle.\n\n    Parameters\n    ----------\n    cs : case settings object\n\n    Returns\n    -------\n    availabilityFactors : list\n        A list of the availability factors.\n\n    Notes\n    -----\n    This is stored outside of the Operator class so that it can be easily called to resolve case settings objects in\n    other contexts (i.e. in the preparation of restart runs).\n    \"\"\"\n    if cs[\"cycles\"] != []:\n        availabilityFactors = []\n        for cycle in cs[\"cycles\"]:\n            if \"availability factor\" in cycle.keys():\n                availabilityFactors.append(cycle[\"availability factor\"])\n            else:\n                availabilityFactors.append(1)\n        return availabilityFactors\n    else:\n        return (\n            expandRepeatedFloats(cs[\"availabilityFactors\"])\n            if cs[\"availabilityFactors\"] not in [None, []]\n            else ([cs[\"availabilityFactor\"]] * cs[\"nCycles\"] if cs[\"availabilityFactor\"] is not None else [1])\n        )\n\n\ndef _getStepAndCycleLengths(cs):\n    r\"\"\"\n    Get both steps and lengths together to prevent chicken/egg problem.\n\n    Notes\n    -----\n    Using this method directly is more efficient than calling `getStepLengths` and `getCycleLengths` separately, but it\n    is probably more clear to the user to call each of them separately.\n    \"\"\"\n    stepLengths = []\n    availabilityFactors = getAvailabilityFactors(cs)\n    if cs[\"cycles\"] != []:\n        for cycleIdx, cycle in enumerate(cs[\"cycles\"]):\n            cycleKeys = cycle.keys()\n\n            if \"step days\" in cycleKeys:\n                
stepLengths.append(expandRepeatedFloats(cycle[\"step days\"]))\n            elif \"cumulative days\" in cycleKeys:\n                cumulativeDays = cycle[\"cumulative days\"]\n                stepLengths.append(getStepsFromValues(cumulativeDays))\n            elif \"burn steps\" in cycleKeys and \"cycle length\" in cycleKeys:\n                stepLengths.append(\n                    [cycle[\"cycle length\"] * availabilityFactors[cycleIdx] / cycle[\"burn steps\"]] * cycle[\"burn steps\"]\n                )\n            else:\n                raise ValueError(f\"No cycle time history is given in the detailed cycles history for cycle {cycleIdx}\")\n\n        cycleLengths = [sum(cycleStepLengths) for cycleStepLengths in stepLengths]\n        cycleLengths = [cycleLength / aFactor for (cycleLength, aFactor) in zip(cycleLengths, availabilityFactors)]\n    else:\n        cycleLengths = (\n            expandRepeatedFloats(cs[\"cycleLengths\"])\n            if cs[\"cycleLengths\"] not in [None, []]\n            else ([cs[\"cycleLength\"]] * cs[\"nCycles\"] if cs[\"cycleLength\"] is not None else [0])\n        )\n        cycleLengthsModifiedByAvailability = [\n            length * availability for (length, availability) in zip(cycleLengths, availabilityFactors)\n        ]\n        stepLengths = (\n            [[length / cs[\"burnSteps\"]] * cs[\"burnSteps\"] for length in cycleLengthsModifiedByAvailability]\n            if cs[\"burnSteps\"] not in [0, None]\n            else [[]]\n        )\n\n    return stepLengths, cycleLengths\n\n\ndef getStepLengths(cs):\n    \"\"\"\n    Return the length of each step in each cycle.\n\n    Parameters\n    ----------\n    cs : case settings object\n\n    Returns\n    -------\n    stepLengths : 2-list\n        A list with elements for each cycle, where each element itself is a list containing the step lengths in days.\n\n    Notes\n    -----\n    This is stored outside of the Operator class so that it can be easily called to resolve case 
settings objects in\n    other contexts (i.e. in the preparation of restart runs).\n    \"\"\"\n    return _getStepAndCycleLengths(cs)[0]\n\n\ndef getCycleLengths(cs):\n    \"\"\"\n    Return the lengths of each cycle in days.\n\n    Parameters\n    ----------\n    cs : case settings object\n\n    Returns\n    -------\n    cycleLengths : list\n        A list of the cycle lengths in days.\n\n    Notes\n    -----\n    This is stored outside of the Operator class so that it can be easily called to resolve case settings objects in\n    other contexts (i.e. in the preparation of restart runs).\n    \"\"\"\n    return _getStepAndCycleLengths(cs)[1]\n\n\ndef getBurnSteps(cs):\n    \"\"\"\n    Return the number of burn steps for each cycle.\n\n    Parameters\n    ----------\n    cs : case settings object\n\n    Returns\n    -------\n    burnSteps : list\n        A list of the number of burn steps.\n\n    Notes\n    -----\n    This is stored outside of the Operator class so that it can be easily called to resolve case settings objects in\n    other contexts (i.e. 
in the preparation of restart runs).\n    \"\"\"\n    stepLengths = getStepLengths(cs)\n    return [len(steps) for steps in stepLengths]\n\n\ndef hasBurnup(cs):\n    \"\"\"Test if depletion is being modeled.\n\n    Parameters\n    ----------\n    cs : case settings object\n\n    Returns\n    -------\n    bool\n        Are there any burnup steps?\n    \"\"\"\n    return sum(getBurnSteps(cs)) > 0\n\n\ndef getMaxBurnSteps(cs):\n    burnSteps = getBurnSteps(cs)\n    return max(burnSteps)\n\n\ndef getCumulativeNodeNum(cycle, node, cs):\n    \"\"\"\n    Return the cumulative node number associated with a cycle and time node.\n\n    Note that a cycle with n time steps has n+1 nodes, and for cycle m with n steps, nodes (m, n+1) and (m+1, 0) are\n    counted separately.\n\n    Parameters\n    ----------\n    cycle : int\n        The cycle number\n    node : int\n        The intra-cycle time node (0 for BOC, etc.)\n    cs : Settings object\n    \"\"\"\n    nodesPerCycle = getNodesPerCycle(cs)\n    return sum(nodesPerCycle[:cycle]) + node\n\n\ndef getCycleNodeFromCumulativeStep(timeStepNum, cs):\n    \"\"\"\n    Return the (cycle, node) corresponding to a cumulative time step number.\n\n    \"Node\" refers to the node at the start of the time step.\n\n    Parameters\n    ----------\n    timeStepNum : int\n        The cumulative number of time steps since the beginning\n    cs : case settings object\n        A case settings object to get the steps-per-cycle from\n\n    Notes\n    -----\n    Time steps are the spaces between time nodes, and are 1-indexed.\n\n    To get the (cycle, node) from a cumulative time node, see instead getCycleNodeFromCumulativeNode.\n    \"\"\"\n    stepsPerCycle = getBurnSteps(cs)\n\n    if timeStepNum < 1:\n        raise ValueError(\"Cumulative time step cannot be less than 1.\")\n\n    cSteps = 0  # cumulative steps\n    for i in range(len(stepsPerCycle)):\n        cSteps += stepsPerCycle[i]\n        if timeStepNum <= cSteps:\n            return (i, 
timeStepNum - (cSteps - stepsPerCycle[i]) - 1)\n\n    i = len(stepsPerCycle) - 1\n    return (i, timeStepNum - (cSteps - stepsPerCycle[i]) - 1)\n\n\ndef getCycleNodeFromCumulativeNode(timeNodeNum, cs):\n    \"\"\"\n    Return the (cycle, node) corresponding to a cumulative time node number.\n\n    Parameters\n    ----------\n    timeNodeNum : int\n        The cumulative number of time nodes since the beginning\n    cs : case settings object\n        A case settings object to get the nodes-per-cycle from\n\n    Notes\n    -----\n    Time nodes are the start/end of time steps, and are 0-indexed. For a cycle with n steps, there will be n+1 nodes\n    (one at the start of the cycle and another at the end, plus those separating the steps). For cycle m with n steps,\n    nodes (m, n+1) and (m+1, 0) are counted separately.\n\n    To get the (cycle, node) from a cumulative time step, see instead getCycleNodeFromCumulativeStep.\n    \"\"\"\n    nodesPerCycle = getNodesPerCycle(cs)\n\n    if timeNodeNum < 0:\n        raise ValueError(\"Cumulative time node cannot be less than 0.\")\n\n    cNodes = 0  # cumulative nodes\n    for i in range(len(nodesPerCycle)):\n        cNodes += nodesPerCycle[i]\n        if timeNodeNum < cNodes:\n            return (i, timeNodeNum - (cNodes - nodesPerCycle[i]))\n\n    i = len(nodesPerCycle) - 1\n    return (i, timeNodeNum - (cNodes - nodesPerCycle[i]))\n\n\ndef getNodesPerCycle(cs):\n    \"\"\"Return the number of nodes per cycle for the case settings object.\"\"\"\n    return [s + 1 for s in getBurnSteps(cs)]\n\n\ndef getPreviousTimeNode(cycle, node, cs):\n    \"\"\"Return the (cycle, node) before the specified (cycle, node).\"\"\"\n    if (cycle, node) == (0, 0):\n        raise ValueError(\"There is no time step before (0, 0)\")\n    if node != 0:\n        return (cycle, node - 1)\n    else:\n        nodesPerCycle = getNodesPerCycle(cs)\n        nodesInLastCycle = nodesPerCycle[cycle - 1]\n        indexOfLastNode = nodesInLastCycle - 1  # 
zero based indexing for nodes\n        return (cycle - 1, indexOfLastNode)\n\n\ndef tryPickleOnAllContents(obj, ignore=None, verbose=False):\n    r\"\"\"\n    Attempts to pickle all members of this object and identifies those who cannot be pickled.\n\n    Useful for debugging MPI-bcast errors.\n\n    Parameters\n    ----------\n    obj : object\n        Any object to be tested.\n    ignore : iterable\n        list of string variable names to ignore.\n    verbose : bool, optional\n        Print all objects whether they fail or not\n    \"\"\"\n    if ignore is None:\n        ignore = []\n\n    # pickle gives better error messages than cPickle\n    for name, ob in obj.__dict__.items():\n        if name not in ignore:\n            if verbose:\n                print(f\"Checking {name}...\")\n            try:\n                pickle.dumps(ob)  # dump as a string\n            except Exception:\n                print(f\"{name} in {obj} cannot be pickled.\")\n\n\ndef classesInHierarchy(obj, classCounts, visited=None):\n    \"\"\"Count the number of instances of each class contained in an objects hierarchy.\"\"\"\n    if not isinstance(classCounts, collections.defaultdict):\n        raise TypeError(\"Need to pass in a default dict for classCounts (it's an out param)\")\n\n    if visited is None:\n        classCounts[type(obj)] += 1\n        visited = set()\n        visited.add(id(obj))\n\n    try:\n        for c in obj.__dict__.values():\n            if id(c) not in visited:\n                classCounts[type(c)] += 1\n                visited.add(id(c))\n                classesInHierarchy(c, classCounts, visited=visited)\n    except AttributeError:\n        pass\n\n\ndef slantSplit(val, ratio, nodes, order=\"low first\"):\n    \"\"\"\n    Returns a list of values whose sum is equal to the value specified.\n\n    The ratio between the highest and lowest value is equal to the specified ratio, and the middle values trend linearly\n    between them.\n    \"\"\"\n    val = 
float(val)\n    ratio = float(ratio)\n    nodes = int(nodes)\n    v0 = 2.0 * val / (nodes * (1.0 + ratio))\n    X = []\n    for i in range(nodes):\n        X.append(v0 + i * (v0 * ratio - v0) / (nodes - 1))\n\n    if order == \"high first\":\n        X.reverse()\n\n    return X\n\n\ndef prependToList(originalList, listToPrepend):\n    \"\"\"\n    Add a new list to the beginning of an original list.\n\n    Parameters\n    ----------\n    originalList : list\n        The list to prepend to.\n    listToPrepend : list\n        The list to add to the beginning of (prepend) the originalList.\n\n    Returns\n    -------\n    originalList : list\n        The original list with the listToPrepend at it's beginning.\n    \"\"\"\n    listToPrepend.reverse()\n    originalList.reverse()\n    originalList.extend(listToPrepend)\n    originalList.reverse()\n    listToPrepend.reverse()\n    return originalList\n\n\ndef capStrLen(s: str, length: int) -> str:\n    \"\"\"\n    Truncates a string to a certain length.\n\n    Adds '...' if it's too long.\n\n    Parameters\n    ----------\n    s : str\n        The string to cap at length l.\n    length : int\n        The maximum length of the string s.\n    \"\"\"\n    if length <= 2:\n        raise Exception(\"l must be at least 3 in utils.capStrLen\")\n\n    if len(s) <= length:\n        return s\n\n    return s[0 : length - 3] + \"...\"\n\n\ndef list2str(strings, width=None, preStrings=None, fmt=None):\n    \"\"\"\n    Turn a list of strings into one string, applying the specified format to each.\n\n    Parameters\n    ----------\n    strings : list\n        The items to create centered strings in the line for. Can be str, float, int, etc.\n    width : int, optional\n        The maximum width that the strings are allowed to take up. 
Only strings are affected by this parameter, because\n        it does not make sense to truncate ints or floats.\n    preStrings : list of str, optional\n        Any strings that come before the centered strings.\n    fmt : str, optional\n        The format to apply to each string, such as ' >4d', '^12.4E'.\n    \"\"\"\n    if preStrings is None:\n        preStrings = []\n\n    if fmt is None:\n        fmt = \"\"\n\n    newStrings = []\n    for string in strings:\n        if isinstance(string, str) and width is not None:\n            string = capStrLen(str(string), width)\n        string = \"{0:{fmt}}\".format(string, fmt=fmt)\n        newStrings.append(string)\n\n    preStrings.extend(newStrings)\n    return \"\".join(preStrings)\n\n\ndef createFormattedStrWithDelimiter(dataList, maxNumberOfValuesBeforeDelimiter=9, delimiter=\"\\n\"):\n    r\"\"\"\n    Return a formatted string with delimiters from a list of data.\n\n    Parameters\n    ----------\n    dataList : list\n        List of data that will be formatted into a string\n    maxNumberOfValuesBeforeDelimiter : int\n        maximum number of values to have before the delimiter is added\n    delimiter : str\n        A delimiter on the formatted string (default: \"\\n\")\n\n    Notes\n    -----\n    As an example::\n\n        >>> createFormattedStrWithDelimiter(['hello', 'world', '1', '2', '3', '4'],\n        ...     
maxNumberOfValuesBeforeDelimiter=3, delimiter = '\\n')\n        \"hello, world, 1, \\n2, 3, \\n4, 5\\n\"\n    \"\"\"\n    formattedString = \"\"\n    if not dataList:\n        return formattedString\n\n    if not maxNumberOfValuesBeforeDelimiter:\n        numRows = 1\n    else:\n        numRows = int(math.ceil(float(len(dataList)) / float(maxNumberOfValuesBeforeDelimiter))) or 1\n\n    # Create a list of string delimiters to use when joining the strings\n    commaList = [\",\" for d in dataList]\n    commaList[-1] = \"\"\n    dataList = [str(d) + commaList[i] for i, d in enumerate(dataList)]\n    for splitList in iterables.split(dataList, n=numRows, padWith=\"\"):\n        formattedString += \" \".join(splitList) + delimiter\n    return formattedString\n\n\ndef plotMatrix(\n    matrix,\n    fName,\n    minV=None,\n    maxV=None,\n    show=False,\n    title=None,\n    xlabel=None,\n    ylabel=None,\n    xticks=None,\n    yticks=None,\n    cmap=None,\n    figsize=None,\n):\n    \"\"\"Plots a matrix.\"\"\"\n    import matplotlib\n    import matplotlib.pyplot as plt\n\n    if figsize:\n        plt.figure(figsize=figsize)\n    else:\n        plt.figure()\n\n    if cmap is None:\n        cmap = plt.cm.jet\n\n    cmap.set_bad(\"w\")\n    try:\n        matrix = matrix.todense()\n    except Exception:\n        pass\n\n    if minV:\n        norm = matplotlib.colors.Normalize(minV, maxV)\n    else:\n        norm = None\n\n    if title is None:\n        title = fName\n\n    # or bicubic or nearest#,vmin=0, vmax=300)\n    plt.imshow(matrix, cmap=cmap, norm=norm, interpolation=\"nearest\")\n    plt.colorbar()\n    plt.title(title)\n    plt.xlabel(xlabel)\n    plt.ylabel(ylabel)\n    if xticks:\n        plt.xticks(*xticks, rotation=90)\n    if yticks:\n        plt.yticks(*yticks)\n    plt.grid()\n    plt.savefig(fName)\n    if show:\n        plt.show()\n    plt.close()\n\n\ndef userName() -> str:\n    \"\"\"\n    Return a database-friendly username.\n\n    This will return the 
current user's username, removing any prefix like ``pre-``, if present.\n\n    Notes\n    -----\n    ARMI uses the user name in a number of places, namely in the database names, which cannot contain hyphens.\n    \"\"\"\n    return re.sub(\"^[a-zA-Z]-\", \"\", getpass.getuser())\n\n\nclass MergeableDict(dict):\n    \"\"\"\n    Overrides python dictionary and implements a merge method.\n\n    Notes\n    -----\n    Allows multiple dictionaries to be combined in a single line\n    \"\"\"\n\n    def merge(self, *otherDictionaries) -> None:\n        for dictionary in otherDictionaries:\n            self.update(dictionary)\n\n\ndef safeCopy(src: str, dst: str) -> None:\n    \"\"\"Check that copy operation is truly completed before continuing.\"\"\"\n    # Convert files to OS-independence\n    src = os.path.abspath(src)\n    dst = os.path.abspath(dst)\n    if os.path.isdir(dst):\n        dst = os.path.join(dst, os.path.basename(src))\n\n    srcSize = os.path.getsize(src)\n    if \"win\" in sys.platform:\n        # this covers Windows (\"win32\") and MacOS (\"darwin\")\n        shutil.copyfile(src, dst)\n        shutil.copymode(src, dst)\n    elif \"linux\" in sys.platform:\n        cmd = f'cp \"{src}\" \"{dst}\"'\n        os.system(cmd)\n    else:\n        raise OSError(\"Cannot perform ``safeCopy`` on files because ARMI only supports Linux, MacOs, and Windows.\")\n\n    waitTime = 0.01  # 10 ms\n    maxWaitTime = 300  # 5 min\n    totalWaitTime = 0\n    while True:\n        dstSize = os.path.getsize(dst)\n        if srcSize == dstSize:\n            break\n        time.sleep(waitTime)\n        totalWaitTime += waitTime\n        if totalWaitTime > maxWaitTime:\n            runLog.warning(\n                f\"File copy from {dst} to {src} has failed due to exceeding a maximum wait time of {maxWaitTime / 60} \"\n                \"minutes.\"\n            )\n            return\n\n    runLog.extra(f\"Copied {src} -> {dst}\")\n\n\ndef safeMove(src: str, dst: str) -> None:\n    
\"\"\"Check that a file has been successfully moved before continuing.\"\"\"\n    # Convert files to OS-independence\n    src = os.path.abspath(src)\n    dst = os.path.abspath(dst)\n    if os.path.isdir(dst):\n        dst = os.path.join(dst, os.path.basename(src))\n\n    srcSize = os.path.getsize(src)\n    if \"win\" in sys.platform:\n        # this covers Windows (\"win32\") and MacOS (\"darwin\")\n        shutil.move(src, dst)\n    elif \"linux\" in sys.platform:\n        cmd = f'mv \"{src}\" \"{dst}\"'\n        os.system(cmd)\n    else:\n        raise OSError(\"Cannot perform ``safeMove`` on files because ARMI only supports \" + \"Linux, MacOS, and Windows.\")\n\n    waitTime = 0.01  # 10 ms\n    maxWaitTime = 6000  # 1 min\n    totalWaitTime = 0\n    while True:\n        try:\n            dstSize = os.path.getsize(dst)\n            if srcSize == dstSize:\n                break\n        except FileNotFoundError:\n            pass\n        time.sleep(waitTime)\n        totalWaitTime += waitTime\n        if totalWaitTime > maxWaitTime:\n            runLog.warning(\n                f\"File move from {dst} to {src} has failed due to exceeding a maximum wait time of {maxWaitTime / 60} \"\n                \"minutes.\"\n            )\n            return\n\n    runLog.extra(f\"Moved {src} -> {dst}\")\n    return dst\n"
  },
  {
    "path": "armi/utils/asciimaps.py",
    "content": "# Copyright 2020 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nASCII maps are little grids of letters/numbers that represent some kind of a lattice.\n\nThese are commonly used in nuclear analysis to represent core maps, pin layouts, etc. in input files. This module reads\nvarious text and interprets them into meaningful data structures.\n\nWe make classes for different geometries to share code. This will eventually be expanded for various symmetries that are\napplicable to cores, assemblies, etc.\n\nThis is as attempted reimplementation of AsciiMaps aiming for simplicity, though inherently this work is complex.\n\nSome vocabulary used here:\n\ncolumn, line\n    column and line numbers in the actual ascii text representation. What you would see in a text editor.\n\noffset\n    The number of spaces needed at the beginning a line to properly orient the ascii representation.\n\ni, j\n    Indices in the grid itself. 
For Cartesian, j is like the line number, but in other geometries (like hex), it is a\n    totally different coordinate system.\n\nSee Also\n--------\narmi.reactor.grids : More powerful, nestable lattices with specific dimensions\n    Most input lattices eventually end up as Grid objects.\narmi.reactor.blueprints.latticeBlueprint : user input of generic lattices\narmi.reactor.geometry : a specific usage of lattices, for core maps\n\n\"\"\"\n\nimport re\nfrom typing import Union\n\nfrom armi import runLog\nfrom armi.reactor import geometry\n\nPLACEHOLDER = \"-\"\n\n\nclass AsciiMap:\n    \"\"\"\n    Base class for maps.\n\n    These should be able to read and write ASCII maps loaded either from text or programmatically with i,j / specifiers.\n    \"\"\"\n\n    def __init__(self):\n        self.asciiLines = []\n        \"\"\"A list of lines, each containing a list of ascii labels for each column. No blanks.\"\"\"\n\n        self.asciiOffsets = []\n        \"\"\"A list of offset integers for each line above that will be prepended before the contents of asciiLines\"\"\"\n\n        self.asciiLabelByIndices = {}\n        \"\"\"A mapping from grid location objects to ascii labels\"\"\"\n\n        self._spacer = \" \"\n        \"\"\"Individual spacing for one 'item' of ascii\"\"\"\n\n        self._placeholder = PLACEHOLDER\n        \"\"\"Placeholder for blank data. 
Also holds the size of ascii window for each value\"\"\"\n\n        self._asciiMaxCol = 0\n        \"\"\"max number of text columns in text representation\"\"\"\n\n        self._asciiMaxLine = 0\n        \"\"\"max number of text lines in text representation\"\"\"\n\n        self._ijMax = 0\n        \"\"\"max num of i+j indices (max(i) + max(j)), needed mostly for hex\"\"\"\n\n        self._asciiLinesOffCorner = 0\n        \"\"\"Number of ascii lines chopped of corners\"\"\"\n\n        self.endsWithPlaceholder = False\n        \"\"\"Handling a special case where we don't want to trim a trailing placeholder from a ASCII map.\"\"\"\n\n    def writeAscii(self, stream):\n        \"\"\"Write out the ascii representation.\"\"\"\n        stream.write(self.__str__())\n\n    def __str__(self):\n        \"\"\"Build the human-readable ASCII string representing the lattice map.\n\n        This method is useful for quickly printing out a lattice map.\n\n        Returns\n        -------\n        str : The custom ARMI ASCII-art-style string representing the map.\n        \"\"\"\n        # Do some basic validation\n        if not self.asciiLines:\n            raise ValueError(\"Cannot write ASCII map before ASCII lines are processed.\")\n\n        if len(self.asciiOffsets) != len(self.asciiLines):\n            runLog.error(f\"AsciiLines: {self.asciiLines}\")\n            runLog.error(f\"Offsets: {self.asciiOffsets}\")\n            raise ValueError(f\"Inconsistent lines ({len(self.asciiLines)}) and offsets ({len(self.asciiOffsets)})\")\n\n        # Finally, build the string representation.\n        txt = \"\"\n        fmt = f\"{{val:{len(self._placeholder)}s}}\"\n        for offset, line in zip(self.asciiOffsets, self.asciiLines):\n            data = [fmt.format(val=v) for v in line]\n            line = self._spacer * offset + self._spacer.join(data) + \"\\n\"\n            txt += line\n\n        return txt\n\n    def readAscii(self, text):\n        \"\"\"\n        Read ascii 
representation from a stream.\n\n        Update placeholder size according to largest thing read.\n\n        Parameters\n        ----------\n        text : str\n            Custom string that describes the ASCII map of the core.\n        \"\"\"\n        text = text.strip().splitlines()\n        self.endsWithPlaceholder = text[-1].rstrip().endswith(PLACEHOLDER)\n\n        self.asciiLines = []\n        self._asciiMaxCol = 0\n        for li, line in enumerate(text):\n            columns = line.split()\n            self.asciiLines.append(columns)\n            if len(columns) > self._asciiMaxCol:\n                self._asciiMaxCol = len(columns)\n\n        self._asciiMaxLine = li + 1\n        self._updateDimensionsFromAsciiLines()\n        self._asciiLinesToIndices()\n        self._makeOffsets()\n        self._updateSlotSizeFromData()\n\n    def _updateSlotSizeFromData(self):\n        \"\"\"After reading data, update slot size for writing.\"\"\"\n        slotSize = max(len(v) for v in self.asciiLabelByIndices.values())\n        self._spacer = \" \" * slotSize\n        fmt = f\"{{placeholder:{slotSize}s}}\"\n        self._placeholder = fmt.format(placeholder=PLACEHOLDER)\n\n    def _updateDimensionsFromAsciiLines(self):\n        \"\"\"\n        When converting ascii to data we need to infer the ijMax before reading the ij indices.\n\n        See Also\n        --------\n        _updateDimensionsFromData : used to infer this information when loading from i,j data\n        \"\"\"\n        raise NotImplementedError\n\n    def _updateDimensionsFromData(self):\n        \"\"\"\n        Before converting data to ascii, inspect the data and set some map dimensions.\n\n        See Also\n        --------\n        _updateDimensionsFromAsciiLines : used when reading info from ascii lines\n        \"\"\"\n        self._ijMax = max(sum(key) for key in self.asciiLabelByIndices)\n\n    @staticmethod\n    def fromReactor(reactor):\n        \"\"\"Populate mapping from a reactor in 
preparation of writing out to ascii.\"\"\"\n        raise NotImplementedError\n\n    def _getLineNumsToWrite(self):\n        \"\"\"\n        Get order of lines to write.\n\n        Most maps index from bottom to top.\n        \"\"\"\n        return reversed(range(self._asciiMaxLine))\n\n    def gridContentsToAscii(self):\n        \"\"\"\n        Convert a prepared asciiLabelByIndices to ascii lines and offsets.\n\n        This is used when you have i,j/specifier data and want to create a ascii map from it as opposed to reading a\n        ascii map from a stream.\n\n        As long as the map knows how to convert lineNum and colNums into ij indices, this is universal. In some\n        implementations, this operation is in a different method for efficiency.\n        \"\"\"\n        self._updateDimensionsFromData()\n        self.asciiLines = []\n        for lineNum in self._getLineNumsToWrite():\n            line = []\n            for colNum in range(self._asciiMaxCol):\n                ij = self._getIJFromColRow(colNum, lineNum)\n                # convert to string and strip any whitespace in thing we're representing\n                line.append(str(self.asciiLabelByIndices.get(ij, PLACEHOLDER)).replace(\" \", \"\"))\n            self.asciiLines.append(line)\n\n        # clean data\n        noDataLinesYet = True  # handle all-placeholder rows\n        newLines = []\n        lastLine = len(self.asciiLines) - 1\n        for i, line in enumerate(self.asciiLines):\n            if re.search(f\"^[{PLACEHOLDER}]+$\", \"\".join(line)) and noDataLinesYet:\n                continue\n\n            noDataLinesYet = False\n            newLine = self._removeTrailingPlaceholders(line)\n            if newLine:\n                if i == lastLine and self.endsWithPlaceholder and newLine[-1] != PLACEHOLDER:\n                    newLine.append(PLACEHOLDER)\n                newLines.append(newLine)\n            else:\n                # If entire newline is wiped out, it's a full row of 
placeholders. That seems wrong.\n                raise ValueError(\"Cannot write asciimaps with blank rows from pure data.\")\n\n        if not newLines:\n            raise ValueError(\"No data found\")\n\n        self.asciiLines = newLines\n        self._updateSlotSizeFromData()\n        self._makeOffsets()\n\n    @staticmethod\n    def _removeTrailingPlaceholders(line):\n        newLine = []\n        noDataYet = True\n        for col in reversed(line):\n            if col == PLACEHOLDER and noDataYet:\n                continue\n            noDataYet = False\n            newLine.append(col)\n\n        newLine.reverse()\n        return newLine\n\n    def _asciiLinesToIndices(self):\n        \"\"\"Convert read in ASCII lines to a asciiLabelByIndices structure.\"\"\"\n\n    def _getIJFromColRow(self, columnNum: int, lineNum: int) -> tuple:\n        \"\"\"Get ij data indices from ascii map text coords.\"\"\"\n        raise NotImplementedError\n\n    def __getitem__(self, ijKey):\n        \"\"\"Get ascii item by grid i,j index.\"\"\"\n        return self.asciiLabelByIndices[ijKey]\n\n    def __setitem__(self, ijKey, item):\n        self.asciiLabelByIndices[ijKey] = item\n\n    def _makeOffsets(self):\n        \"\"\"Build offsets.\"\"\"\n        raise NotImplementedError\n\n    def items(self):\n        return self.asciiLabelByIndices.items()\n\n    def keys(self):\n        return self.asciiLabelByIndices.keys()\n\n\nclass AsciiMapCartesian(AsciiMap):\n    \"\"\"\n    Cartesian ascii map.\n\n    Conveniently simple because offsets are always 0\n\n    i and j are equal to column, row\n    \"\"\"\n\n    def _asciiLinesToIndices(self):\n        self.asciiLabelByIndices = {}\n\n        # read from bottom to top to be consistent\n        # with cartesian grid indexing\n        for li, line in enumerate(reversed(self.asciiLines)):\n            for ci, asciiLabel in enumerate(line):\n                ij = self._getIJFromColRow(ci, li)\n                
self.asciiLabelByIndices[ij] = asciiLabel\n\n    def _updateDimensionsFromData(self):\n        AsciiMap._updateDimensionsFromData(self)\n        self._asciiMaxCol = max(key[0] for key in self.asciiLabelByIndices) + 1\n        self._asciiMaxLine = max(key[1] for key in self.asciiLabelByIndices) + 1\n        iMin = min(key[0] for key in self.asciiLabelByIndices)\n        jMin = min(key[1] for key in self.asciiLabelByIndices)\n\n        if iMin > 0 or jMin > 0:\n            raise ValueError(\n                \"Asciimaps only supports sets of indices that start at less than or equal to zero, got {}, {}\".format(\n                    iMin, jMin\n                )\n            )\n\n    def _getIJFromColRow(self, columnNum, lineNum):\n        return columnNum, lineNum\n\n    def _makeOffsets(self):\n        \"\"\"Cartesian grids have 0 offset on all lines.\"\"\"\n        self.asciiOffsets = []\n        for _line in self.asciiLines:\n            self.asciiOffsets.append(0)\n\n    def _updateDimensionsFromAsciiLines(self):\n        pass\n\n\nclass AsciiMapHexThirdFlatsUp(AsciiMap):\n    \"\"\"\n    Hex ascii map for 1/3 core flats-up map.\n\n    - Indices start with (0,0) in the bottom left (origin).\n    - i increments on the 30-degree ray\n    - j increments on the 90-degree ray\n\n    In all flats-up hex maps, i increments by 2*col for each col and j decrements by col from the base.\n\n    These are much more complex maps than the tips up ones because there are 2 ascii lines for every j index (jaggedly).\n\n    Lines are read from the bottom of the ascii map up in this case.\n    \"\"\"\n\n    def _asciiLinesToIndices(self):\n        self.asciiLabelByIndices = {}\n\n        # read from bottom to top so we know that first item is at i,j = 0,0\n        for li, line in enumerate(reversed(self.asciiLines)):\n            iBase, jBase = self._getIJBaseByAsciiLine(li)\n            for ci, asciiLabel in enumerate(line):\n                ij = self._getIJFromColAndBase(ci, iBase, 
jBase)\n                self.asciiLabelByIndices[ij] = asciiLabel\n\n    def _getIJBaseByAsciiLine(self, asciiLineNum):\n        \"\"\"\n        Get i,j base (starting point) for a row from bottom.\n\n        These are the indices of the far-left item in a row as a function\n        of line number from the bottom. These are used in the process\n        of computing the indices of items while reading the ascii map.\n\n        For 1/3 symmetric cases, the base is a constant pattern\n        vs. row number at least until the top section where the hexagon\n        comes off the 1/3 symmetry line.\n\n        The base hexes (LHS) as a function of rows from bottom are:\n\n        Row:    0      1      2      3        4      5       6       7       8      9       10      11    12\n        Base: (0,0), (1,0)  (0,1),  (1,1),  (0,2), (-1,3), (0,3), (-1,4), (-2,5), (-1,5), (-2,6), (-3,7) (-2,7)\n\n        Looking graphically, there are basically 3 rays going up at 120 degrees.\n        So we can find a consistent pattern for each ray and use a modulus to figure\n        out which ray we're on.\n\n        \"\"\"\n        if asciiLineNum == 0:\n            return 0, 0\n        rayNum = (asciiLineNum - 1) % 3\n        indexOnRay = (asciiLineNum - 1) // 3\n        if rayNum == 0:\n            # middle ray: (1,0), (0,2), (-1,4), (-2,6)\n            return 1 - indexOnRay, 2 * indexOnRay\n        elif rayNum == 1:\n            # leftmost ray: (0,1), (-1,3), (-2,5), ...\n            return -indexOnRay, 2 * indexOnRay + 1\n        else:\n            # innermost ray: (1,1), (0,3), (-1,5)\n            return 1 - indexOnRay, 2 * indexOnRay + 1\n\n    def _getIJFromColAndBase(self, columnNum, iBase, jBase):\n        \"\"\"Map ascii column and base to i,j hex indices.\"\"\"\n        # To move n columns right, i increases by 2n, j decreases by n\n        return iBase + 2 * columnNum, jBase - columnNum\n\n    def _getIJFromColRow(self, columnNum, lineNum):\n        \"\"\"\n        Map ascii 
column and row to i,j hex indices.\n\n        Notes\n        -----\n        Not used in reading from file b/c too many calls to base but convenient for writing from ij data\n        \"\"\"\n        iBase, jBase = self._getIJBaseByAsciiLine(lineNum)\n        return self._getIJFromColAndBase(columnNum, iBase, jBase)\n\n    def _makeOffsets(self):\n        \"\"\"One third hex grids have larger offsets at the bottom so the overhanging top fits.\"\"\"\n        self.asciiOffsets = []\n        for li, _line in enumerate(self.asciiLines):\n            iBase, _ = self._getIJBaseByAsciiLine(li)\n            self.asciiOffsets.append(iBase - 1)\n        self.asciiOffsets.reverse()  # since getIJ works from bottom to top\n        newOffsets = []\n\n        # renomalize the offsets to start at 0\n        minOffset = min(self.asciiOffsets)\n        for offset in self.asciiOffsets:\n            newOffsets.append(offset - minOffset)\n        self.asciiOffsets = newOffsets\n\n    def _updateDimensionsFromAsciiLines(self):\n        \"\"\"\n        Update some dimension metadata by looking at the ascii lines.\n\n        In this case, asciiMaxCol actually represents the max i index.\n\n        \"\"\"\n        self._ijMax = self._asciiMaxCol - 1\n        self._asciiLinesOffCorner = len(self.asciiLines[-1]) - 1\n\n    def _updateDimensionsFromData(self):\n        \"\"\"\n        Set map dimension metadata based on populated data structure.\n\n        Used before writing the asciimap from data.\n\n        Add flat-hex specific corner truncation detection that allows some positions to be empty\n        near the corners of the full hex, as is typical for hexagonal core maps.\n\n        For 1/3 hex, _ijMax represents the outer outline\n        \"\"\"\n        AsciiMap._updateDimensionsFromData(self)\n\n        # Check the j=0 ray to see how many peripheral locations are blank.\n        # assume symmetry with the other corner.\n        # The cap is basically the distance from the (I, 0) or 
(0, J) loc to self._ijMax\n        iWithData = [i for i, j in self.asciiLabelByIndices if j == 0]\n        maxIWithData = max(iWithData) if iWithData else -1\n        self._asciiLinesOffCorner = (self._ijMax - maxIWithData) * 2 - 1\n\n        # in jagged systems we have to also check the neighbor\n        nextIWithData = [i for i, j in self.asciiLabelByIndices if j == 1]\n        nextMaxIWithData = max(nextIWithData) if nextIWithData else -1\n        if nextMaxIWithData == maxIWithData - 1:\n            # the jagged edge is lopped off too.\n            self._asciiLinesOffCorner += 1\n\n        # now that we understand how many corner positions are truncated,\n        # we can fully determine the size of the ascii map\n        self._asciiMaxCol = self._ijMax + 1\n        self._asciiMaxLine = self._ijMax * 2 + 1 - self._asciiLinesOffCorner\n\n\nclass AsciiMapHexFullFlatsUp(AsciiMapHexThirdFlatsUp):\n    \"\"\"\n    Full core flats up ascii map.\n\n    Notes\n    -----\n    Rather than making a consistent base, we switch base angles with this one because otherwise there would be a\n    ridiculous number of placeholders on the left. This makes this one's base computation more complex.\n\n    We also allow all corners to be cut off on these, further complicating things.\n    \"\"\"\n\n    def _getIJBaseByAsciiLine(self, asciiLineNum):\n        \"\"\"\n        Get i,j base (starting point) for a row from bottom.\n\n        Starts out in simple pattern and then shifts.\n\n        Recall that there are 2 ascii lines per j index because jagged.\n\n        If hex corners are omitted, we must offset the line num to get the base right (complexity!)\n\n        In this orientation, we need the _ijMax to help orient us. 
This represents the number of ascii lines between the\n        center of the core and the top (or bottom)\n        \"\"\"\n        # handle potentially-omitted corners\n        asciiLineNum += self._asciiLinesOffCorner\n        if asciiLineNum < self._ijMax:\n            # goes from (0,-9), (-1,-8), (-2,7)...\n            i, j = -asciiLineNum, -self._ijMax + asciiLineNum\n        elif not (asciiLineNum - self._ijMax) % 2:\n            # goes JAGGED from (-9,0), (-8, 0), (-9,2)...\n            # this is the outermost upward ray\n            index = (asciiLineNum - self._ijMax) // 2\n            i, j = -self._ijMax, index\n        else:\n            # this is the innermost upward ray\n            index = (asciiLineNum - self._ijMax) // 2\n            i, j = -self._ijMax + 1, index\n\n        return i, j\n\n    def _makeOffsets(self):\n        \"\"\"\n        Handle offsets for full-hex flat grids.\n\n        Due to the staggered nature, these have 0 or 1 offsets on top and then 0 or 1 + an actual offset on the bottom.\n        \"\"\"\n        # max lines required if corners were not cut off\n        maxIJIndex = self._ijMax\n        self.asciiOffsets = []\n        # grab top left edge going down until corner where it lifts off edge.\n        # Due to the placeholders these just oscillate\n        for li in range(maxIJIndex * 3):\n            self.asciiOffsets.append((li - self._asciiLinesOffCorner) % 2)\n\n        # going away from the left edge, the offsets increase linearly\n        self.asciiOffsets.extend(range(maxIJIndex + 1))\n\n        # since we allow cut-off corners, we must truncate the offsets number of items in last line indicates how many\n        # need to be cut. 
(first line has placeholders...)\n        cutoff = self._asciiLinesOffCorner\n        if cutoff:\n            self.asciiOffsets = self.asciiOffsets[cutoff:-cutoff]\n\n    def _updateDimensionsFromData(self):\n        AsciiMapHexThirdFlatsUp._updateDimensionsFromData(self)\n        self._asciiMaxCol = self._ijMax + 1\n        self._asciiMaxLine = self._ijMax * 4 + 1 - self._asciiLinesOffCorner * 2\n\n\nclass AsciiMapHexFullTipsUp(AsciiMap):\n    \"\"\"\n    Full hex with tips up of the smaller cells.\n\n    - I axis is pure horizontal here\n    - J axis is 60 degrees up. (upper right corner)\n    - (0,0) is in the center of the hexagon.\n\n    Frequently used for pins inside hex assemblies.\n\n    This does not currently support omitted positions on the hexagonal corners.\n\n    In this geometry, the outline-defining _ijMax is equal to I at the far right of the hex. Thus, ijMax represents the\n    number of positions from the center to the outer edge towards any of the 6 corners.\n    \"\"\"\n\n    def _asciiLinesToIndices(self):\n        \"\"\"Read lines in from top to bottom.\"\"\"\n        self.asciiLabelByIndices = {}\n\n        for li, line in enumerate(self.asciiLines):\n            iBase, jBase = self._getIJBaseByAsciiLine(li)\n            for ci, asciiLabel in enumerate(line):\n                ij = self._getIJFromColAndBase(ci, iBase, jBase)\n                self.asciiLabelByIndices[ij] = asciiLabel\n            self.asciiOffsets.append(li)\n\n    def _getIJFromColAndBase(self, columnNum, iBase, jBase):\n        \"\"\"\n        Map ascii column and base to i,j hex indices.\n\n        Indices simply increment from the base across the rows.\n        \"\"\"\n        return iBase + columnNum + jBase, -(iBase + columnNum)\n\n    def _getIJFromColRow(self, columnNum, lineNum):\n        \"\"\"\n        Map indices from ascii.\n\n        Notes\n        -----\n        Not used in reading from file b/c inefficient/repeated base calc but required for writing from ij 
data.\n        \"\"\"\n        iBase, jBase = self._getIJBaseByAsciiLine(lineNum)\n        return self._getIJFromColAndBase(columnNum, iBase, jBase)\n\n    def _getIJBaseByAsciiLine(self, asciiLineNum):\n        \"\"\"\n        Get i,j base (starting point) for a row counting from the top.\n\n        Upper left is shifted by (size-1)//2\n\n        for a 19-line grid, we have the top left as (-18,9) and then: (-17, 8), (-16, 7), ...\n        \"\"\"\n        shift = self._ijMax\n        iBase = -shift * 2 + asciiLineNum\n        jBase = shift - asciiLineNum\n        return iBase, jBase\n\n    def _updateDimensionsFromAsciiLines(self):\n        \"\"\"Update dimension metadata when reading ascii.\"\"\"\n        # ijmax here can be inferred directly from the max number of columns in the asciimap text\n        self._ijMax = (self._asciiMaxCol - 1) // 2\n\n    def _updateDimensionsFromData(self):\n        \"\"\"Update asciimap dimensions from data before writing ascii.\"\"\"\n        AsciiMap._updateDimensionsFromData(self)\n        self._asciiMaxCol = self._ijMax * 2 + 1\n        self._asciiMaxLine = self._ijMax * 2 + 1\n\n    def _getLineNumsToWrite(self):\n        \"\"\"\n        Get order of lines to write.\n\n        This map indexes lines from top to bottom.\n        \"\"\"\n        return range(self._asciiMaxLine)\n\n    def _makeOffsets(self):\n        \"\"\"Full hex tips-up grids have linearly incrementing offset.\"\"\"\n        self.asciiOffsets = []\n        for li, _line in enumerate(self.asciiLines):\n            self.asciiOffsets.append(li)\n\n\ndef asciiMapFromGeomAndDomain(\n    geomType: Union[str, geometry.GeomType], domain: Union[str, geometry.DomainType]\n) -> \"AsciiMap\":\n    \"\"\"Get a ASCII map class from a geometry and domain type.\"\"\"\n    from armi.reactor import geometry\n\n    if (\n        str(geomType) == geometry.HEX_CORNERS_UP\n        and geometry.DomainType.fromAny(domain) == geometry.DomainType.FULL_CORE\n    ):\n        return 
AsciiMapHexFullTipsUp\n\n    mapFromGeom = {\n        (\n            geometry.GeomType.HEX,\n            geometry.DomainType.THIRD_CORE,\n        ): AsciiMapHexThirdFlatsUp,\n        (geometry.GeomType.HEX, geometry.DomainType.FULL_CORE): AsciiMapHexFullFlatsUp,\n        (geometry.GeomType.CARTESIAN, None): AsciiMapCartesian,\n        (geometry.GeomType.CARTESIAN, geometry.DomainType.FULL_CORE): AsciiMapCartesian,\n        (\n            geometry.GeomType.CARTESIAN,\n            geometry.DomainType.QUARTER_CORE,\n        ): AsciiMapCartesian,\n    }\n\n    return mapFromGeom[\n        (\n            geometry.GeomType.fromAny(geomType),\n            geometry.DomainType.fromAny(domain),\n        )\n    ]\n"
  },
  {
    "path": "armi/utils/codeTiming.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Utilities related to profiling code.\"\"\"\n\nimport copy\nimport functools\nimport os\nimport time\n\n\ndef timed(*args):\n    \"\"\"\n    Decorate functions to measure how long they take.\n\n    Examples\n    --------\n    Here are some examples of using this method::\n\n        @timed # your timer will be called the module+method name\n        def mymethod(stuff):\n            do stuff\n\n        @timed('call my timer this instead')\n        def mymethod2(stuff)\n           do even more stuff\n    \"\"\"\n\n    def time_decorator(func):\n        @functools.wraps(func)\n        def time_wrapper(*args, **kwargs):\n            generated_name = \"::\".join(\n                [\n                    os.path.split(func.__code__.co_filename)[1],\n                    str(func.__code__.co_firstlineno),\n                    func.__code__.co_name,\n                ]\n            )\n\n            MasterTimer.startTimer(label or generated_name)\n            return_value = func(*args, **kwargs)\n            MasterTimer.endTimer(label or generated_name)\n\n            return return_value\n\n        return time_wrapper\n\n    if len(args) == 1 and callable(args[0]):\n        label = None\n        return time_decorator(args[0])\n    elif len(args) == 1 and isinstance(args[0], str):\n        label = args[0]\n        return time_decorator\n    else:\n        raise 
ValueError(f\"The timed decorator has been misused. Input args were {args}\")\n\n\nclass MasterTimer:\n    \"\"\"A code timing interface, this class is designed to be a singleton.\"\"\"\n\n    _instance = None\n\n    def __init__(self):\n        if MasterTimer._instance is not None:\n            raise RuntimeError(\n                \"{} is a pseudo singleton, do not attempt to make more than one.\".format(self.__class__.__name__)\n            )\n        MasterTimer._instance = self\n\n        self.timers = {}\n        self.start_time = time.time()\n        self.end_time = None\n\n    @staticmethod\n    def getMasterTimer():\n        \"\"\"Primary method that users need get access to the MasterTimer singleton.\"\"\"\n        if MasterTimer._instance is None:\n            MasterTimer()\n\n        return MasterTimer._instance\n\n    @staticmethod\n    def getTimer(eventName):\n        \"\"\"Return a timer with no special action take.\n\n        ``with timer: ...`` friendly!\n        \"\"\"\n        master = MasterTimer.getMasterTimer()\n\n        if eventName in master.timers:\n            timer = master.timers[eventName]\n        else:\n            timer = _Timer(eventName, False)\n            master.timers[eventName] = timer\n        return timer\n\n    @staticmethod\n    def startTimer(eventName):\n        \"\"\"Return a timer with a start call, or a newly made started timer.\n\n        ``with timer: ...`` unfriendly!\n        \"\"\"\n        master = MasterTimer.getMasterTimer()\n\n        if eventName in master.timers:\n            timer = master.timers[eventName]\n            timer.start()\n        else:\n            timer = _Timer(eventName, True)\n            master.timers[eventName] = timer\n        return timer\n\n    @staticmethod\n    def endTimer(eventName):\n        \"\"\"Return a timer with a stop call, or a newly made unstarted timer.\n\n        ``with timer: ...`` unfriendly!\n        \"\"\"\n        master = MasterTimer.getMasterTimer()\n\n        if 
eventName in master.timers:\n            timer = master.timers[eventName]\n            timer.stop()\n        else:\n            timer = _Timer(eventName, False)\n            master.timers[eventName] = timer\n        return timer\n\n    @staticmethod\n    def time():\n        \"\"\"System time offset by when this master timer was initialized.\"\"\"\n        master = MasterTimer.getMasterTimer()\n\n        if master.end_time:\n            return master.end_time - master.start_time\n        else:\n            return time.time() - master.start_time\n\n    @staticmethod\n    def startAll():\n        \"\"\"Starts all timers, won't work after a stopAll command.\"\"\"\n        master = MasterTimer.getMasterTimer()\n\n        for timer in master.timers.values():\n            timer.start()\n\n    @staticmethod\n    def stopAll():\n        \"\"\"Kills the timer run, can't easily be restarted.\"\"\"\n        master = MasterTimer.getMasterTimer()\n\n        for timer in master.timers.values():\n            timer.overStart = 0  # deal with what recursion may have caused\n            timer.stop()\n\n        _Timer._frozen = True\n\n        master.end_time = time.time()\n\n    @staticmethod\n    def getActiveTimers():\n        \"\"\"Get all the timers for processes that are still active.\"\"\"\n        master = MasterTimer.getMasterTimer()\n\n        return [t for t in master.timers.values() if t.isActive]\n\n    def __str__(self):\n        t = self.time()\n        return \"{:55s} {:>14.2f} {:>14.2f} {:11}\".format(\"TOTAL TIME\", t, t, 1)\n\n    @staticmethod\n    def report(inclusionCutoff=0.1, totalTime=False):\n        \"\"\"\n        Write a string report of the timers.\n\n        This report prints a table that looks something like this:\n\n        TIMER REPORTS                                           CUMULATIVE (s)    AVERAGE (s)   NUM ITERS\n        thing1                                                            0.01           0.01           1\n        thing2           
                                                 0.01           0.01           1\n        TOTAL TIME                                                        0.02           0.02           1\n\n        Parameters\n        ----------\n        inclusionCutoff : float, optional\n            Will not show results that have less than this fraction of the total time.\n        totalTime : bool, optional\n            Use the ratio of total time or time since last report to compare against the cutoff.\n\n        See Also\n        --------\n        armi.utils.codeTiming._Timer.__str__ : prints out the results for each individual line item\n\n        Returns\n        -------\n        str : Plain-text table report on the timers.\n        \"\"\"\n        master = MasterTimer.getMasterTimer()\n\n        table = [\n            \"{:55s} {:^15} {:^15} {:9}\".format(\n                \"TIMER REPORTS\",\n                \"CUMULATIVE (s)\",\n                \"AVERAGE (s)\",\n                \"NUM ITERS\".rjust(9, \" \"),\n            )\n        ]\n\n        for timer in sorted(master.timers.values(), key=lambda x: x.time):\n            if totalTime:\n                timeRatio = timer.time / master.time()\n            else:\n                timeRatio = timer.timeSinceReport / master.time()\n\n            if timeRatio < inclusionCutoff:\n                continue\n            table.append(str(timer))\n\n        # add the total time as the last row\n        table.append(str(master))\n        return \"\\n\".join(table)\n\n    @staticmethod\n    def timeline(baseFileName, inclusionCutoff=0.1, totalTime=False):\n        \"\"\"Produces a timeline graphic of the timers.\n\n        Parameters\n        ----------\n        baseFileName : str\n            Whatever the leading file path should be.\n            This method generates the same file extension for every image to add to the base.\n        inclusionCutoff : float, optional\n            Will not show results that have less than this fraction 
of the total time.\n        totalTime : bool, optional\n            Use the ratio of total time or time since last report to compare against the cutoff.\n\n        Returns\n        -------\n        str : Path to the saved plot file.\n        \"\"\"\n        import matplotlib.pyplot as plt\n        import numpy as np\n\n        # initial set up\n        master = MasterTimer.getMasterTimer()\n        curTime = master.time()\n\n        color_map = plt.cm.jet\n\n        colors = []\n        names = []\n        xStarts = []\n        xStops = []\n        yLevel = 0  # height of the timelines\n        yValues = []  # list of heights\n\n        # plot content gather\n        for timer in sorted(master.timers.values(), key=lambda x: x.name):\n            if totalTime:\n                timeRatio = timer.time / master.time()\n            else:\n                timeRatio = timer.timeSinceReport / master.time()\n            if timeRatio < inclusionCutoff:\n                continue\n\n            yLevel += 1\n            names.append(timer.name)\n            for timePair in timer.times:\n                colors.append(color_map(timeRatio))\n                xStarts.append(timePair[0])\n                xStops.append(timePair[1])\n                yValues.append(yLevel)\n\n        # plot set up: might not be necessary to scale the width with the height like this\n        plt.figure(figsize=(3 + len(master.timers.values()), (3 + len(master.timers.values()))))\n        plt.axis([0.0, curTime, 0.0, yLevel + 1])\n        plt.xlabel(\"Time (s)\")\n        plt.yticks(np.arange(yLevel + 1), [\"\"] + names)\n        _loc, labels = plt.yticks()\n        for tick in labels:\n            tick.set_fontsize(40)\n\n        plt.tight_layout()\n\n        # plot content draw\n        plt.hlines(yValues, xStarts, xStops, colors)\n\n        def flatMerge(l1, l2=None):\n            # duplicate a list flatly or merge them flatly (no tuples compared to zip)\n            return [item for sublist in zip(l1, 
l2 or l1) for item in sublist]\n\n        ymin = [y - 0.3 for y in yValues]\n        ymax = [y + 0.3 for y in yValues]\n        plt.vlines(\n            flatMerge(xStarts, xStops),\n            flatMerge(ymin),\n            flatMerge(ymax),\n            flatMerge(colors),\n        )\n\n        # save and close\n        filename = f\"{baseFileName}.code-timeline.png\"\n        plt.savefig(filename)\n        plt.close()\n        return os.path.join(os.getcwd(), filename)\n\n\nclass _Timer:\n    \"\"\"Code timer to call at various points to measure performance.\n\n    See Also\n    --------\n    MasterTimer.getTimer() for construction\n    \"\"\"\n\n    # If the master timer stops, all timers must freeze with no thaw.\n    _frozen = False\n\n    def __init__(self, name, start):\n        self.name = name\n        self._active = False\n        self._times = []  # [(start, end), (start, end)...]\n        self.overStart = 0  # necessary for recursion tracking\n        self.reportedTotal = 0.0  # time elapsed since last asked to report time in __str__\n\n        if start:\n            self.start()\n\n    def __repr__(self):\n        return \"<{} name:'{}' num iterations:{} time:{}>\".format(\n            self.__class__.__name__, self.name, self.numIterations, self.time\n        )\n\n    def __str__(self):\n        s = \"{:55s} {:>14.2f} {:>14.2f} {:11}\".format(\n            self.name[:55],\n            self.time,\n            self.time / (self.numIterations + 1),\n            self.numIterations + 1,\n        )\n        # needs to come after str generation because it resets the timeSinceReport\n        self.reportedTotal = self.time\n        return s\n\n    def __enter__(self):\n        self.start()\n\n    def __exit__(self, *args, **kwargs):\n        self.stop()\n\n    @property\n    def isActive(self):\n        \"\"\"Return True if the code for this timer still running.\"\"\"\n        return self._active\n\n    @property\n    def numIterations(self):\n        \"\"\"If 
this number seems high, remember .start() twice in a row adds an iteration to numIterations.\"\"\"\n        return len(self._times) - 1 if self._times else 0\n\n    @property\n    def time(self):\n        \"\"\"Total time value.\"\"\"\n        return sum([t[1] - t[0] for t in self.times])\n\n    @property\n    def timeSinceReport(self):\n        \"\"\"The elapsed time since this timer was asked to report itself.\"\"\"\n        return self.time - self.reportedTotal\n\n    @property\n    def times(self):\n        \"\"\"List of time start / stop pairs, if active the current time is used as the last stop.\"\"\"\n        if self.isActive:\n            times = copy.deepcopy(self._times)\n            times[-1] = (self._times[-1][0], MasterTimer.time())\n            return times\n        else:\n            return self._times\n\n    def _openTimePair(self, curTime):\n        self._times.append((curTime, None))\n\n    def _closeTimePair(self, curTime):\n        self._times[-1] = (self._times[-1][0], curTime)\n\n    def start(self):\n        \"\"\"Start this Timer.\n\n        Returns\n        -------\n        float : Time stamp for the current time / start time.\n        \"\"\"\n        curTime = MasterTimer.time()\n\n        if self._frozen:\n            return curTime\n        elif self.isActive:\n            # call was made on an active timer, we're now over-started\n            self.overStart += 1\n            self._closeTimePair(curTime)\n\n        self._active = True\n        self._openTimePair(curTime)\n\n        return curTime\n\n    def stop(self):\n        \"\"\"Stop this Timer.\n\n        Returns\n        -------\n        float : Time stamp for the current time / stop time.\n        \"\"\"\n        curTime = MasterTimer.time()\n\n        if self._frozen:\n            return curTime\n\n        if self.overStart:\n            # can't end the timer as it's over-started\n            self.overStart -= 1\n        elif self.isActive:\n            self._active = False\n    
        self._closeTimePair(curTime)\n\n        return curTime\n"
  },
  {
    "path": "armi/utils/customExceptions.py",
    "content": "# Copyright 2021 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nGlobally accessible exception definitions for better granularity on exception behavior and\nexception handling behavior.\n\"\"\"\n\nfrom inspect import getframeinfo, stack\n\nfrom armi import runLog\n\n\ndef info(func):\n    \"\"\"Decorator to write to current log, using the info method.\"\"\"\n\n    def decorated(*args, **kwargs):\n        r\"\"\"Decorated method.\"\"\"\n        runLog.info(func(*args, **kwargs))\n\n    return decorated\n\n\ndef important(func):\n    \"\"\"Decorator to write to current log, using the important method.\"\"\"\n\n    def decorated(*args, **kwargs):\n        \"\"\"Decorated method.\"\"\"\n        runLog.important(func(*args, **kwargs))\n\n    return decorated\n\n\ndef warn(func):\n    \"\"\"Decorates a method to produce a repeatable warning message.\"\"\"\n\n    def decorated(*args, **kwargs):\n        \"\"\"Decorated method.\"\"\"\n        runLog.warning(func(*args, **kwargs))\n\n    return decorated\n\n\ndef _message_when_root(func):\n    \"\"\"Do not use this decorator.\"\"\"\n\n    def decorated(*args, **kwargs):\n        from armi import MPI_RANK\n\n        if MPI_RANK == 0:\n            func(*args, **kwargs)\n\n    return decorated\n\n\ndef warn_when_root(func):\n    \"\"\"Decorates a method to produce a warning message only on the root node.\"\"\"\n    return _message_when_root(warn(func))\n\n\n# 
---------------------------------------------------\n\n\nclass InputError(Exception):\n    \"\"\"An error found in an ARMI input file.\"\"\"\n\n    def __init__(self, msg):\n        self.msg = msg\n        self.caller = getframeinfo(stack()[1][0])\n\n    def __str__(self):\n        # Check if the call site is sensible enough to warrant printing.\n        # In the past, we assumed cython would wrap the fake stack filename in <>\n        callSiteIsFake = self.caller.filename.startswith(\"<\") and self.caller.filename.endswith(\">\")\n        if callSiteIsFake:\n            return self.msg\n        else:\n            return self.caller.filename + \":\" + str(self.caller.lineno) + \" - \" + self.msg\n\n\n# ---------------------------------------------------\n\n\nclass SettingException(Exception):\n    \"\"\"Standardize behavior of setting-family errors.\"\"\"\n\n    def __init__(self, msg):\n        Exception.__init__(self, msg)\n\n\nclass InvalidSettingsStopProcess(SettingException):\n    \"\"\"\n    Exception raised when setting file contains invalid settings and user aborts or process is\n    uninteractive.\n    \"\"\"\n\n    def __init__(self, reader):\n        msg = \"Input settings file {}\".format(reader.inputPath)\n        if reader.liveVersion != reader.inputVersion:\n            msg += (\n                '\\n\\twas made with version \"{0}\" which differs from the current version \"{1}.\" '\n                'Either create the input file with the \"{1}\", or switch to a development version '\n                \"of ARMI.\".format(reader.inputVersion, reader.liveVersion)\n            )\n        if reader.invalidSettings:\n            msg += \"\\n\\tcontains the following {} invalid settings:\\n\\t\\t{}\".format(\n                len(reader.invalidSettings), \"\\n\\t\\t\".join(reader.invalidSettings)\n            )\n        SettingException.__init__(self, msg)\n\n\nclass NonexistentSetting(SettingException):\n    \"\"\"Exception raised when a non existent setting 
is asked for.\"\"\"\n\n    def __init__(self, setting):\n        SettingException.__init__(self, \"Attempted to locate non-existent setting {}.\".format(setting))\n\n\nclass InvalidSettingsFileError(SettingException):\n    \"\"\"Not a valid settings file.\"\"\"\n\n    def __init__(self, path, customMsgEnd=\"\"):\n        msg = \"Attempted to load an invalid settings file from: {}. \".format(path)\n        msg += customMsgEnd\n\n        SettingException.__init__(self, msg)\n\n\nclass NonexistentSettingsFileError(SettingException):\n    \"\"\"Settings file does not exist.\"\"\"\n\n    def __init__(self, path):\n        SettingException.__init__(self, \"Attempted to load settings file, cannot locate file: {}\".format(path))\n"
  },
  {
    "path": "armi/utils/densityTools.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Assorted utilities to help with basic density calculations.\"\"\"\n\nfrom typing import Dict, List, Tuple, Union\n\nimport numpy as np\n\nfrom armi import runLog\nfrom armi.nucDirectory import elements, nucDir, nuclideBases\nfrom armi.utils import units\n\n\ndef getNDensFromMasses(rho, massFracs, normalize=False):\n    \"\"\"\n    Convert density (g/cc) and massFracs vector into a number densities vector (#/bn-cm).\n\n    .. impl:: Number densities are retrievable from masses.\n        :id: I_ARMI_UTIL_MASS2N_DENS\n        :implements: R_ARMI_UTIL_MASS2N_DENS\n\n        Loops over all provided nuclides (given as keys in the ``massFracs`` vector) and calculates\n        number densities of each, at a given material ``density``. 
Mass fractions can be provided\n        either as normalized to 1, or as unnormalized with subsequent normalization calling\n        ``normalizeNuclideList`` via the ``normalize`` flag.\n\n    Parameters\n    ----------\n    rho : float\n        density in (g/cc)\n    massFracs : dict\n        vector of mass fractions -- normalized to 1 -- keyed by their nuclide name\n\n    Returns\n    -------\n    nuclides : np.ndarray[np.bytes_]\n        vector of nuclide names as byte strings\n    numberDensities : np.ndarray[np.float64]\n        vector of number densities (#/bn-cm) for each nuclide in nuclides\n    \"\"\"\n    if normalize:\n        massFracs = normalizeNuclideList(massFracs, normalization=normalize)\n\n    nuclides = []\n    numberDensities = []\n    rho = rho * units.MOLES_PER_CC_TO_ATOMS_PER_BARN_CM\n    for nucName, massFrac in massFracs.items():\n        atomicWeight = nuclideBases.byName[nucName].weight\n        nuclides.append(nucName.encode())\n        numberDensities.append(massFrac * rho / atomicWeight)\n    return np.array(nuclides), np.array(numberDensities)\n\n\ndef getMassFractions(numberDensities):\n    \"\"\"\n    Convert number densities (#/bn-cm) into mass fractions.\n\n    Parameters\n    ----------\n    numberDensities : dict\n        number densities (#/bn-cm) keyed by their nuclide name\n\n    Returns\n    -------\n    massFracs : dict\n        mass fractions -- normalized to 1 -- keyed by their nuclide\n        name\n    \"\"\"\n    nucMassFracs = {}\n    totalWeight = 0.0\n    for nucName, numDensity in numberDensities.items():\n        weightI = numDensity * nucDir.getAtomicWeight(nucName)\n        nucMassFracs[nucName] = weightI  # will be normalized at end\n        totalWeight += weightI\n\n    if totalWeight != 0:\n        for nucName in numberDensities:\n            nucMassFracs[nucName] /= totalWeight\n    else:\n        for nucName in numberDensities:\n            nucMassFracs[nucName] = 0.0\n\n    return nucMassFracs\n\n\ndef 
calculateMassDensity(numberDensities):\n    \"\"\"\n    Calculates the mass density.\n\n    Parameters\n    ----------\n    numberDensities : dict\n        vector of number densities (atom/bn-cm) indexed by nuclides names\n\n    Returns\n    -------\n    rho : float\n        density in (g/cc)\n    \"\"\"\n    rho = 0\n    for nucName, nDensity in numberDensities.items():\n        atomicWeight = nuclideBases.byName[nucName].weight\n        rho += nDensity * atomicWeight / units.MOLES_PER_CC_TO_ATOMS_PER_BARN_CM\n    return rho\n\n\ndef calculateNumberDensity(nucName, mass, volume):\n    \"\"\"\n    Calculates the number density.\n\n    Parameters\n    ----------\n    mass : float\n    volume : volume\n    nucName : armi nuclide name -- e.g. 'U235'\n\n    Returns\n    -------\n    number density : float\n        number density (#/bn-cm)\n\n    See Also\n    --------\n    armi.reactor.blocks.Block.setMass\n    \"\"\"\n    A = nucDir.getAtomicWeight(nucName)\n    try:\n        return units.MOLES_PER_CC_TO_ATOMS_PER_BARN_CM * mass / (volume * A)\n    except ZeroDivisionError:\n        if mass == 0 and volume == 0:\n            return 0\n\n        raise ValueError(\n            \"Could not calculate number density with input.\\nmass : {}\\nvolume : {}\\natomic weight : {}\\n\".format(\n                mass, volume, A\n            )\n        )\n\n\ndef getMassInGrams(nucName: str, volume: float, numberDensity: Union[float, None] = None) -> float:\n    \"\"\"\n    Gets mass of a nuclide of a known volume and know number density.\n\n    Parameters\n    ----------\n    nucName\n        name of nuclide -- e.g. 
'U235'\n    volume\n        volume in (cm3)\n    numberDensity\n        number density in (at/bn-cm)\n\n    Returns\n    -------\n    mass\n        mass of nuclide (g)\n    \"\"\"\n    if not numberDensity:\n        return 0.0\n    A = nucDir.getAtomicWeight(nucName)\n    return numberDensity * volume * A / units.MOLES_PER_CC_TO_ATOMS_PER_BARN_CM\n\n\ndef formatMaterialCard(\n    densities,\n    matNum=0,\n    minDens=1e-15,\n    sigFigs=8,\n    mcnp6Compatible=False,\n    mcnpLibrary=None,\n):\n    \"\"\"\n    Formats nuclides and densities into a MCNP material card.\n\n    .. impl:: Create MCNP material card.\n        :id: I_ARMI_UTIL_MCNP_MAT_CARD\n        :implements: R_ARMI_UTIL_MCNP_MAT_CARD\n\n        Loops over a vector of nuclides (of type ``nuclideBase``) provided in ``densities`` and\n        formats them into a list of strings consistent with MCNP material card syntax, skipping\n        dummy nuclides and LFPs.\n\n        A ``matNum`` may optionally be provided for the created material card: if not provided, it\n        is left blank. The desired number of significant figures for the created card can be\n        optionally provided by ``sigFigs``. Nuclides whose number density falls below a threshold\n        (optionally specified by ``minDens``) are set to the threshold value.\n\n        The boolean ``mcnp6Compatible`` may optionally be provided to include the nuclide library at\n        the end of the vector of individual nuclides using the \"nlib=\" syntax leveraged by MCNP. 
If\n        this boolean is turned on, the associated value ``mcnpLibrary`` should generally also be\n        provided, as otherwise, the library will be left blank in the resulting material card\n        string.\n\n    Parameters\n    ----------\n    densities : dict\n        number densities indexed by nuclideBase\n\n    matNum : int\n        mcnp material number\n\n    minDens : float\n        minimum density\n\n    sigFigs : int\n        significant figures for the material card\n\n    Returns\n    -------\n    mCard : list\n        list of material card strings\n    \"\"\"\n    if all(isinstance(nuc, (nuclideBases.LumpNuclideBase, nuclideBases.DummyNuclideBase)) for nuc in densities):\n        return []  # no valid nuclides to write\n    if matNum >= 0:\n        mCard = [\"m{matNum}\\n\".format(matNum=matNum)]\n    else:\n        mCard = [\"m{}\\n\"]\n\n    for nuc, dens in sorted(densities.items()):\n        # skip LFPs and Dummies.\n        if isinstance(nuc, (nuclideBases.LumpNuclideBase)):\n            runLog.important(\"The material card returned will ignore LFPs.\", single=True)\n            continue\n        elif isinstance(nuc, nuclideBases.DummyNuclideBase):\n            runLog.info(\"Omitting dummy nuclides such as {}\".format(nuc), single=True)\n            continue\n        mcnpNucName = nuc.getMcnpId()\n        newEntry = (\"      {nucName:5d} {ndens:.\" + str(sigFigs) + \"e}\\n\").format(\n            nucName=int(mcnpNucName), ndens=max(dens, minDens)\n        )  # 0 dens is invalid\n        mCard.append(newEntry)\n\n    if mcnp6Compatible:\n        mCard.append(\"      nlib={lib}c\\n\".format(lib=mcnpLibrary))\n\n    return mCard\n\n\ndef filterNuclideList(nuclideVector, nuclides):\n    \"\"\"\n    Filter out nuclides not in the nuclide vector.\n\n    Parameters\n    ----------\n    nuclideVector : dict\n        dictionary of values indexed by nuclide identifiers -- e.g. 
nucNames or nuclideBases\n\n    nuclides : list\n        list of nuclide identifiers\n\n    Returns\n    -------\n    nuclideVector : dict\n        dictionary of values indexed by nuclide identifiers -- e.g. nucNames or nuclideBases\n    \"\"\"\n    if not isinstance(list(nuclideVector.keys())[0], nuclides[0].__class__):\n        raise ValueError(\n            \"nuclide vector is indexed by {} where as the nuclides list is {}\".format(\n                nuclideVector.keys()[0].__class__, nuclides[0].__class__\n            )\n        )\n\n    for nucName in list(nuclideVector.keys()):\n        if nucName not in nuclides:\n            del nuclideVector[nucName]\n\n    return nuclideVector\n\n\ndef normalizeNuclideList(nuclideVector, normalization=1.0):\n    \"\"\"\n    Normalize the nuclide vector.\n\n    Parameters\n    ----------\n    nuclideVector : dict\n        dictionary of values -- e.g. floats, ints -- indexed by nuclide identifiers -- e.g. nucNames\n        or nuclideBases\n    normalization : float\n\n    Returns\n    -------\n    nuclideVector : dict\n        dictionary of values indexed by nuclide identifiers -- e.g. nucNames or nuclideBases\n    \"\"\"\n    normalizationFactor = sum(nuclideVector.values()) / normalization\n\n    for nucName, mFrac in nuclideVector.items():\n        nuclideVector[nucName] = mFrac / normalizationFactor\n\n    return nuclideVector\n\n\ndef expandElementalMassFracsToNuclides(\n    massFracs: dict,\n    elementExpansionPairs: Tuple[elements.Element, List[nuclideBases.NuclideBase]],\n):\n    \"\"\"\n    Expand elemental mass fractions to natural nuclides.\n\n    Modifies the input ``massFracs`` in place to contain nuclides.\n\n    Notes\n    -----\n    This indirectly updates number densities through mass fractions.\n\n    .. 
impl:: Expand mass fractions to nuclides.\n        :id: I_ARMI_UTIL_EXP_MASS_FRACS\n        :implements: R_ARMI_UTIL_EXP_MASS_FRACS\n\n        Given a vector of elements and nuclides with associated mass fractions (``massFracs``),\n        expands the elements in-place into a set of nuclides using\n        ``expandElementalNuclideMassFracs``. Isotopes to expand into are provided for each element\n        by specifying them with ``elementExpansionPairs``, which maps each element to a list of\n        particular NuclideBases; if left unspecified, all naturally-occurring isotopes are included.\n\n        Explicitly specifying the expansion isotopes provides a way for particular\n        naturally-occurring isotopes to be excluded from the expansion, e.g. excluding O-18 from an\n        expansion of elemental oxygen.\n\n    Parameters\n    ----------\n    massFracs : dict(str, float)\n        dictionary of nuclide or element names with mass fractions. Elements will be expanded in\n        place using natural isotopics.\n\n    elementExpansionPairs : (Element, [NuclideBase]) pairs\n        element objects to expand (from nuclidBase.element) and list of NuclideBases to expand into\n        (or None for all natural)\n    \"\"\"\n    # expand elements\n    for element, isotopicSubset in elementExpansionPairs:\n        massFrac = massFracs.pop(element.symbol, None)\n        if massFrac is None:\n            continue\n\n        expandedNucs = expandElementalNuclideMassFracs(element, massFrac, isotopicSubset)\n        massFracs.update(expandedNucs)\n\n        total = sum(expandedNucs.values())\n        if massFrac > 0.0 and abs(total - massFrac) / massFrac > 1e-6:\n            raise ValueError(\"Mass fractions not normalized properly {}!\".format((total, massFrac)))\n\n\ndef expandElementalNuclideMassFracs(\n    element: elements.Element,\n    massFrac: float,\n    isotopicSubset: List[nuclideBases.NuclideBase] = None,\n):\n    \"\"\"\n    Return a dictionary of nuclide names 
to isotopic mass fractions.\n\n    If an isotopic subset is passed in, the mass fractions get scaled up\n    s.t. the total mass fraction remains constant.\n\n    Parameters\n    ----------\n    element : Element\n        The element to expand to natural isotopics\n    massFrac : float\n        Mass fraction of the initial element\n    isotopicSubset : list of NuclideBases\n        Natural isotopes to include in the expansion. Useful e.g. for\n        excluding O18 from an expansion of Oxygen.\n    \"\"\"\n    elementNucBases = element.getNaturalIsotopics()\n    if isotopicSubset:\n        expandedNucBases = [nb for nb in elementNucBases if nb in isotopicSubset]\n    else:\n        expandedNucBases = elementNucBases\n    elementalWeightGperMole = sum(nb.weight * nb.abundance for nb in expandedNucBases)\n    if not any(expandedNucBases):\n        raise ValueError(\"Cannot expand element `{}` into isotopes: `{}`\".format(element, expandedNucBases))\n    expanded = {}\n    for nb in expandedNucBases:\n        expanded[nb.name] = massFrac * nb.abundance * nb.weight / elementalWeightGperMole\n    return expanded\n\n\ndef getChemicals(nuclideInventory):\n    \"\"\"\n    Groups the inventories of nuclides by their elements.\n\n    Parameters\n    ----------\n    nuclideInventory : dict\n        nuclide inventories indexed by nuc -- either nucNames or nuclideBases\n\n    Returns\n    -------\n    chemicals : dict\n        inventory of elements indexed by element symbol -- e.g. 
'U' or 'PU'\n    \"\"\"\n    chemicals = {}\n    for nuc, N in nuclideInventory.items():\n        nb = nuc if isinstance(nuc, nuclideBases.INuclide) else nuclideBases.byName[nuc]\n\n        if nb.element.symbol in chemicals:\n            chemicals[nb.element.symbol] += N\n        else:\n            chemicals[nb.element.symbol] = N\n\n    return chemicals\n\n\ndef applyIsotopicsMix(material, enrichedMassFracs: Dict[str, float], fertileMassFracs: Dict[str, float]):\n    \"\"\"\n    Update material heavy metal mass fractions based on its enrichment and two nuclide feeds.\n\n    This will remix the heavy metal in a Material object based on the object's\n    ``class1_wt_frac`` parameter and the input nuclide information.\n\n    This can be used for inputting mixtures of two external custom isotopic feeds\n    as well as for fabricating assemblies from two  closed-cycle collections\n    of material.\n\n    See Also\n    --------\n    armi.materials.material.FuelMaterial\n\n    Parameters\n    ----------\n    material : material.Material\n        The object to modify. 
Must have a ``class1_wt_frac`` param set\n    enrichedMassFracs : dict\n        Nuclide names and weight fractions of the class 1 nuclides\n    fertileMassFracs : dict\n        Nuclide names and weight fractions of the class 2 nuclides\n    \"\"\"\n    total = sum(material.massFrac.values())\n    hm = 0.0\n    for nucName, massFrac in material.massFrac.items():\n        nb = nuclideBases.byName[nucName]\n        if nb.isHeavyMetal():\n            hm += massFrac\n    hmFrac = hm / total\n    hmEnrich = material.class1_wt_frac\n    for nucName in (\n        set(enrichedMassFracs.keys()).union(set(fertileMassFracs.keys())).union(set(material.massFrac.keys()))\n    ):\n        nb = nuclideBases.byName[nucName]\n        if nb.isHeavyMetal():\n            material.massFrac[nucName] = hmFrac * (\n                hmEnrich * enrichedMassFracs.get(nucName, 0.0) + (1 - hmEnrich) * fertileMassFracs.get(nucName, 0.0)\n            )\n"
  },
  {
    "path": "armi/utils/directoryChangers.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport glob\nimport os\nimport pathlib\nimport random\nimport shutil\nimport string\n\nfrom armi import context, runLog\nfrom armi.utils import pathTools, safeCopy, safeMove\n\n\ndef _changeDirectory(destination):\n    if os.path.exists(destination):\n        os.chdir(destination)\n    else:\n        raise IOError(\"Cannot change directory to non-existent location: {}\".format(destination))\n\n\nclass DirectoryChanger:\n    r\"\"\"\n    Utility to change directory.\n\n    Use with 'with' statements to execute code in a different dir, guaranteeing a clean\n    return to the original directory\n\n    >>> with DirectoryChanger('C:\\\\whatever')\n    ...     pass\n\n    Parameters\n    ----------\n    destination : str\n        Path of directory to change into\n    filesToMove : list of str, optional\n        Filenames to bring from the CWD into the destination\n    filesToRetrieve : list of str, optional\n        Filenames to bring back from the destination to the cwd. Note that if any of these\n        files do not exist then the file will be skipped and a warning will be provided.\n    dumpOnException : bool, optional\n        Flag to tell system to retrieve the entire directory if an exception is raised within a the\n        context manager.\n    outputPath : str, optional\n        Output path for filesToRetrieve. 
If None, default is the initial working directory from\n        which the DirectoryChanger is called.\n    \"\"\"\n\n    def __init__(\n        self,\n        destination,\n        filesToMove=None,\n        filesToRetrieve=None,\n        dumpOnException=True,\n        outputPath=None,\n    ):\n        \"\"\"Establish the new and return directories.\"\"\"\n        self.initial = pathTools.armiAbsPath(os.getcwd())\n        self.destination = None\n        self.outputPath = None\n        if destination is not None:\n            self.destination = pathTools.armiAbsPath(destination)\n        if outputPath is not None:\n            self.outputPath = pathTools.armiAbsPath(outputPath)\n        else:\n            self.outputPath = self.initial\n        self._filesToMove = filesToMove or []\n        self._filesToRetrieve = filesToRetrieve or []\n        self._dumpOnException = dumpOnException\n\n    def __enter__(self):\n        \"\"\"At the inception of a with command, navigate to a new directory if one is supplied.\"\"\"\n        runLog.debug(\"Changing directory to {}\".format(self.destination))\n        self.moveFiles()\n        self.open()\n        return self\n\n    def __exit__(self, exc_type, exc_value, traceback):\n        \"\"\"At the termination of a with command, navigate back to the original directory.\"\"\"\n        runLog.debug(\"Returning to directory {}\".format(self.initial))\n        self._createOutputDirectory()\n        if exc_type is not None and self._dumpOnException:\n            runLog.info(\"An exception was raised within a DirectoryChanger. 
Retrieving entire folder for debugging.\")\n            self._retrieveEntireFolder()\n        else:\n            self.retrieveFiles()\n        self.close()\n\n    def __repr__(self):\n        \"\"\"Print the initial and destination paths.\"\"\"\n        return \"<{} {} to {}>\".format(self.__class__.__name__, self.initial, self.destination)\n\n    def open(self):\n        \"\"\"\n        User requested open, used to stalling the close from a with statement.\n\n        This method has been made for old uses of :code:`os.chdir()` and is not\n        recommended.  Please use the with statements\n        \"\"\"\n        if self.destination:\n            _changeDirectory(self.destination)\n\n    def close(self):\n        \"\"\"User requested close.\"\"\"\n        if self.initial != os.getcwd():\n            _changeDirectory(self.initial)\n\n    def moveFiles(self):\n        \"\"\"Copy ``filesToMove`` into the destination directory on entry.\"\"\"\n        initialPath = self.initial\n        destinationPath = self.destination\n        self._transferFiles(initialPath, destinationPath, self._filesToMove, moveFiles=False)\n        if self.outputPath != self.initial:\n            destinationPath = self.outputPath\n            self._transferFiles(initialPath, destinationPath, self._filesToMove, moveFiles=False)\n\n    def retrieveFiles(self):\n        \"\"\"Copy ``filesToRetrieve`` back into the initial directory on exit.\"\"\"\n        if self.outputPath != self.initial:\n            self._transferFiles(\n                self.destination,\n                self.outputPath,\n                self._filesToRetrieve,\n                moveFiles=False,\n            )\n        self._transferFiles(self.destination, self.initial, self._filesToRetrieve, moveFiles=True)\n\n    def _retrieveEntireFolder(self):\n        \"\"\"\n        Retrieve all files to a dump directory.\n\n        This is used when an exception is caught by the DirectoryChanger to rescue the\n        entire directory 
to aid in debugging. Typically this is only called if\n        ``dumpOnException`` is True.\n        \"\"\"\n        folderName = os.path.split(self.destination)[1]\n        recoveryPath = os.path.join(self.initial, f\"dump-{folderName}\")\n        shutil.copytree(self.destination, recoveryPath)\n\n    def _createOutputDirectory(self):\n        if self.outputPath == self.initial:\n            return\n        if not os.path.exists(self.outputPath):\n            runLog.extra(f\"Creating output folder: {self.outputPath}\")\n            try:\n                os.makedirs(self.outputPath)\n            except OSError as ee:\n                # even though we checked exists, this still fails\n                # sometimes when multiple MPI nodes try\n                # to make the dirs due to I/O delays\n                runLog.error(f\"Failed to make output folder: {self.outputPath}. Exception: {ee}\")\n        else:\n            runLog.extra(f\"Output folder already exists: {self.outputPath}\")\n\n    @staticmethod\n    def _transferFiles(initialPath, destinationPath, fileList, moveFiles=False):\n        \"\"\"\n        Transfer files into or out of the directory.\n\n        This is used in ``moveFiles`` and ``retrieveFiles`` to shuffle files about when creating a\n        target directory or when coming back, respectively.\n\n        Parameters\n        ----------\n        initialPath : str\n            Path to the folder to find files in.\n        destinationPath: str\n            Path to the folder to move file to.\n        fileList : list of str or list of tuple\n            File names to move from initial to destination. If this is a simple list of strings, the\n            files will be transferred. Alternatively tuples of (initialName, finalName) are allowed\n            if you want the file renamed during transit. 
In the non-tuple option, globs/wildcards\n            are allowed.\n        moveFiles: bool, optional\n            Controls whether the files are \"moved\" (``mv``) or \"copied\" (``cp``)\n\n        Warning\n        -------\n        On Windows the max number of characters in a path is 260.\n        If you exceed this you will see FileNotFound errors here.\n        \"\"\"\n        if not fileList:\n            return\n\n        if not os.path.exists(destinationPath):\n            os.makedirs(destinationPath)\n\n        for pattern in fileList:\n            if isinstance(pattern, tuple):\n                # allow renames in transit\n                fromName, destName = pattern\n                copies = [(fromName, destName)]\n            else:\n                # expand globs if they're given\n                copies = []\n                for ff in glob.glob(pattern):\n                    # renaming not allowed with globs\n                    copies.append((ff, ff))\n\n            for fromName, destName in copies:\n                fromPath = os.path.join(initialPath, fromName)\n                if not os.path.exists(fromPath):\n                    runLog.warning(f\"{fromPath} does not exist and will not be copied.\")\n                    continue\n\n                toPath = os.path.join(destinationPath, destName)\n                if moveFiles:\n                    runLog.extra(\"Moving {} to {}\".format(fromPath, toPath))\n                    safeMove(fromPath, toPath)\n                else:\n                    runLog.extra(\"Copying {} to {}\".format(fromPath, toPath))\n                    safeCopy(fromPath, toPath)\n\n\nclass TemporaryDirectoryChanger(DirectoryChanger):\n    \"\"\"\n    Create a temporary directory, change into it, and if there is no error/exception generated when using a\n    :code:`with` statement, delete the directory.\n\n    Notes\n    -----\n    If there is an error/exception generated while in a :code:`with` statement, the temporary directory 
contents will\n    be copied to the original directory and then the temporary directory will be deleted.\n\n    There is the ability for a user to set the environment variable ARMI_TEMP_ROOT_PATH, which will globally override\n    the `root` argument being passed in. This is a useful tool for running code or tests in a read-only environment.\n    \"\"\"\n\n    def __init__(\n        self,\n        root=None,\n        filesToMove=None,\n        filesToRetrieve=None,\n        dumpOnException=True,\n        outputPath=None,\n    ):\n        DirectoryChanger.__init__(\n            self,\n            root,\n            filesToMove,\n            filesToRetrieve,\n            dumpOnException,\n            outputPath,\n        )\n\n        # If an application sets this environment variable, all root args in all `TempDirChanger` uses are overriden\n        # with a different root path. This is useful for running unit tests in a read-only environment.\n        if os.environ.get(\"ARMI_TEMP_ROOT_PATH\"):\n            root = os.environ[\"ARMI_TEMP_ROOT_PATH\"]\n\n        # If no root dir is given, the default path comes from context.getFastPath, which\n        # *might* be relative to the cwd, making it possible to delete unintended files.\n        # So this check is here to ensure that if we grab a path from context, it is a\n        # proper temp dir.\n        # That said, since the TemporaryDirectoryChanger *always* responsible for\n        # creating its destination directory, it may always be safe to delete it\n        # regardless of location.\n        if root is None:\n            root = context.getFastPath()\n            # ARMIs temp dirs are in an context.APP_DATA directory: validate this is a temp dir.\n            if pathlib.Path(context.APP_DATA) not in pathlib.Path(root).parents:\n                raise ValueError(\"Temporary directory not in a safe location for deletion.\")\n\n        # make the tmp dir, if necessary\n        if not os.path.exists(root):\n         
   try:\n                os.makedirs(root)\n            except FileExistsError:\n                # ignore the obvious race condition\n                pass\n\n        # init the important path attributes\n        self.initial = os.path.abspath(os.getcwd())\n        self.destination = TemporaryDirectoryChanger.GetRandomDirectory(root)\n        while os.path.exists(self.destination):\n            self.destination = TemporaryDirectoryChanger.GetRandomDirectory(root)\n\n    @classmethod\n    def GetRandomDirectory(cls, root):\n        return os.path.join(\n            root,\n            \"temp-\" + \"\".join(random.choice(string.ascii_letters + string.digits) for _ in range(10)),\n        )\n\n    def __enter__(self):\n        os.makedirs(self.destination)\n        return DirectoryChanger.__enter__(self)\n\n    def __exit__(self, exc_type, exc_value, traceback):\n        DirectoryChanger.__exit__(self, exc_type, exc_value, traceback)\n        try:\n            pathTools.cleanPath(self.destination, mpiRank=context.MPI_RANK, forceClean=True)\n        except PermissionError:\n            if os.name == \"nt\":\n                runLog.warning(\n                    \"There is an issue where Windows will not agree to delete private directories.\"\n                    \"That is, if you create a directory with a name starting with a period, the \"\n                    \"TempDirChanger will not be able to clean it (for instance, a '.git' dir).\"\n                )\n\n\nclass ForcedCreationDirectoryChanger(DirectoryChanger):\n    \"\"\"Creates the directory tree necessary to reach your desired destination.\"\"\"\n\n    def __init__(\n        self,\n        destination,\n        filesToMove=None,\n        filesToRetrieve=None,\n        dumpOnException=True,\n        outputPath=None,\n    ):\n        if not destination:\n            raise ValueError(\"A destination directory must be provided.\")\n        DirectoryChanger.__init__(\n            self,\n            destination,\n       
     filesToMove,\n            filesToRetrieve,\n            dumpOnException,\n            outputPath,\n        )\n\n    def __enter__(self):\n        if not os.path.exists(self.destination):\n            runLog.extra(f\"Creating destination folder: {self.destination}\")\n            try:\n                os.makedirs(self.destination)\n            except OSError as ee:\n                # even though we checked exists, this still fails\n                # sometimes when multiple MPI nodes try\n                # to make the dirs due to I/O delays\n                runLog.error(f\"Failed to make destination folder: {self.destination}. Exception: {ee}\")\n        else:\n            runLog.extra(f\"Destination folder already exists: {self.destination}\")\n        DirectoryChanger.__enter__(self)\n\n        return self\n\n\ndef directoryChangerFactory():\n    if context.MPI_SIZE > 1:\n        from armi.utils.directoryChangersMpi import MpiDirectoryChanger\n\n        return MpiDirectoryChanger\n    else:\n        return DirectoryChanger\n"
  },
  {
    "path": "armi/utils/directoryChangersMpi.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nMPI Directory changers.\n\nThis is a separate module largely to minimize potential cyclic imports\nbecause the mpi action stuff requires an import of the reactor framework.\n\"\"\"\n\nfrom armi import mpiActions\nfrom armi.utils import directoryChangers\n\n\nclass MpiDirectoryChanger(directoryChangers.DirectoryChanger):\n    \"\"\"Change all nodes to specified directory.\n\n    Notes\n    -----\n    `filesToMove` and `filesToRetrieve` do not get broadcasted to worker nodes. 
This is\n    intended since this would cause a race condition between deleting and moving files.\n    \"\"\"\n\n    def __init__(self, destination, outputPath=None):\n        \"\"\"Establish the new and return directories.\n\n        Parameters\n        ----------\n        destination : str\n            destination directory\n        outputPath : str, optional\n            directory for outputs\n        \"\"\"\n        directoryChangers.DirectoryChanger.__init__(self, destination, outputPath=outputPath)\n\n    def open(self):\n        cdma = _ChangeDirectoryMpiAction(self.destination)\n        # line below looks a little weird, but it returns the instance\n        cdma = cdma.broadcast(cdma)\n        cdma.invoke(None, None, None)\n\n    def close(self):\n        cdma = _ChangeDirectoryMpiAction(self.initial)\n        cdma = cdma.broadcast(cdma)\n        cdma.invoke(None, None, None)\n\n\nclass _ChangeDirectoryMpiAction(mpiActions.MpiAction):\n    \"\"\"Change directory action.\"\"\"\n\n    def __init__(self, destination):\n        mpiActions.MpiAction.__init__(self)\n        self._destination = destination\n\n    def invokeHook(self):\n        directoryChangers._changeDirectory(self._destination)\n        return True\n"
  },
  {
    "path": "armi/utils/dynamicImporter.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Dynamic importing help.\"\"\"\n\n\ndef getEntireFamilyTree(cls):\n    \"\"\"Returns a list of classes subclassing the input class.\n\n    One large caveat is it can only locate subclasses that had been imported somewhere\n    Look to use importEntirePackage before searching for subclasses if not all children\n    are being found as expected.\n    \"\"\"\n    return cls.__subclasses__() + [\n        grandchildren for child in cls.__subclasses__() for grandchildren in getEntireFamilyTree(child)\n    ]\n"
  },
  {
    "path": "armi/utils/flags.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nA Flag class, similar to ``enum.Flag``.\n\nThis is an alternate implementation of the standard-library ``enum.Flag`` class. We use this to implement\n:py:class:`armi.reactor.flags.Flags`. We used to use the standard-library implementation, but that became limiting when\nwe wanted to make it possible for plugins to define their own flags; the standard implementation does not support\nextension. We also considered the ``aenum`` package, which permits extension of ``Enum`` classes, but unfortunately does\nnot support extension of ``Flags``. So, we had to make our own. 
This is a much simplified version of what comes with\n``aenum``, but still provides most of the safety and functionality.\n\"\"\"\n\nimport math\nfrom typing import Dict, List, Sequence, Tuple, Union\n\nfrom armi import runLog\n\n\nclass auto:  # noqa: N801\n    \"\"\"\n    Empty class for requesting a lazily-evaluated automatic field value.\n\n    This can be used to automatically provision a value for a field, when the specific value does not matter.\n\n    In the future, it would be nice to support some arithmetic for these so that automatically-derived combinations of\n    other automatically defined fields can be specified as well.\n    \"\"\"\n\n    def __iter__(self):\n        \"\"\"\n        Dummy __iter__ implementation.\n\n        This is only needed to make mypy happy when it type checks things that have FlagTypes in them, since these can\n        normally be iterated over, but mypy doesn't know that the metaclass consumes the autos.\n        \"\"\"\n        raise NotImplementedError(\n            f\"__iter__() is not actually implemented on {type(self)}; it is only defined to appease mypy.\"\n        )\n\n\nclass _FlagMeta(type):\n    \"\"\"\n    Metaclass for defining new Flag classes.\n\n    This attempts to do the minimum required to make the Flag class and its subclasses function properly. It mostly\n    digests the class attributes, resolves automatic values and creates instances of the class as it's own class\n    attributes for each field. The rest of the functionality lives in the base ``Flag`` class as plain-old code.\n\n    .. 
tip:: Because individual flags are defined as *class* attributes (as opposed to instance attributes), we have to\n        customize the way a Flag subclass itself is built, which requires a metaclass.\n    \"\"\"\n\n    def __new__(cls, name, bases, attrs):\n        autoAt = 1\n        explicitFields = [(attr, val) for attr, val in attrs.items() if isinstance(val, int)]\n        explicitValues = set(val for name, val in explicitFields)\n\n        flagClass = type.__new__(cls, name, bases, attrs)\n\n        # Make sure that none of the values collide\n        assert len(explicitValues) == len(explicitFields)\n\n        # Assign numeric values to the autos\n        for aName, aVal in attrs.items():\n            if isinstance(aVal, auto):\n                while autoAt in explicitValues:\n                    autoAt *= 2\n                attrs[aName] = autoAt\n                autoAt *= 2\n\n        # Auto fields have been resolved, so now collect all ints\n        allFields = {name: val for name, val in attrs.items() if isinstance(val, int)}\n        allFields = {n: v for n, v in allFields.items() if not _FlagMeta.isdunder(n)}\n        flagClass._nameToValue = allFields\n        flagClass._valuesTaken = set(val for _, val in allFields.items())\n        flagClass._autoAt = autoAt\n        flagClass._width = math.ceil(len(flagClass._nameToValue) / 8)\n\n        # Replace the original class attributes with instances of the class itself.\n        for name, value in allFields.items():\n            instance = flagClass()\n            instance._value = value\n            setattr(flagClass, name, instance)\n\n        return flagClass\n\n    @staticmethod\n    def isdunder(s):\n        return s.startswith(\"__\") and s.endswith(\"__\")\n\n    def __getitem__(cls, key):\n        \"\"\"\n        Implement indexing at the class level.\n\n        This has to be done at the metaclass level, since the python interpreter looks to\n        ``type(klass).__getitem__(klass, key)``, which 
for an implementation of Flag is this metaclass.\n        \"\"\"\n        return cls(cls._nameToValue[key])\n\n\nclass Flag(metaclass=_FlagMeta):\n    \"\"\"\n    A collection of bitwise flags.\n\n    This is intended to emulate ``enum.Flag``, except with the possibility of extension after the class has been\n    defined. Most docs for ``enum.Flag`` should be relevant here, but there are sure to be occasional differences.\n\n    .. impl:: No two flags have equivalence.\n        :id: I_ARMI_FLAG_DEFINE\n        :implements: R_ARMI_FLAG_DEFINE\n\n        A bitwise flag class intended to emulate the standard library's ``enum.Flag``, with the added functionality that\n        it allows for extension after the class has been defined. Each Flag is unique; no two Flags are equivalent.\n\n        Note that while Python allows for arbitrary-width integers, exceeding the system-native integer size can lead to\n        challenges in storing data, e.g. in an HDF5 file. In this case, the ``from_bytes()`` and ``to_bytes()`` methods\n        are provided to represent a Flag's values in smaller chunks so that writeability can be maintained.\n\n    .. warning::\n        Python features arbitrary-width integers, allowing one to represent an practically unlimited number of fields.\n        *However*, including more flags than can be represented in the system-native integer types may lead to strange\n        behavior when interfacing with non-pure Python code. For instance, exceeding 64 fields makes the underlying\n        value not trivially-storable in an HDF5 file. 
In such circumstances, the ``from_bytes()`` and ``to_bytes()``\n        methods are available to represent a Flag's values in smaller chunks.\n    \"\"\"\n\n    _autoAt = None\n    _nameToValue = dict()\n    _valuesTaken = set()\n    _width = None\n\n    def __init__(self, init=0):\n        self._value = int(init)\n\n    def _flagsOn(self):\n        flagsOn = set()\n        for k, v in self._nameToValue.items():\n            if self._value & v:\n                flagsOn.add(k)\n\n        return flagsOn\n\n    def __repr__(self):\n        return f\"<{type(self).__name__}.{'|'.join(self._flagsOn())}: {self._value}>\"\n\n    def __str__(self):\n        return f\"{type(self).__name__}.{'|'.join(self._flagsOn())}\"\n\n    def __getstate__(self):\n        return self._value\n\n    def __setstate__(self, state: int):\n        self._value = state\n\n    @classmethod\n    def _registerField(cls, name, value):\n        \"\"\"\n        Plug a new field into the Flags.\n\n        This makes sure everything is consistent and does error/collision checks. 
Mostly useful for extending an\n        existing class with more fields.\n        \"\"\"\n        if name in cls._nameToValue:\n            runLog.debug(f\"The flag {name} already exists and does not need to be recreated.\")\n            return\n\n        cls._valuesTaken.add(value)\n        cls._nameToValue[name] = value\n        cls._width = math.ceil(len(cls._nameToValue) / 8)\n        instance = cls(value)\n        setattr(cls, name, instance)\n\n    @classmethod\n    def _resolveAutos(cls, fields: Sequence[str]) -> List[Tuple[str, int]]:\n        \"\"\"Assign values to autos, based on the current state of the class.\"\"\"\n        # There is some opportunity for code reuse between this and the metaclass...\n        resolved = []\n        for field in fields:\n            while cls._autoAt in cls._valuesTaken:\n                cls._autoAt *= 2\n            value = cls._autoAt\n            resolved.append((field, value))\n            cls._autoAt *= 2\n        return resolved\n\n    @classmethod\n    def width(cls):\n        \"\"\"Return the number of bytes needed to store all of the flags on this class.\"\"\"\n        return cls._width\n\n    @classmethod\n    def fields(cls):\n        \"\"\"Return a dictionary containing a mapping from field name to integer value.\"\"\"\n        return cls._nameToValue\n\n    @classmethod\n    def sortedFields(cls):\n        \"\"\"Return a list of all field names, sorted by increasing integer value.\"\"\"\n        return [i[0] for i in sorted(cls._nameToValue.items(), key=lambda item: item[1])]\n\n    @classmethod\n    def extend(cls, fields: Dict[str, Union[int, auto]]):\n        \"\"\"\n        Extend the Flags object with new fields.\n\n        .. warning::\n            This alters the class that it is called upon! Existing instances should see the new data, since classes are\n            mutable.\n\n        .. 
impl:: Set of flags are extensible without loss of uniqueness.\n            :id: I_ARMI_FLAG_EXTEND0\n            :implements: R_ARMI_FLAG_EXTEND\n\n            A class method to extend a ``Flag`` with a vector of provided additional ``fields``, with field names as\n            keys, without loss of uniqueness. Values for the additional ``fields`` can be explicitly specified, or an\n            instance of ``auto`` can be supplied.\n\n        Parameters\n        ----------\n        fields : dict\n            A dictionary containing field names as keys, and their desired values, or an instance of ``auto`` as values.\n\n        Example\n        -------\n        >>> class MyFlags(Flags):\n        ...     FOO = auto()\n        ...     BAR = 1\n        ...     BAZ = auto()\n        >>> MyFlags.extend({\"SUPER\": auto()})\n        >>> print(MyFlags.SUPER)\n        <MyFlags.SUPER: 8>\n        \"\"\"\n        # add explicit values first, so that autos know about them\n        for field, value in ((f, v) for f, v in fields.items() if isinstance(v, int)):\n            cls._registerField(field, value)\n\n        # find auto values (ignore if they already exist)\n        toResolve = [field for field, val in fields.items() if isinstance(val, auto)]\n        toResolve = [field for field in toResolve if field not in cls._nameToValue]\n        resolved = cls._resolveAutos(toResolve)\n        for field, value in resolved:\n            cls._registerField(field, value)\n\n    def to_bytes(self, byteorder=\"little\"):\n        \"\"\"\n        Return a byte stream representing the flag.\n\n        This is useful when storing Flags in a data type of limited size. Python ints can be of arbitrary size, while\n        most other systems can only represent integers of 32 or 64 bits. 
For compatibility, this function allows to\n        convert the flags to a sequence of single-byte elements.\n\n        Note that this uses snake_case to mimic the method on the Python-native int type.\n        \"\"\"\n        return self._value.to_bytes(self.width(), byteorder=byteorder)\n\n    @classmethod\n    def from_bytes(cls, bytes, byteorder=\"little\"):\n        \"\"\"Return a Flags instance given a byte stream.\"\"\"\n        return cls(int.from_bytes(bytes, byteorder=byteorder))\n\n    def __int__(self):\n        return self._value\n\n    def __and__(self, other):\n        return type(self)(self._value & other._value)\n\n    def __or__(self, other):\n        return type(self)(self._value | other._value)\n\n    def __xor__(self, other):\n        return type(self)(self._value ^ other._value)\n\n    def __invert__(self):\n        \"\"\"\n        Implement unary ~.\n\n        Note\n        ----\n        This is avoiding just ~ on the ``_value`` because it might not be safe. Using the int directly is slightly\n        dangerous in that python ints are not of fixed width, so the result of inverting one Flag might not be as wide\n        as the result of inverting another Flag. Typically, one would want to invert a Flag to create a mask for\n        unsetting a bit on another Flag, like ``f1 &= ~f2``. If ``f2`` is narrower than ``f1`` the field of ones that\n        you need to keep ``f1`` bits on might not cover the width of ``f1``, erroneously turning off its upper bits. Not\n        sure if this was an issue before or not. 
Once things are working, might makes sense to play with this more.\n        \"\"\"\n        new = self._value\n        for _, val in self._nameToValue.items():\n            if val & new:\n                new -= val\n            else:\n                new += val\n        return type(self)(new)\n\n    def __iter__(self):\n        for _, value in self._nameToValue.items():\n            if value & self._value:\n                yield type(self)(value)\n\n    def __bool__(self):\n        return bool(self._value)\n\n    def __eq__(self, other):\n        return self._value == other._value\n\n    def __contains__(self, other):\n        return bool(other & self)\n\n    def __hash__(self):\n        return hash(self._value)\n\n\n# Type alias to reliably check for a proper Flag type. This cannot just be `Flag`, since mypy gets confused by `auto`\n# because it doesn't go to the trouble of resolving them in the metaclass.\nFlagType = Union[Flag, auto]\n"
  },
  {
    "path": "armi/utils/gridEditor.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nGUI elements for manipulating grid layout and contents.\n\nThis provides a handful of classes which provide wxPython Controls for manipulating grids and grid\nBlueprints.\n\nThe grid editor may be invoked with the :py:mod:`armi.cli.gridGui` entry point::\n\n    $ python -m armi grids\n\nIf you have an existing set of input files, pass in the blueprints input file as the first argument\nand the system will load up the associated grid, e.g.::\n\n    $ python -m armi grids FFTF-blueprints.yaml\n\n\n.. figure:: /.static/gridEditor.png\n    :align: center\n\n    An example of the Grid Editor being used on a FFTF input file\n\n**Known Issues**\n\n* There is no action stack or undo functionality. Save frequently if you want to recover previous\n  states\n\n* Cartesian grids are supported, but not rendered as nicely as their Hex counterparts. The \"through\n  center assembly\" case is not rendered properly with the half-assemblies that lie along the edges.\n\n* The controls are optimized for manipulating a Core layout, displaying an \"Assembly palette\" that\n  contains the Assembly designs found in the top-level blueprints. 
A little extra work and this\n  could also be made to manipulate block grids or other things.\n\n* Assembly colors are derived from the set of flags applied to them, but the mapping of colors to\n  flags is not particularly rich, and there isn't anything to disambiguate between assemblies of\n  different design, but the same flags.\n\n* No proper zoom support, and object sizes are fixed and don't accommodate long specifiers.\n\"\"\"\n\nimport colorsys\nimport enum\nimport io\nimport os\nimport pathlib\nimport sys\nfrom typing import Dict, Optional, Sequence, Tuple, Union\n\nimport numpy as np\nimport numpy.linalg\nimport wx\nimport wx.adv\n\nfrom armi.reactor import geometry, grids\nfrom armi.reactor.blueprints import Blueprints, gridBlueprint, migrate\nfrom armi.reactor.blueprints.assemblyBlueprint import AssemblyBlueprint\nfrom armi.reactor.blueprints.gridBlueprint import GridBlueprint, saveToStream\nfrom armi.reactor.flags import Flags\nfrom armi.settings.caseSettings import Settings\nfrom armi.utils import hexagon, textProcessors\n\nUNIT_SIZE = 50  # pixels per assembly\nUNIT_MARGIN = 40  # offset applied to the draw area margins\n\n# The color to use for each object is based on the flags that that object has. All applicable colors\n# will be blended together to produce the final color for the object. There are also plans to apply\n# brush styles like cross-hatching or the like, which is what the Nones are for below. Future work\n# to employ these. Colors are RGB fractions.\nFLAG_STYLES = {\n    # Red\n    Flags.FUEL: (np.array([1.0, 0.0, 0.0]), None),\n    # Green\n    Flags.CONTROL: (np.array([0.0, 1.0, 0.0]), None),\n    # Gray\n    Flags.SHIELD: (np.array([0.4, 0.4, 0.4]), None),\n    # Yellow\n    Flags.REFLECTOR: (np.array([0.5, 0.5, 0.0]), None),\n    # Paisley?\n    Flags.INNER: (np.array([0.5, 0.5, 1.0]), None),\n    # We shouldn't see many SECONDARY, OUTER, MIDDLE, etc. 
on their own, so these\n    # will just darken or brighten whatever color we would otherwise get)\n    Flags.SECONDARY: (np.array([0.0, 0.0, 0.0]), None),\n    Flags.OUTER: (np.array([0.0, 0.0, 0.0]), None),\n    # WHITE (same as above, this will just lighten anything that it accompanies)\n    Flags.MIDDLE: (np.array([1.0, 1.0, 1.0]), None),\n    Flags.ANNULAR: (np.array([1.0, 1.0, 1.0]), None),\n    Flags.IGNITER: (np.array([0.2, 0.2, 0.2]), None),\n    Flags.STARTER: (np.array([0.4, 0.4, 0.4]), None),\n    Flags.FEED: (np.array([0.6, 0.6, 0.6]), None),\n    Flags.DRIVER: (np.array([0.8, 0.8, 0.8]), None),\n}\n\n# RGB weights for calculating luminance. We use this to decide whether we should put white or black\n# text on top of the color. These come from CCIR 601\nLUMINANCE_WEIGHTS = np.array([0.3, 0.59, 0.11])\n\n\ndef _translationMatrix(x, y):\n    \"\"\"Return an affine transformation matrix representing an x- and y-translation.\"\"\"\n    return np.array([[1.0, 0.0, x], [0.0, 1.0, y], [0.0, 0.0, 1.0]])\n\n\ndef _boundingBox(points: Sequence[np.ndarray]) -> wx.Rect:\n    \"\"\"Return the smallest wx.Rect that contains all of the passed points.\"\"\"\n    xmin = np.amin([p[0] for p in points])\n    xmax = np.amax([p[0] for p in points])\n\n    ymin = np.amin([p[1] for p in points])\n    ymax = np.amax([p[1] for p in points])\n\n    return wx.Rect(wx.Point(int(xmin), int(ymin)), wx.Point(int(xmax), int(ymax)))\n\n\ndef _desaturate(c: Sequence[float]):\n    r, g, b = tuple(c)\n    hue, lig, sat = colorsys.rgb_to_hls(r, g, b)\n    lig = lig + (1.0 - lig) * 0.5\n    return np.array(colorsys.hls_to_rgb(hue, lig, sat))\n\n\ndef _getColorAndBrushFromFlags(f, bold=True):\n    \"\"\"Given a set of Flags, return a wx.Pen and wx.Brush with which to draw a shape.\"\"\"\n    c = np.array([0.0, 0.0, 0.0])\n    nColors = 0\n\n    for styleFlag, style in FLAG_STYLES.items():\n        if not styleFlag & f:\n            continue\n\n        color, brush = style\n        if color 
is not None:\n            c += color\n            nColors += 1\n    if nColors:\n        c /= nColors\n\n    if not bold:\n        # round-trip the rgb color through hsv so that we can desaturate\n        c = _desaturate(c)\n\n    luminance = c.dot(LUMINANCE_WEIGHTS)\n    dark = luminance < 0.5\n\n    c = tuple(int(255 * ci) for ci in c)\n\n    brush = wx.Brush(wx.Colour(*c, 255))\n    pen = wx.WHITE if dark else wx.BLACK\n\n    return pen, brush\n\n\ndef _drawShape(\n    dc: wx.DC,\n    geom: geometry.GeomType,\n    view: np.ndarray,\n    model: Optional[np.ndarray] = None,\n    label: str = \"\",\n    description: Optional[str] = None,\n    bold: bool = True,\n):\n    \"\"\"\n    Draw a shape to the passed DC, given its GeomType and other relevant information. Return the\n    bounding box.\n\n    Parameters\n    ----------\n    dc: wx.DC\n        The device context to draw to\n    geom: geometry.GeomType\n        The geometry type, which defines the shape to be drawn\n    view: np.ndarray\n        A 3x3 matrix defining the world transform\n    model: np.ndarray, optional\n        A 3x3 matrix defining the model transform. No transform is made to the \"unit\"\n        shape if no model transform is provided.\n    label: str, optional\n        A string label to draw on the shape\n    description: str, optional\n        A string containing metadata for determining how to style to shape\n    bold: bool, optional\n        Whether the object should be drawn with full saturation. 
Default ``True``\n    \"\"\"\n    if description is None:\n        dc.SetBrush(wx.Brush(wx.Colour(200, 200, 200, 255)))\n        color = wx.BLACK\n    else:\n        aFlags = Flags.fromStringIgnoreErrors(description)\n        color, brush = _getColorAndBrushFromFlags(aFlags, bold=bold)\n        dc.SetBrush(brush)\n\n    if geom == geometry.GeomType.HEX:\n        primitive = hexagon.corners(rotation=0)\n    elif geom == geometry.GeomType.CARTESIAN:\n        primitive = [(-0.5, -0.5), (0.5, -0.5), (0.5, 0.5), (-0.5, 0.5)]\n    else:\n        raise ValueError(\"Geom type `{}` unsupported\".format(geom))\n\n    # Appending 1 to each coordinate since the transformation matrix is 3x3\n    poly = np.array([np.append(vertex, 1) for vertex in primitive]).transpose()\n    model = model if model is not None else np.eye(3)\n    poly = view.dot(model).dot(poly).transpose()\n    poly = [wx.Point(int(vertex[0]), int(vertex[1])) for vertex in poly]\n\n    boundingBox = _boundingBox(poly)\n\n    dc.SetTextForeground(color)\n    dc.DrawPolygon(poly)\n    dc.DrawLabel(label, boundingBox, wx.ALIGN_CENTRE)\n\n    return boundingBox\n\n\nclass _GridControls(wx.Panel):\n    \"\"\"Collection of controls for the main Grid editor. Save/Open, num rings, etc.\"\"\"\n\n    def __init__(self, parent):\n        wx.Panel.__init__(self, parent, id=wx.ID_ANY, pos=wx.DefaultPosition, size=wx.DefaultSize)\n\n        self.parent = parent\n\n        sizer = wx.BoxSizer(wx.HORIZONTAL)\n\n        self.ringControl = wx.SpinCtrl(self, id=wx.ID_ANY, initial=5, min=1, max=20)\n        self.ringControl.SetToolTip(\"Select how many rings of the grid to display\")\n        self.ringApply = wx.Button(self, id=wx.ID_ANY, label=\"Apply\")\n        self.ringApply.SetToolTip(\n            \"Apply the number of rings to the current grid. 
\"\n            \"Assemblies outside of the displayed region will not be removed.\"\n        )\n\n        self.expandButton = wx.Button(self, id=wx.ID_ANY, label=\"Expand to full core\")\n        self.labelMode = wx.Choice(\n            self,\n            id=wx.ID_ANY,\n            choices=[mode.label for mode in GridGui.Mode if mode is not GridGui.Mode.PATH],\n        )\n        self.labelMode.SetSelection(int(GridGui.Mode.SPECIFIER))\n        self.labelMode.SetToolTip(\"Select what to display in each grid region.\")\n\n        self.saveButton = wx.Button(self, id=wx.ID_ANY, label=\"Save grid blueprints...\")\n        self.saveButton.SetToolTip(\"Save just the grids section to its own file. \")\n        self.openButton = wx.Button(self, id=wx.ID_ANY, label=\"Open blueprints...\")\n        self.openButton.SetToolTip(\n            \"Open a new top-level blueprints file. Top-level is needed to populate the assembly palette on the right.\"\n        )\n        self.newButton = wx.Button(self, id=wx.ID_ANY, label=\"New grid blueprints...\")\n        self.newButton.SetToolTip(\"Create a new Grid blueptint.\")\n        self.helpButton = wx.Button(self, id=wx.ID_ANY, label=\"Help\")\n        self.saveImgButton = wx.Button(self, id=wx.ID_ANY, label=\"Save image...\")\n        self.saveImgButton.SetToolTip(\"Save the grid layout to an image file.\")\n\n        self.Bind(wx.EVT_BUTTON, self.onChangeRings, self.ringApply)\n        self.Bind(wx.EVT_BUTTON, self.onExpand, self.expandButton)\n        self.Bind(wx.EVT_BUTTON, self.onSave, self.saveButton)\n        self.Bind(wx.EVT_BUTTON, self.onOpen, self.openButton)\n        self.Bind(wx.EVT_BUTTON, self.onNew, self.newButton)\n        self.Bind(wx.EVT_BUTTON, self.onHelp, self.helpButton)\n        self.Bind(wx.EVT_BUTTON, self.onSaveImage, self.saveImgButton)\n        self.Bind(wx.EVT_CHOICE, self.onLabelMode, self.labelMode)\n\n        self.help = HelpDialog(self)\n\n        ringBox = wx.BoxSizer(wx.VERTICAL)\n        
ringLabel = wx.StaticText(self, wx.ID_ANY, \"Num. Rings\", style=wx.ALIGN_CENTRE_HORIZONTAL)\n        ringBox.Add(ringLabel, 1, wx.EXPAND)\n        ringBox.Add(self.ringControl, 1, wx.EXPAND)\n        ringBox.Add(self.ringApply, 1, wx.EXPAND)\n        sizer.Add(ringBox, 0, wx.ALL, 0)\n\n        auxButtons = wx.BoxSizer(wx.VERTICAL)\n        auxButtons.Add(self.expandButton, 1, wx.EXPAND)\n        auxButtons.Add(self.labelMode, 1, wx.EXPAND)\n        sizer.Add(auxButtons)\n\n        fileBox = wx.BoxSizer(wx.VERTICAL)\n        fileBox.Add(self.saveButton, 1, wx.EXPAND)\n        fileBox.Add(self.openButton, 1, wx.EXPAND)\n        fileBox.Add(self.newButton, 1, wx.EXPAND)\n\n        sizer.Add(fileBox)\n        sizer.Add(self.helpButton)\n        sizer.Add(self.saveImgButton)\n\n        self.SetSizerAndFit(sizer)\n\n    def setNumRings(self, numRings):\n        self.ringControl.SetValue(numRings)\n\n    def onChangeRings(self, _event):\n        self.parent.setNumRings(self.ringControl.GetValue())\n\n    def onHelp(self, _event):\n        self.help.Show()\n\n    def onLabelMode(self, _event):\n        newMode = GridGui.Mode(self.labelMode.GetSelection())\n        self.parent.setMode(newMode)\n\n    def onExpand(self, event):\n        self.parent.expandToFullCore(event)\n\n    def onSave(self, event):\n        self.parent.save()\n\n    def onSaveImage(self, event):\n        self.parent.saveImage()\n\n    def onOpen(self, event):\n        self.parent.open(event)\n\n    def onNew(self, event):\n        self.parent.new(event)\n\n\nclass _PathControl(wx.Panel):\n    \"\"\"Collection of controls for manipulating fuel shuffling paths.\"\"\"\n\n    def __init__(self, parent, viewer=None):\n        wx.Panel.__init__(self, parent, id=wx.ID_ANY)\n\n        # Direct link to the main viz control. 
This avoids having to reach up and back down for an\n        # instance, with all of the structural assumptions that that requires.\n        self._viewer = viewer\n\n        self._needsIncrement = False\n\n        self.activateButton = wx.ToggleButton(self, label=\"Fuel Path\")\n        self.clearButton = wx.ToggleButton(self, label=\"Remove From Path\")\n        sizer = wx.BoxSizer(wx.VERTICAL)\n\n        pathSizer = wx.BoxSizer(wx.HORIZONTAL)\n        indexSizer = wx.BoxSizer(wx.HORIZONTAL)\n\n        self.pathSpinner = wx.SpinCtrl(self, id=wx.ID_ANY, initial=0, min=0)\n        self.indexSpinner = wx.SpinCtrl(self, id=wx.ID_ANY, initial=0, min=0)\n        self.autoIncrement = wx.CheckBox(self, id=wx.ID_ANY, label=\"Increment\")\n\n        pathSizer.Add(wx.StaticText(self, wx.ID_ANY, \"Path: \"))\n        pathSizer.Add(self.pathSpinner, 1)\n\n        indexSizer.Add(wx.StaticText(self, wx.ID_ANY, \"Index: \"))\n        indexSizer.Add(self.indexSpinner, 1)\n\n        buttonSizer = wx.BoxSizer(wx.HORIZONTAL)\n        buttonSizer.Add(self.activateButton)\n        buttonSizer.Add(self.clearButton)\n        buttonSizer.AddSpacer(20)\n\n        sizer.Add(buttonSizer, 1, wx.EXPAND)\n        sizer.Add(pathSizer, 1)\n        sizer.Add(indexSizer, 1)\n\n        sizer.Add(self.autoIncrement)\n\n        self.Bind(wx.EVT_TOGGLEBUTTON, parent.onToggle, self.activateButton)\n        self.Bind(wx.EVT_TOGGLEBUTTON, parent.onToggle, self.clearButton)\n        self.Bind(wx.EVT_CHECKBOX, self.onAutoIncrement, self.autoIncrement)\n        self.Bind(wx.EVT_SPINCTRL, self.onPathChange, self.pathSpinner)\n\n        self.SetSizerAndFit(sizer)\n\n    def onPathChange(self, event):\n        self.indexSpinner.SetValue(0)\n        if self._viewer is not None:\n            self._viewer.drawGrid()\n            self._viewer.drawArrows()\n            self._viewer.Refresh()\n\n    def onAutoIncrement(self, event):\n        self.indexSpinner.Enable(not self.autoIncrement.GetValue())\n\n    def 
getActivateButtons(self):\n        return {\n            self.activateButton.GetId(): self.activateButton,\n            self.clearButton.GetId(): self.clearButton,\n        }\n\n    def getIndices(self, clear=False) -> Tuple[Optional[int], Optional[int]]:\n        if self.clearButton.GetValue() and clear:\n            return None, None\n        path, index = self.pathSpinner.GetValue(), self.indexSpinner.GetValue()\n        if self._needsIncrement:\n            self._needsIncrement = False\n            self.indexSpinner.SetValue(index + 1)\n        return path, index\n\n    def maybeIncrement(self):\n        self._needsIncrement = self.autoIncrement.GetValue() and self.activateButton.GetValue()\n\n\nclass _AssemblyPalette(wx.ScrolledWindow):\n    \"\"\"\n    Collection of toggle controls for each defined AssemblyBlueprint, as well as some extra controls\n    for configuring fuel shuffling paths.\n    \"\"\"\n\n    def __init__(\n        self,\n        parent,\n        geomType: Optional[geometry.GeomType],\n        assemDesigns=None,\n        viewer=None,\n    ):\n        wx.ScrolledWindow.__init__(self, parent, wx.ID_ANY, (0, 0), size=(250, 150), style=wx.SUNKEN_BORDER)\n\n        self.parent = parent\n        self.geomType = geomType\n        self.assemDesigns = assemDesigns or dict()\n\n        self.SetScrollRate(0, 20)\n\n        # None -> None is useful for propagating a None to other components without\n        # special branching\n        self.assemDesignsById: Dict[Optional[int], Optional[AssemblyBlueprint]] = {None: None}\n\n        sizer = wx.BoxSizer(wx.VERTICAL)\n        sizer.Add(\n            wx.StaticText(self, wx.ID_ANY, \"Assemblies:\"),\n            0,\n            wx.ALIGN_CENTRE | wx.ALL,\n            5,\n        )\n\n        # keyed on ID\n        self.assemButtons = dict()\n        self.buttonIdBySpecifier = {None: None}\n        self.activeAssemID: Optional[int] = None\n\n        for key, design in self.assemDesigns.items():\n            # 
flip y-coordinates, enlarge, offset\n            flip_y = np.array([[1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, 1.0]])\n            scale = np.array(\n                [\n                    [UNIT_SIZE * 0.8, 0.0, 0.0],\n                    [0.0, UNIT_SIZE * 0.8, 0.0],\n                    [0.0, 0.0, 1.0],\n                ]\n            )\n            translate = np.array(\n                [\n                    [1.0, 0.0, UNIT_SIZE * 0.5],\n                    [0.0, 1.0, UNIT_SIZE * 0.5],\n                    [0.0, 0.0, 0.0],\n                ]\n            )\n            transform = translate.dot(flip_y).dot(scale)\n\n            bmap = wx.Bitmap(UNIT_SIZE, UNIT_SIZE)\n            dc = wx.MemoryDC()\n            dc.SelectObject(bmap)\n            brush = wx.Brush(self.GetBackgroundColour())\n            dc.SetBackground(brush)\n            dc.Clear()\n\n            _drawShape(\n                dc,\n                self.geomType,\n                transform,\n                label=design.specifier,\n                description=design.flags or key,\n            )\n\n            dc.SelectObject(wx.NullBitmap)\n\n            img = wx.StaticBitmap(self, bitmap=bmap)\n            button = wx.ToggleButton(self, wx.ID_ANY, key)\n            self.assemButtons[button.GetId()] = button\n            self.buttonIdBySpecifier[design.specifier] = button.GetId()\n\n            self.Bind(wx.EVT_TOGGLEBUTTON, self.onToggle, button)\n\n            buttonSizer = wx.BoxSizer(wx.HORIZONTAL)\n            buttonSizer.Add(img)\n            buttonSizer.Add(button, 1, wx.EXPAND)\n            buttonSizer.AddSpacer(20)\n\n            sizer.Add(buttonSizer, 1, wx.EXPAND)\n\n            self.assemDesignsById[button.GetId()] = design\n\n        sizer.Add(wx.StaticText(self, wx.ID_ANY, \"Equilibrium Fuel Path:\"), 0, wx.ALIGN_CENTRE)\n\n        self.pathControl = _PathControl(self, viewer)\n        sizer.Add(self.pathControl)\n        
self.assemButtons.update(self.pathControl.getActivateButtons())\n\n        self.SetSizerAndFit(sizer)\n\n    def _setActiveAssemID(self, id: Optional[int]):\n        \"\"\"Make sure the appropriate button is on, but none others.\"\"\"\n        if self.activeAssemID is not None and self.activeAssemID != id:\n            # there is currently an active assem, and it isn't the requested one. Turn\n            # its button off.\n            self.assemButtons[self.activeAssemID].SetValue(False)\n\n        if id is not None:\n            # we are activating an assem ID. Turn its button on\n            self.assemButtons[id].SetValue(True)\n\n        self.activeAssemID = id\n\n    def onToggle(self, event):\n        \"\"\"\n        Respond to toggle events.\n\n        This makes sure that the right selector button is activated, and switches the\n        GUI mode into the proper one based on whether an assembly design is selected, or\n        the fuel path controls.\n        \"\"\"\n        if self.assemButtons[event.GetId()].GetValue():\n            # The button that generated the event is \"on\" (the ToggleButton assumes its new value\n            # before the event is propagated). We need to select whichever button it was.\n            setTo = event.GetId()\n        else:\n            # The button that generated the event is off, implying that the user clicked on the\n            # previously-selected button. 
Clear the active selection\n            setTo = None\n\n        self._setActiveAssemID(setTo)\n\n        mode = (\n            GridGui.Mode.PATH\n            if event.GetId() in self.pathControl.getActivateButtons() and setTo is not None\n            else GridGui.Mode.SPECIFIER\n        )\n\n        self.parent.setMode(mode)\n\n    def editorClicked(self):\n        self.pathControl.maybeIncrement()\n\n    def getSelectedAssem(self) -> Optional[Union[AssemblyBlueprint, Tuple[int, int]]]:\n        \"\"\"Return the currently-selected assembly design or fuel path indices.\"\"\"\n        if self.activeAssemID in self.assemDesignsById:\n            # We have an assembly design activated. return it\n            return self.assemDesignsById[self.activeAssemID]\n        elif self.activeAssemID in self.pathControl.getActivateButtons():\n            # we are in path selection mode, return stuff from the pathControl\n            return self.pathControl.getIndices(clear=False)\n        else:\n            return None\n\n    def getAssemToSet(self) -> Optional[Union[AssemblyBlueprint, Tuple[int, int]]]:\n        \"\"\"\n        Return the assembly design of fuel path tuple that a client should set.\n\n        This differs from ``getSelectedAssem`` in that it can incorporate more logic to enforce\n        certain rules, such as performing increments, masking things off based on other state etc.,\n        whereas ``getSelectedAssem`` should be more dumb and just return the state of the controls\n        themselves.\n        \"\"\"\n        if self.activeAssemID in self.assemDesignsById:\n            # We have an assembly design activated. 
return it\n            return self.assemDesignsById[self.activeAssemID]\n        elif self.activeAssemID in self.pathControl.getActivateButtons():\n            # we are in path selection mode, return stuff from the pathControl\n            return self.pathControl.getIndices(clear=True)\n        else:\n            return None\n\n    def setActiveAssem(self, assemDesign: Optional[Union[AssemblyBlueprint, tuple]]):\n        \"\"\"Override the selected assembly design from above.\"\"\"\n        specifier = None\n        if isinstance(assemDesign, AssemblyBlueprint):\n            specifier = assemDesign.specifier\n            self._setActiveAssemID(self.buttonIdBySpecifier[specifier])\n        elif isinstance(assemDesign, tuple):\n            self._setActiveAssemID(self.pathControl.activateButton.GetId())\n        elif assemDesign is None:\n            self._setActiveAssemID(None)\n\n\nclass GridGui(wx.ScrolledWindow):\n    \"\"\"\n    Visual editor for grid blueprints.\n\n    This is the actual viewer that displays the grid and grid blueprints contents, and responds to\n    mouse events. 
Under the hood, it uses a wx.PseudoDC to handle the drawing, which provides the\n    following benefits over a regular DC:\n\n     * Drawn objects can be associated with an ID, allowing parts of the drawing to be modified or\n       cleared without having to re-draw everything.\n     * The IDs associated with the objects can be used to distinguish what was clicked on in a mouse\n       event (though the support for this isn't super great, so we do have to do some of our own\n       object disambiguation).\n\n    The ``drawGrid()`` method is used to re-draw the entire geometry, whereas the ``applyAssem()``\n    method may be used to update a single assembly.\n    \"\"\"\n\n    class Mode(enum.IntEnum):\n        \"\"\"\n        Enumeration for what type of objects are currently being manipulated.\n\n        This can either be SPECIFIER, for laying out the initial core layout, or PATH for\n        manipulating fuel shuffling paths.\n        \"\"\"\n\n        # We use these values to map between selections in GUI elements, so do not go changing them\n        # willy-nilly.\n        SPECIFIER = 0\n        POSITION_IJ = 1\n        POSITION_RINGPOS = 2\n        PATH = 3\n\n        @property\n        def label(self):\n            if self == self.SPECIFIER:\n                return \"Specifier\"\n            elif self == self.PATH:\n                return \"Shuffle Path\"\n            elif self == self.POSITION_IJ:\n                return \"(i, j)\"\n            else:\n                return \"(Ring, Position)\"\n\n        @property\n        def isPosition(self):\n            return self in (self.POSITION_IJ, self.POSITION_RINGPOS)\n\n    def __init__(self, parent, bp=None, defaultGeom=geometry.CARTESIAN):\n        \"\"\"\n        Create a new GridGui.\n\n        Parameters\n        ----------\n        parent : wx.Window\n            The parent control\n\n        bp : set of grid blueprints, optional\n            This should be the ``gridDesigns`` section of a root 
Blueprints object. If\n            not provided, a dictionary will be created with an empty \"core\" grid blueprint.\n        \"\"\"\n        wx.ScrolledWindow.__init__(self, parent, wx.ID_ANY, (0, 0), size=(250, 150), style=wx.BORDER_DEFAULT)\n\n        self.parent = parent\n\n        if bp is None:\n            bp = {\"core\": GridBlueprint(name=\"core\", gridContents=dict(), geom=defaultGeom)}\n\n        self.bp = bp\n        self.coreBp = bp[\"core\"]\n        self.eqFuelPathBp = bp.get(\"coreEqPath\", None)\n\n        self.numRings = 7\n\n        self._grid = None\n        self._geomType = None\n\n        # What are we displaying/modifying\n        self._mode = GridGui.Mode.SPECIFIER\n\n        grid = self.coreBp.construct()\n        if self.coreBp.gridContents:\n            maxRings = max(grid.getRingPos(idx)[0] for idx in self.coreBp.gridContents.keys())\n            self.numRings = max(7, maxRings)\n\n        # Need to assign this after setting numRings, since we need a grid to\n        # determine numRings, but need numRings to properly set the self.grid\n        # property.\n        self.grid = grid\n\n        # If we are in the middle of handling some click events, what are the indices of\n        # the clicked-on region\n        self.clickIndices = None\n\n        self.Bind(wx.EVT_PAINT, self.onPaint)\n        self.Bind(wx.EVT_MOUSE_EVENTS, self.onMouse)\n\n        self.contextMenuIDs = {\n            item[0]: (wx.NewIdRef(), item[1])\n            for item in [\n                (\"Select assembly type\", self.onSelectAssembly),\n                (\"Make ring like this\", self.onFillRing),\n                (\"Clear ring\", self.onClearRing),\n            ]\n        }\n        self.contextMenu = wx.Menu()\n        for text, info in self.contextMenuIDs.items():\n            self.contextMenu.Append(info[0], text)\n            self.Bind(wx.EVT_MENU, info[1], info[0])\n\n        self.pdc = wx.adv.PseudoDC()\n\n        # Might be a good idea to implement this 
with bidict, but maybe not worth the\n        # dependency\n        self.pdcIdToIndices: Dict[int, Tuple[int, int, int]] = dict()\n        self.indicesToPdcId: Dict[Tuple[int, int, int], int] = dict()\n        # map from a PeudoDC ID (e.g. a hex) to the pixel location of the shapes\n        # center. This is used to distinguish between a multi-object hit on click\n        # events. While the FindObjects docs purport to distinguish objects\n        # pixel-by-pixel, it seems like this is a lie, and that they simply use the\n        # bounding boxes provided by the drawer. Laaaaame.\n        self.pdcIdToCenter: Dict[int, wx.Point] = dict()\n        # The ID to use for all arrow drawing. lets us clear and re-draw easily\n        self._arrowPdcId = wx.NewIdRef()\n\n        self.drawGrid()\n\n    @property\n    def grid(self):\n        return self._grid\n\n    @grid.setter\n    def grid(self, newGrid):\n        self._grid = newGrid\n        self._geomType = self._grid.geomType\n        self._idxByRing = [list() for _ in range(self.numRings)]\n        for idx, loc in self._grid.items():\n            ring, _pos = self._grid.getRingPos(idx)\n            if not self._grid.locatorInDomain(loc, symmetryOverlap=False) or ring > self.numRings:\n                continue\n            self._idxByRing[ring - 1].append(idx)\n\n    @property\n    def mode(self):\n        return self._mode\n\n    @mode.setter\n    def mode(self, newMode):\n        if self.mode == newMode:\n            return\n\n        self._mode = newMode\n        self.drawGrid()\n\n        if self._mode == GridGui.Mode.PATH:\n            self.drawArrows()\n\n        self.Refresh()\n\n    @property\n    def activeBlueprints(self):\n        if self.mode == GridGui.Mode.SPECIFIER:\n            return self.coreBp\n        elif self.mode == GridGui.Mode.PATH:\n            return self.eqFuelPathBp\n        elif self.mode.isPosition:\n            return self.coreBp\n        else:\n            raise ValueError(\"Unsupported 
mode `{}`\".format(self.mode))\n\n    def growToFullCore(self):\n        if geometry.FULL_CORE not in self.coreBp.symmetry:\n            self.coreBp.expandToFull()\n            if self.eqFuelPathBp is not None:\n                self.eqFuelPathBp.expandToFull()\n            self.grid = self.coreBp.construct()\n            self.drawGrid()\n            self.Refresh()\n\n    def _getWindowCoordinates(self, event):\n        xv, yv = self.GetViewStart()\n        dx, dy = self.GetScrollPixelsPerUnit()\n        xOffset = dx * xv\n        yOffset = dy * yv\n        x = event.GetX()\n        y = event.GetY()\n        xScrolled = x + xOffset\n        yScrolled = y + yOffset\n\n        return xScrolled, yScrolled\n\n    def _getIndicesFromEvent(self, event) -> Optional[Tuple[int, int, int]]:\n        obj = self._getObjectFromEvent(event)\n\n        if obj is None:\n            return None\n\n        return self.pdcIdToIndices[obj]\n\n    def _getObjectFromEvent(self, event) -> Optional[int]:\n        def _distanceish(p1, p2):\n            return (p1.x - p2.x) ** 2 + (p1.y - p2.y) ** 2\n\n        x, y = self._getWindowCoordinates(event)\n        objs = self.pdc.FindObjects(x, y, radius=1)\n\n        if not objs:\n            return None\n\n        if len(objs) == 1:\n            return objs[0]\n\n        # list of tuples with (distance, ID)\n        sortableObjectIds = [(_distanceish(wx.RealPoint(x, y), self.pdcIdToCenter[obj]), obj) for obj in objs]\n\n        return min(sortableObjectIds)[1]\n\n    def drawGrid(self):\n        \"\"\"Wipe out anything in the drawing and re-draw everything.\"\"\"\n        self.pdc.Clear()\n        self.pdc.RemoveAll()\n\n        self.pdcIdToIndices = dict()\n        self.indicesToPdcId = dict()\n        self.pdcIdToCenter = dict()\n        self.pdc.SetPen(wx.Pen(\"BLACK\", 1))\n\n        gridScale = self._gridScale(self.grid)\n\n        # flip y-coordinates, enlarge\n        flip_y = np.array([[1.0, 0.0, 0.0], [0.0, -1.0, 0.0], [0.0, 0.0, 
1.0]])\n        scale = np.array(\n            [\n                [UNIT_SIZE / gridScale[0], 0.0, 0.0],\n                [0.0, UNIT_SIZE / gridScale[1], 0.0],\n                [0.0, 0.0, 1.0],\n            ]\n        )\n\n        # uniform grid, so all shapes have the same scale\n        model = np.array([[gridScale[0], 0.0, 0.0], [0.0, gridScale[1], 0.0], [0.0, 0.0, 1.0]])\n        self.transform = flip_y.dot(scale)\n        rect = self._calcGridBounds()\n        self.SetVirtualSize((rect.Width, rect.Height))\n        self.SetScrollRate(20, 20)\n        # Global translation used to center the view\n        translate = _translationMatrix(-1 * rect.Left, -1 * rect.Top)\n        self.transform = translate.dot(self.transform)\n\n        brush = wx.Brush(wx.Colour(128, 128, 128, 0))\n        self.pdc.SetBrush(brush)\n\n        for idx, loc in self.grid.items():\n            ring, _ = self.grid.getRingPos(idx)\n            if not self.grid.locatorInDomain(loc) or ring > self.numRings:\n                continue\n\n            id = wx.NewIdRef()\n            self.pdcIdToIndices[id] = idx\n            self.indicesToPdcId[idx] = id\n            self.pdc.SetId(id)\n\n            label, description, bold = self._getLabel(idx)\n\n            coords = np.array(self.grid.getCoordinates(idx))[:2]\n            offset = _translationMatrix(*coords)\n\n            boundingBox = _drawShape(\n                self.pdc,\n                self._geomType,\n                self.transform,\n                model=offset.dot(model),\n                label=label,\n                description=description,\n                bold=bold,\n            )\n            center = (boundingBox.TopLeft + boundingBox.BottomRight) / 2\n            self.pdcIdToCenter[id] = center\n            self.pdc.SetIdBounds(id, boundingBox)\n\n    def drawArrows(self):\n        \"\"\"Draw fuel path arrows.\"\"\"\n        if self.mode != GridGui.Mode.PATH:\n            return\n\n        self.pdc.ClearId(self._arrowPdcId)\n  
      self.pdc.SetId(self._arrowPdcId)\n\n        goodPen = wx.Pen(wx.Colour(0, 0, 0), width=1, style=wx.PENSTYLE_DOT)\n        badPen = wx.Pen(wx.Colour(255, 0, 0))\n\n        thisPath = self.parent.getSelectedPath()\n\n        stuffInPath = sorted(\n            [(index, idx) for idx, (path, index) in self.eqFuelPathBp.gridContents.items() if path == thisPath]\n        )\n        touchedIndices = {entry[0] for entry in stuffInPath}\n        indexGraph = {index: list() for index in touchedIndices}\n        for index, location in stuffInPath:\n            indexGraph[index].append(location)\n\n        # python 3.6+ should maintain key order from the sorted stuffInPath\n        keys = list(indexGraph.keys())\n        for i1, i2 in zip(keys[:-1], keys[1:]):\n            pen = goodPen\n            if i2 - i1 != 1:\n                pen = badPen\n            if len(indexGraph[i1]) > 1 or len(indexGraph[i2]) > 1:\n                pen = badPen\n\n            self.pdc.SetPen(pen)\n\n            for fromIdx in indexGraph[i1]:\n                for toIdx in indexGraph[i2]:\n                    p1 = self.grid.getCoordinates(fromIdx + (0,))\n                    p2 = self.grid.getCoordinates(toIdx + (0,))\n\n                    p1[2] = 1.0\n                    p2[2] = 1.0\n\n                    p1 = self.transform.dot(p1)[0:2]\n                    p2 = self.transform.dot(p2)[0:2]\n                    p1 = [int(v) for v in p1]\n                    p2 = [int(v) for v in p2]\n                    self.pdc.DrawLines([wx.Point(*p1), wx.Point(*p2)])\n\n    def _getLabel(self, idx) -> Tuple[str, Optional[str], bool]:\n        \"\"\"\n        Given (i, j, k) indices, return information about the object at that location.\n\n        This will return a tuple containing:\n         - The label to actually display in the GUI\n         - Optionally, a description that can be turned into Flags and used to determine\n           what the object should look like\n         - Whether the object should 
be drawn in its full/bold representation\n        \"\"\"\n        ring, pos = self.grid.getRingPos(idx)\n        specifier = self.coreBp.gridContents.get(tuple(idx[0:2]), None)\n        aDesign = None\n        description = None\n        bold = True\n        if specifier is not None:\n            aDesign = self.parent.getAssemDesignBySpecifier(specifier)\n            description = aDesign.flags or aDesign.name\n\n        if self.mode == GridGui.Mode.SPECIFIER:\n            if aDesign is not None:\n                label = specifier\n            else:\n                label = \"{}, {}\".format(ring, pos)\n        elif self.mode == GridGui.Mode.PATH:\n            selectedPath = self.parent.getSelectedPath()\n            if self.eqFuelPathBp is None:\n                # We need to add a grid blueprint for the equilibrium fuel path\n                self.bp[\"coreEqPath\"] = GridBlueprint(\"coreEqPath\", self.coreBp.geom)\n                self.eqFuelPathBp = self.bp[\"coreEqPath\"]\n            if self.eqFuelPathBp.gridContents is None:\n                _grid = self.eqFuelPathBp.construct()\n\n            path, index = self.eqFuelPathBp.gridContents.get(idx[0:2], (None, None))\n            if path != selectedPath:\n                bold = False\n            if path is not None and index is not None:\n                label = \"({}, {})\".format(path, index)\n            else:\n                label = \"-\"\n        elif self.mode == GridGui.Mode.POSITION_RINGPOS:\n            label = \"{}, {}\".format(ring, pos)\n        elif self.mode == GridGui.Mode.POSITION_IJ:\n            label = \"{}, {}\".format(*idx[0:2])\n\n        else:\n            raise ValueError(\"Unsupported mode `{}`\".format(self.mode))\n\n        return label, description, bold\n\n    def setNumRings(self, n: int):\n        \"\"\"Change the number of rings that should be drawn.\"\"\"\n        self.numRings = n\n        if self.grid.geomType == geometry.GeomType.HEX:\n            grid = 
grids.HexGrid.fromPitch(1, numRings=self.numRings)\n        elif self.grid.geomType == geometry.GeomType.CARTESIAN:\n            rectangle = [1.0, 1.0]\n            if self.coreBp.latticeDimensions is not None:\n                rectangle = [\n                    self.coreBp.latticeDimensions.x,\n                    self.coreBp.latticeDimensions.y,\n                ]\n            grid = grids.CartesianGrid.fromRectangle(*rectangle, numRings=self.numRings)\n        else:\n            raise ValueError(\"Only support Hex and Cartesian grids, not {}\".format(self.grid.geomType))\n\n        grid.symmetry = self.grid.symmetry\n        grid.geomType = self.grid.geomType\n        self.grid = grid\n\n        self.drawGrid()\n        self.Refresh()\n\n    def onPaint(self, event, dc=None):\n        selfPaint = dc is None\n        dc = dc or wx.BufferedPaintDC(self)\n        dc.SetBackground(wx.Brush(wx.Colour(255, 255, 255, 255)))\n        dc.Clear()\n\n        self.DoPrepareDC(dc)\n\n        if selfPaint:\n            xv, yv = self.GetViewStart()\n            dx, dy = self.GetScrollPixelsPerUnit()\n            region = self.GetUpdateRegion()\n            region.Offset(dx * xv, dy * yv)\n\n            _ = region.GetBox()\n\n        self.pdc.DrawToDC(dc)\n\n    def onMouse(self, event):\n        if event.RightUp():\n            self.onContextMenu(event)\n            return event.Skip()\n\n        if event.LeftDown():\n            _ = event.GetX()\n            _ = event.GetY()\n\n            objId = self._getObjectFromEvent(event)\n\n            if objId is None:\n                return event.Skip()\n\n            idx = tuple(self.pdcIdToIndices[objId])[0:2]\n            self.parent.objectClicked(idx)\n            assem = self.parent.getAssemToSet()\n            self.applyAssem(objId, assem)\n\n        if event.LeftUp():\n            pass\n\n        return event.Skip()\n\n    def onContextMenu(self, event):\n        self.clickIndices = self._getIndicesFromEvent(event)\n        
self.PopupMenu(self.contextMenu)\n        self.clickIndices = None\n\n    def onSelectAssembly(self, event):\n        specifier = self.coreBp.gridContents.get(self.clickIndices[0:2], None)\n        aDesign = self.parent.getAssemDesignBySpecifier(specifier) if specifier is not None else None\n        self.parent.setActiveAssem(aDesign)\n\n    def onFillRing(self, event):\n        ring, _ = self.grid.getRingPos(self.clickIndices)\n        specifier = self.coreBp.gridContents.get(self.clickIndices[0:2], None)\n        aDesign = self.parent.getAssemDesignBySpecifier(specifier) if specifier is not None else None\n        for idx in self._idxByRing[ring - 1]:\n            self.applyAssem(self.indicesToPdcId[idx], aDesign)\n\n    def onClearRing(self, event):\n        ring, _ = self.grid.getRingPos(self.clickIndices)\n        for idx in self._idxByRing[ring - 1]:\n            self.applyAssem(self.indicesToPdcId[idx], None)\n\n    def applyAssem(self, pdcId, value: Optional[Union[AssemblyBlueprint, Tuple[int, int]]]):\n        \"\"\"\n        Apply the passed assembly design or equilibrium path indices for the desired\n        object and redraw it.\n        \"\"\"\n        if self.activeBlueprints is None:\n            return\n\n        # uniform grid, so all shapes have the same scale\n        gridScale = self._gridScale(self.grid)\n        model = np.array([[gridScale[0], 0.0, 0.0], [0.0, gridScale[1], 0.0], [0.0, 0.0, 1.0]])\n\n        idx = tuple(self.pdcIdToIndices[pdcId])\n        idx2 = idx[0:2]\n        if value is not None:\n            if isinstance(value, AssemblyBlueprint):\n                assert self.mode in {\n                    GridGui.Mode.SPECIFIER,\n                    GridGui.Mode.POSITION_IJ,\n                    GridGui.Mode.POSITION_RINGPOS,\n                }\n                self.activeBlueprints.gridContents[idx2] = value.specifier\n            elif isinstance(value, tuple):\n                assert self.mode == GridGui.Mode.PATH\n                
self.activeBlueprints.gridContents[idx2] = value\n        else:\n            # Clear whatever we clicked on\n            if idx2 in self.activeBlueprints.gridContents:\n                del self.activeBlueprints.gridContents[idx2]\n\n        self.pdc.ClearId(pdcId)\n        self.pdc.SetId(pdcId)\n\n        coords = np.array(self.grid.getCoordinates(idx))\n        model = _translationMatrix(*coords[0:2]).dot(model)\n\n        label, description, bold = self._getLabel(idx)\n\n        boundingBox = _drawShape(\n            self.pdc,\n            self._geomType,\n            self.transform,\n            model=model,\n            label=label,\n            description=description,\n            bold=bold,\n        )\n        self.pdc.SetIdBounds(pdcId, boundingBox)\n\n        self.drawArrows()\n        self.Refresh()\n\n    @staticmethod\n    def _gridScale(grid):\n        if isinstance(grid, grids.HexGrid):\n            # Unit steps aren't aligned with the x,y coordinate system for Hex, so just\n            # use the y dimension, assuming that's the proper flat-to-flat dimension\n            coordScale = np.array([grid._unitSteps[1][1]] * 2)\n        elif isinstance(grid, grids.CartesianGrid):\n            # Cartesian grids align with the GUI coordinates, so just use unit steps\n            # directly\n            coordScale = np.array([grid._unitSteps[0][0], grid._unitSteps[1][1]])\n        return coordScale\n\n    def _calcGridBounds(self) -> wx.Rect:\n        \"\"\"\n        Return the width and height (in pixels) that are needed to display the passed grid.\n\n        This allows us to dynamically size the scrolled area, and to offset the geometry\n        properly into the center of the screen.\n        \"\"\"\n        inDomain = {\n            idx: loc\n            for idx, loc in self.grid.items()\n            if self.grid.locatorInDomain(loc) and self.grid.getRingPos(loc)[0] <= self.numRings\n        }\n\n        _ = self._gridScale(self.grid)\n\n        allCenters 
= np.array([self.grid.getCoordinates(idx)[:2] for idx in inDomain])\n        minXY = np.amin(allCenters, axis=0)\n        maxXY = np.amax(allCenters, axis=0)\n\n        topRight = np.append([maxXY[1], maxXY[1]], 1.0)\n        bottomLeft = np.append([minXY[0], minXY[1]], 1.0)\n        nudge = np.array([UNIT_MARGIN, -UNIT_MARGIN, 0.0])\n\n        bottomRight = (self.transform.dot(topRight) + nudge).tolist()\n        topLeft = (self.transform.dot(bottomLeft) - nudge).tolist()\n\n        bottomRight = [int(v) for v in bottomRight]\n        topLeft = [int(v) for v in topLeft]\n\n        return wx.Rect(wx.Point(*topLeft[:2]), wx.Point(*bottomRight[:2]))\n\n\nclass GridBlueprintControl(wx.Panel):\n    \"\"\"\n    A GUI for manipulating core layouts.\n\n    The original intent of this is to serve as a stand-in replacement for the current\n    \"HexDragger\". With further work, this could be made to function as a more general\n    tool for manipulating grids of any sort.\n    \"\"\"\n\n    _wildcard = \"YAML blueprints (*.yaml)|*.yaml|All files (*.*)|*.*\"\n\n    _defaultGeom = geometry.CARTESIAN\n\n    def __init__(self, parent):\n        wx.Panel.__init__(self, parent, wx.ID_ANY, size=(200, 30))\n\n        bp = Blueprints()\n        bp.gridDesigns = gridBlueprint.Grids()\n\n        # cs only needed for migrations. Realistically, this would be set from a\n        # higher-level GUI container. 
If it is not set and migrations are needed\n        # anyways, the user will be prompted.\n        self._cs = None\n        self._fName = None\n\n        self._bp = bp\n\n        self.clicker = GridGui(self, defaultGeom=self._defaultGeom)\n        self.assemblyPalette = _AssemblyPalette(self, None, dict(), self.clicker)\n\n        self.controls = _GridControls(self)\n        self.controls.setNumRings(self.clicker.numRings)\n\n        sizer = wx.BoxSizer(wx.VERTICAL)\n\n        hsizer = wx.BoxSizer(wx.HORIZONTAL)\n        hsizer.Add(self.clicker, 1, wx.EXPAND)\n        hsizer.Add(self.assemblyPalette, 0)\n\n        sizer.Add(hsizer, 1, wx.EXPAND)\n        sizer.Add(self.controls, 0)\n\n        self.sizer = sizer\n        self.SetSizerAndFit(self.sizer)\n\n    @property\n    def bp(self):\n        return self._bp\n\n    @bp.setter\n    def bp(self, bp):\n        self._bp = bp\n\n        geomType = geometry.GeomType.fromStr(bp.gridDesigns[\"core\"].geom)\n\n        # Make new assembly palette and editor\n        newClicker = GridGui(self, bp=self.bp.gridDesigns)\n        newPalette = _AssemblyPalette(self, geomType, bp.assemDesigns, newClicker)\n\n        self.sizer.Replace(self.assemblyPalette, newPalette, recursive=True)\n        self.sizer.Replace(self.clicker, newClicker, recursive=True)\n\n        self.assemblyPalette.Destroy()\n        self.clicker.Destroy()\n\n        self.assemblyPalette = newPalette\n        self.clicker = newClicker\n\n        self.controls.setNumRings(self.clicker.numRings)\n\n        self.sizer.Layout()\n        self.SendSizeEventToParent()\n\n    @property\n    def grid(self):\n        return self.clicker.grid\n\n    def setNumRings(self, n: int):\n        self.clicker.setNumRings(n)\n\n    def setActiveAssem(self, aDesign):\n        self.assemblyPalette.setActiveAssem(aDesign)\n        self.clicker.mode = (\n            GridGui.Mode.SPECIFIER if isinstance(aDesign, (AssemblyBlueprint, type(None))) else GridGui.Mode.PATH\n        )\n\n    
def setMode(self, mode: GridGui.Mode):\n        self.clicker.mode = mode\n\n        # make sure that gui elements that have to do with mode setting are consistent\n        if mode == GridGui.Mode.isPosition:\n            self.assemblyPalette.setActiveAssem(None)\n        self.controls.labelMode.SetSelection(mode)\n\n    def expandToFullCore(self, event):\n        self.clicker.growToFullCore()\n\n    def objectClicked(self, _idx):\n        \"\"\"\n        Notify relevant controls that the object at the passed indices has been\n        activated.\n\n        This is needed to make the auto-increment stuff work in the fuel path editor.\n        Without some sort of event that provides a positive assertion that the user is\n        trying to interact with the layout, we can't know when to increment.\n        \"\"\"\n        self.assemblyPalette.editorClicked()\n\n    def saveImage(self):\n        \"\"\"\n        Save the core layout to an image.\n\n        Currently this only supports PNG images for simplicity. wxpython does not\n        attempt to infer the file type based on extension, so we would need to make a\n        file extension-to-format mapping.\n        \"\"\"\n        dlg = wx.FileDialog(\n            self,\n            message=\"Save image to...\",\n            style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT,\n            wildcard=\"PNG images (.png)|*.png\",\n        )\n\n        if dlg.ShowModal() == wx.ID_OK:\n            path = dlg.GetPath()\n        else:\n            return\n\n        size = self.clicker.GetVirtualSize()\n        image = wx.Bitmap(size)\n\n        dc = wx.MemoryDC()\n        dc.SelectObject(image)\n\n        self.clicker.onPaint(None, dc=dc)\n        dc.SelectObject(wx.NullBitmap)\n        image.SaveFile(path, wx.BITMAP_TYPE_PNG)\n\n    def save(self, stream=None, full=False):\n        \"\"\"\n        Save the blueprints to the passed stream, if provided. 
Otherwise prompt for a\n        file to save to.\n\n        This can save either the entire blueprints, or just the `grids:` section of the\n        blueprints, based on the passed ``full`` argument. Saving just the grid\n        blueprints can be useful when cobbling blueprints together with !include flags.\n        \"\"\"\n        if stream is None:\n            self._saveNoStream(full)\n        else:\n            saveToStream(stream, self.bp, full, tryMap=True)\n\n    def _saveNoStream(self, full=False):\n        \"\"\"Prompt for a file to save to.\n\n        This can save either the entire blueprints, or just the `grids:` section of the\n        blueprints, based on the passed ``full`` argument. Saving just the grid\n        blueprints can be useful when cobbling blueprints together with !include flags.\n        \"\"\"\n        # Prompt the user for a file name, open it, and call ourself again with that\n        # as the stream argument\n        if self._fName is None:\n            wd = os.getcwd()\n        else:\n            wd = os.path.split(self._fName)[0]\n\n        # Don't use the blueprints filename as the default if we are only saving the\n        # grids section; doing so may encourage users to overwrite their main\n        # blueprints file.\n        if full:\n            fName = self._fName or \"\"\n        else:\n            fName = \"\"\n\n        title = \"Save blueprints to...\" if full else \"Save grid designs to...\"\n\n        dlg = wx.FileDialog(\n            self,\n            message=title,\n            defaultDir=wd,\n            defaultFile=fName,\n            wildcard=self._wildcard,\n            style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT,\n        )\n\n        if dlg.ShowModal() == wx.ID_OK:\n            path = dlg.GetPath()\n        else:\n            return\n\n        # Disallow overwriting the main blueprints with the grids section\n        if not full and pathlib.Path(path).exists() and pathlib.Path(path).samefile(self._fName):\n     
       message = (\n                \"The chosen path, `{}` is the same as the main blueprints \"\n                'file. This tool only saves the \"grids\" section of the '\n                \"blueprints file, so saving over the original top-level blueprints \"\n                \"will lead to data loss. Try again with a different name.\".format(path)\n            )\n\n            with wx.MessageDialog(\n                self,\n                message,\n                \"Overwriting top-level blueprints!\",\n                style=wx.ICON_WARNING,\n            ) as dlg:\n                dlg.ShowModal()\n                return\n\n        # Try writing to an internal buffer before opening the file for write. This\n        # way to don't destroy anything unless we know we have something with which\n        # to replace it.\n        bpStream = io.StringIO()\n        saveToStream(bpStream, self.bp, full, tryMap=True)\n        with open(path, \"w\") as stream:\n            stream.write(bpStream.getvalue())\n\n    def open(self, _event):\n        if self._fName is None:\n            wd = os.getcwd()\n        else:\n            wd = os.path.split(self._fName)[0]\n\n        dlg = wx.FileDialog(\n            self,\n            message=\"Open blueprints file...\",\n            defaultDir=wd,\n            defaultFile=\"\",\n            wildcard=self._wildcard,\n            style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST,\n        )\n\n        if dlg.ShowModal() == wx.ID_OK:\n            path = dlg.GetPath()\n\n            self.loadFile(path)\n\n    def new(self, _event):\n        \"\"\"\n        Create a Dialog with options to make a new grid blueprint, then make it and\n        rejigger everything to use it.\n        \"\"\"\n        with NewGridBlueprintDialog(self) as dlg:\n            if dlg.ShowModal() == wx.ID_OK:\n                # Make new bp\n                gridBp = dlg.getGridBlueprint()\n                if self.bp is not None:\n                    
self.bp.gridDesigns[gridBp.name] = gridBp\n                self.bp = self.bp\n\n    def loadFile(self, fName, cs=None):\n        \"\"\"Load a new blueprints file, refreshing pretty much everything.\"\"\"\n        self._fName = fName\n        self._cs = cs\n        with open(fName, \"r\") as bpYaml:\n            bpYaml = textProcessors.resolveMarkupInclusions(bpYaml, root=pathlib.Path(fName).parent)\n            bp = Blueprints.load(bpYaml)\n            if bp.gridDesigns is None or \"core\" not in bp.gridDesigns:\n                cs = self._cs or self._promptForCs()\n                if cs is None:\n                    # We didn't get a CS from the user, so cannot migrate old\n                    # blueprints. Give up.\n                    return\n\n                migrate(bp, cs)\n\n        self.bp = bp\n\n    def getAssemDesignBySpecifier(self, specifier):\n        for _key, design in self.bp.assemDesigns.items():\n            if design.specifier == specifier:\n                return design\n\n        raise KeyError(\"Could not find an Assembly design with specifier `{}`\".format(specifier))\n\n    def getAssemToSet(self):\n        return self.assemblyPalette.getAssemToSet()\n\n    def getSelectedPath(self):\n        \"\"\"\n        Return the fuel path index that is currently selected.\n\n        This is used to route the state of the _AssemblyPalette controls to things that\n        need to know about such things (arrow drawing, whether objects should be bold,\n        etc.)\n        \"\"\"\n        assem = self.assemblyPalette.getSelectedAssem()\n        assert isinstance(assem, tuple)\n        return assem[0]\n\n    def _promptForCs(self) -> Optional[Settings]:\n        \"\"\"\n        Ask the user for a case settings file to locate the appropriate geom file to\n        perform blueprint migrations.\n        \"\"\"\n        if self._fName is None:\n            wd = os.getcwd()\n        else:\n            wd = os.path.split(self._fName)[0]\n\n        dlg = 
wx.FileDialog(\n            self,\n            message=\"Migrations needed. Please provide a settings file...\",\n            defaultDir=wd,\n            defaultFile=\"\",\n            style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST,\n        )\n\n        if dlg.ShowModal() == wx.ID_OK:\n            path = dlg.GetPath()\n            return Settings(path)\n        return None\n\n\nclass HelpDialog(wx.Dialog):\n    def __init__(self, parent):\n        wx.Dialog.__init__(self, parent, id=wx.ID_ANY, title=\"About the grid editor...\")\n\n        helpTxt = \"\"\"\nThe Grid Editor is a rudimentary tool for manipulating the contents and basic structure\nof Grid Blueprints. Rather than being a full-featured reactor editor GUI, it intends to\nhelp in the specific task of laying objects out in a grid, which can be frustrating or\ntedious to do by hand in a text editor.\n\nSince this is not a general-purpose blueprint editor, this will only save the \"grids\"\nsection of a blueprints file, which will then need to be incorporated into a top-level\nblueprints input, typically by !include-ing from the host blueprints.\n\nWhen opening a blueprints file, the root blueprints should be provided, since the Editor\nuses the assembly designs to populate the assembly palette on the right.\n\nControls\n--------\nLeft-click in map: Apply the selected assembly design to the clicked location. If no\nassembly is selected, clear the assembly design in the licked location.\n\nRight-click in map: Summon context menu with useful tools.\n\n\"Num. Rings\" spinner: Modify the number of rings that the displayed grid should span.\nReducing the number of rings below the region with defined assemblies *will not* clear\nthose regions.\n\n\"Expand to full core\": Expand a 1/N-th reactor map into full symmetry. 
This will honor\nthe periodic/reflective boundary conditions as specified.\n\nLabel display drop-down: Select what should be displayed at each grid location.\n\nSave grid blueprints: Save just the grid blueprints to a file. This will need to be\nincorporated into a top-level blueprints file. To prevent loss of data, it will try to\nprevent overwriting the original blueprints file that was opened.\n\nOpen blueprints: Open a new top-level blueprints file.\n\nNew grid blueprints: Create a new grid blueprint, allowing configuration of the geometry\ntype, domain, and boundary conditions.\n\n\"\"\"\n\n        self.Sizer = wx.BoxSizer(wx.VERTICAL)\n        txt = wx.StaticText(self, label=helpTxt)\n        ok = wx.Button(self, id=wx.ID_OK)\n\n        self.Sizer.Add(txt)\n        self.Sizer.Add(ok)\n\n        self.Fit()\n\n\nclass NewGridBlueprintDialog(wx.Dialog):\n    \"\"\"Dialog box for configuring a new grid blueprint.\"\"\"\n\n    # these provide stable mappings from the wx.Choice control indices to the respective geom types\n    _geomFromIdx = {i: geomType for i, geomType in enumerate({geometry.GeomType.CARTESIAN, geometry.GeomType.HEX})}\n    _idxFromGeom = {geomType: i for i, geomType in _geomFromIdx.items()}\n\n    def __init__(self, parent):\n        wx.Dialog.__init__(self, parent, id=wx.ID_ANY, title=\"New Grid Blueprint...\")\n\n        nameLabel = wx.StaticText(self, label=\"Grid name:\")\n        self.gridName = wx.TextCtrl(self, value=\"core\")\n        nameSizer = wx.BoxSizer(wx.HORIZONTAL)\n        nameSizer.Add(nameLabel, 0)\n        nameSizer.Add(self.gridName, 1, wx.EXPAND)\n\n        self.geomType = wx.Choice(\n            self,\n            id=wx.ID_ANY,\n            choices=[gt.label for gt in self._geomFromIdx.values()],\n        )\n\n        self.Bind(wx.EVT_CHOICE, self.onSelectGeomType, self.geomType)\n\n        # Domain controls\n        self.throughCenter = wx.CheckBox(self, id=wx.ID_ANY, label=\"Through Center Assembly\")\n        
self.domainFull = wx.RadioButton(self, id=wx.ID_ANY, label=\"Full Core\", style=wx.RB_GROUP)\n        self.domain3 = wx.RadioButton(self, id=wx.ID_ANY, label=\"1/3 Core\")\n        self.domain4 = wx.RadioButton(self, id=wx.ID_ANY, label=\"1/4 Core\")\n\n        domainBox = wx.StaticBoxSizer(wx.VERTICAL, self, label=\"Domain\")\n        domainBox.Add(self.domainFull, 0)\n        domainBox.Add(self.domain3, 0)\n        domainBox.Add(self.domain4, 0)\n        domainBox.Add(self.throughCenter, 0)\n\n        self.Bind(wx.EVT_RADIOBUTTON, self.onDomainChange)\n\n        # Symmetry controls\n        self.symmetryFull = wx.RadioButton(self, id=wx.ID_ANY, style=wx.RB_GROUP, label=\"Full\")\n        self.periodic = wx.RadioButton(self, id=wx.ID_ANY, label=\"Periodic\")\n        self.reflective = wx.RadioButton(self, id=wx.ID_ANY, label=\"Reflective\")\n\n        symmetryBox = wx.StaticBoxSizer(wx.VERTICAL, self, label=\"Symmetry\")\n        symmetryBox.Add(self.symmetryFull, 0)\n        symmetryBox.Add(self.periodic, 0)\n        symmetryBox.Add(self.reflective, 0)\n\n        # arrange the two boxes horizontally\n        gridControls = wx.BoxSizer(wx.HORIZONTAL)\n        gridControls.Add(domainBox, 0)\n        gridControls.Add(symmetryBox, 0)\n\n        ok = wx.Button(self, wx.ID_OK)\n        cancel = wx.Button(self, wx.ID_CANCEL)\n\n        self.Sizer = wx.BoxSizer(wx.VERTICAL)\n        self.Sizer.Add(nameSizer, 1, wx.ALL, 0)\n        self.Sizer.Add(self.geomType, 0, wx.ALL, 0)\n        self.Sizer.Add(gridControls, 0, wx.ALL, 0)\n        self.Sizer.Add(wx.StaticLine(self), 0, wx.EXPAND)\n\n        okCancelSizer = wx.BoxSizer(wx.HORIZONTAL)\n        okCancelSizer.Add(ok)\n        okCancelSizer.Add(cancel)\n\n        self.Sizer.Add(okCancelSizer, 0, wx.EXPAND | wx.ALL, 10)\n\n        self.selectGeomType(geometry.GeomType.HEX)\n        self.Fit()\n\n    def selectGeomType(self, geom):\n        \"\"\"Enable/disable relevant controls for the selected geom type.\"\"\"\n        # 
make sure the geom type Choice is in sync. This function doesn't have to be\n        # called from the event handler.\n        self.geomType.SetSelection(self._idxFromGeom[geom])\n        # switch to full-core, since it's always available\n        self.domainFull.SetValue(True)\n        self.symmetryFull.SetValue(True)\n        self._toggleControls()\n\n    def onSelectGeomType(self, _event):\n        self.selectGeomType(self._geomFromIdx[self.geomType.GetSelection()])\n\n    def _toggleControls(self):\n        \"\"\"Make sure that the appropriate controls are enabled/disabled.\"\"\"\n        geom = self._geomFromIdx[self.geomType.GetSelection()]\n        full = self.domainFull.GetValue()\n        self.throughCenter.Enable(enable=geom == geometry.GeomType.CARTESIAN)\n        self.symmetryFull.Enable(enable=full)\n        self.domain3.Enable(enable=geom == geometry.GeomType.HEX)\n        self.domain4.Enable(enable=geom == geometry.GeomType.CARTESIAN)\n        self.periodic.Enable(enable=not full)\n        self.reflective.Enable(enable=not full and geom == geometry.GeomType.CARTESIAN)\n        if full:\n            self.symmetryFull.SetValue(True)\n\n    def onDomainChange(self, event):\n        if event.EventObject in {self.domainFull, self.domain3, self.domain4}:\n            if self.domainFull.GetValue():\n                self.symmetryFull.SetValue(True)\n            else:\n                self.periodic.SetValue(True)\n        self._toggleControls()\n\n    def getGridBlueprint(self):\n        \"\"\"Using the state of the dialog controls, return a corresponding GridBlueprint.\"\"\"\n        name = self.gridName.GetValue()\n        geom = self._geomFromIdx[self.geomType.GetSelection()]\n\n        if self.domainFull.GetValue():\n            domain = geometry.DomainType.FULL_CORE\n        elif self.domain3.GetValue():\n            domain = geometry.DomainType.THIRD_CORE\n        elif self.domain4.GetValue():\n            domain = geometry.DomainType.QUARTER_CORE\n     
   else:\n            raise ValueError(\"Couldn't map selection to supported fractional domain\")\n\n        if self.periodic.GetValue():\n            bc = geometry.BoundaryType.PERIODIC\n        elif self.reflective.GetValue():\n            bc = geometry.BoundaryType.REFLECTIVE\n        else:\n            bc = geometry.BoundaryType.NO_SYMMETRY\n\n        symmetry = geometry.SymmetryType(domain, bc, self.throughCenter.GetValue())\n\n        assert symmetry.checkValidSymmetry()\n\n        bp = GridBlueprint(name=name, geom=str(geom), symmetry=str(symmetry))\n\n        return bp\n\n\nif __name__ == \"__main__\":\n    app = wx.App()\n    frame = wx.Frame(None, wx.ID_ANY, title=\"Grid Blueprints GUI\", size=(1000, 1000))\n\n    gui = GridBlueprintControl(frame)\n    frame.Show()\n    if len(sys.argv) > 1:\n        gui.loadFile(sys.argv[1])\n    app.MainLoop()\n"
  },
  {
    "path": "armi/utils/hexagon.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"\nGeneric hexagon math.\n\nHexagons are fundamental to advanced reactors.\n\n.. image:: /.static/hexagon.png\n    :width: 100%\n\"\"\"\n\nimport math\n\nimport numpy as np\n\nSQRT3 = math.sqrt(3.0)\n\n\ndef area(pitch):\n    \"\"\"\n    Area of a hex given the flat-to-flat pitch.\n\n    Notes\n    -----\n    The pitch is the distance between the center of the hexagons in the lattice.\n    \"\"\"\n    return SQRT3 / 2.0 * pitch**2\n\n\ndef side(pitch):\n    r\"\"\"\n    Side length of a hex given the flat-to-flat pitch.\n\n    Pythagorean theorem says:\n\n    .. math::\n\n        \\frac{s}{2}^2 + \\frac{p}{2}^2 = s^2\n\n    which you can solve to find p = sqrt(3)*s\n\n    Notes\n    -----\n    The pitch is the distance between the center of the hexagons in the lattice.\n    \"\"\"\n    return pitch / SQRT3\n\n\ndef corners(rotation=0):\n    \"\"\"\n    Return the coordinates of a unit hexagon, rotated as requested.\n\n    Zero rotation implies flat-to-flat aligned with y-axis. 
Origin in the center.\n    \"\"\"\n    points = np.array(\n        [\n            (1.0 / (2.0 * math.sqrt(3.0)), 0.5),\n            (1.0 / math.sqrt(3.0), 0.0),\n            (1.0 / (2.0 * math.sqrt(3.0)), -0.5),\n            (-1.0 / (2.0 * math.sqrt(3.0)), -0.5),\n            (-1.0 / math.sqrt(3.0), 0.0),\n            (-1.0 / (2.0 * math.sqrt(3.0)), 0.5),\n        ]\n    )\n\n    rotation = rotation / 180.0 * math.pi\n    rotation = np.array(\n        [\n            [math.cos(rotation), -math.sin(rotation)],\n            [math.sin(rotation), math.cos(rotation)],\n        ]\n    )\n\n    return np.array([tuple(rotation.dot(point)) for point in points])\n\n\ndef pitch(side):\n    \"\"\"\n    Calculate the pitch from the length of a hexagon side.\n\n    Notes\n    -----\n    The pitch is the distance between the center of the hexagons in the lattice.\n    \"\"\"\n    return side * SQRT3\n\n\ndef numRingsToHoldNumCells(numCells):\n    \"\"\"\n    Determine the number of rings in a hexagonal grid with this many hex cells.\n    If the number of pins don't fit exactly into any ring, returns the ring just large\n    enough to fit them.\n\n    Parameters\n    ----------\n    numCells : int\n        The number of hex cells in a hex lattice\n\n    Returns\n    -------\n    numRings : int\n        Number of rings required to contain numCells items.\n\n    Notes\n    -----\n    The first hex ring (center) holds 1 position. Each subsequent hex ring contains 6\n    more positions than the last.  This method works by incrementing ring numbers until\n    the number of items is reached or exceeded. 
It could easily be replaced by a lookup\n    table if so desired.\n    \"\"\"\n    if numCells == 0:\n        return 0\n    nPinRings = int(math.ceil(0.5 * (1 + math.sqrt(1 + 4 * (numCells - 1) // 3))))\n\n    return nPinRings\n\n\ndef numPositionsInRing(ring):\n    \"\"\"Number of positions in ring (starting at 1) of a hex lattice.\"\"\"\n    return (ring - 1) * 6 if ring != 1 else 1\n\n\ndef totalPositionsUpToRing(ring: int) -> int:\n    \"\"\"Return the number of positions in a hexagon with a given number of rings.\"\"\"\n    return 1 + 3 * ring * (ring - 1)\n\n\ndef getIndexOfRotatedCell(initialCellIndex: int, orientationNumber: int) -> int:\n    \"\"\"Obtain a new cell number after placing a hexagon in a new orientation.\n\n    Parameters\n    ----------\n    initialCellIndex : int\n        Positive number for this cell's position in a hexagonal lattice.\n    orientationNumber :\n        Orientation in number of 60 degree, counter clockwise rotations. An orientation of zero\n        means the first cell in each ring of a flags up hexagon is in the upper right corner.\n\n    Returns\n    -------\n    int\n        New cell number across the rotation\n\n    Raises\n    ------\n    ValueError\n        If ``initialCellIndex`` is not positive.\n        If ``orientationNumber`` is less than zero or greater than five.\n    \"\"\"\n    if orientationNumber < 0 or orientationNumber > 5:\n        raise ValueError(f\"Orientation number must be in [0:5], got {orientationNumber}\")\n    if initialCellIndex > 1:\n        if orientationNumber == 0:\n            return initialCellIndex\n        ring = numRingsToHoldNumCells(initialCellIndex)\n        tot_pins = totalPositionsUpToRing(ring)\n        newPinLocation = initialCellIndex + (ring - 1) * orientationNumber\n        if newPinLocation > tot_pins:\n            newPinLocation -= (ring - 1) * 6\n        return newPinLocation\n    elif initialCellIndex == 1:\n        return initialCellIndex\n\n    raise ValueError(f\"Cell 
number must be positive, got {initialCellIndex}\")\n"
  },
  {
    "path": "armi/utils/iterables.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Module of utilities to help dealing with iterable objects in Python.\"\"\"\n\nimport struct\nfrom itertools import chain, filterfalse, tee\n\nimport numpy as np\n\n\ndef flatten(lst):\n    \"\"\"Flattens an iterable of iterables by one level.\n\n    Examples\n    --------\n    >>> flatten([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10]])\n    [1,2,3,4,5,6,7,8,9,10]\n    \"\"\"\n    return [item for sublist in lst for item in sublist]\n\n\ndef chunk(lst, n):\n    r\"\"\"Returns a generator object that yields lenght-`n` chunks of `lst`.\n\n    The last chunk may have a length less than `n` if `n` doesn't divide\n    `len(lst)`.\n\n    Examples\n    --------\n    >>> list(chunk([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 4))\n     [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10]]\n    \"\"\"\n    for i in range(0, len(lst), n):\n        yield lst[i : i + n]\n\n\ndef split(a, n, padWith=()):\n    r\"\"\"\n    Split an iterable `a` into `n` sublists.\n\n    Parameters\n    ----------\n    a : iterable\n        The list to be broken into chunks\n\n    n : int\n        The number of \"even\" chunks to break this into. There will be this many\n        entries in the returned list no matter what. If len(a) < n,\n        error unless padWith has been set. 
If padWithNones is true, then the output\n        will be padded with lists containing a single None.\n\n    padWith : object, optional\n        if n > len(a), then the result will be padded to length-n by appending `padWith`.\n\n    Returns\n    -------\n    chunked : list[len=n] of lists\n\n    Examples\n    --------\n    >>> split([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 4)\n     [[1, 2, 3], [4, 5, 6], [7, 8], [9, 10]]\n\n    >>> split([0, 1, 2], 5, padWith=None)\n     [[0], [1], [2], None, None]\n    \"\"\"\n    a = list(a)  # in case `a` is not list-like\n    N = len(a)\n\n    assert n > 0, \"Cannot chunk into less than 1 chunks. You requested {0}\".format(n)\n\n    k, m = divmod(N, n)\n    chunked = [a[i * k + min(i, m) : (i + 1) * k + min(i + 1, m)] or padWith for i in range(n)]\n    return chunked\n\n\ndef unpackBinaryStrings(binaryRow):\n    \"\"\"Unpacks a row of binary strings to a list of floats.\"\"\"\n    if len(binaryRow) % 8:\n        raise ValueError(\"Cannot unpack binary strings from misformatted row. 
Expected chunks of size 8.\")\n    return [(struct.unpack(\"<d\", barray)[0]) for barray in chunk(binaryRow, 8)]\n\n\ndef packBinaryStrings(valueDict):\n    \"\"\"Converts a dictionary of lists of floats into a dictionary of lists of byte arrays.\"\"\"\n    bytearrays = {}\n    for entry in valueDict:\n        bytearrays[entry] = [bytearray()]\n\n        for value in valueDict[entry]:\n            bytearrays[entry][0].extend(struct.pack(\"<d\", value))\n\n    return bytearrays\n\n\ndef unpackHexStrings(hexRow):\n    \"\"\"Unpacks a row of binary strings to a list of floats.\"\"\"\n    return [float.fromhex(ss) for ss in hexRow.split() if ss != \"\"]\n\n\ndef packHexStrings(valueDict):\n    \"\"\"Converts a dictionary of lists of floats into a dictionary of lists of hex values arrays.\"\"\"\n    hexes = {}\n    for entry in valueDict:\n        hexes[entry] = [\" \".join(float.hex(float(value)) for value in valueDict[entry])]\n    return hexes\n\n\nclass Sequence:\n    \"\"\"\n    The Sequence class partially implements a list-like interface,\n    supporting methods like append and extend and also operations like + and +=.\n\n    It also provides some convenience methods such as drop and select to support\n    filtering, as well as a transform function to modify the sequence. Note that\n    these methods return a \"cloned\" version of the iterator to support chaining,\n    e.g.\n\n    >>> s = Sequence(range(1000000))\n    >>> tuple(s.drop(lambda i: i % 2 == 0).select(lambda i: i < 20).transform(lambda i: i * 10))\n    (10, 30, 50, 70, 90, 110, 130, 150, 170, 190)\n\n    This starts with a Sequence over 1 million elements (not stored in memory),\n    drops the even elements, selects only those whose value is less than 20, and\n    multiplies the resulting values by 10, all while loading only one element\n    at a time into memory. 
It is only when tuple is called that the operations\n    are performed.\n\n    drop, select, and transform act in-place, so the following is equivalent to\n    the chained expression given above:\n    >>> s = Sequence(range(1000000))\n    >>> s.drop(lambda i: i % 2 == 0)\n    <Sequence at 0x...>\n    >>> s.select(lambda i: i < 20)\n    <Sequence at 0x...>\n    >>> s.transform(lambda i: i * 10)\n    <Sequence at 0x...>\n    >>> tuple(s)\n    (10, 30, 50, 70, 90, 110, 130, 150, 170, 190)\n\n    Note: that this class is intended for use with finite sequences. Don't attempt\n    to use with infinite generators. For instance, the following will not work:\n\n    >>> def counter():\n    ...     i = 0\n    ...     while True:\n    ...         yield i\n    ...         i += 1\n    >>> s = Sequence(counter()).select(lambda i: i < 10)\n    >>> tuple(s)  # DON'T DO THIS!\n\n    Although the result should be (0,1,2,3,4,5,6,7,8,9), the select method is not\n    smart enough to know that it's a terminal condition and will continue to\n    check every number generated forever. One could remedy this by using the\n    dropwhile and/or takewhile methods in the itertools module, but this has\n    not been done.\n    \"\"\"\n\n    def __init__(self, seq=None):\n        \"\"\"Constructs a new Sequence object from an iterable. 
This also serves\n        as a copy constructor if seq is an instance of Sequence.\n        \"\"\"\n        if seq is None:\n            seq = []\n        elif isinstance(seq, Sequence):\n            seq = seq.copy()\n        self._iter = iter(seq)\n\n    def copy(self):\n        \"\"\"Return a new iterator that is a copy of self without consuming self.\"\"\"\n        self._iter, copy = tee(self._iter, 2)\n        return Sequence(copy)\n\n    def __iter__(self):\n        return self\n\n    def __repr__(self):\n        return \"<{:s} at 0x{:x}>\".format(self.__class__.__name__, id(self))\n\n    def __next__(self):\n        return next(self._iter)\n\n    def select(self, pred):\n        \"\"\"Keep only items for which pred(item) evaluates to True.\n\n        Note: returns self so it can be chained with other filters, e.g.,\n\n                newseq = seq.select(...).drop(...).transform(...)\n        \"\"\"\n        self._iter = filter(pred, self._iter)\n        return self\n\n    def drop(self, pred):\n        \"\"\"Drop items for which pred(item) evaluates to True.\n\n        Note: returns self so it can be chained with other filters, e.g.,\n\n                newseq = seq.select(...).drop(...).transform(...)\n        \"\"\"\n        self._iter = filterfalse(pred, self._iter)\n        return self\n\n    def transform(self, func):\n        \"\"\"Apply func to this sequence.\"\"\"\n        self._iter = map(func, self._iter)\n        return self\n\n    def extend(self, seq):\n        self._iter = chain(self._iter, seq)\n\n    def append(self, item):\n        self.extend([item])\n\n    def __radd__(self, other):\n        \"\"\"Basic sequence addition: s1 += s2.\"\"\"\n        new = Sequence(other)\n        new += Sequence(self)\n        return new\n\n    def __add__(self, other):\n        new = Sequence(self)\n        new += Sequence(other)\n        return new\n\n    def __iadd__(self, other):\n        self.extend(Sequence(other))\n        return self\n\n\ndef 
pivot(items, position: int):\n    \"\"\"Pivot the items in an iterable to start at a given position.\n\n    Functionally just ``items[position:] + items[:position]`` with\n    some logic to handle numpy arrays (concatenation not summation)\n\n    Parameters\n    ----------\n    items : list or numpy.ndarray\n        Sequence to be re-ordered\n    position : int\n        Position that will be the first item in the sequence after the pivot\n\n    Returns\n    -------\n    list or numpy.ndarray\n    \"\"\"\n    if isinstance(items, np.ndarray):\n        return np.concatenate((items[position:], items[:position]))\n    elif isinstance(items, list):\n        return items[position:] + items[:position]\n    raise TypeError(f\"Pivoting {type(items)} not supported : {items}\")\n"
  },
  {
    "path": "armi/utils/mathematics.py",
    "content": "# Copyright 2022 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Various math utilities.\"\"\"\n\nimport math\nimport operator  # the python package, not the ARMI module\nimport re\n\nimport numpy as np\nimport scipy.optimize as sciopt\n\n# special pattern to deal with FORTRAN-produced scipats without E, like 3.2234-234\nSCIPAT_SPECIAL = re.compile(r\"([+-]?\\d*\\.\\d+)[eEdD]?([+-]\\d+)\")\n\n\ndef average1DWithinTolerance(vals, tolerance=0.2):\n    \"\"\"\n    Compute the average of a series of 1D arrays with a tolerance.\n\n    Tuned for averaging assembly meshes or block heights.\n\n    Parameters\n    ----------\n    vals : 2D np.array\n        could be assembly x axial mesh tops or heights\n    tolerance : float\n        The accuracy to which we need to know the average.\n\n    Returns\n    -------\n    1D np.array\n        The average of all the input 1D NumPy arrays.\n    \"\"\"\n    vals = np.array(vals)\n\n    filterOut = np.array([False])  # this gets discarded\n    while not filterOut.all():  # 20% difference is the default tolerance\n        avg = vals.mean(axis=0)  # average over all columns\n        diff = abs(vals - avg) / avg  # no nans, because all vals are non-zero\n        # True = 1, sum across axis means any height in assem is off\n        filterOut = (diff > tolerance).sum(axis=1) == 0\n        vals = vals[filterOut]  # filter anything that is skewing\n\n    if vals.size == 0:\n        raise 
ValueError(\"Nothing was near the mean, there are no acceptable values!\")\n\n    if (avg <= 0.0).any():\n        raise ValueError(\n            \"A non-physical value (<=0) was computed, but this is not possible.\\nValues: {}\\navg: {}\".format(vals, avg)\n        )\n\n    return avg\n\n\ndef convertToSlice(x, increment=0):\n    \"\"\"\n    Convert a int, float, list of ints or floats, None, or slice to a slice. Also optionally\n    increments that slice to make it easy to line up lists that don't start with 0.\n\n    Use this with np.array (np.ndarray) types to easily get selections of it's elements.\n\n    Parameters\n    ----------\n    x : multiple types allowed.\n        int: select one index.\n        list of int: select these index numbers.\n        None: select all indices.\n        slice: select this slice\n    increment : integer (or boolean), optional\n        Step size, when taking your slices. (`False` is zero.)\n\n    Returns\n    -------\n    slice : slice\n        Returns a slice object that can be used in an array like a[x] to select from its members.\n        Also, the slice has its index numbers decremented by 1. 
It can also return a numpy array,\n        which can be used to slice other numpy arrays in the same way as a slice.\n    increment : int\n        Step size to take, if you want to take less then every datum in the collection.\n\n    Examples\n    --------\n    >>> a = np.array([10, 11, 12, 13])\n\n    >>> convertToSlice(2)\n    slice(2, 3, None)\n    >>> a[convertToSlice(2)]\n    array([12])\n\n    >>> convertToSlice(2, increment=-1)\n    slice(1, 2, None)\n    >>> a[convertToSlice(2, increment=-1)]\n    array([11])\n\n    >>> a[convertToSlice(None)]\n    array([10, 11, 12, 13])\n\n    >>> a[utils.convertToSlice([1, 3])]\n    array([11, 13])\n\n    >>> a[utils.convertToSlice([1, 3], increment=-1)]\n    array([10, 12])\n\n    >>> a[utils.convertToSlice(slice(2, 3, None), increment=-1)]\n    array([11])\n    \"\"\"\n    if x is None:\n        x = np.s_[:]\n\n    if isinstance(x, list):\n        x = np.array(x)\n\n    if isinstance(x, (int, np.integer, float, np.floating)):\n        x = slice(int(x), int(x) + 1, None)\n\n    # Correct the slice indices to be group instead of index based.\n    # The energy groups are 1..x and the indices are 0..x-1.\n    if isinstance(x, slice):\n        if x.start is not None:\n            jstart = x.start + increment\n        else:\n            jstart = None\n\n        if x.stop is not None:\n            if isinstance(x.stop, list):\n                jstop = [x + increment for x in x.stop]\n            else:\n                jstop = x.stop + increment\n        else:\n            jstop = None\n\n        jstep = x.step\n\n        return np.s_[jstart:jstop:jstep]\n    elif isinstance(x, np.ndarray):\n        return np.array([i + increment for i in x])\n    else:\n        raise Exception(f\"It is not known how to handle x type: {type(x)} in utils.convertToSlice\")\n\n\ndef efmt(a: str) -> str:\n    \"\"\"Converts string exponential number to another string with just 2 digits in the exponent.\"\"\"\n    # this assumes that none of our 
numbers will be more than 1e100 or less than 1e-100...\n    if len(a.split(\"E\")) != 2:\n        two = a.split(\"e\")\n    else:\n        two = a.split(\"E\")\n\n    # print two\n    exp = two[1]  # this is '+002' or '+02' or something\n\n    if len(exp) == 4:  # it has 3 digits of exponent\n        exp = exp[0] + exp[2:]  # gets rid of the hundred's place digit\n\n    return two[0] + \"E\" + exp\n\n\ndef expandRepeatedFloats(repeatedList):\n    \"\"\"\n    Return an expanded repeat list.\n\n    Notes\n    -----\n    R char is valid for showing the number of repeats in MCNP. For examples the list:\n    [150,  200, '9R']\n    indicates a 150 day cycle followed by 10 200 day cycles.\n    \"\"\"\n    nonRepeatList = []\n    for val in repeatedList:\n        isRepeat = False\n        if isinstance(val, str):\n            val = val.upper()\n            if val.count(\"R\") > 1:\n                raise ValueError(\"List had strings that were not repeats\")\n            elif \"R\" in val:\n                val = val.replace(\"R\", \"\")\n                isRepeat = True\n        if isRepeat:\n            nonRepeatList += [nonRepeatList[-1]] * int(val)\n        else:\n            nonRepeatList.append(float(val))\n    return nonRepeatList\n\n\ndef findClosest(listToSearch, val, indx=False):\n    \"\"\"\n    Find closest item in a list.\n\n    Parameters\n    ----------\n    listToSearch : list\n        The list to search through\n\n    val : float\n        The target value that is being searched for in the list\n\n    indx : bool, optional\n        If true, returns minVal and minIndex, otherwise, just the value\n\n    Returns\n    -------\n    minVal : float\n        The item in the listToSearch that is closest to val\n    minI : int\n        The index of the item in listToSearch that is closest to val. 
Returned if indx=True.\n    \"\"\"\n    d = float(\"inf\")\n    minVal = None\n    minI = None\n    for i, item in enumerate(listToSearch):\n        if abs(item - val) < d:\n            d = abs(item - val)\n            minVal = item\n            minI = i\n    if indx:\n        return minVal, minI\n    else:\n        # backwards compatibility\n        return minVal\n\n\ndef findNearestValue(searchList, searchValue):\n    \"\"\"Search a given list for the value that is closest to the given search value.\"\"\"\n    return findNearestValueAndIndex(searchList, searchValue)[0]\n\n\ndef findNearestValueAndIndex(searchList, searchValue):\n    \"\"\"Search a given list for the value that is closest to the given search value. Return a tuple\n    containing the value and its index in the list.\n    \"\"\"\n    searchArray = np.array(searchList)\n    closestValueIndex = (np.abs(searchArray - searchValue)).argmin()\n    return searchArray[closestValueIndex], closestValueIndex\n\n\ndef fixThreeDigitExp(strToFloat: str) -> float:\n    \"\"\"\n    Convert FORTRAN numbers that cannot be converted into floats.\n\n    Notes\n    -----\n    Converts a number line  \"9.03231714805651-101\" (no e or E) to \"9.03231714805651e-101\".\n    Some external depletion kernels currently need this fix. From contact with developer:\n    The notation like 1.0-101 is a FORTRAN thing, with history going back to the 60's.\n    They will only put E before an exponent 99 and below.  Fortran will also read these guys\n    just fine, and they are valid floating point numbers.  
It would not be a useful effort,\n    in terms of time, trying to get FORTRAN to behave differently.\n    The approach has been to write a routine in the reading code which will interpret these.\n\n    This helps when the scientific number exponent does not fit.\n    \"\"\"\n    match = SCIPAT_SPECIAL.match(strToFloat)\n    return float(\"{}E{}\".format(*match.groups()))\n\n\ndef getFloat(val):\n    \"\"\"Returns float version of val, or None if it's impossible. Useful for converting\n    user-input into floats when '' might be possible.\n    \"\"\"\n    try:\n        newVal = float(val)\n        return newVal\n    except Exception:\n        return None\n\n\ndef getStepsFromValues(values, prevValue=0.0):\n    \"\"\"Convert list of floats to list of steps between each float.\"\"\"\n    steps = []\n    for val in values:\n        currentVal = float(val)\n        steps.append(currentVal - prevValue)\n        prevValue = currentVal\n\n    return steps\n\n\ndef isMonotonic(inputIter, relation):\n    \"\"\"\n    Checks if an iterable contains elements that are monotonically increasing or\n    decreasing, whatever that might mean for the specific types of the elements.\n\n    Parameters\n    ----------\n    inputIter : list\n        Some list to check. 
Values in the list should have a defined relation to\n        each other.\n    relation : {'<=', '<', '>=', '>'}\n        The relation between the elements to check, from left to right through\n        the iterable.\n\n    Returns\n    -------\n    bool\n    \"\"\"\n    operatorDict = {\n        \"<=\": operator.le,\n        \"<\": operator.lt,\n        \">=\": operator.ge,\n        \">\": operator.gt,\n    }\n    try:\n        op = operatorDict[relation]\n    except KeyError:\n        raise ValueError(f\"Valid relation not specified: {relation}\")\n\n    return all([op(x, y) for x, y in zip(inputIter, inputIter[1:])])\n\n\ndef linearInterpolation(x0, y0, x1, y1, targetX=None, targetY=None):\n    \"\"\"\n    Does a linear interpolation (or extrapolation) for y=f(x).\n\n    Parameters\n    ----------\n    x0,y0,x1,y1 : float\n        Coordinates of two points to interpolate between\n\n    targetX : float, optional\n        X value to evaluate the line at\n\n    targetY : float, optional\n        Y value we want to find the x value for (inverse interpolation)\n\n    Returns\n    -------\n    interpY : float\n        The value of y(targetX), if targetX is not None\n\n    interpX : float\n        The value of x where y(x) = targetY (if targetY is not None)\n\n    y = m(x-x0) + b\n\n    x = (y-b)/m\n    \"\"\"\n    if x1 == x0:\n        raise ZeroDivisionError(\"The x-values are identical. 
Cannot interpolate.\")\n\n    m = (y1 - y0) / (x1 - x0)\n    b = -m * x0 + y0\n\n    if targetX is not None:\n        return m * targetX + b\n    else:\n        return (targetY - b) / m\n\n\ndef minimizeScalarFunc(\n    func,\n    goal,\n    guess,\n    maxIterations=None,\n    cs=None,\n    positiveGuesses=False,\n    method=None,\n    tol=1.0e-3,\n):\n    \"\"\"\n    Use SciPy minimize with the given function, goal value, and first guess.\n\n    Parameters\n    ----------\n    func : function\n        The function that guess will be changed to try to make it return the goal value.\n\n    goal : float\n        The function will be changed until it's return equals this value.\n\n    guess : float\n        The first guess value to do Newton's method on the func.\n\n    maxIterations : int\n        The maximum number of iterations that the Newton's method will be allowed to perform.\n\n    Returns\n    -------\n    ans : float\n        The guess that when input to the func returns the goal.\n    \"\"\"\n\n    def goalFunc(guess, func, positiveGuesses):\n        if positiveGuesses is True:\n            guess = abs(guess)\n        funcVal = func(guess)\n        val = abs(goal - funcVal)\n        return val\n\n    if (maxIterations is None) and (cs is not None):\n        maxIterations = cs[\"maxNewtonsIterations\"]\n\n    X = sciopt.minimize(\n        goalFunc,\n        guess,\n        args=(func, positiveGuesses),\n        method=method,\n        tol=tol,\n        options={\"maxiter\": maxIterations},\n    )\n\n    # X returns `[num]` instead of `num`, so we have to grab the first/only element in that list\n    ans = float(X[\"x\"][0])\n    if positiveGuesses is True:\n        ans = abs(ans)\n\n    return ans\n\n\ndef newtonsMethod(func, goal, guess, maxIterations=None, cs=None, positiveGuesses=False):\n    r\"\"\"\n    Solves a Newton's method with the given function, goal value, and first guess.\n\n    Parameters\n    ----------\n    func : function\n        The 
function that guess will be changed to try to make it return the goal value.\n\n    goal : float\n        The function will be changed until it's return equals this value.\n\n    guess : float\n        The first guess value to do Newton's method on the func.\n\n    maxIterations : int\n        The maximum number of iterations that the Newton's method will be allowed to perform.\n\n\n    Returns\n    -------\n    ans : float\n        The guess that when input to the func returns the goal.\n\n    \"\"\"\n\n    def goalFunc(guess, func, positiveGuesses):\n        if positiveGuesses is True:\n            guess = abs(guess)\n        funcVal = func(guess)\n        val = abs(goal - funcVal)\n        return val\n\n    if (maxIterations is None) and (cs is not None):\n        maxIterations = cs[\"maxNewtonsIterations\"]\n\n    ans = float(\n        sciopt.newton(\n            goalFunc,\n            guess,\n            args=(func, positiveGuesses),\n            tol=1.0e-3,\n            maxiter=maxIterations,\n        )\n    )\n\n    if positiveGuesses is True:\n        ans = abs(ans)\n\n    return ans\n\n\ndef parabolaFromPoints(p1, p2, p3):\n    r\"\"\"\n    Find the parabola that passes through three points.\n\n    We solve a simultaneous equation with three points.\n\n    A = x1**2 x1 1\n        x2**2 x2 1\n        x3**2 x3 1\n\n    b = y1\n        y2\n        y3\n\n    find coefficients Ax=b\n\n    Parameters\n    ----------\n    p1 : tuple\n        first point (x,y) coordinates\n    p2 : tuple\n        second (x,y) points\n    p3 : tuple\n        third (x,y) points\n\n    Returns\n    -------\n    tuple\n        3 floats: a,b,c coefficients of y=ax^2+bx+c\n    \"\"\"\n    A = np.array([[p1[0] ** 2, p1[0], 1], [p2[0] ** 2, p2[0], 1], [p3[0] ** 2, p3[0], 1]])\n\n    b = np.array([[p1[1]], [p2[1]], [p3[1]]])\n\n    try:\n        x = np.linalg.solve(A, b)\n    except:\n        print(\"Error in parabola {} {}\".format(A, b))\n        raise\n\n    # x[#] returns `[num]` 
instead of `num`, so we have to grab the first/only element in that list\n    return float(x[0][0]), float(x[1][0]), float(x[2][0])\n\n\ndef parabolicInterpolation(ap, bp, cp, targetY):\n    \"\"\"\n    Given parabola coefficients, this interpolates the time\n    that would give k=targetK.\n\n    keff = at^2+bt+c\n    We want to solve a*t^2+bt+c-targetK = 0.0 for time.\n    if there are real roots, we should probably take the smallest one\n    because the larger one might be at very high burnup.\n    If there are no real roots, just take the point where the deriv ==0, or\n    2at+b=0, so t = -b/2a\n    The slope of the curve is the solution to 2at+b at whatever t has been determined\n\n    Parameters\n    ----------\n    ap : float\n        coefficients ap of a parabola y = ap*x^2 + bp*x + cp\n    bp : float\n        coefficients bp of a parabola y = ap*x^2 + bp*x + cp\n    cp : float\n        coefficients cp of a parabola y = ap*x^2 + bp*x + cp\n    targetY : float\n        The keff to find the cycle length of\n\n    Returns\n    -------\n    realRoots : list of tuples\n        (root, slope)\n        The best guess of the cycle length that will give k=targetK\n        If no positive root was found, this is the maximum of the curve. In that case,\n        it will be a negative number. If there are two positive roots, there will be two entries.\n\n        slope : float\n            The slope of the keff vs. time curve at t=newTime\n    \"\"\"\n    roots = np.roots([ap, bp, cp - targetY])\n    realRoots = []\n    for r in roots:\n        if r.imag == 0 and r.real > 0:\n            realRoots.append((r.real, 2.0 * ap * r.real + bp))\n\n    if not realRoots:\n        # no positive real roots. 
Take maximum and give up for this cyclic.\n        newTime = -bp / (2 * ap)\n        if newTime < 0:\n            raise RuntimeError(\"No positive roots or maxima.\")\n        slope = 2.0 * ap * newTime + bp\n        newTime = -newTime  # return a negative newTime to signal that it is not expected to be critical.\n        realRoots = [(newTime, slope)]\n\n    return realRoots\n\n\ndef relErr(v1: float, v2: float) -> float:\n    \"\"\"Find the relative error between to numbers.\"\"\"\n    if v1:\n        return (v2 - v1) / v1\n    else:\n        return -1e99\n\n\ndef resampleStepwise(xin, yin, xout, avg=True):\n    \"\"\"\n    Resample a piecewise-defined step function from one set of mesh points\n    to another. This is useful for reallocating values along a given axial\n    mesh (or assembly of blocks).\n\n    Parameters\n    ----------\n    xin : list\n        interval points / mesh points\n    yin : list\n        interval values / inter-mesh values\n    xout : list\n        new interval points / new mesh points\n    avg : bool\n        By default, this is set to True, forcing the resampling to be done\n        by averaging. 
But if this is False, the resmampling will be done by\n        summation, to try and preserve the totals after resampling.\n    \"\"\"\n    # validation: there must be one more mesh point than inter-mesh values\n    assert (len(xin) - 1) == len(yin)\n\n    # find out in which xin bin each xout value lies\n    bins = np.digitize(xout, bins=xin)\n\n    # loop through xout / the xout bins\n    yout = []\n    for i in range(1, len(bins)):\n        start = bins[i - 1]\n        end = bins[i]\n        chunk = yin[start - 1 : end]\n        length = xin[start - 1 : end + 1]\n        length = [length[j] - length[j - 1] for j in range(1, len(length))]\n\n        # if the xout lies outside the xin range\n        if not len(chunk):\n            yout.append(0)\n            continue\n\n        # trim any partial right-side bins\n        if xout[i] < xin[min(end, len(xin) - 1)]:\n            fraction = (xout[i] - xin[end - 1]) / (xin[end] - xin[end - 1])\n            if fraction == 0:\n                chunk = chunk[:-1]\n                length = length[:-1]\n            elif avg:\n                length[-1] *= fraction\n            else:\n                chunk[-1] *= fraction\n\n        # trim any partial left-side bins\n        if xout[i - 1] > xin[start - 1]:\n            fraction = (xin[start] - xout[i - 1]) / (xin[start] - xin[start - 1])\n            if fraction == 0:\n                chunk = chunk[1:]\n                length = length[1:]\n            elif avg:\n                length[0] *= fraction\n            else:\n                chunk[0] *= fraction\n\n        # return the sum or the average\n        if [1 for c in chunk if (not hasattr(c, \"__len__\") and c is None)]:\n            yout.append(None)\n        elif avg:\n            weighted_sum = sum([ch * ln for ch, ln in zip(chunk, length)])\n            yout.append(weighted_sum / sum(length))\n        else:\n            yout.append(sum(chunk))\n\n    return yout\n\n\ndef rotateXY(x, y, degreesCounterclockwise=None, 
radiansCounterclockwise=None):\n    \"\"\"\n    Rotates x, y coordinates.\n\n    Parameters\n    ----------\n    x : float\n        X coordinates, array-like\n    y : float\n        Y coordinates, array-like\n    degreesCounterclockwise : float\n        Degrees to rotate in the CCW direction\n    radiansCounterclockwise : float\n        Radians to rotate in the CCW direction\n\n    Returns\n    -------\n    tuple\n        xr, yr: the rotated coordinates\n    \"\"\"\n    if radiansCounterclockwise is None:\n        radiansCounterclockwise = degreesCounterclockwise * math.pi / 180.0\n\n    sinT = math.sin(radiansCounterclockwise)\n    cosT = math.cos(radiansCounterclockwise)\n    rotationMatrix = np.array([[cosT, -sinT], [sinT, cosT]])\n    xr, yr = rotationMatrix.dot(np.vstack((x, y)))\n    if len(xr) > 1:\n        # Convert to lists because everyone prefers lists for some reason\n        return xr.tolist(), yr.tolist()\n    else:\n        # Convert to scalar for consistency with old implementation\n        return xr[0], yr[0]\n"
  },
  {
    "path": "armi/utils/outputCache.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nTerraPower Calculation Results Cache (CRC).\n\nThis helps avoid duplicated time/energy in running cases.\nIn test systems and analysis, it's possible that the same calc will be done\nover and over, always giving the same result. This system allows the results\nto be cached and returned instantly instead of re-running, for example, MC2.\n\nAPI usage\n---------\nGetting a cached file::\n\n    exe = \"MC2-2018-blah.exe\"\n    inpFiles = [\"mccAA.inp\", \"rmzflx\"]\n    outputFound = crc.retrieveOutput(exe, inp, output)\n    if not outputFound:\n        mc2.run(exe, inp, output)\n\nStoring a file to the cache::\n\n    crc.store(exe, inp, outFiles)\n\nNotes\n-----\nCould probably be, like, a decorate on subprocess but we call subprocess a bunch of different ways.\n\"\"\"\n\nimport hashlib\nimport json\nimport os\nimport subprocess\n\nfrom armi import runLog\nfrom armi.utils import safeCopy\nfrom armi.utils.pathTools import cleanPath\n\nMANIFEST_NAME = \"CRC-manifest.json\"\n\n\ndef retrieveOutput(exePath, inputPaths, cacheDir, locToRetrieveTo=None):\n    \"\"\"\n    Check the cache for a valid file and copy it if it exists.\n\n    Notes\n    -----\n    Input paths need to be in the same order each time if the same cached folder is expected to be found.\n    \"\"\"\n    cachedFolder = _getCachedFolder(exePath, inputPaths, cacheDir)\n    if 
os.path.exists(cachedFolder):\n        if locToRetrieveTo is None:\n            locToRetrieveTo = os.path.dirname(inputPaths[0])\n\n        successful = _copyOutputs(cachedFolder, locToRetrieveTo)\n\n        if successful:\n            runLog.info(\"Retrieved cached outputs for {}\".format(exePath))\n            return True\n        else:\n            # outputs didn't match manifest. Just delete to save checking next time.\n            runLog.warning(\n                \"Outputs in {} were inconsistent with manifest. Deleting and reproducing\".format(cachedFolder)\n            )\n            try:\n                deleteCache(cachedFolder)\n            except Exception as e:\n                runLog.debug(e)\n\n    return False\n\n\ndef _copyOutputs(cachedFolder, locToRetrieveTo):\n    \"\"\"Check that the outputs have the expectect hashes and copy them if they do.\"\"\"\n    manifest = os.path.join(cachedFolder, MANIFEST_NAME)\n    if not os.path.exists(manifest):\n        return False\n\n    with open(manifest) as manifestJSON:\n        storedOutputNamesToHashes = json.load(manifestJSON)\n\n    copies = []\n    for storedOutputName, expectedHash in storedOutputNamesToHashes.items():\n        storedOutputPath = os.path.join(cachedFolder, storedOutputName)\n        try:\n            if _hashFiles([storedOutputPath]) != expectedHash:\n                return False\n        except FileNotFoundError:\n            return False\n        copyPath = os.path.join(locToRetrieveTo, storedOutputName)\n        copies.append([storedOutputPath, copyPath])\n\n    for copy in copies:\n        storedOutputPath, copyPath = copy\n        safeCopy(storedOutputPath, copyPath)\n\n    return True\n\n\ndef _getCachedFolder(exePath, inputPaths, cacheDir):\n    \"\"\"Return the the folder name expected for this executable and set of inputs.\"\"\"\n    exeName = os.path.basename(os.path.splitext(exePath)[0])\n    exeHash = _hashFiles([exePath])\n    inputHash = _hashFiles(inputPaths)\n\n    # 
first 2 helps with reducing the number of folders in a folder\n    first2, remainder = (inputHash[:2], inputHash[2:])\n    return os.path.join(cacheDir, exeName, exeHash, first2, remainder)\n\n\ndef _hashFiles(paths):\n    \"\"\"Return a MD5 hash of a file's contents.\"\"\"\n    with open(paths[0], \"rb\") as binaryF:\n        md5Hash = hashlib.md5(binaryF.read())\n\n    for path in paths[1:]:\n        with open(path, \"rb\") as binaryF:\n            md5Hash.update(binaryF.read())\n\n    return md5Hash.hexdigest()\n\n\ndef _makeOutputManifest(outputFiles, folderLocation):\n    \"\"\"Make a json file with the output names and expected hash.\"\"\"\n    manifest = {outputFile: _hashFiles([outputFile]) for outputFile in outputFiles}\n    with open(os.path.join(folderLocation, MANIFEST_NAME), \"w\") as manifestJSON:\n        json.dump(manifest, manifestJSON)\n\n\ndef store(exePath, inputPaths, outputFiles, cacheDir):\n    \"\"\"\n    Store an output file in the cache.\n\n    Notes\n    -----\n    Input paths need to be in the same order each time if the same cached folder is expected to be found.\n    It is difficult to know what outputs will exist from a specific run, so only\n    outputs that do exist will attempt to be copied.\n    This function should be supplied with a greedy list of outputs.\n    \"\"\"\n    # outputFilePaths is a greedy list and they might not all be produced\n    outputsThatExist = [outputFile for outputFile in outputFiles if os.path.exists(outputFile)]\n\n    folderLoc = _getCachedFolder(exePath, inputPaths, cacheDir)\n    if os.path.exists(folderLoc):\n        deleteCache(folderLoc)\n    os.makedirs(folderLoc)\n    _makeOutputManifest(outputsThatExist, folderLoc)\n\n    for outputFile in outputsThatExist:\n        baseName = os.path.basename(outputFile)\n        cachedLoc = os.path.join(folderLoc, baseName)\n        safeCopy(outputFile, cachedLoc)\n\n    runLog.info(\"Added outputs for {} to the cache.\".format(exePath))\n\n\ndef 
deleteCache(cachedFolder):\n    \"\"\"\n    Remove this folder.\n\n    Requires keyword because this is potentially extremely destructive.\n    \"\"\"\n    if \"cache\" not in str(cachedFolder).lower():\n        raise RuntimeError(\"Cache location must contain keyword: `cache`.\")\n\n    # Output caches need to pass in `forceClean` in order to greenlight the deletion.\n    cleanPath(cachedFolder, forceClean=True)\n\n\ndef cacheCall(cacheDir, executablePath, inputPaths, outputFileNames, execute=None, tearDown=None):\n    \"\"\"\n    Checks the cache to see if there are outputs for the run and returns them, otherwise calls the execute command.\n\n    Notes\n    -----\n    It is non-trivial to determine the exact set of outputs an executable will produce\n    without running the executable. Therefore, ``outputFileNames`` is expected to be a\n    greedy list and cache will attempt to copy all the files, but not fail if the\n    file is not present. When copying outputs back, all files copied previously will\n    be targeted.\n    \"\"\"\n    if execute is None:\n        execute = lambda: subprocess.call([executablePath] + inputPaths)\n\n    if not cacheDir:\n        runLog.info(\"Executing {}\".format(executablePath))\n        execute()\n        return\n\n    try:\n        if retrieveOutput(executablePath, inputPaths, cacheDir):\n            return\n    except Exception as e:\n        runLog.warning(\n            \"Outputs existed in cache, but failed to retrieve outputs from: {} \\nerror: {}\".format(\n                _getCachedFolder(executablePath, inputPaths, cacheDir), e\n            )\n        )\n\n    runLog.warning(\"Cached outputs were not found, executing {}\".format(executablePath))\n    execute()\n    if tearDown is not None:\n        tearDown()\n\n    try:\n        store(executablePath, inputPaths, outputFileNames, cacheDir)\n    except Exception as e:\n        # something went wrong in storage.\n        # This is okay as the manifest will be inconsistent 
with the outputs and not used in the future.\n        runLog.warning(\n            \"Failed to store outputs in: {}\\nerror: {}\".format(\n                _getCachedFolder(executablePath, inputPaths, cacheDir), e\n            )\n        )\n"
  },
  {
    "path": "armi/utils/parsing.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This file contains tools for common tasks in parsing in python strings into non-string values.\"\"\"\n\nimport ast\nimport copy\n\n\ndef tryLiteralEval(source):\n    try:\n        source = ast.literal_eval(source)\n    except (ValueError, SyntaxError):\n        pass\n\n    return source\n\n\n# the following dict helps avoid the need for an eval() statement\n# Is there no better way to go 'bool' -> bool !?\n_str_types = {tp.__name__: tp for tp in (type(None), bool, int, complex, float, str, bytes, list, tuple, dict)}\n_type_strs = {v: k for k, v in _str_types.items()}\n\n\n# python's matching truth evaluations of Nones in different primitive types\n# str's and unicodes omitted because parseValue denies their use.\n_none_types = {\n    type(None): None,\n    bool: False,\n    int: 0,\n    complex: 0j,\n    float: 0.0,\n    list: [],\n    tuple: (),\n    dict: {},\n}\n\n\ndef _numericSpecialBehavior(source, rt):\n    try:\n        return rt(source), True  # convert, report success\n    except (ValueError, TypeError):\n        return source, False  # fail, report failure\n\n\ndef parseValue(source, requestedType, allowNone=False, matchingNonetype=True):\n    \"\"\"Tries parse a python value, expecting input to be the right type or a string.\"\"\"\n    # misuse prevention\n    if requestedType is str:\n        raise TypeError(\n            \"Unreliable and 
unnecessary to use parseValue for strs and unicodes. \"\n            \"Given parameters are {}, {}, {}.\".format(source, requestedType, allowNone)\n        )\n\n    # evaluation and special evaluation for numbers\n    evaluated_source, skip_instance_check = tryLiteralEval(source), False\n    if requestedType in [int, float, complex]:\n        evaluated_source, skip_instance_check = _numericSpecialBehavior(evaluated_source, requestedType)\n\n    # none logic\n    if allowNone and not evaluated_source:\n        if matchingNonetype:\n            return copy.deepcopy(_none_types[requestedType])\n        else:\n            return evaluated_source\n\n    # assert everything went well\n    if not skip_instance_check and not isinstance(evaluated_source, requestedType):\n        msg = \"Could not parse {} from source {}.\"\n        if allowNone:\n            msg += \" Nor could None be parsed from source.\"\n        raise ValueError(msg.format(requestedType, evaluated_source))\n\n    return evaluated_source\n"
  },
  {
    "path": "armi/utils/pathTools.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis module contains commonly used functions relating to directories, files and path\nmanipulations.\n\"\"\"\n\nimport importlib\nimport os\nimport pathlib\nimport shutil\nfrom time import sleep\n\nfrom armi import context, runLog\nfrom armi.utils import safeCopy\n\n\ndef armiAbsPath(*pathParts):\n    \"\"\"Convert a list of path components to an absolute path, without drive letters if possible.\"\"\"\n    return os.path.abspath(os.path.join(*pathParts))\n\n\ndef copyOrWarn(filepathDescription, sourcePath, destinationPath):\n    \"\"\"Copy a file or directory, or warn if the filepath doesn't exist.\n\n    Parameters\n    ----------\n    filepathDescription : str\n        a description of the file and/or operation being performed.\n    sourcePath : str\n        Filepath to be copied.\n    destinationPath : str\n        Copied filepath.\n    \"\"\"\n    try:\n        if os.path.isdir(sourcePath):\n            shutil.copytree(sourcePath, destinationPath, dirs_exist_ok=True)\n        else:\n            safeCopy(sourcePath, destinationPath)\n        runLog.debug(\"Copied {}: {} -> {}\".format(filepathDescription, sourcePath, destinationPath))\n    except shutil.SameFileError:\n        pass\n    except Exception as e:\n        runLog.warning(\n            \"Could not copy {} from {} to {}\\nError was: {}\".format(filepathDescription, sourcePath, 
destinationPath, e)\n        )\n\n\ndef isFilePathNewer(path1, path2):\n    \"\"\"Returns true if path1 is newer than path2.\n\n    Returns true if path1 is newer than path2, or if path1 exists and path2 does not, otherwise\n    raises an IOError.\n    \"\"\"\n    exist1 = os.path.exists(path1)\n    exist2 = os.path.exists(path2)\n    if exist1 and exist2:\n        path1stat = os.stat(path1)\n        path2stat = os.stat(path2)\n        return path1stat.st_mtime > path2stat.st_mtime\n    elif exist1 and not exist2:\n        return True\n    else:\n        raise IOError(\"Path 1 does not exist: {}\".format(path1))\n\n\ndef isAccessible(path):\n    \"\"\"Check whether user has access to a given path.\n\n    Parameters\n    ----------\n    path : str\n        a directory or file\n    \"\"\"\n    return os.path.exists(path)\n\n\ndef separateModuleAndAttribute(pathAttr):\n    \"\"\"\n    Return True of the specified python module, and attribute of the module exist.\n\n    Parameters\n    ----------\n    pathAttr : str\n        Path to a python module followed by the desired attribute.\n        e.g.: `/path/to/my/thing.py:MyClass`\n\n    Notes\n    -----\n    The attribute of the module could be a class, function, variable, etc.\n\n    Raises\n    ------\n    ValueError:\n        If there is no `:` separating the path and attr.\n    \"\"\"\n    # rindex gives last index.\n    # The last is needed because the first colon index could be mapped drives in windows.\n    lastColonIndex = pathAttr.rindex(\":\")  # this raises a valueError\n    # there should be at least 1 colon. 
2 is possible due to mapped drives in windows.\n    return (pathAttr[:lastColonIndex]), pathAttr[lastColonIndex + 1 :]\n\n\ndef importCustomPyModule(modulePath):\n    \"\"\"\n    Dynamically import a custom module.\n\n    Parameters\n    ----------\n    modulePath : str\n        Path to a python module.\n\n    Returns\n    -------\n    userSpecifiedModule : module\n        The imported python module.\n    \"\"\"\n    modulePath = pathlib.Path(modulePath)\n    if not modulePath.exists() or not modulePath.is_file():\n        raise IOError(r\"Cannot import module from the given path: `{modulePath}`\")\n    _dir, moduleName = os.path.split(modulePath)\n    moduleName = os.path.splitext(moduleName)[0]  # take off the extension\n    spec = importlib.util.spec_from_file_location(moduleName, modulePath)\n    userSpecifiedModule = importlib.util.module_from_spec(spec)\n    spec.loader.exec_module(userSpecifiedModule)\n    return userSpecifiedModule\n\n\ndef moduleAndAttributeExist(pathAttr):\n    \"\"\"\n    Return True if the specified python module, and attribute of the module exist.\n\n    Parameters\n    ----------\n    pathAttr : str\n        Path to a python module followed by the desired attribute.\n        e.g.: `/path/to/my/thing.py:MyClass`\n\n    Returns\n    -------\n    bool\n        True if the specified python module, and attribute of the module exist.\n\n    Notes\n    -----\n    The attribute of the module could be a class, function, variable, etc.\n    \"\"\"\n    try:\n        modulePath, moduleAttributeName = separateModuleAndAttribute(pathAttr)\n    except ValueError:\n        return False\n\n    modulePath = pathlib.Path(modulePath)\n    if not modulePath.is_file():\n        return False\n\n    try:\n        userSpecifiedModule = importCustomPyModule(modulePath)\n\n    # Blanket except is okay since we are checking to see if a custom import will work.\n    except Exception:\n        return False\n\n    return moduleAttributeName in 
userSpecifiedModule.__dict__\n\n\ndef cleanPath(path, mpiRank=0, forceClean=False):\n    \"\"\"Recursively delete a path. This function checks for a few cases we know to be OK to delete: (1) Any\n    `TemporaryDirectoryChanger` or output cache instance and (2) anything under the ARMI `_FAST_PATH`.\n\n    Be careful with editing this! Do not make it a generic can-delete-anything function, because it could in theory\n    delete anything a user has write permissions on.\n\n    Returns\n    -------\n    success : bool\n        True if file was deleted. False if it was not.\n    \"\"\"\n    valid = False\n    if not os.path.exists(path):\n        return True\n\n    if forceClean:\n        # Any forceClean can be deleted\n        valid = True\n    elif pathlib.Path(path).is_relative_to(pathlib.Path(context.getFastPath())):\n        # If the path slated for deletion is a subdirectory of _FAST_PATH, then cool, delete.\n        # _FAST_PATH itself gets deleted on program exit.\n        valid = True\n\n    if not valid:\n        raise Exception(f\"You tried to delete {path}, but it does not seem safe to do so.\")\n\n    # Delete the file/directory from only one process\n    if mpiRank == context.MPI_RANK:\n        if os.path.exists(path) and os.path.isdir(path):\n            shutil.rmtree(path)\n        elif not os.path.isdir(path):\n            # it's just a file. Delete it.\n            os.remove(path)\n\n    # Deletions may not be immediate on Windows, so wait for it to finish.\n    maxLoops = 6\n    waitTime = 0.5\n    loopCounter = 0\n    while os.path.exists(path):\n        loopCounter += 1\n        if loopCounter > maxLoops:\n            break\n        sleep(waitTime)\n\n    return not os.path.exists(path)\n"
  },
  {
    "path": "armi/utils/plotting.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis module makes heavy use of matplotlib. Beware that plots generated with matplotlib may not free their memory, even\nafter the plot is closed, and excessive use of plotting functions may gobble up all of your machine's memory.\n\nTherefore, you should use these plotting tools judiciously. It is not advisable to, for instance, plot some sequence of\nobjects in a loop at every time node. 
If you start to see your memory usage grow inexplicably, you should question any\nplots that you are generating.\n\"\"\"\n\nimport collections\nimport itertools\nimport math\nimport os\nimport re\nfrom glob import glob\n\nimport matplotlib\nimport matplotlib.colors as mcolors\nimport matplotlib.patches\nimport matplotlib.pyplot as plt\nimport matplotlib.text as mpl_text\nimport numpy as np\nfrom matplotlib import cm\nfrom matplotlib.collections import PatchCollection\nfrom matplotlib.widgets import Slider\nfrom mpl_toolkits import axes_grid1\nfrom ordered_set import OrderedSet\n\nimport armi\nfrom armi import runLog\nfrom armi.bookkeeping import report\nfrom armi.materials import custom\nfrom armi.reactor import grids\nfrom armi.reactor.components import Circle, DerivedShape, Helix\nfrom armi.reactor.components.basicShapes import Hexagon, Rectangle, Square\nfrom armi.reactor.flags import Flags\nfrom armi.utils import hexagon, iterables, units\n\nLUMINANCE_WEIGHTS = np.array([0.3, 0.59, 0.11, 0.0])\n\n\ndef colorGenerator(skippedColors=10):\n    \"\"\"\n    Selects a color from the matplotlib css color database.\n\n    Parameters\n    ----------\n    skippedColors: int\n        Number of colors to skip in the matplotlib CSS color database when generating the next color. Without skipping\n        colors the next color may be similar to the previous color.\n\n    Notes\n    -----\n    Will cycle indefinitely to accommodate large cores. 
Colors will repeat.\n    \"\"\"\n    colors = list(mcolors.CSS4_COLORS)\n\n    for start in itertools.cycle(range(20, 20 + skippedColors)):\n        for i in range(start, len(colors), skippedColors):\n            yield colors[i]\n\n\ndef plotBlockDepthMap(\n    core,\n    param=\"pdens\",\n    fName=None,\n    bare=False,\n    cmapName=\"jet\",\n    labels=(),\n    labelFmt=\"{0:.3f}\",\n    legendMap=None,\n    fontSize=None,\n    minScale=None,\n    maxScale=None,\n    axisEqual=False,\n    makeColorBar=False,\n    cBarLabel=\"\",\n    title=\"\",\n    shuffleArrows=False,\n    titleSize=25,\n    depthIndex=0,\n):\n    \"\"\"\n    Plot a param distribution in xy space with the ability to page through depth.\n\n    Notes\n    -----\n    This is useful for visualizing the spatial distribution of a param through the core. Blocks could possibly not be in\n    alignment between assemblies, but the depths viewable are based on the first fuel assembly.\n\n    Parameters\n    ----------\n    The kwarg definitions are the same as those of ``plotFaceMap``.\n\n    depthIndex: int\n        The the index of the elevation to show block params.\n        The index is determined by the index of the blocks in the first fuel assembly.\n    \"\"\"\n    fuelAssem = core.getFirstAssembly(typeSpec=Flags.FUEL)\n    if not fuelAssem:\n        raise ValueError(\n            \"Could not find fuel assembly. This method uses the first fuel blocks mesh for the axial mesh of the plot. 
\"\n            \"Cannot proceed without fuel block.\"\n        )\n\n    # block mid point elevation\n    elevations = [elev for _b, elev in fuelAssem.getBlocksAndZ()]\n    data = []\n    for elevation in elevations:\n        paramValsAtElevation = []\n        for a in core:\n            paramValsAtElevation.append(a.getBlockAtElevation(elevation).p[param])\n        data.append(paramValsAtElevation)\n\n    data = np.array(data)\n\n    fig = plt.figure(figsize=(12, 12), dpi=100)\n    # Make these now, so they are still referenceable after plotFaceMap.\n    patches = _makeAssemPatches(core)\n    collection = PatchCollection(patches, cmap=cmapName, alpha=1.0)\n    texts = []\n\n    plotFaceMap(\n        core,\n        param=param,\n        vals=\"peak\",\n        data=None,  # max values so legend is set correctly\n        bare=bare,\n        cmapName=cmapName,\n        labels=labels,\n        labelFmt=labelFmt,\n        legendMap=legendMap,\n        fontSize=fontSize,\n        minScale=minScale,\n        maxScale=maxScale,\n        axisEqual=axisEqual,\n        makeColorBar=makeColorBar,\n        cBarLabel=cBarLabel,\n        title=title,\n        shuffleArrows=shuffleArrows,\n        titleSize=titleSize,\n        referencesToKeep=[patches, collection, texts],\n    )\n\n    # make space for the slider\n    fig.subplots_adjust(bottom=0.15)\n\n    ax_slider = fig.add_axes([0.1, 0.05, 0.8, 0.04])\n\n    # This controls what the slider does.\n    def update(i):\n        # int, since we are indexing an array.\n        i = int(i)\n        collection.set_array(data[i, :])\n        for valToPrint, text in zip(data[i, :], texts):\n            text.set_text(labelFmt.format(valToPrint))\n\n    # Slider doesn't seem to work unless assigned to variable\n    _slider = DepthSlider(ax_slider, \"Depth(cm)\", elevations, update, \"green\", valInit=depthIndex)\n\n    if fName:\n        plt.savefig(fName, dpi=150)\n        plt.close()\n    else:\n        plt.show()\n\n    return 
fName\n\n\ndef plotFaceMap(\n    core,\n    param=\"pdens\",\n    vals=\"peak\",\n    data=None,\n    fName=None,\n    bare=False,\n    cmapName=\"jet\",\n    labels=(),\n    labelFmt=\"{0:.3f}\",\n    legendMap=None,\n    fontSize=None,\n    minScale=None,\n    maxScale=None,\n    axisEqual=False,\n    makeColorBar=False,\n    cBarLabel=\"\",\n    title=\"\",\n    shuffleArrows=False,\n    titleSize=25,\n    referencesToKeep=None,\n):\n    \"\"\"\n    Plot a face map of the core.\n\n    Parameters\n    ----------\n    core: Core\n        The core to plot.\n\n    param : str, optional\n        The block-parameter to plot. Default: pdens\n\n    vals : str, optional\n        Can be 'peak', 'average', or 'sum'. The type of vals to produce. Will find peak, average, or sum of block values\n        in an assembly. Default: peak\n\n    data : list, optional\n        rather than using param and vals, use the data supplied as is. It must be in the\n        same order as iter(r).\n\n    fName : str, optional\n        File name to create. If none, will show on screen.\n\n    bare : bool, optional\n        If True, will skip axis labels, etc.\n\n    cmapName : str\n        The name of the matplotlib colormap to use. Default: jet\n        Other possibilities: http://matplotlib.org/examples/pylab_examples/show_colormaps.html\n\n    labels : list of str, optional\n        Data labels corresponding to data values.\n\n    labelFmt : str, optional\n        A format string that determines how the data is printed if ``labels`` is not provided.\n        E.g. 
``\"{:.1e}\"``\n\n    legendMap : list, optional\n        A tuple list of (value, label, description), to define the data in the legend.\n\n    fontSize : int, optional\n        Font size in points\n\n    minScale : float, optional\n        The minimum value for the low color on your colormap (to set scale yourself)\n        Default: autoscale\n\n    maxScale : float, optional\n        The maximum value for the high color on your colormap (to set scale yourself)\n        Default: autoscale\n\n    axisEqual : Boolean, optional\n        If True, horizontal and vertical axes are scaled equally such that a circle\n        appears as a circle rather than an ellipse.\n\n        If False, this scaling constraint is not imposed.\n\n    makeColorBar : Boolean, optional\n        If True, a vertical color bar is added on the right-hand side of the plot.\n\n        If False, no color bar is added.\n\n    cBarLabel : String, optional\n        If True, this string is the color bar quantity label.\n        If False, the color bar will have no label.\n        When makeColorBar=False, cBarLabel affects nothing.\n\n    title : String, optional\n        If True, the string is added as the plot title.\n        If False, no plot title is added.\n\n    shuffleArrows : list, optional\n        Adds arrows indicating fuel shuffling maneuvers\n\n    titleSize : int, optional\n        Size of title on plot\n\n    referencesToKeep : list, optional\n        References to previous plots you might want to plot on: patches, collection, texts.\n\n    Examples\n    --------\n    Plotting a BOL assembly type facemap with a legend::\n\n        >>> plotFaceMap(core, param='typeNumAssem', cmapName='RdYlBu')\n    \"\"\"\n    if referencesToKeep:\n        patches, collection, texts = referencesToKeep\n        fig, ax = plt.gcf(), plt.gca()\n    else:\n        fig, ax = plt.subplots(figsize=(12, 12), dpi=100)\n        # set patch (shapes such as hexagon) heat map values\n        patches = 
_makeAssemPatches(core)\n        collection = PatchCollection(patches, cmap=cmapName, alpha=1.0)\n        texts = []\n\n    ax.set_title(title, size=titleSize)\n\n    # get param vals\n    if data is None:\n        data = []\n        for a in core:\n            if vals == \"peak\":\n                data.append(a.getMaxParam(param))\n            elif vals == \"average\":\n                data.append(a.calcAvgParam(param))\n            elif vals == \"sum\":\n                data.append(a.calcTotalParam(param))\n            else:\n                raise ValueError(f\"{vals} is an invalid entry for `vals` in plotFaceMap. Use peak, average, or sum.\")\n    if not labels:\n        labels = [None] * len(data)\n    if len(data) != len(labels):\n        raise ValueError(\n            f\"Data had length {len(data)}, but labels had length {len(labels)}. They should be equal length.\"\n        )\n\n    collection.set_array(np.array(data))\n    if minScale or maxScale:\n        collection.set_clim([minScale, maxScale])\n    else:\n        collection.norm.autoscale(np.array(data))\n    ax.add_collection(collection)\n\n    # Makes text in the center of each shape displaying the values.\n    # (The text is either black or white depending on the background color it is written on)\n    _setPlotValText(ax, texts, core, data, labels, labelFmt, fontSize, collection)\n\n    # allow a color bar option\n    if makeColorBar:\n        collection2 = PatchCollection(patches, cmap=cmapName, alpha=1.0)\n        if minScale and maxScale:\n            collection2.set_array(np.array([minScale, maxScale]))\n        else:\n            collection2.set_array(np.array(data))\n\n        if \"radial\" in cBarLabel:\n            colbar = fig.colorbar(collection2, ticks=[x + 1 for x in range(max(data))], shrink=0.43)\n        else:\n            colbar = fig.colorbar(collection2, ax=ax, shrink=0.43)\n\n        colbar.set_label(cBarLabel, size=20)\n        colbar.ax.tick_params(labelsize=16)\n\n    if 
legendMap is not None:\n        legend = _createLegend(legendMap, collection)\n\n    else:\n        legend = None\n\n    if axisEqual:  # don't \"squish\" patches vertically or horizontally\n        ax.set_aspect(\"equal\", \"datalim\")\n\n    ax.autoscale_view(tight=True)\n\n    # make it 2-D, for now...\n    shuffleArrows = shuffleArrows or []\n    for sourceCoords, destinationCoords in shuffleArrows:\n        ax.annotate(\n            \"\",\n            xy=destinationCoords[:2],\n            xytext=sourceCoords[:2],\n            arrowprops={\"arrowstyle\": \"->\", \"color\": \"white\"},\n        )\n\n    if bare:\n        ax.set_xticks([])\n        ax.set_yticks([])\n        ax.spines[\"right\"].set_visible(False)\n        ax.spines[\"top\"].set_visible(False)\n        ax.spines[\"left\"].set_visible(False)\n        ax.spines[\"bottom\"].set_visible(False)\n    else:\n        ax.set_xlabel(\"x (cm)\")\n        ax.set_ylabel(\"y (cm)\")\n\n    if fName:\n        if legend:\n            # expand so the legend fits if necessary\n            pltKwargs = {\"bbox_extra_artists\": (legend,), \"bbox_inches\": \"tight\"}\n        else:\n            pltKwargs = {}\n        try:\n            plt.savefig(fName, dpi=150, **pltKwargs)\n        except IOError:\n            runLog.warning(\"Cannot update facemap at {0}: IOError. Is the file open?\".format(fName))\n        plt.close(fig)\n    elif referencesToKeep:\n        # Don't show yet, since it will be updated.\n        return fName\n    else:\n        # Never close figures after a .show()\n        # because they're being used interactively e.g.\n        # in a live tutorial or by the doc gallery\n        plt.show()\n\n    return fName\n\n\ndef close(fig=None):\n    \"\"\"\n    Wrapper for matplotlib close.\n\n    This is useful to avoid needing to import plotting and matplotlib. The plot functions cannot always close their\n    figure if it is going to be used somewhere else after becoming active (e.g. 
in reports or gallery examples).\n    \"\"\"\n    plt.close(fig)\n\n\ndef _makeAssemPatches(core):\n    \"\"\"Return a list of assembly shaped patches for each assembly.\"\"\"\n    patches = []\n\n    if isinstance(core.spatialGrid, grids.HexGrid):\n        nSides = 6\n    elif isinstance(core.spatialGrid, grids.ThetaRZGrid):\n        raise TypeError(\"This plot function is not currently supported for ThetaRZGrid grids.\")\n    else:\n        nSides = 4\n\n    pitch = core.getAssemblyPitch()\n    for a in core:\n        x, y, _ = a.spatialLocator.getLocalCoordinates()\n        if nSides == 6:\n            if core.spatialGrid.cornersUp:\n                orientation = 0\n            else:\n                orientation = math.pi / 2.0\n            assemPatch = matplotlib.patches.RegularPolygon(\n                (x, y), nSides, radius=pitch / math.sqrt(3), orientation=orientation\n            )\n        elif nSides == 4:\n            # for rectangle x, y is defined as sides instead of center\n            assemPatch = matplotlib.patches.Rectangle((x - pitch[0] / 2, y - pitch[1] / 2), *pitch)\n        else:\n            raise ValueError(f\"Unexpected number of sides: {nSides}.\")\n        patches.append(assemPatch)\n    return patches\n\n\ndef _setPlotValText(ax, texts, core, data, labels, labelFmt, fontSize, collection):\n    \"\"\"Write param values down, and return text so it can be edited later.\"\"\"\n    _ = core.getAssemblyPitch()\n    for a, val, label in zip(core, data, labels):\n        x, y, _ = a.spatialLocator.getLocalCoordinates()\n        cmap = collection.get_cmap()\n        patchColor = np.asarray(cmap(collection.norm(val)))\n        luminance = patchColor.dot(LUMINANCE_WEIGHTS)\n        dark = luminance < 0.5\n        if dark:\n            color = \"white\"\n        else:\n            color = \"black\"\n        # Write text on top of patch locations.\n        if label is None and labelFmt is not None:\n            # Write the value\n            labelText 
= labelFmt.format(val)\n            text = ax.text(\n                x,\n                y,\n                labelText,\n                zorder=1,\n                ha=\"center\",\n                va=\"center\",\n                fontsize=fontSize,\n                color=color,\n            )\n        elif label is not None:\n            text = ax.text(\n                x,\n                y,\n                label,\n                zorder=1,\n                ha=\"center\",\n                va=\"center\",\n                fontsize=fontSize,\n                color=color,\n            )\n        else:\n            # labelFmt was none, so they don't want any text plotted\n            continue\n        texts.append(text)\n\n\ndef _createLegend(legendMap, collection, size=9, shape=Hexagon):\n    \"\"\"Make special legend for the assembly face map plot with assembly counts, and Block Diagrams.\"\"\"\n\n    class AssemblyLegend:\n        \"\"\"\n        Custom Legend artist handler.\n\n        Matplotlib allows you to define a class that implements ``legend_artist`` to give you\n        full control over how the legend keys and labels are drawn. 
This is done here to get\n        Hexagons with Letters in them on the legend, which is not a built-in legend option.\n\n        See: http://matplotlib.org/users/legend_guide.html#implementing-a-custom-legend-handler\n        \"\"\"\n\n        def legend_artist(self, _legend, orig_handle, _fontsize, handlebox):\n            letter, index = orig_handle\n            x0, y0 = handlebox.xdescent, handlebox.ydescent\n            width, height = handlebox.width, handlebox.height\n            x = x0 + width / 2.0\n            y = y0 + height / 2.0\n            normVal = collection.norm(index)\n            cmap = collection.get_cmap()\n            colorRgb = cmap(normVal)\n            if shape == Hexagon:\n                patch = matplotlib.patches.RegularPolygon(\n                    (x, y),\n                    6,\n                    radius=height,\n                    orientation=math.pi / 2.0,\n                    facecolor=colorRgb,\n                    transform=handlebox.get_transform(),\n                )\n            elif shape == Rectangle:\n                patch = matplotlib.patches.Rectangle(\n                    (x - height / 2, y - height / 2),\n                    height * 2,\n                    height * 2,\n                    facecolor=colorRgb,\n                    transform=handlebox.get_transform(),\n                )\n            else:\n                patch = matplotlib.patches.Circle(\n                    (x, y),\n                    radius=height,\n                    facecolor=colorRgb,\n                    transform=handlebox.get_transform(),\n                )\n\n            luminance = np.array(colorRgb).dot(LUMINANCE_WEIGHTS)\n            dark = luminance < 0.5\n            if dark:\n                color = \"white\"\n            else:\n                color = \"black\"\n            handlebox.add_artist(patch)\n            txt = mpl_text.Text(x=x, y=y, text=letter, ha=\"center\", va=\"center\", size=7, color=color)\n            
handlebox.add_artist(txt)\n            return (patch, txt)\n\n    ax = plt.gca()\n    keys = []\n    labels = []\n    for value, label, description in legendMap:\n        keys.append((label, value))\n        labels.append(description)\n\n    legend = ax.legend(\n        keys,\n        labels,\n        handler_map={tuple: AssemblyLegend()},\n        loc=\"center left\",\n        bbox_to_anchor=(1.0, 0.5),\n        frameon=False,\n        prop={\"size\": size},\n    )\n    return legend\n\n\nclass DepthSlider(Slider):\n    \"\"\"Page slider used to view params at different depths.\"\"\"\n\n    def __init__(\n        self,\n        ax,\n        sliderLabel,\n        depths,\n        updateFunc,\n        selectedDepthColor,\n        fontsize=8,\n        valInit=0,\n        **kwargs,\n    ):\n        # The color of the currently displayed depth page.\n        self.selectedDepthColor = selectedDepthColor\n        self.nonSelectedDepthColor = \"w\"\n        self.depths = depths\n\n        # Make the selection depth buttons\n        self.depthSelections = []\n        numDepths = float(len(depths))\n        rectangleBot = 0\n        textYCoord = 0.5\n        # startBoundaries go from zero to just below 1.\n        leftBoundary = [i / numDepths for i, _depths in enumerate(depths)]\n        for leftBoundary, depth in zip(leftBoundary, depths):\n            # First depth (leftBoundary==0) is on, rest are off.\n            if leftBoundary == 0:\n                color = self.selectedDepthColor\n            else:\n                color = self.nonSelectedDepthColor\n            depthSelectBox = matplotlib.patches.Rectangle(\n                (leftBoundary, rectangleBot),\n                1.0 / numDepths,\n                1,\n                transform=ax.transAxes,\n                facecolor=color,\n            )\n            ax.add_artist(depthSelectBox)\n            self.depthSelections.append(depthSelectBox)\n\n            # Make text halfway into box\n            textXCoord = 
leftBoundary + 0.5 / numDepths\n            ax.text(\n                textXCoord,\n                textYCoord,\n                \"{:.1f}\".format(depth),\n                ha=\"center\",\n                va=\"center\",\n                transform=ax.transAxes,\n                fontsize=fontsize,\n            )\n\n        # Make forward and backward button\n        backwardArrow, forwardArrow = \"$\\u25c0$\", \"$\\u25b6$\"\n        divider = axes_grid1.make_axes_locatable(ax)\n        buttonWidthPercent = \"5%\"\n        backwardAxes = divider.append_axes(\"right\", size=buttonWidthPercent, pad=0.03)\n        forwardAxes = divider.append_axes(\"right\", size=buttonWidthPercent, pad=0.03)\n        self.backButton = matplotlib.widgets.Button(\n            backwardAxes,\n            label=backwardArrow,\n            color=self.nonSelectedDepthColor,\n            hovercolor=self.selectedDepthColor,\n        )\n        self.backButton.label.set_fontsize(fontsize)\n        self.backButton.on_clicked(self.previous)\n        self.forwardButton = matplotlib.widgets.Button(\n            forwardAxes,\n            label=forwardArrow,\n            color=self.nonSelectedDepthColor,\n            hovercolor=self.selectedDepthColor,\n        )\n        self.forwardButton.label.set_fontsize(fontsize)\n        self.forwardButton.on_clicked(self.next)\n\n        # init at end since slider will set val to 0, and it needs to have state\n        # setup before doing that\n        Slider.__init__(self, ax, sliderLabel, 0, len(depths), valinit=0, **kwargs)\n        self.on_changed(updateFunc)\n        self.set_val(valInit)  # need to set after updateFunc is added.\n\n        # Turn off value visibility since the buttons text shows the value\n        self.valtext.set_visible(False)\n\n    def set_val(self, val):\n        \"\"\"\n        Set the value and update the color.\n\n        Notes\n        -----\n        valmin/valmax are set on the parent to 0 and len(depths).\n        \"\"\"\n        
val = int(val)\n        # valmax is not allowed, since it is out of the array.\n        # valmin is allowed since 0 index is in depth array.\n        if val < self.valmin or val >= self.valmax:\n            # invalid, so ignore\n            return\n        # activate color is first since we still have access to self.val\n        self.updatePageDepthColor(val)\n        Slider.set_val(self, val)\n\n    def next(self, _event):\n        \"\"\"Move forward to the next depth (page).\"\"\"\n        self.set_val(self.val + 1)\n\n    def previous(self, _event):\n        \"\"\"Move backward to the previous depth (page).\"\"\"\n        self.set_val(self.val - 1)\n\n    def updatePageDepthColor(self, newVal):\n        \"\"\"Update the page colors.\"\"\"\n        self.depthSelections[self.val].set_facecolor(self.nonSelectedDepthColor)\n        self.depthSelections[newVal].set_facecolor(self.selectedDepthColor)\n\n\ndef plotAssemblyTypes(\n    assems: list = None,\n    fileName: str = None,\n    maxAssems: int = None,\n    showBlockAxMesh: bool = True,\n    yAxisLabel: str = None,\n    title: str = None,\n    hot: bool = True,\n) -> plt.Figure:\n    \"\"\"\n    Generate a plot showing the axial block and enrichment distributions of each assembly type in the core.\n\n    Parameters\n    ----------\n    assems: list\n        list of assembly objects to be plotted.\n    fileName : str or None\n        Base for filename to write, or None for just returning the fig\n    maxAssems: integer\n        maximum number of assemblies to plot in the assems list.\n    showBlockAxMesh: bool\n        if true, the axial mesh information will be displayed on the right side of the assembly plot.\n    yAxisLabel: str\n        Optionally, provide a label for the Y-axis.\n    title: str\n        Optionally, provide a title for the plot.\n    hot : bool, optional\n        If True, plot the hot block heights. 
If False, use cold heights from the inputs.\n\n    Returns\n    -------\n    fig : plt.Figure\n        The figure object created\n    \"\"\"\n    if maxAssems is not None and not isinstance(maxAssems, int):\n        raise TypeError(f\"Maximum assemblies should be an integer: {maxAssems} was of type {type(maxAssems)}.\")\n\n    numAssems = len(assems)\n    if maxAssems is None:\n        maxAssems = numAssems\n\n    if yAxisLabel is None:\n        yAxisLabel = \"Axial Heights (cm)\"\n\n    if title is None:\n        title = \"Assembly Designs\"\n\n    # Set assembly/block size constants\n    yBlockHeights = []\n    yBlockAxMesh = OrderedSet()\n    assemWidth = 5.0\n    assemSeparation = 0.3\n    xAssemLoc = 0.5\n    xAssemEndLoc = numAssems * (assemWidth + assemSeparation) + assemSeparation\n\n    # Setup figure\n    fig, ax = plt.subplots(figsize=(15, 15), dpi=300)\n    for index, assem in enumerate(assems):\n        isLastAssem = index == numAssems - 1\n        (xBlockLoc, yBlockHeights, yBlockAxMesh) = _plotBlocksInAssembly(\n            ax,\n            assem,\n            isLastAssem,\n            yBlockHeights,\n            yBlockAxMesh,\n            xAssemLoc,\n            xAssemEndLoc,\n            showBlockAxMesh,\n            hot,\n        )\n        xAxisLabel = re.sub(\" \", \"\\n\", assem.getType().upper())\n        ax.text(\n            xBlockLoc + assemWidth / 2.0,\n            -5,\n            xAxisLabel,\n            fontsize=13,\n            ha=\"center\",\n            va=\"top\",\n        )\n        xAssemLoc += assemWidth + assemSeparation\n\n    # Set up plot layout\n    ax.spines[\"right\"].set_visible(False)\n    ax.spines[\"top\"].set_visible(False)\n    ax.spines[\"bottom\"].set_visible(False)\n    ax.yaxis.set_ticks_position(\"left\")\n    yBlockHeights.insert(0, 0.0)\n    yBlockHeights.sort()\n    yBlockHeightDiffs = np.diff(yBlockHeights)  # Compute differential heights between each block\n    ax.set_yticks([0.0] + 
list(set(np.cumsum(yBlockHeightDiffs))))\n    ax.xaxis.set_visible(False)\n\n    ax.set_title(title, y=1.03)\n    ax.set_ylabel(yAxisLabel, labelpad=20)\n    ax.set_xlim([0.0, 0.5 + maxAssems * (assemWidth + assemSeparation)])\n\n    # Plot and save figure\n    ax.plot()\n    if fileName:\n        fig.savefig(fileName)\n        runLog.debug(f\"Writing assem layout {fileName} in {os.getcwd()}\")\n        plt.close(fig)\n\n    return fig\n\n\ndef _plotBlocksInAssembly(\n    axis,\n    assem,\n    isLastAssem,\n    yBlockHeights,\n    yBlockAxMesh,\n    xAssemLoc,\n    xAssemEndLoc,\n    showBlockAxMesh,\n    hot,\n):\n    # Set dictionary of pre-defined block types and colors for the plot\n    lightsage = \"xkcd:light sage\"\n    blockTypeColorMap = collections.OrderedDict(\n        {\n            \"fuel\": \"tomato\",\n            \"shield\": \"cadetblue\",\n            \"reflector\": \"darkcyan\",\n            \"aclp\": \"lightslategrey\",\n            \"plenum\": \"white\",\n            \"duct\": \"plum\",\n            \"control\": lightsage,\n            \"handling socket\": \"lightgrey\",\n            \"grid plate\": \"lightgrey\",\n            \"inlet nozzle\": \"lightgrey\",\n        }\n    )\n\n    # Initialize block positions\n    blockWidth = 5.0\n    yBlockLoc = 0\n    xBlockLoc = xAssemLoc\n    xTextLoc = xBlockLoc + blockWidth / 20.0\n    for b in assem:\n        # get block height\n        if hot:\n            blockHeight = b.getHeight()\n        else:\n            try:\n                blockHeight = b.getInputHeight()\n            except AttributeError:\n                raise ValueError(\n                    f\"Cannot plot cold height for block {b} in assembly {assem} because it does not have access to a \"\n                    \"blueprints through any of its parents. 
Either make sure that a blueprints is accessible or plot \"\n                    \"the hot heights instead.\"\n                )\n\n        # Get the basic text label for the block\n        try:\n            blockType = [bType for bType in blockTypeColorMap.keys() if b.hasFlags(Flags.fromString(bType))][0]\n            color = blockTypeColorMap[blockType]\n        except IndexError:\n            blockType = b.getType()\n            color = \"grey\"\n\n        # Get the detailed text label for the block\n        blockXsId = b.p.xsType\n        dLabel = \"\"\n        if b.hasFlags(Flags.FUEL):\n            dLabel = \" {:0.2f}%\".format(b.getFissileMassEnrich() * 100)\n        elif b.hasFlags(Flags.CONTROL):\n            blockType = \"ctrl\"\n            dLabel = \" {:0.2f}%\".format(b.getBoronMassEnrich() * 100)\n        dLabel += \" ({})\".format(blockXsId)\n\n        # Set up block rectangle\n        blockPatch = matplotlib.patches.Rectangle(\n            (xBlockLoc, yBlockLoc),\n            blockWidth,\n            blockHeight,\n            facecolor=color,\n            alpha=0.7,\n            edgecolor=\"k\",\n            lw=1.0,\n            ls=\"solid\",\n        )\n        axis.add_patch(blockPatch)\n        yBlockCenterLoc = yBlockLoc + blockHeight / 2.5\n        axis.text(\n            xTextLoc,\n            yBlockCenterLoc,\n            blockType.upper() + dLabel,\n            ha=\"left\",\n            fontsize=10,\n        )\n        yBlockLoc += blockHeight\n        yBlockHeights.append(yBlockLoc)\n\n        # Add location, block heights, and axial mesh points to ordered set\n        yBlockAxMesh.add((yBlockCenterLoc, blockHeight, b.p.axMesh))\n\n    # Add the block heights, block number of axial mesh points on the far right of the plot.\n    if isLastAssem and showBlockAxMesh:\n        xEndLoc = 0.5 + xAssemEndLoc\n        for bCenter, bHeight, axMeshPoints in yBlockAxMesh:\n            axis.text(\n                xEndLoc,\n                bCenter,\n     
           f\"{bHeight} cm ({axMeshPoints})\",\n                fontsize=10,\n                ha=\"left\",\n            )\n\n    return xBlockLoc, yBlockHeights, yBlockAxMesh\n\n\ndef plotRadialReactorLayouts(reactor):\n    \"\"\"Generate a radial layout image of the converted reactor core.\"\"\"\n    bpAssems = list(reactor.blueprints.assemblies.values())\n    assemsToPlot = []\n    for bpAssem in bpAssems:\n        coreAssems = reactor.core.getAssemblies(bpAssem.p.flags)\n        if not coreAssems:\n            continue\n        assemsToPlot.append(coreAssems[0])\n\n    # Obtain the plot numbering based on the existing files so that existing plots are not overwritten.\n    start = 0\n    existingFiles = glob(f\"{reactor.core.name}AssemblyTypes\" + \"*\" + \".png\")\n    # This loops over the existing files for the assembly types outputs and makes a unique integer value so that plots\n    # are not overwritten. The regular expression here captures the first integer as AssemblyTypesX and then ensures\n    # that the numbering in the next enumeration below is 1 above that.\n    for f in existingFiles:\n        newStart = int(re.search(r\"\\d+\", f).group())\n        if newStart > start:\n            start = newStart\n\n    figs = []\n    for plotNum, assemBatch in enumerate(iterables.chunk(assemsToPlot, 6), start=start + 1):\n        assemPlotName = f\"{reactor.core.name}AssemblyTypes{plotNum}-rank{armi.MPI_RANK}.png\"\n        fig = plotAssemblyTypes(assemBatch, assemPlotName, maxAssems=6, showBlockAxMesh=True)\n        figs.append(fig)\n\n    return figs\n\n\ndef plotBlockFlux(core, fName=None, bList=None, peak=False, adjoint=False, bList2=[]):\n    \"\"\"\n    Produce energy spectrum plot of real and/or adjoint flux in one or more blocks.\n\n    Parameters\n    ----------\n    core : Core\n        Core object\n    fName : str, optional\n        the name of the plot file to produce. If none, plot will be shown. 
A text file with\n        the flux values will also be generated if this is non-empty.\n    bList : iterable, optional\n        is a single block or a list of blocks to average over. If no bList, full core is assumed.\n    peak : bool, optional\n        a flag that will produce the peak as well as the average on the plot.\n    adjoint : bool, optional\n        plot the adjoint as well.\n    bList2 : list, optional\n        a separate list of blocks that will also be plotted on a separate axis on the same plot.\n        This is useful for comparing flux in some blocks with flux in some other blocks.\n    \"\"\"\n\n    class BlockListFlux:\n        def __init__(self, nGroup, blockList=[], adjoint=False, peak=False, primary=False):\n            self.nGroup = nGroup\n            self.blockList = blockList\n            self.adjoint = adjoint\n            self.peak = peak\n            self.avgHistogram = None\n            self.eHistogram = None\n            self.peakHistogram = None\n            self.E = None\n\n            if not blockList:\n                self.avgFlux = np.zeros(self.nGroup)\n                self.peakFlux = np.zeros(self.nGroup)\n                self.lineAvg = \"-\"\n                self.linePeak = \"-\"\n            else:\n                self.avgFlux = np.zeros(self.nGroup)\n                self.peakFlux = np.zeros(self.nGroup)\n\n                if self.adjoint:\n                    self.labelAvg = \"Average Adjoint Flux\"\n                    self.labelPeak = \"Peak Adjoint Flux\"\n                else:\n                    self.labelAvg = \"Average Flux\"\n                    self.labelPeak = \"Peak Flux\"\n\n                if primary:\n                    self.lineAvg = \"-\"\n                    self.linePeak = \"-\"\n                else:\n                    self.lineAvg = \"r--\"\n                    self.linePeak = \"k--\"\n\n        def calcAverage(self):\n            for b in self.blockList:\n                thisFlux = 
np.array(b.getMgFlux(adjoint=self.adjoint))\n                self.avgFlux += np.array(thisFlux)\n                if sum(thisFlux) > sum(self.peakFlux):\n                    self.peakFlux = thisFlux\n\n            self.avgFlux = self.avgFlux / len(bList)\n\n        def setEnergyStructure(self, upperEnergyBounds):\n            self.E = [eMax / 1e6 for eMax in upperEnergyBounds]\n\n        def makePlotHistograms(self):\n            self.eHistogram, self.avgHistogram = makeHistogram(self.E, self.avgFlux)\n            if self.peak:\n                _, self.peakHistogram = makeHistogram(self.E, self.peakFlux)\n\n        def checkSize(self):\n            if len(self.E) != len(self.avgFlux):\n                runLog.error(self.avgFlux)\n                raise\n\n        def getTable(self):\n            return enumerate(zip(self.E, self.avgFlux, self.peakFlux))\n\n    if bList is None:\n        bList = core.getBlocks()\n    bList = list(bList)\n    if adjoint and bList2:\n        runLog.warning(\"Cannot plot adjoint flux with bList2 argument\")\n        return\n    elif adjoint:\n        bList2 = bList\n\n    try:\n        G = len(core.lib.neutronEnergyUpperBounds)\n    except Exception:\n        runLog.warning(\"No ISOTXS library attached so no flux plots.\")\n        return\n\n    BlockListFluxes = set()\n    bf1 = BlockListFlux(G, blockList=bList, peak=peak, primary=True)\n    BlockListFluxes.add(bf1)\n    if bList2:\n        bf2 = BlockListFlux(G, blockList=bList2, adjoint=adjoint, peak=peak)\n        BlockListFluxes.add(bf2)\n\n    for bf in BlockListFluxes:\n        bf.calcAverage()\n        bf.setEnergyStructure(core.lib.neutronEnergyUpperBounds)\n        bf.checkSize()\n        bf.makePlotHistograms()\n\n    if fName:\n        # write a little flux text file\n        txtFileName = os.path.splitext(fName)[0] + \".txt\"\n        with open(txtFileName, \"w\") as f:\n            f.write(\"{0:16s} {1:16s} {2:16s}\\n\".format(\"Energy_Group\", \"Average_Flux\", 
\"Peak_Flux\"))\n            for _, (eMax, avgFlux, peakFlux) in bf1.getTable():\n                f.write(\"{0:12E} {1:12E} {2:12E}\\n\".format(eMax, avgFlux, peakFlux))\n\n    if max(bf1.avgFlux) <= 0.0:\n        runLog.warning(f\"Cannot plot flux with maxval=={bf1.avgFlux} in {bList[0]}\")\n        return\n\n    plt.figure()\n    plt.plot(bf1.eHistogram, bf1.avgHistogram, bf1.lineAvg, label=bf1.labelAvg)\n\n    if peak:\n        plt.plot(bf1.eHistogram, bf1.peakHistogram, bf1.linePeak, label=bf1.labelPeak)\n\n    ax = plt.gca()\n    ax.set_xscale(\"log\")\n    ax.set_yscale(\"log\")\n    plt.xlabel(\"Energy (MeV)\")\n    plt.ylabel(\"Flux (n/cm$^2$/s)\")\n\n    if peak or bList2:\n        plt.legend(loc=\"lower right\")\n\n    plt.grid(color=\"0.70\")\n    if bList2:\n        if adjoint:\n            plt.twinx()\n            plt.ylabel(\"Adjoint Flux (n/cm$^2$/s)\", rotation=270)\n            ax2 = plt.gca()\n            ax2.set_yscale(\"log\")\n        plt.plot(bf2.eHistogram, bf2.avgHistogram, bf2.lineAvg, label=bf2.labelAvg)\n        if peak and not adjoint:\n            plt.plot(bf2.eHistogram, bf2.peakHistogram, bf2.linePeak, label=bf2.labelPeak)\n        plt.legend(loc=\"lower left\")\n    plt.title(\"Group flux\")\n\n    if fName:\n        plt.savefig(fName)\n        report.setData(\n            f\"Flux Plot {os.path.split(fName)[1]}\",\n            os.path.abspath(fName),\n            report.FLUX_PLOT,\n        )\n        plt.close()\n    else:\n        # Never close interactive plots\n        plt.show()\n\n\ndef makeHistogram(x, y):\n    \"\"\"\n    Take a list of x and y values, and return a histogram version.\n\n    Good for plotting multigroup flux spectrum or cross sections.\n    \"\"\"\n    if not len(x) == len(y):\n        raise ValueError(\n            \"Cannot make a histogram unless the x and y lists are the same size.\"\n            + \"len(x) == {} and len(y) == {}\".format(len(x), len(y))\n        )\n    n = len(x)\n    xHistogram = 
np.zeros(2 * n)\n    yHistogram = np.zeros(2 * n)\n    for i in range(n):\n        lower = 2 * i\n        upper = 2 * i + 1\n        xHistogram[lower] = x[i - 1]\n        xHistogram[upper] = x[i]\n        yHistogram[lower] = y[i]\n        yHistogram[upper] = y[i]\n    xHistogram[0] = x[0] / 2.0\n    return xHistogram, yHistogram\n\n\ndef _makeBlockPinPatches(block, cold):\n    \"\"\"Return lists of block component patches and corresponding data and names (which relates to material of the\n    component for later plot-coloring/legend) for a single block.\n\n    Takes in a block that must have a spatialGrid attached as well as a variable which signifies whether the dimensions\n    of the components are at hot or cold temps. When cold is set to true, you would get the BOL cold temp dimensions.\n\n    Parameters\n    ----------\n    block : Block\n    cold : bool\n        true for cold temps, hot = false\n\n    Returns\n    -------\n    patches : list\n        list of patches for block components\n    data : list\n        list of the materials these components are made of\n    name : list\n        list of the names of these components\n    \"\"\"\n    patches = []\n    data = []\n    names = []\n    cornersUp = False\n    if isinstance(block.spatialGrid, grids.HexGrid):\n        largestPitch, comp = block.getPitch(returnComp=True)\n        cornersUp = block.spatialGrid.cornersUp\n    elif isinstance(block.spatialGrid, grids.ThetaRZGrid):\n        raise TypeError(\"This plot function is not currently supported for ThetaRZGrid grids.\")\n    else:\n        largestPitch, comp = block.getPitch(returnComp=True)\n        if block.getPitch()[0] != block.getPitch()[1]:\n            raise ValueError(\"Only works for blocks with equal length and width.\")\n\n    sortedComps = sorted(block, reverse=True)\n\n    derivedComponents = block.getComponentsOfShape(DerivedShape)\n    if len(derivedComponents) == 1:\n        derivedComponent = derivedComponents[0]\n        
sortedComps.remove(derivedComponent)\n        cName = derivedComponent.name\n\n        if isinstance(derivedComponent.material, custom.Custom):\n            material = derivedComponent.p.customIsotopicsName\n        else:\n            material = derivedComponent.material.name\n\n        location = comp.spatialLocator\n        if isinstance(location, grids.MultiIndexLocation):\n            location = location[0]\n        x, y, _ = location.getLocalCoordinates()\n        if isinstance(comp, Hexagon):\n            orient = math.pi / 6 if cornersUp else 0\n            derivedPatch = matplotlib.patches.RegularPolygon(\n                (x, y), 6, radius=largestPitch / math.sqrt(3), orientation=orient\n            )\n        elif isinstance(comp, Square):\n            derivedPatch = matplotlib.patches.Rectangle(\n                (x - largestPitch[0] / 2, y - largestPitch[0] / 2),\n                largestPitch[0],\n                largestPitch[0],\n            )\n        else:\n            raise TypeError(\n                f\"Shape of the pitch-defining element is not a Square or Hex it is {comp.shape}, \"\n                \"cannot plot for this type of block.\"\n            )\n        patches.append(derivedPatch)\n        data.append(material)\n        names.append(cName)\n\n    for component in sortedComps:\n        locs = component.spatialLocator\n        if not isinstance(locs, grids.MultiIndexLocation):\n            # make a single location a list to iterate.\n            locs = [locs]\n        for loc in locs:\n            x, y, _ = loc.getLocalCoordinates()\n\n            # goes through each location in stack order\n            blockPatches = _makeComponentPatch(component, (x, y), cold, cornersUp)\n            for element in blockPatches:\n                patches.append(element)\n\n                if isinstance(component.material, custom.Custom):\n                    material = component.p.customIsotopicsName\n                else:\n                    material = 
component.material.name\n\n                data.append(material)\n                names.append(component.name)\n\n    return patches, data, names\n\n\ndef _makeComponentPatch(component, position, cold, cornersUp=False):\n    \"\"\"Makes a component shaped patch to later be used for making block diagrams.\n\n    Parameters\n    ----------\n    component: a component of a block\n    position: tuple\n        (x, y) position\n    cold: bool\n        True if looking for dimension at cold temps\n    cornersUp: bool, optional\n        If this is a HexBlock, is it corners-up or flats-up?\n\n    Returns\n    -------\n    blockPatch: list\n        A list of Patch objects that together represent a component in the diagram.\n\n    Notes\n    -----\n    Currently accepts components of shape Circle, Helix, Hexagon, or Square\n    \"\"\"\n    x = position[0]\n    y = position[1]\n\n    if isinstance(component, Helix):\n        blockPatch = matplotlib.patches.Wedge(\n            (\n                x + component.getDimension(\"helixDiameter\", cold=cold) / 2 * math.cos(math.pi / 6),\n                y + component.getDimension(\"helixDiameter\", cold=cold) / 2 * math.sin(math.pi / 6),\n            ),\n            component.getDimension(\"od\", cold=cold) / 2,\n            0,\n            360,\n            width=(component.getDimension(\"od\", cold=cold) / 2) - (component.getDimension(\"id\", cold=cold) / 2),\n        )\n    elif isinstance(component, Circle):\n        blockPatch = matplotlib.patches.Wedge(\n            (x, y),\n            component.getDimension(\"od\", cold=cold) / 2,\n            0,\n            360,\n            width=(component.getDimension(\"od\", cold=cold) / 2) - (component.getDimension(\"id\", cold=cold) / 2),\n        )\n    elif isinstance(component, Hexagon):\n        angle = 0 if cornersUp else 30\n        outerPoints = np.array(hexagon.corners(angle) * component.getDimension(\"op\", cold=cold))\n        blockPatch = []\n\n        if 
component.getDimension(\"ip\", cold=cold) != 0:\n            # a hexagonal ring\n            innerPoints = np.array(hexagon.corners(angle) * component.getDimension(\"ip\", cold=cold))\n            for n in range(6):\n                corners = [\n                    innerPoints[n],\n                    innerPoints[(n + 1) % 6],\n                    outerPoints[(n + 1) % 6],\n                    outerPoints[n],\n                ]\n                patch = matplotlib.patches.Polygon(corners, fill=True)\n                blockPatch.append(patch)\n        else:\n            # a simple hexagon\n            for n in range(6):\n                corners = [\n                    outerPoints[(n + 1) % 6],\n                    outerPoints[n],\n                ]\n                patch = matplotlib.patches.Polygon(corners, fill=True)\n                blockPatch.append(patch)\n    elif isinstance(component, Rectangle):\n        if component.getDimension(\"widthInner\", cold=cold) != 0:\n            innerPoints = np.array(\n                [\n                    [\n                        x + component.getDimension(\"widthInner\", cold=cold) / 2,\n                        y + component.getDimension(\"lengthInner\", cold=cold) / 2,\n                    ],\n                    [\n                        x + component.getDimension(\"widthInner\", cold=cold) / 2,\n                        y - component.getDimension(\"lengthInner\", cold=cold) / 2,\n                    ],\n                    [\n                        x - component.getDimension(\"widthInner\", cold=cold) / 2,\n                        y - component.getDimension(\"lengthInner\", cold=cold) / 2,\n                    ],\n                    [\n                        x - component.getDimension(\"widthInner\", cold=cold) / 2,\n                        y + component.getDimension(\"lengthInner\", cold=cold) / 2,\n                    ],\n                ]\n            )\n\n            outerPoints = np.array(\n                [\n    
                [\n                        x + component.getDimension(\"widthOuter\", cold=cold) / 2,\n                        y + component.getDimension(\"lengthOuter\", cold=cold) / 2,\n                    ],\n                    [\n                        x + component.getDimension(\"widthOuter\", cold=cold) / 2,\n                        y - component.getDimension(\"lengthOuter\", cold=cold) / 2,\n                    ],\n                    [\n                        x - component.getDimension(\"widthOuter\", cold=cold) / 2,\n                        y - component.getDimension(\"lengthOuter\", cold=cold) / 2,\n                    ],\n                    [\n                        x - component.getDimension(\"widthOuter\", cold=cold) / 2,\n                        y + component.getDimension(\"lengthOuter\", cold=cold) / 2,\n                    ],\n                ]\n            )\n            blockPatch = []\n            for n in range(4):\n                corners = [\n                    innerPoints[n],\n                    innerPoints[(n + 1) % 4],\n                    outerPoints[(n + 1) % 4],\n                    outerPoints[n],\n                ]\n                patch = matplotlib.patches.Polygon(corners, fill=True)\n                blockPatch.append(patch)\n        else:\n            # Just make it a rectangle\n            blockPatch = matplotlib.patches.Rectangle(\n                (\n                    x - component.getDimension(\"widthOuter\", cold=cold) / 2,\n                    y - component.getDimension(\"lengthOuter\", cold=cold) / 2,\n                ),\n                component.getDimension(\"widthOuter\", cold=cold),\n                component.getDimension(\"lengthOuter\", cold=cold),\n            )\n\n    if isinstance(blockPatch, list):\n        return blockPatch\n\n    return [blockPatch]\n\n\ndef plotBlockDiagram(block, fName, cold, cmapName=\"RdYlBu\", materialList=None, fileFormat=\"svg\"):\n    \"\"\"Given a Block with a spatial Grid, plot 
the diagram of it with all of its components (wire, duct, coolant, etc).\n\n    Parameters\n    ----------\n    block : Block\n    fName : str\n        Name of the file to save to\n    cold : bool\n        True is for cold temps, False is hot\n    cmapName : str\n        name of a colorMap to use for block colors\n    materialList : list\n        A list of material names across all blocks to be plotted so that same material on all diagrams will have the\n        same color\n    fileFormat : str\n        The format to save the picture as, e.g. svg, png, jpg, etc.\n    \"\"\"\n    _, ax = plt.subplots(figsize=(20, 20), dpi=200)\n\n    if block.spatialGrid is None:\n        return None\n\n    # building a list of materials\n    if materialList is None:\n        materialList = []\n        for component in block:\n            if isinstance(component.material, custom.Custom):\n                materialName = component.p.customIsotopicsName\n            else:\n                materialName = component.material.name\n            if materialName not in materialList:\n                materialList.append(materialName)\n\n    materialMap = {material: ai for ai, material in enumerate(np.unique(materialList))}\n    allColors = np.array(list(materialMap.values()))\n\n    # build the geometric shapes on the plot\n    patches, data, _ = _makeBlockPinPatches(block, cold)\n    collection = PatchCollection(patches, cmap=cmapName, alpha=1.0)\n\n    ourColors = np.array([materialMap[materialName] for materialName in data])\n    collection.set_array(ourColors)\n    ax.add_collection(collection)\n    collection.norm.autoscale(allColors)\n\n    # set up plot axis, labels and legends\n    legendMap = [(materialMap[materialName], \"\", f\"{materialName}\") for materialName in np.unique(data)]\n    legend = _createLegend(legendMap, collection, size=50, shape=Rectangle)\n    pltKwargs = {\"bbox_extra_artists\": (legend,), \"bbox_inches\": \"tight\"}\n\n    ax.set_xticks([])\n    
ax.set_yticks([])\n    ax.spines[\"right\"].set_visible(False)\n    ax.spines[\"top\"].set_visible(False)\n    ax.spines[\"left\"].set_visible(False)\n    ax.spines[\"bottom\"].set_visible(False)\n    ax.margins(0)\n    plt.savefig(fName, format=fileFormat, **pltKwargs)\n    plt.close()\n\n    return os.path.abspath(fName)\n\n\ndef plotScatterMatrix(scatterMatrix, scatterTypeLabel=\"\", fName=None):\n    \"\"\"Plots a matrix to show scattering.\"\"\"\n    img = plt.imshow(scatterMatrix.todense(), interpolation=\"nearest\")\n    plt.grid(color=\"0.70\")\n    plt.xlabel(\"From group\")\n    plt.ylabel(\"To group\")\n    plt.title(f\"{scatterTypeLabel} scattering XS\")\n    plt.colorbar()\n\n    if fName:\n        plt.savefig(fName)\n        plt.close()\n    else:\n        plt.show()\n\n    return img\n\n\ndef plotNucXs(isotxs, nucNames, xsNames, fName=None, label=None, noShow=False, title=None):\n    \"\"\"\n    Generates a XS plot for a nuclide on the ISOTXS library.\n\n    Parameters\n    ----------\n    isotxs : IsotxsLibrary\n        A collection of cross sections (XS) for both neutron and gamma reactions.\n    nucNames : str or list\n        The nuclides to plot\n    xsNames : str or list\n        the XS to plot e.g. n,g, n,f, nalph, etc. see xsCollections for actual names.\n    fName : str, optional\n        if fName is given, the file will be written rather than plotting to screen\n    label : str, optional\n        is an optional label for image legends, useful in ipython sessions.\n    noShow : bool, optional\n        Won't finalize plot. 
Useful for using this to make custom plots.\n\n    Examples\n    --------\n    >>> l = ISOTXS()\n    >>> plotNucXs(l, \"U238NA\", \"fission\")\n\n    >>> # Plot n,g for all xenon and krypton isotopes\n    >>> f = lambda name: \"XE\" in name or \"KR\" in name\n    >>> plotNucXs(l, sorted(filter(f, l.nuclides.keys())), itertools.repeat(\"nGamma\"))\n\n    See Also\n    --------\n    plotScatterMatrix\n    \"\"\"\n    # convert all input to lists\n    if isinstance(nucNames, str):\n        nucNames = [nucNames]\n    if isinstance(xsNames, str):\n        xsNames = [xsNames]\n\n    for nucName, xsName in zip(nucNames, xsNames):\n        nuc = isotxs[nucName]\n        thisLabel = label or \"{0} {1}\".format(nucName, xsName)\n        x = isotxs.neutronEnergyUpperBounds / 1e6\n        y = nuc.micros[xsName]\n        plt.plot(x, y, \"-\", label=thisLabel, drawstyle=\"steps-post\")\n\n    ax = plt.gca()\n    ax.set_xscale(\"log\")\n    ax.set_yscale(\"log\")\n    plt.grid(color=\"0.70\")\n    plt.title(title or f\"microscopic XS from {isotxs}\")\n    plt.xlabel(\"Energy (MeV)\")\n    plt.ylabel(\"microscopic XS (barns)\")\n    plt.legend()\n\n    if fName:\n        plt.savefig(fName)\n        plt.close()\n    elif not noShow:\n        plt.show()\n\n\ndef plotConvertedBlock(sourceBlock, convertedBlock, fName=None):\n    \"\"\"Render an image of the converted block.\"\"\"\n    runLog.extra(f\"Plotting equivalent cylindrical block of {sourceBlock}\")\n    fig, ax = plt.subplots()\n    fig.patch.set_visible(False)\n    ax.patch.set_visible(False)\n    ax.axis(\"off\")\n    patches = []\n    colors = []\n    for circleComp in convertedBlock:\n        innerR = circleComp.getDimension(\"id\") / 2.0\n        outerR = circleComp.getDimension(\"od\") / 2.0\n        runLog.debug(\"Plotting {:40s} with {:10.3f} {:10.3f} \".format(circleComp, innerR, outerR))\n        circle = matplotlib.patches.Wedge((0.0, 0.0), outerR, 0, 360.0, width=outerR - innerR)\n        patches.append(circle)\n  
      colors.append(circleComp.density())\n\n    p = PatchCollection(patches, alpha=1.0, linewidths=0.1, cmap=cm.YlGn)\n    p.set_array(np.array(colors))\n    ax.add_collection(p)\n    ax.autoscale_view(True, True, True)\n    ax.set_aspect(\"equal\")\n    fig.tight_layout()\n\n    if fName:\n        plt.savefig(fName)\n        plt.close()\n    else:\n        plt.show()\n\n    return fName\n\n\ndef plotConvertedRZTReactor(reactor, fNameBase=None):\n    \"\"\"\n    Generate plots for the converted RZT reactor.\n\n    Parameters\n    ----------\n    fNameBase : str, optional\n        A name that will form the basis of the N plots that are generated by this method. Will get split on extension\n        and have numbers added. Should be like ``coreMap.png``.\n    \"\"\"\n    runLog.info(f\"Generating plot(s) of the converted {str(reactor.core.geomType).upper()} reactor\")\n    figs = []\n    colConv = matplotlib.colors.ColorConverter()\n    colGen = colorGenerator(5)\n    blockColors = {}\n    thetaMesh, radialMesh, axialMesh = _getReactorMeshCoordinates(reactor)\n    innerTheta = 0.0\n    for i, outerTheta in enumerate(thetaMesh):\n        fig, ax = plt.subplots(figsize=(12, 12))\n        innerRadius = 0.0\n        for outerRadius in radialMesh:\n            innerAxial = 0.0\n            for outerAxial in axialMesh:\n                b = _getBlockAtMeshPoint(\n                    reactor,\n                    innerTheta,\n                    outerTheta,\n                    innerRadius,\n                    outerRadius,\n                    innerAxial,\n                    outerAxial,\n                )\n                blockType = b.getType()\n                blockColor = _getBlockColor(colConv, colGen, blockColors, blockType)\n                if blockColor is not None:\n                    blockColors[blockType] = blockColor\n                blockPatch = matplotlib.patches.Rectangle(\n                    (innerRadius, innerAxial),\n                    (outerRadius - 
innerRadius),\n                    (outerAxial - innerAxial),\n                    facecolor=blockColors[blockType],\n                    linewidth=0,\n                    alpha=0.7,\n                )\n                ax.add_patch(blockPatch)\n                innerAxial = outerAxial\n            innerRadius = outerRadius\n        ax.set_title(\n            \"{} Core Map from {} to {:.4f} revolutions\".format(\n                str(reactor.core.geomType).upper(),\n                innerTheta * units.RAD_TO_REV,\n                outerTheta * units.RAD_TO_REV,\n            ),\n            y=1.03,\n        )\n        ax.set_xticks([0.0] + radialMesh)\n        ax.set_yticks([0.0] + axialMesh)\n        ax.tick_params(axis=\"both\", which=\"major\", labelsize=11, length=0, width=0)\n        ax.grid()\n        labels = ax.get_xticklabels()\n        for label in labels:\n            label.set_rotation(270)\n        handles = []\n        labels = []\n        for blockType, blockColor in blockColors.items():\n            line = matplotlib.lines.Line2D([], [], color=blockColor, markersize=15, label=blockType)\n            handles.append(line)\n            labels.append(line.get_label())\n\n        ax.set_xlabel(\"RADIAL MESH (CM)\", labelpad=20)\n        ax.set_ylabel(\"AXIAL MESH (CM)\", labelpad=20)\n        if fNameBase:\n            root, ext = os.path.splitext(fNameBase)\n            fName = root + f\"{i}\" + ext\n            plt.savefig(fName)\n            plt.close()\n        else:\n            figs.append(fig)\n        innerTheta = outerTheta\n\n    return figs\n\n\ndef _getReactorMeshCoordinates(reactor):\n    \"\"\"A helper for plotConvertedRZTReactor.\"\"\"\n    thetaMesh, radialMesh, axialMesh = reactor.core.findAllMeshPoints(applySubMesh=False)\n    thetaMesh.remove(0.0)\n    radialMesh.remove(0.0)\n    axialMesh.remove(0.0)\n    return thetaMesh, radialMesh, axialMesh\n\n\ndef _getBlockAtMeshPoint(reactor, innerTheta, outerTheta, innerRadius, outerRadius, 
innerAxial, outerAxial):\n    \"\"\"A helper for plotConvertedRZTReactor.\"\"\"\n    for b in reactor.core.iterBlocks():\n        blockMidTh, blockMidR, blockMidZ = b.spatialLocator.getGlobalCoordinates(nativeCoords=True)\n        if (blockMidTh >= innerTheta) and (blockMidTh <= outerTheta):\n            if (blockMidR >= innerRadius) and (blockMidR <= outerRadius):\n                if (blockMidZ >= innerAxial) and (blockMidZ <= outerAxial):\n                    return b\n\n    raise ValueError(\n        \"No block found between ({}, {}), ({}, {}), ({}, {})\\nLast block had TRZ= {} {} {}\".format(\n            innerTheta,\n            outerTheta,\n            innerRadius,\n            outerRadius,\n            innerAxial,\n            outerAxial,\n            blockMidTh,\n            blockMidR,\n            blockMidZ,\n        )\n    )\n\n\ndef _getBlockColor(colConverter, colGenerator, blockColors, blockType):\n    \"\"\"A helper for plotConvertedRZTReactor.\"\"\"\n    nextColor = None\n    if blockType not in blockColors:\n        if \"fuel\" in blockType:\n            nextColor = \"tomato\"\n        elif \"structure\" in blockType:\n            nextColor = \"lightgrey\"\n        elif \"radial shield\" in blockType:\n            nextColor = \"lightgrey\"\n        elif \"duct\" in blockType:\n            nextColor = \"grey\"\n        else:\n            while True:\n                try:\n                    nextColor = next(colGenerator)\n                    colConverter.to_rgba(nextColor)\n                    break\n                except ValueError:\n                    continue\n\n    return nextColor\n"
  },
  {
    "path": "armi/utils/properties.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"This module contains methods for adding properties with custom behaviors to classes.\"\"\"\n\nimport numpy as np\n\n\ndef areEqual(val1, val2, relativeTolerance=0.0):\n    hackEqual = numpyHackForEqual(val1, val2)\n    if hackEqual or not relativeTolerance:  # takes care of dictionaries and strings.\n        return hackEqual\n    return np.allclose(val1, val2, rtol=relativeTolerance, atol=0.0)  # does not work for dictionaries or strings\n\n\ndef numpyHackForEqual(val1, val2):\n    \"\"\"Checks lots of types for equality like strings and dicts.\"\"\"\n    # when doing this with numpy arrays you get an array of booleans which causes the value error\n    if isinstance(val1, np.ndarray) and isinstance(val2, np.ndarray):\n        if val1.size != val2.size:\n            return False\n\n    notEqual = val1 != val2\n    try:  # should work for everything but numpy arrays\n        if isinstance(notEqual, np.ndarray) and notEqual.size == 0:\n            return True\n        return not notEqual.__bool__()\n    except (AttributeError, ValueError):  # from comparing 2 numpy arrays\n        return not notEqual.any()\n\n\ndef createImmutableProperty(name, dependencyAction, doc):\n    \"\"\"Create a property that raises useful AttributeErrors when the attribute has not been assigned.\n\n    Parameters\n    ----------\n    name : str\n        Name of the property. 
This is unfortunately necessary, because the method does not know the name of\n        the property being assigned by the developer.\n\n    dependencyAction : str\n        Description of an action that needs to be performed in order to set the value of the property.\n\n    doc : str\n        Docstring of the property.\n\n    See Also\n    --------\n    armi.utils.properties.unlockImmutableProperties\n    armi.utils.properties.lockImmutableProperties\n\n    Examples\n    --------\n    The following example is essentially exactly how this should be used.\n\n    >>> class SomeClass:\n    ...     myNum = createImmutableProperty(\"myNum\", \"You must invoke the initialize() method\", \"My random number\")\n    ...\n    ...     def initialize(self, val):\n    ...         unlockImmutableProperties(self)\n    ...         try:\n    ...             self.myNum = val\n    ...         finally:\n    ...             lockImmutableProperties(self)\n    >>> sc = SomeClass()\n    >>> sc.myNum.__doc__\n    My Random Number\n    >>> sc.myNum  # raises error, because it hasn't been assigned\n    ImmutablePropertyError\n    >>> sc.myNum = 42.1\n    >>> sc.myNum\n    42.1\n    >>> sc.myNum = 21.05 * 2  # raises error, because the value cannot change after it has been assigned.\n    ImmutablePropertyError\n    >>> sc.initialize(42.1)  # this works, because the values are the same.\n    >>> sc.initialize(100)  # this fails, because the value cannot change\n    ImmutablePropertyError\n    \"\"\"\n    privateName = \"_\" + name\n\n    def _getter(self):\n        try:\n            return getattr(self, privateName)\n        except AttributeError:\n            if getattr(self, \"-unlocked\", False):\n                return None\n            raise ImmutablePropertyError(\n                \"Attribute {} on {} has not been set, must read {} file first.\".format(name, self, dependencyAction)\n            )\n\n    def _setter(self, value):\n        if hasattr(self, privateName):\n            
currentVal = getattr(self, privateName)\n            if currentVal is None or value is None:\n                setattr(self, privateName, value if currentVal is None else currentVal)\n            elif not numpyHackForEqual(currentVal, value):\n                raise ImmutablePropertyError(\n                    \"{} on {} has already been set by reading {} file.\\n\"\n                    \"The original value:           ({})\\n\"\n                    \"does not match the new value: ({}).\".format(name, self, dependencyAction, currentVal, value)\n                )\n        else:\n            setattr(self, privateName, value)\n\n    return property(_getter, _setter, doc=doc)\n\n\nclass ImmutablePropertyError(Exception):\n    \"\"\"Exception raised when performing an illegal operation on an immutable property.\"\"\"\n\n\ndef unlockImmutableProperties(lib):\n    \"\"\"Unlock an object that has immutable properties for modification.\n\n    This will prevent raising errors when reading or assigning values to an immutable property\n\n    See Also\n    --------\n    armi.utils.properties.createImmutableProperty\n    \"\"\"\n    setattr(lib, \"-unlocked\", True)\n\n\ndef lockImmutableProperties(lib):\n    \"\"\"Lock an object that has immutable properties such that accessing unassigned properties, or attempting\n    to modify the properties raises an exception.\n\n    See Also\n    --------\n    armi.utils.properties.createImmutableProperty\n    \"\"\"\n    del lib.__dict__[\"-unlocked\"]\n"
  },
  {
    "path": "armi/utils/reportPlotting.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nPlotting Utils specific to reports.\n\nThis module makes heavy use of matplotlib. Beware that plots generated with matplotlib\nmay not free their memory, even after the plot is closed, and excessive use of\nplotting functions may gobble up all of your machine's memory.\n\nTherefore, you should use these plotting tools judiciously. It is not advisable to,\nfor instance, plot some sequence of objects in a loop at every time node. 
If you start\nto see your memory usage grow inexplicably, you should question any plots that you are\ngenerating.\n\"\"\"\n\nimport itertools\nimport math\nimport os\n\nimport matplotlib.path\nimport matplotlib.projections.polar\nimport matplotlib.pyplot as plt\nimport matplotlib.spines\nimport numpy as np\nfrom matplotlib import colormaps\nfrom matplotlib import colors as mpltcolors\n\nfrom armi import runLog, settings\nfrom armi.bookkeeping import report\nfrom armi.reactor.flags import Flags\n\n\ndef plotReactorPerformance(reactor, dbi, buGroups, extension=None, history=None):\n    \"\"\"\n    Generates a set of plots useful in reactor analysis given a populated reactor.\n\n    Parameters\n    ----------\n    reactor : armi.reactor.reactors.Reactor\n        The reactor to plot\n\n    dbi : armi.bookkeeping.db.DatabaseInterface\n        The DatabaseInterface object from which to pull historical data\n\n    buGroups : list of float\n        The burnup groups in the problem\n\n    extension : str, optional\n        The file extension for saving plots\n\n    history: armi.bookkeeping.historyTracker.HistoryTrackerInterface object\n        The history tracker interface\n    \"\"\"\n    try:\n        data = dbi.getHistory(reactor, params=[\"cycle\", \"time\"])\n        data.update(\n            dbi.getHistory(\n                reactor.core,\n                params=[\n                    \"keff\",\n                    \"keffUnc\",\n                    \"maxPD\",\n                    \"maxBuI\",\n                    \"maxBuF\",\n                    \"maxDPA\",\n                    \"numMoves\",\n                ],\n            )\n        )\n    except Exception as ee:\n        runLog.warning(\n            \"Cannot plot rxPerformance without the data model present in the database.\\nError: {}\".format(ee)\n        )\n        return\n\n    # data is a dict of OrderedDict: { <paramName> : { (<cycle>, <node>) : value } }\n    scalars = {key: list(timeStepDict.values()) for 
key, timeStepDict in data.items()}\n    runLog.info(\"scalars for plotting {}\".format(scalars))\n\n    valueVsTime(\n        reactor.name,\n        scalars[\"time\"],\n        scalars[\"maxPD\"],\n        \"maxPD\",\n        \"Max Areal PD (MW/m^2)\",\n        \"Max Areal PD vs. time\",\n        0.0,\n        extension=extension,\n    )\n    keffVsTime(\n        reactor.name,\n        scalars[\"time\"],\n        scalars[\"keff\"],\n        scalars[\"keffUnc\"],\n        ymin=1.0,\n        extension=extension,\n    )\n    movesVsCycle(reactor.name, scalars, extension=extension)\n\n\ndef valueVsTime(name, x, y, key, yaxis, title, ymin=None, extension=None):\n    \"\"\"\n    Plots a value vs. time with a standard graph format.\n\n    Parameters\n    ----------\n    name : str\n        Reactor.name\n    x : iterable\n        The x-axis values (the abscissa)\n    y : iterable\n        The y-axis values (the ordinate)\n    key : str\n        A key word to add the item to the report interface\n    yaxis : str\n        The y axis label\n    title : str\n        the plot title\n    ymin : str, optional\n        The minimum y-axis value. 
If any ordinates are less than this value,\n        it will be ignored.\n    extension : str, optional\n        The file extension for saving the figure\n    \"\"\"\n    extension = extension or settings.Settings()[\"outputFileExtension\"]\n\n    plt.figure()\n    plt.plot(x, y, \".-\")\n    plt.xlabel(\"Time (yr)\")\n    plt.ylabel(yaxis)\n    plt.grid(color=\"0.70\")\n    plt.title(title + \" for {0}\".format(name))\n\n    if ymin is not None and all([yi > ymin for yi in y]):\n        # set ymin all values are greater than it and it exists.\n        ax = plt.gca()\n        ax.set_ylim(bottom=ymin)\n\n    figName = name + \".\" + key + \".\" + extension\n    plt.savefig(figName)\n    plt.close(1)\n\n    report.setData(\"PlotTime\", os.path.abspath(figName), report.TIME_PLOT)\n\n\ndef keffVsTime(name, time, keff, keffUnc=None, ymin=None, extension=None):\n    \"\"\"\n    Plots core keff vs. time.\n\n    Parameters\n    ----------\n    name : str\n        reactor.name\n    time : list\n        Time in years\n    keff : list\n        Keff in years\n    keffUnc : list, optional\n        Uncontrolled keff or None (will be plotted as secondary series)\n    ymin : float, optional\n        Minimum y-axis value to target.\n    extension : str, optional\n        The file extension for saving the figure\n    \"\"\"\n    extension = extension or settings.Settings()[\"outputFileExtension\"]\n\n    plt.figure()\n    if any(keffUnc):\n        label1 = \"Controlled k-eff\"\n        label2 = \"Uncontrolled k-eff\"\n    else:\n        label1 = None\n\n    plt.plot(time, keff, \".-\", label=label1)\n    if any(keffUnc):\n        plt.plot(time, keffUnc, \".-\", label=label2)\n        plt.legend()\n    plt.xlabel(\"Time (yr)\")\n    plt.ylabel(\"k-eff\")\n    plt.grid(color=\"0.70\")\n    plt.title(\"k-eff vs. 
time\" + \" for {0}\".format(name))\n\n    if ymin is not None and all([yi > ymin for yi in keff]):\n        # set ymin all values are greater than it and it exists.\n        ax = plt.gca()\n        ax.set_ylim(bottom=ymin)\n\n    figName = name + \".keff.\" + extension\n    plt.savefig(figName)\n    plt.close(1)\n\n    report.setData(\"K-Eff\", os.path.abspath(figName), report.KEFF_PLOT)\n\n\ndef movesVsCycle(name, scalars, extension=None):\n    \"\"\"\n    Make a bar chart showing the number of moves per cycle in the full core.\n\n    A move is defined as an assembly being picked up, moved, and put down. So if\n    two assemblies are swapped, that is 2 moves. Note that it does not count\n    temporary storage for such swaps. This is an approximation because in a chain of moves,\n    only one out of the chain would have to be temporarily stored. So as the chains get longer,\n    this approximation gets more accurate.\n\n    Parameters\n    ----------\n    name : str\n        reactor.name\n    extension : str, optional\n        The file extension for saving the figure\n\n    See Also\n    --------\n    FuelHandler.outage : sets the number of moves in each cycle\n    \"\"\"\n    extension = extension or settings.Settings()[\"outputFileExtension\"]\n\n    cycles = []\n    yvals = []\n    for moves, cycle in zip(scalars[\"numMoves\"], scalars[\"cycle\"]):\n        if moves is None:\n            moves = 0.0\n        if cycle not in cycles:  # only one move per cycle\n            # use the cycles scalar val in case burnSteps is dynamic\n            cycles.append(cycle)\n            yvals.append(moves)\n\n    plt.figure(figsize=(12, 6))  # make it wide and short\n    plt.bar(cycles, yvals, align=\"center\")\n    if len(cycles) > 1:\n        plt.xticks(cycles)\n    plt.grid(color=\"0.70\")\n    plt.xlabel(\"Cycle\")\n    plt.ylabel(\"Number of Moves\")\n    plt.title(\"Fuel management rate for \" + name)\n    figName = name + \".moves.\" + extension\n    
plt.savefig(figName)\n    plt.close(1)\n\n    report.setData(\"Moves Plot\", os.path.abspath(figName), report.MOVES_PLOT)\n\n\ndef plotCoreOverviewRadar(reactors, reactorNames=None):\n    \"\"\"\n    Plot key features of a set of reactors on radar/spider plots.\n\n    Useful for comparing reactors to one another.\n    \"\"\"\n    runLog.info(\"Plotting reactor comparison.\")\n    fig = plt.figure(figsize=(17, 9))\n    fig.subplots_adjust(wspace=0.25, hspace=0.20, top=0.85, bottom=0.05)\n    colors = itertools.cycle([\"b\", \"r\", \"g\"])\n    axes = {}\n    thetas = {}\n    scrapers = [\n        _getNeutronicVals,\n        _getMechanicalVals,\n        _getFuelVals,\n        _getPhysicalVals,\n    ]\n    firstReactorVals = {}  # for normalization\n    numRows, numCols = 2, (len(scrapers) + 1) // 2\n    for r, color in zip(reactors, colors):\n        for si, scraper in enumerate(scrapers):\n            physicsName, physicsLabels, physicsVals = scraper(r)\n            runLog.info(\"{}\".format(physicsName))\n            runLog.info(\"\\n\".join([\"{:10s} {}\".format(label, val) for label, val in zip(physicsLabels, physicsVals)]))\n            physicsVals = np.array(physicsVals)\n            theta = thetas.get(physicsName)\n            if theta is None:\n                # first time through. 
Build the radar, store the axis\n                theta = _radarFactory(len(physicsLabels), frame=\"polygon\")\n                thetas[physicsName] = theta\n                firstReactorVals[physicsName] = physicsVals\n                ax = fig.add_subplot(numRows, numCols, si + 1, projection=\"radar\")\n                axes[physicsName] = ax\n                ax.set_title(\n                    physicsName,\n                    weight=\"bold\",\n                    size=\"medium\",\n                    position=(0.5, 1.1),\n                    horizontalalignment=\"center\",\n                    verticalalignment=\"center\",\n                )\n                ax.set_var_labels(physicsLabels)\n                plt.rgrids([0.2, 0.4, 0.6, 0.8])  # radial grid lines\n            else:\n                ax = axes[physicsName]\n            with np.errstate(divide=\"ignore\", invalid=\"ignore\"):\n                vals = (\n                    physicsVals / firstReactorVals[physicsName]\n                )  # normalize to first reactor b/c values differ by a lot.\n                vals[np.isnan(vals)] = 0.2\n            ax.plot(theta, vals, color=color)\n            ax.fill(theta, vals, facecolor=color, alpha=0.25)\n\n    if reactorNames:\n        plt.subplot(numRows, numCols, 1)  # legend on top-left plot\n        legend = plt.legend(reactorNames, loc=(0.9, 0.95), labelspacing=0.1)\n    plt.setp(legend.get_texts(), fontsize=\"small\")\n    plt.figtext(\n        0.5,\n        0.965,\n        \"Comparison\",\n        ha=\"center\",\n        color=\"black\",\n        weight=\"bold\",\n        size=\"large\",\n    )\n    plt.savefig(\"reactor_comparison.png\")\n    plt.close()\n\n\ndef _getNeutronicVals(r):\n    labels, vals = list(\n        zip(\n            *[\n                (\"Rx. 
Swing\", r.core.p.rxSwing),\n                (\"Beta\", r.core.p.beta),\n                (\"Peak flux\", r.core.p.maxFlux),\n            ]\n        )\n    )\n    return \"Neutronics\", labels, vals\n\n\ndef _getMechanicalVals(r):\n    labels, vals = list(\n        zip(\n            *[\n                (\"Hold down\", 1.0),\n                (\"Distortion\", 3.0),\n            ]\n        )\n    )\n\n    return \"Mechanical\", labels, vals\n\n\ndef _getPhysicalVals(r):\n    avgHeight = 0.0\n    fuelA = r.core.getAssemblies(Flags.FUEL)\n\n    # get average height\n    avgHeight = 0\n    for a in fuelA:\n        for b in a.iterBlocks(Flags.FUEL):\n            try:\n                avgHeight += b.getInputHeight()\n            except AttributeError:\n                avgHeight += b.getHeight()\n    avgHeight /= len(fuelA)\n\n    radius = r.core.getCoreRadius()\n    labels, vals = list(\n        zip(\n            *[\n                (\"Cold fuel height\", avgHeight),\n                (\"Fuel assems\", len(fuelA)),\n                (\"Assem weight\", r.core.getFirstAssembly(Flags.FUEL).getMass()),\n                (\"Core radius\", radius),\n                (\"Core aspect ratio\", (2 * radius) / avgHeight),  # width/height\n                (\"Fissile mass\", r.core.getFissileMass()),\n            ]\n        )\n    )\n\n    return \"Dimensions\", labels, vals\n\n\ndef _getFuelVals(r):\n    tOverD = 0.0\n    numClad = 0.0\n    for b in r.core.iterBlocks(Flags.FUEL):\n        clad = b.getComponent(Flags.CLAD)\n        if clad:\n            cladOD = clad.getDimension(\"od\")\n            cladID = clad.getDimension(\"id\")\n            tOverD += (cladOD - cladID) / cladOD\n            numClad += 1\n    tOverD /= numClad\n    data = [\n        (\n            \"Smear dens.\",\n            r.core.calcAvgParam(\"smearDensity\", generationNum=2, typeSpec=Flags.FUEL),\n        ),\n        (\"Clad T/D\", tOverD),\n        (\"dpa\", r.core.p.maxdetailedDpaPeak),\n    ]\n    labels, vals 
= list(zip(*data))\n    return \"Fuel Perf.\", labels, vals\n\n\ndef _radarFactory(numVars, frame=\"circle\"):\n    \"\"\"Create a radar chart with `numVars` axes.\n\n    This function creates a RadarAxes projection and registers it.\n\n    Raises\n    ------\n    ValueError\n        If value of the frame is unknown.\n\n    Parameters\n    ----------\n    numVars : int\n        Number of variables for radar chart.\n    frame : {'circle' | 'polygon'}\n        Shape of frame surrounding axes.\n    \"\"\"\n    # calculate evenly-spaced axis angles\n    # rotate theta such that the first axis is at the top\n    # keep within 0 to 2pi range though.\n    theta = (np.linspace(0, 2 * np.pi, numVars, endpoint=False) + np.pi / 2) % (2.0 * np.pi)\n\n    def drawPolyPatch():\n        verts = _unitPolyVerts(theta)\n        return plt.Polygon(verts, closed=True, edgecolor=\"k\")\n\n    def drawCirclePatch():\n        # unit circle centered on (0.5, 0.5)\n        return plt.Circle((0.5, 0.5), 0.5)\n\n    def close_line(line):\n        \"\"\"Closes the input line.\"\"\"\n        x, y = line.get_data()\n        if x[0] != x[-1]:\n            x = np.concatenate((x, [x[0]]))\n            y = np.concatenate((y, [y[0]]))\n            line.set_data(x, y)\n\n    patchDict = {\"polygon\": drawPolyPatch, \"circle\": drawCirclePatch}\n    if frame not in patchDict:\n        raise ValueError(\"unknown value for `frame`: %s\" % frame)\n\n    class _RadarAxes(matplotlib.projections.polar.PolarAxes):\n        \"\"\"\n        Radar projection.\n\n        Note different PEP8 naming convention to comply with parent class.\n        \"\"\"\n\n        name = \"radar\"\n        # use 1 line segment to connect specified points\n        RESOLUTION = 1\n        # define draw_frame method\n        draw_patch = staticmethod(patchDict[frame])\n\n        def fill(self, *args, **kwargs):\n            \"\"\"Override fill so that line is closed by default.\"\"\"\n            closed = kwargs.pop(\"closed\", 
True)\n            return super(_RadarAxes, self).fill(closed=closed, *args, **kwargs)\n\n        def plot(self, *args, **kwargs):\n            \"\"\"Override plot so that line is closed by default.\"\"\"\n            lines = super(_RadarAxes, self).plot(*args, **kwargs)\n            for line in lines:\n                close_line(line)\n\n        def set_var_labels(self, labels):\n            self.set_thetagrids(np.degrees(theta), labels)\n\n        def _gen_axes_patch(self):\n            return self.draw_patch()\n\n        def _gen_axes_spines(self):\n            if frame == \"circle\":\n                return matplotlib.projections.polar.PolarAxes._gen_axes_spines(self)\n            # The following is a hack to get the spines (i.e. the axes frame)\n            # to draw correctly for a polygon frame.\n\n            # spine_type must be 'left', 'right', 'top', 'bottom', or `circle`.\n            spine_type = \"circle\"\n            verts = _unitPolyVerts(theta)\n            # close off polygon by repeating first vertex\n            verts.append(verts[0])\n            path = matplotlib.path.Path(verts)\n\n            spine = matplotlib.spines.Spine(self, spine_type, path)\n            spine.set_transform(self.transAxes)\n            return {\"polar\": spine}\n\n    matplotlib.projections.register_projection(_RadarAxes)\n    return theta\n\n\ndef _unitPolyVerts(theta):\n    \"\"\"Return vertices of polygon for subplot axes.\n\n    This polygon is circumscribed by a unit circle centered at (0.5, 0.5)\n    \"\"\"\n    x0 = y0 = r = 0.5\n    verts = list(zip(r * np.cos(theta) + x0, r * np.sin(theta) + y0))\n    return verts\n\n\ndef createPlotMetaData(title, xLabel, yLabel, xMajorTicks=None, yMajorTicks=None, legendLabels=None):\n    \"\"\"\n    Create plot metadata (title, labels, ticks).\n\n    Parameters\n    ----------\n    title : str\n        Plot title\n\n    xLabel : str\n        x-axis label\n\n    yLabel : str\n        y-axis label\n\n    xMajorTicks : list 
of float\n        List of axial position at which to insert major ticks\n\n    yMajorTicks : list of float\n        List of axial position at which to insert major ticks\n\n    legendsLabels : list of str\n        Labels to used in the plot legend\n\n    Returns\n    -------\n    metadata : dict\n        Dictionary with all plot metadata information\n    \"\"\"\n    metadata = {}\n\n    metadata[\"title\"] = title\n    metadata[\"xlabel\"] = xLabel\n    metadata[\"ylabel\"] = yLabel\n    metadata[\"xMajorTicks\"] = xMajorTicks\n    metadata[\"yMajorTicks\"] = yMajorTicks\n    metadata[\"legendLabels\"] = legendLabels\n\n    return metadata\n\n\ndef plotAxialProfile(zVals, dataVals, fName, metadata, nPlot=1, yLog=False):\n    \"\"\"\n    Plot the axial profile of quantity zVals.\n\n    Parameters\n    ----------\n    zVals: list of float\n        Axial position of the quantity to be plotted\n\n    dataVals: list of float\n        Axial quantity to be plotted\n\n    fName: str\n        The file name for the plot image file.\n\n    metadata : bool\n        Metadata (title, labels, legends, ticks)\n\n    nPlot: int\n        Number of plots to be generated\n\n    yLog: bool\n        Boolean flag indicating that y-axis is to be plotted on a log scale.\n    \"\"\"\n    plt.figure(figsize=(15, 10))\n\n    plt.xlabel(metadata[\"xlabel\"])\n    plt.ylabel(metadata[\"ylabel\"])\n    plt.title(metadata[\"title\"])\n    if metadata[\"legendLabels\"]:\n        plt.legend(metadata[\"legendLabels\"], loc=1, fontsize=\"small\")\n\n    ax = plt.gca()\n\n    if yLog:  # plot the axial profiles on a log scale\n        dataVals = np.log10(abs(dataVals))\n\n    if nPlot > 1:\n        colormap = colormaps[\"jet\"]\n        norm = mpltcolors.Normalize(0, nPlot - 1)\n\n        # alternate between line styles to help distinguish neighboring groups (close on the color map)\n        lineTypes = [\"\", \":\", \"--\", \"-.\"]\n        nLineTypes = len(lineTypes)\n        for n in 
range(nPlot):\n            # reverse order for color map, so high E is red and low E is blue\n            n_ = nPlot - n - 1\n            color = colormap(norm(n_))\n            lineTypeIndex = int(math.fmod(n, nLineTypes))\n            plt.plot(zVals, dataVals[:, n], lineTypes[lineTypeIndex], color=color)\n    else:\n        plt.plot(zVals, dataVals)\n\n    ax.autoscale_view()\n\n    if metadata[\"xMajorTicks\"]:\n        ax.set_xticks(metadata[\"xMajorTicks\"])\n        ax.set_xticklabels([str(int(x)) for x in metadata[\"xMajorTicks\"]], fontsize=12)\n\n    if metadata[\"yMajorTicks\"]:\n        ax.set_xticks(metadata[\"yMajorTicks\"])\n        ax.set_xticklabels([str(int(x)) for x in metadata[\"yMajorTicks\"]], fontsize=12)\n\n    ax.xaxis.grid()\n    ax.yaxis.grid()\n\n    plt.savefig(fName)\n    plt.close()\n"
  },
  {
    "path": "armi/utils/tabulate.py",
    "content": "# Copyright 2024 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"Pretty-print tabular data.\n\nThis file started out as the MIT-licensed \"tabulate\". Though we have made, and will continue to\nmake, many arbitrary changes as we need. Thanks to the tabulate team.\n\nhttps://github.com/astanin/python-tabulate\n\nUsage\n-----\nThe module provides just one function, `tabulate`, which takes a list of lists or other tabular data\ntype as the first argument, and outputs anicely-formatted plain-text table::\n\n    >>> from armi.utils.tabulate import tabulate\n\n    >>> table = [[\"Sun\",696000,1989100000],[\"Earth\",6371,5973.6],\n    ...          
[\"Moon\",1737,73.5],[\"Mars\",3390,641.85]]\n\n    >>> print(tabulate(table))\n    -----  ------  -------------\n    Sun    696000     1.9891e+09\n    Earth    6371  5973.6\n    Moon     1737    73.5\n    Mars     3390   641.85\n    -----  ------  -------------\n\nThe following tabular data types are supported:\n\n- list of lists or another iterable of iterables\n- list or another iterable of dicts (keys as columns)\n- dict of iterables (keys as columns)\n- list of dataclasses (field names as columns)\n- two-dimensional NumPy array\n- NumPy record arrays (names as columns)\n\nTable headers\n-------------\nTo print nice column headers, supply the second argument (`headers`):\n\n  - `headers` can be an explicit list of column headers\n  - if `headers=\"firstrow\"`, then the first row of data is used\n  - if `headers=\"keys\"`, then dictionary keys or column indices are used\n\nOtherwise a headerless table is produced.\n\nIf the number of headers is less than the number of columns, they are supposed to be names of\nthe last columns. This is consistent with the plain-text format of R::\n\n    >>> print(tabulate([[\"sex\",\"age\"],[\"Alice\",\"F\",24],[\"Bob\",\"M\",19]],\n    ...       headers=\"firstrow\"))\n           sex      age\n    -----  -----  -----\n    Alice  F         24\n    Bob    M         19\n\nColumn and Headers alignment\n----------------------------\n`tabulate` tries to detect column types automatically, and aligns the values properly. By\ndefault it aligns decimal points of the numbers (or flushes integer numbers to the right), and\nflushes everything else to the left. Possible column alignments (`numAlign`, `strAlign`) are:\n\"right\", \"center\", \"left\", \"decimal\" (only for `numAlign`), and None (to disable alignment).\n\n`colGlobalAlign` allows for global alignment of columns, before any specific override from\n    `colAlign`. 
Possible values are: None (defaults according to coltype), \"right\", \"center\",\n    \"decimal\", \"left\".\n`colAlign` allows for column-wise override starting from left-most column. Possible values are:\n    \"global\" (no override), \"right\", \"center\", \"decimal\", \"left\".\n`headersGlobalAlign` allows for global headers alignment, before any specific override from\n    `headersAlign`. Possible values are: None (follow columns alignment), \"right\", \"center\",\n    \"left\".\n`headersAlign` allows for header-wise override starting from left-most given header. Possible\n    values are: \"global\" (no override), \"same\" (follow column alignment), \"right\", \"center\",\n    \"left\".\n\nNote on intended behaviour: If there is no `data`, any column alignment argument is ignored. Hence,\nin this case, header alignment cannot be inferred from column alignment.\n\nTable formats\n-------------\n`intFmt` is a format specification used for columns which contain numeric data without a decimal\npoint. This can also be a list or tuple of format strings, one per column.\n\n`floatFmt` is a format specification used for columns which contain numeric data with a decimal\npoint. This can also be a list or tuple of format strings, one per column.\n\n`None` values are replaced with a `missingVal` string (like `floatFmt`, this can also be a list\nof values for different columns)::\n\n    >>> print(tabulate([[\"spam\", 1, None],\n    ...                 [\"eggs\", 42, 3.14],\n    ...                 [\"other\", None, 2.7]], missingVal=\"?\"))\n    -----  --  ----\n    spam    1  ?\n    eggs   42  3.14\n    other   ?  2.7\n    -----  --  ----\n\nVarious plain-text table formats (`tableFmt`) are supported: 'plain', 'simple', 'grid', 'rst', and\n`tsv`. 
Variable `tabulateFormats` contains the list of currently supported formats.\n\n\"plain\" format doesn't use any pseudographics to draw tables, it separates columns with a double\nspace::\n\n    >>> print(tabulate([[\"spam\", 41.9999], [\"eggs\", \"451.0\"]],\n    ...                 [\"strings\", \"numbers\"], \"plain\"))\n    strings      numbers\n    spam         41.9999\n    eggs        451\n\n    >>> print(tabulate([[\"spam\", 41.9999], [\"eggs\", \"451.0\"]], tableFmt=\"plain\"))\n    spam   41.9999\n    eggs  451\n\n\"simple\" format is like Pandoc simple_tables::\n\n    >>> print(tabulate([[\"spam\", 41.9999], [\"eggs\", \"451.0\"]],\n    ...                 [\"strings\", \"numbers\"], \"simple\"))\n    strings      numbers\n    ---------  ---------\n    spam         41.9999\n    eggs        451\n\n    >>> print(tabulate([[\"spam\", 41.9999], [\"eggs\", \"451.0\"]], tableFmt=\"simple\"))\n    ----  --------\n    spam   41.9999\n    eggs  451\n    ----  --------\n\n\"grid\" is similar to tables produced by Emacs table.el package or Pandoc grid_tables::\n\n    >>> print(tabulate([[\"spam\", 41.9999], [\"eggs\", \"451.0\"]],\n    ...                [\"strings\", \"numbers\"], \"grid\"))\n    +-----------+-----------+\n    | strings   |   numbers |\n    +===========+===========+\n    | spam      |   41.9999 |\n    +-----------+-----------+\n    | eggs      |  451      |\n    +-----------+-----------+\n\n    >>> print(tabulate([[\"spam\", 41.9999], [\"eggs\", \"451.0\"]], tableFmt=\"grid\"))\n    +------+----------+\n    | spam |  41.9999 |\n    +------+----------+\n    | eggs | 451      |\n    +------+----------+\n\n\"rst\" is like a simple table format from reStructuredText; please note that reStructuredText\naccepts also \"grid\" tables::\n\n    >>> print(tabulate([[\"spam\", 41.9999], [\"eggs\", \"451.0\"]],\n    ...                
[\"strings\", \"numbers\"], \"rst\"))\n    =========  =========\n    strings      numbers\n    =========  =========\n    spam         41.9999\n    eggs        451\n    =========  =========\n\n    >>> print(tabulate([[\"spam\", 41.9999], [\"eggs\", \"451.0\"]], tableFmt=\"rst\"))\n    ====  ========\n    spam   41.9999\n    eggs  451\n    ====  ========\n\nNumber parsing\n--------------\nBy default, anything which can be parsed as a number is a number. This ensures numbers represented\nas strings are aligned properly. This can lead to weird results for particular strings such as\nspecific git SHAs e.g. \"42992e1\" will be parsed into the number 429920 and aligned as such.\n\nTo completely disable number parsing (and alignment), use `disableNumParse=True`. For more fine\ngrained control, a list column indices is used to disable number parsing only on those columns e.g.\n`disableNumParse=[0, 2]` would disable number parsing only on the first and third columns.\n\nColumn Widths and Auto Line Wrapping\n------------------------------------\nTabulate will, by default, set the width of each column to the length of the longest element in that\ncolumn. However, in situations where fields are expected to reasonably be too long to look good as a\nsingle line, tabulate can help automate word wrapping long fields for you. 
Use the parameter\n`maxcolwidth` to provide a list of maximal column widths::\n\n    >>> print(tabulate( \\\n          [('1', 'John Smith', \\\n            'This is a rather long description that might look better if it is wrapped a bit')], \\\n          headers=(\"Issue Id\", \"Author\", \"Description\"), \\\n          maxColWidths=[None, None, 30], \\\n          tableFmt=\"grid\"  \\\n        ))\n    +------------+------------+-------------------------------+\n    |   Issue Id | Author     | Description                   |\n    +============+============+===============================+\n    |          1 | John Smith | This is a rather long         |\n    |            |            | description that might look   |\n    |            |            | better if it is wrapped a bit |\n    +------------+------------+-------------------------------+\n\nHeader column width can be specified in a similar way using `maxheadercolwidth`.\n\"\"\"\n\nimport dataclasses\nimport math\nimport re\nfrom collections import namedtuple\nfrom collections.abc import Iterable, Sized\nfrom functools import partial, reduce\nfrom itertools import chain, zip_longest\nfrom textwrap import TextWrapper\n\nfrom armi import runLog\n\n__all__ = [\"tabulate\", \"tabulateFormats\"]\n\n\n# minimum extra space in headers\nMIN_PADDING = 2\n\n# Whether or not to preserve leading/trailing whitespace in data.\nPRESERVE_WHITESPACE = False\n\n_DEFAULT_FLOAT_FMT = \"g\"\n_DEFAULT_INT_FMT = \"\"\n_DEFAULT_MISSING_VAL = \"\"\n# default align will be overwritten by \"left\", \"center\" or \"decimal\" depending on the formatter\n_DEFAULT_ALIGN = \"default\"\n\n# Constant that can be used as part of passed rows to generate a separating line. 
It is purposely an\n# unprintable character, very unlikely to be used in a table\nSEPARATING_LINE = \"\\001\"\n\nLine = namedtuple(\"Line\", [\"begin\", \"hline\", \"sep\", \"end\"])\nDataRow = namedtuple(\"DataRow\", [\"begin\", \"sep\", \"end\"])\n\n# A table structure is supposed to be:\n#\n#     --- lineabove ---------\n#         headerrow\n#     --- linebelowheader ---\n#         datarow\n#     --- linebetweenrows ---\n#     ... (more datarows) ...\n#     --- linebetweenrows ---\n#         last datarow\n#     --- linebelow ---------\n#\n# TableFormat's line* elements can be\n#\n#   - either None, if the element is not used,\n#   - or a Line tuple,\n#   - or a function: [col_widths], [col_alignments] -> string.\n#\n# TableFormat's *row elements can be\n#\n#   - either None, if the element is not used,\n#   - or a DataRow tuple,\n#   - or a function: [cell_values], [col_widths], [col_alignments] -> string.\n#\n# padding (an integer) is the amount of white space around data values.\n#\n# withHeaderHide:\n#\n#   - either None, to display all table elements unconditionally,\n#   - or a list of elements not to be displayed if the table has column headers.\n#\nTableFormat = namedtuple(\n    \"TableFormat\",\n    [\n        \"lineabove\",\n        \"linebelowheader\",\n        \"linebetweenrows\",\n        \"linebelow\",\n        \"headerrow\",\n        \"datarow\",\n        \"padding\",\n        \"withHeaderHide\",\n    ],\n)\n\n\ndef _isSeparatingLine(row):\n    rowType = type(row)\n    isSl = (rowType is list or rowType is str) and (\n        (len(row) >= 1 and row[0] == SEPARATING_LINE) or (len(row) >= 2 and row[1] == SEPARATING_LINE)\n    )\n    return isSl\n\n\ndef _rstEscapeFirstColumn(rows, headers):\n    def escapeEmpty(val):\n        if isinstance(val, (str, bytes)) and not val.strip():\n            return \"..\"\n        else:\n            return val\n\n    newHeaders = list(headers)\n    newRows = []\n    if headers:\n        newHeaders[0] = 
escapeEmpty(headers[0])\n    for row in rows:\n        newRow = list(row)\n        if newRow:\n            newRow[0] = escapeEmpty(row[0])\n        newRows.append(newRow)\n    return newRows, newHeaders\n\n\n_tableFormats = {\n    \"armi\": TableFormat(\n        lineabove=Line(\"\", \"-\", \"  \", \"\"),\n        linebelowheader=Line(\"\", \"-\", \"  \", \"\"),\n        linebetweenrows=None,\n        linebelow=Line(\"\", \"-\", \"  \", \"\"),\n        headerrow=DataRow(\"\", \"  \", \"\"),\n        datarow=DataRow(\"\", \"  \", \"\"),\n        padding=0,\n        withHeaderHide=None,\n    ),\n    \"simple\": TableFormat(\n        lineabove=Line(\"\", \"-\", \"  \", \"\"),\n        linebelowheader=Line(\"\", \"-\", \"  \", \"\"),\n        linebetweenrows=None,\n        linebelow=Line(\"\", \"-\", \"  \", \"\"),\n        headerrow=DataRow(\"\", \"  \", \"\"),\n        datarow=DataRow(\"\", \"  \", \"\"),\n        padding=0,\n        withHeaderHide=[\"lineabove\", \"linebelow\"],\n    ),\n    \"plain\": TableFormat(\n        lineabove=None,\n        linebelowheader=None,\n        linebetweenrows=None,\n        linebelow=None,\n        headerrow=DataRow(\"\", \"  \", \"\"),\n        datarow=DataRow(\"\", \"  \", \"\"),\n        padding=0,\n        withHeaderHide=None,\n    ),\n    \"grid\": TableFormat(\n        lineabove=Line(\"+\", \"-\", \"+\", \"+\"),\n        linebelowheader=Line(\"+\", \"=\", \"+\", \"+\"),\n        linebetweenrows=Line(\"+\", \"-\", \"+\", \"+\"),\n        linebelow=Line(\"+\", \"-\", \"+\", \"+\"),\n        headerrow=DataRow(\"|\", \"|\", \"|\"),\n        datarow=DataRow(\"|\", \"|\", \"|\"),\n        padding=1,\n        withHeaderHide=None,\n    ),\n    \"github\": TableFormat(\n        lineabove=Line(\"|\", \"-\", \"|\", \"|\"),\n        linebelowheader=Line(\"|\", \"-\", \"|\", \"|\"),\n        linebetweenrows=None,\n        linebelow=None,\n        headerrow=DataRow(\"|\", \"|\", \"|\"),\n        datarow=DataRow(\"|\", \"|\", \"|\"),\n      
  padding=1,\n        withHeaderHide=[\"lineabove\"],\n    ),\n    \"pretty\": TableFormat(\n        lineabove=Line(\"+\", \"-\", \"+\", \"+\"),\n        linebelowheader=Line(\"+\", \"-\", \"+\", \"+\"),\n        linebetweenrows=None,\n        linebelow=Line(\"+\", \"-\", \"+\", \"+\"),\n        headerrow=DataRow(\"|\", \"|\", \"|\"),\n        datarow=DataRow(\"|\", \"|\", \"|\"),\n        padding=1,\n        withHeaderHide=None,\n    ),\n    \"psql\": TableFormat(\n        lineabove=Line(\"+\", \"-\", \"+\", \"+\"),\n        linebelowheader=Line(\"|\", \"-\", \"+\", \"|\"),\n        linebetweenrows=None,\n        linebelow=Line(\"+\", \"-\", \"+\", \"+\"),\n        headerrow=DataRow(\"|\", \"|\", \"|\"),\n        datarow=DataRow(\"|\", \"|\", \"|\"),\n        padding=1,\n        withHeaderHide=None,\n    ),\n    \"rst\": TableFormat(\n        lineabove=Line(\"\", \"=\", \"  \", \"\"),\n        linebelowheader=Line(\"\", \"=\", \"  \", \"\"),\n        linebetweenrows=None,\n        linebelow=Line(\"\", \"=\", \"  \", \"\"),\n        headerrow=DataRow(\"\", \"  \", \"\"),\n        datarow=DataRow(\"\", \"  \", \"\"),\n        padding=0,\n        withHeaderHide=None,\n    ),\n    \"tsv\": TableFormat(\n        lineabove=None,\n        linebelowheader=None,\n        linebetweenrows=None,\n        linebelow=None,\n        headerrow=DataRow(\"\", \"\\t\", \"\"),\n        datarow=DataRow(\"\", \"\\t\", \"\"),\n        padding=0,\n        withHeaderHide=None,\n    ),\n}\n\n\ntabulateFormats = list(sorted(_tableFormats.keys()))\n\n# The table formats for which multiline cells will be folded into subsequent table rows. 
The key is\n# the original format, the value is the format that will be used to represent it.\nmultilineFormats = {\n    \"armi\": \"armi\",\n    \"plain\": \"plain\",\n    \"simple\": \"simple\",\n    \"grid\": \"grid\",\n    \"pretty\": \"pretty\",\n    \"psql\": \"psql\",\n    \"rst\": \"rst\",\n}\n\n_multilineCodes = re.compile(r\"\\r|\\n|\\r\\n\")\n_multilineCodesBytes = re.compile(b\"\\r|\\n|\\r\\n\")\n\n# Handle ANSI escape sequences for both control sequence introducer (CSI) and operating system\n# command (OSC). Both of these begin with 0x1b (or octal 033), which will be shown below as ESC.\n#\n# CSI ANSI escape codes have the following format, defined in section 5.4 of ECMA-48:\n#\n# CSI: ESC followed by the '[' character (0x5b)\n# Parameter Bytes: 0..n bytes in the range 0x30-0x3f\n# Intermediate Bytes: 0..n bytes in the range 0x20-0x2f\n# Final Byte: a single byte in the range 0x40-0x7e\n#\n# Also include the terminal hyperlink sequences as described here:\n# https://gist.github.com/egmontkob/eb114294efbcd5adb1944c9f3cb5feda\n#\n# OSC 8 ; params ; uri ST display_text OSC 8 ;; ST\n#\n# Example: \\x1b]8;;https://example.com\\x5ctext to show\\x1b]8;;\\x5c\n#\n# Where:\n# OSC: ESC followed by the ']' character (0x5d)\n# params: 0..n optional key value pairs separated by ':' (e.g. foo=bar:baz=qux:abc=123)\n# URI: the actual URI with protocol scheme (e.g. 
https://, file://, ftp://)\n# ST: ESC followed by the '\\' character (0x5c)\n_esc = r\"\\x1b\"\n_csi = rf\"{_esc}\\[\"\n_osc = rf\"{_esc}\\]\"\n_st = rf\"{_esc}\\\\\"\n\n_ansiEscapePat = rf\"\"\"\n    (\n        # terminal colors, etc\n        {_csi}        # CSI\n        [\\x30-\\x3f]*  # parameter bytes\n        [\\x20-\\x2f]*  # intermediate bytes\n        [\\x40-\\x7e]   # final byte\n    |\n        # terminal hyperlinks\n        {_osc}8;        # OSC opening\n        (\\w+=\\w+:?)*    # key=value params list (submatch 2)\n        ;               # delimiter\n        ([^{_esc}]+)    # URI - anything but ESC (submatch 3)\n        {_st}           # ST\n        ([^{_esc}]+)    # link text - anything but ESC (submatch 4)\n        {_osc}8;;{_st}  # \"closing\" OSC sequence\n    )\n\"\"\"\n_ansiCodes = re.compile(_ansiEscapePat, re.VERBOSE)\n_ansiCodesBytes = re.compile(_ansiEscapePat.encode(\"utf8\"), re.VERBOSE)\n_floatWithThousandsSeparators = re.compile(r\"^(([+-]?[0-9]{1,3})(?:,([0-9]{3}))*)?(?(1)\\.[0-9]*|\\.[0-9]+)?$\")\n\n\ndef _isnumberWithThousandsSeparator(string):\n    \"\"\"Function to test of a string is a number with a thousands separator.\n\n    >>> _isnumberWithThousandsSeparator(\".\")\n    False\n    >>> _isnumberWithThousandsSeparator(\"1\")\n    True\n    >>> _isnumberWithThousandsSeparator(\"1.\")\n    True\n    >>> _isnumberWithThousandsSeparator(\".1\")\n    True\n    >>> _isnumberWithThousandsSeparator(\"1000\")\n    False\n    >>> _isnumberWithThousandsSeparator(\"1,000\")\n    True\n    >>> _isnumberWithThousandsSeparator(\"1,0000\")\n    False\n    >>> _isnumberWithThousandsSeparator(b\"1,000.1234\")\n    True\n    >>> _isnumberWithThousandsSeparator(\"+1,000.1234\")\n    True\n    >>> _isnumberWithThousandsSeparator(\"-1,000.1234\")\n    True\n    \"\"\"\n    try:\n        string = string.decode()\n    except (UnicodeDecodeError, AttributeError):\n        pass\n\n    return bool(re.match(_floatWithThousandsSeparators, string))\n\n\ndef 
_isconvertible(conv, string):\n    try:\n        conv(string)\n        return True\n    except (ValueError, TypeError):\n        return False\n\n\ndef _isnumber(string):\n    \"\"\"Helper function; is this string a number.\n\n    >>> _isnumber(\"123.45\")\n    True\n    >>> _isnumber(\"123\")\n    True\n    >>> _isnumber(\"spam\")\n    False\n    >>> _isnumber(\"123e45678\")\n    False\n    >>> _isnumber(\"inf\")\n    True\n    \"\"\"\n    if not _isconvertible(float, string):\n        return False\n    elif isinstance(string, (str, bytes)) and (math.isinf(float(string)) or math.isnan(float(string))):\n        return string.lower() in [\"inf\", \"-inf\", \"nan\"]\n    return True\n\n\ndef _isint(string, inttype=int):\n    \"\"\"Determine if a string is an integer.\n\n    >>> _isint(\"123\")\n    True\n    >>> _isint(\"123.45\")\n    False\n    \"\"\"\n    return (\n        type(string) is inttype\n        or (\n            (hasattr(string, \"is_integer\") or hasattr(string, \"__array__\"))\n            and str(type(string)).startswith(\"<class 'numpy.int\")\n        )  # numpy.int64 and similar\n        or (isinstance(string, (bytes, str)) and _isconvertible(inttype, string))  # integer as string\n    )\n\n\ndef _isbool(string):\n    \"\"\"Test if a string is a boolean.\n\n    >>> _isbool(True)\n    True\n    >>> _isbool(\"False\")\n    True\n    >>> _isbool(1)\n    False\n    \"\"\"\n    return type(string) is bool or (isinstance(string, (bytes, str)) and string in (\"True\", \"False\"))\n\n\ndef _type(string, hasInvisible=True, numparse=True):\n    r\"\"\"The least generic type (type(None), int, float, str, unicode).\n\n    >>> _type(None) is type(None)\n    True\n    >>> _type(\"foo\") is type(\"\")\n    True\n    >>> _type(\"1\") is type(1)\n    True\n    >>> _type(\"\\x1b[31m42\\x1b[0m\") is type(42)\n    True\n    >>> _type(\"\\x1b[31m42\\x1b[0m\") is type(42)\n    True\n\n    \"\"\"\n    if hasInvisible and isinstance(string, (str, bytes)):\n        string = 
_stripAnsi(string)\n\n    if string is None:\n        return type(None)\n    elif hasattr(string, \"isoformat\"):\n        # datetime.datetime, date, and time\n        return str\n    elif _isbool(string):\n        return bool\n    elif _isint(string) and numparse:\n        return int\n    elif _isnumber(string) and numparse:\n        return float\n    elif isinstance(string, bytes):\n        return bytes\n    else:\n        return str\n\n\ndef _afterpoint(string):\n    \"\"\"Symbols after a decimal point, -1 if the string lacks the decimal point.\n\n    >>> _afterpoint(\"123.45\")\n    2\n    >>> _afterpoint(\"1001\")\n    -1\n    >>> _afterpoint(\"eggs\")\n    -1\n    >>> _afterpoint(\"123e45\")\n    2\n    >>> _afterpoint(\"123,456.78\")\n    2\n\n    \"\"\"\n    if _isnumber(string) or _isnumberWithThousandsSeparator(string):\n        if _isint(string):\n            return -1\n        else:\n            pos = string.rfind(\".\")\n            pos = string.lower().rfind(\"e\") if pos < 0 else pos\n            if pos >= 0:\n                return len(string) - pos - 1\n            else:\n                # no point\n                return -1\n    else:\n        # not a number\n        return -1\n\n\ndef _padleft(width, s):\n    r\"\"\"Flush right.\n\n    >>> _padleft(6, \"\\u044f\\u0439\\u0446\\u0430\") == \"  \\u044f\\u0439\\u0446\\u0430\"\n    True\n\n    \"\"\"\n    fmt = \"{0:>%ds}\" % width\n    return fmt.format(s)\n\n\ndef _padright(width, s):\n    r\"\"\"Flush left.\n\n    >>> _padright(6, \"\\u044f\\u0439\\u0446\\u0430\") == \"\\u044f\\u0439\\u0446\\u0430  \"\n    True\n\n    \"\"\"\n    fmt = \"{0:<%ds}\" % width\n    return fmt.format(s)\n\n\ndef _padboth(width, s):\n    r\"\"\"Center string.\n\n    >>> _padboth(6, \"\\u044f\\u0439\\u0446\\u0430\") == \" \\u044f\\u0439\\u0446\\u0430 \"\n    True\n\n    \"\"\"\n    fmt = \"{0:^%ds}\" % width\n    return fmt.format(s)\n\n\ndef _padnone(ignoreWidth, s):\n    return s\n\n\ndef _stripAnsi(s):\n    
r\"\"\"Remove ANSI escape sequences, both CSI and OSC hyperlinks.\n\n    CSI sequences are simply removed from the output, while OSC hyperlinks are replaced with the\n    link text. Note: it may be desirable to show the URI instead but this is not supported.\n\n        >>> repr(_stripAnsi(\"\\x1b]8;;https://example.com\\x1b\\\\This is a link\\x1b]8;;\\x1b\\\\\"))\n        \"'This is a link'\"\n\n        >>> repr(_stripAnsi(\"\\x1b[31mred\\x1b[0m text\"))\n        \"'red text'\"\n\n    \"\"\"\n    if isinstance(s, str):\n        return _ansiCodes.sub(r\"\\4\", s)\n    else:  # a bytestring\n        return _ansiCodesBytes.sub(r\"\\4\", s)\n\n\ndef _visibleWidth(s):\n    r\"\"\"Visible width of a printed string.\n\n    >>> _visibleWidth(\"\\x1b[31mhello\\x1b[0m\"), _visibleWidth(\"world\")\n    (5, 5)\n\n    \"\"\"\n    if isinstance(s, (str, bytes)):\n        return len(_stripAnsi(s))\n    else:\n        return len(str(s))\n\n\ndef _isMultiline(s):\n    if isinstance(s, str):\n        return bool(re.search(_multilineCodes, s))\n    else:\n        # a bytestring\n        return bool(re.search(_multilineCodesBytes, s))\n\n\ndef _multilineWidth(multilineS, lineWidthFn=len):\n    \"\"\"Visible width of a potentially multiline content.\"\"\"\n    return max(map(lineWidthFn, re.split(\"[\\r\\n]\", multilineS)))\n\n\ndef _chooseWidthFn(hasInvisible, isMultiline):\n    \"\"\"Return a function to calculate visible cell width.\"\"\"\n    if hasInvisible:\n        lineWidthFn = _visibleWidth\n    else:\n        lineWidthFn = len\n\n    if isMultiline:\n        widthFn = lambda s: _multilineWidth(s, lineWidthFn)\n    else:\n        widthFn = lineWidthFn\n\n    return widthFn\n\n\ndef _alignColumnChoosePadfn(strings, alignment, hasInvisible):\n    if alignment == \"right\":\n        if not PRESERVE_WHITESPACE:\n            strings = [s.strip() for s in strings]\n        padfn = _padleft\n    elif alignment == \"center\":\n        if not PRESERVE_WHITESPACE:\n            strings = 
[s.strip() for s in strings]\n        padfn = _padboth\n    elif alignment == \"decimal\":\n        if hasInvisible:\n            decimals = [_afterpoint(_stripAnsi(s)) for s in strings]\n        else:\n            decimals = [_afterpoint(s) for s in strings]\n        maxdecimals = max(decimals)\n        strings = [s + (maxdecimals - decs) * \" \" for s, decs in zip(strings, decimals)]\n        padfn = _padleft\n    elif not alignment:\n        padfn = _padnone\n    else:\n        if not PRESERVE_WHITESPACE:\n            strings = [s.strip() for s in strings]\n        padfn = _padright\n    return strings, padfn\n\n\ndef _alignColumnChooseWidthFn(hasInvisible, isMultiline):\n    if hasInvisible:\n        lineWidthFn = _visibleWidth\n    else:\n        lineWidthFn = len\n\n    if isMultiline:\n        widthFn = lambda s: _alignColumnMultilineWidth(s, lineWidthFn)\n    else:\n        widthFn = lineWidthFn\n\n    return widthFn\n\n\ndef _alignColumnMultilineWidth(multilineS, lineWidthFn=len):\n    \"\"\"Visible width of a potentially multiline content.\"\"\"\n    return list(map(lineWidthFn, re.split(\"[\\r\\n]\", multilineS)))\n\n\ndef _flatList(nestedList):\n    ret = []\n    for item in nestedList:\n        if isinstance(item, list):\n            for subitem in item:\n                ret.append(subitem)\n        else:\n            ret.append(item)\n    return ret\n\n\ndef _alignColumn(strings, alignment, minwidth=0, hasInvisible=True, isMultiline=False):\n    \"\"\"[string] -> [padded_string].\"\"\"\n    strings, padfn = _alignColumnChoosePadfn(strings, alignment, hasInvisible)\n    widthFn = _alignColumnChooseWidthFn(hasInvisible, isMultiline)\n\n    sWidths = list(map(widthFn, strings))\n    maxwidth = max(max(_flatList(sWidths)), minwidth)\n    if isMultiline:\n        if not hasInvisible:\n            paddedStrings = [\"\\n\".join([padfn(maxwidth, s) for s in ms.splitlines()]) for ms in strings]\n        else:\n            # enable wide-character width 
corrections\n            sLens = [[len(s) for s in re.split(\"[\\r\\n]\", ms)] for ms in strings]\n            visibleWidths = [[maxwidth - (w - ll) for w, ll in zip(mw, ml)] for mw, ml in zip(sWidths, sLens)]\n            # wcswidth and _visibleWidth don't count invisible characters;\n            # padfn doesn't need to apply another correction\n            paddedStrings = [\n                \"\\n\".join([padfn(w, s) for s, w in zip((ms.splitlines() or ms), mw)])\n                for ms, mw in zip(strings, visibleWidths)\n            ]\n    else:  # single-line cell values\n        if not hasInvisible:\n            paddedStrings = [padfn(maxwidth, s) for s in strings]\n        else:\n            # enable wide-character width corrections\n            sLens = list(map(len, strings))\n            visibleWidths = [maxwidth - (w - ll) for w, ll in zip(sWidths, sLens)]\n            # wcswidth and _visibleWidth don't count invisible characters;\n            # padfn doesn't need to apply another correction\n            paddedStrings = [padfn(w, s) for s, w in zip(strings, visibleWidths)]\n\n    return paddedStrings\n\n\ndef _moreGeneric(type1, type2):\n    types = {\n        type(None): 0,\n        bool: 1,\n        int: 2,\n        float: 3,\n        bytes: 4,\n        str: 5,\n    }\n    invtypes = {\n        5: str,\n        4: bytes,\n        3: float,\n        2: int,\n        1: bool,\n        0: type(None),\n    }\n    moregeneric = max(types.get(type1, 5), types.get(type2, 5))\n    return invtypes[moregeneric]\n\n\ndef _columnType(strings, hasInvisible=True, numparse=True):\n    r\"\"\"The least generic type all column values are convertible to.\n\n    >>> _columnType([True, False]) is bool\n    True\n    >>> _columnType([\"1\", \"2\"]) is int\n    True\n    >>> _columnType([\"1\", \"2.3\"]) is float\n    True\n    >>> _columnType([\"1\", \"2.3\", \"four\"]) is str\n    True\n    >>> _columnType([\"four\", \"\\u043f\\u044f\\u0442\\u044c\"]) is str\n    True\n    
>>> _columnType([None, \"brux\"]) is str\n    True\n    >>> _columnType([1, 2, None]) is int\n    True\n    >>> import datetime as dt\n    >>> _columnType([dt.datetime(1991, 2, 19), dt.time(17, 35)]) is str\n    True\n\n    \"\"\"\n    types = [_type(s, hasInvisible, numparse) for s in strings]\n    return reduce(_moreGeneric, types, bool)\n\n\ndef _format(val, valtype, floatFmt, intFmt, missingVal=\"\", hasInvisible=True):\n    r\"\"\"Format a value according to its type.\n\n    Unicode is supported::\n\n        >>> hrow = ['\\u0431\\u0443\\u043a\\u0432\\u0430', '\\u0446\\u0438\\u0444\\u0440\\u0430'] ; \\\n            tbl = [['\\u0430\\u0437', 2], ['\\u0431\\u0443\\u043a\\u0438', 4]] ; \\\n            good_result = '\\\\u0431\\\\u0443\\\\u043a\\\\u0432\\\\u0430      \\\\u0446\\\\u0438\\\\u0444\\\\u0440\\\\u0430\\\\n-------  -------\\\\n\\\\u0430\\\\u0437             2\\\\n\\\\u0431\\\\u0443\\\\u043a\\\\u0438           4' ; \\\n            tabulate(tbl, headers=hrow) == good_result\n        True\n\n    \"\"\"  # noqa\n    if val is None:\n        return missingVal\n\n    if valtype is str:\n        return f\"{val}\"\n    elif valtype is int:\n        return format(val, intFmt)\n    elif valtype is bytes:\n        try:\n            return str(val, \"ascii\")\n        except (TypeError, UnicodeDecodeError):\n            return str(val)\n    elif valtype is float:\n        isAColoredNumber = hasInvisible and isinstance(val, (str, bytes))\n        if isAColoredNumber:\n            rawVal = _stripAnsi(val)\n            formattedVal = format(float(rawVal), floatFmt)\n            return val.replace(rawVal, formattedVal)\n        else:\n            return format(float(val), floatFmt)\n    else:\n        return f\"{val}\"\n\n\ndef _alignHeader(header, alignment, width, visibleWidth, isMultiline=False, widthFn=None):\n    \"\"\"Pad string header to width chars given known visibleWidth of the header.\"\"\"\n    if isMultiline:\n        headerLines = re.split(_multilineCodes, 
header)\n        paddedLines = [_alignHeader(h, alignment, width, widthFn(h)) for h in headerLines]\n        return \"\\n\".join(paddedLines)\n\n    ninvisible = len(header) - visibleWidth\n    width += ninvisible\n    if alignment == \"left\":\n        return _padright(width, header)\n    elif alignment == \"center\":\n        return _padboth(width, header)\n    elif not alignment:\n        return f\"{header}\"\n    else:\n        return _padleft(width, header)\n\n\ndef _removeSeparatingLines(rows):\n    if type(rows) is list:\n        separatingLines = []\n        sansRows = []\n        for index, row in enumerate(rows):\n            if _isSeparatingLine(row):\n                separatingLines.append(index)\n            else:\n                sansRows.append(row)\n        return sansRows, separatingLines\n    else:\n        return rows, None\n\n\ndef _reinsertSeparatingLines(rows, separatingLines):\n    if separatingLines:\n        for index in separatingLines:\n            rows.insert(index, SEPARATING_LINE)\n\n\ndef _prependRowIndex(rows, index):\n    \"\"\"Add a left-most index column.\"\"\"\n    if index is None or index is False:\n        return rows\n    if isinstance(index, Sized) and len(index) != len(rows):\n        raise ValueError(\n            \"index must be as long as the number of data rows: \"\n            + \"len(index)={} len(rows)={}\".format(len(index), len(rows))\n        )\n    sansRows, separatingLines = _removeSeparatingLines(rows)\n    newRows = []\n    indexIter = iter(index)\n    for row in sansRows:\n        indexV = next(indexIter)\n        newRows.append([indexV] + list(row))\n    rows = newRows\n    _reinsertSeparatingLines(rows, separatingLines)\n    return rows\n\n\ndef _bool(val):\n    \"\"\"A wrapper around standard bool() which doesn't throw on NumPy arrays.\"\"\"\n    try:\n        return bool(val)\n    except ValueError:\n        # val is likely to be a numpy array with many elements\n        return False\n\n\ndef 
_normalizeTabularData(data, headers, showIndex=\"default\"):\n    \"\"\"Transform a supported data type to a list of lists & a list of headers, with header padding.\n\n    Supported tabular data types:\n\n    * list-of-lists or another iterable of iterables\n    * list of named tuples (usually used with headers=\"keys\")\n    * list of dicts (usually used with headers=\"keys\")\n    * list of OrderedDicts (usually used with headers=\"keys\")\n    * list of dataclasses (Python 3.7+ only, usually used with headers=\"keys\")\n    * 2D NumPy arrays\n    * NumPy record arrays (usually used with headers=\"keys\")\n    * dict of iterables (usually used with headers=\"keys\")\n\n    The first row can be used as headers if headers=\"firstrow\", column indices can be used as\n    headers if headers=\"keys\".\n\n    If showIndex=\"always\", show row indices for all types of data.\n    If showIndex=\"never\", don't show row indices for all types of data.\n    If showIndex is an iterable, show its values as row indices.\n    \"\"\"\n    try:\n        bool(headers)\n    except ValueError:\n        # numpy.ndarray, ...\n        headers = list(headers)\n\n    index = None\n    if hasattr(data, \"keys\"):\n        # dict-like\n        keys = data.keys()\n\n        # fill out default values, to ensure all data lists are the same length\n        vals = list(data.values())\n        maxLen = max([len(v) for v in vals], default=0)\n        vals = [[v for v in vv] + [None] * (maxLen - len(vv)) for vv in vals]\n        rows = [tuple(v[i] for v in vals) for i in range(maxLen)]\n\n        if headers == \"keys\":\n            # headers should be strings\n            headers = list(map(str, keys))\n    else:\n        # it's a usual iterable of iterables, or a NumPy array, or an iterable of dataclasses\n        rows = list(data)\n\n        if headers == \"keys\" and not rows:\n            # an empty table\n            headers = []\n        elif headers == \"keys\" and hasattr(data, \"dtype\") 
and getattr(data.dtype, \"names\"):\n            # numpy record array\n            headers = data.dtype.names\n        elif headers == \"keys\" and len(rows) > 0 and isinstance(rows[0], tuple) and hasattr(rows[0], \"_fields\"):\n            # namedtuple\n            headers = list(map(str, rows[0]._fields))\n        elif len(rows) > 0 and hasattr(rows[0], \"keys\") and hasattr(rows[0], \"values\"):\n            # dict-like object\n            uniqKeys = set()  # implements hashed lookup\n            keys = []  # storage for set\n            if headers == \"firstrow\":\n                firstdict = rows[0] if len(rows) > 0 else {}\n                keys.extend(firstdict.keys())\n                uniqKeys.update(keys)\n                rows = rows[1:]\n            for row in rows:\n                for k in row.keys():\n                    # Save unique items in input order\n                    if k not in uniqKeys:\n                        keys.append(k)\n                        uniqKeys.add(k)\n            if headers == \"keys\":\n                headers = keys\n            elif isinstance(headers, dict):\n                # a dict of headers for a list of dicts\n                headers = [headers.get(k, k) for k in keys]\n                headers = list(map(str, headers))\n            elif headers == \"firstrow\":\n                if len(rows) > 0:\n                    headers = [firstdict.get(k, k) for k in keys]\n                    headers = list(map(str, headers))\n                else:\n                    headers = []\n            elif headers:\n                raise ValueError(\"headers for a list of dicts is not a dict or a keyword\")\n            rows = [[row.get(k) for k in keys] for row in rows]\n        elif len(rows) > 0 and dataclasses.is_dataclass(rows[0]):\n            # Python 3.7+'s dataclass\n            fieldNames = [field.name for field in dataclasses.fields(rows[0])]\n            if headers == \"keys\":\n                headers = fieldNames\n        
    rows = [[getattr(row, f) for f in fieldNames] for row in rows]\n        elif headers == \"keys\" and len(rows) > 0:\n            # keys are column indices\n            headers = list(map(str, range(len(rows[0]))))\n\n    # take headers from the first row if necessary\n    if headers == \"firstrow\" and len(rows) > 0:\n        if index is not None:\n            headers = [index[0]] + list(rows[0])\n            index = index[1:]\n        else:\n            headers = rows[0]\n        headers = list(map(str, headers))  # headers should be strings\n        rows = rows[1:]\n    elif headers == \"firstrow\":\n        headers = []\n\n    headers = list(map(str, headers))\n    rows = list(map(lambda r: r if _isSeparatingLine(r) else list(r), rows))\n\n    # add or remove an index column\n    showIndexIsSStr = type(showIndex) in [str, bytes]\n    if showIndex == \"default\" and index is not None:\n        rows = _prependRowIndex(rows, index)\n    elif isinstance(showIndex, Sized) and not showIndexIsSStr:\n        rows = _prependRowIndex(rows, list(showIndex))\n    elif isinstance(showIndex, Iterable) and not showIndexIsSStr:\n        rows = _prependRowIndex(rows, showIndex)\n    elif showIndex == \"always\" or (_bool(showIndex) and not showIndexIsSStr):\n        if index is None:\n            index = list(range(len(rows)))\n        rows = _prependRowIndex(rows, index)\n\n    # pad with empty headers for initial columns if necessary\n    headersPad = 0\n    if headers and len(rows) > 0:\n        headersPad = max(0, len(rows[0]) - len(headers))\n        headers = [\"\"] * headersPad + headers\n\n    return rows, headers, headersPad\n\n\ndef _wrapTextToColWidths(listOfLists, colwidths, numparses=True):\n    if len(listOfLists):\n        numCols = len(listOfLists[0])\n    else:\n        numCols = 0\n\n    numparses = _expandIterable(numparses, numCols, True)\n    result = []\n\n    for row in listOfLists:\n        newRow = []\n        for cell, width, numparse in zip(row, 
colwidths, numparses):\n            if _isnumber(cell) and numparse:\n                newRow.append(cell)\n                continue\n\n            if width is not None:\n                wrapper = TextWrapper(width=width)\n                # Cast based on our internal type handling. Any future custom formatting of types\n                # (such as datetimes) may need to be more explicit than just `str` of the object\n                castedCell = str(cell) if _isnumber(cell) else _type(cell, numparse)(cell)\n                wrapped = [\"\\n\".join(wrapper.wrap(line)) for line in castedCell.splitlines() if line.strip() != \"\"]\n                newRow.append(\"\\n\".join(wrapped))\n            else:\n                newRow.append(cell)\n        result.append(newRow)\n\n    return result\n\n\ndef _toStr(s, encoding=\"utf8\", errors=\"ignore\"):\n    \"\"\"\n    A type safe wrapper for converting a bytestring to str.\n\n    This is essentially just a wrapper around .decode() intended for use with things like map(), but\n    with some specific behavior:\n\n    1. if the given parameter is not a bytestring, it is returned unmodified\n    2. 
decode() is called for the given parameter and assumes utf8 encoding, but the default error\n       behavior is changed from 'strict' to 'ignore'\n\n        >>> repr(_toStr(b\"foo\"))\n        \"'foo'\"\n\n        >>> repr(_toStr(\"foo\"))\n        \"'foo'\"\n\n        >>> repr(_toStr(42))\n        \"'42'\"\n\n    \"\"\"\n    if isinstance(s, bytes):\n        return s.decode(encoding=encoding, errors=errors)\n    return str(s)\n\n\ndef tabulate(\n    data,\n    headers=(),\n    tableFmt=\"simple\",\n    floatFmt=_DEFAULT_FLOAT_FMT,\n    intFmt=_DEFAULT_INT_FMT,\n    numAlign=_DEFAULT_ALIGN,\n    strAlign=_DEFAULT_ALIGN,\n    missingVal=_DEFAULT_MISSING_VAL,\n    showIndex=\"default\",\n    disableNumParse=False,\n    colGlobalAlign=None,\n    colAlign=None,\n    maxColWidths=None,\n    headersGlobalAlign=None,\n    headersAlign=None,\n    rowAlign=None,\n    maxHeaderColWidths=None,\n):\n    \"\"\"Format a fixed width table for pretty printing.\n\n    Parameters\n    ----------\n    data : object\n        The tabular data you want to print. This can be a list-of-lists/iterables, dict-of-lists/\n        iterables, 2D numpy arrays, or list of dataclasses.\n    headers=(), optional\n        Nice column names. If this is \"firstrow\", the first row of the data will be used. 
If it is\n        \"keys\", then dictionary keys or column indices are used.\n    tableFmt : str, optional\n        There are custom table formats defined in this file, and you can choose between them with\n        this string: \"armi\", \"simple\", \"plain\", \"grid\", \"github\", \"pretty\", \"psql\", \"rst\", \"tsv\".\n    floatFmt : str, optional\n        A format specification used for columns which contain numeric data with a decimal point.\n        This can also be a list or tuple of format strings, one per column.\n    intFmt : str, optional\n        A format specification used for columns which contain numeric data without a decimal point.\n        This can also be a list or tuple of format strings, one per column.\n    numAlign : str, optional\n        Specially align numbers, options: \"right\", \"center\", \"left\", \"decimal\".\n    strAlign : str, optional\n        Specially align strings, options: \"right\", \"center\", \"left\".\n    missingVal : str, optional\n        `None` values are replaced with a `missingVal` string.\n    showIndex : str, optional\n        Show these rows of data. If \"always\", show row indices for all types of data. If \"never\",\n        don't show row indices for all types of data. If showIndex is an iterable, show its values.\n    disableNumParse : bool, optional\n        To disable number parsing (and alignment), use `disableNumParse=True`. For more fine grained\n        control, `[0, 2]` would disable number parsing on the first and third columns.\n    colGlobalAlign : str, optional\n        Allows for global alignment of columns, before any specific override from `colAlign`.\n        Possible values are: None, \"right\", \"center\", \"decimal\", \"left\".\n    colAlign : str, optional\n        Allows for column-wise override starting from left-most column. 
Possible values are:\n        \"global\" (no override), \"right\", \"center\", \"decimal\", \"left\".\n    maxColWidths : list, optional\n        A list of the maximum column widths.\n    headersGlobalAlign : str, optional\n        Allows for global headers alignment, before any specific override from `headersAlign`.\n        Possible values are: None (follow columns alignment), \"right\", \"center\", \"left\".\n    headersAlign : str, optional\n        Allows for header-wise override starting from left-most given header. Possible values are:\n        \"global\" (no override), \"same\" (follow column alignment), \"right\", \"center\", \"left\".\n    rowAlign : str, optional\n        How do you want to align rows: \"right\", \"center\", \"decimal\", \"left\".\n    maxHeaderColWidths : list, optional\n        List of column widths for the header.\n\n    Returns\n    -------\n    str\n        A text representation of the tabular data.\n    \"\"\"\n    if data is None:\n        data = []\n\n    listOfLists, headers, headersPad = _normalizeTabularData(data, headers, showIndex=showIndex)\n    listOfLists, separatingLines = _removeSeparatingLines(listOfLists)\n\n    if maxColWidths is not None:\n        if len(listOfLists):\n            numCols = len(listOfLists[0])\n        else:\n            numCols = 0\n        if isinstance(maxColWidths, int):  # Expand scalar for all columns\n            maxColWidths = _expandIterable(maxColWidths, numCols, maxColWidths)\n        else:  # Ignore col width for any 'trailing' columns\n            maxColWidths = _expandIterable(maxColWidths, numCols, None)\n\n        numparses = _expandNumparse(disableNumParse, numCols)\n        listOfLists = _wrapTextToColWidths(listOfLists, maxColWidths, numparses=numparses)\n\n    if maxHeaderColWidths is not None:\n        numCols = len(listOfLists[0])\n        if isinstance(maxHeaderColWidths, int):  # Expand scalar for all columns\n            maxHeaderColWidths = 
_expandIterable(maxHeaderColWidths, numCols, maxHeaderColWidths)\n        else:  # Ignore col width for any 'trailing' columns\n            maxHeaderColWidths = _expandIterable(maxHeaderColWidths, numCols, None)\n\n        numparses = _expandNumparse(disableNumParse, numCols)\n        headers = _wrapTextToColWidths([headers], maxHeaderColWidths, numparses=numparses)[0]\n\n    # empty values in the first column of RST tables should be escaped\n    # \"\" should be escaped as \"\\\\ \" or \"..\"\n    if tableFmt == \"rst\":\n        listOfLists, headers = _rstEscapeFirstColumn(listOfLists, headers)\n\n    # Pretty table formatting does not use any extra padding. Numbers are not parsed and are treated\n    # the same as strings for alignment. Check if pretty is the format being used and override the\n    # defaults so it does not impact other formats.\n    minPadding = MIN_PADDING\n    if tableFmt == \"pretty\":\n        minPadding = 0\n        disableNumParse = True\n        numAlign = \"center\" if numAlign == _DEFAULT_ALIGN else numAlign\n        strAlign = \"center\" if strAlign == _DEFAULT_ALIGN else strAlign\n    else:\n        numAlign = \"decimal\" if numAlign == _DEFAULT_ALIGN else numAlign\n        strAlign = \"left\" if strAlign == _DEFAULT_ALIGN else strAlign\n\n    # optimization: look for ANSI control codes once, enable smart width functions only if a control\n    # code is found\n    #\n    # convert the headers and rows into a single, tab-delimited string ensuring that any bytestrings\n    # are decoded safely (i.e. 
errors ignored)\n    plainText = \"\\t\".join(\n        chain(\n            # headers\n            map(_toStr, headers),\n            # rows: chain the rows together into a single iterable after mapping the bytestring\n            # conversion to each cell value\n            chain.from_iterable(map(_toStr, row) for row in listOfLists),\n        )\n    )\n\n    hasInvisible = _ansiCodes.search(plainText) is not None\n\n    if not isinstance(tableFmt, TableFormat) and tableFmt in multilineFormats and _isMultiline(plainText):\n        tableFmt = multilineFormats.get(tableFmt, tableFmt)\n        isMultiline = True\n    else:\n        isMultiline = False\n    widthFn = _chooseWidthFn(hasInvisible, isMultiline)\n\n    # format rows and columns, convert numeric values to strings\n    cols = list(zip_longest(*listOfLists))\n    numparses = _expandNumparse(disableNumParse, len(cols))\n    coltypes = [_columnType(col, numparse=np) for col, np in zip(cols, numparses)]\n    if isinstance(floatFmt, str):\n        # old version: just duplicate the string to use in each column\n        floatFormats = len(cols) * [floatFmt]\n    else:  # if floatFmt is list, tuple etc we have one per column\n        floatFormats = list(floatFmt)\n        if len(floatFormats) < len(cols):\n            floatFormats.extend((len(cols) - len(floatFormats)) * [_DEFAULT_FLOAT_FMT])\n    if isinstance(intFmt, str):\n        # old version: just duplicate the string to use in each column\n        intFormats = len(cols) * [intFmt]\n    else:  # if intFmt is list, tuple etc we have one per column\n        intFormats = list(intFmt)\n        if len(intFormats) < len(cols):\n            intFormats.extend((len(cols) - len(intFormats)) * [_DEFAULT_INT_FMT])\n    if isinstance(missingVal, str):\n        missingVals = len(cols) * [missingVal]\n    else:\n        missingVals = list(missingVal)\n        if len(missingVals) < len(cols):\n            missingVals.extend((len(cols) - len(missingVals)) * 
[_DEFAULT_MISSING_VAL])\n    cols = [\n        [_format(v, ct, flFmt, intFmt, missV, hasInvisible) for v in c]\n        for c, ct, flFmt, intFmt, missV in zip(cols, coltypes, floatFormats, intFormats, missingVals)\n    ]\n\n    # align columns\n    # first set global alignment\n    if colGlobalAlign is not None:  # if global alignment provided\n        aligns = [colGlobalAlign] * len(cols)\n    else:  # default\n        aligns = [numAlign if ct in [int, float] else strAlign for ct in coltypes]\n\n    # then specific alignments\n    if colAlign is not None:\n        assert isinstance(colAlign, Iterable)\n        if isinstance(colAlign, str):\n            runLog.warning(\n                f\"As a string, `colAlign` is interpreted as {[c for c in colAlign]}. Did you \"\n                + f'mean `colGlobalAlign = \"{colAlign}\"` or `colAlign = (\"{colAlign}\",)`?'\n            )\n        for idx, align in enumerate(colAlign):\n            if not idx < len(aligns):\n                break\n            elif align != \"global\":\n                aligns[idx] = align\n    minwidths = [widthFn(h) + minPadding for h in headers] if headers else [0] * len(cols)\n    cols = [_alignColumn(c, a, minw, hasInvisible, isMultiline) for c, a, minw in zip(cols, aligns, minwidths)]\n\n    alignsHeaders = None\n    if headers:\n        # align headers and add headers\n        tCols = cols or [[\"\"]] * len(headers)\n        # first set global alignment\n        if headersGlobalAlign is not None:  # if global alignment provided\n            alignsHeaders = [headersGlobalAlign] * len(tCols)\n        else:  # default\n            alignsHeaders = aligns or [strAlign] * len(headers)\n        # then specific header alignments\n        if headersAlign is not None:\n            assert isinstance(headersAlign, Iterable)\n            if isinstance(headersAlign, str):\n                runLog.warning(\n                    f\"As a string, `headersAlign` is interpreted as {[c for c in headersAlign]}. 
\"\n                    + f'Did you mean `headersGlobalAlign = \"{headersAlign}\"` or `headersAlign = '\n                    + f'(\"{headersAlign}\",)`?'\n                )\n            for idx, align in enumerate(headersAlign):\n                hidx = headersPad + idx\n                if not hidx < len(alignsHeaders):\n                    break\n                elif align == \"same\" and hidx < len(aligns):  # same as column align\n                    alignsHeaders[hidx] = aligns[hidx]\n                elif align != \"global\":\n                    alignsHeaders[hidx] = align\n        minwidths = [max(minw, max(widthFn(cl) for cl in c)) for minw, c in zip(minwidths, tCols)]\n        headers = [\n            _alignHeader(h, a, minw, widthFn(h), isMultiline, widthFn)\n            for h, a, minw in zip(headers, alignsHeaders, minwidths)\n        ]\n        rows = list(zip(*cols))\n    else:\n        minwidths = [max(widthFn(cl) for cl in c) for c in cols]\n        rows = list(zip(*cols))\n\n    if not isinstance(tableFmt, TableFormat):\n        tableFmt = _tableFormats.get(tableFmt, _tableFormats[\"simple\"])\n\n    raDefault = rowAlign if isinstance(rowAlign, str) else None\n    rowAligns = _expandIterable(rowAlign, len(rows), raDefault)\n    _reinsertSeparatingLines(rows, separatingLines)\n\n    return _formatTable(\n        tableFmt,\n        headers,\n        alignsHeaders,\n        rows,\n        minwidths,\n        aligns,\n        isMultiline,\n        rowAligns=rowAligns,\n    )\n\n\ndef _expandNumparse(disableNumParse, columnCount):\n    \"\"\"\n    Return a list of bools of length `columnCount` which indicates whether number parsing should be\n    used on each column.\n\n    If `disableNumParse` is a list of indices, each of those indices are False, and everything else\n    is True. 
If `disableNumParse` is a bool, then the returned list is all the same.\n    \"\"\"\n    if isinstance(disableNumParse, Iterable):\n        numparses = [True] * columnCount\n        for index in disableNumParse:\n            numparses[index] = False\n        return numparses\n    else:\n        return [not disableNumParse] * columnCount\n\n\ndef _expandIterable(original, numDesired, default):\n    \"\"\"\n    Expands the `original` argument to return a return a list of length `numDesired`. If `original`\n    is shorter than `numDesired`, it will be padded with the value in `default`.\n\n    If `original` is not a list to begin with (i.e. scalar value) a list of length `numDesired`\n    completely populated with `default` will be returned\n    \"\"\"\n    if isinstance(original, Iterable) and not isinstance(original, str):\n        return original + [default] * (numDesired - len(original))\n    else:\n        return [default] * numDesired\n\n\ndef _padRow(cells, padding):\n    if cells:\n        pad = \" \" * padding\n        paddedCells = [pad + cell + pad for cell in cells]\n        return paddedCells\n    else:\n        return cells\n\n\ndef _buildSimpleRow(paddedCells, rowfmt):\n    \"\"\"Format row according to DataRow format without padding.\"\"\"\n    begin, sep, end = rowfmt\n    return (begin + sep.join(paddedCells) + end).rstrip()\n\n\ndef _buildRow(paddedCells, colwidths, colAligns, rowfmt):\n    \"\"\"Return a string which represents a row of data cells.\"\"\"\n    if not rowfmt:\n        return None\n    if hasattr(rowfmt, \"__call__\"):\n        return rowfmt(paddedCells, colwidths, colAligns)\n    else:\n        return _buildSimpleRow(paddedCells, rowfmt)\n\n\ndef _appendBasicRow(lines, paddedCells, colwidths, colAligns, rowfmt, rowAlign=None):\n    # NOTE: rowAlign is ignored and exists for api compatibility with _appendMultilineRow\n    lines.append(_buildRow(paddedCells, colwidths, colAligns, rowfmt))\n    return lines\n\n\ndef 
_alignCellVeritically(textLines, numLines, columnWidth, rowAlignment):\n    deltaLines = numLines - len(textLines)\n    blank = [\" \" * columnWidth]\n    if rowAlignment == \"bottom\":\n        return blank * deltaLines + textLines\n    elif rowAlignment == \"center\":\n        topDelta = deltaLines // 2\n        bottomDelta = deltaLines - topDelta\n        return topDelta * blank + textLines + bottomDelta * blank\n    else:\n        return textLines + blank * deltaLines\n\n\ndef _appendMultilineRow(lines, paddedMultilineCells, paddedWidths, colAligns, rowfmt, pad, rowAlign=None):\n    colwidths = [w - 2 * pad for w in paddedWidths]\n    cellsLines = [c.splitlines() for c in paddedMultilineCells]\n    nlines = max(map(len, cellsLines))  # number of lines in the row\n\n    cellsLines = [_alignCellVeritically(cl, nlines, w, rowAlign) for cl, w in zip(cellsLines, colwidths)]\n    linesCells = [[cl[i] for cl in cellsLines] for i in range(nlines)]\n    for ln in linesCells:\n        paddedLn = _padRow(ln, pad)\n        _appendBasicRow(lines, paddedLn, colwidths, colAligns, rowfmt)\n\n    return lines\n\n\ndef _buildLine(colwidths, colAligns, linefmt):\n    \"\"\"Return a string which represents a horizontal line.\"\"\"\n    if not linefmt:\n        return None\n    if hasattr(linefmt, \"__call__\"):\n        return linefmt(colwidths, colAligns)\n    else:\n        begin, fill, sep, end = linefmt\n        cells = [fill * w for w in colwidths]\n        return _buildSimpleRow(cells, (begin, sep, end))\n\n\ndef _appendLine(lines, colwidths, colAligns, linefmt):\n    lines.append(_buildLine(colwidths, colAligns, linefmt))\n    return lines\n\n\ndef _formatTable(fmt, headers, headersAligns, rows, colwidths, colAligns, isMultiline, rowAligns):\n    \"\"\"Produce a plain-text representation of the table.\"\"\"\n    lines = []\n    hidden = fmt.withHeaderHide if (headers and fmt.withHeaderHide) else []\n    pad = fmt.padding\n    headerrow = fmt.headerrow\n\n    paddedWidths = 
[(w + 2 * pad) for w in colwidths]\n    if isMultiline:\n        padRow = lambda row, _: row\n        appendRow = partial(_appendMultilineRow, pad=pad)\n    else:\n        padRow = _padRow\n        appendRow = _appendBasicRow\n\n    paddedHeaders = padRow(headers, pad)\n    paddedRows = [padRow(row, pad) for row in rows]\n\n    if fmt.lineabove and \"lineabove\" not in hidden:\n        _appendLine(lines, paddedWidths, colAligns, fmt.lineabove)\n\n    if paddedHeaders:\n        appendRow(lines, paddedHeaders, paddedWidths, headersAligns, headerrow)\n        if fmt.linebelowheader and \"linebelowheader\" not in hidden:\n            _appendLine(lines, paddedWidths, colAligns, fmt.linebelowheader)\n\n    if paddedRows and fmt.linebetweenrows and \"linebetweenrows\" not in hidden:\n        # initial rows with a line below\n        for row, ralign in zip(paddedRows[:-1], rowAligns):\n            appendRow(lines, row, paddedWidths, colAligns, fmt.datarow, rowAlign=ralign)\n            _appendLine(lines, paddedWidths, colAligns, fmt.linebetweenrows)\n        # the last row without a line below\n        appendRow(\n            lines,\n            paddedRows[-1],\n            paddedWidths,\n            colAligns,\n            fmt.datarow,\n            rowAlign=rowAligns[-1],\n        )\n    else:\n        separatingLine = (\n            fmt.linebetweenrows or fmt.linebelowheader or fmt.linebelow or fmt.lineabove or Line(\"\", \"\", \"\", \"\")\n        )\n        for row in paddedRows:\n            # test to see if either the 1st column or the 2nd column has the SEPARATING_LINE flag\n            if _isSeparatingLine(row):\n                _appendLine(lines, paddedWidths, colAligns, separatingLine)\n            else:\n                appendRow(lines, row, paddedWidths, colAligns, fmt.datarow)\n\n    if fmt.linebelow and \"linebelow\" not in hidden:\n        _appendLine(lines, paddedWidths, colAligns, fmt.linebelow)\n\n    if headers or rows:\n        return 
\"\\n\".join(lines)\n    else:\n        return \"\"\n"
  },
  {
    "path": "armi/utils/tests/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n"
  },
  {
    "path": "armi/utils/tests/resources/lower/includeA.yaml",
    "content": "full_name: Jennifer Person\n# some comment in includeA\nchildren: \n  !include includeB.yaml\n"
  },
  {
    "path": "armi/utils/tests/resources/lower/includeB.yaml",
    "content": "- full_name: Elizabeth Person\n- full_name: Catharine Person\n"
  },
  {
    "path": "armi/utils/tests/resources/root.yaml",
    "content": "# Behold, the Person family\nbobby: &bobby\n  full_name: Robert Person\n\nbilly:\n  full_name: William Person\n  # comment\n  children:\n  - *bobby\n  - !include lower/includeA.yaml\n"
  },
  {
    "path": "armi/utils/tests/test_asciimaps.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Test ASCII maps.\"\"\"\n\nimport io\nimport unittest\n\nfrom armi.utils import asciimaps\n\nCARTESIAN_MAP = \"\"\"2 2 2 2 2\n2 2 2 2 2\n2 1 1 1 2\n2 1 3 1 2\n2 3 1 1 2\n2 2 2 2 2\n\"\"\"\n\nHEX_THIRD_MAP = \"\"\"- - 3 3\n - 3 3 3\n  3 2 2 3\n 3 2 2 2 3\n  2 1 1 2 3\n   1 1 1 2 3\n  1 1 1 1 2 3\n   1 1 1 1 2 3\n    1 1 1 1 2\n   1 1 1 1 1 3\n    1 1 1 1 2 3\n     1 1 1 1 2\n    1 1 1 1 1 3\n     1 1 1 1 2\n      1 1 1 1 3\n     1 1 1 1 2\n      1 1 1 1 3\n     1 1 1 1 2\n\"\"\"\n\n# This core map is from refTestBase, and exhibited some issues when trying to read with\n# an older implementation of the 1/3 hex lattice reader.\nHEX_THIRD_MAP_2 = \"\"\"-   -   SH  SH\n  -   SH  SH  SH\n    SH  OC  OC  SH\n  SH  OC  OC  OC  SH\n    OC  EX  EX  OC  SH\n      EX  EX  EX  OC  SH\n    EX  MC  MC  EX  OC  SH\n      MC  HX  MC  EX  OC  SH\n        MC  MC  PC  EX  OC\n      MC  IC  MC  MC  EX  SH\n        IC  IC  MC  MC  OC  SH\n          PC  IC  MC  EX  OC\n        FA  FA  IC  TG  EX  SH\n          IC  FA  IC  MC  OC\n            IC  US  MC  EX  SH\n          EX  IC  IC  MC  OC\n            EX  FA  MC  EX  SH\n          EX  IC  IC  PC  OC\n\"\"\"\n\nHEX_THIRD_MAP_WITH_HOLES = \"\"\"-   -   SH  SH\n  -   SH  SH  SH\n    SH  OC  OC  SH\n  SH  OC  OC  OC  SH\n    OC  EX  EX  OC  SH\n      EX  EX  EX  OC  SH\n    EX  MC  MC  EX  OC  SH\n      MC  HX  MC  EX  OC  SH\n        
MC  -   PC  EX  OC\n      MC  IC  MC  MC  EX  SH\n        IC  IC  MC  MC  OC  SH\n          PC  IC  MC  EX  OC\n        FA  FA  IC  TG  EX  SH\n          IC  FA  IC  -   OC\n            -   US  MC  EX  SH\n          EX  IC  IC  MC  OC\n            EX  FA  MC  EX  SH\n          EX  IC  IC  PC  OC\n\"\"\"\n\nHEX_THIRD_MAP_WITH_EMPTY_ROW = \"\"\"-   -   SH  SH\n  -   SH  SH  SH\n    SH  OC  OC  SH\n  SH  OC  OC  OC  SH\n    OC  EX  EX  OC  SH\n      EX  EX  EX  OC  SH\n    EX  MC  MC  EX  OC  SH\n      MC  HX  MC  EX  OC  SH\n        MC  -   PC  EX  OC\n      MC  IC  MC  MC  EX  SH\n        IC  IC  MC  MC  OC  SH\n          -   -   -   -   - \n        FA  FA  IC  TG  EX  SH\n          IC  FA  IC  -   OC\n            -   US  MC  EX  SH\n          EX  IC  IC  MC  OC\n            EX  FA  MC  EX  SH\n          EX  IC  IC  PC  OC\n\"\"\"\n\n# This is a \"corners-up\" hexagonal map.\nHEX_FULL_MAP = \"\"\"- - - - - - - - - 1 1 1 1 1 1 1 1 1 4\n - - - - - - - - 1 1 1 1 1 1 1 1 1 1 1\n  - - - - - - - 1 8 1 1 1 1 1 1 1 1 1 1\n   - - - - - - 1 1 1 1 1 1 1 1 1 1 1 1 1\n    - - - - - 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n     - - - - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n      - - - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n       - - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n        - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n         7 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1\n          1 1 1 1 1 1 1 1 2 1 1 1 1 1 1 1 1 1\n           1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n            1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n             1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n              1 1 1 1 1 1 1 1 1 1 1 1 1 1\n               1 1 1 1 1 1 1 1 1 3 1 1 1\n                1 1 1 1 1 1 1 1 1 1 1 1\n                 1 6 1 1 1 1 1 1 1 1 1\n                  1 1 1 1 1 1 1 1 1 1\n\"\"\"\n\n# This is a \"flats-up\" hexagonal map.\nHEX_FULL_MAP_FLAT = \"\"\"-       -       -       -       ORS     ORS     ORS \n    -       -       -       ORS     ORS     ORS     ORS \n-       -       -       ORS     IRS     IRS     IRS     ORS \n    -       -     
  ORS     IRS     IRS     IRS     IRS     ORS \n-       -       ORS     IRS     RR89    RR89    RR89    IRS     ORS \n    -       ORS     IRS     RR89    RR89    RR89    RR89    IRS     ORS \n-       ORS     IRS     RR89    RR89    RR7     RR89    RR89    IRS     ORS \n    -       IRS     RR89    RR89    RR7     RR7     RR89    RR89    IRS \n-       ORS     RR89    RR89    RR7     OC      RR7     RR89    RR89    ORS \n    ORS     IRS     RR89    RR7     OC      OC      RR7     RR89    IRS     ORS \n-       IRS     RR89    RR7     OC      OC      FS      RR7     RR89    IRS \n    ORS     RR89    RR7     OC      OC      OC      OC      RR7     RR89    ORS \nORS     IRS     RR7     OC      OC      IC      OC      OC      RR7     IRS     ORS \n    IRS     RR89    OC      SC      ICS     IC      SC      OC      RR89    IRS \nORS     RR89    RR7     OC      IC      IC      IC      OC      RR7     RR89    ORS \n    IRS     RR89    OC      IC      IC      IC      IC      OC      RR89    IRS \nORS     RR89    RR7     SC      PC      ICS     PC      SC      RR7     RR89    ORS \n    IRS     RR89    OC      IC      IC      IC      IC      OC      RR89    IRS \nORS     RR89    RR7     OC      IC      IC      IC      OC      RR7     RR89    ORS \n    IRS     RR89    VOTA    ICS     IC      IRT     ICS     OC      RR89    IRS \nORS     RR89    RR7     OC      IC      IC      IC      OC      RR7     RR89    ORS \n    IRS     RR89    OC      IC      IC      IC      IC      OC      RR89    IRS \nORS     RR89    FS      OC      ICS     PC      ICS     OC      RR7     RR89    ORS \n    IRS     RR89    OC      OC      IC      IC      OC      OC      RR89    IRS \nORS     IRS     RR7     OC      OC      IC      OC      OC      RR7     IRS     ORS \n    ORS     RR89    RR7     OC      SC      SC      OC      FS      RR89    ORS \n-       IRS     RR89    RR7     OC      OC      OC      RR7     RR89    IRS \n    ORS     IRS     RR89    RR7     OC      OC      RR7     RR89    IRS     ORS 
\n-       ORS     RR89    RR89    RR7     OC      RR7     RR89    RR89    ORS \n    -       IRS     RR89    RR89    RR7     RR7     RR89    RR89    IRS \n        ORS     IRS     RR89    RR89    RR7     RR89    RR89    IRS     ORS \n            ORS     IRS     RR89    RR89    RR89    RR89    IRS     ORS \n                ORS     IRS     RR89    RR89    RR89    IRS     ORS \n                    ORS     IRS     IRS     IRS     IRS     ORS \n                        ORS     IRS     IRS     IRS     ORS \n                            ORS     ORS     ORS     ORS \n                                ORS     ORS     ORS \n\"\"\"\n\nHEX_FULL_MAP_SMALL = \"\"\"F\n F F\nF\nF F\n F\n\"\"\"\n\n\nclass TestAsciiMaps(unittest.TestCase):\n    \"\"\"Test ascii maps.\"\"\"\n\n    def test_cartesian(self):\n        \"\"\"Make sure we can read Cartesian maps.\"\"\"\n        asciimap = asciimaps.AsciiMapCartesian()\n        with io.StringIO() as stream:\n            stream.write(CARTESIAN_MAP)\n            stream.seek(0)\n            asciimap.readAscii(stream.read())\n\n        self.assertEqual(asciimap[0, 0], \"2\")\n        self.assertEqual(asciimap[1, 1], \"3\")\n        self.assertEqual(asciimap[2, 2], \"3\")\n        self.assertEqual(asciimap[3, 3], \"1\")\n        with self.assertRaises(KeyError):\n            asciimap[5, 2]\n\n        outMap = asciimaps.AsciiMapCartesian()\n        outMap.asciiLabelByIndices = asciimap.asciiLabelByIndices\n        outMap.gridContentsToAscii()\n        with io.StringIO() as stream:\n            outMap.writeAscii(stream)\n            stream.seek(0)\n            output = stream.read()\n            self.assertEqual(output, CARTESIAN_MAP)\n\n    def test_hexThird(self):\n        \"\"\"Read 1/3 core flats-up maps.\"\"\"\n        asciimap = asciimaps.AsciiMapHexThirdFlatsUp()\n        with io.StringIO() as stream:\n            stream.write(HEX_THIRD_MAP)\n            stream.seek(0)\n            asciimap.readAscii(stream.read())\n\n        with io.StringIO() 
as stream:\n            asciimap.writeAscii(stream)\n            stream.seek(0)\n            output = stream.read()\n            self.assertEqual(output, HEX_THIRD_MAP)\n\n        self.assertEqual(asciimap[7, 0], \"2\")\n        self.assertEqual(asciimap[8, 0], \"3\")\n        self.assertEqual(asciimap[8, -4], \"2\")\n        self.assertEqual(asciimap[0, 8], \"3\")\n        self.assertEqual(asciimap[0, 0], \"1\")\n        with self.assertRaises(KeyError):\n            asciimap[10, 0]\n\n    def test_hexWithHoles(self):\n        \"\"\"Read 1/3 core flats-up maps with holes.\"\"\"\n        asciimap = asciimaps.AsciiMapHexThirdFlatsUp()\n        with io.StringIO() as stream:\n            stream.write(HEX_THIRD_MAP_WITH_HOLES)\n            stream.seek(0)\n            asciimap.readAscii(stream.read())\n\n        with io.StringIO() as stream:\n            asciimap.writeAscii(stream)\n            stream.seek(0)\n            output = stream.read()\n            self.assertEqual(output, HEX_THIRD_MAP_WITH_HOLES)\n\n        self.assertEqual(asciimap[1, 1], asciimaps.PLACEHOLDER)\n        self.assertEqual(asciimap[5, 0], \"TG\")\n        with self.assertRaises(KeyError):\n            asciimap[10, 0]\n\n        # also test writing from pure data (vs. 
reading) gives the exact same map :o\n        with io.StringIO() as stream:\n            asciimap2 = asciimaps.AsciiMapHexThirdFlatsUp()\n            asciimap2.asciiLabelByIndices = asciimap.asciiLabelByIndices\n            asciimap2.gridContentsToAscii()\n            asciimap2.writeAscii(stream)\n            stream.seek(0)\n            output = stream.read()\n            self.assertEqual(output, HEX_THIRD_MAP_WITH_HOLES)\n\n    def test_hexWithEmptyRow(self):\n        \"\"\"Read 1/3 core flats-up maps with one entirely empty row.\"\"\"\n        asciimap = asciimaps.AsciiMapHexThirdFlatsUp()\n        with io.StringIO() as stream:\n            stream.write(HEX_THIRD_MAP_WITH_EMPTY_ROW)\n            stream.seek(0)\n            asciimap.readAscii(stream.read())\n\n        with io.StringIO() as stream:\n            asciimap.writeAscii(stream)\n            stream.seek(0)\n            output = stream.read()\n            self.assertEqual(output, HEX_THIRD_MAP_WITH_EMPTY_ROW)\n\n        self.assertEqual(asciimap[1, 1], asciimaps.PLACEHOLDER)\n        self.assertEqual(asciimap[6, 0], asciimaps.PLACEHOLDER)\n        self.assertEqual(asciimap[5, 0], \"TG\")\n        with self.assertRaises(KeyError):\n            asciimap[10, 0]\n\n    def test_troublesomeHexThird(self):\n        asciimap = asciimaps.AsciiMapHexThirdFlatsUp()\n        with io.StringIO() as stream:\n            stream.write(HEX_THIRD_MAP_2)\n            stream.seek(0)\n            asciimap.readAscii(stream.read())\n\n        with io.StringIO() as stream:\n            asciimap.writeAscii(stream)\n            stream.seek(0)\n            output = stream.read()\n            self.assertEqual(output, HEX_THIRD_MAP_2)\n\n        self.assertEqual(asciimap[5, 0], \"TG\")\n\n    def test_hexFullCornersUpSpotCheck(self):\n        \"\"\"Spot check some hex grid coordinates are what they should be.\"\"\"\n        # The corners and a central line of non-zero values.\n        corners_map = \"\"\"- - - - - - - - - 3 0 0 0 0 0 
0 0 0 2\n         - - - - - - - - 0 0 0 0 0 0 0 0 0 0 0\n          - - - - - - - 0 0 0 0 0 0 0 0 0 0 0 0\n           - - - - - - 0 0 0 0 0 0 0 0 0 0 0 0 0\n            - - - - - 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n             - - - - 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n              - - - 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n               - - 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n                - 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n                 4 0 0 0 0 0 0 0 0 0 1 2 3 4 5 6 7 0 1\n                  0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n                   0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n                    0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n                     0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\n                      0 0 0 0 0 0 0 0 0 0 0 0 0 0\n                       0 0 0 0 0 0 0 0 0 0 0 0 0\n                        0 0 0 0 0 0 0 0 0 0 0 0\n                         0 0 0 0 0 0 0 0 0 0 0\n                          5 0 0 0 0 0 0 0 0 6\n        \"\"\"\n\n        # hex map is 19 rows tall: from -9 to 9\n        asciimap = asciimaps.AsciiMapHexFullTipsUp()\n        asciimap.readAscii(corners_map)\n\n        # verify the corners\n        self.assertEqual(asciimap[9, -9], \"1\")\n        self.assertEqual(asciimap[9, 0], \"2\")\n        self.assertEqual(asciimap[0, 9], \"3\")\n        self.assertEqual(asciimap[-9, 9], \"4\")\n        self.assertEqual(asciimap[-9, 0], \"5\")\n        self.assertEqual(asciimap[0, -9], \"6\")\n\n        # verify a line of coordinates\n        self.assertEqual(asciimap[0, 0], \"0\")\n        self.assertEqual(asciimap[1, -1], \"1\")\n        self.assertEqual(asciimap[2, -2], \"2\")\n        self.assertEqual(asciimap[3, -3], \"3\")\n        self.assertEqual(asciimap[4, -4], \"4\")\n        self.assertEqual(asciimap[5, -5], \"5\")\n        self.assertEqual(asciimap[6, -6], \"6\")\n        self.assertEqual(asciimap[7, -7], \"7\")\n\n    def test_hexFullCornersUp(self):\n        \"\"\"Test sample full hex map (with hex corners up) against known 
answers.\"\"\"\n        # hex map is 19 rows tall: from -9 to 9\n        asciimap = asciimaps.AsciiMapHexFullTipsUp()\n        asciimap.readAscii(HEX_FULL_MAP)\n\n        # spot check some values in the map\n        self.assertIn(\"7 1 1 1 1 1 1 1 1 0\", str(asciimap))\n        self.assertEqual(asciimap[-9, 9], \"7\")\n        self.assertEqual(asciimap[-8, 0], \"6\")\n        self.assertEqual(asciimap[-1, 0], \"2\")\n        self.assertEqual(asciimap[-1, 8], \"8\")\n        self.assertEqual(asciimap[0, -6], \"3\")\n        self.assertEqual(asciimap[0, 0], \"0\")\n        self.assertEqual(asciimap[9, 0], \"4\")\n\n        # also test writing from pure data (vs. reading) gives the exact same map\n        asciimap2 = asciimaps.AsciiMapHexFullTipsUp()\n        for ij, spec in asciimap.items():\n            asciimap2.asciiLabelByIndices[ij] = spec\n\n        with io.StringIO() as stream:\n            asciimap2.gridContentsToAscii()\n            asciimap2.writeAscii(stream)\n            stream.seek(0)\n            output = stream.read()\n            self.assertEqual(output, HEX_FULL_MAP)\n\n        self.assertIn(\"7 1 1 1 1 1 1 1 1 0\", str(asciimap))\n        self.assertIn(\"7 1 1 1 1 1 1 1 1 0\", str(asciimap2))\n\n    def test_hexFullFlatsUp(self):\n        \"\"\"Test sample full hex map (with hex flats up) against known answers.\"\"\"\n        # hex map is 21 rows tall: from -10 to 10\n        asciimap = asciimaps.AsciiMapHexFullFlatsUp()\n        asciimap.readAscii(HEX_FULL_MAP_FLAT)\n\n        # spot check some values in the map\n        self.assertIn(\"VOTA    ICS     IC      IRT     ICS     OC\", str(asciimap))\n        self.assertEqual(asciimap[-3, 10], \"ORS\")\n        self.assertEqual(asciimap[0, -9], \"ORS\")\n        self.assertEqual(asciimap[0, 0], \"IC\")\n        self.assertEqual(asciimap[0, 9], \"ORS\")\n        self.assertEqual(asciimap[4, -6], \"RR7\")\n        self.assertEqual(asciimap[6, 0], \"RR7\")\n        self.assertEqual(asciimap[7, -1], 
\"RR89\")\n\n        # also test writing from pure data (vs. reading) gives the exact same map\n        asciimap2 = asciimaps.AsciiMapHexFullFlatsUp()\n        for ij, spec in asciimap.items():\n            asciimap2.asciiLabelByIndices[ij] = spec\n\n        with io.StringIO() as stream:\n            asciimap2.gridContentsToAscii()\n            asciimap2.writeAscii(stream)\n            stream.seek(0)\n            output = stream.read()\n            self.assertEqual(output, HEX_FULL_MAP_FLAT)\n\n        self.assertIn(\"VOTA    ICS     IC      IRT     ICS     OC\", str(asciimap))\n        self.assertIn(\"VOTA    ICS     IC      IRT     ICS     OC\", str(asciimap2))\n\n    def test_hexFullFlat(self):\n        \"\"\"Test sample full hex map against known answers.\"\"\"\n        # hex map is 19 rows tall, so it should go from -9 to 9\n        asciimap = asciimaps.AsciiMapHexFullFlatsUp()\n        with io.StringIO() as stream:\n            stream.write(HEX_FULL_MAP_FLAT)\n            stream.seek(0)\n            asciimap.readAscii(stream.read())\n\n        with io.StringIO() as stream:\n            asciimap.writeAscii(stream)\n            stream.seek(0)\n            output = stream.read()\n            self.assertEqual(output, HEX_FULL_MAP_FLAT)\n\n        self.assertEqual(asciimap[0, 0], \"IC\")\n        self.assertEqual(asciimap[-5, 2], \"VOTA\")\n        self.assertEqual(asciimap[2, 3], \"FS\")\n\n        # also test writing from pure data (vs. 
reading) gives the exact same map\n        with io.StringIO() as stream:\n            asciimap2 = asciimaps.AsciiMapHexFullFlatsUp()\n            asciimap2.asciiLabelByIndices = asciimap.asciiLabelByIndices\n            asciimap2.gridContentsToAscii()\n            asciimap2.writeAscii(stream)\n            stream.seek(0)\n            output = stream.read()\n            self.assertEqual(output, HEX_FULL_MAP_FLAT)\n\n    def test_hexSmallFlat(self):\n        asciimap = asciimaps.AsciiMapHexFullFlatsUp()\n        with io.StringIO() as stream:\n            stream.write(HEX_FULL_MAP_SMALL)\n            stream.seek(0)\n            asciimap.readAscii(stream.read())\n\n        with io.StringIO() as stream:\n            asciimap.writeAscii(stream)\n            stream.seek(0)\n            output = stream.read()\n            self.assertEqual(output, HEX_FULL_MAP_SMALL)\n\n    def test_flatHexBases(self):\n        \"\"\"For the full core with 2 lines chopped, get the first 3 bases.\"\"\"\n        asciimap = asciimaps.AsciiMapHexFullFlatsUp()\n        with io.StringIO() as stream:\n            stream.write(HEX_FULL_MAP_FLAT)\n            stream.seek(0)\n            asciimap.readAscii(stream.read())\n        bases = []\n        for li in range(3):\n            bases.append(asciimap._getIJBaseByAsciiLine(li))\n\n        self.assertEqual(bases, [(-2, -8), (-3, -7), (-4, -6)])  # chopped\n"
  },
  {
    "path": "armi/utils/tests/test_codeTiming.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Unit tests for code timing.\"\"\"\n\nimport time\nimport unittest\n\nfrom armi.utils import codeTiming\n\n\nclass CodeTimingTest(unittest.TestCase):\n    def setUp(self):\n        codeTiming._Timer._frozen = False\n        codeTiming.MasterTimer._instance = None\n\n    def tearDown(self):\n        codeTiming._Timer._frozen = False\n        codeTiming.MasterTimer._instance = None\n\n    def test_methodDefinitions(self):\n        \"\"\"Test that the timer decorators work and don't interupt the code.\"\"\"\n\n        @codeTiming.timed\n        def someMethod(boop):\n            time.sleep(0.01)\n            return boop\n\n        @codeTiming.timed(\"I have this name\")\n        def someOtherMethod(boop):\n            time.sleep(0.01)\n            return boop\n\n        # verify the decorator allows the code to run\n        x = someMethod(\"dingdong\")\n        y = someOtherMethod(\"bingbong\")\n\n        self.assertEqual(x, \"dingdong\")\n        self.assertEqual(y, \"bingbong\")\n\n        # verify the decorators work\n        table = codeTiming.MasterTimer.report(inclusionCutoff=0.01, totalTime=True)\n        self.assertIn(\"  AVERAGE \", table)\n        self.assertIn(\"  CUMULATIVE \", table)\n        self.assertIn(\"  NUM ITERS\", table)\n        self.assertIn(\"TIMER REPORTS  \", table)\n        self.assertIn(\"TOTAL TIME \", table)\n        
self.assertIn(\"someMethod\", table)\n        self.assertIn(\"I have this name\", table)\n\n    def test_countStartsStops(self):\n        \"\"\"Test the start and stop counting logic.\"\"\"\n        # test the start() and stop() methods, and their side effects\n        master = codeTiming.MasterTimer.getMasterTimer()\n        timer = master.startTimer(\"bananananana\")\n        t0 = timer.stop()\n        self.assertEqual(timer.overStart, 0)\n\n        # run start a few times in a row, to trip the overstart\n        for i in range(5):\n            time.sleep(0.01)\n            t1 = timer.start()\n            self.assertGreater(t1, t0)\n            t0 = t1\n            self.assertEqual(timer.overStart, i)\n\n        # run stop a few times in a row, which is allowed for race conditions\n        for i in range(5):\n            time.sleep(0.01)\n            t2 = timer.stop()\n            self.assertGreater(t2, t1)\n            t1 = t2\n            self.assertEqual(timer.overStart, 3 - i if 3 - i > 0 else 0)\n\n        # start will always work from a stopped state\n        time.sleep(0.01)\n        t6 = timer.start()\n        self.assertGreater(t6, t2)\n        self.assertEqual(timer.overStart, 0)\n\n        # start a second timer to show two can run at once\n        time.sleep(0.01)\n        timer2 = master.endTimer(\"wazzlewazllewazzzle\")\n        t7 = timer2.start()\n        self.assertGreater(t7, t6)\n        self.assertEqual(timer2.overStart, 0)\n\n        # use the timers as context managers\n        with timer2:\n            with timer:\n                pass\n\n        # There should be one start/stop each, leaving the over start count the same\n        self.assertEqual(timer.overStart, 0)\n        self.assertEqual(timer2.overStart, 0)\n\n    def test_propertyAccess(self):\n        \"\"\"Test property access is okay.\"\"\"\n        master = codeTiming.MasterTimer.getMasterTimer()\n        timer = master.startTimer(\"sometimer\")\n\n        t0 = timer.time\n       
 time.sleep(0.01)\n        self.assertGreaterEqual(t0, 0)\n        ts = timer.times\n        self.assertEqual(len(ts), 1)\n        self.assertEqual(len(ts[0]), 2)\n        self.assertGreaterEqual(ts[0][0], 0)\n        self.assertGreaterEqual(ts[0][1], 0)\n        tName = timer.name\n        self.assertEqual(tName, \"sometimer\")\n        tActive = timer.isActive\n        self.assertTrue(tActive)\n\n    def test_master(self):\n        master = codeTiming.MasterTimer.getMasterTimer()\n        _ = master.time\n\n        master.startAll()\n        actives = master.getActiveTimers()\n        self.assertEqual(list(master.timers.values()), actives)\n\n        master.stopAll()\n        actives = master.getActiveTimers()\n        self.assertEqual([], actives)\n\n        with self.assertRaises(RuntimeError):\n            codeTiming.MasterTimer()\n\n    def test_messyStartsAndStops(self):\n        master = codeTiming.MasterTimer.getMasterTimer()\n\n        name = \"sometimerthatihaventmadeyet\"\n        larger_time_start = master.time()\n        time.sleep(0.01)\n        timer = master.getTimer(name)\n        time.sleep(0.01)\n        lesser_time_start = master.time()\n\n        timer.start()  # 1st time pair\n        timer.start()  # 2nd time pair\n        timer.start()  # 3rd time pair\n        timer.stop()\n        self.assertIn(name, str(timer))\n        self.assertTrue(timer.isActive)\n\n        timer.stop()\n        timer.stop()\n        self.assertFalse(timer.isActive)\n\n        timer.stop()\n        timer.stop()\n        timer.start()  # 4th time pair\n        self.assertTrue(timer.isActive)\n\n        lesser_time_end = master.time()\n        time.sleep(0.01)\n        timer.stop()\n        self.assertIn(name, str(timer))\n        self.assertEqual(len(timer.times), 4)\n        time.sleep(0.01)\n        larger_time_end = master.time()\n\n        # even with all the starts and stops the total time needs to be between these two values.\n        
self.assertGreater(timer.time, lesser_time_end - lesser_time_start)\n        self.assertLess(timer.time, larger_time_end - larger_time_start)\n        self.assertEqual(timer.numIterations, 3)\n\n    def test_report(self):\n        master = codeTiming.MasterTimer.getMasterTimer()\n        name1 = \"test_report1\"\n        timer1 = master.getTimer(name1)\n        timer1.start()\n        time.sleep(0.01)\n        timer1.stop()\n\n        name2 = \"test_report2\"\n        timer2 = master.getTimer(name2)\n        timer2.start()\n        time.sleep(0.01)\n        timer2.stop()\n\n        # basic validation of the reports\n        table = codeTiming.MasterTimer.report(inclusionCutoff=0.01, totalTime=True)\n        self.assertIn(\"  AVERAGE \", table)\n        self.assertIn(\"  CUMULATIVE \", table)\n        self.assertIn(\"  NUM ITERS\", table)\n        self.assertIn(\"TIMER REPORTS  \", table)\n        self.assertIn(name1, table)\n        self.assertIn(name2, table)\n\n        lines = table.strip().split(\"\\n\")\n        self.assertEqual(len(lines), 4)\n        self.assertEqual(len(lines[1].strip().split()), 4)\n        self.assertEqual(len(lines[2].strip().split()), 4)\n"
  },
  {
    "path": "armi/utils/tests/test_custom_exceptions.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Basic tests of the custom exceptions.\"\"\"\n\nimport unittest\n\nfrom armi.tests import mockRunLogs\nfrom armi.utils.customExceptions import important, info, warn, warn_when_root\n\n\nclass CustomExceptionTests(unittest.TestCase):\n    @info\n    def exampleInfoMessage(self):\n        return \"output message\"\n\n    def test_info_decorator(self):\n        with mockRunLogs.BufferLog() as mock:\n            self.assertEqual(\"\", mock.getStdout())\n            for ii in range(1, 3):\n                self.exampleInfoMessage()\n                self.assertEqual(\"[info] output message\\n\" * ii, mock.getStdout())\n\n    @important\n    def exampleImportantMessage(self):\n        return \"important message?\"\n\n    def test_important_decorator(self):\n        with mockRunLogs.BufferLog() as mock:\n            self.assertEqual(\"\", mock.getStdout())\n            for ii in range(1, 3):\n                self.exampleImportantMessage()\n                self.assertEqual(\"[impt] important message?\\n\" * ii, mock.getStdout())\n\n    @warn\n    def exampleWarnMessage(self):\n        return \"you're not tall enough to ride this elephant\".format()\n\n    def test_warn_decorator(self):\n        with mockRunLogs.BufferLog() as mock:\n            for ii in range(1, 4):\n                self.exampleWarnMessage()\n                self.assertEqual(\n                    
\"[warn] you're not tall enough to ride this elephant\\n\" * ii,\n                    mock.getStdout(),\n                )\n\n    @warn_when_root\n    def exampleWarnWhenRootMessage(self):\n        return \"warning from root\".format()\n\n    def test_warn_when_root_decorator(self):\n        import armi\n\n        with mockRunLogs.BufferLog() as mock:\n            for ii in range(1, 4):\n                self.exampleWarnWhenRootMessage()\n                msg = \"[warn] warning from root\\n\" * ii\n                self.assertEqual(msg, mock.getStdout())\n                armi.MPI_RANK = 1\n                self.exampleWarnWhenRootMessage()\n                self.assertEqual(msg, mock.getStdout())\n                armi.MPI_RANK = 0\n"
  },
  {
    "path": "armi/utils/tests/test_densityTools.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Test densityTools.\"\"\"\n\nimport unittest\n\nimport numpy as np\n\nfrom armi.materials.material import Material\nfrom armi.materials.uraniumOxide import UO2\nfrom armi.nucDirectory.nuclideBases import NuclideBases\nfrom armi.utils import densityTools\n\n\nclass UraniumOxide(Material):\n    \"\"\"A test material that needs to be stored in a different namespace.\n\n    This is a duplicate (by name only) of :py:class:`armi.materials.uraniumOxide.UraniumOxide`\n    and is used for testing in :py:meth:`armi.materials.tests.test_materials.MaterialFindingTests.test_namespacing`\n    \"\"\"\n\n    def pseudoDensity(self, Tk=None, Tc=None):\n        return 0.0\n\n    def density(self, Tk=None, Tc=None):\n        return 0.0\n\n\nclass TestDensityTools(unittest.TestCase):\n    @classmethod\n    def setUpClass(cls):\n        cls.nuclideBases = NuclideBases()\n        cls.elements = cls.nuclideBases.elements\n\n    def test_expandElementalMassFracsToNuclides(self):\n        \"\"\"\n        Expand mass fraction to nuclides.\n\n        .. 
test:: Expand mass fractions to nuclides.\n            :id: T_ARMI_UTIL_EXP_MASS_FRACS\n            :tests: R_ARMI_UTIL_EXP_MASS_FRACS\n        \"\"\"\n        element = self.elements.bySymbol[\"N\"]\n        mass = {\"N\": 1.0}\n        densityTools.expandElementalMassFracsToNuclides(mass, [(element, None)])\n        self.assertNotIn(\"N\", mass)\n        self.assertIn(\"N15\", mass)\n        self.assertIn(\"N14\", mass)\n        self.assertAlmostEqual(sum(mass.values()), 1.0)\n        self.assertNotIn(\"N13\", mass)  # nothing unnatural.\n\n    def test_expandElementalZeroMassFrac(self):\n        \"\"\"As above, but try with a zero mass frac elemental.\"\"\"\n        elementals = [(self.elements.bySymbol[\"N\"], None), (self.elements.bySymbol[\"O\"], None)]\n        mass = {\"N\": 0.0, \"O\": 1.0}\n        densityTools.expandElementalMassFracsToNuclides(mass, elementals)\n        self.assertNotIn(\"N\", mass)\n        self.assertNotIn(\"O\", mass)\n        # Current expectation is for elements with zero mass fraction get expanded and\n        # isotopes with zero mass remain in the dictionary.\n        self.assertIn(\"N14\", mass)\n        self.assertAlmostEqual(sum(mass.values()), 1.0)\n\n    def test_getChemicals(self):\n        u235 = self.nuclideBases.byName[\"U235\"]\n        u238 = self.nuclideBases.byName[\"U238\"]\n        o16 = self.nuclideBases.byName[\"O16\"]\n\n        uo2 = UO2()\n        uo2Chemicals = densityTools.getChemicals(uo2.massFrac)\n        for symbol in [\"U\", \"O\"]:\n            self.assertIn(symbol, uo2Chemicals.keys())\n\n        self.assertAlmostEqual(uo2Chemicals[\"U\"], uo2.massFrac[\"U235\"] + uo2.massFrac[\"U238\"], 6)\n        self.assertAlmostEqual(uo2Chemicals[\"O\"], uo2.massFrac[\"O\"], 6)\n\n        # ensure getChemicals works if the nuclideBase is the dict key\n        massFrac = {u238: 0.87, u235: 0.12, o16: 0.01}\n        uo2Chemicals = densityTools.getChemicals(massFrac)\n        for symbol in [\"U\", \"O\"]:\n         
   self.assertIn(symbol, uo2Chemicals.keys())\n\n        self.assertAlmostEqual(uo2Chemicals[\"U\"], massFrac[u235] + massFrac[u238], 2)\n        self.assertAlmostEqual(uo2Chemicals[\"O\"], massFrac[o16], 2)\n\n    def test_expandElement(self):\n        \"\"\"Ensure isotopic subset feature works in expansion.\"\"\"\n        elemental = self.elements.bySymbol[\"O\"]\n        massFrac = 1.0\n        subset = [self.nuclideBases.byName[\"O16\"], self.nuclideBases.byName[\"O17\"]]\n        m1 = densityTools.expandElementalNuclideMassFracs(elemental, massFrac)\n        m2 = densityTools.expandElementalNuclideMassFracs(elemental, massFrac, subset)\n        self.assertIn(\"O18\", m1)\n        self.assertNotIn(\"O18\", m2)\n        self.assertAlmostEqual(1.0, sum(m1.values()))\n        self.assertAlmostEqual(1.0, sum(m2.values()))\n        # expect some small difference due to renormalization\n        self.assertNotAlmostEqual(m1[\"O17\"], m2[\"O17\"])\n        self.assertAlmostEqual(m1[\"O17\"], m2[\"O17\"], delta=1e-5)\n\n    def test_applyIsotopicsMix(self):\n        \"\"\"Ensure isotopc classes get mixed properly.\"\"\"\n        uo2 = UO2()\n        massFracO = uo2.massFrac[\"O\"]\n        uo2.class1_wt_frac = 0.2\n        enrichedMassFracs = {\"U235\": 0.3, \"U234\": 0.1, \"PU239\": 0.6}\n        fertileMassFracs = {\"U238\": 0.3, \"PU240\": 0.7}\n        densityTools.applyIsotopicsMix(uo2, enrichedMassFracs, fertileMassFracs)\n\n        self.assertAlmostEqual(uo2.massFrac[\"U234\"], (1 - massFracO) * 0.2 * 0.1)  # HM blended\n        self.assertAlmostEqual(uo2.massFrac[\"U238\"], (1 - massFracO) * 0.8 * 0.3)  # HM blended\n        self.assertAlmostEqual(uo2.massFrac[\"O\"], massFracO)  # non-HM stays unchanged\n\n    def test_getNDensFromMasses(self):\n        \"\"\"\n        Number densities from masses.\n\n        .. 
test:: Number densities are retrievable from masses.\n            :id: T_ARMI_UTIL_MASS2N_DENS\n            :tests: R_ARMI_UTIL_MASS2N_DENS\n        \"\"\"\n        nucs, nDens = densityTools.getNDensFromMasses(1, {\"O\": 1, \"H\": 2})\n        O = np.where(nucs == \"O\".encode())[0]\n        H = np.where(nucs == \"H\".encode())[0]\n\n        self.assertAlmostEqual(nDens[O][0], 0.03764, 5)\n        self.assertAlmostEqual(nDens[H][0], 1.19490, 5)\n\n    def test_getMassFractions(self):\n        \"\"\"Number densities to mass fraction.\"\"\"\n        numDens = {\"O17\": 0.1512, \"PU239\": 1.5223, \"U234\": 0.135}\n        massFracs = densityTools.getMassFractions(numDens)\n\n        self.assertAlmostEqual(massFracs[\"O17\"], 0.006456746320668389)\n        self.assertAlmostEqual(massFracs[\"PU239\"], 0.9141724414849527)\n        self.assertAlmostEqual(massFracs[\"U234\"], 0.07937081219437897)\n\n    def test_calculateNumberDensity(self):\n        \"\"\"Mass fraction to number density.\"\"\"\n        nDens = densityTools.calculateNumberDensity(\"U235\", 1, 1)\n        self.assertAlmostEqual(nDens, 0.0025621344549254283)\n\n        nDens = densityTools.calculateNumberDensity(\"PU239\", 0.00012, 0.001)\n        self.assertAlmostEqual(nDens, 0.0003023009578309138)\n\n        nDens = densityTools.calculateNumberDensity(\"N15\", 111, 222)\n        self.assertAlmostEqual(nDens, 0.020073659896941428)\n\n    def test_getMassInGrams(self):\n        m = densityTools.getMassInGrams(\"N16\", 1.001, None)\n        self.assertEqual(m, 0)\n\n        m = densityTools.getMassInGrams(\"O17\", 1.001, 0.00123)\n        self.assertAlmostEqual(m, 0.034754813848559635)\n\n        m = densityTools.getMassInGrams(\"PU239\", 1.001, 2.123)\n        self.assertAlmostEqual(m, 843.5790671316283)\n\n    def test_normalizeNuclideList(self):\n        \"\"\"Normalize a nuclide list.\"\"\"\n        nList = {\"PU239\": 23.2342, \"U234\": 0.001234, \"U235\": 34.152}\n        norm = 
densityTools.normalizeNuclideList(nList)\n\n        self.assertAlmostEqual(norm[\"PU239\"], 0.40486563661306063)\n        self.assertAlmostEqual(norm[\"U234\"], 2.1502965265880334e-05)\n        self.assertAlmostEqual(norm[\"U235\"], 0.5951128604216736)\n\n    def test_formatMaterialCard(self):\n        \"\"\"Formatting material information into an MCNP input card.\n\n        .. test:: Create MCNP material card\n            :id: T_ARMI_UTIL_MCNP_MAT_CARD\n            :tests: R_ARMI_UTIL_MCNP_MAT_CARD\n        \"\"\"\n        u235 = self.nuclideBases.byName[\"U235\"]\n        pu239 = self.nuclideBases.byName[\"PU239\"]\n        o16 = self.nuclideBases.byName[\"O16\"]\n        numDens = {o16: 0.7, pu239: 0.1, u235: 0.2}\n        matCard = densityTools.formatMaterialCard(\n            numDens,\n            matNum=1,\n            sigFigs=4,\n        )\n        refMatCard = \"\"\"m1\n       8016 7.0000e-01\n      92235 2.0000e-01\n      94239 1.0000e-01\n\"\"\"\n        self.assertEqual(refMatCard, \"\".join(matCard))\n\n        lfp35 = self.nuclideBases.byName[\"LFP35\"]\n        dump1 = self.nuclideBases.byName[\"DUMP1\"]\n        o16 = self.nuclideBases.byName[\"O16\"]\n        numDens = {o16: 0.7, pu239: 1e-8, u235: 0.2, lfp35: 1e-3, dump1: 1e-4}\n        matCard = densityTools.formatMaterialCard(\n            numDens,\n            matNum=-1,\n            minDens=1e-6,\n            mcnp6Compatible=True,\n            mcnpLibrary=\"81\",\n        )\n        refMatCard = \"\"\"m{}\n       8016 7.00000000e-01\n      92235 2.00000000e-01\n      94239 1.00000000e-06\n      nlib=81c\n\"\"\"\n        self.assertEqual(refMatCard, \"\".join(matCard))\n\n        numDens = {lfp35: 0.5, dump1: 0.5}\n        matCard = densityTools.formatMaterialCard(\n            numDens,\n            mcnp6Compatible=False,\n            mcnpLibrary=None,\n        )\n        refMatCard = []\n        self.assertEqual(refMatCard, matCard)\n"
  },
  {
    "path": "armi/utils/tests/test_directoryChangers.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Module for testing directoryChangers.\"\"\"\n\nimport os\nimport shutil\nimport unittest\nfrom pathlib import Path\n\nfrom armi.utils import directoryChangers, directoryChangersMpi\n\n\nclass ExpectedException(Exception):\n    pass\n\n\nclass TestDirectoryChangers(unittest.TestCase):\n    \"\"\"Tests for directory changers.\"\"\"\n\n    def setUp(self):\n        self.temp_directory = self._testMethodName + \"ThisIsATemporaryDirectory-AAZZ0099\"\n        if os.path.exists(self.temp_directory):\n            shutil.rmtree(self.temp_directory)\n\n    def tearDown(self):\n        if os.path.exists(self.temp_directory):\n            shutil.rmtree(self.temp_directory)\n\n    def test_mpiAction(self):\n        try:\n            os.mkdir(self.temp_directory)\n            cdma = directoryChangersMpi._ChangeDirectoryMpiAction(self.temp_directory)\n            self.assertTrue(cdma.invoke(None, None, None))\n        finally:\n            os.chdir(\"..\")\n            os.rmdir(self.temp_directory)\n\n    def test_mpiActionFailsOnNonexistentPath(self):\n        with self.assertRaises(IOError):\n            cdma = directoryChangersMpi._ChangeDirectoryMpiAction(self.temp_directory)\n            cdma.invoke(None, None, None)\n\n    def test_exception(self):\n        \"\"\"Make sure directory changers bring back full folder when an exception is raised.\"\"\"\n        try:\n   
         with directoryChangers.ForcedCreationDirectoryChanger(self.temp_directory):\n                Path(\"file1.txt\").touch()\n                Path(\"file2.txt\").touch()\n                os.mkdir(\"subdir\")\n                raise ExpectedException(\"Ooops\")\n        except ExpectedException:\n            pass\n\n        retrievedFolder = f\"dump-{self.temp_directory}\"\n        self.assertTrue(os.path.exists(os.path.join(retrievedFolder, \"file1.txt\")))\n        self.assertTrue(os.path.exists(os.path.join(retrievedFolder, \"file2.txt\")))\n        shutil.rmtree(retrievedFolder)\n\n    def test_exception_disabled(self):\n        \"\"\"Make sure directory changers do not bring back full folder when handling is disabled.\"\"\"\n        try:\n            with directoryChangers.ForcedCreationDirectoryChanger(self.temp_directory, dumpOnException=False):\n                Path(\"file1.txt\").touch()\n                Path(\"file2.txt\").touch()\n                raise ExpectedException(\"Ooops\")\n        except ExpectedException:\n            pass\n\n        self.assertFalse(os.path.exists(os.path.join(f\"dump-{self.temp_directory}\", \"file1.txt\")))\n\n    def test_change_to_nonexisting_fails(self):\n        \"\"\"Fail if destination doesn't exist.\"\"\"\n        with self.assertRaises(OSError):\n            with directoryChangers.DirectoryChanger(self.temp_directory):\n                pass\n\n    def test_change_to_nonexisting_works_forced(self):\n        \"\"\"Succeed with forced creation even when destination doesn't exist.\"\"\"\n        with directoryChangers.ForcedCreationDirectoryChanger(self.temp_directory):\n            pass\n\n    def test_temporary_cleans(self):\n        \"\"\"Make sure Temporary cleaner cleans up temporary files.\"\"\"\n        with directoryChangers.TemporaryDirectoryChanger() as dc:\n            Path(\"file1.txt\").touch()\n            Path(\"file2.txt\").touch()\n            tempName = dc.destination\n\n        
self.assertFalse(os.path.exists(tempName))\n\n    def test_file_retrieval(self):\n        \"\"\"\n        Make sure requested files and/or globs get copied back.\n\n        * Checks basic copy feature\n        * Checks rename feature\n        * Checks glob expansion\n        * Checks copy to output path\n        \"\"\"\n\n        def f(name):\n            \"\"\"Utility to avoid test clashes during cleanups.\"\"\"\n            return self._testMethodName + name\n\n        with directoryChangers.TemporaryDirectoryChanger(filesToRetrieve=[(f(\"file1.txt\"), f(\"newfile1.txt\"))]):\n            Path(f(\"file1.txt\")).touch()\n            Path(f(\"file2.txt\")).touch()\n\n        self.assertTrue(os.path.exists(f(\"newfile1.txt\")))\n        os.remove(f(\"newfile1.txt\"))\n\n        with directoryChangers.TemporaryDirectoryChanger(\n            filesToRetrieve=[f(\"file*.txt\")],\n            outputPath=\"temp\",\n        ) as _:\n            Path(f(\"file1.txt\")).touch()\n            Path(f(\"file2.txt\")).touch()\n\n        self.assertTrue(os.path.exists(f(\"file1.txt\")))\n        self.assertTrue(os.path.exists(f(\"file2.txt\")))\n        os.remove(f(\"file1.txt\"))\n        os.remove(f(\"file2.txt\"))\n\n        self.assertTrue(os.path.exists(os.path.join(\"temp\", f(\"file1.txt\"))))\n        self.assertTrue(os.path.exists(os.path.join(\"temp\", f(\"file2.txt\"))))\n        shutil.rmtree(\"temp\")\n\n    def test_file_retrieval_missing_file(self):\n        \"\"\"Tests that the directory changer still returns a subset of files even if all do not exist.\"\"\"\n\n        def f(name):\n            \"\"\"Utility to avoid test clashes during cleanups.\"\"\"\n            return self._testMethodName + name\n\n        with directoryChangers.TemporaryDirectoryChanger(filesToRetrieve=[f(\"file1.txt\"), f(\"file2.txt\")]):\n            Path(f(\"file1.txt\")).touch()\n\n        self.assertTrue(os.path.exists(f(\"file1.txt\")))\n        
self.assertFalse(os.path.exists(f(\"file2.txt\")))\n        os.remove(f(\"file1.txt\"))\n\n\nclass TestDirectoryChangersEnvEdits(unittest.TestCase):\n    \"\"\"Tests that will use monkeypatch to alter an environment variable.\"\"\"\n\n    def setUp(self):\n        # We cannot import pytest at the top of the file right now. The ARMI unit tests are currently imported at\n        # runtime, and until that is changed, we don't want pytest to be a runtime dependency. For now, hide the import\n        # down here. Once the testing module is complete and ARMI's unit tests aren't all imported, the pytest import\n        # can move up to where it belongs.\n        import pytest\n\n        self.monkeypatch = pytest.MonkeyPatch()\n\n    def tearDown(self):\n        self.monkeypatch.undo()\n\n    def test_tempDirChangerNonDefault(self):\n        \"\"\"Make sure TemporaryDirectoryChanger uses an alternative root when user edits the appropriate environment\n        variable.\n        \"\"\"\n        # Alter the root path to be in this directory\n        altRoot = Path(__file__).parent / \"altRoot\"\n        self.monkeypatch.setenv(\"ARMI_TEMP_ROOT_PATH\", str(altRoot))\n        with directoryChangers.TemporaryDirectoryChanger() as td:\n            self.assertEqual(Path(td.destination).parent, altRoot)\n        # This test creates a path that isn't auto deleted with TempDirChanger, which deletes the temp dir, not the root\n        if os.path.exists(altRoot):\n            shutil.rmtree(altRoot)\n"
  },
  {
    "path": "armi/utils/tests/test_directoryChangersMpi.py",
    "content": "# Copyright 2024 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nTest the MpiDirectoryChanger.\n\nThese tests will be generally ignored by pytest if you are trying to run\nthem in an environment without MPI installed.\n\nTo run these tests from the command line, install MPI and mpi4py, and do:\n\nmpiexec -n 2 python -m pytest test_parallel.py\nor\nmpiexec.exe -n 2 python -m pytest test_parallel.py\n\"\"\"\n\nimport os\nimport shutil\nimport unittest\n\nfrom armi import context, mpiActions\nfrom armi.utils.directoryChangersMpi import MpiDirectoryChanger\n\n# determine if this is a parallel run, and MPI is installed\nMPI_EXE = None\nif shutil.which(\"mpiexec.exe\") is not None:\n    MPI_EXE = \"mpiexec.exe\"\nelif shutil.which(\"mpiexec\") is not None:\n    MPI_EXE = \"mpiexec\"\n\n\nclass RevealYourDirectory(mpiActions.MpiAction):\n    def invokeHook(self):\n        # make a dir with name corresponding to the rank, that way we can confirm\n        # that all ranks actually executed this code\n        os.mkdir(str(context.MPI_RANK))\n        return True\n\n\nclass TestMPI(unittest.TestCase):\n    def setUp(self):\n        self.targetDir = \"mpiDir\"\n        if context.MPI_RANK == 0:\n            os.mkdir(self.targetDir)\n\n    def tearDown(self):\n        context.MPI_COMM.barrier()\n        if context.MPI_RANK == 0:\n            shutil.rmtree(self.targetDir)\n\n    @unittest.skipIf(context.MPI_SIZE <= 1 or MPI_EXE is 
None, \"Parallel test only\")\n    def test_MpiDirectoryChanger(self):\n        # make sure all workers start outside the targetDir\n        self.assertNotIn(self.targetDir, os.getcwd())\n\n        # put the workers in a loop, waiting for command from the main process\n        if context.MPI_RANK != 0:\n            while True:\n                cmd = context.MPI_COMM.bcast(None, root=0)\n                print(cmd)\n                if cmd == \"quit\":\n                    break\n                cmd.invoke(None, None, None)\n\n        # from main, send commands to the workers to move into the targetDir\n        # and then create folders within there\n        if context.MPI_RANK == 0:\n            with MpiDirectoryChanger(self.targetDir):\n                RevealYourDirectory.invokeAsMaster(None, None, None)\n\n            # make the workers exit the waiting loop\n            context.MPI_COMM.bcast(\"quit\", root=0)\n\n        context.MPI_COMM.barrier()\n        if context.MPI_RANK == 0:\n            # from main, confirm that subdirectories were created by all workers\n            for i in range(context.MPI_SIZE):\n                self.assertTrue(os.path.isdir(os.path.join(os.getcwd(), self.targetDir, str(i))))\n\n        # make sure all workers have moved back out from the targetDir\n        self.assertNotIn(self.targetDir, os.getcwd())\n\n        context.MPI_COMM.barrier()\n"
  },
  {
    "path": "armi/utils/tests/test_flags.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Testing flags.py.\"\"\"\n\nimport unittest\n\nfrom armi.reactor.composites import FlagSerializer\nfrom armi.utils.flags import Flag, auto\n\n\nclass ExampleFlag(Flag):\n    FOO = auto()\n    BAR = auto()\n    BAZ = auto()\n\n\nclass TestFlag(unittest.TestCase):\n    \"\"\"Tests for the utility Flag class and cohorts.\"\"\"\n\n    def test_auto(self):\n        \"\"\"\n        Make sure that auto() works right, and that mixing it with explicit values\n        doesn't lead to collision.\n        \"\"\"\n\n        class F(Flag):\n            foo = auto()\n            bar = 1\n            baz = auto()\n\n        f = F(F.bar)\n        self.assertEqual(int(f), 1)\n        # check that baz got a higher number than foo. 
Not a guaranteed behavior/bit of\n        # an implementation detail, but nice to know we understand what's happening\n        # under the hood.\n        self.assertTrue(int(F.baz) > int(F.foo))\n\n    def test_extend(self):\n        \"\"\"Ensure the set of flags can be programmatically extended.\"\"\"\n\n        class F(Flag):\n            foo = auto()\n            bar = 1\n            baz = auto()\n\n        self.assertEqual(F.width(), 1)\n\n        F.extend({\"A\": auto(), \"B\": 8, \"C\": auto(), \"D\": auto(), \"E\": auto()})\n\n        self.assertEqual(int(F.B), 8)\n        self.assertEqual(F.width(), 1)\n\n        F.extend({\"LAST\": auto()})\n        self.assertEqual(F.width(), 2)\n\n        f = F.A | F.foo | F.C\n        array = f.to_bytes()\n        self.assertEqual(len(array), 2)\n\n        f2 = F.from_bytes(array)\n        self.assertEqual(f, f2)\n\n    def test_collision_extension(self):\n        \"\"\"Ensure the set of flags cannot be programmatically extended if duplicate created.\n\n        .. test:: Set of flags are extensible without loss of uniqueness.\n            :id: T_ARMI_FLAG_EXTEND0\n            :tests: R_ARMI_FLAG_EXTEND\n        \"\"\"\n\n        class F(Flag):\n            foo = auto()\n            bar = 1\n            baz = auto()\n\n        F.extend({\"a\": auto()})\n        F.extend({\"b\": 1})\n\n    def test_collision_creation(self):\n        \"\"\"Make sure that we catch value collisions upon creation.\n\n        .. 
test:: No two flags have equivalence.\n            :id: T_ARMI_FLAG_DEFINE\n            :tests: R_ARMI_FLAG_DEFINE\n        \"\"\"\n        with self.assertRaises(AssertionError):\n\n            class F(Flag):\n                foo = 1\n                bar = 1\n\n        class D(Flag):\n            foo = auto()\n            bar = auto()\n            baz = auto()\n\n        self.assertEqual(D.foo._value, 1)\n        self.assertEqual(D.bar._value, 2)\n        self.assertEqual(D.baz._value, 4)\n\n    def test_bool(self):\n        f = ExampleFlag()\n        self.assertFalse(f)\n\n    def test_inclusion(self):\n        f = ExampleFlag.FOO | ExampleFlag.BAZ\n        self.assertIn(ExampleFlag.FOO, f)\n        self.assertIn(ExampleFlag.BAZ, f)\n        self.assertNotIn(ExampleFlag.BAR, f)\n\n    def test_bitwise(self):\n        \"\"\"Make sure that bitwise operators work right.\"\"\"\n        f = ExampleFlag.FOO | ExampleFlag.BAR\n        self.assertTrue(f & ExampleFlag.FOO)\n        self.assertTrue(f & ExampleFlag.BAR)\n        self.assertFalse(f & ExampleFlag.BAZ)\n\n        # mask off BAR\n        f &= ExampleFlag.FOO\n        self.assertEqual(f, ExampleFlag.FOO)\n\n        # OR in BAZ\n        f |= ExampleFlag.BAZ\n        self.assertIn(ExampleFlag.BAZ, f)\n\n        # XOR them. 
Should turn off FOO, since they both have it\n        f2 = ExampleFlag.FOO | ExampleFlag.BAR\n        self.assertEqual(f2 ^ f, ExampleFlag.BAR | ExampleFlag.BAZ)\n\n    def test_iteration(self):\n        \"\"\"We want to be able to iterate over set flags.\"\"\"\n        f = ExampleFlag.FOO | ExampleFlag.BAZ\n        flagsOn = [val for val in f]\n        self.assertIn(ExampleFlag.FOO, flagsOn)\n        self.assertIn(ExampleFlag.BAZ, flagsOn)\n        self.assertNotIn(ExampleFlag.BAR, flagsOn)\n\n    def test_hashable(self):\n        f1 = ExampleFlag.FOO\n        f2 = ExampleFlag.BAR\n        self.assertNotEqual(hash(f1), hash(f2))\n\n    def test_getitem(self):\n        self.assertEqual(ExampleFlag[\"FOO\"], ExampleFlag.FOO)\n\n    def test_duplicateFlags(self):\n        \"\"\"Show that duplicate flags can be added and silently ignored.\"\"\"\n\n        class F(Flag):\n            @classmethod\n            def len(cls):\n                return len(cls._nameToValue)\n\n        F.extend({\"FLAG0\": auto()})\n        for i in range(1, 12):\n            F.extend({f\"FLAG{i}\": auto()})\n            num = F.len()\n            F.extend({f\"FLAG{i - 1}\": auto()})\n            self.assertEqual(F.len(), num)\n\n            # While the next two lines do not assert anything, these lines used to raise an error.\n            # So these lines remain as proof against that error in the future.\n            ff = getattr(F, f\"FLAG{i}\")\n            FlagSerializer._packImpl(\n                [\n                    ff,\n                ],\n                F,\n            )\n            self.assertEqual(F.len(), num)\n\n    def test_soManyFlags(self):\n        \"\"\"Show that many flags can be added without issue.\"\"\"\n\n        class F(Flag):\n            @classmethod\n            def len(cls):\n                return len(cls._nameToValue)\n\n        for i in range(1, 100):\n            num = F.len()\n            flagName = f\"FLAG{i}\"\n            F.extend({flagName: auto()})\n  
          self.assertEqual(F.len(), num + 1)\n\n            flag = getattr(F, flagName)\n            flag.to_bytes()\n            self.assertEqual(F.len(), num + 1)\n"
  },
  {
    "path": "armi/utils/tests/test_hexagon.py",
    "content": "# Copyright 2023 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Test hexagon tools.\"\"\"\n\nimport math\nimport random\nimport unittest\n\nfrom armi.utils import hexagon\n\n\nclass TestHexagon(unittest.TestCase):\n    N_FUZZY_DRAWS: int = 10\n    \"\"\"Number of random draws to use in some fuzzy testing\"\"\"\n\n    def test_hexagon_area(self):\n        \"\"\"Area of a hexagon.\"\"\"\n        # Calculate area given a pitch\n        self.assertEqual(hexagon.area(1), math.sqrt(3.0) / 2)\n        self.assertEqual(hexagon.area(2), 4 * math.sqrt(3.0) / 2)\n\n    def test_numPositionsInRing(self):\n        \"\"\"Calculate number of positions in a ring of hexagons.\"\"\"\n        self.assertEqual(hexagon.numPositionsInRing(1), 1)\n        self.assertEqual(hexagon.numPositionsInRing(2), 6)\n        self.assertEqual(hexagon.numPositionsInRing(3), 12)\n        self.assertEqual(hexagon.numPositionsInRing(4), 18)\n\n    def test_rotatedCellCenter(self):\n        \"\"\"Test that location of the center cell is invariant through rotation.\"\"\"\n        for rot in range(6):\n            self.assertTrue(hexagon.getIndexOfRotatedCell(1, rot), 1)\n\n    def test_rotatedFirstRing(self):\n        \"\"\"Simple test for the corners of the first ring are maintained during rotation.\"\"\"\n        # A 60 degree rotation is just incrementing the cell index by one here\n        locations = list(range(2, 8))\n        for locIndex, initialPosition 
in enumerate(locations):\n            for rot in range(6):\n                actual = hexagon.getIndexOfRotatedCell(initialPosition, rot)\n                newIndex = (locIndex + rot) % 6\n                expectedPosition = locations[newIndex]\n                self.assertEqual(actual, expectedPosition, msg=f\"{initialPosition=}, {rot=}\")\n\n    def test_rotateFuzzy(self):\n        \"\"\"Select some position number and rotation and check for consistency.\"\"\"\n        N_DRAWS = 100\n        for _ in range(N_DRAWS):\n            self._rotateFuzzyInner()\n\n    def _rotateFuzzyInner(self):\n        rot = random.randint(1, 5)\n        initialCell = random.randint(2, 300)\n        testInfoMsg = f\"{rot=}, {initialCell=}\"\n        newCell = hexagon.getIndexOfRotatedCell(initialCell, rot)\n        self.assertNotEqual(newCell, initialCell, msg=testInfoMsg)\n        # should be in the same ring\n        initialRing = hexagon.numRingsToHoldNumCells(initialCell)\n        newRing = hexagon.numRingsToHoldNumCells(newCell)\n        self.assertEqual(newRing, initialRing, msg=testInfoMsg)\n        # If we un-rotate, we should get our initial cell\n        reverseRot = (6 - rot) % 6\n        reverseCell = hexagon.getIndexOfRotatedCell(newCell, reverseRot)\n        self.assertEqual(reverseCell, initialCell, msg=testInfoMsg)\n\n    def test_positionsUpToRing(self):\n        \"\"\"Test totalPositionsUpToRing is consistent with numPositionsInRing.\"\"\"\n        self.assertEqual(hexagon.totalPositionsUpToRing(1), 1)\n        self.assertEqual(hexagon.totalPositionsUpToRing(2), 7)\n        self.assertEqual(hexagon.totalPositionsUpToRing(3), 19)\n\n        totalPositions = 19\n        for ring in range(4, 30):\n            posInThisRing = hexagon.numPositionsInRing(ring)\n            totalPositions += posInThisRing\n            self.assertEqual(hexagon.totalPositionsUpToRing(ring), totalPositions, msg=f\"{ring=}\")\n\n    def test_rotatedCellIndexErrors(self):\n        \"\"\"Test errors 
for non-positive initial cell indices during rotation.\"\"\"\n        self._testNonPosRotIndex(0)\n        for _ in range(self.N_FUZZY_DRAWS):\n            index = random.randint(-100, -1)\n            self._testNonPosRotIndex(index)\n\n    def _testNonPosRotIndex(self, index: int):\n        with self.assertRaisesRegex(ValueError, \".*must be positive\", msg=f\"{index=}\"):\n            hexagon.getIndexOfRotatedCell(index, 0)\n\n    def test_rotatedCellOrientationErrors(self):\n        \"\"\"Test errors for invalid orientation numbers during rotation.\"\"\"\n        for _ in range(self.N_FUZZY_DRAWS):\n            upper = random.randint(6, 100)\n            self._testRotOrientation(upper)\n            lower = random.randint(-100, -1)\n            self._testRotOrientation(lower)\n\n    def _testRotOrientation(self, orientation: int):\n        with self.assertRaisesRegex(ValueError, \"Orientation number\", msg=f\"{orientation=}\"):\n            hexagon.getIndexOfRotatedCell(initialCellIndex=1, orientationNumber=orientation)\n\n    def test_indexWithNoRotation(self):\n        \"\"\"Test that the initial cell location is returned if not rotated.\"\"\"\n        for _ in range(self.N_FUZZY_DRAWS):\n            ix = random.randint(1, 300)\n            postRotation = hexagon.getIndexOfRotatedCell(ix, orientationNumber=0)\n            self.assertEqual(postRotation, ix)\n"
  },
  {
    "path": "armi/utils/tests/test_iterables.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Unittests for iterables.py.\"\"\"\n\nimport unittest\n\nimport numpy as np\n\nfrom armi.utils import iterables\n\n# CONSTANTS\n_TEST_DATA = {\"turtle\": [float(vv) for vv in range(-2000, 2000)]}\n\n\nclass TestIterables(unittest.TestCase):\n    \"\"\"Testing our custom Iterables.\"\"\"\n\n    def test_flatten(self):\n        self.assertEqual(\n            iterables.flatten([[1, 2, 3], [4, 5, 6], [7, 8], [9, 10]]),\n            [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n        )\n        self.assertEqual(\n            iterables.flatten([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10]]),\n            [1, 2, 3, 4, 5, 6, 7, 8, 9, 10],\n        )\n\n    def test_chunk(self):\n        self.assertEqual(\n            list(iterables.chunk([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 4)),\n            [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10]],\n        )\n\n    def test_split(self):\n        data = list(range(50))\n        chu = iterables.split(data, 10)\n        self.assertEqual(len(chu), 10)\n        unchu = iterables.flatten(chu)\n        self.assertEqual(data, unchu)\n\n        chu = iterables.split(data, 1)\n        self.assertEqual(len(chu), 1)\n        unchu = iterables.flatten(chu)\n        self.assertEqual(data, unchu)\n\n        chu = iterables.split(data, 60, padWith=[None])\n        self.assertEqual(len(chu), 60)\n        unchu = iterables.flatten(chu)\n        self.assertEqual(len(unchu), 
60)\n\n        chu = iterables.split(data, 60, padWith=[None])\n        self.assertEqual(len(chu), 60)\n\n        data = [0]\n        chu = iterables.split(data, 1)\n        unchu = iterables.flatten(chu)\n        self.assertEqual(unchu, data)\n\n    def test_packingAndUnpackingBinaryStrings(self):\n        packed = iterables.packBinaryStrings(_TEST_DATA)\n        unpacked = iterables.unpackBinaryStrings(packed[\"turtle\"][0])\n        self.assertEqual(_TEST_DATA[\"turtle\"], unpacked)\n\n    def test_packingAndUnpackingHexStrings(self):\n        packed = iterables.packHexStrings(_TEST_DATA)\n        unpacked = iterables.unpackHexStrings(packed[\"turtle\"][0])\n        self.assertEqual(_TEST_DATA[\"turtle\"], unpacked)\n\n    def test_sequenceInit(self):\n        # init an empty sequence\n        s = iterables.Sequence()\n        for item in s:\n            self.assertTrue(False, \"This shouldn't happen.\")\n\n        # init a sequence with another sequence\n        example = [1, 2, 3]\n        s2 = iterables.Sequence(example)\n        s3 = iterables.Sequence(s2)\n\n        i = 0\n        for item in s3:\n            i += 1\n\n        self.assertEqual(i, len(example))\n\n    def test_sequence(self):\n        # sequentially using methods in the usual way\n        s = iterables.Sequence(range(1000000))\n        s.drop(lambda i: i % 2 == 0)\n        s.select(lambda i: i < 20)\n        s.transform(lambda i: i * 10)\n        result = tuple(s)\n        self.assertEqual(result, (10, 30, 50, 70, 90, 110, 130, 150, 170, 190))\n\n        # stringing together the methods in a more modern Python way\n        s = iterables.Sequence(range(1000000))\n        result = tuple(s.drop(lambda i: i % 2 == 0).select(lambda i: i < 20).transform(lambda i: i * 10))\n        self.assertEqual(result, (10, 30, 50, 70, 90, 110, 130, 150, 170, 190))\n\n        # call tuple() after a couple methods\n        s = iterables.Sequence(range(1000000))\n        s.drop(lambda i: i % 2 == 0)\n        
s.select(lambda i: i < 20)\n        result = tuple(s)\n        self.assertEqual(result, (1, 3, 5, 7, 9, 11, 13, 15, 17, 19))\n\n        # you can't just call tuple() a second time, there is no data left\n        s.transform(lambda i: i * 10)\n        result = tuple(s)\n        self.assertEqual(result, ())\n\n    def test_copySequence(self):\n        s = iterables.Sequence(range(4, 8))\n        sCopy = s.copy()\n\n        vals = [item for item in sCopy]\n        self.assertEqual(vals[0], 4)\n        self.assertEqual(vals[-1], 7)\n        self.assertEqual(len(vals), 4)\n\n    def test_extendSequence(self):\n        s = iterables.Sequence(range(3))\n        ex = range(3, 8)\n        s.extend(ex)\n\n        vals = [item for item in s]\n        self.assertEqual(vals[0], 0)\n        self.assertEqual(vals[-1], 7)\n        self.assertEqual(len(vals), 8)\n\n    def test_appendSequence(self):\n        s = iterables.Sequence(range(3))\n        s.extend([999])\n\n        vals = [item for item in s]\n        self.assertEqual(vals[0], 0)\n        self.assertEqual(vals[-1], 999)\n        self.assertEqual(len(vals), 4)\n\n    def test_addingSequences(self):\n        s1 = iterables.Sequence(range(3))\n        s2 = iterables.Sequence(range(3, 6))\n\n        s3 = s1 + s2\n\n        vals = [item for item in s3]\n        self.assertEqual(vals[0], 0)\n        self.assertEqual(vals[-1], 5)\n        self.assertEqual(len(vals), 6)\n\n        s1 += s2\n\n        vals = [item for item in s1]\n        self.assertEqual(vals[0], 0)\n        self.assertEqual(vals[-1], 5)\n        self.assertEqual(len(vals), 6)\n\n    def test_listPivot(self):\n        data = list(range(10))\n        loc = 4\n        actual = iterables.pivot(data, loc)\n        self.assertEqual(actual, data[loc:] + data[:loc])\n\n    def test_arrayPivot(self):\n        data = np.arange(10)\n        loc = -7\n        actual = iterables.pivot(data, loc)\n        expected = np.array(iterables.pivot(data.tolist(), loc))\n        
self.assertTrue((actual == expected).all(), msg=f\"{actual=} != {expected=}\")\n        # Catch a silent failure case where pivot doesn't change the iterable\n        self.assertTrue(\n            (actual != data).all(),\n            msg=f\"Pre-pivot {data=} should not equal post-pivot {actual=}\",\n        )\n"
  },
  {
    "path": "armi/utils/tests/test_mathematics.py",
    "content": "# Copyright 2022 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Testing mathematics utilities.\"\"\"\n\nimport unittest\nfrom math import sqrt\n\nimport numpy as np\n\nfrom armi.utils.mathematics import (\n    average1DWithinTolerance,\n    convertToSlice,\n    efmt,\n    expandRepeatedFloats,\n    findClosest,\n    findNearestValue,\n    fixThreeDigitExp,\n    getFloat,\n    getStepsFromValues,\n    isMonotonic,\n    linearInterpolation,\n    minimizeScalarFunc,\n    newtonsMethod,\n    parabolaFromPoints,\n    parabolicInterpolation,\n    relErr,\n    resampleStepwise,\n    rotateXY,\n)\n\n\nclass TestMath(unittest.TestCase):\n    \"\"\"Tests for various math utilities.\"\"\"\n\n    def test_average1DWithinTolerance(self):\n        vals = np.array([np.array([1, 2, 3]), np.array([4, 5, 6]), np.array([7, 8, 9])])\n        result = average1DWithinTolerance(vals, 0.1)\n        self.assertEqual(len(result), 3)\n        self.assertEqual(result[0], 4.0)\n        self.assertEqual(result[1], 5.0)\n        self.assertEqual(result[2], 6.0)\n\n    def test_average1DWithinToleranceInvalid(self):\n        vals = np.array([np.array([1, -2, 3]), np.array([4, -5, 6]), np.array([7, -8, 9])])\n        with self.assertRaises(ValueError):\n            average1DWithinTolerance(vals, 0.1)\n\n    def test_convertToSlice(self):\n        slice1 = convertToSlice(2)\n        self.assertEqual(slice1, slice(2, 3, None))\n        slice1 = 
convertToSlice(2.0, increment=-1)\n        self.assertEqual(slice1, slice(1, 2, None))\n        slice1 = convertToSlice(None)\n        self.assertEqual(slice1, slice(None, None, None))\n        slice1 = convertToSlice([1, 2, 3])\n        self.assertTrue(np.allclose(slice1, np.array([1, 2, 3])))\n        slice1 = convertToSlice(slice(2, 3, None))\n        self.assertEqual(slice1, slice(2, 3, None))\n        slice1 = convertToSlice(np.array([1, 2, 3]))\n        self.assertTrue(np.allclose(slice1, np.array([1, 2, 3])))\n        with self.assertRaises(Exception):\n            slice1 = convertToSlice(\"slice\")\n\n    def test_efmt(self):\n        self.assertAlmostEqual(efmt(\"1.0e+001\"), \"1.0E+01\")\n        self.assertAlmostEqual(efmt(\"1.0E+01\"), \"1.0E+01\")\n\n    def test_expandRepeatedFloats(self):\n        repeatedFloats = [\"150\", \"2R\", 200.0, 175, \"4r\", 180.0, \"0R\"]\n        expectedFloats = [150] * 3 + [200] + [175] * 5 + [180]\n        self.assertEqual(expandRepeatedFloats(repeatedFloats), expectedFloats)\n\n    def test_findClosest(self):\n        l1 = range(10)\n        self.assertEqual(findClosest(l1, 5.6), 6)\n        self.assertEqual(findClosest(l1, 10.1), 9)\n        self.assertEqual(findClosest(l1, -200), 0)\n\n        # with index\n        self.assertEqual(findClosest(l1, 5.6, indx=True), (6, 6))\n\n    def test_findNearestValue(self):\n        searchList = [0.1, 0.2, 0.25, 0.35, 0.4]\n        searchValue = 0.225\n        self.assertEqual(findNearestValue(searchList, searchValue), 0.2)\n        searchValue = 0.226\n        self.assertEqual(findNearestValue(searchList, searchValue), 0.25)\n        searchValue = 0.0\n        self.assertEqual(findNearestValue(searchList, searchValue), 0.1)\n        searchValue = 10\n        self.assertEqual(findNearestValue(searchList, searchValue), 0.4)\n\n    def test_fixThreeDigitExp(self):\n        fixed = fixThreeDigitExp(\"-9.03231714805651E+101\")\n        self.assertEqual(-9.03231714805651e101, 
fixed)\n        fixed = fixThreeDigitExp(\"9.03231714805651-101\")\n        self.assertEqual(9.03231714805651e-101, fixed)\n        fixed = fixThreeDigitExp(\"-2.4594981981654+101\")\n        self.assertEqual(-2.4594981981654e101, fixed)\n        fixed = fixThreeDigitExp(\"-2.4594981981654-101\")\n        self.assertEqual(-2.4594981981654e-101, fixed)\n\n    def test_getFloat(self):\n        self.assertIsNone(getFloat(\"word\"))\n\n        for flt in [-9.123 + f * 0.734 for f in range(25)]:\n            self.assertEqual(getFloat(flt), flt)\n            self.assertEqual(getFloat(str(flt)), flt)\n\n    def test_getStepsFromValues(self):\n        steps = getStepsFromValues([1.0, 3.0, 6.0, 10.0], prevValue=0.0)\n        self.assertListEqual(steps, [1.0, 2.0, 3.0, 4.0])\n\n    def test_isMonotonic(self):\n        self.assertEqual(True, isMonotonic([1, 2, 2, 3], \"<=\"))\n        self.assertEqual(False, isMonotonic([1, 2, 2, 1], \"<=\"))\n\n        self.assertEqual(True, isMonotonic([1, 2, 3], \"<\"))\n        self.assertEqual(False, isMonotonic([1, 2, 2], \"<\"))\n\n        self.assertEqual(True, isMonotonic([3, 2, 1, 1], \">=\"))\n        self.assertEqual(False, isMonotonic([3, 2, 1, 2], \">=\"))\n\n        self.assertEqual(True, isMonotonic([3, 2, 1], \">\"))\n        self.assertEqual(False, isMonotonic([3, 2, 2], \">\"))\n\n        with self.assertRaises(ValueError):\n            isMonotonic([1, 2, 3, 2], \"invalidRelation\")\n\n    def test_linearInterpolation(self):\n        y = linearInterpolation(1.0, 2.0, 3.0, 4.0, targetX=20.0)\n        x = linearInterpolation(1.0, 2.0, 3.0, 4.0, targetY=y)\n\n        x2 = linearInterpolation(1.0, 1.0, 2.0, 2.0, targetY=50)\n\n        self.assertEqual(x, 20.0)\n        self.assertEqual(x2, 50.0)\n\n        with self.assertRaises(ZeroDivisionError):\n            _ = linearInterpolation(1.0, 1.0, 1.0, 2.0)\n\n    def test_minimizeScalarFunc(self):\n        f = lambda x: (x + 1) ** 2\n        minimum = minimizeScalarFunc(f, -3.0, 
10.0, maxIterations=10)\n        self.assertAlmostEqual(minimum, -1.0, places=3)\n        minimum = minimizeScalarFunc(f, -3.0, 10.0, maxIterations=10, positiveGuesses=True)\n        self.assertAlmostEqual(minimum, 0.0, places=3)\n\n    def test_newtonsMethod(self):\n        f = lambda x: (x + 2) * (x - 1)\n        root = newtonsMethod(f, 0.0, 5.0, maxIterations=10, positiveGuesses=True)\n        self.assertAlmostEqual(root, 1.0, places=3)\n        root = newtonsMethod(f, 0.0, -10.0, maxIterations=10)\n        self.assertAlmostEqual(root, -2.0, places=3)\n\n    def test_parabola(self):\n        # test the parabola function\n        a, b, c = parabolaFromPoints((0, 1), (1, 2), (-1, 2))\n        self.assertEqual(a, 1.0)\n        self.assertEqual(b, 0.0)\n        self.assertEqual(c, 1.0)\n\n        with self.assertRaises(Exception):\n            a, b, c = parabolaFromPoints((0, 1), (0, 1), (-1, 2))\n\n    def test_parabolicInterpolation(self):\n        realRoots = parabolicInterpolation(2.0e-6, -5.0e-4, 1.02, 1.0)\n        self.assertAlmostEqual(realRoots[0][0], 200.0)\n        self.assertAlmostEqual(realRoots[0][1], 3.0e-4)\n        self.assertAlmostEqual(realRoots[1][0], 50.0)\n        self.assertAlmostEqual(realRoots[1][1], -3.0e-4)\n        noRoots = parabolicInterpolation(2.0e-6, -4.0e-4, 1.03, 1.0)\n        self.assertAlmostEqual(noRoots[0][0], -100.0)\n        self.assertAlmostEqual(noRoots[0][1], 0.0)\n        # 3. 
run time error\n        with self.assertRaises(RuntimeError):\n            _ = parabolicInterpolation(2.0e-6, 4.0e-4, 1.02, 1.0)\n\n    def test_relErr(self):\n        self.assertAlmostEqual(relErr(1.00, 1.01), 0.01)\n        self.assertAlmostEqual(relErr(100.0, 97.0), -0.03)\n        self.assertAlmostEqual(relErr(0.00, 1.00), -1e99)\n\n    def test_resampleStepwiseAvg0(self):\n        \"\"\"Test resampleStepwise() averaging when in and out bins match.\"\"\"\n        xin = [0, 1, 2, 13.3]\n        yin = [4.76, 9.99, -123.456]\n        xout = [0, 1, 2, 13.3]\n\n        yout = resampleStepwise(xin, yin, xout)\n\n        self.assertEqual(len(yout), len(xout) - 1)\n        self.assertAlmostEqual(yout[0], 4.76)\n        self.assertAlmostEqual(yout[1], 9.99)\n        self.assertAlmostEqual(yout[2], -123.456)\n\n    def test_resampleStepwiseAvg1(self):\n        \"\"\"Test resampleStepwise() averaging for one arbitrary case.\"\"\"\n        xin = [0, 1, 2, 3, 4]\n        yin = [3, 2, 5, 3]\n        xout = [0, 2, 3.5, 4]\n\n        yout = resampleStepwise(xin, yin, xout)\n\n        self.assertEqual(len(yout), len(xout) - 1)\n        self.assertEqual(yout[0], 2.5)\n        self.assertAlmostEqual(yout[1], 4.333333333333333)\n        self.assertEqual(yout[2], 3)\n\n    def test_resampleStepwiseAvg2(self):\n        \"\"\"Test resampleStepwise() averaging for another arbitrary case.\"\"\"\n        xin = [0, 1, 2, 3, 4, 5]\n        yin = [3, 2, 5, 3, 4]\n        xout = [0, 2, 3.5, 5]\n\n        yout = resampleStepwise(xin, yin, xout)\n\n        self.assertEqual(len(yout), len(xout) - 1)\n        self.assertEqual(yout[0], 2.5)\n        self.assertAlmostEqual(yout[1], 4.333333333333333)\n        self.assertAlmostEqual(yout[2], 3.6666666666666665)\n\n    def test_resampleStepwiseAvg3(self):\n        \"\"\"Test resampleStepwise() averaging for another arbitrary case.\"\"\"\n        xin = [0, 1, 2, 3, 4, 6]\n        yin = [3, 2, 5, 3, 4]\n        xout = [0, 2, 3.5, 6]\n\n        yout = 
resampleStepwise(xin, yin, xout)\n\n        self.assertEqual(len(yout), len(xout) - 1)\n        self.assertEqual(yout[0], 2.5)\n        self.assertAlmostEqual(yout[1], 4.333333333333333)\n        self.assertEqual(yout[2], 3.8)\n\n    def test_resampleStepwiseAvg4(self):\n        \"\"\"Test resampleStepwise() averaging for matching, but uneven intervals.\"\"\"\n        xin = [0, 3, 5, 6.777, 9.123]\n        yin = [3.1, 2.2, 5.3, 3.4]\n        xout = [0, 3, 5, 6.777, 9.123]\n\n        yout = resampleStepwise(xin, yin, xout)\n\n        self.assertEqual(len(yout), len(xout) - 1)\n        self.assertEqual(yout[0], 3.1)\n        self.assertEqual(yout[1], 2.2)\n        self.assertEqual(yout[2], 5.3)\n        self.assertEqual(yout[3], 3.4)\n\n    def test_resampleStepwiseAvg5(self):\n        \"\"\"Test resampleStepwise() averaging for almost matching intervals.\"\"\"\n        xin = [0, 3, 5, 6.777, 9.123]\n        yin = [3.1, 2.2, 5.3, 3.4]\n        xout = [0, 5, 9.123]\n\n        yout = resampleStepwise(xin, yin, xout)\n\n        self.assertEqual(len(yout), len(xout) - 1)\n        self.assertEqual(yout[0], 2.74)\n        self.assertAlmostEqual(yout[1], 4.21889400921659)\n\n    def test_resampleStepwiseAvg6(self):\n        \"\"\"Test resampleStepwise() averaging when the intervals don't line up.\"\"\"\n        xin = [0, 1, 2, 3, 4]\n        yin = [11, 22, 33, 44]\n        xout = [2, 3, 4, 5, 6]\n\n        yout = resampleStepwise(xin, yin, xout)\n\n        self.assertEqual(len(yout), len(xout) - 1)\n        self.assertEqual(yout[0], 33)\n        self.assertEqual(yout[1], 44)\n        self.assertEqual(yout[2], 0)\n        self.assertEqual(yout[3], 0)\n\n    def test_resampleStepwiseAvg7(self):\n        \"\"\"Test resampleStepwise() averaging when the intervals don't line up.\"\"\"\n        xin = [2, 4, 6, 8, 10]\n        yin = [11, 22, 33, 44]\n        xout = [-1, 0, 1, 2, 3, 4]\n\n        yout = resampleStepwise(xin, yin, xout)\n\n        self.assertEqual(len(yout), 
len(xout) - 1)\n        self.assertEqual(yout[0], 0)\n        self.assertEqual(yout[1], 0)\n        self.assertEqual(yout[2], 0)\n        self.assertEqual(yout[3], 11)\n        self.assertEqual(yout[4], 11)\n\n    def test_resampleStepwiseSum0(self):\n        \"\"\"Test resampleStepwise() summing when in and out bins match.\"\"\"\n        xin = [0, 1, 2, 13.3]\n        yin = [4.76, 9.99, -123.456]\n        xout = [0, 1, 2, 13.3]\n\n        yout = resampleStepwise(xin, yin, xout, avg=False)\n\n        self.assertEqual(len(yout), len(xout) - 1)\n        self.assertAlmostEqual(yout[0], 4.76)\n        self.assertAlmostEqual(yout[1], 9.99)\n        self.assertAlmostEqual(yout[2], -123.456)\n        self.assertAlmostEqual(sum(yin), sum(yout))\n\n    def test_resampleStepwiseSum1(self):\n        \"\"\"Test resampleStepwise() summing for one arbitrary case.\"\"\"\n        xin = [0, 1, 2, 3, 4]\n        yin = [3, 2, 5, 3]\n        xout = [0, 2, 3.5, 4]\n\n        yout = resampleStepwise(xin, yin, xout, avg=False)\n\n        self.assertEqual(len(yout), len(xout) - 1)\n        self.assertEqual(yout[0], 5)\n        self.assertEqual(yout[1], 6.5)\n        self.assertEqual(yout[2], 1.5)\n        self.assertEqual(sum(yin), sum(yout))\n\n    def test_resampleStepwiseSum2(self):\n        \"\"\"Test resampleStepwise() summing for another arbitrary case.\"\"\"\n        xin = [0, 1, 2, 3, 4, 5]\n        yin = [3, 2, 5, 3, 4]\n        xout = [0, 2, 3.5, 5]\n\n        yout = resampleStepwise(xin, yin, xout, avg=False)\n\n        self.assertEqual(len(yout), len(xout) - 1)\n        self.assertEqual(yout[0], 5)\n        self.assertEqual(yout[1], 6.5)\n        self.assertEqual(yout[2], 5.5)\n        self.assertEqual(sum(yin), sum(yout))\n\n    def test_resampleStepwiseSum3(self):\n        \"\"\"Test resampleStepwise() summing for another arbitrary case.\"\"\"\n        xin = [0, 1, 2, 3, 4, 6]\n        yin = [3, 2, 5, 3, 4]\n        xout = [0, 2, 3.5, 6]\n\n        yout = 
resampleStepwise(xin, yin, xout, avg=False)\n\n        self.assertEqual(len(yout), len(xout) - 1)\n        self.assertEqual(yout[0], 5)\n        self.assertEqual(yout[1], 6.5)\n        self.assertEqual(yout[2], 5.5)\n        self.assertEqual(sum(yin), sum(yout))\n\n    def test_resampleStepwiseSum4(self):\n        \"\"\"Test resampleStepwise() summing for matching, but uneven intervals.\"\"\"\n        xin = [0, 3, 5, 6.777, 9.123]\n        yin = [3.1, 2.2, 5.3, 3.4]\n        xout = [0, 3, 5, 6.777, 9.123]\n\n        yout = resampleStepwise(xin, yin, xout, avg=False)\n\n        self.assertEqual(len(yout), len(xout) - 1)\n        self.assertEqual(yout[0], 3.1)\n        self.assertEqual(yout[1], 2.2)\n        self.assertEqual(yout[2], 5.3)\n        self.assertEqual(yout[3], 3.4)\n        self.assertEqual(sum(yin), sum(yout))\n\n    def test_resampleStepwiseSum5(self):\n        \"\"\"Test resampleStepwise() summing for almost matching intervals.\"\"\"\n        xin = [0, 3, 5, 6.777, 9.123]\n        yin = [3.1, 2.2, 5.3, 3.4]\n        xout = [0, 5, 9.123]\n\n        yout = resampleStepwise(xin, yin, xout, avg=False)\n\n        self.assertEqual(len(yout), len(xout) - 1)\n        self.assertAlmostEqual(yout[0], 5.3)\n        self.assertAlmostEqual(yout[1], 8.7)\n        self.assertAlmostEqual(sum(yin), sum(yout))\n\n    def test_resampleStepwiseSum6(self):\n        \"\"\"Test resampleStepwise() summing when the intervals don't line up.\"\"\"\n        xin = [0, 1, 2, 3, 4]\n        yin = [11, 22, 33, 44]\n        xout = [2, 3, 4, 5, 6]\n\n        yout = resampleStepwise(xin, yin, xout, avg=False)\n\n        self.assertEqual(len(yout), len(xout) - 1)\n        self.assertEqual(yout[0], 33)\n        self.assertEqual(yout[1], 44)\n        self.assertEqual(yout[2], 0)\n        self.assertEqual(yout[3], 0)\n\n    def test_resampleStepwiseSum7(self):\n        \"\"\"Test resampleStepwise() summing when the intervals don't line up.\"\"\"\n        xin = [2, 4, 6, 8, 10]\n        yin 
= [11, 22, 33, 44]\n        xout = [-1, 0, 1, 2, 3, 4]\n\n        yout = resampleStepwise(xin, yin, xout, avg=False)\n\n        self.assertEqual(len(yout), len(xout) - 1)\n        self.assertEqual(yout[0], 0)\n        self.assertEqual(yout[1], 0)\n        self.assertEqual(yout[2], 0)\n        self.assertAlmostEqual(yout[3], 11 / 2)\n        self.assertAlmostEqual(yout[4], 11 / 2)\n\n    def test_resampleStepwiseAvgAllNones(self):\n        \"\"\"Test resampleStepwise() averaging when the inputs are all None.\"\"\"\n        xin = [0, 1, 2, 13.3]\n        yin = [None, None, None]\n        xout = [0, 1, 2, 13.3]\n\n        yout = resampleStepwise(xin, yin, xout)\n\n        self.assertEqual(len(yout), len(xout) - 1)\n        self.assertIsNone(yout[0])\n        self.assertIsNone(yout[1])\n        self.assertIsNone(yout[2])\n\n    def test_resampleStepwiseAvgOneNone(self):\n        \"\"\"Test resampleStepwise() averaging when one input is None.\"\"\"\n        xin = [0, 1, 2, 13.3]\n        yin = [None, 1, 2]\n        xout = [0, 1, 2, 13.3]\n\n        yout = resampleStepwise(xin, yin, xout)\n\n        self.assertEqual(len(yout), len(xout) - 1)\n        self.assertIsNone(yout[0])\n        self.assertEqual(yout[1], 1)\n        self.assertEqual(yout[2], 2)\n\n    def test_resampleStepwiseSumAllNones(self):\n        \"\"\"Test resampleStepwise() summing when the inputs are all None.\"\"\"\n        xin = [0, 1, 2, 13.3]\n        yin = [None, None, None]\n        xout = [0, 1, 2, 13.3]\n\n        yout = resampleStepwise(xin, yin, xout, avg=False)\n\n        self.assertEqual(len(yout), len(xout) - 1)\n        self.assertIsNone(yout[0])\n        self.assertIsNone(yout[1])\n        self.assertIsNone(yout[2])\n\n    def test_resampleStepwiseSumOneNone(self):\n        \"\"\"Test resampleStepwise() summing when one inputs is None.\"\"\"\n        xin = [0, 1, 2, 13.3]\n        yin = [None, 1, 2]\n        xout = [0, 1, 2, 13.3]\n\n        yout = resampleStepwise(xin, yin, xout, 
avg=False)\n\n        self.assertEqual(len(yout), len(xout) - 1)\n        self.assertIsNone(yout[0])\n        self.assertEqual(yout[1], 1)\n        self.assertEqual(yout[2], 2)\n\n    def test_resampleStepwiseAvgComplicatedNone(self):\n        \"\"\"Test resampleStepwise() averaging with a None value, when the intervals don't line up.\"\"\"\n        xin = [2, 4, 6, 8, 10]\n        yin = [11, None, 33, 44]\n        xout = [-1, 0, 1, 2, 4, 7, 9]\n\n        yout = resampleStepwise(xin, yin, xout)\n\n        self.assertEqual(len(yout), len(xout) - 1)\n        self.assertEqual(yout[0], 0)\n        self.assertEqual(yout[1], 0)\n        self.assertEqual(yout[2], 0)\n        self.assertEqual(yout[3], 11)\n        self.assertIsNone(yout[4])\n        self.assertEqual(yout[5], 38.5)\n\n    def test_resampleStepwiseAvgNpArray(self):\n        \"\"\"Test resampleStepwise() averaging when some of the values are arrays.\"\"\"\n        xin = [0, 1, 2, 3, 4]\n        yin = [11, np.array([1, 1]), np.array([2, 2]), 44]\n        xout = [2, 4, 5, 6, 7]\n\n        yout = resampleStepwise(xin, yin, xout, avg=True)\n\n        self.assertEqual(len(yout), len(xout) - 1)\n        self.assertTrue(isinstance(yout[0], type(yin[1])))\n        self.assertEqual(yout[0][0], 23.0)\n        self.assertEqual(yout[0][1], 23.0)\n        self.assertEqual(yout[1], 0)\n        self.assertEqual(yout[2], 0)\n        self.assertEqual(yout[3], 0)\n\n    def test_resampleStepwiseAvgNpArrayAverage(self):\n        \"\"\"Test resampleStepwise() summing when some of the values are arrays.\"\"\"\n        xin = [0, 1, 2, 3, 4]\n        yin = [11, np.array([1, 1]), np.array([2, 2]), 44]\n        xout = [2, 4, 5, 6, 7]\n\n        yout = resampleStepwise(xin, yin, xout, avg=False)\n\n        self.assertEqual(len(yout), len(xout) - 1)\n        self.assertTrue(isinstance(yout[0], type(yin[1])))\n        self.assertEqual(yout[0][0], 46.0)\n        self.assertEqual(yout[0][1], 46.0)\n        self.assertEqual(yout[1], 0)\n    
    self.assertEqual(yout[2], 0)\n        self.assertEqual(yout[3], 0)\n\n    def test_rotateXY(self):\n        x = [1.0, -1.0]\n        y = [1.0, 1.0]\n\n        # test operation on scalar\n        xr, yr = rotateXY(x[0], y[0], 45.0)\n        self.assertAlmostEqual(xr, 0.0)\n        self.assertAlmostEqual(yr, sqrt(2))\n\n        xr, yr = rotateXY(x[1], y[1], 45.0)\n        self.assertAlmostEqual(xr, -sqrt(2))\n        self.assertAlmostEqual(yr, 0.0)\n\n        # test operation on list\n        xr, yr = rotateXY(x, y, 45.0)\n        self.assertAlmostEqual(xr[0], 0.0)\n        self.assertAlmostEqual(yr[0], sqrt(2))\n        self.assertAlmostEqual(xr[1], -sqrt(2))\n        self.assertAlmostEqual(yr[1], 0.0)\n"
  },
  {
    "path": "armi/utils/tests/test_outputCache.py",
    "content": "# Copyright 2022 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests of the output cache tools.\"\"\"\n\nimport os\nimport time\nimport unittest\n\nfrom armi.utils import directoryChangers, outputCache\n\n\nclass TestOutputCache(unittest.TestCase):\n    def _buildOutputCache(self, arbitraryString):\n        \"\"\"\n        Helper method, to set up a semi-stupid output cache directory\n        It will have one file and a manifest.\n        It is expected this will be run from within a self-cleaning temp dir.\n        \"\"\"\n        # create some temp file\n        outFile = \"something_{0}.txt\".format(arbitraryString)\n        with open(outFile, \"w\") as f:\n            f.write(\"test\")\n\n        # create an output location\n        os.mkdir(arbitraryString)\n\n        # do the worK: call the function that creates the manifest\n        outputCache._makeOutputManifest([outFile], arbitraryString)\n\n    def test_hashFiles(self):\n        with directoryChangers.TemporaryDirectoryChanger() as _:\n            files = [\"test_hashFiles1.txt\", \"test_hashFiles2.txt\"]\n            for fileName in files:\n                with open(fileName, \"w\") as f:\n                    f.write(\"hi\")\n\n            hashed = outputCache._hashFiles(files)\n\n            self.assertEqual(hashed, \"e9f5713dec55d727bb35392cec6190ce\")\n\n    def test_deleteCache(self):\n        with directoryChangers.TemporaryDirectoryChanger() as _:\n     
       outDir = \"snapshotOutput_Cache\"\n            self.assertFalse(os.path.exists(outDir))\n\n            os.mkdir(outDir)\n            with open(os.path.join(outDir, \"test_deleteCache2.txt\"), \"w\") as f:\n                f.write(\"hi there\")\n\n            self.assertTrue(os.path.exists(outDir))\n            time.sleep(2)\n            outputCache.deleteCache(outDir)\n            self.assertFalse(os.path.exists(outDir))\n\n    def test_getCachedFolder(self):\n        with directoryChangers.TemporaryDirectoryChanger() as _:\n            exePath = \"/path/to/what.exe\"\n            inputPaths = [\"/path/to/something.txt\", \"/path/what/some.ini\"]\n            cacheDir = \"/tmp/thing/what/\"\n            with self.assertRaises(FileNotFoundError):\n                _ = outputCache._getCachedFolder(exePath, inputPaths, cacheDir)\n\n            fakeExe = \"what_getCachedFolder.exe\"\n            with open(fakeExe, \"w\") as f:\n                f.write(\"hi\")\n\n            with self.assertRaises(FileNotFoundError):\n                _ = outputCache._getCachedFolder(fakeExe, inputPaths, cacheDir)\n\n            fakeIni = \"fake_getCachedFolder.ini\"\n            with open(fakeIni, \"w\") as f:\n                f.write(\"hey\")\n\n            folder = outputCache._getCachedFolder(fakeExe, [fakeIni], cacheDir)\n            self.assertTrue(folder.startswith(\"/tmp/thing/what/what_getCachedFolder\"))\n\n    def test_makeOutputManifest(self):\n        with directoryChangers.TemporaryDirectoryChanger() as _:\n            # validate manifest doesn't exist yet\n            manifest = \"test_makeOutputManifest/CRC-manifest.json\"\n            self.assertFalse(os.path.exists(manifest))\n\n            # create outputCache dir and manifest\n            self._buildOutputCache(\"test_makeOutputManifest\")\n\n            # validate manifest was created\n            manifest = \"test_makeOutputManifest/CRC-manifest.json\"\n            self.assertTrue(os.path.exists(manifest))\n\n 
   def test_retrieveOutput(self):\n        with directoryChangers.TemporaryDirectoryChanger() as _:\n            # create outputCache dir and manifest\n            cacheDir = \"test_retrieveOutput_Output_Cache\"\n            self._buildOutputCache(cacheDir)\n\n            # validate manifest was created\n            manifest = \"{0}/CRC-manifest.json\".format(cacheDir)\n            self.assertTrue(os.path.exists(manifest))\n\n            # create a dummy file (not executable), to stand in for the executable\n            fakeExe = \"what_{0}.exe\".format(cacheDir)\n            with open(fakeExe, \"w\") as f:\n                f.write(\"hi\")\n\n            # create folder to retrieve to\n            inputPaths = [\"something_{0}.txt\".format(cacheDir)]\n            newFolder = outputCache._getCachedFolder(fakeExe, inputPaths, cacheDir)\n            os.makedirs(newFolder)\n\n            # throw a new manifest into the new out cache\n            with open(os.path.join(newFolder, \"CRC-manifest.json\"), \"w\") as f:\n                f.write(open(manifest, \"r\").read())\n\n            # attempt to retrieve some output from dummy caches\n            result = outputCache.retrieveOutput(fakeExe, inputPaths, cacheDir, newFolder)\n            self.assertFalse(result)\n"
  },
  {
    "path": "armi/utils/tests/test_parsing.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Unit tests for parsing.\"\"\"\n\nimport unittest\n\nfrom armi.utils import parsing\n\n\nclass LiteralEvalTest(unittest.TestCase):\n    def test_tryLiteralEval(self):\n        self.assertEqual(parsing.tryLiteralEval(\"1\"), 1)\n        self.assertEqual(parsing.tryLiteralEval(1), 1)\n        self.assertEqual(parsing.tryLiteralEval(\"1.0\"), 1.0)\n        self.assertEqual(parsing.tryLiteralEval(1.0), 1.0)\n        self.assertEqual(parsing.tryLiteralEval(1), 1)\n        self.assertEqual(\n            parsing.tryLiteralEval(\"['apple','banana','mango']\"),\n            [\"apple\", \"banana\", \"mango\"],\n        )\n        self.assertEqual(\n            parsing.tryLiteralEval([\"apple\", \"banana\", \"mango\"]),\n            [\"apple\", \"banana\", \"mango\"],\n        )\n        self.assertEqual(\n            parsing.tryLiteralEval(\"{'apple':1,'banana':2,'mango':3}\"),\n            {\"apple\": 1, \"banana\": 2, \"mango\": 3},\n        )\n        self.assertEqual(\n            parsing.tryLiteralEval({\"apple\": 1, \"banana\": 2, \"mango\": 3}),\n            {\"apple\": 1, \"banana\": 2, \"mango\": 3},\n        )\n        self.assertEqual(parsing.tryLiteralEval(\"(1,2)\"), (1, 2))\n        self.assertEqual(parsing.tryLiteralEval((1, 2)), (1, 2))\n        self.assertEqual(parsing.tryLiteralEval(\"u'apple'\"), \"apple\")\n        
self.assertEqual(parsing.tryLiteralEval(\"apple\"), \"apple\")\n        self.assertEqual(parsing.tryLiteralEval(\"apple\"), \"apple\")\n        self.assertEqual(parsing.tryLiteralEval(tuple), tuple)\n\n    def test_parseValue(self):\n        self.assertEqual(parsing.parseValue(\"5\", int), 5)\n        self.assertEqual(parsing.parseValue(5, int), 5)\n        self.assertEqual(parsing.parseValue(\"5\", float), 5.0)\n        self.assertEqual(parsing.parseValue(\"True\", bool), True)\n        self.assertEqual(\n            parsing.parseValue(\"['apple','banana','mango']\", list),\n            [\"apple\", \"banana\", \"mango\"],\n        )\n        self.assertEqual(\n            parsing.parseValue({\"apple\": 1, \"banana\": 2, \"mango\": 3}, dict),\n            {\"apple\": 1, \"banana\": 2, \"mango\": 3},\n        )\n        self.assertEqual(\n            parsing.parseValue(\"{'apple':1,'banana':2,'mango':3}\", dict),\n            {\"apple\": 1, \"banana\": 2, \"mango\": 3},\n        )\n        self.assertEqual(parsing.parseValue(\"(1,2)\", tuple), (1, 2))\n\n        self.assertEqual(parsing.parseValue(\"None\", int, True), 0)\n        self.assertEqual(parsing.parseValue(None, int, True), 0)\n        self.assertEqual(parsing.parseValue(\"None\", bool, True), False)\n        self.assertEqual(parsing.parseValue(None, bool, True), False)\n\n        self.assertEqual(parsing.parseValue(None, bool, True, False), None)\n\n        with self.assertRaises(TypeError):\n            parsing.parseValue(\"5\", str)\n        with self.assertRaises(ValueError):\n            parsing.parseValue(\"5\", bool)\n"
  },
  {
    "path": "armi/utils/tests/test_pathTools.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Unit tests for pathTools.\"\"\"\n\nimport os\nimport time\nimport types\nimport unittest\n\nfrom armi import context\nfrom armi.tests import mockRunLogs\nfrom armi.utils import pathTools\nfrom armi.utils.directoryChangers import TemporaryDirectoryChanger\n\nTHIS_DIR = os.path.dirname(__file__)\n\n\nclass PathToolsTests(unittest.TestCase):\n    def test_copyOrWarnFile(self):\n        with TemporaryDirectoryChanger():\n            # Test a successful copy\n            path = \"test.txt\"\n            pathCopy = \"testcopy.txt\"\n            with open(path, \"w\") as f1:\n                f1.write(\"test\")\n            pathTools.copyOrWarn(\"Test File\", path, pathCopy)\n            self.assertTrue(os.path.exists(pathCopy))\n\n            # Test a non-existent file\n            with mockRunLogs.BufferLog() as mock:\n                pathTools.copyOrWarn(\"Test File\", \"FileDoesntExist.txt\", pathCopy)\n                self.assertIn(\"Could not copy\", mock.getStdout())\n\n    def test_copyOrWarnDir(self):\n        with TemporaryDirectoryChanger():\n            # Test a successful copy\n            pathDir = \"testDir\"\n            path = os.path.join(pathDir, \"test.txt\")\n            pathDirCopy = \"testcopy\"\n            os.mkdir(pathDir)\n            with open(path, \"w\") as f1:\n                f1.write(\"test\")\n            
pathTools.copyOrWarn(\"Test File\", pathDir, pathDirCopy)\n            self.assertTrue(os.path.exists(pathDirCopy))\n            self.assertTrue(os.path.exists(os.path.join(pathDirCopy, \"test.txt\")))\n\n            # Test a non-existent file\n            with mockRunLogs.BufferLog() as mock:\n                pathTools.copyOrWarn(\"Test File\", \"DirDoesntExist\", pathDirCopy)\n                self.assertIn(\"Could not copy\", mock.getStdout())\n\n    def test_separateModuleAndAttribute(self):\n        self.assertRaises(ValueError, pathTools.separateModuleAndAttribute, r\"path/with/no/colon\")\n        self.assertEqual(\n            (r\"aPath/file.py\", \"MyClass\"),\n            pathTools.separateModuleAndAttribute(r\"aPath/file.py:MyClass\"),\n        )\n        # testing windows stuff mapped drives since they have more than 1 colon\n        self.assertEqual(\n            (r\"c:/aPath/file.py\", \"MyClass\"),\n            pathTools.separateModuleAndAttribute(r\"c:/aPath/file.py:MyClass\"),\n        )\n        # not what we want but important to demonstrate what you get when no module\n        # attribute is defined.\n        self.assertEqual(\n            (\"c\", r\"/aPath/file.py\"),\n            pathTools.separateModuleAndAttribute(r\"c:/aPath/file.py\"),\n        )\n\n    def test_importCustomModule(self):\n        \"\"\"Test that importCustomPyModule is usable just like any other module.\"\"\"\n        module = pathTools.importCustomPyModule(os.path.join(THIS_DIR, __file__))\n        self.assertIsInstance(module, types.ModuleType)\n        self.assertIn(\"THIS_DIR\", module.__dict__)\n        # test that this class is present in the import\n        self.assertIn(self.__class__.__name__, module.__dict__)\n\n    def test_moduleAndAttributeExist(self):\n        \"\"\"Test that determination of existence of module attribute works.\"\"\"\n        # test that no `:` doesn't raise an exception\n        
self.assertFalse(pathTools.moduleAndAttributeExist(r\"path/that/not/exist.py\"))\n        # test that multiple `:` doesn't raise an exception\n        self.assertFalse(pathTools.moduleAndAttributeExist(r\"c:/path/that/not/exist.py:MyClass\"))\n        thisFile = os.path.join(THIS_DIR, __file__)\n        # no module attribute specified\n        self.assertFalse(pathTools.moduleAndAttributeExist(thisFile))\n        self.assertFalse(pathTools.moduleAndAttributeExist(thisFile + \":doesntExist\"))\n        self.assertTrue(pathTools.moduleAndAttributeExist(thisFile + \":THIS_DIR\"))\n        self.assertTrue(pathTools.moduleAndAttributeExist(thisFile + \":PathToolsTests\"))\n\n    @unittest.skipUnless(context.MPI_RANK == 0, \"test only on root node\")\n    def test_cleanPathNoMpi(self):\n        \"\"\"Simple tests of cleanPath(), in the no-MPI scenario.\"\"\"\n        with TemporaryDirectoryChanger():\n            # TEST 0: File is not safe to delete, due not being a temp dir or under FAST_PATH\n            filePath0 = \"test0_cleanPathNoMpi\"\n            open(filePath0, \"w\").write(\"something\")\n            self.assertTrue(os.path.exists(filePath0))\n            with self.assertRaises(Exception):\n                pathTools.cleanPath(filePath0, mpiRank=0)\n\n            # TEST 1: Delete a single file under FAST_PATH\n            filePath1 = os.path.join(context.getFastPath(), \"test1_cleanPathNoMpi\")\n            open(filePath1, \"w\").write(\"something\")\n            self.assertTrue(os.path.exists(filePath1))\n            pathTools.cleanPath(filePath1, mpiRank=0)\n            self.assertFalse(os.path.exists(filePath1))\n\n            # TEST 2: Delete an empty directory under FAST_PATH\n            dir2 = os.path.join(context.getFastPath(), \"letitgo\")\n            os.mkdir(dir2)\n            self.assertTrue(os.path.exists(dir2))\n            pathTools.cleanPath(dir2, mpiRank=0)\n            self.assertFalse(os.path.exists(dir2))\n\n            # TEST 3: Delete an 
empty directory with forceClean=True\n            dir3 = \"noyoureadirectory\"\n            os.mkdir(dir3)\n            self.assertTrue(os.path.exists(dir3))\n            pathTools.cleanPath(dir3, mpiRank=0, forceClean=True)\n            self.assertFalse(os.path.exists(dir3))\n\n            # TEST 4: Delete a directory with two files inside with forceClean=True\n            dir4 = \"dirplease\"\n            os.mkdir(dir4)\n            open(os.path.join(dir4, \"file1.txt\"), \"w\").write(\"something1\")\n            open(os.path.join(dir4, \"file2.txt\"), \"w\").write(\"something2\")\n            # delete the directory and test\n            self.assertTrue(os.path.exists(dir4))\n            self.assertTrue(os.path.exists(os.path.join(dir4, \"file1.txt\")))\n            self.assertTrue(os.path.exists(os.path.join(dir4, \"file2.txt\")))\n            pathTools.cleanPath(dir4, mpiRank=0, forceClean=True)\n            self.assertFalse(os.path.exists(dir4))\n\n    def test_isFilePathNewer(self):\n        with TemporaryDirectoryChanger():\n            path1 = \"test_isFilePathNewer1.txt\"\n            with open(path1, \"w\") as f1:\n                f1.write(\"test1\")\n\n            time.sleep(1)\n\n            path2 = \"test_isFilePathNewer2.txt\"\n            with open(path2, \"w\") as f2:\n                f2.write(\"test2\")\n\n            self.assertFalse(pathTools.isFilePathNewer(path1, path2))\n            self.assertTrue(pathTools.isFilePathNewer(path2, path1))\n\n    def test_isAccessible(self):\n        with TemporaryDirectoryChanger():\n            path1 = \"test_isAccessible.txt\"\n            with open(path1, \"w\") as f1:\n                f1.write(\"test\")\n\n            self.assertTrue(pathTools.isAccessible(path1))\n"
  },
  {
    "path": "armi/utils/tests/test_plotting.py",
    "content": "# Copyright 2020 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for basic plotting tools.\"\"\"\n\nimport os\nimport shutil\nimport unittest\nfrom glob import glob\nfrom unittest.mock import patch\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom armi import settings\nfrom armi.nuclearDataIO.cccc import isotxs\nfrom armi.reactor import blueprints, reactors\nfrom armi.reactor.flags import Flags\nfrom armi.reactor.tests import test_reactors\nfrom armi.testing import TESTING_ROOT\nfrom armi.tests import ISOAA_PATH, TEST_ROOT, getEmptyHexReactor\nfrom armi.utils import plotting\nfrom armi.utils.directoryChangers import TemporaryDirectoryChanger\n\n\nclass TestPlotting(unittest.TestCase):\n    \"\"\"\n    Test and demonstrate some plotting capabilities of ARMI.\n\n    Notes\n    -----\n    These tests don't do a great job of making sure the plot appears correctly, but they do check that the lines of code\n    run, and that an image is produced, and demonstrate how they are meant to be called.\n    \"\"\"\n\n    @classmethod\n    def setUpClass(cls):\n        cls.o, cls.r = test_reactors.loadTestReactor(inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\")\n\n    def test_plotDepthMap(self):\n        \"\"\"Indirectly tests plot face map.\"\"\"\n        with TemporaryDirectoryChanger():\n            # set some params to visualize\n            for i, b in enumerate(self.o.r.core.iterBlocks()):\n         
       b.p.percentBu = i / 100\n            fName = plotting.plotBlockDepthMap(self.r.core, param=\"percentBu\", fName=\"depthMapPlot.png\", depthIndex=2)\n            self._checkFileExists(fName)\n\n            # catch an edge case error (no matching assemblies)\n            with self.assertRaises(ValueError):\n                r = getEmptyHexReactor()\n                plotting.plotBlockDepthMap(r.core)\n\n    def test_plotFaceMap(self):\n        \"\"\"Indirectly tests plot face map.\"\"\"\n        with TemporaryDirectoryChanger():\n            for i, b in enumerate(self.o.r.core.iterBlocks()):\n                b.p.percentBu = i / 100\n\n            # make sure some of the plot files exist\n            fName = plotting.plotFaceMap(self.r.core, param=\"percentBu\", fName=\"faceMapPlot0.png\", makeColorBar=True)\n            self._checkFileExists(fName)\n\n            fName = plotting.plotFaceMap(self.r.core, param=\"percentBu\", fName=\"faceMapPlot1.png\", vals=\"average\")\n            self._checkFileExists(fName)\n\n            # catch an edge case error (bad val name)\n            with self.assertRaises(ValueError):\n                plotting.plotFaceMap(self.r.core, param=\"percentBu\", fName=\"faceMapPlot2.png\", vals=\"whoops\")\n\n            # this should not throw an error\n            plotting.close()\n\n    def test_plotAssemblyTypes(self):\n        with TemporaryDirectoryChanger():\n            plotPath = \"coreAssemblyTypes1.png\"\n            plotting.plotAssemblyTypes(list(self.r.core.parent.blueprints.assemblies.values()), plotPath)\n            self._checkFileExists(plotPath)\n\n            if os.path.exists(plotPath):\n                os.remove(plotPath)\n\n            plotPath = \"coreAssemblyTypes2.png\"\n            fig = plotting.plotAssemblyTypes(\n                list(self.r.core.parent.blueprints.assemblies.values()),\n                plotPath,\n                yAxisLabel=\"y axis\",\n                title=\"title\",\n            )\n          
  self.assertFalse(fig.subfigures(1, 1).subplots().has_data())\n            self.assertEqual(fig.axes[0]._children[0].xy, (0.5, 0))\n            self._checkFileExists(plotPath)\n\n            for _ in range(3):\n                if os.path.exists(plotPath):\n                    os.remove(plotPath)\n\n    def test_plotRadialReactorLayouts(self):\n        figs = plotting.plotRadialReactorLayouts(self.r)\n        self.assertEqual(len(figs), 1)\n        self.assertEqual(figs[0].axes[0]._children[0].xy, (0.5, 0))\n\n        plotPath = \"coreAssemblyTypes1-rank0.png\"\n        for _ in range(3):\n            if os.path.exists(plotPath):\n                os.remove(plotPath)\n\n    def test_plotScatterMatrix(self):\n        plotPath = \"test_plotScatterMatrix.png\"\n        lib = isotxs.readBinary(ISOAA_PATH)\n        u235 = lib.getNuclide(\"U235\", \"AA\")\n        scatterMatrix = u235.micros.inelasticScatter\n        img = plotting.plotScatterMatrix(scatterMatrix, fName=plotPath)\n        self.assertGreater(len(img.axes.get_children()), 10)\n        self.assertLess(len(img.axes.get_children()), 30)\n        self.assertTrue(img.axes.has_data())\n\n        for _ in range(3):\n            if os.path.exists(plotPath):\n                os.remove(plotPath)\n\n    def test_plotBlocksInAssembly(self):\n        _fig, ax = plt.subplots(figsize=(15, 15), dpi=300)\n        xBlockLoc, yBlockHeights, yBlockAxMesh = plotting._plotBlocksInAssembly(\n            ax,\n            self.r.core.getFirstAssembly(Flags.FUEL),\n            True,\n            [],\n            set(),\n            0.5,\n            5.6,\n            True,\n            hot=True,\n        )\n        self.assertEqual(xBlockLoc, 0.5)\n        self.assertEqual(yBlockHeights[0], 25.0)\n        yBlockAxMesh = list(yBlockAxMesh)[0]\n        self.assertIn(10.0, yBlockAxMesh)\n        self.assertIn(25.0, yBlockAxMesh)\n        self.assertIn(1, yBlockAxMesh)\n\n    def test_plotBlockFlux(self):\n        with 
TemporaryDirectoryChanger():\n            xslib = isotxs.readBinary(ISOAA_PATH)\n            self.r.core.lib = xslib\n\n            blocks = self.r.core.getBlocks()\n            for b in blocks:\n                b.p.mgFlux = range(33)\n\n            plotting.plotBlockFlux(self.r.core, fName=\"flux.png\", bList=blocks)\n            self.assertTrue(os.path.exists(\"flux.png\"))\n            plotting.plotBlockFlux(self.r.core, fName=\"peak.png\", bList=blocks, peak=True)\n            self._checkFileExists(\"peak.png\")\n            plotting.plotBlockFlux(\n                self.r.core,\n                fName=\"bList2.png\",\n                bList=blocks,\n                bList2=blocks,\n            )\n            self._checkFileExists(\"bList2.png\")\n\n    def test_plotHexBlock(self):\n        with TemporaryDirectoryChanger():\n            first_fuel_block = self.r.core.getFirstBlock(Flags.FUEL)\n            first_fuel_block.autoCreateSpatialGrids(self.r.core.spatialGrid)\n            plotting.plotBlockDiagram(first_fuel_block, \"blockDiagram23.svg\", True)\n            self._checkFileExists(\"blockDiagram23.svg\")\n\n    def test_plotCartesianBlock(self):\n        with TemporaryDirectoryChanger():\n            cs = settings.Settings(os.path.join(TESTING_ROOT, \"reactors\", \"c5g7\", \"c5g7-settings.yaml\"))\n            blueprint = blueprints.loadFromCs(cs)\n            _ = reactors.factory(cs, blueprint)\n            for name, bDesign in blueprint.blockDesigns.items():\n                b = bDesign.construct(cs, blueprint, 0, 1, 1, \"AA\", {})\n                plotting.plotBlockDiagram(b, \"{}.svg\".format(name), True)\n\n            self._checkFileExists(\"uo2.svg\")\n            self._checkFileExists(\"mox.svg\")\n\n    def _checkFileExists(self, fName):\n        self.assertTrue(os.path.exists(fName))\n\n\nclass TestPatches(unittest.TestCase):\n    \"\"\"Test the ability to correctly make patches.\"\"\"\n\n    @classmethod\n    def setUpClass(cls):\n        # 
Prepare the input files. This is important so the unit tests run from wherever they need to run from.\n        cls.td = TemporaryDirectoryChanger()\n        cls.td.__enter__()\n\n    @classmethod\n    def tearDownClass(cls):\n        cls.td.__exit__(None, None, None)\n\n    @patch(\"armi.utils.plotting.plt.figure\")\n    @patch(\"armi.utils.plotting.plt.savefig\")\n    def test_makeAssemPatches(self, mockSavefig, mockFigure):\n        # mock up a flats-up version of the smallest test reactor\n        for fPath in glob(os.path.join(TEST_ROOT, \"smallestTestReactor\", \"*.yaml\")):\n            fName = os.path.basename(fPath)\n            shutil.copyfile(fPath, fName)\n\n        txt = open(\"refSmallestReactor.yaml\", \"r\").read()\n        txt = txt.replace(\"geom: hex_corners_up\", \"geom: hex\")\n        with open(\"refSmallestReactor.yaml\", \"w\") as f:\n            f.write(txt)\n\n        # this one is flats-up with many assemblies in the core\n        _, rHexFlatsUp = test_reactors.loadTestReactor(inputFilePath=\".\", inputFileName=\"armiRunSmallest.yaml\")\n\n        nAssems = len(rHexFlatsUp.core)\n        self.assertEqual(nAssems, 1)\n        patches = plotting._makeAssemPatches(rHexFlatsUp.core)\n        self.assertEqual(len(patches), nAssems)\n\n        # find the patch corresponding to the center assembly\n        for pat in patches:\n            if np.allclose(pat.xy, (0, 0)):\n                break\n\n        vertices = pat.get_verts()\n        # there should be 1 more than the number of points in the shape\n        self.assertEqual(len(vertices), 7)\n        # for flats-up, the first vertex should have a y position of ~zero\n        self.assertAlmostEqual(vertices[0][1], 0)\n\n        # this one is corners-up, with only a single assembly\n        _, rHexCornersUp = test_reactors.loadTestReactor(inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\")\n\n        nAssems = len(rHexCornersUp.core)\n        self.assertEqual(nAssems, 1)\n        patches 
= plotting._makeAssemPatches(rHexCornersUp.core)\n        self.assertEqual(len(patches), 1)\n\n        vertices = patches[0].get_verts()\n        self.assertEqual(len(vertices), 7)\n        # for corners-up, the first vertex should have an x position of ~zero\n        self.assertAlmostEqual(vertices[0][0], 0)\n\n        # this one is cartestian, with many assemblies in the core\n        _, rCartesian = test_reactors.loadTestReactor(inputFileName=\"refTestCartesian.yaml\")\n\n        nAssems = len(rCartesian.core)\n        self.assertGreater(nAssems, 1)\n        patches = plotting._makeAssemPatches(rCartesian.core)\n        self.assertEqual(nAssems, len(patches))\n\n        # Just pick a given patch and ensure that it is square-like. Orientation is not important here.\n        vertices = patches[0].get_verts()\n        self.assertEqual(len(vertices), 5)\n"
  },
  {
    "path": "armi/utils/tests/test_properties.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests of the properties class.\"\"\"\n\nimport unittest\n\nfrom armi.utils import properties\n\n\nclass ImmutableClass:\n    myNum = properties.createImmutableProperty(\"myNum\", \"You must invoke the initialize() method\", \"My random number\")\n\n    def initialize(self, val):\n        properties.unlockImmutableProperties(self)\n        try:\n            self.myNum = val\n        finally:\n            properties.lockImmutableProperties(self)\n\n\nclass ImmutablePropertyTests(unittest.TestCase):\n    def test_retreivingUnassignedValue(self):\n        \"\"\"Attempting to retrieve an unassigned value should raise an error.\"\"\"\n        ic = ImmutableClass()\n        with self.assertRaises(properties.ImmutablePropertyError):\n            print(ic.myNum)\n\n    def test_noAssignImmutableProperty(self):\n        \"\"\"Cannot assign a value to an immutable property.\"\"\"\n        ic = ImmutableClass()\n        ic.myNum = 4.0\n        with self.assertRaises(properties.ImmutablePropertyError):\n            ic.myNum = 2.2\n        self.assertEqual(ic.myNum, 4.0)\n\n    def test_unlockImmutableReassignment(self):\n        \"\"\"Unlock does not permit reassignment of an immutable property.\"\"\"\n        ic = ImmutableClass()\n        ic.myNum = 7.7\n        with self.assertRaises(properties.ImmutablePropertyError):\n            ic.initialize(3.4)\n        
self.assertEqual(ic.myNum, 7.7)\n"
  },
  {
    "path": "armi/utils/tests/test_reportPlotting.py",
    "content": "# Copyright 2020 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Test plotting.\"\"\"\n\nimport copy\nimport os\nimport unittest\n\nimport numpy as np\n\nfrom armi.reactor.flags import Flags\nfrom armi.reactor.tests import test_reactors\nfrom armi.tests import TEST_ROOT\nfrom armi.utils.directoryChangers import TemporaryDirectoryChanger\nfrom armi.utils.reportPlotting import (\n    _getPhysicalVals,\n    createPlotMetaData,\n    keffVsTime,\n    movesVsCycle,\n    plotAxialProfile,\n    plotCoreOverviewRadar,\n    valueVsTime,\n)\n\n\nclass TestRadar(unittest.TestCase):\n    def setUp(self):\n        self.o, self.r = test_reactors.loadTestReactor(\n            TEST_ROOT, inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\"\n        )\n        self.td = TemporaryDirectoryChanger()\n        self.td.__enter__()\n\n    def tearDown(self):\n        self.td.__exit__(None, None, None)\n\n    def test_radar(self):\n        \"\"\"Test execution of radar plot. 
Note this has no asserts and is therefore a smoke test.\"\"\"\n        r2 = copy.deepcopy(self.r)\n        plotCoreOverviewRadar([self.r, r2], [\"Label1\", \"Label2\"])\n        self.assertTrue(os.path.exists(\"reactor_comparison.png\"))\n\n    def test_getPhysicalVals(self):\n        dims, labels, vals = _getPhysicalVals(self.r)\n        self.assertEqual(dims, \"Dimensions\")\n\n        self.assertEqual(labels[0], \"Cold fuel height\")\n        self.assertEqual(labels[1], \"Fuel assems\")\n        self.assertEqual(labels[2], \"Assem weight\")\n        self.assertEqual(labels[3], \"Core radius\")\n        self.assertEqual(labels[4], \"Core aspect ratio\")\n        self.assertEqual(labels[5], \"Fissile mass\")\n        self.assertEqual(len(labels), 6)\n\n        self.assertEqual(vals[0], 25.0)\n        self.assertEqual(vals[1], 1)\n        self.assertAlmostEqual(vals[2], 52474.8927038, delta=1e-5)\n        self.assertEqual(vals[3], 16.8)\n        self.assertAlmostEqual(vals[5], 4290.60340961, delta=1e-5)\n        self.assertEqual(len(vals), 6)\n\n        # this test will use getInputHeight() instead of getHeight()\n        radius = self.r.core.getCoreRadius()\n        avgHeight = 0\n        fuelA = self.r.core.getAssemblies(Flags.FUEL)\n        for a in fuelA:\n            for b in a.getBlocks(Flags.FUEL):\n                avgHeight += b.getInputHeight()\n        avgHeight /= len(fuelA)\n        coreAspectRatio = (2 * radius) / avgHeight\n        self.assertEqual(vals[4], coreAspectRatio)\n\n    def test_createPlotMetaData(self):\n        title = \"test_createPlotMetaData\"\n        xLabel = \"xLabel\"\n        yLabel = \"yLabel\"\n        xTicks = [1, 2]\n        yTicks = [3, 4]\n        labels = [\"a\", \"b\"]\n        meta = createPlotMetaData(title, xLabel, yLabel, xTicks, yTicks, labels)\n\n        self.assertEqual(len(meta), 6)\n        self.assertEqual(meta[\"title\"], title)\n        self.assertEqual(meta[\"xlabel\"], xLabel)\n        
self.assertEqual(meta[\"ylabel\"], yLabel)\n\n    def test_plotAxialProfile(self):\n        vals = list(range(1, 10, 2))\n        fName = \"test_plotAxialProfile\"\n\n        xLabel = \"xLabel\"\n        yLabel = \"yLabel\"\n        xTicks = [1, 2]\n        yTicks = [3, 4]\n        labels = [\"a\", \"b\"]\n        meta = createPlotMetaData(fName, xLabel, yLabel, xTicks, yTicks, labels)\n\n        plotAxialProfile(vals, np.ones((5, 2)), fName, meta, nPlot=2)\n        self.assertTrue(os.path.exists(fName + \".png\"))\n\n    def test_keffVsTime(self):\n        t = list(range(12))\n        ext = \"png\"\n\n        # plot with no keff function\n        keffVsTime(self.r.name, t, t, keffUnc=[], extension=ext)\n        self.assertTrue(os.path.exists(\"R-armiRunSmallest.keff.png\"))\n        self.assertGreater(os.path.getsize(\"R-armiRunSmallest.keff.png\"), 0)\n\n        # plot with a keff function\n        keffVsTime(self.r.name, t, t, t, extension=ext)\n        self.assertTrue(os.path.exists(\"R-armiRunSmallest.keff.png\"))\n        self.assertGreater(os.path.getsize(\"R-armiRunSmallest.keff.png\"), 0)\n\n    def test_valueVsTime(self):\n        t = list(range(12))\n        ext = \"png\"\n        valueVsTime(self.r.name, t, t, \"val\", \"yaxis\", \"title\", extension=ext)\n        self.assertTrue(os.path.exists(\"R-armiRunSmallest.val.png\"))\n        self.assertGreater(os.path.getsize(\"R-armiRunSmallest.val.png\"), 0)\n\n    def test_movesVsCycle(self):\n        name = \"movesVsCycle\"\n        scalars = {\n            \"cycle\": [1, 2, 3, 4],\n            \"maxBuF\": [6, 7, 8, 9],\n            \"maxBuI\": [6, 7, 8, 9],\n            \"maxDPA\": [6, 7, 8, 9],\n            \"numMoves\": [2, 2, 2, 2],\n            \"time\": [1, 2, 3, 4],\n        }\n        figName = name + \".moves.png\"\n        movesVsCycle(name, scalars, \"png\")\n        self.assertTrue(os.path.exists(figName))\n        self.assertGreater(os.path.getsize(figName), 0)\n"
  },
  {
    "path": "armi/utils/tests/test_tabulate.py",
    "content": "# Copyright 2024 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for tabulate.\n\nThis file started out as the MIT-licensed \"tabulate\". Though we have made, and will continue to make\nmany arbitrary changes as we need. Thanks to the tabulate team.\n\nhttps://github.com/astanin/python-tabulate\n\"\"\"\n\nimport unittest\nfrom collections import OrderedDict, UserDict, defaultdict, namedtuple\nfrom dataclasses import dataclass\nfrom datetime import datetime\n\nimport numpy as np\n\nfrom armi.utils.tabulate import (\n    SEPARATING_LINE,\n    _alignCellVeritically,\n    _alignColumn,\n    _bool,\n    _buildLine,\n    _buildRow,\n    _format,\n    _isMultiline,\n    _multilineWidth,\n    _normalizeTabularData,\n    _tableFormats,\n    _type,\n    _visibleWidth,\n    _wrapTextToColWidths,\n    tabulate,\n    tabulateFormats,\n)\n\n\nclass TestTabulateAPI(unittest.TestCase):\n    def test_tabulateFormats(self):\n        \"\"\"API: tabulateFormats is a list of strings.\"\"\"\n        supported = tabulateFormats\n        self.assertEqual(type(supported), list)\n        for fmt in supported:\n            self.assertEqual(type(fmt), str)\n\n\nclass TestTabulateInputs(unittest.TestCase):\n    def test_iterableOfEmpties(self):\n        \"\"\"Input: test various empty inputs.\"\"\"\n        ii = iter(map(lambda x: iter(x), []))\n        result = tabulate(ii, \"firstrow\")\n        self.assertEqual(\"\", result)\n\n        ij = 
iter(map(lambda x: iter(x), [\"abcde\"]))\n        expected = \"\\n\".join(\n            [\n                \"a    b    c    d    e\",\n                \"---  ---  ---  ---  ---\",\n            ]\n        )\n        result = tabulate(ij, \"firstrow\")\n        self.assertEqual(expected, result)\n\n        ik = iter([])\n        expected = \"\\n\".join(\n            [\n                \"a    b    c\",\n                \"---  ---  ---\",\n            ]\n        )\n        result = tabulate(ik, \"abc\")\n        self.assertEqual(expected, result)\n\n    def test_iterableOfIterables(self):\n        \"\"\"Input: an iterable of iterables.\"\"\"\n        ii = iter(map(lambda x: iter(x), [range(5), range(5, 0, -1)]))\n        expected = \"\\n\".join([\"-  -  -  -  -\", \"0  1  2  3  4\", \"5  4  3  2  1\", \"-  -  -  -  -\"])\n        result = tabulate(ii, headersAlign=\"center\")\n        self.assertEqual(expected, result)\n\n    def test_iterableOfIterablesHeaders(self):\n        \"\"\"Input: an iterable of iterables with headers.\"\"\"\n        ii = iter(map(lambda x: iter(x), [range(5), range(5, 0, -1)]))\n        expected = \"\\n\".join(\n            [\n                \"  a    b    c    d    e\",\n                \"---  ---  ---  ---  ---\",\n                \"  0    1    2    3    4\",\n                \"  5    4    3    2    1\",\n            ]\n        )\n        result = tabulate(ii, \"abcde\")\n        self.assertEqual(expected, result)\n\n    def test_iterableOfIterablesFirstrow(self):\n        \"\"\"Input: an iterable of iterables with the first row as headers.\"\"\"\n        ii = iter(map(lambda x: iter(x), [\"abcde\", range(5), range(5, 0, -1)]))\n        expected = \"\\n\".join(\n            [\n                \"  a    b    c    d    e\",\n                \"---  ---  ---  ---  ---\",\n                \"  0    1    2    3    4\",\n                \"  5    4    3    2    1\",\n            ]\n        )\n        result = tabulate(ii, \"firstrow\")\n        
self.assertEqual(expected, result)\n\n    def test_listOfLists(self):\n        \"\"\"Input: a list of lists with headers.\"\"\"\n        ll = [[\"a\", \"one\", 1], [\"b\", \"two\", None]]\n        expected = \"\\n\".join(\n            [\n                \"    string      number\",\n                \"--  --------  --------\",\n                \"a   one              1\",\n                \"b   two\",\n            ]\n        )\n        result = tabulate(ll, headers=[\"string\", \"number\"])\n        self.assertEqual(expected, result)\n\n    def test_listOfListsFirstrow(self):\n        \"\"\"Input: a list of lists with the first row as headers.\"\"\"\n        ll = [[\"string\", \"number\"], [\"a\", \"one\", 1], [\"b\", \"two\", None]]\n        expected = \"\\n\".join(\n            [\n                \"    string      number\",\n                \"--  --------  --------\",\n                \"a   one              1\",\n                \"b   two\",\n            ]\n        )\n        result = tabulate(ll, headers=\"firstrow\")\n        self.assertEqual(expected, result)\n\n    def test_listOfListsKeys(self):\n        \"\"\"Input: a list of lists with column indices as headers.\"\"\"\n        ll = [[\"a\", \"one\", 1], [\"b\", \"two\", None]]\n        expected = \"\\n\".join([\"0    1      2\", \"---  ---  ---\", \"a    one    1\", \"b    two\"])\n        result = tabulate(ll, headers=\"keys\")\n        self.assertEqual(expected, result)\n\n    def test_dictLike(self):\n        \"\"\"Input: a dict of iterables with keys as headers.\"\"\"\n        # columns should be padded with None, keys should be used as headers\n        dd = {\"a\": range(3), \"b\": range(101, 105)}\n        # keys' order (hence columns' order) is not deterministic in Python 3\n        # => we have to consider both possible results as valid\n        expected1 = \"\\n\".join([\"  a    b\", \"---  ---\", \"  0  101\", \"  1  102\", \"  2  103\", \"     104\"])\n        result = tabulate(dd, \"keys\")\n      
  self.assertEqual(result, expected1)\n\n    def test_numpy2d(self):\n        \"\"\"Input: a 2D NumPy array with headers.\"\"\"\n        na = (np.arange(1, 10, dtype=np.float32).reshape((3, 3)) ** 3) * 0.5\n        expected = \"\\n\".join(\n            [\n                \"    a      b      c\",\n                \"-----  -----  -----\",\n                \"  0.5    4     13.5\",\n                \" 32     62.5  108\",\n                \"171.5  256    364.5\",\n            ]\n        )\n        result = tabulate(na, [\"a\", \"b\", \"c\"])\n        self.assertEqual(expected, result)\n\n    def test_numpy2dFirstrow(self):\n        \"\"\"Input: a 2D NumPy array with the first row as headers.\"\"\"\n        na = np.arange(1, 10, dtype=np.int32).reshape((3, 3)) ** 3\n        expected = \"\\n\".join([\"  1    8    27\", \"---  ---  ----\", \" 64  125   216\", \"343  512   729\"])\n        result = tabulate(na, headers=\"firstrow\")\n        self.assertEqual(expected, result)\n\n    def test_numpy2dKeys(self):\n        \"\"\"Input: a 2D NumPy array with column indices as headers.\"\"\"\n        na = (np.arange(1, 10, dtype=np.float32).reshape((3, 3)) ** 3) * 0.5\n        expected = \"\\n\".join(\n            [\n                \"    0      1      2\",\n                \"-----  -----  -----\",\n                \"  0.5    4     13.5\",\n                \" 32     62.5  108\",\n                \"171.5  256    364.5\",\n            ]\n        )\n        result = tabulate(na, headers=\"keys\")\n        self.assertEqual(expected, result)\n\n    def test_numpyRecordArray(self):\n        \"\"\"Input: a 2D NumPy record array without header.\"\"\"\n        na = np.asarray(\n            [(\"Alice\", 23, 169.5), (\"Bob\", 27, 175.0)],\n            dtype={\n                \"names\": [\"name\", \"age\", \"height\"],\n                \"formats\": [\"S32\", \"uint8\", \"float32\"],\n            },\n        )\n        expected = \"\\n\".join(\n            [\n                \"-----  --  
-----\",\n                \"Alice  23  169.5\",\n                \"Bob    27  175\",\n                \"-----  --  -----\",\n            ]\n        )\n        result = tabulate(na)\n        self.assertEqual(expected, result)\n\n    def test_numpyRecordArrayKeys(self):\n        \"\"\"Input: a 2D NumPy record array with column names as headers.\"\"\"\n        na = np.asarray(\n            [(\"Alice\", 23, 169.5), (\"Bob\", 27, 175.0)],\n            dtype={\n                \"names\": [\"name\", \"age\", \"height\"],\n                \"formats\": [\"S32\", \"uint8\", \"float32\"],\n            },\n        )\n        expected = \"\\n\".join(\n            [\n                \"name      age    height\",\n                \"------  -----  --------\",\n                \"Alice      23     169.5\",\n                \"Bob        27     175\",\n            ]\n        )\n        result = tabulate(na, headers=\"keys\")\n        self.assertEqual(expected, result)\n\n    def test_numpyRecordArrayHeaders(self):\n        \"\"\"Input: a 2D NumPy record array with user-supplied headers.\"\"\"\n        na = np.asarray(\n            [(\"Alice\", 23, 169.5), (\"Bob\", 27, 175.0)],\n            dtype={\n                \"names\": [\"name\", \"age\", \"height\"],\n                \"formats\": [\"S32\", \"uint8\", \"float32\"],\n            },\n        )\n        expected = \"\\n\".join(\n            [\n                \"person      years     cm\",\n                \"--------  -------  -----\",\n                \"Alice          23  169.5\",\n                \"Bob            27  175\",\n            ]\n        )\n        result = tabulate(na, headers=[\"person\", \"years\", \"cm\"])\n        self.assertEqual(expected, result)\n\n    def test_listOfNamedtuples(self):\n        \"\"\"Input: a list of named tuples with field names as headers.\"\"\"\n        NT = namedtuple(\"NT\", [\"foo\", \"bar\"])\n        lt = [NT(1, 2), NT(3, 4)]\n        expected = \"\\n\".join([\"-  -\", \"1  2\", \"3  4\", 
\"-  -\"])\n        result = tabulate(lt)\n        self.assertEqual(expected, result)\n\n    def test_listOfNamedtuplesKeys(self):\n        \"\"\"Input: a list of named tuples with field names as headers.\"\"\"\n        NT = namedtuple(\"NT\", [\"foo\", \"bar\"])\n        lt = [NT(1, 2), NT(3, 4)]\n        expected = \"\\n\".join([\"  foo    bar\", \"-----  -----\", \"    1      2\", \"    3      4\"])\n        result = tabulate(lt, headers=\"keys\")\n        self.assertEqual(expected, result)\n\n    def test_listOfDicts(self):\n        \"\"\"Input: a list of dictionaries.\"\"\"\n        lod = [{\"foo\": 1, \"bar\": 2}, {\"foo\": 3, \"bar\": 4}]\n        expected1 = \"\\n\".join([\"-  -\", \"1  2\", \"3  4\", \"-  -\"])\n        expected2 = \"\\n\".join([\"-  -\", \"2  1\", \"4  3\", \"-  -\"])\n        result = tabulate(lod)\n        self.assertIn(result, [expected1, expected2])\n\n    def test_listOfUserdicts(self):\n        \"\"\"Input: a list of UserDicts.\"\"\"\n        lod = [UserDict(foo=1, bar=2), UserDict(foo=3, bar=4)]\n        expected1 = \"\\n\".join([\"-  -\", \"1  2\", \"3  4\", \"-  -\"])\n        expected2 = \"\\n\".join([\"-  -\", \"2  1\", \"4  3\", \"-  -\"])\n        result = tabulate(lod)\n        self.assertIn(result, [expected1, expected2])\n\n    def test_listOfDictsKeys(self):\n        \"\"\"Input: a list of dictionaries, with keys as headers.\"\"\"\n        lod = [{\"foo\": 1, \"bar\": 2}, {\"foo\": 3, \"bar\": 4}]\n        expected1 = \"\\n\".join([\"  foo    bar\", \"-----  -----\", \"    1      2\", \"    3      4\"])\n        expected2 = \"\\n\".join([\"  bar    foo\", \"-----  -----\", \"    2      1\", \"    4      3\"])\n        result = tabulate(lod, headers=\"keys\")\n        self.assertIn(result, [expected1, expected2])\n\n    def test_listOfUserdictsKeys(self):\n        \"\"\"Input: a list of UserDicts.\"\"\"\n        lod = [UserDict(foo=1, bar=2), UserDict(foo=3, bar=4)]\n        expected1 = \"\\n\".join([\"  foo    bar\", 
\"-----  -----\", \"    1      2\", \"    3      4\"])\n        expected2 = \"\\n\".join([\"  bar    foo\", \"-----  -----\", \"    2      1\", \"    4      3\"])\n        result = tabulate(lod, headers=\"keys\")\n        self.assertIn(result, [expected1, expected2])\n\n    def test_listOfDictsMissingKeys(self):\n        \"\"\"Input: a list of dictionaries, with missing keys.\"\"\"\n        lod = [{\"foo\": 1}, {\"bar\": 2}, {\"foo\": 4, \"baz\": 3}]\n        expected = \"\\n\".join(\n            [\n                \"  foo    bar    baz\",\n                \"-----  -----  -----\",\n                \"    1\",\n                \"           2\",\n                \"    4             3\",\n            ]\n        )\n        result = tabulate(lod, headers=\"keys\")\n        self.assertEqual(expected, result)\n\n    def test_listOfDictsFirstrow(self):\n        \"\"\"Input: a list of dictionaries, with the first dict as headers.\"\"\"\n        lod = [{\"foo\": \"FOO\", \"bar\": \"BAR\"}, {\"foo\": 3, \"bar\": 4, \"baz\": 5}]\n        # if some key is missing in the first dict, use the key name instead\n        expected1 = \"\\n\".join([\"  FOO    BAR    baz\", \"-----  -----  -----\", \"    3      4      5\"])\n        expected2 = \"\\n\".join([\"  BAR    FOO    baz\", \"-----  -----  -----\", \"    4      3      5\"])\n        result = tabulate(lod, headers=\"firstrow\")\n        self.assertIn(result, [expected1, expected2])\n\n    def test_listOfDictsDictOfHeaders(self):\n        \"\"\"Input: a dict of user headers for a list of dicts.\"\"\"\n        table = [{\"letters\": \"ABCDE\", \"digits\": 12345}]\n        headers = {\"digits\": \"DIGITS\", \"letters\": \"LETTERS\"}\n        expected1 = \"\\n\".join([\"  DIGITS  LETTERS\", \"--------  ---------\", \"   12345  ABCDE\"])\n        expected2 = \"\\n\".join([\"LETTERS      DIGITS\", \"---------  --------\", \"ABCDE         12345\"])\n        result = tabulate(table, headers=headers)\n        self.assertIn(result, 
[expected1, expected2])\n\n    def test_listOfDictsListOfHeaders(self):\n        \"\"\"Input: ValueError on a list of headers with a list of dicts.\"\"\"\n        table = [{\"letters\": \"ABCDE\", \"digits\": 12345}]\n        headers = [\"DIGITS\", \"LETTERS\"]\n        with self.assertRaises(ValueError):\n            tabulate(table, headers=headers)\n\n    def test_listOfOrdereddicts(self):\n        \"\"\"Input: a list of OrderedDicts.\"\"\"\n        od = OrderedDict([(\"b\", 1), (\"a\", 2)])\n        lod = [od, od]\n        expected = \"\\n\".join([\"  b    a\", \"---  ---\", \"  1    2\", \"  1    2\"])\n        result = tabulate(lod, headers=\"keys\")\n        self.assertEqual(expected, result)\n\n    def test_listBytes(self):\n        \"\"\"Input: a list of bytes.\"\"\"\n        lb = [[\"你好\".encode(\"utf-8\")], [\"你好\"]]\n        expected = \"\\n\".join(\n            [\n                \"bytes\",\n                \"---------------------------\",\n                r\"b'\\xe4\\xbd\\xa0\\xe5\\xa5\\xbd'\",\n                \"你好\",\n            ]\n        )\n        result = tabulate(lb, headers=[\"bytes\"])\n        self.assertEqual(expected, result)\n\n    def test_tightCouplingExample(self):\n        \"\"\"Input: Real world-ish example from tight coupling.\"\"\"\n        # the two examples below should both produce the same output:\n        border = \"--  ------------------------------  --------------  ----------------------------\"\n        expected = \"\\n\".join(\n            [\n                border,\n                \"      criticalCrIteration: keffUnc    dif3d: power    thInterface: THavgCladTemp\",\n                border,\n                \" 0                     9.01234e-05      0.00876543                    0.00123456\",\n                border,\n            ]\n        )\n\n        # the data is a regular dictionary\n        data = {\n            \"criticalCrIteration: keffUnc\": [9.01234e-05],\n            \"dif3d: power\": [0.00876543],\n            
\"thInterface: THavgCladTemp\": [0.00123456],\n        }\n        result = tabulate(data, headers=\"keys\", showIndex=True, tableFmt=\"armi\")\n        self.assertEqual(expected, result)\n\n        # the data is a defaultdict\n        dataD = defaultdict(list)\n        for key, vals in data.items():\n            for val in vals:\n                dataD[key].append(val)\n\n        result2 = tabulate(dataD, headers=\"keys\", showIndex=True, tableFmt=\"armi\")\n        self.assertEqual(expected, result2)\n\n\nclass TestTabulateInternal(unittest.TestCase):\n    def test_alignColumnDecimal(self):\n        \"\"\"Internal: _align_column(..., 'decimal').\"\"\"\n        column = [\"12.345\", \"-1234.5\", \"1.23\", \"1234.5\", \"1e+234\", \"1.0e234\"]\n        result = _alignColumn(column, \"decimal\")\n        expected = [\n            \"   12.345  \",\n            \"-1234.5    \",\n            \"    1.23   \",\n            \" 1234.5    \",\n            \"    1e+234 \",\n            \"    1.0e234\",\n        ]\n        self.assertEqual(expected, result)\n\n    def test_alignColDecThousandSeps(self):\n        \"\"\"Internal: _align_column(..., 'decimal').\"\"\"\n        column = [\"12.345\", \"-1234.5\", \"1.23\", \"1,234.5\", \"1e+234\", \"1.0e234\"]\n        output = _alignColumn(column, \"decimal\")\n        expected = [\n            \"   12.345  \",\n            \"-1234.5    \",\n            \"    1.23   \",\n            \"1,234.5    \",\n            \"    1e+234 \",\n            \"    1.0e234\",\n        ]\n        self.assertEqual(expected, output)\n\n    def test_alignColDecIncorrectThousandSeps(self):\n        \"\"\"Internal: _align_column(..., 'decimal').\"\"\"\n        column = [\"12.345\", \"-1234.5\", \"1.23\", \"12,34.5\", \"1e+234\", \"1.0e234\"]\n        output = _alignColumn(column, \"decimal\")\n        expected = [\n            \"     12.345  \",\n            \"  -1234.5    \",\n            \"      1.23   \",\n            \"12,34.5      \",\n            \"   
   1e+234 \",\n            \"      1.0e234\",\n        ]\n        self.assertEqual(expected, output)\n\n    def test_alignColumnNone(self):\n        \"\"\"Internal: _align_column(..., None).\"\"\"\n        column = [\"123.4\", \"56.7890\"]\n        output = _alignColumn(column, None)\n        expected = [\"123.4\", \"56.7890\"]\n        self.assertEqual(expected, output)\n\n    def test_alignColumnMultiline(self):\n        \"\"\"Internal: _align_column(..., is_multiline=True).\"\"\"\n        column = [\"1\", \"123\", \"12345\\n6\"]\n        output = _alignColumn(column, \"center\", isMultiline=True)\n        expected = [\"  1  \", \" 123 \", \"12345\" + \"\\n\" + \"  6  \"]\n        self.assertEqual(expected, output)\n\n    def test_alignCellVeriticallyOneLineOnly(self):\n        \"\"\"Internal: Aligning a single height cell is same regardless of alignment value.\"\"\"\n        lines = [\"one line\"]\n        column_width = 8\n\n        top = _alignCellVeritically(lines, 1, column_width, \"top\")\n        center = _alignCellVeritically(lines, 1, column_width, \"center\")\n        bottom = _alignCellVeritically(lines, 1, column_width, \"bottom\")\n        none = _alignCellVeritically(lines, 1, column_width, None)\n\n        expected = [\"one line\"]\n        assert top == center == bottom == none == expected\n\n    def test_alignCellVertTopSingleTxtMultiPad(self):\n        \"\"\"Internal: Align single cell text to top.\"\"\"\n        result = _alignCellVeritically([\"one line\"], 3, 8, \"top\")\n        expected = [\"one line\", \"        \", \"        \"]\n        self.assertEqual(expected, result)\n\n    def test_alignCellVertCenterSingleTxtMultiPad(self):\n        \"\"\"Internal: Align single cell text to center.\"\"\"\n        result = _alignCellVeritically([\"one line\"], 3, 8, \"center\")\n        expected = [\"        \", \"one line\", \"        \"]\n        self.assertEqual(expected, result)\n\n    def test_alignCellVertBottomSingleTxtMultiPad(self):\n       
 \"\"\"Internal: Align single cell text to bottom.\"\"\"\n        result = _alignCellVeritically([\"one line\"], 3, 8, \"bottom\")\n        expected = [\"        \", \"        \", \"one line\"]\n        self.assertEqual(expected, result)\n\n    def test_alignCellVertTopMultiTxtMultiPad(self):\n        \"\"\"Internal: Align multiline cell text to top.\"\"\"\n        text = [\"just\", \"one \", \"cell\"]\n        result = _alignCellVeritically(text, 6, 4, \"top\")\n        expected = [\"just\", \"one \", \"cell\", \"    \", \"    \", \"    \"]\n        self.assertEqual(expected, result)\n\n    def test_alignCellVertCenterMultiTxtMultiPad(self):\n        \"\"\"Internal: Align multiline cell text to center.\"\"\"\n        text = [\"just\", \"one \", \"cell\"]\n        result = _alignCellVeritically(text, 6, 4, \"center\")\n\n        # Even number of rows, can't perfectly center, but we pad less\n        # at top when required to make a judgement\n        expected = [\"    \", \"just\", \"one \", \"cell\", \"    \", \"    \"]\n        self.assertEqual(expected, result)\n\n    def test_alignCellVertBottomMultiTxtMultiPad(self):\n        \"\"\"Internal: Align multiline cell text to bottom.\"\"\"\n        text = [\"just\", \"one \", \"cell\"]\n        result = _alignCellVeritically(text, 6, 4, \"bottom\")\n        expected = [\"    \", \"    \", \"    \", \"just\", \"one \", \"cell\"]\n        self.assertEqual(expected, result)\n\n    def test_assortedRareEdgeCases(self):\n        \"\"\"Test some of the more rare edge cases in the purely internal functions.\"\"\"\n        from armi.utils.tabulate import (\n            _alignHeader,\n            _prependRowIndex,\n            _removeSeparatingLines,\n        )\n\n        self.assertEqual(_alignHeader(\"123\", False, 3, 3, False, None), \"123\")\n\n        result = _removeSeparatingLines(123)\n        self.assertEqual(result[0], 123)\n        self.assertIsNone(result[1])\n\n        
self.assertEqual(_prependRowIndex([123], None), [123])\n\n    def test_bool(self):\n        self.assertTrue(_bool(\"stuff\"))\n        self.assertFalse(_bool(\"\"))\n        self.assertTrue(_bool(123))\n        self.assertFalse(_bool(np.array([1, 0, -1])))\n\n    def test_buildLine(self):\n        \"\"\"Basic sanity test of internal _buildLine() function.\"\"\"\n        lineFormat = _tableFormats[\"armi\"].lineabove\n        self.assertEqual(_buildLine([2, 2], [\"center\", \"center\"], lineFormat), \"--  --\")\n\n        formatter = lambda a, b: \"xyz\"\n        self.assertEqual(_buildLine([2, 2], [\"center\", \"center\"], formatter), \"xyz\")\n\n        self.assertIsNone(_buildLine([2, 2], [\"center\", \"center\"], None))\n\n    def test_buildRow(self):\n        \"\"\"Basic sanity test of internal _buildRow() function.\"\"\"\n        rowFormat = _tableFormats[\"armi\"].datarow\n        self.assertEqual(_buildRow(\"\", [2, 2], [\"center\", \"center\"], rowFormat), \"\")\n\n        formatter = lambda a, b, c: \"xyz\"\n        d = {\"a\": 1, \"b\": 2}\n        self.assertEqual(_buildRow(d, [2, 2], [\"center\", \"center\"], formatter), \"xyz\")\n\n        lst = [\"ab\", \"cd\"]\n        self.assertEqual(_buildRow(lst, [2, 2], [\"center\", \"center\"], rowFormat), \"ab  cd\")\n\n        self.assertIsNone(_buildRow(\"ab\", [2, 2], [\"center\", \"center\"], \"\"))\n\n    def test_format(self):\n        \"\"\"Basic sanity test of internal _format() function.\"\"\"\n        self.assertEqual(_format(None, str, \"8\", \"\", \"X\", True), \"X\")\n        self.assertEqual(_format(123, str, \"8\", \"\", \"X\", True), \"123\")\n        self.assertEqual(_format(\"123\", int, \"8\", \"\", \"X\", True), \"123\")\n        self.assertEqual(_format(bytes(\"abc\", \"utf-8\"), bytes, \"8\", \"\", \"X\", True), \"abc\")\n        self.assertEqual(_format(\"3.14\", float, \"4\", \"\", \"X\", True), \"3.14\")\n        colorNum = \"\\x1b[31m3.14\\x1b[0m\"\n        
self.assertEqual(_format(colorNum, float, \"4\", \"\", \"X\", True), colorNum)\n        self.assertEqual(_format(None, None, \"8\", \"\", \"X\", True), \"X\")\n\n    def test_isMultiline(self):\n        \"\"\"Basic sanity test of internal _isMultiline() function.\"\"\"\n        self.assertFalse(_isMultiline(\"world\"))\n        self.assertTrue(_isMultiline(\"hello\\nworld\"))\n        self.assertFalse(_isMultiline(bytes(\"world\", \"utf-8\")))\n        self.assertTrue(_isMultiline(bytes(\"hello\\nworld\", \"utf-8\")))\n\n    def test_multilineWidth(self):\n        \"\"\"Internal: _multilineWidth().\"\"\"\n        multilineString = \"\\n\".join([\"foo\", \"barbaz\", \"spam\"])\n        self.assertEqual(_multilineWidth(multilineString), 6)\n        onelineString = \"12345\"\n        self.assertEqual(_multilineWidth(onelineString), len(onelineString))\n\n    def test_normalizeTabularData(self):\n        \"\"\"Basic sanity test of internal _normalizeTabularData() function.\"\"\"\n        res = _normalizeTabularData([[1, 2], [3, 4]], np.array([\"a\", \"b\"]), \"default\")\n        self.assertEqual(res[0], [[1, 2], [3, 4]])\n        self.assertEqual(res[1], [\"a\", \"b\"])\n        self.assertEqual(res[2], 0)\n\n        res = _normalizeTabularData([], \"keys\", \"default\")\n        self.assertEqual(len(res[0]), 0)\n        self.assertEqual(len(res[1]), 0)\n        self.assertEqual(res[2], 0)\n\n        res = _normalizeTabularData([], \"firstrow\", \"default\")\n        self.assertEqual(len(res[0]), 0)\n        self.assertEqual(len(res[1]), 0)\n        self.assertEqual(res[2], 0)\n\n        @dataclass\n        class row:\n            a: int\n            b: int\n\n        rows = [row(1, 2), row(3, 4)]\n        res = _normalizeTabularData(rows, \"keys\", \"default\")\n        self.assertEqual(res[0], [[1, 2], [3, 4]])\n        self.assertEqual(res[1], [\"a\", \"b\"])\n        self.assertEqual(res[2], 0)\n\n        res = _normalizeTabularData(rows, [\"x\", \"y\"], 
\"default\")\n        self.assertEqual(res[0], [[1, 2], [3, 4]])\n        self.assertEqual(res[1], [\"x\", \"y\"])\n        self.assertEqual(res[2], 0)\n\n    def test_type(self):\n        \"\"\"Basic sanity test of internal _type() function.\"\"\"\n        self.assertEqual(_type(None), type(None))\n        self.assertEqual(_type(\"foo\"), type(\"\"))\n        self.assertEqual(_type(\"1\"), type(1))\n        self.assertEqual(_type(\"\\x1b[31m42\\x1b[0m\"), type(42))\n        self.assertEqual(_type(\"\\x1b[31m42\\x1b[0m\"), type(42))\n        self.assertEqual(_type(datetime.now()), type(\"2024-12-31\"))\n\n    def test_visibleWidth(self):\n        \"\"\"Basic sanity test of internal _visibleWidth() function.\"\"\"\n        self.assertEqual(_visibleWidth(\"world\"), 5)\n        self.assertEqual(_visibleWidth(\"\\x1b[31mhello\\x1b[0m\"), 5)\n        self.assertEqual(_visibleWidth(np.ones(3)), 10)\n\n    def test_wrapTextToColWidths(self):\n        \"\"\"Basic sanity test of internal _wrapTextToColWidths() function.\"\"\"\n        res = _wrapTextToColWidths([], [2, 2], True)\n        self.assertEqual(len(res), 0)\n\n        res = _wrapTextToColWidths([[1], [2]], [2, 2], True)\n        self.assertEqual(res[0][0], 1)\n        self.assertEqual(res[1][0], 2)\n\n        res = _wrapTextToColWidths([[\"1\"], [\"2\"]], [2, 2], False)\n        self.assertEqual(res[0][0], \"1\")\n        self.assertEqual(res[1][0], \"2\")\n\n\nclass TestTabulateOutput(unittest.TestCase):\n    @classmethod\n    def setUpClass(cls):\n        cls.testTable = [[\"spam\", 41.9999], [\"eggs\", \"451.0\"]]\n        cls.testTableWithSepLine = [\n            [\"spam\", 41.9999],\n            SEPARATING_LINE,\n            [\"eggs\", \"451.0\"],\n        ]\n        cls.testTableHeaders = [\"strings\", \"numbers\"]\n\n    def test_plain(self):\n        \"\"\"Output: plain with headers.\"\"\"\n        expected = \"\\n\".join([\"strings      numbers\", \"spam         41.9999\", \"eggs        451\"])\n        
result = tabulate(self.testTable, self.testTableHeaders, tableFmt=\"plain\")\n        self.assertEqual(expected, result)\n\n    def test_plainNoHeader(self):\n        \"\"\"Output: plain without headers.\"\"\"\n        expected = \"\\n\".join([\"spam   41.9999\", \"eggs  451\"])\n        result = tabulate(self.testTable, tableFmt=\"plain\")\n        self.assertEqual(expected, result)\n\n    def test_plainMultilineNoHeader(self):\n        \"\"\"Output: plain with multiline cells without headers.\"\"\"\n        table = [[\"foo bar\\nbaz\\nbau\", \"hello\"], [\"\", \"multiline\\nworld\"]]\n        expected = \"\\n\".join(\n            [\n                \"foo bar    hello\",\n                \"  baz\",\n                \"  bau\",\n                \"         multiline\",\n                \"           world\",\n            ]\n        )\n        result = tabulate(table, strAlign=\"center\", tableFmt=\"plain\")\n        self.assertEqual(expected, result)\n\n    def test_plainMultiline(self):\n        \"\"\"Output: plain with multiline cells with headers.\"\"\"\n        table = [[2, \"foo\\nbar\"]]\n        headers = (\"more\\nspam \\x1b[31meggs\\x1b[0m\", \"more spam\\n& eggs\")\n        expected = \"\\n\".join(\n            [\n                \"       more  more spam\",\n                \"  spam \\x1b[31meggs\\x1b[0m  & eggs\",\n                \"          2  foo\",\n                \"             bar\",\n            ]\n        )\n        result = tabulate(table, headers, tableFmt=\"plain\")\n        self.assertEqual(expected, result)\n\n    def test_plainMultilineLinks(self):\n        \"\"\"Output: plain with multiline cells with links and headers.\"\"\"\n        table = [[2, \"foo\\nbar\"]]\n        headers = (\n            \"more\\nspam \\x1b]8;;target\\x1b\\\\eggs\\x1b]8;;\\x1b\\\\\",\n            \"more spam\\n& eggs\",\n        )\n        expected = \"\\n\".join(\n            [\n                \"       more  more spam\",\n                \"  spam 
\\x1b]8;;target\\x1b\\\\eggs\\x1b]8;;\\x1b\\\\  & eggs\",\n                \"          2  foo\",\n                \"             bar\",\n            ]\n        )\n        result = tabulate(table, headers, tableFmt=\"plain\")\n        self.assertEqual(expected, result)\n\n    def test_plainMultilineEmptyCells(self):\n        \"\"\"Output: plain with multiline cells and empty cells with headers.\"\"\"\n        table = [\n            [\"hdr\", \"data\", \"fold\"],\n            [\"1\", \"\", \"\"],\n            [\"2\", \"very long data\", \"fold\\nthis\"],\n        ]\n        expected = \"\\n\".join(\n            [\n                \"  hdr  data            fold\",\n                \"    1\",\n                \"    2  very long data  fold\",\n                \"                       this\",\n            ]\n        )\n        result = tabulate(table, headers=\"firstrow\", tableFmt=\"plain\")\n        self.assertEqual(expected, result)\n\n    def test_plainMultilineEmptyCellsNoHeader(self):\n        \"\"\"Output: plain with multiline cells and empty cells without headers.\"\"\"\n        table = [[\"0\", \"\", \"\"], [\"1\", \"\", \"\"], [\"2\", \"very long data\", \"fold\\nthis\"]]\n        expected = \"\\n\".join([\"0\", \"1\", \"2  very long data  fold\", \"                   this\"])\n        result = tabulate(table, tableFmt=\"plain\")\n        self.assertEqual(expected, result)\n\n    def test_plainMaxcolwidthAutowraps(self):\n        \"\"\"Output: maxcolwidth will result in autowrapping longer cells.\"\"\"\n        table = [[\"hdr\", \"fold\"], [\"1\", \"very long data\"]]\n        expected = \"\\n\".join([\"  hdr  fold\", \"    1  very long\", \"       data\"])\n        result = tabulate(table, headers=\"firstrow\", tableFmt=\"plain\", maxColWidths=[10, 10])\n        self.assertEqual(expected, result)\n\n    def test_plainMaxcolwidthAutowrapsSep(self):\n        \"\"\"Output: maxcolwidth will result in autowrapping longer cells and separating line.\"\"\"\n        
table = [\n            [\"hdr\", \"fold\"],\n            [\"1\", \"very long data\"],\n            SEPARATING_LINE,\n            [\"2\", \"last line\"],\n        ]\n        expected = \"\\n\".join([\"  hdr  fold\", \"    1  very long\", \"       data\", \"\", \"    2  last line\"])\n        result = tabulate(table, headers=\"firstrow\", tableFmt=\"plain\", maxColWidths=[10, 10])\n        self.assertEqual(expected, result)\n\n    def test_maxColWidthsingleValue(self):\n        \"\"\"Output: maxcolwidth can be specified as a single number that works for each column.\"\"\"\n        table = [\n            [\"hdr\", \"fold1\", \"fold2\"],\n            [\"mini\", \"this is short\", \"this is a bit longer\"],\n        ]\n        expected = \"\\n\".join(\n            [\n                \"hdr    fold1    fold2\",\n                \"mini   this     this\",\n                \"       is       is a\",\n                \"       short    bit\",\n                \"                longer\",\n            ]\n        )\n        result = tabulate(table, headers=\"firstrow\", tableFmt=\"plain\", maxColWidths=6)\n        self.assertEqual(expected, result)\n\n    def test_maxcolwidthPadTailingWidths(self):\n        \"\"\"Output: maxcolwidth, if only partly specified, pads tailing cols with None.\"\"\"\n        table = [\n            [\"hdr\", \"fold1\", \"fold2\"],\n            [\"mini\", \"this is short\", \"this is a bit longer\"],\n        ]\n        expected = \"\\n\".join(\n            [\n                \"hdr    fold1    fold2\",\n                \"mini   this     this is a bit longer\",\n                \"       is\",\n                \"       short\",\n            ]\n        )\n        result = tabulate(table, headers=\"firstrow\", tableFmt=\"plain\", maxColWidths=[None, 6])\n        self.assertEqual(expected, result)\n\n    def test_maxcolwidthHonorDisableParsenum(self):\n        \"\"\"Output: Using maxcolwidth in conjunction with disable_parsenum is honored.\"\"\"\n        table 
= [\n            [\"first number\", 123.456789, \"123.456789\"],\n            [\"second number\", \"987654321.123\", \"987654321.123\"],\n        ]\n        expected = \"\\n\".join(\n            [\n                \"+--------+---------------+--------+\",\n                \"| first  | 123.457       | 123.45 |\",\n                \"| number |               | 6789   |\",\n                \"+--------+---------------+--------+\",\n                \"| second |   9.87654e+08 | 987654 |\",\n                \"| number |               | 321.12 |\",\n                \"|        |               | 3      |\",\n                \"+--------+---------------+--------+\",\n            ]\n        )\n        # Grid makes showing the alignment difference a little easier\n        result = tabulate(table, tableFmt=\"grid\", maxColWidths=6, disableNumParse=[2])\n        self.assertEqual(expected, result)\n\n    def test_plainmaxHeaderColWidthsAutowraps(self):\n        \"\"\"Output: maxHeaderColWidths will result in autowrapping header cell.\"\"\"\n        table = [[\"hdr\", \"fold\"], [\"1\", \"very long data\"]]\n        expected = \"\\n\".join([\"  hdr  fo\", \"       ld\", \"    1  very long\", \"       data\"])\n        result = tabulate(\n            table,\n            headers=\"firstrow\",\n            tableFmt=\"plain\",\n            maxColWidths=[10, 10],\n            maxHeaderColWidths=[None, 2],\n        )\n        self.assertEqual(expected, result)\n\n    def test_simple(self):\n        \"\"\"Output: simple with headers.\"\"\"\n        expected = \"\\n\".join(\n            [\n                \"strings      numbers\",\n                \"---------  ---------\",\n                \"spam         41.9999\",\n                \"eggs        451\",\n            ]\n        )\n        result = tabulate(self.testTable, self.testTableHeaders, tableFmt=\"simple\")\n        self.assertEqual(expected, result)\n\n    def test_simpleSepLine(self):\n        \"\"\"Output: simple with headers and 
separating line.\"\"\"\n        expected = \"\\n\".join(\n            [\n                \"strings      numbers\",\n                \"---------  ---------\",\n                \"spam         41.9999\",\n                \"---------  ---------\",\n                \"eggs        451\",\n            ]\n        )\n        result = tabulate(self.testTableWithSepLine, self.testTableHeaders, tableFmt=\"simple\")\n        self.assertEqual(expected, result)\n\n    def test_readmeExampleSep(self):\n        table = [[\"Earth\", 6371], [\"Mars\", 3390], SEPARATING_LINE, [\"Moon\", 1737]]\n        expected = \"\\n\".join(\n            [\n                \"-----  ----\",\n                \"Earth  6371\",\n                \"Mars   3390\",\n                \"-----  ----\",\n                \"Moon   1737\",\n                \"-----  ----\",\n            ]\n        )\n        result = tabulate(table, tableFmt=\"simple\")\n        self.assertEqual(expected, result)\n\n    def test_simpleMultiline2(self):\n        \"\"\"Output: simple with multiline cells.\"\"\"\n        expected = \"\\n\".join(\n            [\n                \" key     value\",\n                \"-----  ---------\",\n                \" foo      bar\",\n                \"spam   multiline\",\n                \"         world\",\n            ]\n        )\n        table = [[\"key\", \"value\"], [\"foo\", \"bar\"], [\"spam\", \"multiline\\nworld\"]]\n        result = tabulate(table, headers=\"firstrow\", strAlign=\"center\", tableFmt=\"simple\")\n        self.assertEqual(expected, result)\n\n    def test_simpleMultiline2SepLine(self):\n        \"\"\"Output: simple with multiline cells.\"\"\"\n        expected = \"\\n\".join(\n            [\n                \" key     value\",\n                \"-----  ---------\",\n                \" foo      bar\",\n                \"-----  ---------\",\n                \"spam   multiline\",\n                \"         world\",\n            ]\n        )\n        table = [\n            
[\"key\", \"value\"],\n            [\"foo\", \"bar\"],\n            SEPARATING_LINE,\n            [\"spam\", \"multiline\\nworld\"],\n        ]\n        result = tabulate(table, headers=\"firstrow\", strAlign=\"center\", tableFmt=\"simple\")\n        self.assertEqual(expected, result)\n\n    def test_simpleNoHeader(self):\n        \"\"\"Output: simple without headers.\"\"\"\n        expected = \"\\n\".join([\"----  --------\", \"spam   41.9999\", \"eggs  451\", \"----  --------\"])\n        result = tabulate(self.testTable, tableFmt=\"simple\")\n        self.assertEqual(expected, result)\n\n    def test_simpleNoHeaderSepLine(self):\n        \"\"\"Output: simple without headers.\"\"\"\n        expected = \"\\n\".join(\n            [\n                \"----  --------\",\n                \"spam   41.9999\",\n                \"----  --------\",\n                \"eggs  451\",\n                \"----  --------\",\n            ]\n        )\n        result = tabulate(self.testTableWithSepLine, tableFmt=\"simple\")\n        self.assertEqual(expected, result)\n\n    def test_simpleMultilineNoHeader(self):\n        \"\"\"Output: simple with multiline cells without headers.\"\"\"\n        table = [[\"foo bar\\nbaz\\nbau\", \"hello\"], [\"\", \"multiline\\nworld\"]]\n        expected = \"\\n\".join(\n            [\n                \"-------  ---------\",\n                \"foo bar    hello\",\n                \"  baz\",\n                \"  bau\",\n                \"         multiline\",\n                \"           world\",\n                \"-------  ---------\",\n            ]\n        )\n        result = tabulate(table, strAlign=\"center\", tableFmt=\"simple\")\n        self.assertEqual(expected, result)\n\n    def test_simpleMultiline(self):\n        \"\"\"Output: simple with multiline cells with headers.\"\"\"\n        table = [[2, \"foo\\nbar\"]]\n        headers = (\"more\\nspam \\x1b[31meggs\\x1b[0m\", \"more spam\\n& eggs\")\n        expected = \"\\n\".join(\n       
     [\n                \"       more  more spam\",\n                \"  spam \\x1b[31meggs\\x1b[0m  & eggs\",\n                \"-----------  -----------\",\n                \"          2  foo\",\n                \"             bar\",\n            ]\n        )\n        result = tabulate(table, headers, tableFmt=\"simple\")\n        self.assertEqual(expected, result)\n\n    def test_simpleMultilineLinks(self):\n        \"\"\"Output: simple with multiline cells with links and headers.\"\"\"\n        table = [[2, \"foo\\nbar\"]]\n        headers = (\n            \"more\\nspam \\x1b]8;;target\\x1b\\\\eggs\\x1b]8;;\\x1b\\\\\",\n            \"more spam\\n& eggs\",\n        )\n        expected = \"\\n\".join(\n            [\n                \"       more  more spam\",\n                \"  spam \\x1b]8;;target\\x1b\\\\eggs\\x1b]8;;\\x1b\\\\  & eggs\",\n                \"-----------  -----------\",\n                \"          2  foo\",\n                \"             bar\",\n            ]\n        )\n        result = tabulate(table, headers, tableFmt=\"simple\")\n        self.assertEqual(expected, result)\n\n    def test_simpleMultilineEmptyCells(self):\n        \"\"\"Output: simple with multiline cells and empty cells with headers.\"\"\"\n        table = [\n            [\"hdr\", \"data\", \"fold\"],\n            [\"1\", \"\", \"\"],\n            [\"2\", \"very long data\", \"fold\\nthis\"],\n        ]\n        expected = \"\\n\".join(\n            [\n                \"  hdr  data            fold\",\n                \"-----  --------------  ------\",\n                \"    1\",\n                \"    2  very long data  fold\",\n                \"                       this\",\n            ]\n        )\n        result = tabulate(table, headers=\"firstrow\", tableFmt=\"simple\")\n        self.assertEqual(expected, result)\n\n    def test_simpleMultilineEmptyCellsNoHeader(self):\n        \"\"\"Output: simple with multiline cells and empty cells without headers.\"\"\"\n       
 table = [[\"0\", \"\", \"\"], [\"1\", \"\", \"\"], [\"2\", \"very long data\", \"fold\\nthis\"]]\n        expected = \"\\n\".join(\n            [\n                \"-  --------------  ----\",\n                \"0\",\n                \"1\",\n                \"2  very long data  fold\",\n                \"                   this\",\n                \"-  --------------  ----\",\n            ]\n        )\n        result = tabulate(table, tableFmt=\"simple\")\n        self.assertEqual(expected, result)\n\n    def test_github(self):\n        \"\"\"Output: github with headers.\"\"\"\n        expected = \"\\n\".join(\n            [\n                \"| strings   |   numbers |\",\n                \"|-----------|-----------|\",\n                \"| spam      |   41.9999 |\",\n                \"| eggs      |  451      |\",\n            ]\n        )\n        result = tabulate(self.testTable, self.testTableHeaders, tableFmt=\"github\")\n        self.assertEqual(expected, result)\n\n    def test_grid(self):\n        \"\"\"Output: grid with headers.\"\"\"\n        expected = \"\\n\".join(\n            [\n                \"+-----------+-----------+\",\n                \"| strings   |   numbers |\",\n                \"+===========+===========+\",\n                \"| spam      |   41.9999 |\",\n                \"+-----------+-----------+\",\n                \"| eggs      |  451      |\",\n                \"+-----------+-----------+\",\n            ]\n        )\n        result = tabulate(self.testTable, self.testTableHeaders, tableFmt=\"grid\")\n        self.assertEqual(expected, result)\n\n    def test_gridNoHeader(self):\n        \"\"\"Output: grid without headers.\"\"\"\n        expected = \"\\n\".join(\n            [\n                \"+------+----------+\",\n                \"| spam |  41.9999 |\",\n                \"+------+----------+\",\n                \"| eggs | 451      |\",\n                \"+------+----------+\",\n            ]\n        )\n        result = 
tabulate(self.testTable, tableFmt=\"grid\")\n        self.assertEqual(expected, result)\n\n    def test_gridMultilineNoHeader(self):\n        \"\"\"Output: grid with multiline cells without headers.\"\"\"\n        table = [[\"foo bar\\nbaz\\nbau\", \"hello\"], [\"\", \"multiline\\nworld\"]]\n        expected = \"\\n\".join(\n            [\n                \"+---------+-----------+\",\n                \"| foo bar |   hello   |\",\n                \"|   baz   |           |\",\n                \"|   bau   |           |\",\n                \"+---------+-----------+\",\n                \"|         | multiline |\",\n                \"|         |   world   |\",\n                \"+---------+-----------+\",\n            ]\n        )\n        result = tabulate(table, strAlign=\"center\", tableFmt=\"grid\")\n        self.assertEqual(expected, result)\n\n    def test_gridMultiline(self):\n        \"\"\"Output: grid with multiline cells with headers.\"\"\"\n        table = [[2, \"foo\\nbar\"]]\n        headers = (\"more\\nspam \\x1b[31meggs\\x1b[0m\", \"more spam\\n& eggs\")\n        expected = \"\\n\".join(\n            [\n                \"+-------------+-------------+\",\n                \"|        more | more spam   |\",\n                \"|   spam \\x1b[31meggs\\x1b[0m | & eggs      |\",\n                \"+=============+=============+\",\n                \"|           2 | foo         |\",\n                \"|             | bar         |\",\n                \"+-------------+-------------+\",\n            ]\n        )\n        result = tabulate(table, headers, tableFmt=\"grid\")\n        self.assertEqual(expected, result)\n\n    def test_gridMultilineEmptyCells(self):\n        \"\"\"Output: grid with multiline cells and empty cells with headers.\"\"\"\n        table = [\n            [\"hdr\", \"data\", \"fold\"],\n            [\"1\", \"\", \"\"],\n            [\"2\", \"very long data\", \"fold\\nthis\"],\n        ]\n        expected = \"\\n\".join(\n            [\n         
       \"+-------+----------------+--------+\",\n                \"|   hdr | data           | fold   |\",\n                \"+=======+================+========+\",\n                \"|     1 |                |        |\",\n                \"+-------+----------------+--------+\",\n                \"|     2 | very long data | fold   |\",\n                \"|       |                | this   |\",\n                \"+-------+----------------+--------+\",\n            ]\n        )\n        result = tabulate(table, headers=\"firstrow\", tableFmt=\"grid\")\n        self.assertEqual(expected, result)\n\n    def test_gridMultilineEmptyCellsNoHeader(self):\n        \"\"\"Output: grid with multiline cells and empty cells without headers.\"\"\"\n        table = [[\"0\", \"\", \"\"], [\"1\", \"\", \"\"], [\"2\", \"very long data\", \"fold\\nthis\"]]\n        expected = \"\\n\".join(\n            [\n                \"+---+----------------+------+\",\n                \"| 0 |                |      |\",\n                \"+---+----------------+------+\",\n                \"| 1 |                |      |\",\n                \"+---+----------------+------+\",\n                \"| 2 | very long data | fold |\",\n                \"|   |                | this |\",\n                \"+---+----------------+------+\",\n            ]\n        )\n        result = tabulate(table, tableFmt=\"grid\")\n        self.assertEqual(expected, result)\n\n    def test_pretty(self):\n        \"\"\"Output: pretty with headers.\"\"\"\n        expected = \"\\n\".join(\n            [\n                \"+---------+---------+\",\n                \"| strings | numbers |\",\n                \"+---------+---------+\",\n                \"|  spam   | 41.9999 |\",\n                \"|  eggs   |  451.0  |\",\n                \"+---------+---------+\",\n            ]\n        )\n        result = tabulate(self.testTable, self.testTableHeaders, tableFmt=\"pretty\")\n        self.assertEqual(expected, result)\n\n    def 
test_prettyNoHeader(self):\n        \"\"\"Output: pretty without headers.\"\"\"\n        expected = \"\\n\".join(\n            [\n                \"+------+---------+\",\n                \"| spam | 41.9999 |\",\n                \"| eggs |  451.0  |\",\n                \"+------+---------+\",\n            ]\n        )\n        result = tabulate(self.testTable, tableFmt=\"pretty\")\n        self.assertEqual(expected, result)\n\n    def test_prettyMultilineNoHeader(self):\n        \"\"\"Output: pretty with multiline cells without headers.\"\"\"\n        table = [[\"foo bar\\nbaz\\nbau\", \"hello\"], [\"\", \"multiline\\nworld\"]]\n        expected = \"\\n\".join(\n            [\n                \"+---------+-----------+\",\n                \"| foo bar |   hello   |\",\n                \"|   baz   |           |\",\n                \"|   bau   |           |\",\n                \"|         | multiline |\",\n                \"|         |   world   |\",\n                \"+---------+-----------+\",\n            ]\n        )\n        result = tabulate(table, tableFmt=\"pretty\")\n        self.assertEqual(expected, result)\n\n    def test_prettyMultiline(self):\n        \"\"\"Output: pretty with multiline cells with headers.\"\"\"\n        table = [[2, \"foo\\nbar\"]]\n        headers = (\"more\\nspam \\x1b[31meggs\\x1b[0m\", \"more spam\\n& eggs\")\n        expected = \"\\n\".join(\n            [\n                \"+-----------+-----------+\",\n                \"|   more    | more spam |\",\n                \"| spam \\x1b[31meggs\\x1b[0m |  & eggs   |\",\n                \"+-----------+-----------+\",\n                \"|     2     |    foo    |\",\n                \"|           |    bar    |\",\n                \"+-----------+-----------+\",\n            ]\n        )\n        result = tabulate(table, headers, tableFmt=\"pretty\")\n        self.assertEqual(expected, result)\n\n    def test_prettyMultilineLinks(self):\n        \"\"\"Output: pretty with multiline cells with 
headers.\"\"\"\n        table = [[2, \"foo\\nbar\"]]\n        headers = (\n            \"more\\nspam \\x1b]8;;target\\x1b\\\\eggs\\x1b]8;;\\x1b\\\\\",\n            \"more spam\\n& eggs\",\n        )\n        expected = \"\\n\".join(\n            [\n                \"+-----------+-----------+\",\n                \"|   more    | more spam |\",\n                \"| spam \\x1b]8;;target\\x1b\\\\eggs\\x1b]8;;\\x1b\\\\ |  & eggs   |\",\n                \"+-----------+-----------+\",\n                \"|     2     |    foo    |\",\n                \"|           |    bar    |\",\n                \"+-----------+-----------+\",\n            ]\n        )\n        result = tabulate(table, headers, tableFmt=\"pretty\")\n        self.assertEqual(expected, result)\n\n    def test_prettyMultilineEmptyCells(self):\n        \"\"\"Output: pretty with multiline cells and empty cells with headers.\"\"\"\n        table = [\n            [\"hdr\", \"data\", \"fold\"],\n            [\"1\", \"\", \"\"],\n            [\"2\", \"very long data\", \"fold\\nthis\"],\n        ]\n        expected = \"\\n\".join(\n            [\n                \"+-----+----------------+------+\",\n                \"| hdr |      data      | fold |\",\n                \"+-----+----------------+------+\",\n                \"|  1  |                |      |\",\n                \"|  2  | very long data | fold |\",\n                \"|     |                | this |\",\n                \"+-----+----------------+------+\",\n            ]\n        )\n        result = tabulate(table, headers=\"firstrow\", tableFmt=\"pretty\")\n        self.assertEqual(expected, result)\n\n    def test_prettyMultilineEmptyCellsNoHeader(self):\n        \"\"\"Output: pretty with multiline cells and empty cells without headers.\"\"\"\n        table = [[\"0\", \"\", \"\"], [\"1\", \"\", \"\"], [\"2\", \"very long data\", \"fold\\nthis\"]]\n        expected = \"\\n\".join(\n            [\n                \"+---+----------------+------+\",\n        
        \"| 0 |                |      |\",\n                \"| 1 |                |      |\",\n                \"| 2 | very long data | fold |\",\n                \"|   |                | this |\",\n                \"+---+----------------+------+\",\n            ]\n        )\n        result = tabulate(table, tableFmt=\"pretty\")\n        self.assertEqual(expected, result)\n\n    def test_rst(self):\n        \"\"\"Output: rst with headers.\"\"\"\n        expected = \"\\n\".join(\n            [\n                \"=========  =========\",\n                \"strings      numbers\",\n                \"=========  =========\",\n                \"spam         41.9999\",\n                \"eggs        451\",\n                \"=========  =========\",\n            ]\n        )\n        result = tabulate(self.testTable, self.testTableHeaders, tableFmt=\"rst\")\n        self.assertEqual(expected, result)\n\n    def test_rstEmptyValuesInFirstColumn(self):\n        \"\"\"Output: rst with dots in first column.\"\"\"\n        test_headers = [\"\", \"what\"]\n        test_data = [(\"\", \"spam\"), (\"\", \"eggs\")]\n        expected = \"\\n\".join(\n            [\n                \"====  ======\",\n                \"..    what\",\n                \"====  ======\",\n                \"..    spam\",\n                \"..    
eggs\",\n                \"====  ======\",\n            ]\n        )\n        result = tabulate(test_data, test_headers, tableFmt=\"rst\")\n        self.assertEqual(expected, result)\n\n    def test_rstNoHeader(self):\n        \"\"\"Output: rst without headers.\"\"\"\n        expected = \"\\n\".join([\"====  ========\", \"spam   41.9999\", \"eggs  451\", \"====  ========\"])\n        result = tabulate(self.testTable, tableFmt=\"rst\")\n        self.assertEqual(expected, result)\n\n    def test_rstMultiline(self):\n        \"\"\"Output: rst with multiline cells with headers.\"\"\"\n        table = [[2, \"foo\\nbar\"]]\n        headers = (\"more\\nspam \\x1b[31meggs\\x1b[0m\", \"more spam\\n& eggs\")\n        expected = \"\\n\".join(\n            [\n                \"===========  ===========\",\n                \"       more  more spam\",\n                \"  spam \\x1b[31meggs\\x1b[0m  & eggs\",\n                \"===========  ===========\",\n                \"          2  foo\",\n                \"             bar\",\n                \"===========  ===========\",\n            ]\n        )\n        result = tabulate(table, headers, tableFmt=\"rst\")\n        self.assertEqual(expected, result)\n\n    def test_rstMultilineLinks(self):\n        \"\"\"Output: rst with multiline cells with headers.\"\"\"\n        table = [[2, \"foo\\nbar\"]]\n        headers = (\n            \"more\\nspam \\x1b]8;;target\\x1b\\\\eggs\\x1b]8;;\\x1b\\\\\",\n            \"more spam\\n& eggs\",\n        )\n        expected = \"\\n\".join(\n            [\n                \"===========  ===========\",\n                \"       more  more spam\",\n                \"  spam \\x1b]8;;target\\x1b\\\\eggs\\x1b]8;;\\x1b\\\\  & eggs\",\n                \"===========  ===========\",\n                \"          2  foo\",\n                \"             bar\",\n                \"===========  ===========\",\n            ]\n        )\n        result = tabulate(table, headers, tableFmt=\"rst\")\n        
self.assertEqual(expected, result)\n\n    def test_rstMultilineEmptyCells(self):\n        \"\"\"Output: rst with multiline cells and empty cells with headers.\"\"\"\n        table = [\n            [\"hdr\", \"data\", \"fold\"],\n            [\"1\", \"\", \"\"],\n            [\"2\", \"very long data\", \"fold\\nthis\"],\n        ]\n        expected = \"\\n\".join(\n            [\n                \"=====  ==============  ======\",\n                \"  hdr  data            fold\",\n                \"=====  ==============  ======\",\n                \"    1\",\n                \"    2  very long data  fold\",\n                \"                       this\",\n                \"=====  ==============  ======\",\n            ]\n        )\n        result = tabulate(table, headers=\"firstrow\", tableFmt=\"rst\")\n        self.assertEqual(expected, result)\n\n    def test_rstMultilineEmptyCellsNoHeader(self):\n        \"\"\"Output: rst with multiline cells and empty cells without headers.\"\"\"\n        table = [[\"0\", \"\", \"\"], [\"1\", \"\", \"\"], [\"2\", \"very long data\", \"fold\\nthis\"]]\n        expected = \"\\n\".join(\n            [\n                \"=  ==============  ====\",\n                \"0\",\n                \"1\",\n                \"2  very long data  fold\",\n                \"                   this\",\n                \"=  ==============  ====\",\n            ]\n        )\n        result = tabulate(table, tableFmt=\"rst\")\n        self.assertEqual(expected, result)\n\n    def test_noData(self):\n        \"\"\"Output: table with no data.\"\"\"\n        expected = \"\\n\".join([\"strings    numbers\", \"---------  ---------\"])\n        result = tabulate(None, self.testTableHeaders, tableFmt=\"simple\")\n        self.assertEqual(expected, result)\n\n    def test_emptyData(self):\n        \"\"\"Output: table with empty data.\"\"\"\n        expected = \"\\n\".join([\"strings    numbers\", \"---------  ---------\"])\n        result = tabulate([], 
self.testTableHeaders, tableFmt=\"simple\")\n        self.assertEqual(expected, result)\n\n    def test_noDataNoHeader(self):\n        \"\"\"Output: table with no data and no headers.\"\"\"\n        expected = \"\"\n        result = tabulate(None, tableFmt=\"simple\")\n        self.assertEqual(expected, result)\n\n    def test_emptyDataNoHeaders(self):\n        \"\"\"Output: table with empty data and no headers.\"\"\"\n        expected = \"\"\n        result = tabulate([], tableFmt=\"simple\")\n        self.assertEqual(expected, result)\n\n    def test_intFmt(self):\n        \"\"\"Output: integer format.\"\"\"\n        result = tabulate([[10000], [10]], intFmt=\",\", tableFmt=\"plain\")\n        expected = \"10,000\\n    10\"\n        self.assertEqual(expected, result)\n\n    def test_emptyDataHeader(self):\n        \"\"\"Output: table with empty data and headers as firstrow.\"\"\"\n        expected = \"\"\n        result = tabulate([], headers=\"firstrow\")\n        self.assertEqual(expected, result)\n\n    def test_floatFmt(self):\n        \"\"\"Output: floating point format.\"\"\"\n        result = tabulate([[\"1.23456789\"], [1.0]], floatFmt=\".3f\", tableFmt=\"plain\")\n        expected = \"1.235\\n1.000\"\n        self.assertEqual(expected, result)\n\n    def test_floatFmtMulti(self):\n        \"\"\"Output: floating point format different for each column.\"\"\"\n        result = tabulate([[0.12345, 0.12345, 0.12345]], floatFmt=(\".1f\", \".3f\"), tableFmt=\"plain\")\n        expected = \"0.1  0.123  0.12345\"\n        self.assertEqual(expected, result)\n\n    def test_colAlignMulti(self):\n        \"\"\"Output: string columns with custom colAlign.\"\"\"\n        result = tabulate([[\"one\", \"two\"], [\"three\", \"four\"]], colAlign=(\"right\",), tableFmt=\"plain\")\n        expected = \"  one  two\\nthree  four\"\n        self.assertEqual(expected, result)\n\n    def test_colAlignMultiSepLine(self):\n        \"\"\"Output: string columns with custom 
colAlign.\"\"\"\n        result = tabulate(\n            [[\"one\", \"two\"], SEPARATING_LINE, [\"three\", \"four\"]],\n            colAlign=(\"right\",),\n            tableFmt=\"plain\",\n        )\n        expected = \"  one  two\\n\\nthree  four\"\n        self.assertEqual(expected, result)\n\n    def test_columnGlobalAndSpecificAlignment(self):\n        \"\"\"Test `colGlobalAlign` and `\"global\"` parameter for `colAlign`.\"\"\"\n        table = [[1, 2, 3, 4], [111, 222, 333, 444]]\n        colGlobalAlign = \"center\"\n        colAlign = (\"global\", \"left\", \"right\")\n        result = tabulate(table, colGlobalAlign=colGlobalAlign, colAlign=colAlign)\n        expected = \"\\n\".join(\n            [\n                \"---  ---  ---  ---\",\n                \" 1   2      3   4\",\n                \"111  222  333  444\",\n                \"---  ---  ---  ---\",\n            ]\n        )\n        self.assertEqual(expected, result)\n\n    def test_headersGlobalAndSpecificAlignment(self):\n        \"\"\"Test `headersGlobalAlign` and `headersAlign`.\"\"\"\n        table = [[1, 2, 3, 4, 5, 6], [111, 222, 333, 444, 555, 666]]\n        colGlobalAlign = \"center\"\n        colAlign = (\"left\",)\n        headers = [\"h\", \"e\", \"a\", \"d\", \"e\", \"r\"]\n        headersGlobalAlign = \"right\"\n        headersAlign = (\"same\", \"same\", \"left\", \"global\", \"center\")\n        result = tabulate(\n            table,\n            headers=headers,\n            colGlobalAlign=colGlobalAlign,\n            colAlign=colAlign,\n            headersGlobalAlign=headersGlobalAlign,\n            headersAlign=headersAlign,\n        )\n        expected = \"\\n\".join(\n            [\n                \"h     e   a      d   e     r\",\n                \"---  ---  ---  ---  ---  ---\",\n                \"1     2    3    4    5    6\",\n                \"111  222  333  444  555  666\",\n            ]\n        )\n        self.assertEqual(expected, result)\n\n    def 
test_colAlignOrheadersAlignTooLong(self):\n        \"\"\"Test `colAlign` and `headersAlign` too long.\"\"\"\n        table = [[1, 2], [111, 222]]\n        colAlign = (\"global\", \"left\", \"center\")\n        headers = [\"h\"]\n        headersAlign = (\"center\", \"right\", \"same\")\n        result = tabulate(table, headers=headers, colAlign=colAlign, headersAlign=headersAlign)\n        expected = \"\\n\".join([\"      h\", \"---  ---\", \"  1  2\", \"111  222\"])\n        self.assertEqual(expected, result)\n\n    def test_floatConversions(self):\n        \"\"\"Output: float format parsed.\"\"\"\n        test_headers = [\n            \"str\",\n            \"bad_float\",\n            \"just_float\",\n            \"with_inf\",\n            \"with_nan\",\n            \"neg_inf\",\n        ]\n        testTable = [\n            [\"spam\", 41.9999, \"123.345\", \"12.2\", \"nan\", \"0.123123\"],\n            [\"eggs\", \"451.0\", 66.2222, \"inf\", 123.1234, \"-inf\"],\n            [\"asd\", \"437e6548\", 1.234e2, float(\"inf\"), float(\"nan\"), 0.22e23],\n        ]\n        result = tabulate(testTable, test_headers, tableFmt=\"grid\")\n        expected = \"\\n\".join(\n            [\n                \"+-------+-------------+--------------+------------+------------+-------------+\",\n                \"| str   | bad_float   |   just_float |   with_inf |   with_nan |     neg_inf |\",\n                \"+=======+=============+==============+============+============+=============+\",\n                \"| spam  | 41.9999     |     123.345  |       12.2 |    nan     |    0.123123 |\",\n                \"+-------+-------------+--------------+------------+------------+-------------+\",\n                \"| eggs  | 451.0       |      66.2222 |      inf   |    123.123 | -inf        |\",\n                \"+-------+-------------+--------------+------------+------------+-------------+\",\n                \"| asd   | 437e6548    |     123.4    |      inf   |    nan     |    2.2e+22  
|\",\n                \"+-------+-------------+--------------+------------+------------+-------------+\",\n            ]\n        )\n        self.assertEqual(expected, result)\n\n    def test_missingVal(self):\n        \"\"\"Output: substitution of missing values.\"\"\"\n        result = tabulate([[\"Alice\", 10], [\"Bob\", None]], missingVal=\"n/a\", tableFmt=\"plain\")\n        expected = \"Alice   10\\nBob    n/a\"\n        self.assertEqual(expected, result)\n\n    def test_missingValMulti(self):\n        \"\"\"Output: substitution of missing values with different values per column.\"\"\"\n        result = tabulate(\n            [[\"Alice\", \"Bob\", \"Charlie\"], [None, None, None]],\n            missingVal=(\"n/a\", \"?\"),\n            tableFmt=\"plain\",\n        )\n        expected = \"Alice  Bob  Charlie\\nn/a    ?\"\n        self.assertEqual(expected, result)\n\n    def test_columnAlignment(self):\n        \"\"\"Output: custom alignment for text and numbers.\"\"\"\n        expected = \"\\n\".join([\"-----  ---\", \"Alice   1\", \"  Bob  333\", \"-----  ---\"])\n        result = tabulate([[\"Alice\", 1], [\"Bob\", 333]], strAlign=\"right\", numAlign=\"center\")\n        self.assertEqual(expected, result)\n\n    def test_dictLikeIndex(self):\n        \"\"\"Output: a table with a running index.\"\"\"\n        dd = {\"b\": range(101, 104)}\n        expected = \"\\n\".join([\"      b\", \"--  ---\", \" 0  101\", \" 1  102\", \" 2  103\"])\n        result = tabulate(dd, \"keys\", showIndex=True)\n        self.assertEqual(expected, result)\n\n    def test_listOfListsIndex(self):\n        \"\"\"Output: a table with a running index.\"\"\"\n        dd = zip(*[range(3), range(101, 104)])\n        # keys' order (hence columns' order) is not deterministic in Python 3\n        # => we have to consider both possible results as valid\n        expected = \"\\n\".join(\n            [\n                \"      a    b\",\n                \"--  ---  ---\",\n                \" 
0    0  101\",\n                \" 1    1  102\",\n                \" 2    2  103\",\n            ]\n        )\n        result = tabulate(dd, headers=[\"a\", \"b\"], showIndex=True)\n        self.assertEqual(expected, result)\n\n    def test_listOfListsIndexSepLine(self):\n        \"\"\"Output: a table with a running index.\"\"\"\n        dd = [(0, 101), SEPARATING_LINE, (1, 102), (2, 103)]\n        # keys' order (hence columns' order) is not deterministic in Python 3\n        # => we have to consider both possible results as valid\n        expected = \"\\n\".join(\n            [\n                \"      a    b\",\n                \"--  ---  ---\",\n                \" 0    0  101\",\n                \"--  ---  ---\",\n                \" 1    1  102\",\n                \" 2    2  103\",\n            ]\n        )\n        result = tabulate(dd, headers=[\"a\", \"b\"], showIndex=True)\n        self.assertEqual(expected, result)\n\n    def test_listOfListsSuppliedIndex(self):\n        \"\"\"Output: a table with a supplied index.\"\"\"\n        dd = zip(*[list(range(3)), list(range(101, 104))])\n        expected = \"\\n\".join(\n            [\n                \"      a    b\",\n                \"--  ---  ---\",\n                \" 1    0  101\",\n                \" 2    1  102\",\n                \" 3    2  103\",\n            ]\n        )\n        result = tabulate(dd, headers=[\"a\", \"b\"], showIndex=[1, 2, 3])\n        self.assertEqual(expected, result)\n        # the index must be as long as the number of rows\n        with self.assertRaises(ValueError):\n            tabulate(dd, headers=[\"a\", \"b\"], showIndex=[1, 2])\n\n    def test_listOfListsIndexFirstrow(self):\n        \"\"\"Output: a table with a running index and header='firstrow'.\"\"\"\n        dd = zip(*[[\"a\"] + list(range(3)), [\"b\"] + list(range(101, 104))])\n        expected = \"\\n\".join(\n            [\n                \"      a    b\",\n                \"--  ---  ---\",\n                \" 0   
 0  101\",\n                \" 1    1  102\",\n                \" 2    2  103\",\n            ]\n        )\n        result = tabulate(dd, headers=\"firstrow\", showIndex=True)\n        self.assertEqual(expected, result)\n        # the index must be as long as the number of rows\n        with self.assertRaises(ValueError):\n            tabulate(dd, headers=\"firstrow\", showIndex=[1, 2])\n\n    def test_disableNumParseDefault(self):\n        \"\"\"Output: Default table output with number parsing and alignment.\"\"\"\n        expected = \"\\n\".join(\n            [\n                \"strings      numbers\",\n                \"---------  ---------\",\n                \"spam         41.9999\",\n                \"eggs        451\",\n            ]\n        )\n        result = tabulate(self.testTable, self.testTableHeaders)\n        self.assertEqual(expected, result)\n        result = tabulate(self.testTable, self.testTableHeaders, disableNumParse=False)\n        self.assertEqual(expected, result)\n\n    def test_disableNumParseTrue(self):\n        \"\"\"Output: Default table output, but without number parsing and alignment.\"\"\"\n        expected = \"\\n\".join(\n            [\n                \"strings    numbers\",\n                \"---------  ---------\",\n                \"spam       41.9999\",\n                \"eggs       451.0\",\n            ]\n        )\n        result = tabulate(self.testTable, self.testTableHeaders, disableNumParse=True)\n        self.assertEqual(expected, result)\n\n    def test_disableNumParseList(self):\n        \"\"\"Output: Default table output, but with number parsing selectively disabled.\"\"\"\n        tableHeaders = [\"h1\", \"h2\", \"h3\"]\n        testTable = [[\"foo\", \"bar\", \"42992e1\"]]\n        expected = \"\\n\".join([\"h1    h2    h3\", \"----  ----  -------\", \"foo   bar   42992e1\"])\n        result = tabulate(testTable, tableHeaders, disableNumParse=[2])\n        self.assertEqual(expected, result)\n\n        expected 
= \"\\n\".join([\"h1    h2        h3\", \"----  ----  ------\", \"foo   bar   429920\"])\n        result = tabulate(testTable, tableHeaders, disableNumParse=[0, 1])\n        self.assertEqual(expected, result)\n"
  },
  {
    "path": "armi/utils/tests/test_textProcessors.py",
    "content": "# Copyright 2020 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for functions in textProcessors.py.\"\"\"\n\nimport logging\nimport os\nimport pathlib\nimport unittest\nfrom io import StringIO\n\nimport ruamel\n\nfrom armi import runLog\nfrom armi.testing import TESTING_ROOT\nfrom armi.tests import mockRunLogs\nfrom armi.utils import textProcessors\nfrom armi.utils.directoryChangers import TemporaryDirectoryChanger\n\nTHIS_DIR = os.path.dirname(__file__)\nRES_DIR = os.path.join(THIS_DIR, \"resources\")\n\n\nclass TestTextProcessor(unittest.TestCase):\n    \"\"\"Test Text processor.\"\"\"\n\n    def setUp(self):\n        godivaSettings = os.path.join(TESTING_ROOT, \"reactors\", \"godiva\", \"godiva.armi.unittest.yaml\")\n        self.tp = textProcessors.TextProcessor(godivaSettings)\n\n    def test_fsearch(self):\n        \"\"\"Test fsearch in re mode.\"\"\"\n        line = self.tp.fsearch(\"nTasks\")\n        self.assertIn(\"36\", line)\n        self.assertEqual(self.tp.fsearch(\"nTasks\"), \"\")\n\n    def test_fsearchText(self):\n        \"\"\"Test fsearch in text mode.\"\"\"\n        line = self.tp.fsearch(\"nTasks\", textFlag=True)\n        self.assertIn(\"36\", line)\n        self.assertEqual(self.tp.fsearch(\"nTasks\"), \"\")\n\n\nclass YamlIncludeTest(unittest.TestCase):\n    def test_resolveIncludes(self):\n        with open(os.path.join(RES_DIR, \"root.yaml\")) as f:\n            resolved = 
textProcessors.resolveMarkupInclusions(f, root=pathlib.Path(RES_DIR))\n\n        # Make sure that there aren't any !include tags left in the converted stream\n        anyIncludes = False\n        for l in resolved:\n            if \"!include\" in l:\n                anyIncludes = True\n        self.assertFalse(anyIncludes)\n\n        # Re-parse the resolved stream, make sure that we included the stuff that we want\n        resolved.seek(0)\n        data = ruamel.yaml.YAML().load(resolved)\n        self.assertEqual(data[\"billy\"][\"children\"][1][\"full_name\"], \"Jennifer Person\")\n        self.assertEqual(data[\"billy\"][\"children\"][1][\"children\"][0][\"full_name\"], \"Elizabeth Person\")\n\n        # Check that we preserved other round-trip data\n        resolved.seek(0)\n        commentFound = False\n        anchorFound = False\n        for l in resolved:\n            if l.strip() == \"# some comment in includeA\":\n                commentFound = True\n            if \"*bobby\" in l:\n                anchorFound = True\n\n        self.assertTrue(commentFound)\n        self.assertTrue(anchorFound)\n\n    def test_resolveIncludes_StringIO(self):\n        \"\"\"Tests that resolveMarkupInclusions handles StringIO input.\"\"\"\n        yaml = ruamel.yaml.YAML()\n        with open(os.path.join(RES_DIR, \"root.yaml\")) as f:\n            loadedYaml = yaml.load(f)\n        stringIO = StringIO()\n        yaml.dump(loadedYaml, stringIO)\n        resolved = textProcessors.resolveMarkupInclusions(src=stringIO, root=pathlib.Path(RES_DIR))\n        with open(os.path.join(RES_DIR, \"root.yaml\")) as f:\n            expected = textProcessors.resolveMarkupInclusions(f, root=pathlib.Path(RES_DIR))\n        # strip it because one method gives an extra newline we don't care about\n        self.assertEqual(resolved.getvalue().strip(), expected.getvalue().strip())\n\n    def test_findIncludes(self):\n        includes = textProcessors.findYamlInclusions(pathlib.Path(RES_DIR) / 
\"root.yaml\")\n        for i, _mark in includes:\n            self.assertTrue((RES_DIR / i).exists())\n\n        self.assertEqual(len(includes), 2)\n\n\nclass SequentialReaderTests(unittest.TestCase):\n    textStream = \"\"\"This is an example test stream.\nThis has multiple lines in it and below it contains a set of data that\ncan be found using a regular expression pattern.\nFILE DATA\nX  Y  3.5\nX  Y  4.2\nX  Y  0.0\"\"\"\n\n    _DUMMY_FILE_NAME = \"DUMMY.txt\"\n\n    def setUp(self):\n        self.td = TemporaryDirectoryChanger()\n        self.td.__enter__()\n\n        with open(self._DUMMY_FILE_NAME, \"w\") as f:\n            f.write(self.textStream)\n\n    def tearDown(self):\n        if os.path.exists(self._DUMMY_FILE_NAME):\n            try:\n                os.remove(self._DUMMY_FILE_NAME)\n            except OSError:\n                pass\n\n        self.td.__exit__(None, None, None)\n\n    def test_readFile(self):\n        with textProcessors.SequentialReader(self._DUMMY_FILE_NAME) as sr:\n            self.assertTrue(sr.searchForText(\"FILE DATA\"))\n            self.assertFalse(sr.searchForText(\"This text isn't here.\"))\n\n    def test_readFileWithPattern(self):\n        with textProcessors.SequentialReader(self._DUMMY_FILE_NAME) as sr:\n            self.assertTrue(sr.searchForPattern(r\"(X\\s+Y\\s+\\d+\\.\\d+)\"))\n            self.assertEqual(float(sr.line.split()[2]), 3.5)\n\n    def test_issueWarningOnFindingText(self):\n        with textProcessors.SequentialReader(self._DUMMY_FILE_NAME) as sr:\n            warningMsg = \"Oh no\"\n            sr.issueWarningOnFindingText(\"example test stream\", warningMsg)\n\n            with mockRunLogs.BufferLog() as mock:\n                runLog.LOG.startLog(\"test_issueWarningOnFindingText\")\n                runLog.LOG.setVerbosity(logging.WARNING)\n                self.assertEqual(\"\", mock.getStdout())\n                self.assertTrue(sr.searchForPattern(\"example test stream\"))\n                
self.assertIn(warningMsg, mock.getStdout())\n\n                self.assertFalse(sr.searchForPattern(\"Killer Tomatoes\"))\n\n    def test_raiseErrorOnFindingText(self):\n        with textProcessors.SequentialReader(self._DUMMY_FILE_NAME) as sr:\n            sr.raiseErrorOnFindingText(\"example test stream\", IOError)\n\n            with self.assertRaises(IOError):\n                self.assertTrue(sr.searchForPattern(\"example test stream\"))\n\n    def test_consumeLine(self):\n        with textProcessors.SequentialReader(self._DUMMY_FILE_NAME) as sr:\n            sr.line = \"hi\"\n            sr.match = 1\n            sr.consumeLine()\n            self.assertEqual(len(sr.line), 0)\n            self.assertIsNone(sr.match)\n"
  },
  {
    "path": "armi/utils/tests/test_triangle.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Test the basic triangle math.\"\"\"\n\nimport math\nimport unittest\n\nfrom armi.utils import triangle\n\n\nclass TestTriangle(unittest.TestCase):\n    def test_getTriangleArea(self):\n        \"\"\"Test that getTriangleArea correctly calculates the area of a right triangle.\"\"\"\n        x1 = 0.0\n        y1 = 0.0\n        x2 = 1.0\n        y2 = 0.0\n        x3 = 0.0\n        y3 = 1.0\n        refArea = 1.0 / 2.0 * (y3 - y1) * (x2 - x1)\n        Area = triangle.getTriangleArea(x1, y1, x2, y2, x3, y3)\n        self.assertAlmostEqual(refArea, Area, 6)\n\n    def test_getTriangleCentroid(self):\n        # Right triangle\n        x, y = triangle.getTriangleCentroid(0, 0, 0, 1, 1, 0)\n        self.assertAlmostEqual(x, 1 / 3, delta=1e-10)\n        self.assertAlmostEqual(y, 1 / 3, delta=1e-10)\n\n        # Right triangle, but all in the negative part of the coordinate plane\n        x, y = triangle.getTriangleCentroid(-10, -10, -10, -9, -9, -10)\n        self.assertAlmostEqual(x, -10 + 1 / 3, delta=1e-10)\n        self.assertAlmostEqual(y, -10 + 1 / 3, delta=1e-10)\n\n        # Isosceles triangle\n        x, y = triangle.getTriangleCentroid(-2, 0, 2, 0, 0, 8)\n        self.assertAlmostEqual(x, 0.0, delta=1e-10)\n        self.assertAlmostEqual(y, 2 + 2 / 3, delta=1e-10)\n\n        # Equilateral triangle\n        x, y = triangle.getTriangleCentroid(0, 0, 2, 0, 1, 
 math.sqrt(3))\n        self.assertAlmostEqual(x, 1.0, delta=1e-10)\n        self.assertAlmostEqual(y, 1 / math.sqrt(3), delta=1e-10)\n\n    def test_checkIfPointIsInTriangle(self):\n        \"\"\"Test that checkIfPointIsInTriangle can correctly identify if a point is inside or outside of a triangle.\"\"\"\n        # First check the right triangle case\n        xT1 = 0.0\n        yT1 = 0.0\n        xT2 = 1.0\n        yT2 = 0.0\n        xT3 = 0.0\n        yT3 = 1.0\n        xP = 0.0\n        yP = 0.0\n        rightTriangleInOrOut = triangle.checkIfPointIsInTriangle(xT1, yT1, xT2, yT2, xT3, yT3, xP, yP)\n\n        self.assertTrue(rightTriangleInOrOut)\n\n        # now create a case that should evaluate False\n        xP = 2.0\n        yP = 0.5\n        rightTriangleInOrOut = triangle.checkIfPointIsInTriangle(xT1, yT1, xT2, yT2, xT3, yT3, xP, yP)\n        self.assertFalse(rightTriangleInOrOut)\n\n        # Now check non right triangle\n        xT1 = 26.0\n        yT1 = 10.0\n        xT2 = 100.0\n        yT2 = 0.0\n        xT3 = 0.0\n        yT3 = 100.0\n        xP = 50.0\n        yP = 50.0\n\n        generalTriangleInOrOut = triangle.checkIfPointIsInTriangle(xT1, yT1, xT2, yT2, xT3, yT3, xP, yP)\n        self.assertTrue(generalTriangleInOrOut)\n\n        # now check false case\n        xP = 1.0\n        yP = 60.0\n        generalTriangleInOrOut = triangle.checkIfPointIsInTriangle(xT1, yT1, xT2, yT2, xT3, yT3, xP, yP)\n        self.assertFalse(generalTriangleInOrOut)\n\n        # Check a case that should cause failure since only two triangles can be drawn\n        xP = 0.0\n        yP = 0.17\n        generalTriangleInOrOut = triangle.checkIfPointIsInTriangle(xT1, yT1, xT2, yT2, xT3, yT3, xP, yP)\n        self.assertFalse(generalTriangleInOrOut)\n\n    def test_checkIfPointIsInTriangle2(self):\n        \"\"\"Test that barycentricCheckIfPointIsInTriangle can identify if a point is inside or outside of a triangle.\"\"\"\n        # First check the right triangle case\n       
 xT1 = 0.0\n        yT1 = 0.0\n        xT2 = 1.0\n        yT2 = 0.0\n        xT3 = 0.0\n        yT3 = 1.0\n        xP = 0.5\n        yP = 0.5\n        rightTriangleInOrOut = triangle.checkIfPointIsInTriangle(xT1, yT1, xT2, yT2, xT3, yT3, xP, yP)\n        self.assertTrue(rightTriangleInOrOut)\n\n        # Check a case that should cause failure for checkIfPointIsInTriangle since only two triangle can be drawn\n        x1 = 0.15\n        x2 = 0.0\n        x3 = 0.0\n        y1 = 0.17\n        y2 = 0.054\n        y3 = 0.376\n        xP = 0.0\n        yP = 0.17\n        generalTriangleInOrOut = triangle.checkIfPointIsInTriangle(x1, y1, x2, y2, x3, y3, xP, yP)\n        self.assertTrue(generalTriangleInOrOut)\n\n        # now create a case that should evaluate False\n        xP = 2.0\n        yP = 0.5\n        rightTriangleInOrOut = triangle.checkIfPointIsInTriangle(xT1, yT1, xT2, yT2, xT3, yT3, xP, yP)\n        self.assertFalse(rightTriangleInOrOut)\n\n        # Now check non right triangle\n        xT1 = 26.0\n        yT1 = 10.0\n        xT2 = 100.0\n        yT2 = 0.0\n        xT3 = 0.0\n        yT3 = 100.0\n        xP = 50.0\n        yP = 50.0\n\n        generalTriangleInOrOut = triangle.checkIfPointIsInTriangle(xT1, yT1, xT2, yT2, xT3, yT3, xP, yP)\n        self.assertTrue(generalTriangleInOrOut)\n\n        # now check false case\n        xP = 1.0\n        yP = 60.0\n        generalTriangleInOrOut = triangle.checkIfPointIsInTriangle(xT1, yT1, xT2, yT2, xT3, yT3, xP, yP)\n        self.assertFalse(generalTriangleInOrOut)\n"
  },
  {
    "path": "armi/utils/tests/test_units.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Test armi.utils.units.py.\"\"\"\n\nimport unittest\n\nfrom armi.utils import units\n\n\nclass TestUnits(unittest.TestCase):\n    def test_getTc(self):\n        self.assertAlmostEqual(units.getTc(Tc=200), 200.0)\n        self.assertAlmostEqual(units.getTc(Tk=300), 26.85)\n\n        ## error if no argument provided\n        with self.assertRaisesRegex(ValueError, \"Tc=None and Tk=None\"):\n            units.getTc()\n\n        ## error if two arguments provided even if those arguments are \"falsy\"\n        with self.assertRaisesRegex(ValueError, \"Tc=0 and Tk=0\"):\n            units.getTc(Tc=0, Tk=0)\n\n        with self.assertRaisesRegex(ValueError, \"Tc=0 and Tk=200\"):\n            units.getTc(Tc=0, Tk=200)\n\n    def test_getTk(self):\n        self.assertAlmostEqual(units.getTk(Tc=200), 473.15)\n        self.assertAlmostEqual(units.getTk(Tk=300), 300.00)\n\n        ## error if no argument provided\n        with self.assertRaisesRegex(ValueError, \"Tc=None and Tk=None\"):\n            units.getTk()\n\n        ## error if two arguments provided even if those arguments are \"falsy\"\n        with self.assertRaisesRegex(ValueError, \"Tc=0 and Tk=0\"):\n            units.getTk(Tc=0, Tk=0)\n\n        with self.assertRaisesRegex(ValueError, \"Tc=0 and Tk=200\"):\n            units.getTk(Tc=0, Tk=200)\n\n    def test_getTf(self):\n        # 0 C = 32 F\n        
self.assertAlmostEqual(units.getTf(Tc=0), 32.0)\n        self.assertAlmostEqual(units.getTf(Tk=273.15), 32.0)\n\n        # 100 C = 212 F\n        self.assertAlmostEqual(units.getTf(Tc=100), 212.0)\n        self.assertAlmostEqual(units.getTf(Tk=373.15), 212.0)\n\n        # -40 C = -40 F\n        self.assertAlmostEqual(units.getTf(Tc=-40), -40)\n\n        ## error if no argument provided\n        with self.assertRaisesRegex(ValueError, \"Tc=None and Tk=None\"):\n            units.getTf()\n\n        ## error if two arguments provided even if those arguments are \"falsy\"\n        with self.assertRaisesRegex(ValueError, \"Tc=0 and Tk=0\"):\n            units.getTf(Tc=0, Tk=0)\n\n        with self.assertRaisesRegex(ValueError, \"Tc=0 and Tk=200\"):\n            units.getTf(Tc=0, Tk=200)\n\n    def test_pressure_converter(self):\n        \"\"\"Converter Pascals to Pascals should just be a pass-through.\"\"\"\n        for val in [0.0, -99.141, 123, 3.14159, -2.51212e-12]:\n            self.assertEqual(val, units.PRESSURE_CONVERTERS[\"Pa\"](val))\n\n    def test_getTmev(self):\n        val = units.getTmev(Tc=45.0)\n        self.assertAlmostEqual(val, 2.74160430306e-08)\n\n        val = units.getTmev(Tc=145.0)\n        self.assertAlmostEqual(val, 3.60333754306e-08)\n\n        val = units.getTmev(Tk=445.0)\n        self.assertAlmostEqual(val, 3.8347129180000004e-08)\n\n    def test_getTemperature(self):\n        val = units.getTemperature(Tc=42, tempUnits=\"Tc\")\n        self.assertEqual(val, 42)\n\n        val = units.getTemperature(Tk=42, tempUnits=\"Tk\")\n        self.assertEqual(val, 42)\n\n        val = units.getTemperature(Tc=42, tempUnits=\"Tk\")\n        self.assertAlmostEqual(val, 315.15)\n\n        val = units.getTemperature(Tk=42, tempUnits=\"Tc\")\n        self.assertAlmostEqual(val, -231.15)\n\n        with self.assertRaises(ValueError):\n            units.getTemperature(Tc=42)\n\n    def test_convertXtoPascal(self):\n        val = 
units.convertMmhgToPascal(11.1)\n        self.assertAlmostEqual(val, 1479.8782894736883)\n\n        val = units.convertBarToPascal(2.2)\n        self.assertAlmostEqual(val, 220000)\n\n        val = units.convertAtmToPascal(3.1)\n        self.assertAlmostEqual(val, 314107.5)\n\n    def test_sanitizeAngle(self):\n        val = units.sanitizeAngle(0)\n        self.assertEqual(val, 0)\n\n        val = units.sanitizeAngle(1.01)\n        self.assertEqual(val, 1.01)\n\n        val = units.sanitizeAngle(-6)\n        self.assertAlmostEqual(val, 0.28318530717958623)\n\n        val = units.sanitizeAngle(9)\n        self.assertAlmostEqual(val, 2.7168146928204138)\n\n    def test_getXYLineParameters(self):\n        a, b, c, d = units.getXYLineParameters(0)\n        self.assertEqual(a, 0.0)\n        self.assertEqual(b, 1.0)\n        self.assertEqual(c, 0.0)\n        self.assertEqual(d, 0.0)\n\n        a, b, c, d = units.getXYLineParameters(1, 0.1, 0.2)\n        self.assertEqual(a, 1)\n        self.assertEqual(b, 0)\n        self.assertEqual(c, 0)\n        self.assertEqual(d, 0.1)\n"
  },
  {
    "path": "armi/utils/tests/test_utils.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Testing some utility functions.\"\"\"\n\nimport os\nimport unittest\nfrom collections import defaultdict\n\nimport numpy as np\n\nfrom armi import utils\nfrom armi.settings.caseSettings import Settings\nfrom armi.testing import loadTestReactor\nfrom armi.tests import mockRunLogs\nfrom armi.utils import (\n    codeTiming,\n    directoryChangers,\n    getAvailabilityFactors,\n    getBurnSteps,\n    getCumulativeNodeNum,\n    getCycleLengths,\n    getCycleNames,\n    getCycleNodeFromCumulativeNode,\n    getCycleNodeFromCumulativeStep,\n    getFileSHA1Hash,\n    getMaxBurnSteps,\n    getNodesPerCycle,\n    getPowerFractions,\n    getPreviousTimeNode,\n    getStepLengths,\n    hasBurnup,\n    safeCopy,\n    safeMove,\n)\n\n\nclass TestGeneralUtils(unittest.TestCase):\n    def test_getFileSHA1Hash(self):\n        with directoryChangers.TemporaryDirectoryChanger():\n            path = \"test.txt\"\n            with open(path, \"w\") as f1:\n                f1.write(\"test\")\n            sha = getFileSHA1Hash(path)\n            self.assertIn(\"a94a8\", sha)\n\n    def test_getFileSHA1HashDir(self):\n        with directoryChangers.TemporaryDirectoryChanger():\n            pathDir = \"testDir\"\n            path1 = os.path.join(pathDir, \"test1.txt\")\n            path2 = os.path.join(pathDir, \"test2.txt\")\n            os.mkdir(pathDir)\n            for i, path in 
enumerate([path1, path2]):\n                with open(path, \"w\") as f1:\n                    f1.write(f\"test{i}\")\n            sha = getFileSHA1Hash(pathDir)\n            self.assertIn(\"ccd13\", sha)\n\n    def test_mergeableDictionary(self):\n        mergeableDict = utils.MergeableDict()\n        normalDict = {\"luna\": \"thehusky\", \"isbegging\": \"fortreats\", \"right\": \"now\"}\n        mergeableDict.merge({\"luna\": \"thehusky\"}, {\"isbegging\": \"fortreats\"}, {\"right\": \"now\"})\n        self.assertEqual(mergeableDict, normalDict)\n\n    def test_createFormattedStrWithDelimiter(self):\n        # Test with a random list of strings\n        dataList = [\"hello\", \"world\", \"1\", \"2\", \"3\", \"4\", \"5\"]\n        maxNumberOfValuesBeforeDelimiter = 3\n        delimiter = \"\\n\"\n        outputStr = utils.createFormattedStrWithDelimiter(\n            dataList=dataList,\n            maxNumberOfValuesBeforeDelimiter=maxNumberOfValuesBeforeDelimiter,\n            delimiter=delimiter,\n        )\n        self.assertEqual(outputStr, \"hello, world, 1,\\n2, 3,\\n4, 5\\n\")\n\n        outputStr = utils.createFormattedStrWithDelimiter(\n            dataList=dataList,\n            maxNumberOfValuesBeforeDelimiter=0,\n            delimiter=delimiter,\n        )\n        self.assertEqual(outputStr, \"hello, world, 1, 2, 3, 4, 5\\n\")\n\n        # test with an empty list\n        dataList = []\n        outputStr = utils.createFormattedStrWithDelimiter(\n            dataList=dataList,\n            maxNumberOfValuesBeforeDelimiter=maxNumberOfValuesBeforeDelimiter,\n            delimiter=delimiter,\n        )\n        self.assertEqual(outputStr, \"\")\n\n    def test_capStrLen(self):\n        # Test with strings\n        str1 = utils.capStrLen(\"sodium\", 5)\n        self.assertEqual(\"so...\", str1)\n        str1 = utils.capStrLen(\"potassium\", 6)\n        self.assertEqual(\"pot...\", str1)\n        str1 = utils.capStrLen(\"rubidium\", 7)\n        
self.assertEqual(\"rubi...\", str1)\n        with self.assertRaises(Exception):\n            str1 = utils.capStrLen(\"sodium\", 2)\n\n    def test_list2str(self):\n        # Test with list of strings\n        list1 = [\"One\", \"Two\"]\n        list2 = [\"Three\", \"Four\"]\n        str1 = \"OneTwo\"\n        str2 = utils.list2str(list1, 4, None, None)\n        self.assertEqual(str1, str2)\n        str1 = \"One  Two  \"\n        str2 = utils.list2str(list1, None, None, 5)\n        self.assertEqual(str1, str2)\n        str1 = \"OneTwoThreeFour\"\n        str2 = utils.list2str(list2, None, list1, None)\n        self.assertEqual(str1, str2)\n        str1 = \"OneTwoThreeFourT...Four\"\n        str2 = utils.list2str(list2, 4, list1, None)\n        self.assertEqual(str1, str2)\n        str1 = \"OneTwoThreeFourT...FourThreeFour \"\n        str2 = utils.list2str(list2, None, list1, 5)\n        self.assertEqual(str1, str2)\n        str1 = \"OneTwoThreeFourT...FourThreeFour T... Four \"\n        str2 = utils.list2str(list2, 4, list1, 5)\n        self.assertEqual(str1, str2)\n\n    def test_slantSplit(self):\n        x1 = utils.slantSplit(10.0, 4.0, 4)\n        x2 = utils.slantSplit(10.0, 4.0, 4, order=\"high first\")\n        self.assertListEqual(x1, [1.0, 2.0, 3.0, 4.0])\n        self.assertListEqual(x2, [4.0, 3.0, 2.0, 1.0])\n\n    def test_prependToList(self):\n        a = [\"hello\", \"world\"]\n        b = [1, 2, 3]\n        utils.prependToList(a, b)\n        self.assertListEqual(a, [1, 2, 3, \"hello\", \"world\"])\n\n    def test_plotMatrix(self):\n        matrix = np.zeros([2, 2], dtype=float)\n        matrix[0, 0] = 1\n        matrix[0, 1] = 2\n        matrix[1, 0] = 3\n        matrix[1, 1] = 4\n        xtick = ([0, 1], [\"1\", \"2\"])\n        ytick = ([0, 1], [\"1\", \"2\"])\n        fname = \"test_plotMatrix_testfile\"\n        with directoryChangers.TemporaryDirectoryChanger():\n            utils.plotMatrix(matrix, fname, show=False, title=\"plot\")\n            
utils.plotMatrix(matrix, fname, show=False, minV=0, maxV=5, figsize=[3, 4])\n            utils.plotMatrix(matrix, fname, show=False, xticks=xtick, yticks=ytick)\n\n    def test_classesInHierarchy(self):\n        \"\"\"Tests the classesInHierarchy utility.\"\"\"\n        # load the test reactor\n        _o, r = loadTestReactor(inputFileName=\"smallestTestReactor/armiRunSmallest.yaml\")\n\n        # call the `classesInHierarchy` function\n        classCounts = defaultdict(lambda: 0)\n        utils.classesInHierarchy(r, classCounts, None)\n\n        # validate the `classesInHierarchy` function\n        self.assertGreater(len(classCounts), 30)\n        self.assertEqual(classCounts[type(r)], 1)\n        self.assertEqual(classCounts[type(r.core)], 1)\n\n        # further validate the Reactor hierarchy is in place\n        self.assertEqual(len(r.core.getAssemblies()), 1)\n        self.assertEqual(len(r.core.getBlocks()), 1)\n\n    def test_codeTiming(self):\n        \"\"\"Test that codeTiming preserves function attributes when it wraps a function.\"\"\"\n\n        @codeTiming.timed\n        def testFunc():\n            \"\"\"Test function docstring.\"\"\"\n            pass\n\n        self.assertEqual(getattr(testFunc, \"__doc__\"), \"Test function docstring.\")\n        self.assertEqual(getattr(testFunc, \"__name__\"), \"testFunc\")\n\n    def test_safeCopy(self):\n        with directoryChangers.TemporaryDirectoryChanger():\n            os.mkdir(\"dir1\")\n            os.mkdir(\"dir2\")\n            file1 = \"dir1/file1.txt\"\n            with open(file1, \"w\") as f:\n                f.write(\"Hello\")\n            file2 = \"dir1\\\\file2.txt\"\n            with open(file2, \"w\") as f:\n                f.write(\"Hello2\")\n\n            with mockRunLogs.BufferLog() as mock:\n                # Test Linuxy file path\n                self.assertEqual(\"\", mock.getStdout())\n                safeCopy(file1, \"dir2\")\n                self.assertIn(\"Copied\", 
mock.getStdout())\n                self.assertIn(\"file1\", mock.getStdout())\n                self.assertIn(\"->\", mock.getStdout())\n                # Clean up for next safeCopy\n                mock.emptyStdout()\n                # Test Windowsy file path\n                self.assertEqual(\"\", mock.getStdout())\n                safeCopy(file2, \"dir2\")\n                self.assertIn(\"Copied\", mock.getStdout())\n                self.assertIn(\"file2\", mock.getStdout())\n                self.assertIn(\"->\", mock.getStdout())\n            self.assertTrue(os.path.exists(os.path.join(\"dir2\", \"file1.txt\")))\n\n    def test_safeMove(self):\n        with directoryChangers.TemporaryDirectoryChanger():\n            os.mkdir(\"dir1\")\n            os.mkdir(\"dir2\")\n            file1 = \"dir1/file1.txt\"\n            with open(file1, \"w\") as f:\n                f.write(\"Hello\")\n            file2 = \"dir1\\\\file2.txt\"\n            with open(file2, \"w\") as f:\n                f.write(\"Hello2\")\n\n            with mockRunLogs.BufferLog() as mock:\n                # Test Linuxy file path\n                self.assertEqual(\"\", mock.getStdout())\n                safeMove(file1, \"dir2\")\n                self.assertIn(\"Moved\", mock.getStdout())\n                self.assertIn(\"file1\", mock.getStdout())\n                self.assertIn(\"->\", mock.getStdout())\n                # Clean up for next safeCopy\n                mock.emptyStdout()\n                # Test Windowsy file path\n                self.assertEqual(\"\", mock.getStdout())\n                safeMove(file2, \"dir2\")\n                self.assertIn(\"Moved\", mock.getStdout())\n                self.assertIn(\"file2\", mock.getStdout())\n                self.assertIn(\"->\", mock.getStdout())\n            self.assertTrue(os.path.exists(os.path.join(\"dir2\", \"file1.txt\")))\n\n    def test_safeMoveDir(self):\n        with directoryChangers.TemporaryDirectoryChanger():\n            
os.mkdir(\"dir1\")\n            file1 = \"dir1/file1.txt\"\n            with open(file1, \"w\") as f:\n                f.write(\"Hello\")\n            file2 = \"dir1\\\\file2.txt\"\n            with open(file2, \"w\") as f:\n                f.write(\"Hello2\")\n\n            with mockRunLogs.BufferLog() as mock:\n                self.assertEqual(\"\", mock.getStdout())\n                safeMove(\"dir1\", \"dir2\")\n                self.assertIn(\"Moved\", mock.getStdout())\n                self.assertIn(\"dir1\", mock.getStdout())\n                self.assertIn(\"dir2\", mock.getStdout())\n            self.assertTrue(os.path.exists(os.path.join(\"dir2\", \"file1.txt\")))\n\n\nclass CyclesSettingsTests(unittest.TestCase):\n    \"\"\"\n    Check reading of the various cycle history settings for both the detailed\n    and simple input options.\n    \"\"\"\n\n    detailedCyclesSettings = \"\"\"\nmetadata:\n  version: uncontrolled\nsettings:\n  power: 1000000000.0\n  nCycles: 3\n  cycles:\n    - name: dog\n      cumulative days: [1, 2, 3]\n      power fractions: [0.1, 0.2, 0.3]\n      availability factor: 0.1\n    - cycle length: 10\n      burn steps: 5\n      power fractions: [0.2, 0.2, 0.2, 0.2, 0]\n      availability factor: 0.5\n    - name: ferret\n      step days: [3, R4]\n      power fractions: [0.3, R4]\n  runType: Standard\n\"\"\"\n    simpleCyclesSettings = \"\"\"\nmetadata:\n  version: uncontrolled\nsettings:\n  power: 1000000000.0\n  nCycles: 3\n  availabilityFactors: [0.1, R2]\n  cycleLengths: [1, 2, 3]\n  powerFractions: [0.1, 0.2, R1]\n  burnSteps: 3\n  runType: Standard\n  \"\"\"\n\n    powerFractionsDetailedSolution = [\n        [0.1, 0.2, 0.3],\n        [0.2, 0.2, 0.2, 0.2, 0],\n        [0.3, 0.3, 0.3, 0.3, 0.3],\n    ]\n    powerFractionsSimpleSolution = [[0.1, 0.1, 0.1], [0.2, 0.2, 0.2], [0.2, 0.2, 0.2]]\n    cycleNamesDetailedSolution = [\"dog\", None, \"ferret\"]\n    cycleNamesSimpleSolution = [None, None, None]\n    
availabilityFactorsDetailedSolution = [0.1, 0.5, 1]\n    availabilityFactorsSimpleSolution = [0.1, 0.1, 0.1]\n    stepLengthsDetailedSolution = [\n        [1, 1, 1],\n        [10 / 5 * 0.5, 10 / 5 * 0.5, 10 / 5 * 0.5, 10 / 5 * 0.5, 10 / 5 * 0.5],\n        [3, 3, 3, 3, 3],\n    ]\n    stepLengthsSimpleSolution = [\n        [1 * 0.1 / 3, 1 * 0.1 / 3, 1 * 0.1 / 3],\n        [2 * 0.1 / 3, 2 * 0.1 / 3, 2 * 0.1 / 3],\n        [3 * 0.1 / 3, 3 * 0.1 / 3, 3 * 0.1 / 3],\n    ]\n    cycleLengthsDetailedSolution = [30, 10, 15]\n    cycleLengthsSimpleSolution = [1, 2, 3]\n    burnStepsDetailedSolution = [3, 5, 5]\n    burnStepsSimpleSolution = [3, 3, 3]\n    nodesPerCycleDetailedSolution = [4, 6, 6]\n    nodesPerCycleSimpleSolution = [4, 4, 4]\n    maxBurnStepsDetailedSolution = 5\n    maxBurnStepsSimpleSolution = 3\n\n    def setUp(self):\n        self.standaloneDetailedCS = Settings()\n        self.standaloneDetailedCS.loadFromString(self.detailedCyclesSettings)\n\n        self.standaloneSimpleCS = Settings()\n        self.standaloneSimpleCS.loadFromString(self.simpleCyclesSettings)\n\n    def test_getPowerFractions(self):\n        self.assertEqual(\n            getPowerFractions(self.standaloneDetailedCS),\n            self.powerFractionsDetailedSolution,\n        )\n\n        self.assertEqual(\n            getPowerFractions(self.standaloneSimpleCS),\n            self.powerFractionsSimpleSolution,\n        )\n\n    def test_getCycleNames(self):\n        self.assertEqual(getCycleNames(self.standaloneDetailedCS), self.cycleNamesDetailedSolution)\n\n        self.assertEqual(getCycleNames(self.standaloneSimpleCS), self.cycleNamesSimpleSolution)\n\n    def test_getAvailabilityFactors(self):\n        self.assertEqual(\n            getAvailabilityFactors(self.standaloneDetailedCS),\n            self.availabilityFactorsDetailedSolution,\n        )\n\n        self.assertEqual(\n            getAvailabilityFactors(self.standaloneSimpleCS),\n            
self.availabilityFactorsSimpleSolution,\n        )\n\n    def test_getStepLengths(self):\n        self.assertEqual(\n            getStepLengths(self.standaloneDetailedCS),\n            self.stepLengthsDetailedSolution,\n        )\n\n        self.assertEqual(\n            getStepLengths(self.standaloneSimpleCS),\n            self.stepLengthsSimpleSolution,\n        )\n\n    def test_getCycleLengths(self):\n        self.assertEqual(\n            getCycleLengths(self.standaloneDetailedCS),\n            self.cycleLengthsDetailedSolution,\n        )\n\n        self.assertEqual(getCycleLengths(self.standaloneSimpleCS), self.cycleLengthsSimpleSolution)\n\n    def test_getBurnSteps(self):\n        self.assertEqual(getBurnSteps(self.standaloneDetailedCS), self.burnStepsDetailedSolution)\n\n        self.assertEqual(getBurnSteps(self.standaloneSimpleCS), self.burnStepsSimpleSolution)\n\n    def test_hasBurnup(self):\n        self.assertTrue(hasBurnup(self.standaloneDetailedCS))\n\n    def test_getMaxBurnSteps(self):\n        self.assertEqual(\n            getMaxBurnSteps(self.standaloneDetailedCS),\n            self.maxBurnStepsDetailedSolution,\n        )\n\n        self.assertEqual(getMaxBurnSteps(self.standaloneSimpleCS), self.maxBurnStepsSimpleSolution)\n\n    def test_getNodesPerCycle(self):\n        self.assertEqual(\n            getNodesPerCycle(self.standaloneDetailedCS),\n            self.nodesPerCycleDetailedSolution,\n        )\n\n        self.assertEqual(getNodesPerCycle(self.standaloneSimpleCS), self.nodesPerCycleSimpleSolution)\n\n    def test_getCycleNodeFromCumulativeStep(self):\n        self.assertEqual(getCycleNodeFromCumulativeStep(8, self.standaloneDetailedCS), (1, 4))\n        self.assertEqual(getCycleNodeFromCumulativeStep(12, self.standaloneDetailedCS), (2, 3))\n\n        self.assertEqual(getCycleNodeFromCumulativeStep(4, self.standaloneSimpleCS), (1, 0))\n        self.assertEqual(getCycleNodeFromCumulativeStep(8, self.standaloneSimpleCS), (2, 1))\n\n   
 def test_getCycleNodeFromCumulativeNode(self):\n        self.assertEqual(getCycleNodeFromCumulativeNode(8, self.standaloneDetailedCS), (1, 4))\n        self.assertEqual(getCycleNodeFromCumulativeNode(12, self.standaloneDetailedCS), (2, 2))\n\n        self.assertEqual(getCycleNodeFromCumulativeNode(3, self.standaloneSimpleCS), (0, 3))\n        self.assertEqual(getCycleNodeFromCumulativeNode(8, self.standaloneSimpleCS), (2, 0))\n\n        with self.assertRaises(ValueError):\n            getCycleNodeFromCumulativeNode(-1, self.standaloneSimpleCS)\n\n    def test_getPreviousTimeNode(self):\n        with self.assertRaises(ValueError):\n            getPreviousTimeNode(0, 0, \"foo\")\n        self.assertEqual(getPreviousTimeNode(1, 1, self.standaloneSimpleCS), (1, 0))\n        self.assertEqual(getPreviousTimeNode(1, 0, self.standaloneSimpleCS), (0, 3))\n        self.assertEqual(getPreviousTimeNode(1, 0, self.standaloneDetailedCS), (0, 3))\n        self.assertEqual(getPreviousTimeNode(2, 4, self.standaloneDetailedCS), (2, 3))\n\n    def test_getCumulativeNodeNum(self):\n        self.assertEqual(getCumulativeNodeNum(2, 0, self.standaloneSimpleCS), 8)\n        self.assertEqual(getCumulativeNodeNum(1, 2, self.standaloneSimpleCS), 6)\n\n        self.assertEqual(getCumulativeNodeNum(2, 0, self.standaloneDetailedCS), 10)\n        self.assertEqual(getCumulativeNodeNum(1, 0, self.standaloneDetailedCS), 4)\n"
  },
  {
    "path": "armi/utils/textProcessors.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Utility classes and functions for manipulating text files.\"\"\"\n\nimport io\nimport os\nimport pathlib\nimport re\nfrom typing import List, Optional, TextIO, Tuple, Union\n\nfrom armi import runLog\n\n_INCLUDE_CTOR = False\n_INCLUDE_RE = re.compile(r\"^([^#]*\\s+)?!include\\s+(.*)\\n?$\")\n_INDENT_RE = re.compile(r\"^[\\s\\-\\?:]*([^\\s\\-\\?:].*)?$\")\n\n# String constants\nSCIENTIFIC_PATTERN = r\"[+-]?\\d*\\.\\d+[eEdD][+-]\\d+\"\n\"\"\"\nMatches:\n* code:` 1.23e10`\n* code:`-1.23Ee10`\n* code:`+1.23d10`\n* code:`  .23D10`\n* code:` 1.23e-10`\n* code:` 1.23e+1`\n\"\"\"\n\nFLOATING_PATTERN = r\"[+-]?\\d+\\.*\\d*\"\n\"\"\"Matches 1, 100, 1.0, -1.2, +12.234\"\"\"\n\nDECIMAL_PATTERN = r\"[+-]?\\d*\\.\\d+\"\n\"\"\"Matches .1, 1.213423, -23.2342, +.023\"\"\"\n\n\nclass FileMark:\n    def __init__(self, fName, line, column, relativeTo):\n        self.path = fName\n        self.line = line\n        self.column = column\n        # if the path is relative, where is it relative to? 
We need this to be able to\n        # normalize relative paths to a root file.\n        self.relativeTo = relativeTo\n\n    def __str__(self):\n        return \"{}, line {}, column {}\".format(self.path, self.line, self.column)\n\n\ndef _processIncludes(\n    src: Union[TextIO, pathlib.Path],\n    out,\n    includes: List[Tuple[pathlib.Path, FileMark]],\n    root: pathlib.Path,\n    indentation=0,\n    currentFile=\"<stream>\",\n):\n    \"\"\"\n    This is the workhorse of ``resolveMarkupInclusions`` and friends.\n\n    Recursively inserts the contents of !included YAML files into the output stream,\n    keeping track of indentation and a list of included files along the way.\n    \"\"\"\n\n    def _beginningOfContent(line: str) -> int:\n        \"\"\"\n        Return the position of the first \"content\" character.\n\n        This follows the YAML spec at https://yaml.org/spec/current.html#id2519916\n\n        In short, it will return the position of the first character that is not\n        whitespace or one of the special \"block collection\" markers (\"-\", \"?\", and \":\")\n        \"\"\"\n        m = _INDENT_RE.match(line)\n        if m and m.group(1) is not None:\n            return m.start(1)\n        else:\n            return 0\n\n    indentSpace = \" \" * indentation\n    if hasattr(src, \"getvalue\"):\n        # assume stringIO\n        lines = [ln + \"\\n\" for ln in src.getvalue().split(\"\\n\")]\n    else:\n        # assume file stream or TextIOBase, and it has a readlines attr\n        lines = src.readlines()\n    for i, line in enumerate(lines):\n        leadingSpace = indentSpace if i > 0 else \"\"\n        m = _INCLUDE_RE.match(line)\n        if m:\n            # this line has an !include on it\n            if m.group(1) is not None:\n                out.write(leadingSpace + m.group(1))\n            fName = pathlib.Path(os.path.expandvars(m.group(2)))\n            path = root / fName\n            if not path.exists():\n                raise 
ValueError(\"The !included file, `{}` does not exist from {}!\".format(fName, root))\n            includes.append((fName, FileMark(currentFile, i, m.start(2), root)))\n\n            with open(path, \"r\") as includedFile:\n                firstCharacterPos = _beginningOfContent(line)\n                newIndent = indentation + firstCharacterPos\n                _processIncludes(\n                    includedFile,\n                    out,\n                    includes,\n                    path.parent,\n                    indentation=newIndent,\n                    currentFile=path,\n                )\n        else:\n            out.write(leadingSpace + line)\n\n\ndef resolveMarkupInclusions(src: Union[TextIO, pathlib.Path], root: Optional[pathlib.Path] = None) -> io.StringIO:\n    r\"\"\"\n    Process a text stream, appropriately handling ``!include`` tags.\n\n    This will take the passed IO stream or file path, replacing any instances of\n    ``!include [path]`` with the appropriate contents of the ``!include`` file.\n\n    What is returned is a new text stream, containing the contents of all of the files\n    stitched together.\n\n    Parameters\n    ----------\n    src : StringIO or TextIOBase/Path\n        If a Path is provided, read text from there. If is stream is provided, consume\n        text from the stream. If a stream is provided, ``root`` must also be provided.\n    root : Optional Path\n        The root directory to use for resolving relative paths in !include tags. If a\n        stream is provided for ``src``, ``root`` must be provided. Otherwise, the\n        directory containing the ``src`` path will be used by default.\n\n    Notes\n    -----\n    While the use of ``!include`` appears as though it would invoke some sort of special\n    custom YAML constructor code, this does not do that. 
Processing these inclusions as\n    part of the document parsing/composition that comes with ruamel.yaml could\n    work, but has a number of prohibitive drawbacks (or at least reasons why it might\n    not be worth doing). Using a custom constructor is more-or-less supported by\n    ruamel.yaml (which we do use, as it is what underpins the yamlize package), but it\n    carries limitations about how anchors and aliases can cross included-file\n    boundaries. Getting around this requires either monkey-patching ruamel.yaml, or\n    subclassing it, which in turn would require monkey-patching yamlize.\n\n    Instead, we treat the ``!include``\\ s as a sort of pre-processor directive, which\n    essentially pastes the contents of the ``!include``\\ d file into the location of the\n    ``!include``. The result is a text stream containing the entire contents, with all\n    ``!include``\\ s resolved. The only degree of sophistication lies in how indentation\n    is handled; since YAML cares about indentation to keep track of object hierarchy,\n    care must be taken that the included file contents are indented appropriately.\n\n    To precisely describe how the indentation works, it helps to have some definitions:\n\n     - Included file: The file specified in the ``!include [Included file]``\n     - Including line: The line that actually contains the ``!include [Included file]``\n     - Meaningful YAML content: Text in a YAML file that is not either indentation or a\n       special character like \"-\", \":\" or \"?\".\n\n    The contents of the included file will be indented such that that the first\n    character of each line in the included file will be found at the first column in the\n    including line that contains meaningful YAML content. 
The only exception is the\n    first line of the included file, which starts at the location of the ``!include``\n    itself and is not deliberately indented.\n\n    In the future, we may wish to do the more sophisticated processing of the\n    ``!include``\\ s as part of the YAML parse. For future reference, there is some pure\n    gold on that topic here:\n    https://stackoverflow.com/questions/44910886/pyyaml-include-file-and-yaml-aliases-anchors-references\n    \"\"\"\n    return _resolveMarkupInclusions(src, root)[0]\n\n\ndef _getRootFromSrc(src: Union[TextIO, pathlib.Path], root: Optional[pathlib.Path]) -> pathlib.Path:\n    if isinstance(src, pathlib.Path):\n        root = root or src.parent.absolute()\n    elif isinstance(src, io.TextIOBase):\n        if root is None:\n            raise ValueError(\"A stream was provided without a root directory.\")\n    else:\n        raise TypeError(\"Unsupported source type: `{}`!\".format(type(src)))\n\n    return root\n\n\ndef findYamlInclusions(\n    src: Union[TextIO, pathlib.Path], root: Optional[pathlib.Path] = None\n) -> List[Tuple[pathlib.Path, FileMark]]:\n    \"\"\"\n    Return a list containing all of the !included YAML files from a root file.\n\n    This will attempt to \"normalize\" relative paths to the passed root. If that is not\n    possible, then an absolute path will be used instead. 
For example, if a file (A)\n    !includes another file (B) by an absolute path, which in turn !includes more files\n    relative to (B), all of (B)'s relative includes will be turned into absolute paths\n    from the perspective of the root file (A).\n    \"\"\"\n    includes = _resolveMarkupInclusions(src, root)[1]\n    root = _getRootFromSrc(src, root)\n    normalizedIncludes = []\n\n    for path, mark in includes:\n        if not path.is_absolute():\n            try:\n                path = (mark.relativeTo / path).relative_to(root or os.getcwd())\n            except ValueError:\n                # Can't make a relative path. IMO, pathlib gives up a little too early,\n                # but we still probably want to decay to absolute paths if the files\n                # aren't in the same tree.\n                path = (mark.relativeTo / path).absolute()\n\n        normalizedIncludes.append((path, mark))\n\n    return normalizedIncludes\n\n\ndef _resolveMarkupInclusions(\n    src: Union[TextIO, pathlib.Path], root: Optional[pathlib.Path] = None\n) -> Tuple[io.StringIO, List[Tuple[pathlib.Path, FileMark]]]:\n    root = _getRootFromSrc(src, root)\n\n    if isinstance(src, pathlib.Path):\n        # this is inefficient, but avoids having to play with io buffers\n        with open(src, \"r\") as rootFile:\n            src = io.StringIO(rootFile.read())\n\n    out = io.StringIO()\n    includes = []\n    _processIncludes(src, out, includes, root)\n\n    out.seek(0)\n    # be kind; rewind\n    src.seek(0)\n\n    return out, includes\n\n\nclass SequentialReader:\n    r\"\"\"\n    Fast sequential reader that must be used within a with statement.\n\n    Attributes\n    ----------\n    line : str\n        value of the current line\n    match : re.match\n        value of the current match\n\n    Notes\n    -----\n    This reader will sequentially search a file for a regular expression pattern or\n    string depending on the method used. 
When the pattern/string is matched/found, the\n    reader will stop, return :code:`True`, and set the attributes :code:`line` and\n    :code:`match`.\n\n    This pattern makes it easy to cycle through repetitive output in a very fast manner.\n    For example, if you had a text file with consistent chunks of information that\n    always started with the same text followed by information, you could do something\n    like this:\n\n    >>> with SequentialReader(\"somefile\") as sr:\n    ...     data = []\n    ...     while sr.searchForText(\"start of data chunk\"):\n    ...         # this needs to repeat for as many chunks as there are.\n    ...         if sr.searchForPatternOnNextLine(\"some-(?P<data>\\w+)-pattern\"):\n    ...             data.append(sr.match[\"data\"])\n    \"\"\"\n\n    def __init__(self, filePath):\n        self._filePath = filePath\n        self._stream = None\n        self.line = \"\"\n        self.match = None\n        self._textErrors = []\n        self._textWarnings = []\n        self._patternErrors = []\n        self.ignoreAllErrors = False\n\n    def issueWarningOnFindingText(self, text, warning):\n        \"\"\"Add a text search for every line of the file, if the text is found the specified warning will be issued.\n\n        This is important for determining if issues occurred while searching for text.\n\n        Parameters\n        ----------\n        text : str\n            text to find within the file\n        warning : str\n            An warning message to issue.\n\n        See Also\n        --------\n        raiseErrorOnFindingText\n        raiseErrorOnFindingPattern\n        \"\"\"\n        self._textWarnings.append((text, warning))\n\n    def raiseErrorOnFindingText(self, text, error):\n        \"\"\"Add a text search for every line of the file, if the text is found the specified error\n        will be raised.\n\n        This is important for determining if errors occurred while searching for text.\n\n        Parameters\n        
----------\n        text : str\n            text to find within the file\n\n        error : Exception\n            An exception to raise.\n\n        See Also\n        --------\n        raiseErrorOnFindingPattern\n        \"\"\"\n        self._textErrors.append((text, error))\n\n    def raiseErrorOnFindingPattern(self, pattern, error):\n        \"\"\"Add a pattern search for every line of the file, if the pattern is found the specified\n        error will be raised.\n\n        This is important for determining if errors occurred while searching for text.\n\n        Parameters\n        ----------\n        pattern : str\n            regular expression pattern\n\n        error : Exception\n            An exception to raise.\n\n        See Also\n        --------\n        raiseErrorOnFindingText\n        \"\"\"\n        self._patternErrors.append((re.compile(pattern), error))\n\n    def __repr__(self):\n        return \"<{} {} {}>\".format(\n            self.__class__.__name__,\n            self._filePath,\n            \"open\" if self._stream is not None else \"closed\",\n        )\n\n    def __enter__(self):\n        if not os.path.exists(self._filePath):\n            raise OSError(\"Cannot open non-existing file {}\".format(self._filePath))\n        self._stream = open(self._filePath, \"r\")\n        return self\n\n    def __exit__(self, exc_type, exc_value, traceback):\n        # if checking for errors, we need to keep reading\n        if exc_type is not None and not self.ignoreAllErrors and (self._patternErrors or self._textErrors):\n            while self._readLine():  # all lines have '\\n' terminators\n                pass\n\n        if self._stream is not None:\n            try:\n                self._stream.close()\n            except Exception:\n                # We really don't care if anything fails here, plus an exception in exit is ignored anyway\n                pass\n        self._stream = None\n\n    def searchForText(self, text):\n        \"\"\"Search 
the file for the next occurrence of :code:`text`, and set the\n        :code:`self.line` attribute to that line's value if it matched.\n\n        Notes\n        -----\n        This will search the file line by line until it finds the text.  This sets the\n        attribute :code:`self.line`. If the previous :code:`_searchFor*` method did not\n        match, the last line it did not match will be searched first.\n\n        Returns\n        -------\n        matched : bool\n            Boolean indicating whether or not the pattern matched\n        \"\"\"\n        self.match = None\n        while True:\n            if text in self.line:\n                return True\n            self.line = self._readLine()\n            if self.line == \"\":\n                break\n        return False\n\n    def searchForPattern(self, pattern):\n        \"\"\"Search the file for the next occurece of :code:`pattern` and set the :code:`self.line` attribute to that\n        line's value if it matched.\n\n        Notes\n        -----\n        This will search the file line by line until it finds the pattern.\n        This sets the attribute :code:`self.line`. If the previous :code:`_searchFor*`\n        method did not match, the last line it did not match will be searched first.\n\n        Returns\n        -------\n        matched : bool\n            Boolean indicating whether or not the pattern matched\n        \"\"\"\n        while True:\n            self.match = re.search(pattern, self.line)\n            if self.match is not None:\n                return True\n            self.line = self._readLine()\n            if self.line == \"\":\n                break\n        return False\n\n    def searchForPatternOnNextLine(self, pattern):\n        \"\"\"Search the next line for a given pattern, and set the :code:`self.line` attribute to that line's value if it\n        matched.\n\n        Notes\n        -----\n        This sets the attribute :code:`self.line`. 
If the previous :code:`_searchFor*`\n        method did not match, the last line it did not match will be searched first.\n\n        Returns\n        -------\n        matched : bool\n            Boolean indicating whether or not the pattern matched\n        \"\"\"\n        self.match = re.search(pattern, self.line)\n        if self.match is None:\n            self.line = self._readLine()\n            self.match = re.search(pattern, self.line)\n        return self.match is not None\n\n    def _readLine(self):\n        line = self._stream.readline()\n        if not self.ignoreAllErrors:\n            for text, error in self._textErrors:\n                if text in line:\n                    raise error\n            for text, warning in self._textWarnings:\n                if text in line:\n                    runLog.warning(warning)\n            for regex, error in self._patternErrors:\n                if regex.match(line):\n                    raise error\n        return line\n\n    def consumeLine(self):\n        \"\"\"Consumes the line.\n\n        This is necessary when searching for the same pattern repetitively, because\n        otherwise searchForPatternOnNextLine would not work.\n        \"\"\"\n        self.line = \"\"\n        self.match = None\n\n\nclass SequentialStringIOReader(SequentialReader):\n    r\"\"\"\n    Fast sequential reader that must be used within a with statement.\n\n    Attributes\n    ----------\n    line : str\n        value of the current line\n    match : re.match\n        value of the current match\n\n    Notes\n    -----\n    This reader will sequentially search a file for a regular expression pattern or\n    string depending on the method used. 
When the pattern/string is matched/found, the\n    reader will stop, return :code:`True`, and set the attributes :code:`line` and\n    :code:`match`.\n\n    This pattern makes it easy to cycle through repetitive output in a very fast manner.\n    For example, if you had a text file with consistent chunks of information that\n    always started with the same text followed by information, you could do something\n    like this:\n\n    >>> with SequentialReader(\"somefile\") as sr:\n    ...     data = []\n    ...     while sr.searchForText(\"start of data chunk\"):\n    ...         # this needs to repeat for as many chunks as there are.\n    ...         if sr.searchForPatternOnNextLine(\"some-(?P<data>\\\\w+)-pattern\"):\n    ...             data.append(sr.match[\"data\"])\n    \"\"\"\n\n    def __init__(self, stringIO):\n        SequentialReader.__init__(self, \"StringIO\")\n        self._stream = stringIO\n\n    def __enter__(self):\n        \"\"\"\n        Override to prevent trying to open/reopen a StringIO object.\n\n        We don't need to override :code:`__exit__`, because it doesn't care if closing\n        the object fails.\n\n        \"\"\"\n        return self\n\n\nclass TextProcessor:\n    \"\"\"\n    A general text processing object that extends python's abilities to scan through huge files.\n\n    Use this instead of a raw file object to read data out of output files, etc.\n    \"\"\"\n\n    scipat = SCIENTIFIC_PATTERN\n    number = FLOATING_PATTERN\n    decimal = DECIMAL_PATTERN\n\n    def __init__(self, fname, highMem=False):\n        self.eChecking = False\n        # Preserve python 2-like behavior for unit tests that pass None and provide\n        # their own text data (in py2, passing None to abspath yields cwd; py3 raises)\n        self.fpath = os.path.dirname(os.path.abspath(fname or os.getcwd()))\n        f = None\n        if fname is not None:\n            if os.path.exists(fname):\n                f = open(fname)\n            else:\n            
    # need this not to fail for detecting when RXSUM doesn't exist, etc.\n                # Note: Could make it check before instantiating...\n                raise FileNotFoundError(f\"{fname} does not exist.\")\n        self.f = f\n\n    def reset(self):\n        \"\"\"Rewinds the file so you can search through it again.\"\"\"\n        self.f.seek(0)\n\n    def __repr__(self):\n        return \"<Text file at {0}>\".format(self.f.name)\n\n    def errorChecking(self, checkForErrors):\n        self.eChecking = checkForErrors\n\n    def checkErrors(self, line):\n        pass\n\n    def fsearch(self, pattern, msg=None, killOn=None, textFlag=False):\n        \"\"\"\n        Searches file f for pattern and displays msg when found. Returns line in which pattern is\n        found or FALSE if no pattern is found. Stops searching if finds killOn first.\n\n        If you specify textFlag=True, the search won't use a regular expression (and can't). The\n        basic result is you get less powerful matching capabilities at a huge speedup (10x or so\n        probably, but that's just a guess.) 
pattern and killOn must be pure text if you do this.\n        \"\"\"\n        current = 0\n        result = \"\"\n        if textFlag:\n            # fast, text-only mode\n            for line in self.f:\n                if self.eChecking:\n                    self.checkErrors(line)\n                if pattern in line:\n                    result = line\n                    break\n                elif killOn and killOn in line:\n                    result = \"\"\n                    break\n            else:\n                result = \"\"\n        else:\n            # slower regular expression mode\n            cpat = re.compile(pattern)\n            if killOn:\n                kpat = re.compile(killOn)\n            for line in self.f:\n                if self.eChecking:\n                    self.checkErrors(line)\n                if killOn:\n                    kill = re.search(kpat, line)\n                    if kill:\n                        # the kill phrase was found first, so die.\n                        result = \"\"\n                        break\n                current = re.search(cpat, line)\n                if current:\n                    if msg:\n                        print(msg)\n                    result = line\n                    break\n            if not current:\n                result = \"\"\n\n        return result\n"
  },
  {
    "path": "armi/utils/triangle.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Generic triangle math.\"\"\"\n\nimport math\n\n\ndef getTriangleArea(x1: float, y1: float, x2: float, y2: float, x3: float, y3: float) -> float:\n    \"\"\"\n    Get the area of a triangle given the vertices of a triangle using Heron's formula.\n\n    Parameters\n    ----------\n    x1 : float\n        x coordinate of first point defining a triangle\n    y1 : float\n        y coordinate of first point defining a triangle\n    x2 : float\n        x coordinate of second point defining a triangle\n    y2 : float\n        y coordinate of second point defining a triangle\n    x3 : float\n        x coordinate of third point defining a triangle\n    y3 : float\n        y coordinate of third point defining a triangle\n\n    Notes\n    -----\n    See `https://en.wikipedia.org/wiki/Heron%27s_formula` for more information.\n    \"\"\"\n    a = math.sqrt((x1 - x2) ** 2 + (y1 - y2) ** 2)\n    b = math.sqrt((x2 - x3) ** 2 + (y2 - y3) ** 2)\n    c = math.sqrt((x1 - x3) ** 2 + (y1 - y3) ** 2)\n\n    area = 1.0 / 4.0 * math.sqrt((a + (b + c)) * (c - (a - b)) * (c + (a - b)) * (a + (b - c)))\n\n    return area\n\n\ndef getTriangleCentroid(x1, y1, x2, y2, x3, y3):\n    \"\"\"\n    Return the x and y coordinates of a triangle's centroid.\n\n    Parameters\n    ----------\n    x1 : float\n        x coordinate of first point defining a triangle\n    y1 : float\n        y 
coordinate of first point defining a triangle\n    x2 : float\n        x coordinate of second point defining a triangle\n    y2 : float\n        y coordinate of second point defining a triangle\n    x3 : float\n        x coordinate of third point defining a triangle\n    y3 : float\n        y coordinate of third point defining a triangle\n\n    Returns\n    -------\n    x : float\n        x coordinate of triangle's centroid\n    y : float\n        y coordinate of a triangle's centroid\n    \"\"\"\n    x = (x1 + x2 + x3) / 3.0\n    y = (y1 + y2 + y3) / 3.0\n\n    return x, y\n\n\ndef checkIfPointIsInTriangle(\n    x1: float, y1: float, x2: float, y2: float, x3: float, y3: float, x: float, y: float\n) -> bool:\n    \"\"\"\n    Test if a point defined by x,y coordinates is within a triangle defined by vertices with x,y coordinates.\n\n    Parameters\n    ----------\n    x1 : float\n        x coordinate of first point of the bounding triangle\n    y1 : float\n        y coordinate of first point of the bounding triangle\n    x2 : float\n        x coordinate of second point of the bounding triangle\n    y2 : float\n        y coordinate of second point of the bounding triangle\n    x3 : float\n        x coordinate of third point of the bounding triangle\n    y3 : float\n        y coordinate of third point of the bounding triangle\n    x : float\n        x coordinate of point being tested\n    y : float\n        y coordinate of point being tested\n\n    Notes\n    -----\n    This method uses the barycentric method.\n    See `http://totologic.blogspot.com/2014/01/accurate-point-in-triangle-test.html`\n    \"\"\"\n    a = ((y2 - y3) * (x - x3) + (x3 - x2) * (y - y3)) / ((y2 - y3) * (x1 - x3) + (x3 - x2) * (y1 - y3))\n    b = ((y3 - y1) * (x - x3) + (x1 - x3) * (y - y3)) / ((y2 - y3) * (x1 - x3) + (x3 - x2) * (y1 - y3))\n    c = 1.0 - a - b\n    epsilon = 1e-10  # need to have some tolerance in case the point lies on the edge of the triangle\n\n    aCondition = a + epsilon >= 
0.0 and a - epsilon <= 1.0\n    bCondition = b + epsilon >= 0.0 and b - epsilon <= 1.0\n    cCondition = c + epsilon >= 0.0 and c - epsilon <= 1.0\n\n    return aCondition and bCondition and cCondition\n"
  },
  {
    "path": "armi/utils/units.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"The units module contains unit conversion functions and constants.\"\"\"\n\nimport math\n\nimport scipy.constants\n\n# Units (misc)\nDPA = \"dpa\"\nFIMA = \"FIMA\"\nPERCENT_FIMA = r\"%FIMA\"\nMB = \"MB\"  # megabytes\nMOLES = \"mole\"\nMWD = \"MWd\"\nPASCALS = \"Pa\"\nPERCENT = \"%\"\nUNITLESS = \"\"\nUSD = \"USD\"  # US currency (the dollar)\n# Units (angles)\nDEGREES = \"degrees\"\nRADIANS = \"radians\"\n# Units (energy)\nEV = \"eV\"\nMEV = \"MeV\"\nMW = \"MW\"\nWATTS = \"W\"\n# Units (length)\nCM = \"cm\"\nMETERS = \"m\"\nMICRONS = chr(181) + \"m\"\n# Units (mass)\nGRAMS = \"g\"\nKG = \"kg\"\nMT = \"MT\"\n# Units (reactivity)\nCENTS = \"cents\"  # 1/100th of a dollar\nDOLLARS = \"$\"  # (dk/k/k') / beta\nPCM = \"pcm\"\nREACTIVITY = chr(916) + \"k/k/k'\"\n# Units (temperature)\nDEGC = chr(176) + \"C\"\nDEGK = \"K\"\n# Units (time)\nDAYS = \"days\"\nMINUTES = \"min\"\nSECONDS = \"s\"\nYEARS = \"yr\"\n\n# Unit conversions\nC_TO_K = 273.15\nBOLTZMAN_CONSTANT = 8.6173324e-11  # boltzmann constant in MeV/K\nAVOGADROS_NUMBER = 6.0221415e23\nCM2_PER_BARN = 1.0e-24\nMOLES_PER_CC_TO_ATOMS_PER_BARN_CM = AVOGADROS_NUMBER * CM2_PER_BARN\nJOULES_PER_MeV = 1.60217646e-13\nJOULES_PER_eV = JOULES_PER_MeV * 1.0e-6\nSECONDS_PER_MINUTE = 60.0\nMINUTES_PER_HOUR = 60.0\nHOURS_PER_DAY = 24.0\nSECONDS_PER_HOUR = SECONDS_PER_MINUTE * MINUTES_PER_HOUR\nSECONDS_PER_DAY = 
HOURS_PER_DAY * SECONDS_PER_HOUR\nDAYS_PER_YEAR = 365.24219  # mean tropical year\nSECONDS_PER_YEAR = 31556926.0\nGAS_CONSTANT = 8.3144621  # J/mol-K\n\n# Cut-off is taken to be any element/nuclide with an atomic number\n# that is greater than Actinium (i.e., the first classified Actinide).\nHEAVY_METAL_CUTOFF_Z = 89\n\nMICRONS_PER_METER = 1.0e6\nCM2_PER_M2 = 1.0e4\nCM3_PER_M3 = 1.0e6\nMETERS_PER_CM = 0.01\nWATTS_PER_MW = 1.0e6\nEV_PER_MEV = 1.0e6\nMM_PER_CM = 10.0\nG_PER_KG = 1000.0\nLITERS_PER_CUBIC_METER = 1000\nCC_PER_LITER = CM3_PER_M3 / LITERS_PER_CUBIC_METER\nDEG_TO_RAD = 1.0 / 180.0 * math.pi  # Degrees to Radians\nRAD_TO_REV = 1.0 / (2 * math.pi)  # Radians to Revolutions\nATOMIC_MASS_CONSTANT_MEV = scipy.constants.physical_constants[\"atomic mass constant energy equivalent in MeV\"][0]\nABS_REACTIVITY_TO_PCM = 1.0e5\nPA_PER_ATM = scipy.constants.atm\nPA_PER_MMHG = 133.322368421053\nPA_PER_BAR = 100000.0\nCURIE_PER_BECQUEREL = 1.0 / 3.7e10\nMICROCURIES_PER_BECQUEREL = CURIE_PER_BECQUEREL * 1e-6\nG_PER_CM3_TO_KG_PER_M3 = 1000.0\n\n# constants\nASCII_MIN_CHAR = 44  # First char allowed in various FORTRAN inputs\nASCII_LETTER_A = 65\nASCII_LETTER_Z = 90\nASCII_LETTER_a = 97\nASCII_ZERO = 48\nTRACE_NUMBER_DENSITY = 1e-50\nMIN_FUEL_HM_MOLES_PER_CC = 1e-10\n\n# More than 10 decimals can create floating point comparison problems in MCNP and DIF3D\nFLOAT_DIMENSION_DECIMALS = 8\nEFFECTIVELY_ZERO = 10.0 ** (-1 * FLOAT_DIMENSION_DECIMALS)\n\n# STEFAN_BOLTZMANN_CONSTANT is the constant for radiation heat transfer [W m^-2 K^-4]\nSTEFAN_BOLTZMANN_CONSTANT = 5.67e-8  # W/m^2-K^4\n\n# GRAVITY is the acceleration due to gravity at the Earth's surface in [m s^-2].\nGRAVITY = 9.80665\n\n# :code:`REYNOLDS_LAMINAR` is the Reynolds number below which a duct flow will exhibit \"laminar\"\n# conditions. 
Reynolds numbers greater than :code:`REYNOLDS_LAMINAR` will involve flows that are\n# \"transitional\" or \"turbulent\".\nREYNOLDS_LAMINAR = 2100.0\n\n# :code:`REYNOLDS_TURBULENT` is the Reynolds number above which a duct flow will exhibit \"turbulent\"\n# conditions. Reynolds numbers lower than :code:`REYNOLDS_TURBULENT` will involve flows that are\n# \"transitional\" or \"laminar\".\nREYNOLDS_TURBULENT = 4000.0\n\n\ndef getTk(Tc=None, Tk=None):\n    \"\"\"\n    Return a temperature in Kelvin, given a temperature in Celsius or Kelvin.\n\n    Returns\n    -------\n    T : float\n        temperature in Kelvin\n\n    Raises\n    ------\n    ValueError\n        If neither or both of Tc and Tk are provided.\n    \"\"\"\n    if not ((Tc is not None) ^ (Tk is not None)):\n        raise ValueError(f\"Cannot produce T in K from Tc={Tc} and Tk={Tk}. Please supply a single temperature.\")\n    return float(Tk) if Tk is not None else Tc + C_TO_K\n\n\ndef getTc(Tc=None, Tk=None):\n    \"\"\"\n    Return a temperature in Celsius, given a temperature in Celsius or Kelvin.\n\n    Returns\n    -------\n    T : float\n        temperature in Celsius\n\n    Raises\n    ------\n    ValueError\n        If neither or both of Tc and Tk are provided.\n    \"\"\"\n    if not ((Tc is not None) ^ (Tk is not None)):\n        raise ValueError(f\"Cannot produce T in C from Tc={Tc} and Tk={Tk}. 
Please supply a single temperature.\")\n    return float(Tc) if Tc is not None else Tk - C_TO_K\n\n\ndef getTf(Tc=None, Tk=None):\n    \"\"\"\n    Return a temperature in Fahrenheit, given a temperature in Celsius or Kelvin.\n\n    Returns\n    -------\n    T : float\n        temperature in Fahrenheit\n\n    Raises\n    ------\n    TypeError\n        The temperature was not provided as an int or float.\n    \"\"\"\n    return 1.8 * getTc(Tc, Tk) + 32.0\n\n\ndef getTemperature(Tc=None, Tk=None, tempUnits=None):\n    \"\"\"\n    Returns the temperature in the prescribed temperature units.\n\n    Parameters\n    ----------\n    Tc : float\n        temperature in Celsius\n    Tk : float\n        temperature in Kelvin\n    tempUnits : str\n        a flag for the temperature units of the correlation 'Tk', 'K', 'Kelvin',\n        'Tc', 'C', or 'Celsius' are acceptable.\n\n    Returns\n    -------\n    T : float\n        temperature in units defined by the tempUnits flag\n\n    Raises\n    ------\n    ValueError\n        When an invalid tempUnits input is provided.\n    \"\"\"\n    if tempUnits in [\"Tk\", \"K\", \"Kelvin\"]:\n        return getTk(Tc=Tc, Tk=Tk)\n    if tempUnits in [\"Tc\", \"C\", \"Celsius\"]:\n        return getTc(Tc=Tc, Tk=Tk)\n    raise ValueError(\"Invalid inputs provided. 
Check docstring.\")\n\n\ndef getTmev(Tc=None, Tk=None):\n    Tk = getTk(Tc, Tk)\n    return BOLTZMAN_CONSTANT * Tk\n\n\ndef convertMmhgToPascal(mmhg):\n    \"\"\"Converts pressure from mmhg to pascal.\n\n    Parameters\n    ----------\n    mmhg : float\n        pressure in mmhg\n\n    Returns\n    -------\n    pascal : float\n        pressure in pascal\n    \"\"\"\n    return mmhg * PA_PER_MMHG\n\n\ndef convertBarToPascal(pBar):\n    \"\"\"Converts pressure from bar to pascal.\n\n    Parameters\n    ----------\n    pBar : float\n        pressure in bar\n\n    Returns\n    -------\n    pascal : float\n        pressure in pascal\n    \"\"\"\n    return pBar * PA_PER_BAR\n\n\ndef convertAtmToPascal(pAtm):\n    \"\"\"Converts pressure from atmospheres to pascal.\n\n    Parameters\n    ----------\n    pAtm : float\n        pressure in atmospheres\n\n    Returns\n    -------\n    pascal : float\n        pressure in pascal\n    \"\"\"\n    return pAtm * PA_PER_ATM\n\n\nPRESSURE_CONVERTERS = {\n    \"Pa\": lambda pa: pa,\n    \"bar\": convertBarToPascal,\n    \"mmHg\": convertMmhgToPascal,\n    \"atm\": convertAtmToPascal,\n}\n\n\ndef sanitizeAngle(theta):\n    \"\"\"\n    Returns an angle between 0 and 2pi.\n\n    Parameters\n    ----------\n    theta : float\n        an angle\n\n    Returns\n    -------\n    theta : float\n        an angle between 0 and 2*pi\n    \"\"\"\n    if theta < 0:\n        theta = theta + (1 + -1 * int(theta / (math.pi * 2.0))) * math.pi * 2.0\n\n    if theta > 2.0 * math.pi:\n        theta = theta - int(theta / (math.pi * 2.0)) * math.pi * 2.0\n\n    return theta\n\n\ndef getXYLineParameters(theta, x=0, y=0):\n    \"\"\"\n    Returns parameters A B C D for a plane in the XY direction.\n\n    Parameters\n    ----------\n    theta : float\n        angle above x-axis in radians\n\n    x : float\n        x coordinate\n\n    y : float\n        y coordinate\n\n    Returns\n    -------\n    A : float\n        line coefficient\n\n    B : float\n        
line coefficient\n\n    C : float\n        line coefficient\n\n    D : float\n        line coefficient\n\n    Notes\n    -----\n    the line is in the form of A*x + B*y + C*z - D = 0 -- this corresponds to a MCNP arbitrary line equation\n    \"\"\"\n    theta = sanitizeAngle(theta)\n\n    if math.fabs(theta) < 1e-10 or math.fabs(theta - math.pi) < 1e-10 or math.fabs(theta - 2.0 * math.pi) < 1e-10:\n        # this is a py plane so y is always y\n        return 0.0, 1.0, 0.0, y\n\n    if math.fabs(theta - math.pi / 2.0) > 1e-10 or math.fabs(theta - 3 * math.pi / 2.0) > 1e-10:\n        # this is a px plane so x is always x\n        return 1.0, 0.0, 0.0, x\n\n    A = -1.0 / math.cos(theta)\n    B = 1.0 / math.sin(theta)\n    C = 0.0\n    D = A * x + B * y\n\n    return A, B, C, D\n"
  },
  {
    "path": "doc/.static/__init__.py",
    "content": "# Copyright 2024 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Helper tools to build the ARMI docs.\"\"\"\n"
  },
  {
    "path": "doc/.static/automateScr.py",
    "content": "# Copyright 2025 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nTool to build SCR lists to be added to the RST docs.\n\nThis script is meant to be called by the docs build process, to help automate the process of generating lists of SCRs.\n\"\"\"\n\nimport argparse\nimport os\nimport subprocess\n\nimport requests\n\nPR_TYPES = {\n    \"docs\": \"Documentation-Only Changes\",\n    \"features\": \"Code Changes, Features\",\n    \"fixes\": \"Code Changes, Bugs and Fixes\",\n    \"trivial\": \"Code Changes, Maintenance, or Trivial\",\n}\n\n\ndef main():\n    \"\"\"NOTE: This is not used during CI, but exists only for testing and dev purposes.\"\"\"\n    # Instantiate the parser\n    parser = argparse.ArgumentParser(description=\"An ARMI custom doc tool to build the SCR for this release.\")\n\n    # Required positional argument\n    parser.add_argument(\"pastCommit\", help=\"The commit hash of the last release.\")\n    parser.add_argument(\n        \"prNum\", nargs=\"?\", type=int, default=-1, help=\"The current PR number (use -1 if there is no PR).\"\n    )\n\n    # Parse the command line\n    args = parser.parse_args()\n    pastCommit = args.pastCommit\n    prNum = int(args.prNum)\n\n    buildScrListing(pastCommit, prNum)\n\n\ndef _findOneLineData(lines: list, prNum: str, key: str):\n    \"\"\"Helper method to find a single line in a GH CLI PR dump.\n\n    Parameters\n    ----------\n    lines : list\n        The GH CLI 
dump of a PR, split into lines for convenience.\n    prNum : str\n        The GitHub PR number in question.\n    key : str\n        The substring that the line in questions starts with.\n\n    Returns\n    -------\n    str\n        Data pulled for the key in question.\n    \"\"\"\n    for line in lines:\n        if line.startswith(key):\n            return line.split(key)[1].strip()\n\n    print(f\"WARNING: SCR: Could not find {key} in PR#{prNum}.\")\n    return \"TBD\"\n\n\ndef _buildScrLine(prNum: str, ghUsers: dict):\n    \"\"\"Helper method to build a single RST list item in an SCR.\n\n    Parameters\n    ----------\n    prNum : str\n        The GitHub PR number in question.\n    ghUsers : dict\n        A mapping from GitHub user names to real names, where possible.\n\n    Returns\n    -------\n    str\n        RST-formatted list item.\n    \"\"\"\n    txt = subprocess.check_output([\"gh\", \"pr\", \"view\", prNum]).decode(\"utf-8\")\n    lines = [ln.strip() for ln in txt.split(\"\\n\") if ln.strip()]\n\n    # grab title\n    title = _findOneLineData(lines, prNum, \"title:\")\n\n    # grab author\n    author = _findOneLineData(lines, prNum, \"author:\")\n    author = ghUsers.get(author, author)\n\n    # grab reviewer(s)\n    reviewers = _findOneLineData(lines, prNum, \"reviewers:\")\n    reviewers = [rr.split(\"(\")[0].strip() for rr in reviewers.split(\",\")]\n    reviewers = [ghUsers.get(rr, rr) for rr in reviewers]\n    reviewerHeader = \"Reviewer(s)\" if len(reviewers) > 1 else \"Reviewer\"\n    reviewers = \", \".join(reviewers)\n\n    # grab one-line description\n    scrType = _findOneLineData(lines, prNum, \"Change Type:\")\n    if scrType not in PR_TYPES:\n        print(f\"WARNING: SCR: Invalid change type '{scrType}' for PR#{prNum}\")\n        scrType = \"trivial\"\n\n    # grab one-line description\n    desc = _findOneLineData(lines, prNum, \"One-Sentence Rationale:\")\n\n    # grab impact on requirements\n    impact = _findOneLineData(lines, prNum, 
\"One-line Impact on Requirements:\")\n\n    # build RST list item, representing this data\n    tab = \"  \"\n    content = f\"* PR #{prNum}: {title}\\n\\n\"\n    content += f\"{tab}* Rationale: {desc}\\n\"\n    content += f\"{tab}* Impact on Requirements: {impact}\\n\"\n    content += f\"{tab}* Author: {author}\\n\"\n    content += f\"{tab}* {reviewerHeader}: {reviewers}\\n\\n\"\n\n    return content, scrType\n\n\ndef _buildHeader(scrType: str):\n    \"\"\"Build a RST list header for an SCR listing.\n\n    Parameters\n    ----------\n    scrType : str\n        This has to be one of the defined SCR types: features, fixes, trivial, docs\n\n    Returns\n    -------\n    str\n        RST-formatted header title.\n    \"\"\"\n    return f\"\\nList of SCRs of type: {PR_TYPES[scrType]}\\n\\n\"\n\n\ndef isMainPR(prNum: int):\n    \"\"\"Determine if this PR is into the ARMI main branch.\n\n    Parameters\n    ----------\n    prNum : int\n        The number of this PR.\n\n    Returns\n    -------\n    bool\n        True if this PR is merging INTO the ARMI main branch. 
Default is True.\n    \"\"\"\n    try:\n        url = f\"https://github.com/terrapower/armi/pull/{prNum}\"\n        r = requests.get(url)\n        return \"terrapower/armi:main\" in r.text\n    except Exception as e:\n        print(f\"WARNING: SCR: Failed to determine if PR#{prNum} merged into the main branch: {e}\")\n        return True\n\n\ndef parseAuthorsFile():\n    \"\"\"\n    Parse the ARMI \"AUTHORS\" file to get a mapping from GitHub usernames to human names.\n\n    This is a custom \"data format\" where each line looks like::\n\n        Bob Ross (the-painter-bob-ross-987)\n        Emmy Noether (crazy-smart-emmy-noether-987, super-smart-dr-nother-123)\n\n    Returns\n    -------\n    dict\n        Mapping from GitHub usernames to real / human author names.\n    \"\"\"\n    ghUsers = {}\n\n    # loop through the lines in the ARMI AUTHORS file\n    filePath = os.path.join(os.path.dirname(__file__), \"..\", \"..\", \"AUTHORS\")\n    with open(filePath, \"r\") as f:\n        for ln in f.readlines():\n            line = ln.strip()\n            if line.startswith(\"#\") or not len(line):\n                # ignore comments and blank lines\n                continue\n            elif \"(\" not in line:\n                # ignore authors that don't list a GitHub username\n                continue\n\n            # finally, map one or multiple GH usernames to the author name\n            author, usernames = line.split(\"(\")\n            for user in usernames.rstrip(\")\").split(\",\"):\n                ghUsers[user.strip()] = author.strip()\n\n    return ghUsers\n\n\ndef buildScrListing(pastCommit: str, thisPrNum: int = -1):\n    \"\"\"Helper method to build an RST-formatted lists of all SCRs, by category.\n\n    Parameters\n    ----------\n    pastCommit : str\n        The shortened commit hash for a past reference commit. (This is the last commit of the last release. It will not\n        be included.)\n    thisPrNum : int\n        The number of this PR. 
If this is not a PR, the default is -1.\n\n    Returns\n    -------\n    str\n        RST-formatted list content.\n    \"\"\"\n    # 1. Get a list of all the commits between this one and the reference\n    txt = \"\"\n    for num in range(100, 2001, 100):\n        print(f\"Looking back {num} commits...\")\n        gitCmd = f\"git log -n {num} --pretty=oneline --all\".split(\" \")\n        txt = subprocess.check_output(gitCmd).decode(\"utf-8\")\n        if pastCommit in txt:\n            break\n\n    if not txt or pastCommit not in txt:\n        return f\"Could not find commit in git log: {pastCommit}\"\n\n    # 2. Parse commit history to get the PR numbers\n    prNums = set()\n    if thisPrNum > 0:\n        # in case the docs are not being built from a PR\n        prNums.add(thisPrNum)\n\n    for ln in txt.split(\"\\n\"):\n        line = ln.strip()\n        if pastCommit in line:\n            # do not include the reference commit\n            break\n        elif line.endswith(\")\") and \"(#\" in line:\n            # get the PR number\n            try:\n                prNums.add(int(line.split(\"(#\")[-1].split(\")\")[0]))\n            except ValueError:\n                # This is not a PR. Someone unwisely put some trash in the commit message.\n                pass\n\n    # 3. Build a list of GitHub Users\n    ghUsers = parseAuthorsFile()\n\n    # 4. Build a list for each SCR\n    data = {\"docs\": [], \"features\": [], \"fixes\": [], \"trivial\": []}\n    for prNum in sorted(prNums):\n        if not isMainPR(prNum):\n            continue\n\n        row, scrType = _buildScrLine(str(prNum), ghUsers)\n        data[scrType].append(row)\n\n    # 5. 
Build final RST for all four lists, to return to the docs\n    content = \"\"\n    for typ in [\"features\", \"fixes\", \"trivial\", \"docs\"]:\n        if len(data[typ]):\n            print(f\"Found {len(data[typ])} SCRs in the {typ} category\")\n            content += _buildHeader(typ)\n            for line in data[typ]:\n                content += line\n            content += \"\\n\\n\"\n\n    content += \"\\n\\n\"\n\n    print(content)\n    return content\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "doc/.static/cleanup_test_results.py",
    "content": "# Copyright 2025 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Docs build helper script, used to clean up the test-results file so it is easier to read in HTML and PDF.\"\"\"\n\nfrom sys import argv\n\nCLASS_NAME = 'classname=\"'\nSKIPPED = \"</skipped>\"\n\n\ndef main():\n    assert len(argv) == 2, \"No input file provided\"\n    filePath = argv[1]\n    cleanup_test_results(filePath)\n\n\ndef cleanup_test_results(filePath: str):\n    \"\"\"Clean up the test-results file so it is easier to read in HTML and PDF.\n\n    Parameters\n    ----------\n    filePath : str\n        Path to junit pytest test results XML file.\n    \"\"\"\n    txt = open(filePath, \"r\").read()\n    bits = txt.split(CLASS_NAME)\n\n    newTxt = bits[0]\n    for i in range(1, len(bits)):\n        # split the line up into bits, using quotes\n        assert '\"' in bits[i], f\"Something is wrong with the file: {bits[i]}\"\n        row = bits[i].split('\"')\n\n        # just grab the test class name, not the whole import path\n        row[0] = row[0].split(\".\")[-1]\n\n        # skipped tests include a long file path we want to remove\n        if row[-1].startswith(\">/home/runner/\") and SKIPPED in row[-1]:\n            row[-1] = \">\" + SKIPPED + row[-1].split(SKIPPED)[-1]\n\n        # Add the classname we split on back into this line\n        newTxt += CLASS_NAME + '\"'.join(row)\n\n    with open(filePath, \"w\") as f:\n        
f.write(newTxt)\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "doc/.static/css/theme_fixes.css",
    "content": "@import 'theme.css';\n\n/* override table width restrictions */\n@media screen and (min-width: 767px) {\n\n   .wy-table-responsive table td {\n      white-space: normal !important;\n   }\n\n   .wy-table-responsive {\n      overflow: visible !important;\n   }\n}\n\nimg {\n   max-width: 100% !important;\n}\n\n/* sphinx-needs */\ntable.need.need.need > tbody > tr > td {\n    padding: 0.5em .5em !important;\n}\n\n.rst-content .line-block {\n    line-height: 1em !important;\n}\n\n/* log files and raw data dumps */\n.rst-content .linenodiv pre, .rst-content div[class^=highlight] pre, .rst-content pre.literal-block {\n    line-height: 12px !important;\n    font-size: 10pt !important;\n}\n\n/* long tables */\n.rst-content table.docutils {\n    margin: 0;\n    padding: 0;\n    font-size: 10pt !important;\n}\n\n.rst-content table.docutils td, .rst-content table.docutils th, .rst-content table.field-list td, .rst-content table.field-list th, .wy-table td, .wy-table th {\n    margin: 0;\n    padding: 1px;\n}\n\nhtml.writer-html5 .rst-content table.docutils td>p, html.writer-html5 .rst-content table.docutils th>p {\n    font-size: 10px !important;\n}\n\n/* code snippets */\n.rst-content .linenodiv pre, .rst-content div[class^=highlight] pre, .rst-content pre.literal-block {\n    ssp-tiny !important;\n}\n\n/* move equation numbers to right side of the equation */\nspan.eqno {\n    float: right;\n}\n\n/* Style test needs by their result */\ntr.needs_passed td {\n    background-color: rgba(0,250,0,0.2) !important;\n}\n\ntr.needs_failure td {\n    background-color: rgba(250,0,0,0.2) !important;\n}\n\ntr.needs_skipped td {\n    background-color: rgba(0,0,0,0.1) !important;\n}"
  },
  {
    "path": "doc/.static/dochelpers.py",
    "content": "# Copyright 2024 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Helpers for Sphinx documentation.\"\"\"\n\n\ndef escapeSpecialCharacters(s):\n    \"\"\"Escapes RST special characters in inputted string.\n\n    Special characters include: ``*|_``. More to be added when found troublesome.\n\n    Parameters\n    ----------\n    s : str\n        String with characters to be escaped.\n\n    Returns\n    -------\n    str\n        Input string with special characters escaped.\n    \"\"\"\n    news = s[:]\n    for char in [\"*\", \"|\", \"_\"]:\n        news = news.replace(char, \"\\\\\" + char)\n    return news\n\n\ndef createTable(rst_table, caption=None, label=None, align=None, widths=None, width=None):\n    \"\"\"\n    This method is available within ``.. exec::``. It allows someone to create a table with a\n    caption.\n\n    The ``rst_table``\n    \"\"\"\n    rst = [\".. 
table:: {}\".format(caption or \"\")]\n    if label:\n        rst += [\"    :name: {}\".format(label)]\n    if align:\n        rst += [\"    :align: {}\".format(align)]\n    if width:\n        rst += [\"    :width: {}\".format(width)]\n    if widths:\n        rst += [\"    :widths: {}\".format(widths)]\n    rst += [\"\"]\n    rst += [\"    \" + line for line in rst_table.split(\"\\n\")]\n    return \"\\n\".join(rst)\n\n\ndef createListTable(rows, caption=None, align=None, widths=None, width=None, klass=None):\n    \"\"\"Take a list of data, and produce an RST-type string for a list-table.\n\n    Parameters\n    ----------\n    rows: list\n        List of input data (first row is the header).\n    align: str\n        \"left\", \"center\", or \"right\"\n    widths: str\n        \"auto\", \"grid\", or a list of integers\n    width: str\n        length or percentage of the line, surrounded by backticks\n    klass: str\n        Should be \"class\", but that is a reserved keyword.\n        \"longtable\", \"special\", or something custom\n\n    Returns\n    -------\n    str\n        RST list-table string\n    \"\"\"\n    # we need valid input data\n    assert len(rows) > 1, \"Not enough input data.\"\n    len0 = len(rows[0])\n    for row in rows[1:]:\n        assert len(row) == len0, \"Rows aren't all the same length.\"\n\n    # build the list-table header block\n    rst = [\".. 
list-table:: {}\".format(caption or \"\")]\n    rst += [\"    :header-rows: 1\"]\n    if klass:\n        rst += [\"    :class: {}\".format(klass)]\n    if align:\n        rst += [\"    :align: {}\".format(align)]\n    if width:\n        rst += [\"    :width: {}\".format(width)]\n    if widths:\n        rst += [\"    :widths: \" + \" \".join([str(w) for w in widths])]\n    rst += [\"\"]\n\n    # build the list-table data\n    for row in rows:\n        rst += [f\"    * - {row[0]}\"]\n        rst += [f\"      - {word}\" for word in row[1:]]\n\n    return \"\\n\".join(rst)\n\n\ndef generateParamTable(klass, fwParams, app=None):\n    \"\"\"\n    Return a string containing one or more restructured text list tables containing parameter\n    descriptions for the passed ArmiObject class.\n\n    Parameters\n    ----------\n    klass : ArmiObject subclass\n        The Class for which parameter tables should be generated\n    fwParams : ParameterDefinitionCollection\n        A parameter definition collection containing the parameters that are always defined for the\n        passed ``klass``. The rest of the parameters come from the plugins registered with the\n        passed ``app``\n    app : App, optional\n        The ARMI-based application to draw plugins from.\n\n    Returns\n    -------\n    str\n        RST-formatted string table\n    \"\"\"\n    from armi import apps\n\n    if app is None:\n        app = apps.App()\n\n    defs = {None: fwParams}\n\n    app = apps.App()\n    for plugin in app.pluginManager.get_plugins():\n        plugParams = plugin.defineParameters()\n        if plugParams is not None:\n            pDefs = plugParams.get(klass, None)\n            if pDefs is not None:\n                defs[plugin] = pDefs\n\n    headerContent = \"\"\"\n.. container:: break_before ssp-landscape\n\n    .. 
list-table:: {} Parameters from {{}}\n        :class: ssp-tiny\n        :widths: 30 40 30\n        :header-rows: 1\n\n        * - Name\n          - Description\n          - Units\n    \"\"\".format(klass.__name__)\n\n    content = []\n\n    for plugin, pdefs in defs.items():\n        srcName = plugin.__name__ if plugin is not None else \"Framework\"\n        content.append(f\".. _{srcName}-{klass.__name__}-param-table:\")\n        pluginContent = headerContent.format(srcName)\n        for pd in pdefs:\n            pluginContent += f\"\"\"    * - {pd.name}\n          - {escapeSpecialCharacters(str(pd.description))}\n          - {escapeSpecialCharacters(pd.units)}\n    \"\"\"\n        content.append(pluginContent + \"\\n\")\n\n    return \"\\n\".join(content)\n"
  },
  {
    "path": "doc/.static/looseCouplingIllustration.dot",
    "content": "digraph looseCoupling {\n        label=\"Loose Coupling\"\n        layout=\"dot\";\n        rankdir=TB;\n        a [label=\"Temp.\", shape=\"Rec\", style=\"rounded,filled\", color=\"white\"]\n        a1 [label=\"Temp.\", shape=\"Rec\", style=\"rounded,filled\", color=\"white\"]\n        a2 [label=\"Temp.\", shape=\"Rec\", style=\"rounded,filled\", color=\"white\"]\n        b [label=\"Power\", shape=\"Rec\", style=\"rounded,filled\", color=\"white\"]\n        b1 [label=\"Power\", shape=\"Rec\", style=\"rounded,filled\", color=\"white\"]\n        b2 [label=\"Power\", shape=\"Rec\", style=\"rounded,filled\", color=\"white\"]\n        c [label=\"Cross Sections\", shape=\"Rec\", style=\"rounded,filled\", color=\"white\"]\n        c1 [label=\"Cross Sections\", shape=\"Rec\", style=\"rounded,filled\", color=\"white\"]\n        c2 [label=\"Cross Sections\", shape=\"Rec\", style=\"rounded,filled\", color=\"white\"]\n        d [label=\"...\", shape=\"plaintext\"]\n\n        subgraph cluster_c00n00{\n            label=\"Cycle 0, Node 0\";\n            style=\"rounded,filled\";\n            color=lightblue;\n            c -> b\n            b -> a [constraint=false]\n        }\n        a -> c1 //[constraint=false]\n        subgraph cluster_c00n01{\n            label=\"Cycle 0, Node 1\"\n            style=\"rounded,filled\";\n            color=lightblue;\n            c1 -> b1\n            b1 -> a1 [constraint=false]\n        }\n        a1 -> c2 //[constraint=false]\n        subgraph cluster_c00n02{\n            label=\"Cycle 0, Node 2\"\n            style=\"rounded,filled\";\n            color=lightblue;\n            c2 -> b2\n            b2 -> a2 [constraint=false]\n        }\n        a2 -> d //[constraint=false]\n}"
  },
  {
    "path": "doc/.static/tightCouplingIllustration.dot",
    "content": "digraph tightCoupling {\n        label=\"Tight Coupling\"\n        layout=\"dot\";\n        rankdir=TB;\n        e [label=\"Converged?\", shape=\"diamond\", style=\"filled\", color=\"white\"]\n        e1 [label=\"Converged?\", shape=\"diamond\", style=\"filled\", color=\"white\"]\n        e2 [label=\"Converged?\", shape=\"diamond\", style=\"filled\", color=\"white\"]\n        a [label=\"Temp.\", shape=\"Rectangle\", style=\"rounded,filled\", color=\"white\"]\n        a1 [label=\"Temp.\", shape=\"Rectangle\", style=\"rounded,filled\", color=\"white\"]\n        a2 [label=\"Temp.\", shape=\"Rectangle\", style=\"rounded,filled\", color=\"white\"]\n        b [label=\"Power\", shape=\"Rectangle\", style=\"rounded,filled\", color=\"white\"]\n        b1 [label=\"Power\", shape=\"Rectangle\", style=\"rounded,filled\", color=\"white\"]\n        b2 [label=\"Power\", shape=\"Rectangle\", style=\"rounded,filled\", color=\"white\"]\n        c [label=\"Cross Sections\", shape=\"Rectangle\", style=\"rounded,filled\", color=\"white\"]\n        c1 [label=\"Cross Sections\", shape=\"Rectangle\", style=\"rounded,filled\", color=\"white\"]\n        c2 [label=\"Cross Sections\", shape=\"Rectangle\", style=\"rounded,filled\", color=\"white\"]\n        d [label=\"...\", shape=\"plaintext\"]\n\n        subgraph cluster_c00n00{\n            label=\"Cycle 0, Node 0\";\n            style=\"rounded,filled\";\n            color=lightblue;\n            c -> b \n            b -> a \n            a -> e [constraint=false]\n            e -> c [constraint=false, label=\"no\"]\n        }\n        e -> c1 [label=\"yes\"]\n        subgraph cluster_c00n01{\n            label=\"Cycle 0, Node 1\"\n            style=\"rounded,filled\";\n            color=lightblue;\n            c1 -> b1\n            b1 -> a1\n            a1 -> e1 [constraint=false]\n            e1 -> c1 [constraint=false, label=\"no\"]\n        }\n        e1 -> c2 [label=\"yes\"]\n        subgraph cluster_c00n02{\n           
 label=\"Cycle 0, Node 2\"\n            style=\"rounded,filled\";\n            color=lightblue;\n            c2 -> b2\n            b2 -> a2\n            a2 -> e2 [constraint=false]\n            e2 -> c2 [constraint=false, label=\"no\"]\n        }\n        e2 -> d [label=\"yes\"]\n}\n"
  },
  {
    "path": "doc/Makefile",
    "content": "# Minimal makefile for Sphinx documentation for BASH Linux\n\n# You can set these variables from the command line.\nSPHINXOPTS    =\nSPHINXBUILD   = sphinx-build\nSOURCEDIR     = .\nBUILDDIR      = _build\n\n# Put it first so that \"make\" without argument is like \"make help\".\nhelp:\n\t@$(SPHINXBUILD) -M help \"$(SOURCEDIR)\" \"$(BUILDDIR)\" $(SPHINXOPTS) $(O)\n\n.PHONY: help Makefile\n\n# Catch-all target: route all unknown targets to Sphinx using the new \"make mode\" option.\n# $(O) is meant as a shortcut for $(SPHINXOPTS).\n%: Makefile\n\t@$(SPHINXBUILD) -M $@ \"$(SOURCEDIR)\" \"$(BUILDDIR)\" $(SPHINXOPTS) $(O)\n"
  },
  {
    "path": "doc/__init__.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"This is the documentation package: not part of the codebase.\"\"\"\n"
  },
  {
    "path": "doc/conf.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nARMI documentation build configuration file.\n\nThis file is execfile()d with the current directory set to its containing dir.\n\nNote that not all possible configuration values are present in this autogenerated file.\n\nAll configuration values have a default; values that are commented out serve to show the default.\n\"\"\"\n\n# ruff: noqa: E402\nimport datetime\nimport inspect\nimport os\nimport pathlib\nimport re\nimport shutil\nimport subprocess\nimport sys\nimport warnings\n\nimport sphinx_rtd_theme  # noqa: F401\nfrom docutils import nodes, statemachine\nfrom docutils.parsers.rst import Directive, directives\nfrom sphinx.domains.python import PythonDomain\nfrom sphinx_gallery.sorting import ExplicitOrder, FileNameSortKey\nfrom sphinx_needs.api import add_dynamic_function\n\nfrom doc.getTestResults import getTestResult\n\n# handle python import locations for this execution\nPYTHONPATH = os.path.abspath(\"..\")\nsys.path.insert(0, PYTHONPATH)\n# Also add to os.environ which will be used by the nbsphinx extension environment\nos.environ[\"PYTHONPATH\"] = PYTHONPATH\n# Add dochelpers.py and automateScr.py from doc/.static/ directory\nsys.path.insert(0, \".static\")\n\nfrom armi import apps, context, disableFutureConfigures, meta\nfrom armi import configure as armi_configure\nfrom armi.bookkeeping import tests as bookkeepingTests\nfrom armi.utils import 
safeCopy\n\ncontext.Mode.setMode(context.Mode.BATCH)\n\n# Configure the baseline framework \"App\" for framework doc building\narmi_configure(apps.App())\ndisableFutureConfigures()\n\nAPIDOC_REL = \".apidocs\"\nSOURCE_DIR = os.path.join(\"..\", \"armi\")\nSTATIC_DIR = \".static\"\n_TUTORIAL_FILES = [fName for fName in bookkeepingTests.TUTORIAL_FILES if \"ipynb\" not in fName]\n\n\nclass PatchedPythonDomain(PythonDomain):\n    def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode):\n        if \"refspecific\" in node:\n            del node[\"refspecific\"]\n        return super(PatchedPythonDomain, self).resolve_xref(env, fromdocname, builder, typ, target, node, contnode)\n\n\nclass ExecDirective(Directive):\n    \"\"\"\n    Execute the specified python code and insert the output into the document.\n\n    The code is used as the body of a method, and must return a ``str``. The string result is interpreted as\n    reStructuredText.\n\n    Error handling informed by https://docutils.sourceforge.io/docs/howto/rst-directives.html#error-handling\n    The self.error function should both inform the documentation builder of the error and also insert an error into the\n    built documentation.\n\n    Warning\n    -------\n    This only works on a single node in the doctree, so the rendered code may not contain any new section names or\n    labels. They will result in ``WARNING: Unexpected section title`` warnings.\n    \"\"\"\n\n    has_content = True\n\n    def run(self):\n        try:\n            # clean the content, then put back into a list\n            cleancode = inspect.cleandoc(\"\\n\".join(self.content)).split(\"\\n\")\n            code = \"def usermethod():\\n    \" + \"\\n    \".join(cleancode)\n            globals = {}\n            exec(code, globals)\n\n            result = globals[\"usermethod\"]()\n\n            if result is None:\n                raise self.error(\n                    \"Return value needed! The body of your `.. 
exec::` is a function call that must return a value.\"\n                )\n\n            para = nodes.container()\n            lines = statemachine.StringList(result.split(\"\\n\"))\n            self.state.nested_parse(lines, self.content_offset, para)\n            return [para]\n        except Exception as e:\n            docname = self.state.document.settings.env.docname\n            raise self.error(f\"Unable to execute embedded doc code at {docname}:{self.lineno}\\n{str(e)}\")\n\n\nclass PyReverse(Directive):\n    \"\"\"Runs pyreverse to generate UML for specified module name and options.\n\n    The directive accepts the same arguments as pyreverse, except you should not specify ``--project`` or ``-o`` (output\n    format). These are automatically specified.\n\n    If you pass ``-c`` to this, the figure generated is forced to be the className.png like. For .gitignore purposes,\n    this is a pain. Thus, we auto-prefix ALL images generated by this directive with ``pyrev_``.\n    \"\"\"\n\n    has_content = True\n    required_arguments = 1\n    optional_arguments = 50\n    option_spec = {\n        \"alt\": directives.unchanged,\n        \"height\": directives.length_or_percentage_or_unitless,\n        \"width\": directives.length_or_percentage_or_unitless,\n        \"align\": lambda arg: directives.choice(arg, (\"left\", \"right\", \"center\")),\n        \"filename\": directives.unchanged,\n    }\n\n    def run(self):\n        try:\n            args = list(self.arguments)\n            args.append(\"--project\")\n            args.append(f\"{args[0]}\")\n            args.append(\"-opng\")\n\n            # NOTE: cannot use \"pylint.pyreverse.main.Run\" because it calls `sys.exit`.\n            fig_name = self.options.get(\"filename\", \"classes_{}.png\".format(args[0]))\n            command = [sys.executable, \"-m\", \"pylint.pyreverse.main\"]\n            print(\"Running {}\".format(command + args))\n            env = dict(os.environ)\n            # apply any 
runtime path mods to the pythonpath env variable (e.g. sys.path mods made during doc confs)\n            env[\"PYTHONPATH\"] = os.pathsep.join(sys.path)\n            subprocess.check_call(command + args, env=env)\n\n            try:\n                os.remove(os.path.join(APIDOC_REL, fig_name))\n            except OSError:\n                pass\n\n            shutil.move(fig_name, APIDOC_REL)\n            # add .gitignore helper prefix\n            shutil.move(\n                os.path.join(APIDOC_REL, fig_name),\n                os.path.join(APIDOC_REL, f\"pyr_{fig_name}\"),\n            )\n            new_content = [f\".. figure:: /{APIDOC_REL}/pyr_{fig_name}\"]\n\n            # assume we don't need the packages, and delete.\n            try:\n                os.remove(\"packages_{}.png\".format(args[0]))\n            except OSError:\n                pass\n\n            # pass the other args through (figure args like align)\n            for opt, val in self.options.items():\n                if opt in (\"filename\",):\n                    continue\n                new_content.append(\"    :{}: {}\\n\".format(opt, val))\n\n            new_content.append(\"\\n\")\n\n            for line in self.content:\n                new_content.append(\"    \" + line)\n\n            para = nodes.container()\n            lines = statemachine.StringList(new_content)\n            self.state.nested_parse(lines, self.content_offset, para)\n            return [para]\n        except Exception as e:\n            docname = self.state.document.settings.env.docname\n            # add the error message directly to the built documentation and also tell the builder\n            raise self.error(\n                \"Unable to execute embedded doc code at {}:{} ... 
{}\\n{}\".format(\n                    docname, self.lineno, datetime.datetime.now(), str(e)\n                )\n            )\n\n\ndef autodoc_skip_member_handler(app, what, name, obj, skip, options):\n    \"\"\"Manually exclude certain methods/functions from docs.\"\"\"\n    # exclude special methods from unittest\n    excludes = [\"setUp\", \"setUpClass\", \"tearDown\", \"tearDownClass\"]\n\n    try:\n        # special logic to fix inherited docstrings from yamlize.Attribute\n        s = str(obj).strip()\n        if s.startswith(\"<Attribute\") and \"_yamlized_\" in s:\n            return True\n    except Exception:\n        pass\n\n    return name.startswith(\"_\") or name in excludes\n\n\ndef setup(app):\n    \"\"\"Method to make `make html` generate api documentation.\"\"\"\n    app.connect(\"autodoc-skip-member\", autodoc_skip_member_handler)\n    app.add_domain(PatchedPythonDomain, override=True)\n    app.add_directive(\"exec\", ExecDirective)\n    app.add_directive(\"pyreverse\", PyReverse)\n    add_dynamic_function(app, getTestResult, \"get_test_result\")\n\n    # making tutorial data dir\n    dataDir = pathlib.Path(\"user\") / \"..\" / \"anl-afci-177\"\n    if not os.path.exists(dataDir):\n        os.mkdir(dataDir)\n\n    # Copy resources needed to build the tutorial notebooks. nbsphinx_link needs the working directory for running the\n    # notebooks to be the directory of the link itself.\n    for path in _TUTORIAL_FILES:\n        safeCopy(path, dataDir)\n\n\n# If extensions (or modules to document with autodoc) are in another directory, add these directories to sys.path here.\n# If the directory is relative to the documentation root, use os.path.abspath to make it absolute, like shown here.\nsys.path.insert(0, os.path.abspath(\"..\"))\n\n# -- General configuration -----------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. 
They can be extensions coming with Sphinx\n# (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n    \"nbsphinx\",\n    \"nbsphinx_link\",\n    \"sphinx.ext.autodoc\",\n    \"sphinx.ext.autosummary\",\n    \"sphinx.ext.doctest\",\n    \"sphinx.ext.extlinks\",\n    \"sphinx.ext.ifconfig\",\n    \"sphinx.ext.imgconverter\",  # to convert GH Actions badge SVGs to PNG for LaTeX\n    \"sphinx.ext.inheritance_diagram\",\n    \"sphinx.ext.intersphinx\",\n    \"sphinx.ext.mathjax\",\n    \"sphinx.ext.napoleon\",\n    \"sphinx.ext.todo\",\n    \"sphinx.ext.viewcode\",\n    \"sphinx_gallery.gen_gallery\",\n    \"sphinx_needs\",  # needed for requirements tracking\n    \"sphinx_rtd_theme\",  # needed here for loading jquery in sphinx 6\n    \"sphinxcontrib.apidoc\",\n    \"sphinxcontrib.jquery\",  # see https://github.com/readthedocs/sphinx_rtd_theme/issues/1452\n    \"sphinxcontrib.plantuml\",\n    \"sphinxcontrib.test_reports\",\n    \"sphinxext.opengraph\",\n]\n\n# Our API should make sense without documenting private/special members.\nautodoc_default_options = {\n    \"members\": True,\n    \"private-members\": False,\n    \"undoc-members\": True,\n    \"ignore-module-all\": True,\n}\nautodoc_member_order = \"bysource\"\n# this line removes huge numbers of false and misleading, inherited docstrings\nautodoc_inherit_docstrings = False\nautoclass_content = \"both\"\nautodoc_mock_imports = [\"wx\"]\n\napidoc_module_dir = SOURCE_DIR\napidoc_module_first = True\napidoc_output_dir = APIDOC_REL\napidoc_separate_modules = True\n\n# Napoleon settings listed here so we know what's configurable and can track changes (for numpy docstrings)\nnapoleon_google_docstring = False\nnapoleon_include_init_with_doc = False\nnapoleon_include_private_with_doc = False\nnapoleon_include_special_with_doc = False\nnapoleon_numpy_docstring = True\nnapoleon_use_admonition_for_examples = False\nnapoleon_use_admonition_for_notes = True\nnapoleon_use_admonition_for_references = 
False\nnapoleon_use_ivar = True\nnapoleon_use_param = True\nnapoleon_use_rtype = True\n\nnbsphinx_kernel_name = \"python3\"\n\nogp_site_url = \"https://terrapower.github.io/armi/\"\nogp_image = \"https://terrapower.github.io/armi/_static/armiSchematicView.png\"\nogp_site_name = \"Advanced Reactor Modeling Interface\"\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\".templates\"]\n\n# The suffix of source filenames.\nsource_suffix = \".rst\"\n\n# The top-level toctree document.\nroot_doc = \"index\"\n\n# General information about the project.\ncopyright = f\"2009-{datetime.datetime.now().year}, TerraPower, LLC\"\nproject = \"ARMI\"\n\n# Use the pre-existing version definition.\nrelease = meta.__version__\nversion = meta.__version__\n\n# List of patterns, relative to doc directory, that match files and directories to ignore when looking for source files.\nexclude_patterns = [\n    \"**.ipynb_checkpoints\",\n    \"**_reqs.rst\",  # needed so included reqs files render\n    \".DS_Store\",\n    \"_build\",\n    \"gallery/**/*.ipynb\",  # prevent sphinx-gallery from causing duplicate source file errors\n    \"gallery/**/*.json\",\n    \"gallery/**/*.md5\",\n    \"gallery/**/*.zip\",\n    \"gallery/analysis/index.html\",\n    \"gallery/framework/index.html\",\n    \"logs\",\n    \"Thumbs.db\",\n]\n\nrst_epilog = r\"\"\"\n.. |keff| replace:: k\\ :sub:`eff`\\\n\"\"\"\n\nwiki = {\n    \"GitHub Discussions\": (\n        \"https://github.com/terrapower/armi/discussions\" + \"%s\",\n        None,\n    )\n}\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n# A list of ignored prefixes for module index sorting.\nmodindex_common_prefix = [\"armi.\"]\n\n# -- Options for HTML output ---------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. 
See the documentation for a list of builtin themes.\nhtml_theme = \"sphinx_rtd_theme\"\n\n# (Optional) Logo. Should be small enough to fit the navbar (ideally 24x24). Path should be relative to the ``.static``\n# files directory.\nhtml_logo = os.path.join(STATIC_DIR, \"armiicon_24x24.ico\")\n\n# Theme options are theme-specific and customize the look and feel of a theme further.\nhtml_theme_options = {\n    \"display_version\": True,\n    \"logo_only\": False,\n    \"prev_next_buttons_location\": \"bottom\",\n    \"style_external_links\": True,\n    \"style_nav_header_background\": \"#233C5B\",  # TP blue looks better than green\n    \"vcs_pageview_mode\": \"\",\n    # Toc options\n    \"collapse_navigation\": True,\n    \"includehidden\": True,\n    \"navigation_depth\": 4,\n    \"sticky_navigation\": True,\n    \"titles_only\": False,\n}\n\n# as long as this file @import's the theme's main css it won't break anything\nhtml_style = \"css/theme_fixes.css\"\n\n# The name of an image file (within the static path) to use as favicon of the docs. This file should be a icon file\n# (.ico) being 16x16 or 32x32 pixels large.\nhtml_favicon = os.path.join(STATIC_DIR, \"armiicon_16x16.ico\")\n\n# Add any paths that contain custom static files (such as style sheets) here, relative to this directory. 
They are\n# copied after the builtin static files, so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [STATIC_DIR]\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, using the given strftime format.\nhtml_last_updated_fmt = \"%Y-%m-%d\"\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"ARMIdoc\"\n\nhtml_context = {\n    \"conf_py_path\": \"/doc/\",  # Path in the checkout to the docs root\n    \"display_github\": True,  # Integrate GitHub\n    \"github_repo\": \"armi\",  # Repo name\n    \"github_user\": \"terrapower\",  # Username\n    \"github_version\": \"main\",  # Version\n}\n\n# -- Options for LaTeX output --------------------------------------------------\nlatex_engine = \"xelatex\"\n\n# Additional stuff for the LaTeX preamble.\nlatex_elements = {\n    \"papersize\": \"letterpaper\",\n    \"pointsize\": \"10pt\",\n    \"preamble\": r\"\"\"\\usepackage{amsmath}\n\n\\usepackage{wasysym}\n\"\"\",\n}\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title, author, documentclass [howto/manual], toctree_only).\nlatex_documents = [\n    (\n        \"index\",\n        \"ARMI.tex\",\n        \"Advanced Reactor Modeling Interface (ARMI) Manual\",\n        \"TerraPower, LLC\",\n        \"manual\",\n        False,\n    )\n]\n\n# The name of an image file (relative to this directory) to place at the top of the title page.\nlatex_logo = os.path.join(STATIC_DIR, \"armi-logo.png\")\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts, not chapters.\nlatex_toplevel_sectioning = \"part\"\n\n# If true, show page references after internal links.\nlatex_show_pagerefs = True\n\n# If true, show URL addresses after external links.\nlatex_show_urls = \"inline\"\n\n# Documents to append as an appendix to all manuals.\nlatex_appendices = []\n\n# If false, no module index is generated.\nlatex_domain_indices = [\"py-modindex\"]\n\n# Configuration for the sphinx-gallery\nsphinx_gallery_conf = {\n    \"examples_dirs\": [\"gallery-src\"],\n    \"filename_pattern\": re.escape(os.sep) + \"run_\",\n    \"gallery_dirs\": [\"gallery\"],\n    \"line_numbers\": False,\n    \"download_all_examples\": False,\n    \"nested_sections\": False,\n    \"subsection_order\": ExplicitOrder(\n        [\n            os.path.join(\"gallery-src\", \"framework\"),\n            os.path.join(\"gallery-src\", \"analysis\"),\n        ]\n    ),\n    \"within_subsection_order\": FileNameSortKey,\n    \"default_thumb_file\": os.path.join(STATIC_DIR, \"TerraPowerLogo.png\"),\n}\n\nsuppress_warnings = [\"autoapi.python_import_resolution\", \"config.cache\"]\n\n# Filter out this warning which shows up in sphinx-gallery builds. 
This is suggested in the sphinx-gallery example but\n# doesn't actually work?\nwarnings.filterwarnings(\n    \"ignore\",\n    category=UserWarning,\n    message=\"Matplotlib is currently using agg, which is a non-GUI backend, so cannot show the figure.\",\n)\n\nintersphinx_mapping = {\"python\": (\"https://docs.python.org/3\", None)}\n\n# These are defaults in Windows in more recent versions of the imgconverter plugin and can be  removed if/when we\n# upgrade Sphinx beyond 2.2. Otherwise, 'convert' from system32 folder is used.\nif sys.platform.startswith(\"win\"):\n    image_converter = \"magick\"\n    image_converter_args = [\"convert\"]\n\n# sphinx-needs settings\nneeds_statuses = [\n    dict(name=None, description=\"No status yet; not in any reviews\"),\n    dict(\n        name=\"preliminary\",\n        description=\"Requirement that will have its wording reviewed and/or does not have implementation/testing yet.\",\n    ),\n    dict(\n        name=\"accepted\",\n        description=\"Requirement that either has completed or will undergo TP-ENG-PROC-0013 Appendix D Part 1 review.\",\n    ),\n]\n\nneeds_extra_options = [\n    \"acceptance_criteria\",\n    \"basis\",\n    \"subtype\",\n]\n\nneeds_extra_links = [\n    dict(option=\"tests\", incoming=\"testing\", outgoing=\"requirements\"),\n    dict(option=\"implements\", incoming=\"implementations\", outgoing=\"requirements\"),\n]\n\nneeds_layouts = {\n    \"test_layout\": {\n        \"grid\": \"simple\",\n        \"layout\": {\n            \"head\": [\n                '<<meta(\"type_name\")>>: **<<meta(\"title\")>>** <<meta_id()>>  <<collapse_button(\"meta\", '\n                'collapsed=\"icon:arrow-down-circle\", visible=\"icon:arrow-right-circle\", initial=False)>> '\n            ],\n            \"meta\": [\n                \"signature: <<meta('signature')>>\",\n                \"<<meta_links_all()>>\",\n            ],\n        },\n    },\n    \"req_hide_links\": {\n        \"grid\": \"simple\",\n        
\"layout\": {\n            \"head\": [\n                '<<meta(\"type_name\")>>: **<<meta(\"title\")>>** <<meta_id()>>  <<collapse_button(\"meta\", '\n                'collapsed=\"icon:arrow-down-circle\", visible=\"icon:arrow-right-circle\", initial=False)>> '\n            ],\n            \"meta\": [\n                \"<<meta_all(no_links=True, exclude=['layout'])>>\",\n            ],\n        },\n    },\n}\n\nneeds_global_options = {\n    # Defaults for test tags\n    \"layout\": (\"test_layout\", \"type=='test'\"),\n    \"result\": (\"[[get_test_result()]]\", \"type=='test'\"),\n}\n\n# Formats need roles (reference to a req in text) as just the req ID\nneeds_role_need_template = \"{id}\"\n"
  },
  {
    "path": "doc/developer/documenting.rst",
    "content": ".. _armi-docing:\n\n****************\nDocumenting ARMI\n****************\n\nARMI uses the `Sphinx <https://www.sphinx-doc.org/en/master/>`_ documentation system to compile the \nARMI documentation into HTML and PDF from in-code docstrings and hand-created\n`ReStructedText files <https://www.sphinx-doc.org/en/master/usage/restructuredtext/basics.html>`_.\nThis provides several benefits:\n \n* We can revise and track the documentation in lock-step with the code itself, in the same source\n  code repository\n* We can make use of hyperlinked cross-references that stay up to date as the code is expanded or\n  refactored.\n* We can run specific code tests during documentation building to ensure the documentation examples\n  remain valid\n* We can auto-generate class diagrams based on the latest status of the code\n* Every Pull Request (PR) generates HTML and PDF versions of the documentation for the PR Author and\n  Reviewer\n\nWe use some special Sphinx plugins that run the tutorial jupyter notebooks during documentation\nbuild with the most up-to-date code.\n\nBuilding the Documentation\n==========================\nBefore building documentation, ensure that you have installed the documentation requirements into\nyour ARMI virtual environment with:\n\n.. code-block:: bash\n\n    pip install -e .[docs]\n\nYou also need to have the following utilities available in your PATH:\n\n* `Graphviz <https://graphviz.org/>`_\n* `Pandoc <https://pandoc.org/>`_\n\nIf you want to build the documentation into a PDF using the Sphinx LaTeX builder, you also need:\n\n* LaTeX (`MikTeX <https://miktex.org/>`_ on Windows)\n* `ImageMagick <https://imagemagick.org/>`_\n\nThe documentation depends on at least one submodule as well, so you must be sure it is available in\nyour source tree with:\n\n.. code-block:: bash\n\n    git submodule update --init\n\n\nTo build the ARMI documentation as HTML. The ARMI docs expect a bunch of custom unit test outputs to\nbe present. 
You can either run these test commands:\n\n.. code-block:: bash\n\n    pytest --junit-xml=test_results.xml -v -n 4 armi > pytest_verbose.log\n    mpiexec -n 2 --use-hwthread-cpus pytest --junit-xml=test_results_mpi1.xml armi/tests/test_mpiFeatures.py > pytest_verbose_mpi1.log\n    mpiexec -n 2 --use-hwthread-cpus pytest --junit-xml=test_results_mpi2.xml armi/tests/test_mpiParameters.py > pytest_verbose_mpi2.log\n    mpiexec -n 2 --use-hwthread-cpus pytest --junit-xml=test_results_mpi3.xml armi/utils/tests/test_directoryChangersMpi.py > pytest_verbose_mpi3.log\n    python doc/.static/cleanup_test_results.py test_results.xml\n\nOr, if you just want to build the docs locally and aren't interested in building a full test report,\nyou can just do this to inject placeholder test results files instead:\n\n.. code-block:: bash\n\n    python doc/skip_str.py\n\nEither way, you eventually go to the ``doc`` folder and type this to build the docs:\n\n .. code-block:: bash\n\n    make html\n\nThis will invoke Sphinx and generate a series of html files in the ``_build/html`` folder. Open up\n``index.html`` to see the documentation from there. 
A copy of the documentation is hosted online at\nhttps://terrapower.github.io/armi/.\n\nYou can suggest a change to the documentation by opening an ARMI PR.\n\nDocumentation for ARMI plugins\n==============================\nThe following subsections apply to documentation for ARMI plugins.\n\nLinking to ARMI documentation from plugins\n------------------------------------------\nARMI plugin documentation can feature rich hyperlinks to the ARMI API documentation with the help\nof the `intersphinx Sphinx plugin <http://www.sphinx-doc.org/en/master/usage/extensions/intersphinx.html>`_.\nThe ARMI plugin documentation config file should add ``\"sphinx.ext.intersphinx\",`` to its active\nSphinx plugin list, and change the default config to read::\n\n    intersphinx_mapping = {\n        \"python\": (\"https://docs.python.org/3\", None),\n        \"armi\": (\"https://terrapower.github.io/armi/\", None),\n    }\n\nNow you can link to the ARMI documentation with links like::\n\n    :doc:`armi:developer/documenting`\n    :py:mod:`armi.physics.executers`\n\n\nAutomatically building apidocs of namespace packages\n----------------------------------------------------\nActivating the ``\"sphinxcontrib.apidoc\",`` `Sphinx plugin <https://github.com/sphinx-contrib/apidoc>`_\nenables plugin API documentation to be built with the standard ``make html`` Sphinx workflow. If your\nARMI plugin is a namespace package, the following extra config is required::\n\n    apidoc_extra_args = [\"--implicit-namespaces\"]\n\nUpdating the Gallery\n====================\nThe `ARMI example gallery <https://terrapower.github.io/armi/gallery/index.html>`_ is a great way\nto quickly highlight neat features and uses of ARMI. To add a new item to the gallery, add your\nexample code (including the required docstring) to the ``doc/gallery-src`` folder in the ARMI source\ntree. 
The example will be added to the gallery during the next documentation build.\n\nUsing Jupyter Notebooks\n=======================\nFor interactive tutorials, it's convenient to build actual Jupyter notebooks and commit them to the\ndocumentation to be rendered by Sphinx using the nbsphinx plugin. When this is done, notebooks\nwithout any output should be committed to the repository so that Sphinx actually executes the\nnotebooks with the up-to-date code when the  documentation is built. To do this, you can clean the\noutput with:\n\n.. code-block:: bash\n\n    jupyter nbconvert --ClearOutputPreprocessor.enabled=True --inplace mynotebook.ipynb\n\nThis should clear the output and overwrite the file. If this doesn't work, you can clear all output\ncells in the notebook web interface itself before committing the file.\n"
  },
  {
    "path": "doc/developer/entrypoints.rst",
    "content": "************\nEntry Points\n************\n\n**Entry Points** are like the verbs that your App can *do*. The :py:mod:`built-in entry points <armi.cli>` offer basic\nfunctionality, like :py:class:`running a case <armi.cli.run.RunEntryPoint>` or\n:py:class:`opening up the GUI <armi.cli.gridGui.GridGuiEntryPoint>`, but the real joy of an application comes when you\nadd your own project-specific entry points that do the actions that you commonly need done.\n\nTo make a new EntryPoint, first make a new module and subclass :py:class:`~armi.cli.entryPoint.EntryPoint`. Set the\nclass attributes as follows:\n\n``name``\n    What the user types on the CLI to invoke this entry point.\n\n``settingsArgument``\n\n    * ``\"required\"`` if a settings input file must be provided,\n    * ``\"optional\"`` if it may be provided but not required,\n    * ``None`` if no settings input is allowed\n\n\n.. tip:: ARMI apps often collect EntryPoints in a ``cli/`` directory (Command Line Interface)\n\nNext, implement the :py:meth:`~armi.cli.entryPoint.EntryPoint.addOptions` method. Here you can both:\n\n* turn various Settings into command-line arguments with :py:meth:`~armi.cli.entryPoint.EntryPoint.createOptionFromSetting`\n* add arbitrary command-line arguments using the standard :py:mod:`python:argparse` library.\n\nThe values of the non-setting arguments will become attributes in ``self.args`` for later use.\n\nFinally, implement the :py:meth:`~armi.cli.entryPoint.EntryPoint.invoke` method with the code you'd like to run upon\ninvocation of this entry point.\n\n\n.. code-block:: python\n    :caption: Example entry point\n\n    from armi import cases\n    from armi.cli import entryPoint\n\n    class SampleEntryPoint(entryPoint.EntryPoint):\n        \"\"\"\n        Entry point title here.\n\n        Long description of entry point here. 
This will get picked up and used as the help text on the command line!\n        \"\"\"\n\n        name = \"do-my-thing\"\n        settingsArgument = \"required\"\n\n        def addOptions(self):\n            self.createOptionFromSetting(CONF_CYCLE_LENGTH)\n            self.createOptionFromSetting(CONF_BURN_STEPS)\n            self.parser.add_argument(\n                \"--post-process\",\n                \"-p\",\n                action=\"store_true\",\n                default=False,\n                help=\"Just post-process an existing suite; don't run\",\n            )\n\n        def invoke(self):\n            inputCase = cases.Case(cs=self.cs)\n            print(f\"The case is {inputCase}\")\n\n            if self.args.post_process:\n                print(\"Post processing...\")\n\n\nWhen you run your app, you will have this as an option, and you can invoke it with::\n\n    python -m myapp do-my-thing --post-process settingsFile.yaml\n\nor (if ``myapp`` is not in your ``PYTHONPATH``)::\n\n    python path/to/myapp do-my-thing --post-process settingsFile.yaml\n\n.. tip:: The settings file will be read into a ``Settings`` object. This ``Settings`` object will be passed widely\n    around the code. Please do not edit these settings during a run. The idea of \"run settings\" is a lot simpler to\n    understand when they don't change. And such changes tend to hide data from other developers.\n\nTo add entry points, ``ArmiPlugin``s can implement the ``defineEntryPoints`` method. ARMI\nhas an extensive :py:class:`~armi.cli.EntryPointsPlugin` that comes with several CLI entry points. \nIt is important to note that if you are building your own ARMI ``Application``, the\n``EntryPointsPlugin`` must be registered to access these entry points either by registration in the application or \nsubclassing :py:class:`~armi.apps.App`. If you do not want them or if you only want\nsome of them you can build your own list in a custom ``defineEntryPoints`` method.\n"
  },
  {
    "path": "doc/developer/first_time_contributors.rst",
    "content": "*****************************\nFirst Time Contributors Guide\n*****************************\n\nThe ARMI team strongly encourages developers to contribute to the codebase.\n\nThe ARMI framework code is open source, and your contributions will become open source. Although fewer laws apply to open source materials because they are publicly-available, you still must comply with all applicable laws and regulations.\n\nHelp Wanted\n===========\n\nThere are a lot of places you can get started to help the ARMI project and team:\n\n* Better :ref:`armi-docing`\n* Better test coverage\n* Many more type annotations are desired. Type issues cause lots of bugs.\n* Targeted speedups (e.g. informed by a profiler)\n* Additional relevance to thermal reactors\n\nNaturally, you can also look at the open `ARMI issues <https://github.com/terrapower/armi/issues>`_ to see what work needs to be done. In particular, check out the `help wanted tickets <https://github.com/terrapower/armi/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22>`_ and `good first issue tickets <https://github.com/terrapower/armi/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22>`_.\n\nTesting\n=======\n\nAny contribution must pass all included unit tests. You will frequently have to fix tests your code changes break. And you should definitely add tests to cover anything new your code does.\n\nThe ARMI tests are meant to be run using `pytest <https://docs.pytest.org/en/8.0.x/>`_\nlocally ::\n\n    $ pip install -e .[test]\n    $ pytest -n 4 armi\n\nSubmitting Changes\n==================\n\nTo submit a change to ARMI, you will have to open a Pull Request (PR) on GitHub.com.\n\nThe process for opening a PR against ARMI goes something like this:\n\n1. `Fork the ARMI repo <https://docs.github.com/en/get-started/quickstart/fork-a-repo>`_\n2. 
`Create a new branch <https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-and-deleting-branches-within-your-repository>`_ in your repo\n3. Make your code changes to your new branch\n4. Submit a Pull Request against `ARMI's main branch <https://github.com/terrapower/armi/pull/new/main>`_\n    a. See `GitHub's general guidance on Pull Requests <https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request>`_\n    b. See ARMI's specific guidance on what makes a \"good\" Pull Request: :ref:`armi-tooling`.\n5. Actively engage with your PR reviewer's questions and comments.\n\n> Note that a bot will require that you sign our `Contributor License Agreement <https://github.com/terrapower/armi/blob/main/CONTRIBUTING.md>`_ before we can accept a pull request from you.\n\nSee our published documentation for a complete guide to our coding standards and practices: :ref:`armi-stds`.\n\nAlso, please check out our (quick) synopsis on good commit messages: :ref:`armi-tooling`.\n\nLicensing of Tools\n==================\n\nBe careful when including any dependency in ARMI (say in the ``pyproject.toml`` file) not to include anything with a license that supersedes our Apache license. For instance, any third-party Python library included in ARMI with a GPL license will make the whole project fall under the GPL license. But a lot of potential users of ARMI will want to keep some of their work private, so we can't allow any GPL tools.\n\nFor that reason, it is generally considered best-practice in the ARMI ecosystem to only use third-party Python libraries that have MIT or BSD licenses.\n"
  },
  {
    "path": "doc/developer/guide.rst",
    "content": "**********************\nFramework Architecture\n**********************\n\nWhat follows is a discussion of the high-level elements of the ARMI framework. Throughout, links to\nthe API docs will be provided for additional details.\n\nThe Reactor Data Model\n======================\n\nThe ARMI framework represents a nuclear reactor via a reactor data model, which is defined in the\n:py:mod:`~armi.reactor` package. Each physical piece of the nuclear reactor is defined by a Python\nobject, called an :py:class:`ArmiObject <armi.reactor.composites.ArmiObject>`. Each ``ArmiObject``\nhas associated data like: shape, material, or other physical values. The physical values can be\nnearly anything, and are attached to the data model via ARMI's\n:py:mod:`Parameter <armi.reactor.parameters>` system. Example parameters might be: ``keff``,\n``flow rates``, ``power``, ``flux``, etc.\n\nThe reactor data model is a hierarchical model, following the `Composite Design Pattern\n<http://en.wikipedia.org/wiki/Composite_pattern>`_. The top of the data model is the\n:py:class:`Reactor <armi.reactor.reactors.Reactor>`, which contains one\n:py:class:`Core <armi.reactor.cores.Core>` object and a collection of zero or more\n:py:class:`ExcoreStructures <armi.reactor.excoreStructure.ExcoreStructure>`. An example\n``ExcoreStructure`` might be a :py:class:`SpentFuelPool <armi.reactor.spentFuelPool.SpentFuelPool>`.\n\nFor now, the ``Core`` object in ARMI assumes it contains **Assembly** objects, which are in turn\nmade up as a collection of **Block** objects. The leaves of the Composite Model in the ARMI\nframework are called :py:class:`Component <armi.reactor.components.component.Component>`.\n\n.. 
figure:: /.static/armi_reactor_objects.png\n    :align: center\n\n    The primary data containers in ARMI\n\nTime-evolving the parameters on the reactor composite hierarchy is what most modelers and analysts\nwill want from the ARMI framework.\n\nReview the data model :ref:`armi-tutorials` section for examples exploring a populated instance of\nthe ``Reactor`` data model.\n\nFinding objects in a model\n--------------------------\nUnder most circumstances a :py:class:`armi.reactor.reactors.Reactor` instance will have a\n``.core`` attribute, which is an instance of :py:class:`armi.reactor.reactors.Core`. While the\nComposite pattern discussed above can be used very generally, the ``Core`` class\nenforces a couple of constraints that can be very useful:\n\n* A ``Core`` is a 2-D arrangement of :py:class:`armi.reactor.assemblies.Assembly`\n  objects.\n* Each ``Assembly`` is a 1-D arrangement of :py:class:`armi.reactor.blocks.Block`\n  objects.\n* Blocks are :py:class:`armi.reactor.composites.Composite` objects with some extra\n  parameter bindings, utility functions, and other implementation details that let\n  them play nicely with their containing ``Assembly``.\n\nIn many scenarios, one wants to access specific assemblies or blocks from a core. There\nare a few ways to get the objects that you're interested in:\n\n* The `r.core.childrenByLocator` dictionary maps\n  :py:class:`armi.reactor.grids.IndexLocation` objects to whichever assembly is at\n  that location. For example ::\n\n      >>> loc = r.core.spatialGrid[i, j, 0]\n      >>> a = r.core.childrenByLocator[loc]\n\n  To access the ``k`` -th block in an assembly, try::\n\n      >>> b = a[k]\n\n* `r.core.getAssemblies()` loops through all assemblies in the core for when you\n  need to do something to all assemblies.\n\n\nParameters\n----------\n\nOne of the main benefits to ARMI is that it enables simple interfaces to extract data\nfrom the reactor, do something with it, and add new results to the reactor. 
This enables\nspecialized developers to write code that uses ARMI as input and output.\n\nMost data is stored in ARMI as :py:mod:`~armi.reactor.parameters`. Most parameters will\nbecome persistent, meaning they will be saved to the database during database\ninteractions, and therefore it will also be loaded when a database is loaded.\n\nDetails of the use and design can be found at :py:mod:`~armi.reactor.parameters`.\n\nConverters\n----------\n\nThe :py:mod:`~armi.reactor.converters` subpackage contains a variety of utilities that\ncan convert a reactor model in various ways. Some converters change designs at the block\nlevel, adjusting pin dimensions or fuel composition. Others adjust the reactor geometry\nat large, changing a 1/3-symmetric model to a full core, or changing a hexagonal\ngeometry to a R-Z geometry. Converters are used for parameter sweeps as well as during\nvarious physics operations.\n\nFor example, some lattice physics routines convert the full core to a 2D R-Z model and\ncompute flux with thousands of energy groups to properly capture the spectral-spatial\ncoupling in a core/reflector interface. The converters are used heavily in these operations.\n\nBlueprints\n----------\n\nAs seen in the User Guide, :py:mod:`~armi.reactor.blueprints` are how reactor models are\ndefined. During a run, they can be used to create new instances of reactor model pieces,\nsuch as when a new assembly is fabricated during a fuel management operation in a later cycle.\n\nOperators\n=========\n\nOperators conduct the execution sequence of an ARMI run. They basically contain the main\nloop. When any operator is instantiated, several actions occur:\n\n    1. Some environmental detail is printed out,\n    2. A Reactor object is instantiated\n    3. Loading and geometry input files are processed and the reactor object is\n       populated with assemblies,\n    4. 
The **interfaces** are instantiated\n       and placed in the **Interface Stack** during the :py:meth:`createInterfaces\n       method<armi.operators.Operator.createInterfaces>` call,\n    5. The ``interactInit`` method is called on all interfaces, and\n    6. Restart information is processed (if this is a restart run).\n\nAfter that, depending on the type of Operator at hand, one of several operational loops\nwill begin via ``operate()``. Operator types are chosen by the ``runType`` setting,\nwhich is featured on the first tab of the ARMI GUI.\n\nThe Standard Operator\n---------------------\n\nThe two primary types of operators are the Standard Operator (along with its parallel\nversion, the :py:class:`OperatorMPI <armi.operators.OperatorMPI>`), and the\n:py:class:`OperatorSnapshots <armi.operators.OperatorSnapshots>`. The former runs a\ntypical operational loop, which calls all the interfaces through their interaction hooks\nin a sequential manner, marching from beginning-of-life through the number of cycles\nrequested. This is how most quasistatic fuel cycle calculations are performed, which\ninform much of the analysis done during reactor design. The main code for this loop is\nfound in the :py:meth:`mainOperate method <armi.operators.Operator.mainOperate>`. This\noperator supports restart/continuation of past runs from an arbitrary time step.\n\nThe Snapshots Operator\n----------------------\n\nAlternatively, OperatorSnapshots is designed to allow for additional analyses at\nspecific time steps. It simply loops through all snapshots that have been requested via\nthe Snapshot Request functionality (Lists -> Edit snapshot requests in the GUI). At each\nsnapshot request, the state is loaded from a previous case, as determined by the\n``reloadDBName`` setting and then the BOC, EveryNode, and EOC interaction hooks are\nexecuted from all the interfaces. Snapshots are intended to analyze an exact reactor\nconfiguration. 
Therefore, interfaces which would significantly change the reactor\nconfiguration (such as Fuel management, and depletion) are disabled.\n\nThe Interface Stack\n-------------------\n*Interfaces* (:py:class:`armi.interfaces.Interface`) operate upon the Reactor Model to\ndo analysis. They're designed to allow expansion of the code in a natural and\nwell-organized manner. Interfaces are useful to link external codes to ARMI as well for\nadding new internal physics into the rest of the system. As a result, very many aspects\nof ARMI are contained within interfaces.\n\nThe flow of any ARMI calculation depends on the order of the interfaces, which is set at\ninitialization according to the user settings and the corresponding ``ORDER`` attributes\nin interface modules. The collection of the interfaces is known as the **Interface\nStack** and is prominently featured at the beginning of the standard output of each run,\nlike this::\n\n    [R 0] ----------------------------------------------------------\n    [R 0]          ***  Interface Stack Report  ***\n    [R 0] NUM TYPE               NAME         ENABLED BOL  EOL ORDER\n    [R 0] ----------------------------------------------------------\n    [R 0] 00  Main               main         Yes     No   Reversed\n    [R 0] 01  Software Testing   tests        Yes     No   Reversed\n    [R 0] 02  ReportInterface    report       Yes     No   Reversed\n    [R 0] 03  FuelHandler        fuelHandler  Yes     No   Normal\n    [R 0] 04  Depletion          depletion    Yes     Yes  Normal\n    [R 0] 05  MC2-2              mc2          Yes     No   Normal\n    [R 0] 06  DIF3D              dif3d        Yes     No   Normal\n    [R 0] 07  Thermo             thermo       Yes     No   Normal\n    [R 0] 08  OrificedOptimized  orificer     Yes     Yes  Normal\n    [R 0] 09  AlchemyLite        alchemyLite  Yes     No   Normal\n    [R 0] 10  Alchemy            alchemy      Yes     No   Normal\n    [R 0] 11  Economics          economics    Yes     
No   Normal\n    [R 0] 12  History            history      Yes     No   Normal\n    [R 0] 13  Database           database     Yes     Yes  Normal\n    [R 0] ----------------------------------------------------------\n\n\nAny interface that exists on the interface stack is accessible from the ``operator`` or\nfrom any other interface object through the :py:meth:`getInterface method\n<armi.operators.Operator.getInterface>`.\n\nInterface Interaction Hooks\n---------------------------\nVarious interfaces need to interact with ARMI at various times. The point at which\nroutines are called during a run is set by developers in interface *hooks*, as seen below.\nAt each point in the flow chart, interfaces are interacted with one-by-one as the\ninterface stack is traversed in order.\n\n.. figure:: /.static/armi_general_flowchart.png\n    :align: center\n\n    The computational flow of the interface hooks\n\nFor example, input checking routines would run at beginning-of-life (BOL), calculation\nmodules might run at every time node, etc. 
To accommodate these various needs, interface\nhooks include:\n\n* :py:meth:`interactInit <armi.interfaces.Interface.interactInit>` occurs right after\n  all interfaces are initialized.\n\n* :py:meth:`interactBOL <armi.interfaces.Interface.interactBOL>` -- Beginning of life.\n  Happens once as the run is starting up.\n\n* :py:meth:`interactBOC <armi.interfaces.Interface.interactBOC>` -- Beginning of cycle.\n  Happens once per cycle.\n\n* :py:meth:`interactEveryNode <armi.interfaces.Interface.interactEveryNode>` -- Happens\n  after every node step/flux calculation.\n\n* :py:meth:`interactEOC <armi.interfaces.Interface.interactEOC>` -- End of cycle.\n\n* :py:meth:`interactEOL <armi.interfaces.Interface.interactEOL>` -- End of life.\n\n* :py:meth:`interactError <armi.interfaces.Interface.interactError>` -- When an error\n  occurs, this can run to clean up or print debugging info.\n\n* :py:meth:`interactCoupled <armi.interfaces.Interface.interactCoupled>` -- Happens\n  after every node step/flux calculation, if tight physics coupling is active.\n\n* :meth:`~armi.interfaces.Interface.interactRestart` -- Happens when restarting from a\n  previous run. Called prior to :meth:`~armi.interfaces.Interface.interactBOL`\n\nThese interaction points are optional in every interface, and you may override one or\nmore of them to suit your needs. You should not change the arguments to the hooks.\n\nEach interface has an ``enabled`` flag. If this is set to ``False``, then the interface's\nhook code will not be called even though the interface exists in the problem. This is\nuseful for interfaces that use code from other interfaces. 
For example, if ``subchan``\nis activated, it still uses some code in the ``thermo`` module to compute the fuel\ntemperatures, so the ``thermo`` interface must be available in a ``getInterface`` call.\n\n\nAdding a new interface\n----------------------\nWhen using the Operators that come with ARMI, Interfaces are discovered using the\n:py:mod:`Plugin API <armi.plugins>` and inserted into the interface stack during the\n:py:meth:`createInterfaces <armi.operators.operator.Operator.createInterfaces>` method.\n\n\nHow interfaces get called\n-------------------------\n\nThe hooks of interfaces are called during the main loop in\n:py:meth:`armi.operators.Operator.mainOperate`. There are a few special operator calls\nin there to methods like :py:meth:`armi.operators.Operator.interactAllBOL` that loop\nthrough the interface stack and call each enabled interface's ``interactBOL()`` method.\nIf you override ``mainOperate`` in a custom operator, you will need to add these calls\nas deemed necessary to have the interfaces work properly.\n\nTo use interfaces in parallel, please refer to :py:mod:`armi.mpiActions`.\n\n\nPlugins\n=======\n\nPlugins are higher-level objects that can add things to the simulations like Interfaces, settings\ndefinitions, parameters, validations, etc. They are documented in :ref:`armi-app-making` and\n:py:mod:`armi.plugins`.\n\n\nEntry Points\n------------\nARMI has a set of :py:mod:`Entry Points <armi.cli.entryPoint.EntryPoint>` that can run\ncases, launch the GUI, and perform various testing and utility operations. When you\ninvoke ARMI with ``python -m armi run``, the ``__main__.py`` file is loaded and all\nvalid Entry Points are dynamically loaded. The proper entry point (in this case,\n:py:class:`armi.cli.run.RunEntryPoint`) is invoked. As ARMI initializes itself, settings\nare loaded into a :py:class:`Settings <armi.settings.caseSettings.Settings>`\nobject. 
From those settings, an :py:class:`Operator <armi.operators.operator.Operator>`\nsubclass is built by a factory and its ``operate`` method is called. This fires up the\nmain ARMI analysis loop and its interface stack is looped over as indicated by user input.\n"
  },
  {
    "path": "doc/developer/index.rst",
    "content": "##############\nDeveloper Docs\n##############\n\nThis guide will get you started as an ARMI developer. It will teach you how to develop within ARMI and also guide you\nthrough some of the structure of the ARMI code.\n\nThe intended audience for this section is reactor design engineers and computer scientists who want to integrate ARMI\ninto their workflow and/or enhance ARMI for the community.\n\n-------------\n\n.. toctree::\n   :maxdepth: 2\n   :numbered:\n   :glob:\n\n   guide\n   making_armi_based_apps\n   entrypoints\n   parallel_coding\n   testing\n   documenting\n   profiling\n   *\n"
  },
  {
    "path": "doc/developer/making_armi_based_apps.rst",
    "content": ".. _armi-app-making:\n\n**********************\nMaking ARMI-based Apps\n**********************\n\nLoading a reactor into the ARMI Framework is just the first step in pushing the envelope\nof reactor design and analysis. Activating a powerful collection of plugins and\ninterfaces to automate your work is the next step to unlocking ARMI's potential.\n\n.. admonition:: Heads up\n\n    A full tutorial on :ref:`armi-make-first-app` is here.\n\nTo really make ARMI your own, you will need to understand a couple of concepts that\nenable developers to adapt and extend ARMI to their liking:\n\n* **Plugins**: An ARMI plugin is a collection of code that registers new functionality\n  with the ARMI Framework. This can include new Interfaces, Settings, Parameter\n  definitions, custom Components, Materials, Operators, and others. For a more complete\n  reference, see the :py:mod:`Plugin API <armi.plugins>` documentation. It is typical\n  for a plugin to provide related components to some specific type of physics or a\n  specific external physics code or the like. Keeping the scope of a plugin limited\n  helps users to understand where all of their settings and interfaces and parameters\n  are coming from.\n\n* **ARMI-Based Applications**: A collection of plugins, along with application-specific\n  customizations, working together with the ARMI Framework constitutes an \"ARMI-Based\n  Application\". As an example, the TerraPower proprietary tool for modeling and\n  analyzing sodium-cooled fast reactors is just such an application. It is from an\n  Application that ARMI gets its collection of active plugins, which in turn dictate\n  much of the ARMI Framework's behavior.\n\nBoth of these concepts are discussed in depth below.\n\nARMI Plugins\n============\n\nAn ARMI Plugin is the primary means by which a developer or qualified analyst can go\nabout building specific capability on top of the ARMI Framework. 
Even some of the\nfunctionalities that ship with the Framework are implemented internally using the Plugin\nsystem! The :py:mod:`armi.plugins` module contains all of the plugin \"hook\" definitions\nand their associated documentation. It is recommended to peruse those docs before\ngetting started to get an idea of what is available.\n\nSome implementation details\n---------------------------\n\nPlugins are designed to make it easy to build a plugin by copy/pasting from an existing\nplugin. However, having a deeper understanding of what is going on may be useful.\nFeel free to skip this section.\n\nThe plugin system is built on top of a Python library called `pluggy\n<https://github.com/pytest-dev/pluggy>`_. Unless you plan on doing development within\nthe ARMI Framework itself, it is unlikely that you will need to be overly familiar with\nit, but understanding how it works may be beneficial.\n\nLooking at the code in :py:class:`armi.plugins.ArmiPlugin`, you might notice that all of\nthe methods are decorated with ``@HOOKSPEC`` (short for \"hook specification\"); this is\nhow the Framework itself defines the interfaces that a plugin implementation can\nprovide.  This is a feature of ``pluggy``. You might also notice that all of the methods\nare **static methods**. This is because we do not actually expect an instance of an\n``ArmiPlugin``; rather, we currently only use the class as a namespace to collect\nwhatever hook implementations a Plugin provides. While ``pluggy`` is happy with any\nPython namespace containing hook implementations (e.g. 
module, class, object, function,\netc.), we chose to make a base ``ArmiPlugin`` class for a couple of reasons:\n\n - Wrapping the specifications in a class allows you to implement them in a subclass,\n   which enables tools like ``ruff`` to check your work and complain early if you do\n   certain things wrong.\n\n - While we assume all plugins are stateless (hence all ``@staticmethods``), we may\n   introduce stateful/configurable plugins later on. Starting out with a base class will\n   make this transition easier.\n\nMaking your own Plugin\n----------------------\n\nTo get started on your own plugin you will want to subclass the\n:py:class:`armi.plugins.ArmiPlugin` class, and implement whichever Plugin APIs that you\nwant your Plugin to provide. Mark each of your implementations with an\n``@armi.plugins.HOOKIMPL`` decorator. Take a look at\n:py:class:`armi.physics.neutronics.NeutronicsPlugin` for an example. Make sure that in\nyour implementation, you follow any rules or guidelines that are provided in the\ndocstring for that Plugin API method. Failure to do so will lead to bugs and crashes in\nany ARMI-based Application that might use your plugin.\n\n.. important::\n   We do not actually instantiate Plugin classes. Plugins are currently assumed to be\n   stateless (notice that all of the ``@staticmethods`` on all of the hook\n   specifications). See the above section for why.\n\nIt is likely that your Plugin class itself is only the tip of the iceberg that is the\nfunctionality provided by it. All of the various Interfaces, Settings, Parameters,\netc. that your Plugin exposes to the Framework will likely live in other modules, which\nare imported and returned through your hook implementations. Again, see the Neutronics\nPlugin as an example. All of the other code will need to accompany your Plugin class\nsomehow in a cohesive package. 
Packaging Python projects is beyond the scope of this\ndocument, but see `this page <https://docs.python-guide.org/writing/structure/>`_ for\nsome guidance.\n\nOnce you have a plugin together, continue reading to see how to plug it into the ARMI\nFramework as part of an Application.\n\nARMI-Based Applications\n=======================\n\nOn its own, ARMI doesn't *do* much. Plugins provide more functionality, but even they\naren't particularly useful on their own either. The magic really happens when you\ncollect a handful of Plugins and plug them into the ARMI Framework. Such a collection is\ncalled an **ARMI-Based Application**.\n\nOnce you have a collection of Plugins that you want to use, creating an ARMI-based\nApplication is very easy. Start by creating a subclass of the :py:class:`armi.apps.App`\nclass, and write its ``__init__()`` function to register whichever plugins you need with\nthe app's ``_pm`` ``PluginManager`` object. Calling the base :py:class:`armi.apps.App`\nwill start you out with the default Framework Plugins, but you are free to discard any\nof these that you wish. Optionally, you can implement the\n:py:meth:`armi.apps.App.splashText` property to render a custom header to be printed\nwhenever your application is used.\n\nExample: ::\n\n   >>> class MyApp(armi.apps.App):\n   ...     def __init__(self):\n   ...         # Adopt the base Framework Plugins. After calling\n   ...         # __init__(), they are in self._pm.\n   ...         armi.apps.App.__init__(self)\n   ...\n   ...         # Register our own plugins\n   ...         from myapp.pluginA import PluginA\n   ...         from myapp.pluginB import PluginB\n   ...\n   ...         self._pm.register(PluginA)\n   ...         self._pm.register(PluginB)\n   ...\n   ...     @property\n   ...     def splashText(self):\n   ...         return \"\"\"\n   ...     ===============================\n   ...     == My First ARMI Application ==\n   ...     ===============================\n   ... 
\"\"\"\n\nOnce you have defined your ``App`` class, you need to configure the ARMI Framework to\nuse it. To do this, call the :py:func:`armi.configure()` function, passing an instance\nof your ``App`` class as the only argument. It is usually best to do this in your\napplication's ``__init__.py`` or ``__main__.py``. Notice that in\n:py:mod:`armi.__main__`, ARMI configures `itself` with the base\n:py:class:`armi.apps.App` class!\n\nExample: ::\n\n   >>> import armi\n   >>> armi.configure(MyApp())\n"
  },
  {
    "path": "doc/developer/parallel_coding.rst",
    "content": "*********************\nParallel Code in ARMI\n*********************\n\nARMI simulations can be parallelized using the `mpi4py <https://mpi4py.readthedocs.io/en/stable/mpi4py.html>`_\nmodule. You should go there and read about collective and point-to-point communication if you want to\nunderstand everything in-depth.\n\nThe OS-level ``mpiexec`` command is used to run ARMI on, say, 10 parallel processors. This fires up 10 identical\nand independent runs of ARMI; they do not share memory. If you change the reactor on one process, the reactors\ndon't change on the others.\n\nNever fear. You can communicate between these processes using the Message Passing Interface (MPI) driver\nvia the Python ``mpi4py`` module. In fact, ARMI is set up to do a lot of the MPI work for you, so if you follow\nthese instructions, you can have your code working in parallel in no time. In ARMI, there's the primary processor\n(which is the one that does most of the organization) and then there are the worker processors, which do whatever\nyou need them to in parallel.\n\nMPI communication crash course\n==============================\nFirst, let's do a crash course in MPI communications. We'll only discuss a few important ideas, you can read\nabout more on the ``mpi4py`` web page. The first method of communication is called the ``broadcast``, which\nhappens when the primary processor sends information to all others. An example of this would be when you want to\nsync up the settings object (``self.cs``) among all processors. An even more common example is when you want to\nsend a simple string command to all other processors. 
This is used all the time to inform the workers what they\nare expected to do next.\n\nHere is an example::\n\n    from armi import context\n\n    cmd = f\"val{context.MPI_RANK}\"\n\n    if context.MPI_RANK == 0:\n        # The primary node will send the string 'bob' to all others\n        cmd = \"bob\"\n        context.MPI_COMM.bcast(cmd, root=0)\n    else:\n        # These are the workers.\n        # They receive a value and set it to the variable cmd\n        cmd = context.MPI_COMM.bcast(None, root=0)\n\nNote that the ``MPI_COMM`` object is from the ``mpi4py`` module that deals with the MPI drivers. The value of cmd on\nthe worker before and after the ``bcast`` command are shown in the table.\n\n+--------------+-------+--------+--------+--------+\n|              | Proc0 | Proc1  | Proc2  | Proc3  |\n+--------------+-------+--------+--------+--------+\n| Before bcast | \"bob\" | \"val1\" | \"val2\" | \"val3\" |\n+--------------+-------+--------+--------+--------+\n| After bcast  | \"bob\" | \"bob\"  | \"bob\"  | \"bob\"  |\n+--------------+-------+--------+--------+--------+\n\nThe second important type of communication is the ``scatter``/``gather`` combo. These are used when you have a\nbig list of work you'd like to get done in parallel and you want to farm it off to a bunch of processors. To do\nthis, set up a big list of work to get done on the primary. Some real examples are that the list contains things\nlike run control parameters, assemblies, or blocks. For a trivial example, let's add a bunch of values in parallel.\nFirst, let's create 1000 random numbers to add::\n\n    import random\n    workList = [(random.random(), random.random()) for _i in range(1000)]\n\nNow we want to distribute this work to each of the worker processors (and take one for the primary too, so it's\nnot just sitting around waiting). This is what ``scatter`` will do. But ``scatter`` requires a list that has\nlength exactly equal to the number of processors available. 
You have some options here. Assuming there are 10\nCPUs, you can either pass the first 10 values out of the list and keep sending groups of  10 values until they\nare all sent (multiple sets of transmissions) or you can split the data up into 10 evenly-populated groups (single\ntransmission to each CPU). This is called *load balancing*. \n\nARMI has utilities that can help called :py:func:`armi.utils.iterables.chunk` and :py:func:`armi.utils.iterables.flatten`.\nGiven an arbitrary list, ``chunk`` breaks it up into a certain number of chunks and ``unchunk`` does\nthe opposite to reassemble the original list after processing. Let's look at an example script::\n\n    \"\"\"mpi_example.py\"\"\"\n    from random import random\n\n    from armi import context\n    from armi.utils import iterables\n\n    # Generate a list of random number pairs: [[(v1,v2),(v3,v4),...]]\n    workList = [(random(), random()) for _i in range(1000)]\n\n    if context.MPI_RANK == 0:\n        # Primary Process: Split the data and send it to the workers\n        balanced = iterables.split(workList, context.MPI_SIZE)\n        myValsToAdd = context.MPI_COMM.scatter(balanced, root=0)\n    else:\n        # Worker Process: Receive data, pass a dummy value to scatter\n        myValsToAdd = context.MPI_COMM.scatter(None, root=0)\n\n\n    # All processes do their bit of this work (adding)\n    results = []\n    for num1, num2 in myValsToAdd:\n        results.append(num1 + num2)\n\n    # All processes call gather to send their results back to the\n    # root process. 
(The result lists above are simply added to make\n    # one list with MPI_SIZE sub-lists.)\n    allResultsLoadBalanced = context.MPI_COMM.gather(results, root=0)\n\n    # Primary Process: Flatten the multiple lists\n    # (from each process), and sum them.\n    if context.MPI_RANK == 0:\n        # Flatten the MPI_SIZE number of sub lists into one list\n        allResults = iterables.flatten(allResultsLoadBalanced)\n        # Sum the final list, and print the result\n        print(\"The total sum is: {0:10.5f}\".format(sum(allResults)))\n\nRemember that this code is running on all processors. So it's just the ``if rank == 0`` statements that differentiate between the primary and the workers. To really understand what this script is doing, try to run it in parallel and see what it prints out::\n\n        mpiexec -n 4 python mpi_example.py\n\n\nMPI Communication within ARMI\n=============================\nNow that you understand the basics, here's how you should get your :py:class:`armi.interfaces.Interface`\nto run things in parallel in ARMI.\n\nYou don't have to worry too much about the ranks, etc. because ARMI will set that up for you. Basically,\nthe interfaces are executed by the primary node unless you say otherwise. All workers are stalled in an ``MPI.bcast`` waiting\nfor your command! The best coding practice is to create an :py:class:`~armi.mpiActions.MpiAction` subclass and override\nthe :py:meth:`~armi.mpiActions.MpiAction.invokeHook` method. `MpiActions` can be broadcast, gathered, etc. and within\nthe :py:meth:`~armi.mpiActions.MpiAction.invokeHook` method have ``o``, ``r``, and ``cs`` attributes.\n\n.. warning::\n\n    When communicating raw Blocks or Assemblies all references to parents are lost. If a whole reactor is needed\n    use ``DistributeStateAction`` and ``syncMpiState`` (shown in last example). 
Additionally, note that if a ``self.r`` \n    exists on the ``MpiAction`` prior to transmission it will be removed when ``invoke()`` is called.\n\nIf you have a bunch of blocks that you need independent work done on, always remember that unless you explicitly\nMPI transmit the results, they will not survive on the primary node. For instance, if each CPU computes and sets\na block parameter (e.g. ``b.p.paramName = 10.0)``, these **will not** be set on the primary! There are a few\nmechanisms that can help you get the data back to the primary reactor.\n\n.. note:: If you want similar capabilities for objects that are not blocks, take another look at :py:func:`armi.utils.iterables.chunk`.\n\n\nExample using ``bcast``\n-----------------------\n\nSome actions that perform the same task are best distributed through a broadcast. This makes sense if you are\nparallelizing code that is a function of an individual assembly, or block. In the following example, the interface simply\ncreates an ``Action`` and broadcasts it as appropriate::\n\n    from armi import context\n\n    class SomeInterface(interfaces.Interface):\n\n        def interactEveryNode(self, cycle, node):\n            action = BcastAction()\n            context.MPI_COMM.bcast(action)\n            results = action.invoke(self.o, self.r, self.cs)\n\n            # allResults is a list of len(self.r)\n            for aResult in results:\n                a.p.someParam = aResult\n\n    class BcastAction(mpiActions.MpiAction):\n      \n        def invokeHook(self):\n            # do something with the local self.r, self.o, and self.cs.\n            # in this example... 
do stuff for assemblies.\n            results = []\n            for a in self.mpiIter(self.r):\n                results.append(someFunction(a))\n\n            # in this usage, it makes sense to gather the results\n            allResults = self.gather(results)\n\n            # Only primary node has allResults\n            if allResults:\n                # Flatten results returns the original order after\n                # having made lists of mpiIter results.\n                return self.mpiFlatten(allResults)\n\n\n.. warning::\n\n    Currently, there is no guarantee that the reactor state is the same across all nodes. Consequently, the above code\n    should really contain a ``mpiActions.DistributeStateAction.invokeAsMaster`` call prior to broadcasting the\n    ``action``. See example below.\n\n\nExample using ``scatter``\n-------------------------\n\nWhen trying two independent actions at the same time, you can use ``scatter`` to distribute the work. The following example\nshows how different operations can be performed in parallel::\n\n    class SomeInterface(interfaces.Interface):\n\n        def interactEveryNode(self, cycle, node):\n            actions = []\n            # pseudo code for getting a bunch of different actions\n            for opt in self.cs['someSetting']:\n                actions.append(factory(opt))\n            \n            distrib = mpiActions.DistributeStateAction()\n            distrib.broadcast()\n            \n            # this line updates any existing reactor on workers to ensure consistency\n            distrib.invoke(self.o, self.r, self.cs)\n            # the 3 lines above are equivalent to:\n            # mpiActions.DistributeStateAction.invokeAsMaster(self.o, self.r, self.cs)\n\n            results = mpiActions.runActions(self.o, self.r, self.cs, actions)\n\n            # do something to apply the results.\n            for bi, b in enumerate(self.r.getBlocks()):\n                b.p.what = extractBlockResult(results, bi)\n\n    def 
factory(opt):\n        if opt == 'WHAT':\n            return WhatAction()\n\n    class WhatAction(mpiActions.MpiAction):\n\n        def invokeHook(self):\n            # does something\n            # somehow gathers results.\n            return self.gather(results)\n\n\nA simplified approach\n---------------------\n\nTransferring state to and from a Reactor can be complicated and add a lot of code. An alternative approach is to ensure\nthat the reactor state is synchronized across all nodes, and then use the reactor instead of raw data::\n\n    class SomeInterface(interfaces.Interface):\n\n        def interactEveryNode(self, cycle, node):\n            actions = []\n            # pseudo code for getting a bunch of different actions\n            for opt in self.cs['someSetting']:\n                actions.append(factory(opt))\n            \n            mpiActions.DistributeStateAction.invokeAsMaster(self.o, self.r, self.cs)\n            results = mpiActions.runActions(self.o, self.r, self.cs, actions)\n\n    class WhatAction(mpiActions.MpiAction):\n\n        def invokeHook(self):\n\n            # do something\n            for a in self.generateMyObjects(self.r):\n                a.p.someParam = func(a)\n                for b in a:\n                    b.p.someParam = func(b)\n\n            # notice we don't return an value, but instead just sync\n            # the state, which updates the primary node with the\n            # params that the workers changed.\n            self.r.syncMpiState()\n\n.. warning::\n\n    Only parameters that are set are synchronized to the primary node. Consequently if a mutable \n    parameter (e.g. ``b.p.depletionMatrix`` which is of type ``BurnMatrix``) is changed, it will not\n    natively be synced. To flag it to be synced, ``b.p.paramName`` must be set, even if it is to the\n    same object. For this reason, setting parameters to mutable objects should be avoided. 
Further,\n    if the mutable object has a reference to a large object, such as a composite or cross section\n    library, it can be very computationally expensive to pass all this data to the primary node. See\n    also: :py:mod:`armi.reactor.parameters`\n"
  },
  {
    "path": "doc/developer/profiling.rst",
    "content": "**************\nProfiling ARMI\n**************\n\nPython is slow, so it's important to profile code to keep it running reasonably quickly. Using the basic `Python profiler <https://docs.python.org/3/library/profile.html>`_ is the best way to get started. Once you have a ``.stats`` file, however, we highly recommend using a visualizer.\n\nThe profiler visualizer `gprof2dot <http://code.google.com/p/jrfonseca/wiki/Gprof2Dot#Windows_users>`_ is an invaluable tool for taking a look at the profiler traces. You have to install graphviz also, which contains the ``dot`` program.\n\nThe basic commands to run are::\n\n    python -m gprof2dot -f pstats <mystatsfile>.stats | dot -Tpng -o <mydesiredimagename>.png\n\nThis produces images like this:\n\n.. figure:: /.static/buildMacros.png\n    :align: center\n\n    An example of the profiler output rendered to a png.\n"
  },
  {
    "path": "doc/developer/standards_and_practices.rst",
    "content": ".. _armi-stds:\n\n**********************************\nStandards and Practices for Coding\n**********************************\n\nThe ARMI coding standards are a set of guidelines for helping to create a more consistent and clear code base.\nSubpart 2.7 402 of `NQA-1 <http://nqa-1.com/files/NQA-1%20Nuclear%20Quality%20Manual.pdf>`_ states, \"Software\ndesign verification shall evaluate... the design approach and ensure internal completeness, consistency, clarity\nand correctness.\" While these are required by NQA-1, the idea is that an ARMI developer, who is familiar with\nthese coding standards, should be able to jump from one module to another without changing their coding style.\n\n.. tip ::\n    :class: warning\n\n    The overall theme is: **Balance clarity with conciseness.**\n\nJust try to be as clear as possible, while using as few words as possible.\n\n.. important ::\n    Most of the guidelines can be broken, but all deviations need to be justified. It is up to the code reviewers\n    to determine whether the justification was adequate.\n\n    Developers and reviewers should consult the standards/guidelines while writing and reviewing code to ensure\n    consistency. Code reviewers should make sure to be familiar with the standards, so that their comments are\n    consistent with other reviewers.\n\nCode formatting with ruff\n=========================\nARMI uses the Python code formatter `ruff <https://docs.astral.sh/ruff/>`_. So while developing code\nin ARMI it is important to remember to use the ``ruff`` formatter before pushing any code to the\nrepo. All changes pushed to ARMI on github.com will be automatically checked to see if they conform\nto the ``ruff`` code formatter standards.\n\nThe ``ruff`` formatter provides 100% consistency in ARMI for: whitespace, line length, trailing\ncommas, and string formatting. And it is easy to run on the command line:\n\n.. 
code-block:: bash\n\n    ruff format .\n\nCode linting with ruff\n======================\nARMI also uses the amazing Python linter `ruff <https://docs.astral.sh/ruff/>`_. Again, any new code\nyou add must have zero ``ruff`` warnings or errors.\n\nThis is very easy to run on the command line:\n\n.. code-block:: bash\n\n    ruff check . --fix\n\nRemove commented-out code\n=========================\nIf you were testing code and you commented out a block, delete it before sending it in for code\nreview/production. If you want to see the old code later, it will still be in the Git history.\n\nAvoid hard-coding run parameters\n================================\nUse the global settings object ``self.cs`` for most user-settable parameters that determine the run\nenvironment, etc. This will help keep the amount of repeated code down.\n\nAlso, do not **ever** code the following things into the code: user names, passwords, or file paths\non your computer. Use environmental variables where possible and user-configurable settings\nelsewhere. You can also use the ``armi.ROOT`` variable (for the active code directory) or\n``armi.RES``, and some other useful root-level variables.\n\nAvoid the global keyword\n========================\nAt all costs, avoid use of the ``global`` keyword in your code. Using this keyword can, and usually does, create\nextremely fragile code that is nigh-impossible to use a debugger on. Especially as part of object-oriented programming,\nthis is extremely lazy design. A careful reader might notice that there are several files in ARMI that are currently\nusing the ``global`` keyword. These are all scheduled for a refactor to remove the use of ``global``. But, for now,\nchanging the code would cause more annoyance for the ARMI ecosystem userbase than fixing it would. Still, all of those\ninstances in ARMI will be fixed soon.\n\nNo new uses of ``global`` will make it through the ARMI pull request process.\n\nNaming conventions\n==================\n\n.. 
note::\n   There is a good argument to make that ARMI's use of ``camelCase`` makes the code less readable than if ARMI\n   used ``snake_case``. Unfortunately, making the switch now would affect such a large percentage of the API that\n   it would be more hassle for our user base than it is worth to change.\n\nUse meaningful names\n--------------------\nUse descriptive names for variables, functions, methods, classes, and files. This might mean using a longer name like\n``correlationMatrix`` instead of a shorter one like ``cm``.\n\nGeneral conventions\n-------------------\n\nHere are some general naming guidelines that are always applicable, but particularly applicable to public classes,\nfunctions, and methods and their signatures (the signature includes the parameters):\n\n* Variables that you designate as unused should be prefaced with an underscore (``_``).\n* Do not use Python `reserved keywords <https://realpython.com/lessons/reserved-keywords/>`_ as variable names.\n* Try to use names that are pronounceable. (Well-established variable names from equations are acceptable.)\n* Keep names concise and expressive. (An exception is test method names, which may be longer and more\n  descriptive.)\n* Avoid abbreviations and acronyms, unless they are well understood by subject-matter experts (e.g. DB for database,\n  XS for cross-sections, BU for burn up). When using acronyms or abbreviations with ``camelCase`` or ``PascalCase``:\n\n  * Use the same case for two-letter acronyms/abbreviations (e.g. ``diskIO``, ``ioOperation``)\n  * Use different case for acronyms/abbreviations with more than two characters (e.g. ``renderHtml()``, ``path``)\n\nFor consistency, use the following naming conventions:\n\npackage names\n    Python packages, i.e. folders with an ``__init__.py``, **shall** use ``camelCase``.\n\nmodule names\n    Python modules, i.e. 
python files, **shall** use ``camelCase``.\n\n    **Caveat:** Test modules are prefixed with ``test_``.\n\nmodule constants\n    Module-level \"constants\" **shall** be all capitals with an underscore separating words.\n\nfunction names\n    Functions **shall** use ``camelCase``. If the function is only intended to be used within that module, prefix\n    it with a single leading underscore to indicate it is \"module protected.\"\n\nvariable names\n    Use ``camelCase``. In the odd scenario that the variable is not used (e.g. a method returns a\n    tuple and you only want the first item), prefix it with a single leading underscore to indicate it is\n    \"module protected.\"\n\nclass names\n    Classes **shall** use ``PascalCase``. If the class is only intended to be inherited by other classes within\n    the module, prefix the class name with an underscore to indicate it is \"module protected.\"\n\nclass attribute, instance attribute and method names\n    Use ``camelCase``. If the method is only intended to be used within that module, prefix it with a single\n    leading underscore to indicate it is \"class protected.\"\n\nNaming quick-reference\n----------------------\n\n.. 
list-table::\n    :widths: 40 30 30\n    :header-rows: 1\n\n    * - Item to be named\n      - Public\n      - Private\n    * - package\n\n        (folder with an ``__init__.py``)\n      - ``packageName``\n      - N/A\n    * - module\n\n        (a ``.py`` file)\n      - ``moduleName``\n      - N/A\n    * - module constant\n      - ``SPEED_OF_LIGHT_IN_METERS_PER_SECOND``\n      - ``_ONE_OVER_PI``\n    * - method or function\n      - ``doSomeAction()``\n      - ``_doSomeAction()``\n    * - class or instance attribute\n      - ``assemblies``\n      - ``_assemblies``\n    * - variable names\n      - ``linearHeatGenerationRate``\n      - ``_unusedDescription``\n\n        There are not \"private\" variables, use this for an unused variable.\n\nCommon naming conventions within ARMI\n-------------------------------------\nSingle character variable names are not usually \"clear\" or \"concise\"; however, the following variables are a\nwell-established convention within ARMI and should be used by developers:\n\n    * ``r`` when referring to a reactor, and\n    * ``o`` when referring to a operator\n\nOther names are also consistently used throughout ARMI for specific objects:\n\n* ``cs`` when referring to a :py:class:``armi.settings.Settings`` class; this should not be confused with\n  the ``.settings`` attribute of ``ArmiObject``.\n* ``lib`` when referring to a cross section library (would have been better as ``xsLib``)\n\n\nPrefer shorter methods\n======================\nA method should have one clear purpose. If you are writing a method that does one thing after the other,\nbreak it up into multiple methods and have a primary method call them in order. If your method is longer\nthan 100 lines, see if you can't break it up. This does a few things:\n\n1. It makes the code easier to read.\n2. It makes the code chunks more reusable.\n3. It makes the code easier to test.\n4. 
It makes the code easier to profile, for performance.\n\nAvoid repeating code\n====================\nIn other words, don't repeat yourself. (`D. R. Y. <https://en.wikipedia.org/wiki/Don't_repeat_yourself>`_).\nRepetitious code is harder to read, and harder for others to update. If you ever find yourself copying and pasting\ncode, consider pulling the repeated code out into its own function, or using a loop.\n\nPublic methods should have docstrings\n=====================================\nAlways create the `proper docstrings <https://numpydoc.readthedocs.io/en/latest/example.html>`_ for all public\nfunctions and public classes.\n\nUnit tests\n==========\nAll ARMI developers are required to write unit tests.\n\n.. important ::\n    If you add a new function to the code base, you are required to add unit tests to cover that function.\n\nARMI uses the ``pytest`` library to drive tests, therefore tests need to be runnable from the commandline by\n``python -m pytest armi``. Furthermore, for consistency:\n\n* Each individual unit test should take under 10 seconds, on a modern laptop.\n* All unit tests should be placed into a separate module from production code that is prefixed with ``test_``.\n* All unit tests should be written in object-oriented fashion, inheriting from ``unittest.TestCase``.\n* All test method names should start with ``test_``.\n* All test method names should be descriptive. If the test method is not descriptive enough, add a docstring.\n* Unit tests should have at least one assertion.\n\nImport statements\n=================\nPython allows many variations on the import statement, including relative imports, renaming and others. We prefer:\n\n#. one import per line,\n#. no relative imports\n#. no periods\n#. explicit module/namespace usage\n\nImport ordering\n---------------\nFor consistency, import packages in this order:\n\n1. Python built-in packages\n2. External third-party packages\n3. 
ARMI modules\n\nPlace a single line between each of these groups, for example:\n\n.. code-block:: python\n    :linenos:\n\n    import os\n    import math\n\n    import numpy as np\n    from matplotlib import pyplot\n\n    from armi import runLog\n\nDon't create naked exceptions.\n==============================\nWhen creating ``try``/``except`` blocks, a naked exception is when the ``except`` command is not\nfollowed by a specific exception type. Naked exceptions hide a lot of sins, particularly unexpected\nbugs. `This article <http://www.wilfred.me.uk/blog/2013/11/03/no-naked-excepts/>`_ explains the concept well,\nas well as a few exceptions to this general rule.\n\nExamples:\n\nBad\n\n::\n\n    >>> try:\n    >>>     stuff()\n    >>> except:\n    >>>     runLog.warning('Some error occurred in stuff().')\n\nGood (for one exception type)\n\n::\n\n    >>> try:\n    >>>     stuff()\n    >>> except AttributeError:\n    >>>     runLog.warning('Some error occurred in stuff().')\n\nGood (for multiple exception types)\n\n::\n\n    >>> try:\n    >>>     stuff()\n    >>> except (ZeroDivisionError, FloatingPointError):\n    >>>     runLog.warning('Some error occurred in stuff().')\n\n\nData model\n==========\nAny reactor state information that is created by an ``Interface`` should be stored in the ARMI data model. The goal\nis that given minimal information (i.e. case settings and blueprints) ARMI should be able to load an entire reactor\nsimulation from a given database. If you add state data to your modeling that isn't stored in the reactor, or add\nnew input files, you will break this paradigm and make everyone's life just a little bit harder.\n\nInput files\n===========\nARMI developers **shall** use one of the following well-defined, Python-supported, input file formats.\n\n.json\n    JSON files are used for a variety of data-object representations. There are some limitations of\n    JSON, in that it does not easily support comments. 
JSON is also very strict.\n\n.yaml\n    YAML files are like JSON files but can have comments in them.\n\nGeneral do's and don'ts\n=======================\n\nDo not use ``print``\n    ARMI code should not use the ``print`` function; use one of the methods within ``armi.runLog``.\n\nDo not add new ``TODO`` statements to the repo.\n    If your new ``TODO`` statement is important, it should be a GitHub Issue. Similarly, never mark\n    the code with ``FIXME`` or ``XXX``; open a ticket.\n\nDo not link GitHub tickets or PRs in code.\n    The idea in ARMI is that either something is worth documenting well in a docstring, or the docs,\n    or it is not. And just linking a ticket or PR in a docstring is not helpful.\n"
  },
  {
    "path": "doc/developer/testing.rst",
    "content": ".. _armi-testing:\n\n******************\nARMI Testing Tools\n******************\n\nARMI has many useful tools to streamline tests in the plugins. Included here are some popular ones. If you are trying to write a new unit test, chances are something like it has been done before and you do not need to design it from scratch. Look around ARMI and other plugins for examples of tests. The ``armi.testing`` module is always a good place to start.\n\n\nTesting with runLog\n===================\n\nUse Case: Test code that prints to stdout\n\nWhile there are some other mocking examples in ARMI, none are as heavily used as ``mockRunLogs``. ``mockRunLogs.BufferLog()`` is used to capture the ``runLog`` output instead of printing it.\n\nIn `test_comparedb3.py <https://github.com/terrapower/armi/blob/49f357b2a92aaffaf883642f7b86fbe21b0e0272/armi/bookkeeping/db/tests/test_comparedb3.py>`_, there is a (simplified here) use case. A portion of the test for ``_diffSpecialData`` wants to confirm the below printout has happened, so it uses the ``getStdout()`` method to check that the expected printout exists.\n\nExample of ``mockRunLogs``:\n\n.. code-block:: python\n\n    from armi.tests import mockRunLogs\n\n    class TestCompareDB3(unittest.TestCase):\n        # ...\n\n        def test_diffSpecialData(self):\n            dr = DiffResults(0.01)\n            fileName = \"test.txt\"\n            with OutputWriter(fileName) as out:\n                with mockRunLogs.BufferLog() as mock:\n                    #... skip for clarity: create refData & srcData\n                    _diffSpecialData(refData, srcData, out, dr)\n                    self.assertEqual(dr.nDiffs(), 0)\n                    self.assertIn(\"Special formatting parameters for\", mock.getStdout())\n\nThere are examples of this throughout ARMI. 
Search for ``BufferLog`` or ``getStdout`` in the code to find examples.\n\n\nSelf-Cleaning Directories\n=========================\n\nUse Case: Automatically cleans up tests that create files:\n\n.. code-block:: python\n\n    from armi.utils.directoryChangers import TemporaryDirectoryChanger\n\nTwo main uses of this class in testing:\n\n1. Standalone test that calls code that creates something (`test_operators.py <https://github.com/terrapower/armi/blob/2bcb03689954ae39f3044f18a9a77c1fb7a0e63b/armi/operators/tests/test_operators.py#L237-L242>`_):\n\n.. code-block:: python\n\n     def test_snapshotRequest(self, fakeDirList, fakeCopy): \n         fakeDirList.return_value = [\"mccAA.inp\"] \n         with TemporaryDirectoryChanger(): \n             with mockRunLogs.BufferLog() as mock: \n                 self.o.snapshotRequest(0, 1) \n                 self.assertIn(\"ISOTXS-c0\", mock.getStdout()) \n\n2. Setup and teardown of a testing class, where all/most of the tests create something (`test_comparedb3.py <https://github.com/terrapower/armi/blob/2bcb03689954ae39f3044f18a9a77c1fb7a0e63b/armi/bookkeeping/db/tests/test_comparedb3.py#L36-L52>`_):\n\n.. code-block:: python\n\n     class TestCompareDB3(unittest.TestCase): \n         \"\"\"Tests for the compareDB3 module.\"\"\" \n      \n         def setUp(self): \n             self.td = TemporaryDirectoryChanger() \n             self.td.__enter__() \n      \n         def tearDown(self): \n             self.td.__exit__(None, None, None) \n      \n         def test_outputWriter(self): \n             fileName = \"test_outputWriter.txt\" \n             with OutputWriter(fileName) as out: \n                 out.writeln(\"Rubber Baby Buggy Bumpers\") \n      \n             txt = open(fileName, \"r\").read() \n             self.assertIn(\"Rubber\", txt) \n\nNote that sometimes it is necessary to give the temporary directory change object a non-default root path:\n\n.. 
code-block:: python\n\n    Include root argument\n    THIS_DIR = os.path.dirname(__file__)\n    # ...\n\n    def test_something():\n        with TemporaryDirectoryChanger(root=THIS_DIR): \n            # test something\n\n\nLoad a Test Reactor\n===================\n\nUse Case: You need a full reactor for a unit test\n\n.. warning::\n    This is computationally expensive, and historically over-used for unit tests. Consider whether mocking or BYO components (below) can be used instead.\n\n\nTo get the standard ARMI test reactor, import this:\n\n.. code-block:: python\n\n    from armi.reactor.tests.test_reactors import loadTestReactor\n\nThis function will return a reactor object. And it takes various input arguments to allow you to customize that reactor:\n\n.. code-block:: python\n\n     def loadTestReactor( \n         inputFilePath=TEST_ROOT, \n         customSettings=None, \n         inputFileName=\"armiRun.yaml\", \n     ): \n\nSo many interfaces and methods require an operator or a reactor, and ``loadTestReactor`` returns both. From there you can use the whole reactor or just grab a single ARMI object, like a `fuel block <https://github.com/terrapower/armi/blob/58b0e8198d2f8a217c1db84e97127adfe7e91c09/armi/reactor/tests/test_blocks.py#L3030-L3036>`_:\n\n.. code-block:: python\n\n     _o, r = loadTestReactor(\n        os.path.join(TEST_ROOT, \"smallestTestReactor\"),\n        inputFileName=\"armiRunSmallest.yaml\",\n    )\n\n    # grab a pinned fuel block\n    b = r.core.getFirstBlock(Flags.FUEL)\n\nIf you need a full reactor for a unit test, always try to start with the ``smallestTestReactor.yaml`` shown above first. Your tests will run faster if you pick the smallest possible reactor that meets your needs. Less is more.\n\nSidebar: Speed up Test Reactor Tests\n------------------------------------\nMaybe you do need an entire reactor for your unit test, but you don't need a very large one. In that case, ARMI comes with a few standard tools:\n\n#. 
``from armi.testing import reduceTestReactorRings`` - Reduce the size of the test reactor you are using.\n#. ``from armi.testing import getEmptyCartesianReactor`` - Provides a test cartesian reactor with no assemblies or blocks inside.\n#. ``from armi.testing import getEmptyHexReactor`` - Provides a test hex reactor with no assemblies or blocks inside.\n\n\nTest Blocks and Assemblies\n==========================\n\nUse Case: Your unit test needs some ARMI objects, but not a full test reactor.\n\nARMI provides several helpful tools for generating simple blocks and assemblies for unit tests:\n\n* ``from armi.reactor.tests.test_assemblies import buildTestAssemblies`` - Two hex blocks.\n* ``from armi.reactor.tests.test_blocks import buildSimpleFuelBlock`` - A simple hex block containing fuel, clad, duct, and coolant.\n* ``from armi.reactor.tests.test_blocks import loadTestBlock`` - An annular test block.\n\n"
  },
  {
    "path": "doc/developer/tooling.rst",
    "content": ".. _armi-tooling:\n\n**************************\nTooling and Infrastructure\n**************************\n\nGood Commit Messages\n====================\nThe ARMI project follows a few basic rules for \"good\" commit messages:\n\n* The purpose of the message is to explain the changes you made to a stranger 5 years from now.\n* Keep your writing short and to the point.\n* The first line of each commit must be shorter than 50 characters.\n* Commit messages should be active voice, present tense.\n* Multi-line comments are allowed, but make sure the second line of the commit is blank:\n\n.. code-block::\n\n    Adding this commit for REASONS.\n\n    Here is some super important extra info.\n    Oh, there is so much extra info.\n    This section\n    * is\n    * totally\n    * optional.\n\nGood Pull Requests\n==================\nA good commit is like a sentence; it expresses one complete thought. In that context, a good Pull\nRequest (PR) is like a paragraph; it contains a few sentences that contain one larger thought. A\ngood PR is *not* a chapter or an entire book! It should not contain multiple independent ideas.\n\nOne Idea = One PR\n-----------------\n.. important ::\n    If you *can* break a PR into smaller PRs, containing unrelated changes, please do.\n\nIt is a discourtesy to your reviewers to make them review a PR with multiple, unrelated changes. It\nforces them to look at every line of diff in your PR and figure out which change it belongs to. They\nare busy people, and it will save them time and effort if your PR only has one main idea. If your\nPRs are smaller, you will notice a great increase in the quality of the reviews you get.\n\nDon't open until it is ready\n----------------------------\n\n.. important ::\n    Wait until your PR is complete to open it.\n\nYour PR isn't complete when the code works, it is complete when the code is polished and all the\ntests are written and working. 
The idea here is: as soon as you open a PR, people will start\nspending their time looking at it. And their time is valuable. Even though GitHub allows you to\n`open a Draft PR <https://github.blog/2019-02-14-introducing-draft-pull-requests/>`_, this is not\nthe default option in ARMI. It should not be your workflow to open a Draft PR by default. We prefer\nto keep the PR list as short as possible. A good rule of thumb is: don't open a PR until you think\nit is ready for final review.\n\nTest It\n-------\n.. important ::\n    If a PR doesn't have any changes to testing, it probably isn't complete.\n\nUnless a PR is just documentation or linting, it almost certainly needs testing to be complete. For\nexample:\n\n* If a PR adds new code, that code needs new tests to prove it is working.\n* If a PR changes existing code, there needs to be test changes to prove the code still works.\n* If a PR fixes a bug, there needs to be a test to prove the bug is fixed.\n\nIf the changes in the PR are worth the time to make, they are worth the time to test. Help your\nreviewer by proving your code works.\n\nDocument It\n-----------\n\n.. important ::\n    If it isn't documented, it doesn't exist.\n\nWe auto-document the API, so don't worry about that. But when it comes to documentation, write it\nfor somebody who is new to the code base 3 years from now, who needs to understand it in nitty-\ngritty detail to fix a bug without you. Think about variable names, comments, and docstrings. Also\nconsider (if you are making a major change) that you might be making something in the docs out-of-\ndate.\n\nWatch for Requirements\n----------------------\nWhen you are touching code in ARMI, watch out for the docstrings in the methods, classes, or\nmodules you are editing. These docstrings might have bread crumbs that link back to requirements.\nSuch breadcrumbs will look like:\n\n.. code-block::\n\n    \"\"\"\n    .. test: This is a requirement test breadcrumb.\n\n    .. 
impl: This is a requirement implementation breadcrumb.\n\n    \"\"\"\n\nIf you touch any code that has such a docstring, even at the top of the file, you are going to be\nresponsible for not breaking that code/functionality. And you will be required to explicitly call\nout that you touched such code in your PR.\n\nYour PR reviewer will take an extra look at any PR that touches a requirement test or implementation.\nAnd you will need to add a special note in your PR description, under a field called \"One-line Impact on Requirements\". This note can be as long as it needs to be, but can only be on one line.\n\n\nPackaging and dependency management\n===================================\nThere are many ways to manage and package a Python project. We try to centralize as much of this as possible in a ``pyproject.toml``, following existing conventions. In particular, we follow `the official Python packaging guidance <https://packaging.python.org/en/latest/>`_.\n\npyproject.toml\n--------------\nAs much as possible, the ARMI team will try to centralize our installation and build systems through\nthe top-level ``pyproject.toml`` file. The only exception will be our documentation, which has much\ncustomization done through the Sphinx ``doc/conf.py`` file.\n\nThe packages listed in the ``install_requires`` argument to ``setup()`` are meant to express, as\nabstractly as possible, the packages that need to be installed **somehow** for the package to work.\nIn addition, ``extras_require`` are used to specify other packages that are not strictly required,\nbut if installed enable extra functionality, like unit testing or building documentation.\n\nThird-Party Licensing\n---------------------\nBe careful when including any dependency in ARMI (say in the ``pyproject.toml`` file) not to include\nanything with a license that supersedes our Apache license. 
For instance, any third-party Python\nlibrary included in ARMI with a GPL license will make the whole project fall under the GPL license.\nBut a lot of potential users of ARMI will want to keep some of their work private, so we can't allow\nany GPL tools.\n\nFor that reason, it is generally considered best-practice in the ARMI ecosystem to only use third-\nparty Python libraries that have MIT or BSD licenses.\n\nReleasing a New Version of ARMI\n===============================\nWe use the common ``major.minor.bump`` version scheme where a version string might look like\n``0.1.7``, ``1.0.0``, or ``12.3.123``. Each number has a specific meaning:\n\n* ``major`` - Revved for major milestones of the ARMI project.\n* ``minor`` - Revved for the usual release, real feature work completed.\n* ``bump`` - Revved for very small releases, but still well-tested with a stable API.\n\n**NOTE**: Changes to documentation or testing probably do not deserve a version bump.\n\n**Any change to a major or minor version is considered a release.**\n\nOnly a core member of the ARMI team may release a new version, or add a tag of any kind to the\nrepository. The rule is *the only tags in the ARMI repo are for official versions*. If you want to\nrelease a version of ARMI, you will need admin privileges to multiple TerraPower repos on GitHub.\n\nEvery release should follow this process:\n\n1. Ensure all unit tests pass and the documentation is building correctly.\n2. Create a release PR:\n\n    - Bump the ``version`` string in ``pyproject.toml``.\n    - Now that the release is done, hard-copy the SCR information into the last release's RST file, so we don't keep regenerating it: ``doc/qa_docs/scr/x.y.rst``.\n    - Update the commit in ``doc/qa_docs/scr/latest_scr.rst`` to the release commit.\n\n3. 
Tag the commit after it goes into the repo:\n\n    - From this commit: ``git tag -a 1.0.0 -m \"Release v1.0.0\"``\n    - Or from another commit: ``git tag -a 1.0.0 <commit-hash> -m \"Release v1.0.0\"``\n    - Pushing to the repo: ``git push origin 1.0.0``\n    - **NOTE** - The ONLY tags in the ARMI repo are for official version releases.\n\n4. Also add the release notes on `the GitHub UI <https://github.com/terrapower/armi/releases>`__.\n5. Follow the instructions `here <https://github.com/terrapower/terrapower.github.io>`_ to archive the new documentation.\n6. Tell everyone!\n\nLogging with runLog\n===================\nARMI provides a logging tool, ``runLog``, to be used in place of ``print`` for all logging during a\nsimulation. It is very easy to use:\n\n.. code-block:: python\n\n    from armi import runLog\n\n    runLog.debug(\"This will only be seen if you run in debug mode.\")\n    runLog.info(\"Default log level.\")\n    runLog.error(\"The run will die, or the results are invalid.\")\n\n.. note::\n    Calling ``runLog.error()`` is not the same as calling Python's ``raise error``; a log statement\n    does not kill a run, or raise an error, it just puts some text in the log.\n\nWhen an ARMI simulation is run, it will be run at a particular log level. All log messages that are\nat or above that log level will be seen during the simulation and in the final log files. To control\nthe log level of an ARMI run, you use the setting ``verbosity`` in your settings file. You will\nprobably be running ARMI in a parallel mode, and if you want the child processes to have a different\nlog level than the main process, you can set ``branchVerbosity`` to the desired verbosity of all the\nchild processes.\n\nFor reference, here are the log levels that ARMI supports:\n\n.. 
list-table::\n    :widths: 20 20 60\n    :header-rows: 1\n\n    * - Level\n      - Value\n      - When to Use\n    * - debug\n      - 10\n      - This will only be seen if the simulation is run in debug mode.\n    * - extra\n      - 15\n      - More detailed than will normally be seen in a usual simulation.\n    * - info\n      - 20\n      - Use only for things that are important enough to be visible during every normal simulation.\n    * - important\n      - 25\n      - More important than the default log level, but not a problem or issue.\n    * - prompt\n      - 27\n      - RESERVED for the ARMI CLI.\n    * - warning\n      - 30\n      - Use ONLY for issues that may or may not invalidate the simulation results.\n    * - error\n      - 40\n      - Use ONLY for problems that halt the program or invalidate the simulation results.\n    * - header\n      - 100\n      - Use ONLY to define major sections in the log files.\n\n\nBlocking Duplicate Logs\n-----------------------\nSometimes you want to add a log message, but based on program logic it might pop up in the final log\nfile multiple times, even thousands of times. And probably you do not want that. Happily, the\n``runLog`` tool provides a simple argument that will stop a single log line from being logged more\nthan once.\n\nHere is a (silly) example of a heavily duplicated log message:\n\n.. code-block:: python\n\n    for _i in range(1000):\n        runLog.warning(\"Something wicked this way comes.\")\n\nThat log message gets printed 1,000 times, but we can ensure it is only printed once:\n\n.. code-block:: python\n\n    for _i in range(1000):\n        runLog.warning(\"Something wicked this way comes.\", single=True)\n\nObviously, this will not be useful in every scenario. But it is a handy tool to clean up your log files.\n\n\nModule-Level Logging\n--------------------\nThe ``runLog`` tool also allows you to log one module differently from the rest of the code\nbase. 
For instance, you could set the log level to \"debug\" in just one Python file, to help testing\nduring development.\n\nThat functionality is provided by what might look like a bare Python logging import, but is actually\ncalling the same underlying ``armi`` logging tooling:\n\n.. code-block:: python\n\n    import logging\n    runLog = logging.getLogger(__name__)\n\nIn either case, you can then log using the same, easy interface:\n\n.. code-block:: python\n\n    runLog.info('Normal stuff.')\n    runLog.error('Oh no!')\n\nFinally, you can change the logging level in the above scenario by doing:\n\n.. code-block:: python\n\n    runLog.setVerbosity(logging.DEBUG)\n    # or\n    runLog.setVerbosity('debug')\n"
  },
  {
    "path": "doc/gallery-src/README.rst",
    "content": "#######\nGallery\n#######\nThis section demonstrates some capabilities and offers quick reference for common use cases.\n\nTutorials with more explanatory narratives are available in :doc:`/tutorials/index`.\n\n.. tip::\n    Many of the examples build ARMI objects from test cases to be concise. You are expected to define your own objects\n    for your reactors in inputs and then use these examples on things relevant to you.\n"
  },
  {
    "path": "doc/gallery-src/analysis/README.rst",
    "content": "Analysis\n--------\n\nThis section contains various examples for performing analyses using the ARMI framework's data model.\n"
  },
  {
    "path": "doc/gallery-src/analysis/run_blockMcnpMaterialCard.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nWrite MCNP Material Cards\n=========================\n\nHere we load a test reactor and write each component of one fuel block out as MCNP material cards.\n\nNormally, code-specific utility code would belong in a code-specific ARMI plugin. But in this case, the need for MCNP\nmaterials cards is so pervasive that it made it into the framework\n\"\"\"\n\nfrom armi import configure\nfrom armi.reactor.flags import Flags\nfrom armi.reactor.tests import test_reactors\nfrom armi.utils.densityTools import formatMaterialCard\n\n# configure ARMI\nconfigure(permissive=True)\n\n_o, r = test_reactors.loadTestReactor()\n\nbFuel = r.core.getBlocks(Flags.FUEL)[0]\n\nfor ci, component in enumerate(bFuel, start=1):\n    ndens = component.getNumberDensities()\n    # convert nucName (str) keys to nuclideBase keys\n    ndensByBase = {r.nuclideBases.byName[nucName]: dens for nucName, dens in ndens.items()}\n    print(\"\".join(formatMaterialCard(ndensByBase, matNum=ci)))\n"
  },
  {
    "path": "doc/gallery-src/analysis/run_hexBlockToRZConversion.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nHex block to RZ geometry conversion\n===================================\nOften, parts of a reactor model must be transformed to a different geometry in order to\nperform a certain type of physics calculation. For example, in some fast reactor lattice\nphysics calculations, detailed descriptions of control assemblies must be mapped to\nequivalent 1-D cylindrical models.\n\nThis example shows how a control assembly defined in full hex-pin detail can be\nautomatically converted to an equivalent 1-D RZ case, including an outer ring of fuel to\ndrive the case.\n\nThis conversion includes rings for control material, gap, cladding (on both sides of each\nring of control material), coolant, duct, and fuel. The color of the plot is proportional\nto the mass density.\n\nGiven this transformation, a 1-D lattice physics solver can be executed to compute\naccurate cross sections.\n\nBy automating these kinds of geometry conversions, ARMI allows core designers to maintain\nthe design in real geometry while still performing appropriate approximations for\nefficient analysis.\n\n.. warning::\n    This uses :py:mod:`armi.reactor.converters.blockConverters`, which\n    currently only works on a constrained set of hex-based geometries. 
For your systems,\n    consider these an example and starting point and build your own converters as\n    appropriate.\n\"\"\"\n\nfrom armi import configure\nfrom armi.reactor.converters import blockConverters\nfrom armi.reactor.flags import Flags\nfrom armi.reactor.tests import test_reactors\n\n# configure ARMI\nconfigure(permissive=True)\n\n_o, r = test_reactors.loadTestReactor()\n\n# fully heterogeneous\nbFuel = r.core.getBlocks(Flags.FUEL)[0]\nbControl = r.core.getBlocks(Flags.CONTROL)[0]\nconverter = blockConverters.HexComponentsToCylConverter(sourceBlock=bControl, driverFuelBlock=bFuel, numExternalRings=1)\nconverter.convert()\nconverter.plotConvertedBlock()\n\n# partially heterogeneous\nconverter = blockConverters.HexComponentsToCylConverter(sourceBlock=bFuel, ductHeterogeneous=True)\nconverter.convert()\nconverter.plotConvertedBlock()\n"
  },
  {
    "path": "doc/gallery-src/analysis/run_hexReactorToRZ.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nHex reactor to RZ geometry conversion\n=====================================\nThis shows how an entire reactor specified in full hex detail can be\nautomatically converted to a 2-D or 3-D RZ case with conserved mass.\n\n.. warning::\n    This uses :py:mod:`armi.reactor.converters.geometryConverters`, which\n    will only work on a constrained set of hex-based geometries. For your systems,\n    consider these an example and starting point and build your own converters as\n    appropriate.\n\"\"\"\n\nimport math\n\nimport matplotlib.pyplot as plt\n\nfrom armi import configure\nfrom armi.reactor.converters import geometryConverters\nfrom armi.reactor.tests import test_reactors\nfrom armi.utils import plotting\n\n# configure ARMI\nconfigure(permissive=True)\n\no, r = test_reactors.loadTestReactor()\nkgFis = [a.getHMMass() for a in r.core]\nplotting.plotFaceMap(r.core, data=kgFis, labelFmt=\"{:.1e}\")\n\nconverterSettings = {\n    \"radialConversionType\": \"Ring Compositions\",\n    \"axialConversionType\": \"Axial Coordinates\",\n    \"uniformThetaMesh\": True,\n    \"thetaBins\": 1,\n    \"axialMesh\": [50, 100, 150, 175],\n    \"thetaMesh\": [2 * math.pi],\n}\n\nconverter = geometryConverters.HexToRZConverter(o.cs, converterSettings)\n# makes new reactor in converter.convReactor\nconverter.convert(r)\nfigs = converter.plotConvertedReactor()\n\nplt.show()\n"
  },
  {
    "path": "doc/gallery-src/framework/README.rst",
    "content": "Framework\n---------\n\nThis section provides a range of examples for utilizing the ARMI framework and its data model to explore the state of a\nreactor."
  },
  {
    "path": "doc/gallery-src/framework/run_blockVolumeFractions.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# -*- coding: utf-8 -*-\n\"\"\"\nComputing Component Volume Fractions on a Block with Automatic Thermal Expansion\n================================================================================\n\nGiven an :py:mod:`Block <armi.reactor.blocks.Block>`, compute the component volume fractions. Assess\nthe change in volume of these components within the block as the temperatures of the fuel and\nstructure components are uniformly increased.\n\nNote: Thermal expansion is automatically considered with material data defined within\n:py:mod:`materials <armi.materials>`.\n\"\"\"\n\n# ruff: noqa: E402\nimport collections\n\nimport matplotlib.pyplot as plt\n\nfrom armi import configure\n\nconfigure(permissive=True)\n\nfrom armi.reactor.flags import Flags\nfrom armi.reactor.tests.test_blocks import buildSimpleFuelBlock\nfrom armi.utils import tabulate\n\n\ndef writeInitialVolumeFractions(b):\n    \"\"\"Write out the initial temperatures and component volume fractions.\"\"\"\n    headers = [\"Component\", \"Temperature, °C\", \"Volume Fraction\"]\n    data = [(c, c.temperatureInC, volFrac) for c, volFrac in b.getVolumeFractions()]\n    print(tabulate.tabulate(data=data, headers=headers) + \"\\n\")\n\n\ndef plotVolFracsWithComponentTemps(b, uniformTemps):\n    \"\"\"Plot the percent change in vol. 
fractions as fuel/structure temperatures are uniformly increased.\"\"\"\n    # Perform uniform temperature modifications of the fuel and structural components.\n    componentsToModify = b.getComponents([Flags.FUEL, Flags.CLAD, Flags.DUCT])\n\n    initialVols = {}\n    relativeVols = collections.defaultdict(list)\n    for tempInC in uniformTemps:\n        print(f\"Updating fuel/structure components to {tempInC} °C\")\n        # Modify the fuel/structure components to the same uniform temperature\n        for c in componentsToModify:\n            c.setTemperature(tempInC)\n\n        writeInitialVolumeFractions(b)\n\n        # Iterate over all components and calculate the mass and volume fractions\n        for c in b:\n            # Set the initial volume fractions at the first uniform temperature\n            if tempInC == uniformTempsInC[0]:\n                initialVols[c] = c.getVolume()\n\n            relativeVols[c].append((c.getVolume() - initialVols[c]) / initialVols[c] * 100.0)\n\n    fig, ax = plt.subplots()\n\n    for c in b.getComponents():\n        ax.plot(uniformTempsInC, relativeVols[c], label=c.name)\n\n    ax.set_title(\"Component Volume Fractions with Automatic Thermal Expansion\")\n    ax.set_ylabel(f\"% Change in Volume from {uniformTempsInC[0]} °C\")\n    ax.set_xlabel(\"Uniform Fuel/Structure Temperature, °C\")\n    ax.legend()\n    ax.grid()\n\n    plt.show()\n\n\nuniformTempsInC = [300.0, 400.0, 500.0, 600.0, 700.0]\nb = buildSimpleFuelBlock()\n\nwriteInitialVolumeFractions(b)\nplotVolFracsWithComponentTemps(b, uniformTempsInC)\n"
  },
  {
    "path": "doc/gallery-src/framework/run_chartOfNuclides.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nPlot a chart of the nuclides\n============================\n\nUse the nuclide directory of ARMI to plot a chart of the nuclides coloring the squares with the natural abundance.\n\"\"\"\n\nimport matplotlib.pyplot as plt\n\nfrom armi import configure\nfrom armi.nucDirectory.nuclideBases import NuclideBases\n\nconfigure(permissive=True)\n\nxyc = []\nfor name, base in NuclideBases().byName.items():\n    if not base.a:\n        continue\n    xyc.append((base.a - base.z, base.z, base.abundance or 0.5))\n\nx, y, c = zip(*xyc)\nplt.figure(figsize=(12, 8))\nplt.scatter(x, y, c=c, marker=\"s\", s=6)\nplt.title(\"Chart of the nuclides\")\nplt.xlabel(\"Number of neutrons (N)\")\nplt.ylabel(\"Number of protons (Z)\")\nplt.show()\n"
  },
  {
    "path": "doc/gallery-src/framework/run_computeReactionRates.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nComputing Reaction Rates on a Block.\n====================================\n\nIn this example, a set of 1-group reaction rates (in #/s) are evaluated for a dummy fuel block containing UZr fuel, HT9\nstructure, and sodium coolant. A dummy multigroup flux is applied.\n\nThis example also demonstrates how to build a reactor model from code alone rather than relying upon input files.\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom armi import configure, settings\nfrom armi.materials import ht9, sodium, uZr\nfrom armi.nuclearDataIO.cccc import isotxs\nfrom armi.reactor import assemblies, blocks, geometry, grids, reactors\nfrom armi.reactor.components import Circle, DerivedShape, Hexagon\nfrom armi.reactor.flags import Flags\nfrom armi.tests import ISOAA_PATH\n\nconfigure(permissive=True)\n\n\ndef _addFlux(b):\n    \"\"\"Add dummy 33-group flux to the block.\"\"\"\n    # fmt: off\n    b.p.mgFlux = [\n        1.6e+11, 2.3e+12, 1.1e+13, 2.6e+13, 4.6e+13, 7.9e+13, 1.4e+14, 2.2e+14,\n        2.3e+14, 2.7e+14, 2.2e+14, 1.7e+14, 1.3e+14, 1.4e+14, 7.5e+13, 3.2e+13,\n        2.2e+13, 6.3e+12, 2.2e+13, 1.2e+13, 5.2e+12, 1.5e+12, 1.4e+12, 2.9e+11,\n        7.4e+10, 5.5e+10, 1.9e+10, 5.0e+09, 3.6e+09, 8.8e+08, 4.3e+09, 1.3e+09,\n        6.0e+08\n    ]\n    # fmt: on\n\n\ndef createDummyReactor():\n    \"\"\"\n    Create a dummy reactor with a single 
fuel assembly and a single fuel block.\n\n    Often, a reactor model like this is built directly from input files rather\n    than from code as done here.\n    \"\"\"\n    from armi.reactor.blueprints import Blueprints\n\n    bp = Blueprints()\n    cs = settings.Settings()\n\n    r = reactors.Reactor(\"Reactor\", bp)\n    r.add(reactors.Core(\"Core\"))\n    r.core.spatialGrid = grids.HexGrid.fromPitch(1.0)\n    r.core.spatialGrid.symmetry = geometry.SymmetryType(geometry.DomainType.THIRD_CORE, geometry.BoundaryType.PERIODIC)\n    r.core.spatialGrid.geomType = geometry.GeomType.HEX\n    r.core.spatialGrid.armiObject = r.core\n    r.core.setOptionsFromCs(cs)\n\n    # Create a single fuel assembly\n    a = assemblies.HexAssembly(\"fuel assembly\")\n    a.spatialGrid = grids.AxialGrid.fromNCells(1)\n    a.spatialLocator = r.core.spatialGrid[1, 0, 0]\n\n    # Create a single fuel block\n    b = blocks.HexBlock(\"fuel block\")\n    b.setType(\"fuel\")\n\n    # Create a single fuel component with UZr fuel.\n    dims = {\"Tinput\": 20, \"Thot\": 900, \"id\": 0.0, \"od\": 2.9, \"mult\": 7}\n    c = Circle(\"fuel\", uZr.UZr(), **dims)\n    b.add(c)\n\n    # Create a single structure component with HT9.\n    dims = {\"Tinput\": 20, \"Thot\": 600, \"op\": 16.0, \"ip\": 15.0, \"mult\": 1}\n    c = Hexagon(\"structure\", ht9.HT9(), **dims)\n    b.add(c)\n\n    # Fill in the rest of the block with sodium coolant.\n    dims = {\"Tinput\": 600, \"Thot\": 600}\n    c = DerivedShape(\"coolant\", sodium.Sodium(), **dims)\n    b.add(c)\n\n    a.add(b)\n    r.core.add(a)\n    _addFlux(b)\n    return r\n\n\n# Create a dummy reactor with the function defined above.\nr = createDummyReactor()\n\n# Add an example cross section library to the reactor core\nr.core.lib = isotxs.readBinary(ISOAA_PATH)\n\nb = r.core.getFirstBlock(Flags.FUEL)\nb.expandElementalToIsotopics(r.nuclideBases.byName[\"NA\"])\n\n# Iterate over a few nuclides/elements in the XS library\n# and collect the total reaction 
rates in #/s.\nallRates = []\nnucNames = [\"U235\", \"U238\", \"FE\", \"NA23\"]\nfor nucName in nucNames:\n    rateData = b.getReactionRates(nucName)\n    rateLabels = sorted(rateData.keys())  # will be constant\n    allRates.append([rateData[k] for k in rateLabels])\n\n# plot the reaction rates as a bar graph\nfig, ax = plt.subplots()\nwidth = 1.0 / len(rateLabels)\noffset = 0.0\nfor nucName, nucRates in zip(nucNames, allRates):\n    ax.bar(\n        np.arange(len(rateLabels)) + width + offset,\n        nucRates,\n        width=width,\n        label=nucName,\n    )\n    offset += width\n\nax.set_xticks(np.arange(len(rateLabels)) + 0.5)\nax.set_xticklabels(rateLabels)\n\n# Add little divider lines between reactions for clarity\nfor border in np.arange(len(rateLabels) - 1):\n    ax.axvline(border + 1, ls=\"--\", alpha=0.4, color=\"k\")\n\nax.set_xlim([0, len(rateLabels)])\n\nplt.yscale(\"log\")\nplt.legend()\nplt.title(\"Reaction rates\")\nplt.xlabel(\"Reaction type\")\nplt.ylabel(\"Reaction rate (1/s)\")\nplt.show()\n"
  },
  {
    "path": "doc/gallery-src/framework/run_fuelManagement.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nFuel management in a LWR.\n=========================\n\nDemo of locating and swapping assemblies in a core with Cartesian geometry. Given a burnup\ndistribution, this swaps high burnup assemblies with low ones.\n\nAssembly selection for moving and swapping is very flexible using the ARMI API and the\nhigh-level language features of Python. This allows highly complex fuel management\nalgorithms to be expressed and parameterized.\n\nBecause the ARMI framework does not come with a LWR global flux/depletion solver, actual\nflux/depletion results would need to be provided by a physics plugin before actually using\nARMI to do fuel management. 
Thus, this example applies a dummy burnup distribution for\ndemonstration purposes.\n\"\"\"\n\nimport math\n\nfrom armi import configure\nfrom armi.physics.fuelCycle import fuelHandlers\nfrom armi.reactor.flags import Flags\nfrom armi.reactor.tests import test_reactors\nfrom armi.utils import plotting\n\n# configure ARMI\nconfigure(permissive=True)\n\no, reactor = test_reactors.loadTestReactor(inputFileName=\"refTestCartesian.yaml\")\n\n# Apply a dummy burnup distribution roughly in a cosine\nfor b in reactor.core.getBlocks(Flags.FUEL):\n    x, y, z = b.spatialLocator.getGlobalCoordinates()\n    d = math.sqrt(x**2 + y**2)\n    b.p.percentBu = 5 * math.cos(d * math.pi / 2 / 90)\n\n# show the initial burnup distribution\nplotting.plotFaceMap(reactor.core, param=\"percentBu\")\n\nfuelHandler = fuelHandlers.FuelHandler(o)\n\ncandidateAssems = reactor.core.getAssemblies(Flags.FUEL)\ncriterion = lambda a: a.getMaxParam(\"percentBu\")\ncandidateAssems.sort(key=criterion)\n\nfor num in range(12):\n    # swap the 12 highest burnup assemblies with the 12 lowest burnup ones\n    high = candidateAssems.pop()\n    low = candidateAssems.pop(0)\n    fuelHandler.swapAssemblies(high, low)\n\n# re-filter the remaining candidates for more complex selections\ncandidateAssems = [a for a in candidateAssems if a.getMaxParam(\"percentBu\") < 4.0]\nfor num in range(8):\n    high = candidateAssems.pop()\n    low = candidateAssems.pop(0)\n    fuelHandler.swapAssemblies(high, low)\n\n# show final burnup distribution\nplotting.plotFaceMap(reactor.core, param=\"percentBu\")\n"
  },
  {
    "path": "doc/gallery-src/framework/run_grids1_hex.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nMake a hex grid.\n================\n\nThis uses a grid factory method to build an infinite 2-D grid of hexagons with pitch\nequal to 1.0 cm.\n\nLearn more about :py:mod:`grids <armi.reactor.grids>`.\n\"\"\"\n\nimport math\n\nimport matplotlib.patches as mpatches\nimport matplotlib.pyplot as plt\nfrom matplotlib.collections import PatchCollection\n\nfrom armi import configure\nfrom armi.reactor import grids\n\nconfigure(permissive=True)\n\nhexes = grids.HexGrid.fromPitch(1.0)\n\npolys = []\nfig, ax = plt.subplots()\nax.set_aspect(\"equal\")\nax.set_axis_off()\n\nfor hex_i in hexes.generateSortedHexLocationList(127):\n    x, y, z = hex_i.getGlobalCoordinates()\n    ax.text(x, y, f\"{hex_i.i},{hex_i.j}\", ha=\"center\", va=\"center\", fontsize=8)\n    polys.append(mpatches.RegularPolygon((x, y), numVertices=6, radius=1 / math.sqrt(3), orientation=math.pi / 2))\npatches = PatchCollection(polys, fc=\"white\", ec=\"k\")\nax.add_collection(patches)\n\n# create a bounding box around patches with a small margin (2%)\nbbox = patches.get_datalim(ax.transData)\nbbox = bbox.expanded(1.02, 1.02)\nax.set_xlim(bbox.xmin, bbox.xmax)\nax.set_ylim(bbox.ymin, bbox.ymax)\nax.set_title(\"(i, j) indices for a hex grid\")\nplt.show()\n"
  },
  {
    "path": "doc/gallery-src/framework/run_grids2_cartesian.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nMake a Cartesian grid.\n======================\n\nThis builds a Cartesian grid with squares 1 cm square, with the z-coordinates\nprovided explicitly. It is also offset in 3D space to X, Y, Z = 10, 5, 5 cm.\n\nLearn more about :py:mod:`grids <armi.reactor.grids>`.\n\"\"\"\n\nimport itertools\n\nimport matplotlib.pyplot as plt\n\nfrom armi import configure\nfrom armi.reactor import grids\n\nconfigure(permissive=True)\n\nfig = plt.figure()\nzCoords = [1, 4, 8]\ncartesian_grid = grids.CartesianGrid(\n    unitSteps=((1, 0), (0, 1)),\n    bounds=(None, None, zCoords),\n    offset=(10, 5, 5),\n)\nxyz = []\n\n# the grid is infinite in i and j so we will just plot the first 10 items\nfor i, j, k in itertools.product(range(10), range(10), range(len(zCoords) - 1)):\n    xyz.append(cartesian_grid[i, j, k].getGlobalCoordinates())\nax = fig.add_subplot(1, 1, 1, projection=\"3d\")\nx, y, z = zip(*xyz)\nax.scatter(x, y, z)\nplt.show()\n"
  },
  {
    "path": "doc/gallery-src/framework/run_grids3_rzt.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nMake a Theta-R-Z grid.\n======================\n\nThis builds a 3-D grid in Theta-R-Z geometry by specifying the theta, r, and z\ndimension bounds explicitly.\n\nLearn more about :py:mod:`grids <armi.reactor.grids>`.\n\"\"\"\n\nimport itertools\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom armi import configure\nfrom armi.reactor import grids\n\nconfigure(permissive=True)\n\nfig = plt.figure()\ntheta = np.linspace(0, 2 * np.pi, 10)\nrad = np.linspace(0, 10, 10)\nz = np.linspace(5, 25, 6)\nrz_grid = grids.ThetaRZGrid(bounds=(theta, rad, z))\n\n\nxyz = []\nfor i, j, k in itertools.product(range(len(theta) - 1), range(len(rad) - 1), range(len(z) - 1)):\n    xyz.append(rz_grid[i, j, k].getGlobalCoordinates())\nax = fig.add_subplot(1, 1, 1, projection=\"3d\")\nx, y, z = zip(*xyz)\nax.scatter(x, y, z)\n\nplt.show()\n"
  },
  {
    "path": "doc/gallery-src/framework/run_isotxs.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nPlotting Multi-group XS from ISOTXS.\n====================================\n\nIn this example, several cross sections are plotted from\nan existing binary cross section library file in :py:mod:`ISOTXS <armi.nuclearDataIO.isotxs>` format.\n\"\"\"\n\nimport matplotlib.pyplot as plt\n\nfrom armi import configure\nfrom armi.nuclearDataIO.cccc import isotxs\nfrom armi.physics.neutronics import energyGroups\nfrom armi.tests import ISOAA_PATH\n\nconfigure(permissive=True)\n\ngs = energyGroups.getGroupStructure(\"ANL33\")\nlib = isotxs.readBinary(ISOAA_PATH)\n\nfe56 = lib.getNuclide(\"FE\", \"AA\")\nu235 = lib.getNuclide(\"U235\", \"AA\")\nu238 = lib.getNuclide(\"U238\", \"AA\")\nb10 = lib.getNuclide(\"B10\", \"AA\")\n\nplt.step(gs, fe56.micros.nGamma, label=r\"Fe (n, $\\gamma$)\")\nplt.step(gs, u235.micros.fission, label=\"U-235 (n, fission)\")\nplt.step(gs, u238.micros.nGamma, label=r\"U-238 (n, $\\gamma$)\")\nplt.step(gs, b10.micros.nalph, label=r\"B-10 (n, $\\alpha$)\")\n\nplt.xscale(\"log\")\nplt.yscale(\"log\")\nplt.xlabel(\"Neutron Energy, eV\")\nplt.ylabel(\"Cross Section, barns\")\nplt.grid(alpha=0.2)\nplt.legend()\n\nplt.show()\n"
  },
  {
    "path": "doc/gallery-src/framework/run_isotxs2_matrix.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nPlotting a multi-group scatter matrix.\n======================================\n\nHere we plot scatter matrices from an ISOTXS microscopic cross section library.\nWe plot the inelastic scatter cross section of U235 as well as the (n,2n) source\nmatrix.\n\nSee Also: :py:mod:`ISOTXS <armi.nuclearDataIO.isotxs>` format.\n\"\"\"\n\nimport matplotlib.pyplot as plt\n\nfrom armi import configure\nfrom armi.nuclearDataIO import xsNuclides\nfrom armi.nuclearDataIO.cccc import isotxs\nfrom armi.tests import ISOAA_PATH\n\nconfigure(permissive=True)\n\nlib = isotxs.readBinary(ISOAA_PATH)\n\nu235 = lib.getNuclide(\"U235\", \"AA\")\nxsNuclides.plotScatterMatrix(u235.micros.inelasticScatter, \"U-235 inelastic\")\n\nplt.figure()\nxsNuclides.plotScatterMatrix(u235.micros.n2nScatter, \"U-235 n,2n src\")\n"
  },
  {
    "path": "doc/gallery-src/framework/run_materials.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nListing of Material Library.\n============================\n\nThis is a listing of all the elements in all the materials that are included in the ARMI\nmaterial library. Many of the materials in this library are academic in quality and\ncontents. Some have temperature dependent properties, but some don't. You can provide\nyour own proprietary material properties via a plugin.\n\nMore info about the materials here: :py:mod:`armi.materials`.\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nfrom armi import configure, materials\nfrom armi.nucDirectory.nuclideBases import NuclideBases\n\nMAX_Z = 98  # stop at Californium\n\nconfigure(permissive=True)\n\nmaterialNames = []\nmats = list(materials.iterAllMaterialClassesInNamespace(materials))\nnumMats = len(mats)\n\nzVals = np.zeros((numMats, MAX_Z))\n\nnuclideBases = NuclideBases()\nfor mi, matCls in enumerate(mats):\n    m = matCls()\n    materialNames.append(m.name)\n    for nucName, frac in m.massFrac.items():\n        nb = nuclideBases.byName[nucName]\n        idx = mi, nb.z - 1\n        try:\n            zVals[idx] += frac\n        except IndexError:\n            # respect the MAX_Z bounds\n            pass\n\nfig, ax = plt.subplots(figsize=(16, 12))\nim = ax.imshow(zVals, 
cmap=\"YlGn\")\n\nax.set_xticks(np.arange(MAX_Z))\nax.set_yticks(np.arange(numMats))\nax.set_xticklabels(np.arange(MAX_Z) + 1, fontsize=6)\nax.set_yticklabels(materialNames)\nax.set_xlabel(\"Proton number (Z)\")\nax.grid(alpha=0.2, ls=\"--\")\n\nax.set_title(\"Mass fractions in the ARMI material library\")\nplt.show()\n"
  },
  {
    "path": "doc/gallery-src/framework/run_programmaticReactorDefinition.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nBuild Reactor Inputs Programmatically.\n======================================\n\nSometimes it's desirable to build input definitions for ARMI using\ncode rather than by writing the textual input files directly.\nIn ARMI you can either make the ARMI reactor objects directly,\nor you can define Blueprints objects. The benefit of making Blueprints\nobjects is that they can in turn be used to create both ARMI reactor\nobjects as well as textual input itself. This is nice when you want to\nhave traceable input files associated with a run that was developed\nprogrammatically (e.g. 
for parameter sweeps).\n\nThis example shows how to make Blueprints objects programmatically completely\nfrom scratch.\n\"\"\"\n\n# ruff: noqa: E402\nimport matplotlib.pyplot as plt\n\nfrom armi import configure\n\n# configure ARMI\nconfigure(permissive=True)\n\nfrom armi import cases\nfrom armi.reactor import blueprints\nfrom armi.reactor.blueprints import (\n    assemblyBlueprint,\n    blockBlueprint,\n    componentBlueprint,\n    gridBlueprint,\n    isotopicOptions,\n    reactorBlueprint,\n)\nfrom armi.settings import caseSettings\nfrom armi.utils import plotting\n\n\ndef buildCase():\n    \"\"\"Build input components and a case.\"\"\"\n    bp = blueprints.Blueprints()\n    bp.customIsotopics = isotopicOptions.CustomIsotopics()\n    bp.nuclideFlags = isotopicOptions.genDefaultNucFlags()\n\n    components = buildComponents()\n    bp.blockDesigns = buildBlocks(components)\n    bp.assemDesigns = buildAssemblies(bp.blockDesigns)\n    bp.gridDesigns = buildGrids()\n    bp.systemDesigns = buildSystems()\n\n    cs = caseSettings.Settings()\n    cs.path = None\n    cs.caseTitle = \"scripted-case\"\n    case = cases.Case(cs=cs, bp=bp)\n\n    return case\n\n\ndef buildComponents():\n    ISOTHERMAL_TEMPERATURE_IN_C = 450.0\n    fuel = componentBlueprint.ComponentBlueprint()\n    fuel.name = \"fuel\"\n    fuel.shape = \"Circle\"\n    fuel.mult = 217\n    fuel.material = \"UZr\"\n    fuel.Tinput = ISOTHERMAL_TEMPERATURE_IN_C\n    fuel.Thot = ISOTHERMAL_TEMPERATURE_IN_C\n    fuel.id = 0.0\n    fuel.od = 0.4\n\n    clad = componentBlueprint.ComponentBlueprint()\n    clad.name = \"clad\"\n    clad.mult = \"fuel.mult\"\n    clad.shape = \"Circle\"\n    clad.material = \"HT9\"\n    clad.Tinput = ISOTHERMAL_TEMPERATURE_IN_C\n    clad.Thot = ISOTHERMAL_TEMPERATURE_IN_C\n    clad.id = 0.508\n    clad.od = 0.5842\n\n    gap = componentBlueprint.ComponentBlueprint()\n    gap.name = \"gap\"\n    gap.shape = \"Circle\"\n    gap.mult = \"fuel.mult\"\n    gap.material = \"Void\"\n    
gap.Tinput = ISOTHERMAL_TEMPERATURE_IN_C\n    gap.Thot = ISOTHERMAL_TEMPERATURE_IN_C\n    gap.id = \"fuel.od\"\n    gap.od = \"clad.id\"\n\n    wire = componentBlueprint.ComponentBlueprint()\n    wire.name = \"wire\"\n    wire.mult = \"fuel.mult\"\n    wire.shape = \"Helix\"\n    wire.material = \"HT9\"\n    wire.Tinput = ISOTHERMAL_TEMPERATURE_IN_C\n    wire.Thot = ISOTHERMAL_TEMPERATURE_IN_C\n    wire.id = 0.0\n    wire.od = 0.14224\n    wire.axialPitch = 30.48\n    wire.helixDiameter = 0.72644\n\n    duct = componentBlueprint.ComponentBlueprint()\n    duct.name = \"duct\"\n    duct.mult = 1\n    duct.shape = \"Hexagon\"\n    duct.material = \"HT9\"\n    duct.Tinput = ISOTHERMAL_TEMPERATURE_IN_C\n    duct.Thot = ISOTHERMAL_TEMPERATURE_IN_C\n    duct.ip = 11.0109\n    duct.op = 11.6205\n\n    intercoolant = componentBlueprint.ComponentBlueprint()\n    intercoolant.name = \"intercoolant\"\n    intercoolant.mult = 1\n    intercoolant.shape = \"Hexagon\"\n    intercoolant.material = \"Sodium\"\n    intercoolant.Tinput = ISOTHERMAL_TEMPERATURE_IN_C\n    intercoolant.Thot = ISOTHERMAL_TEMPERATURE_IN_C\n    intercoolant.ip = \"duct.op\"\n    intercoolant.op = 12.01420\n\n    coolant = componentBlueprint.ComponentBlueprint()\n    coolant.name = \"coolant\"\n    coolant.shape = \"DerivedShape\"\n    coolant.material = \"Sodium\"\n    coolant.Tinput = ISOTHERMAL_TEMPERATURE_IN_C\n    coolant.Thot = ISOTHERMAL_TEMPERATURE_IN_C\n\n    componentBlueprints = {c.name: c for c in [fuel, gap, clad, wire, duct, intercoolant, coolant]}\n\n    return componentBlueprints\n\n\ndef buildBlocks(components):\n    \"\"\"Build block blueprints.\"\"\"\n    blocks = blockBlueprint.BlockKeyedList()\n    fuel = blockBlueprint.BlockBlueprint()\n    fuel.name = \"fuel\"\n    for cname, c in components.items():\n        fuel[cname] = c\n    blocks[fuel.name] = fuel\n\n    reflector = blockBlueprint.BlockBlueprint()\n    reflector.name = \"reflector\"\n    reflector[\"coolant\"] = 
components[\"coolant\"]\n    reflector[\"duct\"] = components[\"duct\"]\n    blocks[reflector.name] = reflector\n\n    return blocks\n\n\ndef buildAssemblies(blockDesigns):\n    \"\"\"Build assembly blueprints.\"\"\"\n    fuelBock, reflectorBlock = blockDesigns[\"fuel\"], blockDesigns[\"reflector\"]\n\n    assemblies = assemblyBlueprint.AssemblyKeyedList()\n\n    fuelAssem = assemblyBlueprint.AssemblyBlueprint()\n    fuelAssem.name = \"Fuel\"\n    fuelAssem.specifier = \"IC\"\n\n    fuelAssem.blocks = blockBlueprint.BlockList()\n    fuelAssem.blocks.extend([reflectorBlock, fuelBock, fuelBock, fuelBock, reflectorBlock])\n    fuelAssem.height = [10, 20, 20, 20, 10]\n    fuelAssem.xsTypes = [\"A\"] * 5\n    fuelAssem.axialMeshPoints = [1] * 5\n\n    assemblies[fuelAssem.name] = fuelAssem\n\n    reflectorAssem = assemblyBlueprint.AssemblyBlueprint()\n    reflectorAssem.name = \"Reflector\"\n    reflectorAssem.specifier = \"RR\"\n    reflectorAssem.blocks = blockBlueprint.BlockList()\n    reflectorAssem.blocks.extend([reflectorBlock] * 5)\n    reflectorAssem.height = [10, 20, 20, 20, 10]\n    reflectorAssem.xsTypes = [\"A\"] * 5\n    reflectorAssem.axialMeshPoints = [1] * 5\n    assemblies[reflectorAssem.name] = reflectorAssem\n\n    return assemblies\n\n\ndef buildGrids():\n    \"\"\"Build the core map grid.\"\"\"\n    coreGrid = gridBlueprint.GridBlueprint(\"core\")\n    coreGrid.geom = \"hex\"\n    coreGrid.symmetry = \"third periodic\"\n    coreGrid.origin = gridBlueprint.Triplet()\n\n    coreGrid.latticeMap = \"\"\"\n         RR   RR\n           IC   RR\n         IC   IC   RR\"\"\"\n\n    grids = gridBlueprint.Grids()\n    grids[\"core\"] = coreGrid\n    return grids\n\n\ndef buildSystems():\n    \"\"\"Build the core system.\"\"\"\n    systems = reactorBlueprint.Systems()\n    core = reactorBlueprint.SystemBlueprint(\"core\", \"core\", gridBlueprint.Triplet())\n    systems[\"core\"] = core\n    return systems\n\n\nif __name__ == \"__main__\":\n    case = 
buildCase()\n    # build ARMI objects\n    o = case.initializeOperator()\n    fig = plotting.plotAssemblyTypes(\n        list(case.bp.assemblies.values()),\n        None,\n        showBlockAxMesh=True,\n    )\n    plt.show()\n\n    # also write input files\n    case.writeInputs()\n"
  },
  {
    "path": "doc/gallery-src/framework/run_reactorFacemap.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nPlot a reactor facemap.\n=======================\n\nLoad a test reactor from the test suite and plot a dummy\npower distribution from it. You can plot any block parameter.\n\"\"\"\n\nfrom armi import configure\nfrom armi.reactor.tests import test_reactors\nfrom armi.utils import plotting\n\n# configure ARMI\nconfigure(permissive=True)\n\noperator, reactor = test_reactors.loadTestReactor()\nreactor.core.growToFullCore(None)\n# set dummy power\nfor b in reactor.core.getBlocks():\n    x, y, z = b.spatialLocator.getGlobalCoordinates()\n    b.p.pdens = x**2 + y**2 + z**2\n\nplotting.plotFaceMap(reactor.core, param=\"pdens\", labelFmt=\"{0:.1e}\")\n"
  },
  {
    "path": "doc/gallery-src/framework/run_transmutationMatrix.py",
    "content": "# Copyright 2019 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nTransmutation and decay reactions.\n==================================\n\nThis plots some of the transmutation and decay pathways for the actinides and some light\nnuclides using the burn chain definition that is included with ARMI. Note that many of\nthese reactions are shortcut for reactor analysis. For example, a U-238 capture goes\ndirectly to NP-239 rather than first going to U-239. Some (n,2n) reactions quickly beta\ndecay, so the transmutation goes right to the product. For the decays, the arrow has been\nadjusted in width based on the branching ratio. The transmutations are all constant since\ntheir rates would depend on the neutron spectrum being modeled. 
This is mostly a demo of\nmore features of the :py:mod:`armi.nucDirectory` subpackage.\n\nUsers can input their own transmutation matrix or use this one.\n\nA Bateman equation/matrix exponential solver is required to actually *solve* transmutation and\ndecay problems, which can be provided via a plugin.\n\"\"\"\n\nimport math\nimport os\n\nimport matplotlib.patches as mpatch\nimport matplotlib.pyplot as plt\nfrom matplotlib.collections import PatchCollection\n\nfrom armi.context import RES\nfrom armi.nucDirectory.nuclideBases import NuclideBases\n\n\ndef plotNuc(nb, ax):\n    \"\"\"Make a square patch for a single nuclide base.\"\"\"\n    patch = mpatch.Rectangle((nb.a - nb.z - 0.5, nb.z - 0.5), 1.0, 1.0)\n    rx, ry = patch.get_xy()\n    cx = rx + patch.get_width() / 2.0\n    # bump label down for metastable nuclides\n    cy = ry + (3 - 2 * nb.state) * patch.get_height() / 4.0\n    ax.annotate(\n        nb.name,\n        (cx, cy),\n        color=\"k\",\n        weight=\"normal\",\n        fontsize=10,\n        ha=\"center\",\n        va=\"center\",\n    )\n    return patch\n\n\ndef plotAll(xlim, ylim):\n    \"\"\"Plot all nuclides and transformations.\"\"\"\n    # load the burn chain input that comes with ARMI\n    nuclideBases = NuclideBases()\n    with open(os.path.join(RES, \"burn-chain.yaml\")) as burnChainStream:\n        nuclideBases.imposeBurnChain(burnChainStream)\n    nbs = nuclideBases.instances\n\n    fig, ax = plt.subplots(figsize=(15, 10))\n\n    patches = []\n    for nb in nbs:\n        if not nb.trans and not nb.decays:\n            # skip nuclides without any transmutations defined\n            pass\n        patch = plotNuc(nb, ax)\n        patches.append(patch)\n        # loop over all possible transmutations and decays and draw arrows\n        for ti, trans in enumerate(nb.trans + nb.decays):\n            product = nuclideBases.fromName(trans.productNuclides[0])\n            if product.z == 0:\n                # skip lumped fission products and 
DUMP nuclides\n                continue\n            # add index-based y-offset to minimize overlaps\n            x, y, xp, yp = (\n                nb.a - nb.z,\n                nb.z + ti * 0.05,\n                product.a - product.z,\n                product.z + ti * 0.05,\n            )\n            if trans in nb.trans:\n                color = \"deeppink\"\n            else:\n                color = \"orangered\"\n            ax.annotate(\n                \"\",\n                (xp, yp),\n                (x, y),\n                arrowprops=dict(width=2 * trans.branch, shrink=0.1, alpha=0.4, color=color),\n            )\n            # add reaction label towards the middle of the arrow\n            xlabel = xp - (xp - x) * 0.5\n            ylabel = yp - (yp - y) * 0.5\n            # pretty up the labels a bit with some LaTeX and rotations\n            rxnType = (\n                trans.type.replace(\"nGamma\", r\"n,$\\gamma$\")\n                .replace(\"nalph\", r\"n,$\\alpha$\")\n                .replace(\"ad\", r\"$\\alpha$\")\n                .replace(\"bmd\", r\"$\\beta^-$\")\n                .replace(\"bpd\", r\"$\\beta^+$\")\n            )\n            if xp != x:\n                # rotate the nuclide type label to sit right on the arrow\n                rotation = math.atan((yp - y) / (xp - x)) * 180 / math.pi\n            else:\n                rotation = 0\n            ax.text(xlabel, ylabel, rxnType, color=\"grey\", ha=\"center\", rotation=rotation)\n\n    pc = PatchCollection(patches, facecolor=\"mistyrose\", alpha=0.2, edgecolor=\"black\")\n    ax.add_collection(pc)\n    ax.set_xlim(xlim)\n    ax.set_ylim(ylim)\n    ax.set_aspect(\"equal\")\n    ax.set_xlabel(\"Neutrons (N)\")\n    ax.set_ylabel(\"Protons (Z)\")\n    ax.set_title(\"Transmutations and Decays (with branching)\")\n    plt.show()\n"
  },
  {
    "path": "doc/getTestResults.py",
    "content": "# Copyright 2025 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport glob\nimport os\nimport xml.etree.ElementTree as ET\n\nTHIS_DIR = os.path.dirname(os.path.abspath(__file__))\nRESULTS_DIR = os.path.join(THIS_DIR, \"..\")\nTEST_RESULTS = []\n\n\ndef parseTestXML(file):\n    \"\"\"Parse the test result XML file to gather results in a list of dictionaries.\n\n    Parameters\n    ----------\n    file : path\n        Path of XML file to be parsed\n\n    Returns\n    -------\n    list\n        Dictionaries containing:\n\n        - File location of the test: 'file'\n        - Class signature of test: 'class'\n        - Method signature of test: 'method'\n        - Runtime of test: 'time'\n        - The result of the test: 'result' (passed, skipped, failure)\n        - Console message when skipped or failed: 'info'\n    \"\"\"\n    tree = ET.parse(file)\n\n    results = []\n    for testcase in tree.getroot().iter(\"testcase\"):\n        cn = testcase.attrib.get(\"classname\", \"unknown\")\n        tc_dict = {\n            \"file\": \"/\".join(cn.split(\".\")[:-1]) + \".py\",\n            \"class\": cn.split(\".\")[-1],\n            \"method\": testcase.attrib.get(\"name\", \"unknown\"),\n            \"time\": float(testcase.attrib.get(\"time\", -1)),\n            \"result\": \"passed\",\n            \"info\": None,\n        }\n        if testcase.find(\"skipped\") is not None:\n            tc_dict[\"result\"] = \"skipped\"\n     
       tc_dict[\"info\"] = testcase.find(\"skipped\").attrib[\"message\"]\n        elif testcase.find(\"failure\") is not None:\n            tc_dict[\"result\"] = \"failure\"\n            tc_dict[\"info\"] = testcase.find(\"failure\").text\n\n        results.append(tc_dict)\n\n    return results\n\n\ndef getTestResult(app, need, needs):\n    \"\"\"Dynamic function used by sphinx-needs to gather the result of a test tag.\"\"\"\n    if not need[\"signature\"]:\n        return \"none\"\n\n    # Get all the tests that match the method signature\n    results = [test_case[\"result\"] for test_case in TEST_RESULTS if need[\"signature\"] == test_case[\"method\"]]\n    # Logic is as follows if there are multiple matches:\n    #   - If one is a \"failure\", then return \"failure\"\n    #   - If all are \"skipped\", then return \"skipped\"\n    #   - Otherwise, return \"passed\"\n    if results:\n        if \"failure\" in results:\n            return \"failure\"\n        elif \"passed\" in results:\n            return \"passed\"\n        else:\n            return \"skipped\"\n\n    # Things get a little more complicated when the test tag has a class-level signature.\n    # Basically we have to determine if all the methods in the class passed or if any of skipped/failed.\n    # First, gather all the results related to the class signature from the tag and categorize by method\n    results = {}\n    for test_case in TEST_RESULTS:\n        if need[\"signature\"] == test_case[\"class\"]:\n            if test_case[\"method\"] in results:\n                results[test_case[\"method\"]].append(test_case[\"result\"])\n            else:\n                results[test_case[\"method\"]] = [test_case[\"result\"]]\n\n    # If we haven't found the test by now, we never will\n    if not results:\n        return \"none\"\n\n    # Apply logic from before for each method in the class\n    for m, r in results.items():\n        if \"failure\" in r:\n            results[m] = \"failure\"\n        
elif \"passed\" in r:\n            results[m] = \"passed\"\n        else:\n            results[m] = \"skipped\"\n\n    # Now for the class logic\n    #  - If any of the methods failed, return \"failure\"\n    #  - If any of the methods skipped, return \"skipped\"\n    #  - If all of the methods passed, return \"passed\"\n    if \"failure\" in results.values():\n        return \"failure\"\n    elif \"skipped\" in results.values():\n        return \"skipped\"\n    else:\n        return \"passed\"\n\n\n# Here is where we fill out all the test results, so it is only done once\nfor file in glob.glob(os.path.join(RESULTS_DIR, \"*.xml\")):\n    TEST_RESULTS.extend(parseTestXML(file))\n\nif __name__ == \"__main__\":\n    # Prints results of all the tests found in the repo in a pytest-like way\n    colors = {\n        \"passed\": \"\\033[92m\",\n        \"skipped\": \"\\033[93m\",\n        \"failure\": \"\\033[91m\",\n        \"end\": \"\\033[0m\",\n    }\n    for testcase in TEST_RESULTS:\n        print(\n            \"{} {} {} {}::{}::{}\".format(\n                colors[testcase[\"result\"]],\n                testcase[\"result\"].upper(),\n                colors[\"end\"],\n                testcase[\"file\"],\n                testcase[\"class\"],\n                testcase[\"method\"],\n            )\n        )\n"
  },
  {
    "path": "doc/glossary.rst",
    "content": "Glossary\n========\n\nHere we define a few specialized terms used in this documentation.\n\n.. glossary::\n\n    ANL\n        Argonne National Laboratory\n\n    ARMI\n        The Advanced Reactor Modeling Interface is a software system\n        for nuclear reactor design and analysis.\n\n    assembly\n        A basic structural unit in the reactor core that is stacked together by a list of blocks.\n        Assemblies typically move together in fuel management.\n\n    block\n        a vertical segment of the assembly consisting of components.\n\n    burnup\n        Amount of energy that has been extracted from fuel. Can be measured in megawatt-days per\n        kilogram of heavy metal (MWd/kgHM) or in percent of fissionable atoms that have fissioned.\n\n    BOC\n        Beginning-of-cycle; the state of the core after an outage\n\n    BOL\n        Beginning-of-life; the fresh-core state of the reactor\n\n    cladding\n        Material that surrounds nuclear fuel in pins, keeping radionuclides contained.\n\n    CLI\n        Command Line Interface. The method of interacting with software from a command line.\n\n    component\n        the basic primitive geometrical body, such as a circle, hex, helix, etc. These have\n        dimensions, temperatures, material properties, and isotopic composition.\n\n    FIMA  \n        Fissions per initial metal atom. This is a unit of measuring burnup as a fraction of the\n        fissionable nuclides that have fissioned.\n\n    grid plate\n        A reactor structure in a sodium-cooled fast reactor that all the fuel assemblies sit on.\n\n    GUI\n        Graphical User Interface. The method of interacting with software through a visual display.\n\n    interface\n        Also named *code interface*; linked to an external program or an internal ARMI module to\n        perform a specific calculation function. An example is the DIF3D interface that makes use of\n        DIF3D diffusion code for core physics calculation. 
Interfaces are building blocks of ARMI\n        calculations.\n\n    In-Use Tests\n        Automated software test that shows many modules working together in a way that a user would\n        typically use them.\n\n    Liner\n        A thin layer of material between fuel and cladding intended to impede chemical corrosion and\n        wastage.\n\n    LWR\n        Light Water Reactor. The predominant kind of commercial nuclear plant in operation today.\n\n    material\n        an object that contains isotopic mass fractions and intrinsic material properties\n\n    MPI\n        Message passing interface. This is a protocol for exchanging data around a network to run a\n        code in parallel.\n\n    node\n        A specific point in time in an ARMI case.\n\n    operator\n        An object that controls the calculation sequence for a specific purpose e.g. a multi-cycle\n        quasi-static depletion calculation. Operators trigger interfaces.\n\n    parameter\n        A state variable on a reactor, assembly, block, or component object.\n\n    plenum\n        An empty space inside the cladding tube above the fuel that holds fission gases and other\n        things that are produced during irradiation.\n\n    reactor\n        an object consisting of a core full of assemblies and possibly other structures\n\n    reactor state\n        An instantaneous representation of the physical condition of all components of a reactor,\n        including dimensions, temperatures, composition, material, shape, flux, dose, stress,\n        strain, arrangement, orientation, and so on.\n\n    smear density\n        A term used to characterize how much room exists inside the cladding for the fuel to expand\n        into. 
It is defined as the fraction of fuel area divided by total space inside the cladding.\n\n    TWR\n        Traveling wave reactor: a reactor that uses a breed-and-burn process to achieve most fast\n        reactor advantages without requiring a reprocessing plant.\n\n    Unit Tests\n        Software tests that check small units of software.\n\n    V&V\n        Validation and Verification. Validation is showing that code results match physical reality\n        (comparisons with known answers or experiments), and verification is demonstrating that\n        software is built in a way that satisfies its requirements.\n\n    XTVIEW\n        A TerraPower-developed visualization tool that graphically shows ARMI results that have been\n        added to a database.\n"
  },
  {
    "path": "doc/index.rst",
    "content": "====\nARMI\n====\n\n.. image:: .static/armi-logo.png\n\n.. toctree::\n   :hidden:\n   :maxdepth: 2\n\n   readme\n   installation\n   user/index\n   developer/index\n   gallery/index\n   tutorials/index\n   release/index\n   qa_docs/index\n   glossary\n   API Docs <.apidocs/modules>\n\n*  :doc:`glossary`\n*  :ref:`genindex`\n*  :ref:`modindex`\n*  :ref:`search`\n"
  },
  {
    "path": "doc/installation.rst",
    "content": "############\nInstallation\n############\n\n.. include:: user/user_install.rst\n    :start-line: 4"
  },
  {
    "path": "doc/make.bat",
    "content": "@ECHO OFF\n\npushd %~dp0\n\nREM Windows command file for Sphinx documentation for ARMI\nREM This can be run locally with make html.\n\nif \"%PYTHON%\" == \"\" (\n\tset PYTHON=python\n)\nset SOURCEDIR=.\nif \"%BUILDDIR%\" == \"\" (\n\tset BUILDDIR=_build\n)\nif \"%PYTHONPATH%\" == \"\" (\n\tset PYTHONPATH=..\n)\nREM Graphviz and Pandoc binaries are required for auto-generating figures and running notebooks\nREM during doc building\nif NOT \"%GRAPHVIZ%\" == \"\" (\n\tset PATH=\"%PATH%\";%GRAPHVIZ%\n)\nif NOT \"%PANDOC%\" == \"\" (\n\tset PATH=\"%PATH%\";%PANDOC%\n)\n\nif \"%1\" == \"\" goto help\n\n%PYTHON% -m sphinx >NUL 2>NUL\nif errorlevel 9009 (\n\techo.\n\techo.The 'sphinx' package was not found. Make sure you have Sphinx installed, then set the\n\techo.SPHINXBUILD environment variable to point to the full path of the 'sphinx-build'\n\techo.executable. Alternatively you may add the Sphinx directory to PATH.\n\techo.\n\techo.If you don't have Sphinx installed, grab it from: http://sphinx-doc.org/\n\texit /b 1\n)\n@ECHO ON\n%PYTHON% -m sphinx -b %1 %SOURCEDIR% %BUILDDIR%\\%1 %SPHINXOPTS%\n@ECHO OFF\ngoto end\n\n:help\n%PYTHON% -m sphinx -h\n\n:end\npopd\n"
  },
  {
    "path": "doc/qa_docs/index.rst",
    "content": "################\nQA Documentation\n################\n\nThis is the Quality Assurance (QA) documentation for the Advanced Reactor Modeling Interface (ARMI)\nframework. This document includes the Software Requirements Specification Document (SRSD), the\nSoftware Design and Implementation Document (SDID), and the Software Test Report (STR).\n\n-------------\n\n.. toctree::\n   :maxdepth: 3\n   :numbered:\n   \n   srsd\n   sdid\n   str\n   scr/index\n"
  },
  {
    "path": "doc/qa_docs/scr/0.1.rst",
    "content": "Release Notes for ARMI 0.1\n==========================\n\nThese are the release notes for past versions of ARMI, created before our SCR process. They are preserved here for historical record.\n\n\nARMI v0.1.7\n-----------\nRelease Date: 2021-08-09\n\nAPI changes\n^^^^^^^^^^^\n\n#. ``geomType`` arguments in most places has been changed to accept a ``GeomType``\n   enumeration, instead of a string. Some places will still attempt to implicitly convert\n   strings into enum values, but this will eventually be deprecated.\n#. The ``SystemLayoutInput`` class has been moved into its own module. Instances of the\n   ``SystemLayoutInput`` (usually named ``geom``) often participate in various function\n   signatures. These will be removed soon, as grids now serve this purpose, and ``geom``\n   objects are largely vestigial. ``SystemLayoutInput`` will be retained to facilitate\n   input migrations.\n#. Changed block default names so that they are no longer constrained by axial characters.\n   They now are named ``B{assemNum}-{axialIndex}`` to allow arbitrary numbers of blocks. This\n   will invalidate any user setting that includes a block name (e.g. detail assemblies)\n#. Changed location string labels to be numerical (``001-002-005``) rather than alphanumeric\n   to eliminate a limitation on how many i-indices and k-indices were allowed. This will\n   invalidate any user setting value that includes a location label (e.g. in\n   ``detailAssemsByBOLLocation``). A migration script may be used to assist in migration.\n#. Removed the ``localization`` module, and shifted most of that exception handling to less\n   custom exception types. Though, there were also some functions moved to:\n   ``armi.utils.customExceptions.py``.\n#. ``Settings`` are now immutable (or nearly so).\n\nBug fixes\n^^^^^^^^^\n\n#. Fix bug in loading from databases when multi-index locations are used.\n\n\nARMI v0.1.6\n-----------\nRelease Date: 2020-12-17\n\n#. 
Add capability to map flags to current meaning when loading from database.\n   Previously, loading would fail if the meanings of written and current flags did not\n   match exactly.\n#. Numerous documentation improvements.\n#. Add support for XDMF visualization file output.\n#. Add optional flag to ``armi.configure()`` to permit repeated configuration. This aids\n   in certain testing and demonstration contexts.\n#. Allow for fully-qualified material names in blueprints. Materials take the form of\n   ``module.import.path:MaterialClassName``.\n#. Disable the use of the fast path in interactive sessions.\n#. Define ``ARMITESTBASE`` environment variable when configuring ``pytest``. This allows\n   tests to spawn new processes and still find the ARMI test suite.\n#. Enable full-core expansion of core grid blueprints.\n\nDeprecations\n^^^^^^^^^^^^\n\n#. Removed ``dumpLocationSnapshot`` setting and related functionality. This is replaced\n   by features of Database, version 3. Database 3 supports history tracking from the\n   database file, and whole reactor models can be loaded for any stored time step,\n   obviating the need for special logic in snapshots.\n#. Removed ``None`` option to XS ``\"geometry\"`` setting.\n#. Removed ``Location`` classes. These were made redundant with grids/spatial locators.\n#. Removed ``Block.isAnnular()``.\n#. Remove old \"XTView\" database format support. Migrating older databases will require\n   checking out an older version of the code.\n\nBugfixes\n^^^^^^^^\n\n#. Apply YAML ``!input`` resolution before writing blueprints to database.\n#. Change default App ``name`` to \"armi\" instead of \"ARMI\". This allows ARMI to re-invoke\n   itself, and produce accurate help messages.\n#. Conform R-Z-Theta grid ring/position indices to be 1-based like other grid types.\n#. Add a check that an ISOTXS library exists before attempting to calculate flux-based\n   reaction rates on mesh conversions. 
Prior to this, performing mesh conversions without\n   an ISOTXS would lead to a crash.\n#. Hide ``FAST_PATH`` behind ``context.getFastPath()`` function, allowing it to change.\n   This avoids bugs where code is sensitive to changes to the fast path at runtime.\n\n\nARMI v0.1.5\n-----------\nRelease Date: 2020-10-15\n\nUser-facing enhancements\n^^^^^^^^^^^^^^^^^^^^^^^^\n#. Add location-based history tracking to Database3.\n#. Add grid-editor GUI (``grids`` entry point).\n#. Add support for converting Database files to general-purpose visualization formats\n   (currently supported are VTK and XDMF).\n#. Add generic fuel-performance plugin.\n#. Update Hastelloy N and Incoloy 800 materials.\n#. Add holed rectangle, square component types.\n#. Add ``syncDbAfterWrite`` setting.\n#. Add support for explicit Flags input in Blueprints.\n#. Add glob option to directory changer file retrieval.\n#. Add Cartesian plotting capabilities.\n#. Add support for importing unstable nuclides from the RIPL-3 database.\n#. Numerous documentation and tutorial enhancements.\n#. Add ``run-suite`` entry point.\n#. Improve/generalize and make extensible from Plugins the setting rename capability.\n#. Improve merging behavior of GAMISO and PMATRX files.\n#. Add ``doTH`` setting.\n#. Add ``mpiActionRequiresReset`` plugin hook.\n#. Remove unused entry points (``back-up-db``, ``copy-db``).\n#. Add thermal-scattering metadata to materials.\n#. Improve ASCII map capabilities.\n#. Add the ability to define ex-core Core-like structures in blueprints. This is good for\n   things like spent-fuel pools.\n#. Minor improvements to ``SuiteBuilder``.\n\nBugfixes\n^^^^^^^^\n#. Fix issues in uniform mesh conversion.\n#. Fix order-of-operations issues with string-to-Flags conversions.\n#. Fix issues with circular ring hex-to-RZ mesh conversion.\n#. Fix bug in HT9 material, which was not properly converting between C and K.\n\nBackend changes\n^^^^^^^^^^^^^^^\n#. 
Improve consistency of global flux code.\n#. Various performance enhancements.\n#. Add packing/unpacking of Multi-Index Locations in the database.\n#. Remove deprecated old Settings.\n#. Remove armiAbsDirFromName.\n#. Reduce numerical diffusion in mesh mapping operations.\n#. No longer auto-apply DEPLETABLE Flag when flags explicitly specified.\n#. Improve behavior of delayed neutron fraction settings and parameter values.\n#. Change assembly ordering to be based on (i, j) indices rather than (ring, pos).\n#. Remove ``Block.getEnrichment()``, since it is redundant with the ``Composite``\n   version.\n#. Remove old block-homogenized number density params. These are still whipped up on the\n   fly when writing to DB.\n#. Add explicit ``CartesianGrid`` class.\n#. Remove some unused or design- and physics-related functions from ``Block``.\n#. Merge ``addComponent()`` with base ``add()``.\n#. Fix issues with Be material properties.\n#. Allow setting ``Block`` heights to zero.\n#. Add a Setting class for handling lists of Flags.\n#. Greatly improve support for CCCC file reading/writing.\n\nARMI v0.1.4\n-----------\nRelease Date: 2020-02-27\n\nBugfixes\n^^^^^^^^\n#. Fix minor output date/time bug.\n#. Copy Interface inputs in a manner consistent with standard inputs when cloning Cases.\n\nARMI v0.1.3\n-----------\nRelease Date: 2020-02-25\n\nUser-facing enhancements\n^^^^^^^^^^^^^^^^^^^^^^^^\n#. Improved flexibility of nuclide flags input by adding an ``expandTo`` section so\n   users can control precisely which isotopes the elements get expanded into.\n#. Improved migration system, accessible with ``python -m armi migrate-inputs``\n#. Added new material modifications for inputting fuels made of mixtures of two custom\n   isotopic vectors.\n#. Add YAML ``!include`` support to blueprints files.\n#. Remove ``latticeFile`` section to grid blueprints.\n#. Allow modification of linked dimensions in ``SuiteBuilder``.\n\nBugfixes\n^^^^^^^^\n#. 
SuiteBuilder handles smear density dimension changes for parameter sweeps again\n#. Fixed broken documentation printout of Flags.\n#. Ensure that Cases do not think of themselves as their own dependencies.\n\nBackend changes\n^^^^^^^^^^^^^^^\n#. Number fractions are now maintained across elemental expansion subsets. This slightly\n   changes the isotopic composition when, for example, Tungsten is expanded to 4 out of\n   the 5 natural isotopes.\n#. Add BOL HM mass block parameter.\n#. Add support for custom parameter serializers for database interaction.\n#. Formalize Flag reading and writing from/to the database.\n#. Improve handling of large HDF5 attributes in Database3.\n\nARMI v0.1.2\n-----------\nRelease Date: 2019-11-16\n\nHotfixes\n^^^^^^^^\n#. Fixed dependency issue with pympler\n\n\nARMI v0.1.1\n-----------\nRelease Date: 2019-11-15\n\nUser-facing enhancements\n^^^^^^^^^^^^^^^^^^^^^^^^\n#. Added C5G7 sample LWR inputs\n#. Slightly improved installation documentation\n#. Improved ability to input subassembly geometric details by adding\n   grid definitions to blueprints files\n#. Demoted ``mpi4py`` to an optional requirement to ease installation\n   process.\n\nBackend changes\n^^^^^^^^^^^^^^^\n#. Removed need for geometry object for Reactor construction\n#. Pushed symmetry and geomType metadata onto spatialGrids\n#. Turned off auto-conversion of HDF5 DBs to a previous format\n\nARMI v0.1.0\n-----------\nRelease Date: 2019-10-31\n\nInitial public release.\n\n"
  },
  {
    "path": "doc/qa_docs/scr/0.2.rst",
    "content": "Release Notes for ARMI 0.2\n==========================\n\nThese are the release notes for past versions of ARMI, created before our SCR process. They are preserved here for historical record.\n\n\nARMI v0.2.9\n-----------\nRelease Date: 2023-09-27\n\nWhat's new in ARMI\n^^^^^^^^^^^^^^^^^^\n#. Moved the ``Reactor`` assembly number from the global scope to a ``Parameter``. (`PR#1383 <https://github.com/terrapower/armi/pull/1383>`_)\n#. Removed the global ``Settings`` object, ``getMasterCs()``, and ``setMasterCs()``. (`PR#1399 <https://github.com/terrapower/armi/pull/1399>`_)\n#. Moved the Spent Fuel Pool (``sfp``) from the ``Core`` to the ``Reactor``. (`PR#1336 <https://github.com/terrapower/armi/pull/1336>`_)\n#. Made the ``sfp`` a child of the ``Reactor`` so it is stored in the database. (`PR#1349 <https://github.com/terrapower/armi/pull/1349>`_)\n#. Broad cleanup of ``Parameters``: filled in all empty units and descriptions, removed unused params. (`PR#1345 <https://github.com/terrapower/armi/pull/1345>`_)\n#. Updated some parameter definitions and defaults. (`PR#1355 <https://github.com/terrapower/armi/pull/1355>`_)\n#. Removed redundant ``Material.name`` variable. (`PR#1335 <https://github.com/terrapower/armi/pull/1335>`_)\n#. Added ``powerDensity`` as a high-level alternative to ``power`` to configure a ``Reactor``. (`PR#1395 <https://github.com/terrapower/armi/pull/1395>`_)\n#. Added SHA1 hashes of XS control files to the welcome text. (`PR#1334 <https://github.com/terrapower/armi/pull/1334>`_)\n\nBuild changes\n^^^^^^^^^^^^^\n#. Moved from ``setup.py`` to ``pyproject.toml``. (`PR#1409 <https://github.com/terrapower/armi/pull/1409>`_)\n#. Add python 3.11 to ARMI's CI testing GH actions. (`PR#1341 <https://github.com/terrapower/armi/pull/1341>`_)\n#. Put back ``avgFuelTemp`` block parameter. (`PR#1362 <https://github.com/terrapower/armi/pull/1362>`_)\n#. Make cylindrical component block collection less strict about pre-homogenization checks. 
(`PR#1347 <https://github.com/terrapower/armi/pull/1347>`_)\n#. Updated some parameter definitions and defaults. (`PR#1355 <https://github.com/terrapower/armi/pull/1355>`_)\n#. Make the SFP a child of the reactor so it is stored in database. (`PR#1349 <https://github.com/terrapower/armi/pull/1349>`_)\n#. Update black to version 22.6. (`PR#1396 <https://github.com/terrapower/armi/pull/1396>`_)\n#. Added Python 3.11 to ARMI's CI on GH actions. (`PR#1341 <https://github.com/terrapower/armi/pull/1341>`_)\n#. Updated ``black`` to version 22.6. (`PR#1396 <https://github.com/terrapower/armi/pull/1396>`_)\n#. Add a _getNucTempHelper method for CylindricalComponentsAverageBlockCollection. (`PR#1363 <https://github.com/terrapower/armi/pull/1363>`_)\n\nBug fixes\n^^^^^^^^^\n#. Fixed ``_processIncludes()`` to handle ``StringIO`` input. (`PR#1333 <https://github.com/terrapower/armi/pull/1333>`_)\n#. Fixed logic for computing thermal expansion factors for axial expansion. (`PR#1342 <https://github.com/terrapower/armi/pull/1342>`_)\n\nARMI v0.2.8\n-----------\nRelease Date: 2023-06-21\n\nWhat's new in ARMI\n^^^^^^^^^^^^^^^^^^\n#. Added ``Composite.sort()`` to allow the user to recursively sort any part of the ``Reactor``. (`PR#1280 <https://github.com/terrapower/armi/pull/1280>`_)\n#. Switching from ``pylint`` to the ``ruff`` linter. (`PR#1296 <https://github.com/terrapower/armi/pull/1296>`_)\n#. Move cross section group manager Interface stack position to be just before lattice physics. (`PR#1288 <https://github.com/terrapower/armi/pull/1288>`_)\n#. Add ``interactCoupled`` method for ``SnapshotInterface``. (`PR#1294 <https://github.com/terrapower/armi/pull/1294>`_)\n#. Calculate weighted-average percent burnup of ``BlockCollections``. (`PR#1265 <https://github.com/terrapower/armi/pull/1265>`_)\n#. Add method ``sortAssemsByRing`` to sort ``Reactor`` assemblies by spatial location (interior first).  
(`PR#1320 <https://github.com/terrapower/armi/pull/1320>`_)\n\nBug fixes\n^^^^^^^^^\n#. Changed ``units.FLOAT_DIMENSION_DECIMALS`` from 10 to 8. (`PR#1183 <https://github.com/terrapower/armi/pull/1183>`_)\n#. Improved ``HexBlock.getWettedPerimeter()`` to include wire. (`PR#1299 <https://github.com/terrapower/armi/pull/1299>`_)\n#. Fixed a bug in the ISOTXS file name used for snapshots. (`PR#1277 <https://github.com/terrapower/armi/pull/1277>`_)\n#. Fix a bug in uniform mesh decusping when assemblies of same type have drastically different height. (`PR#1282 <https://github.com/terrapower/armi/pull/1282>`_)\n#. Sort ``Components`` on ``representativeBlock`` for consistency check. (`PR#1275 <https://github.com/terrapower/armi/pull/1275>`_)\n\nARMI v0.2.7\n-----------\nRelease Date: 2023-05-24\n\nWhat's new in ARMI\n^^^^^^^^^^^^^^^^^^\n#. The method ``Material.density3`` is now called ``density``, and the old ``density`` is now called ``pseudoDensity``. (`PR#1163 <https://github.com/terrapower/armi/pull/1163>`_)\n#. Removed ``metadata`` setting section, and created ``versions``. (`PR#1274 <https://github.com/terrapower/armi/pull/1274>`_)\n#. Use ``minimumNuclideDensity`` setting when generating macroscopic XS. (`PR#1248 <https://github.com/terrapower/armi/pull/1248>`_)\n#. Introduce new ``LatticePhysicsFrequency`` setting to control lattice physics calculation. (`PR#1239 <https://github.com/terrapower/armi/pull/1239>`_)\n#. Added new setting ``assemFlagsToSkipAxialExpansion`` to enable users to list flags of assemblies to skip axial expansion. (`PR#1235 <https://github.com/terrapower/armi/pull/1235>`_)\n#. Added documentation for the thermal expansion approach used in ARMI. (`PR#1204 <https://github.com/terrapower/armi/pull/1204>`_)\n#. Use ``TemporaryDirectoryChanger`` for ``executer.run()`` so dirs are cleaned up during run. (`PR#1219 <https://github.com/terrapower/armi/pull/1219>`_)\n#. 
New option ``copyOutput`` for globalFluxInterface to not copy output back to working directory. (`PR#1218 <https://github.com/terrapower/armi/pull/1218>`_, `PR#1227 <https://github.com/terrapower/armi/pull/1227>`_)\n#. `Executer` class has a ``dcType`` attribute to define the type of ``DirectoryChanger`` it will use. (`PR#1228 <https://github.com/terrapower/armi/pull/1228>`_)\n#. Enabling one-way (upwards) axial expansion of control assemblies. (`PR#1226 <https://github.com/terrapower/armi/pull/1226>`_)\n#. Implement control rod decusping option for uniform mesh converter. (`PR#1229 <https://github.com/terrapower/armi/pull/1229>`_)\n#. ``createRepresentativeBlocksFromExistingBlocks`` now returns the mapping of original to new XS IDs. (`PR#1217 <https://github.com/terrapower/armi/pull/1217>`_)\n#. Added a capability to prioritize ``MpiAction`` execution and exclusivity. (`PR#1237 <https://github.com/terrapower/armi/pull/1237>`_)\n#. Improve support for single component axial expansion and general cleanup of axial expansion unit tests. (`PR#1230 <https://github.com/terrapower/armi/pull/1230>`_)\n#. New cross section group representative block type for 1D cylindrical models. (`PR#1238 <https://github.com/terrapower/armi/pull/1238>`_)\n#. Store the axial expansion target component name as a block parameter. (`PR#1256 <https://github.com/terrapower/armi/pull/1256>`_) \n#. When using non-uniform mesh, detailed fission/activation products have cross sections generated to avoid blocks without xs data. (`PR#1257 <https://github.com/terrapower/armi/pull/1257>`_)\n#. Fix a bug in database comparison. (`PR#1258 <https://github.com/terrapower/armi/pull/1258>`_)\n#. Introduce new LatticePhysicsFrequency setting to control lattice physics calculation. (`PR#1239 <https://github.com/terrapower/armi/pull/1239>`_)\n#. Made sure all material classes could be resolved via name. (`PR#1270 <https://github.com/terrapower/armi/pull/1270>`_)\n#. 
Read flux directly from output into Gamma uniform mesh instead of mapping it in from block params. (`PR#1213 <https://github.com/terrapower/armi/pull/1213>`_)\n#. Forced GAMISO/PMATRX file path extensions to be lower case for linux support. (`PR#1216 <https://github.com/terrapower/armi/pull/1216>`_)\n\nBug fixes\n^^^^^^^^^\n#. Fixed a bug in database comparison. (`PR#1258 <https://github.com/terrapower/armi/pull/1258>`_)\n#. Fixed an invalid assumption on the lattice physics and cross section manager interfaces when using tight coupling for snapshot runs. (`PR#1206 <https://github.com/terrapower/armi/pull/1206>`_)\n#. Fixed a bug where the precision used to determine the axial submesh was too small. (`PR#1225 <https://github.com/terrapower/armi/pull/1225>`_)\n\nARMI v0.2.6\n-----------\nRelease Date: 2023-02-09\n\nWhat's new in ARMI\n^^^^^^^^^^^^^^^^^^\n#. The ``Material`` class no longer subclasses ``Composite``. (`PR#1062 <https://github.com/terrapower/armi/pull/1062>`_)\n#. Froze the NumPy version to <= 1.23.5. (`PR#1035 <https://github.com/terrapower/armi/pull/1035>`_) to continue to support NumPy jagged arrays in the DatabaseInterface.\n#. Split 3 classes in ``database3.py`` into 3 files. (`PR#955 <https://github.com/terrapower/armi/pull/955>`_)\n#. Split algorithms specific to hex assemblies out of ``FuelHandler``. (`PR#962 <https://github.com/terrapower/armi/pull/962>`_)\n#. Added 4614 nuclides to decouple the loading of RIPL-3 data from the standard framework run. (`PR#998 <https://github.com/terrapower/armi/pull/998>`_)\n#. Overhaul of the tight coupling routine in ARMI, and removal of ``looseCoupling`` setting. (`PR #1033 <https://github.com/terrapower/armi/pull/1033>`_)\n#. Added ``savePhysicsFiles`` setting to copy physics kernel I/O to directories organized by cycle and time step (e.g., c2n1). (`PR#952 <https://github.com/terrapower/armi/pull/952>`_)\n#. Add ``pinQuantities`` parameter category for block params that have spatial distribution.\n#. 
Use ``r.core.p.axialMesh`` instead of ``r.core.refAssem.getAxialMesh()`` for the uniform mesh converter. (`PR#959 <https://github.com/terrapower/armi/pull/959>`_)\n#. Add group structures for 21- and 94-groups used in photon transport.\n#. Add block parameter, ``fuelCladLocked``, to track whether or not the fuel and clad are locked. (`PR#1038 <https://github.com/terrapower/armi/pull/1038>`_)\n#. An explicit fission product modeling option was added. (`PR#1022 <https://github.com/terrapower/armi/pull/1022>`_)\n#. Axially expand from cold to hot before deepcopy of assemblies into reactor; improving speed. (`PR#1047 <https://github.com/terrapower/armi/pull/1047>`_)\n#. Add a how-to on restart calculations in the docs.\n#. General improvements to efficiency in uniform mesh conversion. (`PR#1042 <https://github.com/terrapower/armi/pull/1042>`_)\n#. Allow MCNP material card number to be defined after the card is written. (`PR#1086 <https://github.com/terrapower/armi/pull/1086>`_)\n#. Refine logic for ``Block.getNumPins()`` to only count components that are actually pins. (`PR#1098 <https://github.com/terrapower/armi/pull/1098>`_)\n#. Improve handling of peak/max parameters by the ``UniformMeshConverter`` parameter mapper. (`PR#1108 <https://github.com/terrapower/armi/pull/1108>`_)\n#. Calculate block kgHM and kgFis on core loading and after shuffling. (`PR#1136 <https://github.com/terrapower/armi/pull/1136>`_)\n#. Calculate block ``PuFrac`` on core loading and after shuffling. (`PR#1165 <https://github.com/terrapower/armi/pull/1165>`_)\n#. Add setting ``cyclesSkipTightCouplingInteraction`` to skip coupling interaction on specified cycles. (`PR#1173 <https://github.com/terrapower/armi/pull/1173>`_)\n#. Remove unused ``HCFcoretype`` setting. (`PR#1179 <https://github.com/terrapower/armi/pull/1179>`_)\n\nBug fixes\n^^^^^^^^^\n#. Fixed ``referenceBlockAxialMesh`` and ``axialMesh`` during process loading. (`PR#980 <https://github.com/terrapower/armi/pull/980>`_)\n#. 
Fixed deadlocks in MPI cases due to barriers in temp directory changers.\n#. Fixed the material namespace order for ``test_axialExpansionChanger.py`` persisting after tests. (`PR#1046 <https://github.com/terrapower/armi/pull/1046>`_)\n#. Fixed the gaseous fission products not being removed from the core directly, but instead the fission yields within the lumped fission products were being adjusted. (`PR#1022 <https://github.com/terrapower/armi/pull/1022>`_)\n#. Fixed non-fuel depletable components not being initialized with all nuclides with the ``explicitFissionProducts`` model. (`PR#1067 <https://github.com/terrapower/armi/pull/1067>`_)\n#. Fixed consistency between cross section group manager and lattice physics interface for tight coupling. (`PR#1118 <https://github.com/terrapower/armi/pull/1118>`_)\n#. Fixed numerical diffusion in uniform mesh converter that affects number densities and cumulative parameters like DPA. (`PR#992 <https://github.com/terrapower/armi/pull/992>`_)\n#. Fix the formula to calculate ``b.p.puFrac``. (`PR#1168 <https://github.com/terrapower/armi/pull/1168>`_)\n#. Fixed ``Material.densityTimesHeatCapacity()``, moving from pseudo-density to physical density. (`PR#1129 <https://github.com/terrapower/armi/pull/1129>`_)\n#. Fixed ``TD_frac`` modification on UraniumOxide and MOX was not being applied correctly.\n#. Fixed Magnesium density curve. (`PR#1126 <https://github.com/terrapower/armi/pull/1126>`_)\n#. Fixed Potassium density curve. (`PR#1128 <https://github.com/terrapower/armi/pull/1128>`_)\n#. Fixed Concrete density curve. (`PR#1131 <https://github.com/terrapower/armi/pull/1131>`_)\n#. Fixed Copper density curve. (`PR#1150 <https://github.com/terrapower/armi/pull/1150>`_)\n#. Fixed ``Component.density``. (`PR#1149 <https://github.com/terrapower/armi/pull/1149>`_)\n#. Fixed error where a non-float value could be assigned to a material's mass fraction dictionary. (`PR#1199 <https://github.com/terrapower/armi/pull/1199>`_)\n#. 
Fixed interface/event ``runLog.header`` for tight coupling. (`PR#1178 <https://github.com/terrapower/armi/pull/1178>`_)\n#. Fixed circular import bug in ``reactors.py`` caused by importing settings constants. (`PR#1185 <https://github.com/terrapower/armi/pull/1185>`_)\n\nARMI v0.2.5\n-----------\nRelease Date: 2022-10-24\n\nWhat's new in ARMI\n^^^^^^^^^^^^^^^^^^\n#. Cleanup of stale ``coveragerc`` file. (`PR#923 <https://github.com/terrapower/armi/pull/923>`_)\n#. Added `medium` writer style option to ``SettingsWriter``. Added it as arg to modify CLI. (`PR#924 <https://github.com/terrapower/armi/pull/924>`_), and to clone CLI (`PR#932 <https://github.com/terrapower/armi/pull/932>`_).\n#. Update the EntryPoint class to provide user feedback on required positional arguments. (`PR#922 <https://github.com/terrapower/armi/pull/922>`_)\n#. Overhaul ``reactor.zones`` tooling and remove application-specific zoning logic. (`PR#943 <https://github.com/terrapower/armi/pull/943>`_)\n\nBug fixes\n^^^^^^^^^\n#. Adjusted ``density3`` in ``armi/materials/b4C.py`` to include the theoretical density. (`PR#942 <https://github.com/terrapower/armi/pull/942>`_)\n#. Fixed bug in ``fastFlux`` block parameter mapping in the ``UniformMeshConverter`` by applying it to the ``detailedAxialExpansion`` category.\n#. Fixed issue where shuffles might duplicate in restart runs.\n\n\nARMI v0.2.4\n-----------\nRelease Date: 2022-10-03\n\nWhat's new in ARMI\n^^^^^^^^^^^^^^^^^^\n#. Added new ``UserPlugin`` functionality.\n#. Introduced ``axial expansion changer``.\n#. Greatly improved the ``UniformMeshGeometryConverter``.\n#. Made the min/max temperatures of ``Material`` curves discoverable.\n#. Removed the ``PyYaml`` dependency.\n#. Changed the default Git branch name to ``main``.\n#. Moved math utilities into their own module.\n#. Moved ``newReports`` into their final location in ``armi/bookkeeping/report/``.\n#. Removed ``_swapFluxParam`` method. 
(`PR#665 <https://github.com/terrapower/armi/pull/665#discussion_r893348409>`__)\n#. Removed the last usage of ``settingsRules``; now only use ``settingsValidation``.\n#. Removed separate blueprints in snapshot runs; they must come from the database. (`PR#872 <https://github.com/terrapower/armi/pull/872>`_)\n#. Added reporting of neutron and gamma energy groups in the XS library ``__repr__``.\n#. Updated NHFLUX reader to store VARIANT data that was being discarded.\n#. Store thermally expanded block heights at BOL in ``armi/reactor/reactors.py::Core::processLoading``.\n#. Added neutronics settings: ``inners`` and ``outers`` for downstream support.\n#. Removed unused Thermal Hydraulics settings.\n#. Replaced setting ``stationaryBlocks`` with ``stationaryBlockFlags`` setting. (`PR#665 <https://github.com/terrapower/armi/pull/665>`__)\n#. Changed the default value of the ``trackAssems`` setting to ``False``.\n#. Add setting ``inputHeightsConsideredHot`` to enable thermal expansion of assemblies at BOL.\n\n\nBug fixes\n^^^^^^^^^\n#. Fixed issues finding ``ISOXX`` files cross-platform.\n#. Fixed issues in ``growToFullCore``.\n#. Fixed issue in the ARMI memory profiler.\n#. Fixed issue in linear expansion in ``Alloy200``.\n#. Fixed issue in ``armi/reactor/components/complexShapes.py::Helix::getCircleInnerDiameter``\n#. Fixed issue with axial expansion changer in ``armi/reactor/reactors.py::Core::processLoading``.\n#. Fixed issue in how number densities are initialized for components.\n#. Fixed issue in ``armi/cases/case.py::copyInterfaceInputs``\n#. Fixed issue in ``armi/reactor/components/component.py::getReac``\n#. Fixed issue in ``armi/reactor/converters/uniformMesh.py`` was clearing out unchanged param data.\n#. Fixed issue where components were different if initialized through blueprints vs init.\n#. Fixed issue where component mass was conserved in axial expansion instead of density. (`PR#846 <https://github.com/terrapower/armi/pull/846>`_)\n#. 
Fixed issue where ``HexBlock::rotatePins`` failed to modify ``pinLocation`` param. (`#855 <https://github.com/terrapower/armi/pull/855>`_)\n#. Fixed issue where ``Core::_applyThermalExpansion`` failed to call ``block.completeInitialLoading``. (`#885 <https://github.com/terrapower/armi/pull/885>`_)\n#. Fixed issue where a validator would complain both simple and detailed cycles settings were used.\n#. Fixed issue where ``getReactionRates()`` was not accounting for burnup-dependent cross-sections.\n\n\nARMI v0.2.3\n-----------\nRelease Date: 2022-02-08\n\nWhat's new in ARMI\n^^^^^^^^^^^^^^^^^^\n#. Upgrading the version of NumPy for a security alert. (`PR#530 <https://github.com/terrapower/armi/pull/530>`_)\n#. Upgraded ThoriumOxide material. (`PR#548 <https://github.com/terrapower/armi/pull/548>`_)\n#. Upgraded Lithium material. (`PR#546 <https://github.com/terrapower/armi/pull/546>`_)\n#. Improved ``Helix`` class. (`PR#558 <https://github.com/terrapower/armi/pull/558>`_)\n\nBug fixes\n^^^^^^^^^\n#. Fixed issue where UML diagrams weren't being generated in docs. (`#550 <https://github.com/terrapower/armi/issues/550>`_)\n#. Fixed issue with Inconel Alloy 617. (`PR#557 <https://github.com/terrapower/armi/pull/557>`_)\n\n\nARMI v0.2.2\n-----------\nRelease Date: 2022-01-19\n\nWhat's new in ARMI v0.2.2\n^^^^^^^^^^^^^^^^^^^^^^^^^\n#. Improved type hinting.\n#. Fleshed out the ability to build the docs as PDF.\n#. Material modifications can now be made per-component.\n#. The ``loadOperator`` method now has the optional ``allowMissing`` argument.\n\nBug fixes\n^^^^^^^^^\n#. Fixed issue where copying a ``Setting`` with a defined list of options would throw an error. (`PR#540 <https://github.com/terrapower/armi/pull/540>`_)\n\n\nARMI v0.2.1\n-----------\nRelease Date: 2022-01-13\n\nWhat's new in ARMI v0.2.1\n^^^^^^^^^^^^^^^^^^^^^^^^^\n#. Added new reference data for lumped fission products. (`#507 <https://github.com/terrapower/armi/issues/507>`_)\n\nBug fixes\n^^^^^^^^^\n#. 
Fixed issue where grid GUI was not saving lattice maps. (`#490 <https://github.com/terrapower/armi/issues/490>`_)\n#. Fixed issue where SettingsModifier was using old Settings API. (`#500 <https://github.com/terrapower/armi/issues/500>`_)\n#. Fixed issue where copying a Setting only copied the default value. (`PR#534 <https://github.com/terrapower/armi/pull/534>`_)\n\n\nARMI v0.2.0\n-----------\nRelease Date: 2021-11-19\n\nThe API has started to solidify, and the number of external-facing changes have started to slow down. This release is a stake in the ground on a stable API.\n\nWhat's new in ARMI v0.2.0\n^^^^^^^^^^^^^^^^^^^^^^^^^\n#. Made user settings immutable to avoid confusing runtime behavior.\n#. Removed the concept of 'facemaps' (now replaced with more general grids).\n#. Added ability to use module-level logging for more precise debugging.\n#. Added ability to write full tips-up hex asciimaps.\n#. Fixed ability to serialize grid blueprints.\n#. Improved code coverage and linting.\n#. Added a latin hypercube suite builder for parameter sweeps.\n#. Added several clarifications, fixes, and updates to documentation.\n#. Updated units labels on several parameters.\n#. Added protections against deleting directories.\n#. Updated spontaneous fission data.\n#. Removed confusing Charge Fuel Pool from core.\n#. Sped up YAML reading.\n#. Removed localization module.\n#. Added ANL116 energy group structure.\n#. Added setting to control auto-creation of within-block grids.\n#. Added new plot/summarizing capabilities.\n#. Added ability for GUI to save map as image.\n#. Added C5G7 compositions and dimensions to LWR tutorial.\n#. Added 1d/2d mesh reading/writing to GEODST.\n\nBackwards incompatible changes\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nThere may be some new errors based on updated input checking.\n\n\nBug fixes\n^^^^^^^^^\n#. Fixed centering of full-symmetry Cartesian lattice maps.\n#. Fixed issues with grids that had multi-index locations.\n#. 
Removed test files from coverage check.\n#. Fixed order of operations issue in ``rotatePins``.\n#. Fixed incorrect multiplicity for non-grid block components.\n#. Many additional bugfixes and cleanups (see PR list).\n"
  },
  {
    "path": "doc/qa_docs/scr/0.3.rst",
    "content": "Release Notes for ARMI 0.3\n==========================\n\nThese are the release notes for past versions of ARMI, created before our SCR process. They are preserved here for historical record.\n\n\nARMI v0.3.0\n-----------\nRelease Date: 2024-02-02\n\nWhat's new in ARMI?\n^^^^^^^^^^^^^^^^^^^\n#. The ``_copyInputsHelper()`` gives relative path and not absolute after copy. (`PR#1416 <https://github.com/terrapower/armi/pull/1416>`_)\n#. Attempt to set representative block number densities by component if possible. (`PR#1412 <https://github.com/terrapower/armi/pull/1412>`_)\n#. Use ``functools`` to preserve function attributes when wrapping with ``codeTiming.timed``. (`PR#1466 <https://github.com/terrapower/armi/pull/1466>`_)\n#. Remove a number of deprecated block, assembly, and core parameters related to a defunct internal plugin.\n\nBug Fixes\n^^^^^^^^^\n#. ``StructuredGrid.getNeighboringCellIndices()`` was incorrectly implemented for the second neighbor. (`PR#1614 <https://github.com/terrapower/armi/pull/1614>`_)\n\nQuality Work\n^^^^^^^^^^^^\n#. ARMI now mandates ``ruff`` linting. (`PR#1419 <https://github.com/terrapower/armi/pull/1419>`_)\n#. Many new references to requirement tests and implementations were added to docstrings.\n#. Removed all old ARMI requirements, to start the work fresh. (`PR#1438 <https://github.com/terrapower/armi/pull/1438>`_)\n#. Downgrading Draft PRs as policy. (`PR#1444 <https://github.com/terrapower/armi/pull/1444>`_)\n"
  },
  {
    "path": "doc/qa_docs/scr/0.4.rst",
    "content": "Release Notes for ARMI 0.4\n==========================\n\nThese are the release notes for past versions of ARMI, created before our SCR process. They are preserved here for historical record.\n\nARMI v0.4.0\n-----------\nRelease Date: 2024-07-29\n\nNew Features\n^^^^^^^^^^^^\n#. Conserve mass by component in ``assembly.setBlockMesh()``. (`PR#1665 <https://github.com/terrapower/armi/pull/1665>`_)\n#. Removal of the ``Block.reactor`` property. (`PR#1425 <https://github.com/terrapower/armi/pull/1425>`_)\n#. System information is now also logged on Linux. (`PR#1689 <https://github.com/terrapower/armi/pull/1689>`_)\n#. Reset ``Reactor`` data on worker processors after every interaction to free memory from state distribution. (`PR#1729 <https://github.com/terrapower/armi/pull/1729>`_ and `PR#1750 <https://github.com/terrapower/armi/pull/1750>`_)\n#. Density can be specified for components via ``custom isotopics`` in the blueprints. (`PR#1745 <https://github.com/terrapower/armi/pull/1745>`_)\n#. Implement a new ``JaggedArray`` class that handles HDF5 interface for jagged data. (`PR#1726 <https://github.com/terrapower/armi/pull/1726>`_)\n#. Adding temperature dependent representative blocks to cross section group manager. (`PR#1987 <https://github.com/terrapower/armi/pull/1987>`_)\n\n\nAPI Changes\n^^^^^^^^^^^\n#. Replacing the concrete material with a better reference. (`PR#1717 <https://github.com/terrapower/armi/pull/1717>`_)\n#. Adding more detailed time information to logging. (`PR#1796 <https://github.com/terrapower/armi/pull/1796>`_)\n#. Renaming ``structuredgrid.py`` to camelCase. (`PR#1650 <https://github.com/terrapower/armi/pull/1650>`_)\n#. Removing unused argument from ``Block.coords()``. (`PR#1651 <https://github.com/terrapower/armi/pull/1651>`_)\n#. Removing unused method ``HexGrid.allPositionsInThird()``. (`PR#1655 <https://github.com/terrapower/armi/pull/1655>`_)\n#. 
Removed unused methods: ``Reactor.getAllNuclidesIn()``, ``plotTriangleFlux()``. (`PR#1656 <https://github.com/terrapower/armi/pull/1656>`_)\n#. Removed ``armi.utils.dochelpers``; not relevant to nuclear modeling. (`PR#1662 <https://github.com/terrapower/armi/pull/1662>`_)\n#. Removing old tools created to help people convert to the current database format: ``armi.bookkeeping.db.convertDatabase()`` and ``ConvertDB``. (`PR#1658 <https://github.com/terrapower/armi/pull/1658>`_)\n#. Removing the unused method ``Case.buildCommand()``. (`PR#1773 <https://github.com/terrapower/armi/pull/1773>`_)\n#. Removed the variable ``armi.physics.neutronics.isotopicDepletion.ORDER``. (`PR#1671 <https://github.com/terrapower/armi/pull/1671>`_)\n#. Removing extraneous ``ArmiOjbect`` methods. (`PR#1667 <https://github.com/terrapower/armi/pull/1667>`_)\n    * Moving ``ArmiObject.getBoronMassEnrich()`` to ``Block``.\n    * Moving ``ArmiObject.getPuMoles()`` to ``Block``.\n    * Moving ``ArmiObject.getUraniumMassEnrich()`` to ``Block``.\n    * Removing ``ArmiObject.getMaxUraniumMassEnrich.()``.\n    * Removing ``ArmiObject.getMaxVolume()`` & ``Block.getMaxVolume()``.\n    * Removing ``ArmiObject.getPuFrac()``.\n    * Removing ``ArmiObject.getPuMass()``.\n    * Removing ``ArmiObject.getPuN()``.\n    * Removing ``ArmiObject.getZrFrac()``.\n    * Removing ``ArmiObject.printDensities()``.\n    * Moving ``Composite.isOnWhichSymmetryLine()`` to ``Assembly``.\n    * Removing ``Block.isOnWhichSymmetryLine()``.\n#. Removing the ``Block.reactor`` property. (`PR#1425 <https://github.com/terrapower/armi/pull/1425>`_)\n#. Moving several ``ArmiObject`` methods. 
(`PR#1425 <https://github.com/terrapower/armi/pull/1425>`_)\n    * Moving ``ArmiObject.getNeutronEnergyDepositionConstants`` to ``Block``.\n    * Moving ``ArmiObject.getGammaEnergyDepositionConstants`` to ``Block``.\n    * Moving ``ArmiObject.getTotalEnergyGenerationConstants`` to ``Block``.\n    * Moving ``ArmiObject.getFissionEnergyGenerationConstants`` to ``Block``.\n    * Moving ``ArmiObject.getCaptureEnergyGenerationConstants`` to ``Block``.\n#. Removing the parameter ``rdIterNum``. (`PR#1704 <https://github.com/terrapower/armi/pull/1704>`_)\n#. Removing the parameters ``outsideFuelRing`` and ``outsideFuelRingFluxFr``. (`PR#1700 <https://github.com/terrapower/armi/pull/1700>`_)\n#. Removing the setting ``doOrificedTH``. (`PR#1706 <https://github.com/terrapower/armi/pull/1706>`_)\n#. Changing the Doppler constant params to ``VOLUME_INTEGRATED``. (`PR#1659 <https://github.com/terrapower/armi/pull/1659>`_)\n#. Change ``Operator._expandCycleAndTimeNodeArgs`` to be a non-static method. (`PR#1766 <https://github.com/terrapower/armi/pull/1766>`_)\n#. Database now writes state at the last time node of a cycle rather than during the ``DatabaseInterface.interactEOC`` interaction. (`PR#1090 <https://github.com/terrapower/armi/pull/1090>`_)\n#. Renaming ``b.p.buGroup`` to ``b.p.envGroup``. Environment group captures both burnup and temperature. (`PR#1987 <https://github.com/terrapower/armi/pull/1987>`_)\n\nBug Fixes\n^^^^^^^^^\n#. Fixed four bugs with \"corners up\" hex grids. (`PR#1649 <https://github.com/terrapower/armi/pull/1649>`_)\n#. Fixed ``safeCopy`` to work on both Windows and Linux with strict permissions. (`PR#1691 <https://github.com/terrapower/armi/pull/1691>`_)\n#. When creating a new XS group, inherit settings from initial group. (`PR#1653 <https://github.com/terrapower/armi/pull/1653>`_, `PR#1751 <https://github.com/terrapower/armi/pull/1751>`_)\n#. Fixed a bug with ``Core.getReactionRates``. (`PR#1771 <https://github.com/terrapower/armi/pull/1771>`_)\n#. 
Fixed a bug with interactive versus batch mode checking on windows versus linux. (`PR#1786 <https://github.com/terrapower/armi/pull/1786>`_)\n\nQuality Work\n^^^^^^^^^^^^\n#. Creating a single-block test reactor, to speed up unit tests. (`PR#1737 <https://github.com/terrapower/armi/pull/1737>`_)\n#. Supporting MacOS in CI. (`PR#1713 <https://github.com/terrapower/armi/pull/1713>`_)\n#. We now enforce a maximum line length of 120 characters, using ``ruff``. (`PR#1646 <https://github.com/terrapower/armi/pull/1646>`_)\n#. Updating ``ruff`` to version ``0.5.1``. (`PR#1770 <https://github.com/terrapower/armi/pull/1770>`_)\n#. Move ``.coveragerc`` file information into ``pyproject.toml``. (`PR#1692 <https://github.com/terrapower/armi/pull/1692>`_)\n\nChanges that Affect Requirements\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n#. Removing unused argument to ``Block.coords()``. (`PR#1651 <https://github.com/terrapower/armi/pull/1651>`_)\n#. Touched ``HexGrid`` by adding a \"cornersUp\" property and fixing two bugs. (`PR#1649 <https://github.com/terrapower/armi/pull/1649>`_)\n#. Very slightly modified the implementation of ``Assembly.add()``. (`PR#1670 <https://github.com/terrapower/armi/pull/1670>`_)\n"
  },
  {
    "path": "doc/qa_docs/scr/0.5.rst",
    "content": "Release Notes for ARMI 0.5\n==========================\n\nThese are the release notes for past versions of ARMI, created before our SCR process. They are preserved here for historical record.\n\nARMI v0.5.1\n-----------\nRelease Date: 2025-03-14\n\nThis release was all about QA documentation. We open-sourced our QA documentation, even our software requirements. There were also several bug fixes.\n\nNew Features\n^^^^^^^^^^^^\n#. Move instead of copy files from ``TemporaryDirectoryChanger``. (`PR#2022 <https://github.com/terrapower/armi/pull/2022>`_)\n#. Creating the ``armi.testing`` module, to share ARMI testing tools. (`PR#2028 <https://github.com/terrapower/armi/pull/2028>`_)\n#. Using inner diameter for sorting components when outer diameter is identical. (`PR#1882 <https://github.com/terrapower/armi/pull/1882>`_)\n#. Invoking ``component.material.density()`` does not log an expensive stack tracefor fluids. (`PR#2075 <https://github.com/terrapower/armi/pull/2075>`_)\n#. ARMI will now try to use the ``/tmp/`` directory for its temp files, on Linux and MacOS. (`PR#2092 <https://github.com/terrapower/armi/pull/2092>`_)\n\nAPI Changes\n^^^^^^^^^^^\n#. Removing ``Database3`` from the API, use ``Database``. (`PR#2052 <https://github.com/terrapower/armi/pull/2052>`_)\n\nBug Fixes\n^^^^^^^^^\n#. Fixing check for jagged arrays during ``_writeParams``. (`PR#2051 <https://github.com/terrapower/armi/pull/2051>`_)\n#. Fixing BP-section ignoring tool in ``PassiveDBLoadPlugin``. (`PR#2055 <https://github.com/terrapower/armi/pull/2055>`_)\n#. Making sure SFPs have spatial grids. (`PR#2082 <https://github.com/terrapower/armi/pull/2082>`_)\n#. Fixing number densities when custom isotopics are combined with Fluid components. (`PR#2071 <https://github.com/terrapower/armi/pull/2071>`_)\n#. Fixing scaling of volume-integrated parameters on edge assemblies. (`PR#2060 <https://github.com/terrapower/armi/pull/2060>`_)\n#. 
Fixing strictness of ``HexGrid`` rough equality check. (`PR#2058 <https://github.com/terrapower/armi/pull/2058>`_)\n#. Fixing treatment of symmetry factors when calculating component flux and reaction rates. (`PR#2068 <https://github.com/terrapower/armi/pull/2068>`_)\n\nQuality Work\n^^^^^^^^^^^^\n#. Open-Sourcing the ARMI requirements. (`PR#2076 <https://github.com/terrapower/armi/pull/2076>`_)\n#. Significant revamp of the ARMI requirements. (`PR#2074 <https://github.com/terrapower/armi/pull/2074>`_)\n#. Adding PDF versions of the ARMI docs. (`PR#2072 <https://github.com/terrapower/armi/pull/2072>`_)\n#. Update docs build to occur with Python 3.13 and updated docs dependencies. (`PR#2050 <https://github.com/terrapower/armi/pull/2050>`_)\n#. Removing silent overwrite of ``shutil.copy``. (`PR#2081 <https://github.com/terrapower/armi/pull/2081>`_)\n\n\nARMI v0.5.0\n-----------\nRelease Date: 2024-12-14\n\nNew Features\n^^^^^^^^^^^^\n#. Supporting Python 3.12. (`PR#1813 <https://github.com/terrapower/armi/pull/1813>`_)\n#. Supporting Python 3.13. (`PR#1996 <https://github.com/terrapower/armi/pull/1996>`_)\n#. Adding data models for ex-core structures in ARMI. (`PR#1891 <https://github.com/terrapower/armi/pull/1891>`_)\n#. Opening some DBs without the ``App`` that created them. (`PR#1917 <https://github.com/terrapower/armi/pull/1917>`_)\n#. Adding support for ENDF/B-VII.1-based MC2-3 libraries. (`PR#1982 <https://github.com/terrapower/armi/pull/1982>`_)\n#. Adding setting ``mcnpLibraryVersion`` to chosen ENDF library for MCNP. (`PR#1989 <https://github.com/terrapower/armi/pull/1989>`_)\n#. Removing the ``tabulate`` dependency by ingesting it to ``armi.utils.tabulate``. (`PR#1811 <https://github.com/terrapower/armi/pull/1811>`_)\n#. ``HexBlock.rotate`` updates the spatial locator for children of that block. (`PR#1943 <https://github.com/terrapower/armi/pull/1943>`_)\n#. Provide ``Block.getInputHeight`` for determining the height of a block from blueprints. 
(`PR#1927 <https://github.com/terrapower/armi/pull/1927>`_)\n#. Provide ``Parameter.hasCategory`` for quickly checking if a parameter is defined with a given category. (`PR#1899 <https://github.com/terrapower/armi/pull/1899>`_)\n#. Provide ``ParameterCollection.where`` for efficient iteration over parameters whose definition matches a given condition. (`PR#1899 <https://github.com/terrapower/armi/pull/1899>`_)\n#. Flags can now be defined with letters and numbers. (`PR#1966 <https://github.com/terrapower/armi/pull/1966>`_)\n#. Provide utilities for determining location of a rotated object in a hexagonal lattice (``getIndexOfRotatedCell``). (`PR#1846 <https://github.com/terrapower/armi/pull/1846>`_)\n#. Allow merging a component with zero area into another component. (`PR#1858 <https://github.com/terrapower/armi/pull/1858>`_)\n#. New plugin hook ``getAxialExpansionChanger`` to customize axial expansion. (`PR#1870 <https://github.com/terrapower/armi/pull/1870>`_)\n#. New plugin hook ``beforeReactorConstruction`` to process settings before reactor init. (`PR#1945 <https://github.com/terrapower/armi/pull/1945>`_)\n#. Improving performance in the lattice physics interface by not updating cross sections at ``everyNode`` during coupled calculations. (`PR#1963 <https://github.com/terrapower/armi/pull/1963>`_)\n#. Allow merging a component with zero area into another component. (`PR#1858 <https://github.com/terrapower/armi/pull/1858>`_)\n#. Updating ``copyOrWarn`` and ``getFileSHA1Hash`` to support directories. (`PR#1984 <https://github.com/terrapower/armi/pull/1984>`_)\n#. Improve efficiency of reaction rate calculations. (`PR#1887 <https://github.com/terrapower/armi/pull/1887>`_)\n#. Adding new options for simplifying 1D cross section modeling. (`PR#1949 <https://github.com/terrapower/armi/pull/1949>`_)\n#. Adding ``--skip-inspection`` flag to ``CompareCases`` CLI. (`PR#1842 <https://github.com/terrapower/armi/pull/1842>`_)\n#. 
Exposing skip inspection options for ``armi.init`` and ``db.loadOperator``. (`PR#2005 <https://github.com/terrapower/armi/pull/2005>`_)\n#. Exposing ``detailedNDens`` to components. (`PR#1954 <https://github.com/terrapower/armi/pull/1954>`_)\n#. Adding a method ``getPinMgFluxes`` to get pin-wise multigroup fluxes from a Block. (`PR#1990 <https://github.com/terrapower/armi/pull/1990>`_)\n\nAPI Changes\n^^^^^^^^^^^\n#. ``nuclideBases.byMcc3ID`` and ``getMcc3Id()`` return IDs consistent with ENDF/B-VII.1. (`PR#1982 <https://github.com/terrapower/armi/pull/1982>`_)\n#. Moving ``settingsValidation`` from ``operators`` to ``settings``. (`PR#1895 <https://github.com/terrapower/armi/pull/1895>`_)\n#. Allowing for unknown Flags when opening a DB. (`PR#1844 <https://github.com/terrapower/armi/pull/1844>`_)\n#. Renaming ``Reactor.moveList`` to ``Reactor.moves``. (`PR#1881 <https://github.com/terrapower/armi/pull/1881>`_)\n#. Transposing ``pinMgFluxes`` parameters so that leading dimension is pin index. (`PR#1937 <https://github.com/terrapower/armi/pull/1937>`_)\n#. ``Block.getPinCoordinates`` returns an ``(N, 3)`` array, rather than a list of arrays. (`PR#1943 <https://github.com/terrapower/armi/pull/1943>`_)\n#. Alphabetizing ``Flags.toString()`` results. (`PR#1912 <https://github.com/terrapower/armi/pull/1912>`_)\n#. ``copyInterfaceInputs`` no longer requires a valid setting object. (`PR#1934 <https://github.com/terrapower/armi/pull/1934>`_)\n#. Changing ``syncDbAfterWrite`` default to ``True``. (`PR#1968 <https://github.com/terrapower/armi/pull/1968>`_)\n#. Removing ``Assembly.rotatePins`` and ``Block.rotatePins``. Prefer ``Assembly.rotate`` and ``Block.rotate``. (`PR#1846 <https://github.com/terrapower/armi/pull/1846>`_)\n#. Removing broken plot ``buVsTime``. (`PR#1994 <https://github.com/terrapower/armi/pull/1994>`_)\n#. Removing class ``AssemblyList`` and ``assemblyLists.py``. (`PR#1891 <https://github.com/terrapower/armi/pull/1891>`_)\n#. 
Removing class ``globalFluxInterface.DoseResultsMapper``. (`PR#1952 <https://github.com/terrapower/armi/pull/1952>`_)\n#. Removing class ``SmartList``. (`PR#1992 <https://github.com/terrapower/armi/pull/1992>`_)\n#. Removing flags ``CORE`` and ``REACTOR``. (`PR#1835 <https://github.com/terrapower/armi/pull/1835>`_)\n#. Removing method ``Assembly.doubleResolution()``. (`PR#1951 <https://github.com/terrapower/armi/pull/1951>`_)\n#. Removing method ``buildEqRingSchedule``. (`PR#1928 <https://github.com/terrapower/armi/pull/1928>`_)\n#. Removing method ``prepSearch``. (`PR#1845 <https://github.com/terrapower/armi/pull/1845>`_)\n#. Removing method ``SkippingXsGen_BuChangedLessThanTolerance``. (`PR#1845 <https://github.com/terrapower/armi/pull/1845>`_)\n#. Removing setting ``autoGenerateBlockGrids``. (`PR#1947 <https://github.com/terrapower/armi/pull/1947>`_)\n#. Removing setting ``mpiTasksPerNode`` and renaming ``numProcessors`` to ``nTasks``. (`PR#1958 <https://github.com/terrapower/armi/pull/1958>`_)\n#. History Tracker: \"detail assemblies\" are now fuel and control assemblies. (`PR#1990 <https://github.com/terrapower/armi/pull/1990>`_)\n#. Removing ``Block.breakFuelComponentsIntoIndividuals()``. (`PR#1990 <https://github.com/terrapower/armi/pull/1990>`_)\n#. Moving ``getPuMoles`` from blocks.py up to composites.py. (`PR#1990 <https://github.com/terrapower/armi/pull/1990>`_)\n#. Requiring ``buReducingAssemblyRotation`` and ``getOptimalAssemblyOrientation`` to have pin-level burnup. (`PR#2019 <https://github.com/terrapower/armi/pull/2019>`_)\n\nBug Fixes\n^^^^^^^^^\n#. Fixed spatial grids of pins in Blocks on flats-up grids. (`PR#1947 <https://github.com/terrapower/armi/pull/1947>`_)\n#. Fixed ``DerivedShape.getArea`` for ``cold=True``. (`PR#1831 <https://github.com/terrapower/armi/pull/1831>`_)\n#. Fixed error parsing command line integers in ``ReportsEntryPoint``. (`PR#1824 <https://github.com/terrapower/armi/pull/1824>`_)\n#. 
Fixed ``PermissionError`` when using ``syncDbAfterWrite``. (`PR#1857 <https://github.com/terrapower/armi/pull/1857>`_)\n#. Fixed ``MpiDirectoryChanger``. (`PR#1853 <https://github.com/terrapower/armi/pull/1853>`_)\n#. Changed data type of ``thKernel`` setting from ``bool`` to ``str`` in ``ThermalHydraulicsPlugin``. (`PR#1855 <https://github.com/terrapower/armi/pull/1855>`_)\n#. Update height of fluid components after axial expansion. (`PR#1828 <https://github.com/terrapower/armi/pull/1828>`_)\n#. Rotate hexagonal assembly patches correctly on facemap plots. (`PR#1883 <https://github.com/terrapower/armi/pull/1883>`_)\n#. Material theoretical density is serialized to and read from database. (`PR#1852 <https://github.com/terrapower/armi/pull/1852>`_)\n#. Removed broken and unused column in ``summarizeMaterialData``. (`PR#1925 <https://github.com/terrapower/armi/pull/1925>`_)\n#. Fixed hex block rotation in ``plotBlockDiagram``. (`PR#1926 <https://github.com/terrapower/armi/pull/1926>`_)\n#. Fixed edge case in ``assemblyBlueprint._checkParamConsistency()``. (`PR#1928 <https://github.com/terrapower/armi/pull/1928>`_)\n#. Fixed wetted perimeter for hex inner ducts. (`PR#1985 <https://github.com/terrapower/armi/pull/1985>`_)\n#. Fixing number densities when custom isotopics and material properties are combined. (`PR#1822 <https://github.com/terrapower/armi/pull/1822>`_)\n\nQuality Work\n^^^^^^^^^^^^\n#. Removing deprecated code ``axialUnitGrid``. (`PR#1809 <https://github.com/terrapower/armi/pull/1809>`_)\n#. Refactoring ``axialExpansionChanger``. (`PR#1861 <https://github.com/terrapower/armi/pull/1861>`_)\n#. Raising a ``ValueError`` when ``Database.load()`` fails. (`PR#1940 <https://github.com/terrapower/armi/pull/1940>`_)\n#. Making axial expansion-related classes more extensible. (`PR#1920 <https://github.com/terrapower/armi/pull/1920>`_)\n"
  },
  {
    "path": "doc/qa_docs/scr/0.6.rst",
    "content": "Release Notes for ARMI 0.6\n==========================\n\nHere you will find the release notes for previous ARMI releases.\n\nARMI v0.6.4\n-----------\nRelease Date: 2026-03-25\n\nThis was a very short-burn release. The biggest new feature is the addition of matProps, the materials library. This\ntool allows developers to flexibly define materials with properties in several flexible ways. This release also included\na lot of clean up work, such as moving several parameters and settings out of ARMI. The HistoryTracker EOL interaction\nwas optimized. And a block converter was added for mixed-pin assemblies.\n\n\nCode Changes, Features\n^^^^^^^^^^^^^^^^^^^^^^\n\n#. (`PR#2453 <https://github.com/terrapower/armi/pull/2453>`_) Adding matProps: a material library\n#. (`PR#2405 <https://github.com/terrapower/armi/pull/2405>`_) Remove ruamel.yaml dependency pin maximum\n#. (`PR#2436 <https://github.com/terrapower/armi/pull/2436>`_) Optimizing the HistoryTracker EOL interaction\n#. (`PR#2442 <https://github.com/terrapower/armi/pull/2442>`_) Defaulting the Database to read mode\n#. (`PR#2477 <https://github.com/terrapower/armi/pull/2477>`_) Adding a block converter for mixed pin assemblies\n#. (`PR#2478 <https://github.com/terrapower/armi/pull/2478>`_) Checking for unrepresented XS IDs\n#. (`PR#2479 <https://github.com/terrapower/armi/pull/2479>`_) Adding method to DefaultExecuter for final parameter updates\n\n\nCode Changes, Bugs and Fixes\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n#. (`PR#2468 <https://github.com/terrapower/armi/pull/2468>`_) Fixing shuffle file reading in reload runs\n#. (`PR#2470 <https://github.com/terrapower/armi/pull/2470>`_) Removing unused and broken material UThZr\n\n\nCode Changes, Maintenance, or Trivial\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n#. (`PR#2422 <https://github.com/terrapower/armi/pull/2422>`_) Removing unused parameter fluxAdjPeak\n#. 
(`PR#2430 <https://github.com/terrapower/armi/pull/2430>`_) Moving 27 TH parameters out of ARMI\n#. (`PR#2451 <https://github.com/terrapower/armi/pull/2451>`_) Moving 7 Neutronics Settings out of ARMI\n#. (`PR#2456 <https://github.com/terrapower/armi/pull/2456>`_) Anonymizing example user names in IPYNBs\n#. (`PR#2457 <https://github.com/terrapower/armi/pull/2457>`_) Doing misc cleanup and temporary pyDOE issue\n#. (`PR#2458 <https://github.com/terrapower/armi/pull/2458>`_) Removing broken pyDOE dep and LatinHyperCubeSuiteBuilder\n#. (`PR#2459 <https://github.com/terrapower/armi/pull/2459>`_) Removing Core.getAssembliesOfType in favor of iterChildrenWithFlags\n#. (`PR#2460 <https://github.com/terrapower/armi/pull/2460>`_) Stopping intermittent CI failures\n#. (`PR#2461 <https://github.com/terrapower/armi/pull/2461>`_) Renaming _Material_Test to AbstractMaterialTest\n#. (`PR#2462 <https://github.com/terrapower/armi/pull/2462>`_) Switching from coveralls.io to codecov.io\n#. (`PR#2482 <https://github.com/terrapower/armi/pull/2482>`_) Ensuring codecov.io does not fail if coverage drops a tiny amount\n#. (`PR#2483 <https://github.com/terrapower/armi/pull/2483>`_) Producing an ARMI wheel with every merge to main\n\n\nDocumentation-Only Changes\n^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n#. (`PR#2441 <https://github.com/terrapower/armi/pull/2441>`_) Moving the README text up in the PDF ToC\n#. (`PR#2481 <https://github.com/terrapower/armi/pull/2481>`_) Getting nuclide_demo tutorial working again\n#. (`PR#2443 <https://github.com/terrapower/armi/pull/2443>`_) Starting release cycle for ARMI 0.6.4\n\n\nARMI v0.6.3\n-----------\nRelease Date: 2026-02-02\n\nThis was a minor release. There were changes made to the temporary directory and unit test tooling to support read-only containers. There were also some improvements to the shuffle logic API. And the documentation had many small cleanup changes. 
For quality, when accessing a material property outside the temperature range it was defined for, ARMI now raises an exception by default.\n\n\nCode Changes, Features\n^^^^^^^^^^^^^^^^^^^^^^\n\n#. (`PR#2385 <https://github.com/terrapower/armi/pull/2385>`_) Set materials.FAIL_ON_RANGE to True by default\n#. (`PR#2399 <https://github.com/terrapower/armi/pull/2399>`_) New exception for mpi4py import\n#. (`PR#2402 <https://github.com/terrapower/armi/pull/2402>`_) Improving cleanPath and related tooling\n#. (`PR#2411 <https://github.com/terrapower/armi/pull/2411>`_) Refactoring Shuffling Logic\n#. (`PR#2423 <https://github.com/terrapower/armi/pull/2423>`_) Fix issue with recent runLog changes\n#. (`PR#2428 <https://github.com/terrapower/armi/pull/2428>`_) Copying assembly flags when creating a new assembly from type\n#. (`PR#2432 <https://github.com/terrapower/armi/pull/2432>`_) Adding forceClean to snapshot deletions\n\n\nCode Changes, Maintenance, or Trivial\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n#. (`PR#2393 <https://github.com/terrapower/armi/pull/2393>`_) Shortening some test class names that are too long\n#. (`PR#2407 <https://github.com/terrapower/armi/pull/2407>`_) Removing unused Inconel material properties\n#. (`PR#2420 <https://github.com/terrapower/armi/pull/2420>`_) Adding unit testing to HexBlock.getPinPitch\n#. (`PR#2421 <https://github.com/terrapower/armi/pull/2421>`_) Renaming misloadSwap to swap\n#. (`PR#2424 <https://github.com/terrapower/armi/pull/2424>`_) Removing deprecation warning from test\n#. (`PR#2425 <https://github.com/terrapower/armi/pull/2425>`_) Adding code coverage\n#. (`PR#2426 <https://github.com/terrapower/armi/pull/2426>`_) Moving DIF3D parameters out of ARMI\n\n\nDocumentation-Only Changes\n^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n#. (`PR#2386 <https://github.com/terrapower/armi/pull/2386>`_) Adding a count of how many settings are in ARMI\n#. 
(`PR#2390 <https://github.com/terrapower/armi/pull/2390>`_) Ensuring that SCRs will build correctly during main branch pushes\n#. (`PR#2395 <https://github.com/terrapower/armi/pull/2395>`_) Cleaning up the docs\n#. (`PR#2398 <https://github.com/terrapower/armi/pull/2398>`_) Starting version 0.6.3 release cycle\n#. (`PR#2403 <https://github.com/terrapower/armi/pull/2403>`_) Improving nuclidebases documentation - byMcnpId\n#. (`PR#2409 <https://github.com/terrapower/armi/pull/2409>`_) Clarifying PIP version in user install docs\n#. (`PR#2418 <https://github.com/terrapower/armi/pull/2418>`_) Adding descriptions to two impl tags\n#. (`PR#2419 <https://github.com/terrapower/armi/pull/2419>`_) Ensuring all impl and test tags show in the docs\n#. (`PR#2433 <https://github.com/terrapower/armi/pull/2433>`_) Adding a high-level Parameter Report to the docs\n#. (`PR#2439 <https://github.com/terrapower/armi/pull/2439>`_) Adding an AUTHORS file to ARMI\n\n\nARMI v0.6.2\n-----------\nRelease Date: 2025-12-17\n\nThis was a minor release. There was a refactoring cleanup done to NuclideBases. And there were some very minor API-breaking changes to the HistoryTrackerInterface and Interface.function was renamed to Interface.purpose. The coolest new feature is the improvements made to loadTestReactor, which can now cache a variety of test reactors to improve your test performance.\n\nCode Changes, Features\n^^^^^^^^^^^^^^^^^^^^^^\n\n#. (`PR#2335 <https://github.com/terrapower/armi/pull/2335>`_) Moving to use Reactor.nuclideBases, where possible\n#. (`PR#2374 <https://github.com/terrapower/armi/pull/2374>`_) Changing MPI_COMM to the Pickle Protocol 5\n\n\nCode Changes, Bugs and Fixes\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n#. (`PR#2364 <https://github.com/terrapower/armi/pull/2364>`_) Fixing HoledHexagon.holeRadFromCenter type\n\n\nCode Changes, Maintenance, or Trivial\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n#. 
(`PR#2177 <https://github.com/terrapower/armi/pull/2177>`_) Changing Interface.function to Interface.purpose\n#. (`PR#2334 <https://github.com/terrapower/armi/pull/2334>`_) Refactoring loadTestReactor to pickle multiple test reactors\n#. (`PR#2358 <https://github.com/terrapower/armi/pull/2358>`_) Adding code coverage\n#. (`PR#2359 <https://github.com/terrapower/armi/pull/2359>`_) Cleaning up DB version logic\n#. (`PR#2360 <https://github.com/terrapower/armi/pull/2360>`_) Cleaning out broken tryPickleOnAllContents3\n#. (`PR#2366 <https://github.com/terrapower/armi/pull/2366>`_) Raising instead of returning errors\n#. (`PR#2369 <https://github.com/terrapower/armi/pull/2369>`_) Moving three test reactors to the testing module\n#. (`PR#2371 <https://github.com/terrapower/armi/pull/2371>`_) Moving more ARMI testing tools to the testing module\n#. (`PR#2375 <https://github.com/terrapower/armi/pull/2375>`_) Cleaning unused parts of HistoryTrackerInterface\n#. (`PR#2387 <https://github.com/terrapower/armi/pull/2387>`_) Removing Five Unused Settings\n#. (`PR#2389 <https://github.com/terrapower/armi/pull/2389>`_) Improving HexBlock.hasPinPitch\n\n\nDocumentation-Only Changes\n^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n#. (`PR#2351 <https://github.com/terrapower/armi/pull/2351>`_) Updating PR form to use the word Rationale\n#. (`PR#2363 <https://github.com/terrapower/armi/pull/2363>`_) Starting 0.6.2 release cycle\n#. (`PR#2365 <https://github.com/terrapower/armi/pull/2365>`_) Improving and correcting the ARMI version semantics\n#. (`PR#2367 <https://github.com/terrapower/armi/pull/2367>`_) Formatting code in docstrings\n#. (`PR#2372 <https://github.com/terrapower/armi/pull/2372>`_) Documenting database cycle and node values\n#. (`PR#2373 <https://github.com/terrapower/armi/pull/2373>`_) Fixing docs deploying across GitHub repos\n#. (`PR#2383 <https://github.com/terrapower/armi/pull/2383>`_) Clarifying parameter doc headers\n#. 
(`PR#2388 <https://github.com/terrapower/armi/pull/2388>`_) Updating SCR to use the term Rationale\n\n\n\nARMI v0.6.1\n-----------\nRelease Date: 2025-11-05\n\nThis was a minor release. While a lot of technical debt was addressed, no major feature work was done. There were some minor bugs fixed, but again nothing worth a release. This is being tagged as a release because the API is stable and this commit was tested heavily downstream and is trustworthy.\n\nCode Changes, Features\n^^^^^^^^^^^^^^^^^^^^^^\n\n#. (`PR#2303 <https://github.com/terrapower/armi/pull/2303>`_) Encapsulating global nuclide data in classes\n#. (`PR#2317 <https://github.com/terrapower/armi/pull/2317>`_) Simplify thermal scattering\n#. (`PR#2320 <https://github.com/terrapower/armi/pull/2320>`_) Providing interface method interactRestart for managing restarts\n#. (`PR#2321 <https://github.com/terrapower/armi/pull/2321>`_) Adding Core.hasLib to check if there is a XS library\n#. (`PR#2323 <https://github.com/terrapower/armi/pull/2323>`_) Enabling user-specified distribution of MPI actions across nodes\n#. (`PR#2324 <https://github.com/terrapower/armi/pull/2324>`_) Adding orientationBOL to full core modifier\n#. (`PR#2325 <https://github.com/terrapower/armi/pull/2325>`_) Creating a new setting to specify memory requirement for cross section calculation\n#. (`PR#2344 <https://github.com/terrapower/armi/pull/2344>`_) Changing default for guideTubeTopElevation to zero\n\n\nCode Changes, Bugs and Fixes\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n#. (`PR#2328 <https://github.com/terrapower/armi/pull/2328>`_) Ensuring that users define isotopics for custom materials\n#. (`PR#2331 <https://github.com/terrapower/armi/pull/2331>`_) Resolve TEST_ROOT path\n#. (`PR#2336 <https://github.com/terrapower/armi/pull/2336>`_) Checking number of jobs against available ranks in runBatchedActions\n#. (`PR#2347 <https://github.com/terrapower/armi/pull/2347>`_) Fixing missing f-string in component blueprints\n#. 
(`PR#2350 <https://github.com/terrapower/armi/pull/2350>`_) Fixing compareLines for a multiple numbers edge case\n\n\nCode Changes, Maintenance, or Trivial\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n#. (`PR#2306 <https://github.com/terrapower/armi/pull/2306>`_) Removing volume from getMgFlux()\n#. (`PR#2309 <https://github.com/terrapower/armi/pull/2309>`_) Moving plotting functions into utils module\n#. (`PR#2310 <https://github.com/terrapower/armi/pull/2310>`_) Removing unnecessary code from Assembly.getBlocksBetweenElevations\n#. (`PR#2315 <https://github.com/terrapower/armi/pull/2315>`_) Moving some testing utils to testing module\n#. (`PR#2327 <https://github.com/terrapower/armi/pull/2327>`_) Adding a unit test to HexGrid.generateSortedHexLocationList\n#. (`PR#2348 <https://github.com/terrapower/armi/pull/2348>`_) Make singleMixedAssembly more flexible for downstream testing\n\n\nDocumentation-Only Changes\n^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n#. (`PR#2250 <https://github.com/terrapower/armi/pull/2250>`_) Logging redundant material modifications\n#. (`PR#2280 <https://github.com/terrapower/armi/pull/2280>`_) Improving the new shuffling docs\n#. (`PR#2311 <https://github.com/terrapower/armi/pull/2311>`_) Starting the ARMI 0.6.1 release cycle\n#. (`PR#2312 <https://github.com/terrapower/armi/pull/2312>`_) Updating XS group manager requirements\n#. (`PR#2314 <https://github.com/terrapower/armi/pull/2314>`_) Documenting ARMI’s testing tools for devs\n#. (`PR#2332 <https://github.com/terrapower/armi/pull/2332>`_) Adding settings header to the settings YAML\n#. (`PR#2337 <https://github.com/terrapower/armi/pull/2337>`_) Fixing docs build\n#. (`PR#2339 <https://github.com/terrapower/armi/pull/2339>`_) Adding contributor to SCR doc automation script\n#. (`PR#2342 <https://github.com/terrapower/armi/pull/2342>`_) Documenting how to use the zonesFile Setting\n#. (`PR#2343 <https://github.com/terrapower/armi/pull/2343>`_) Improving documentation for zoneDefinions Setting\n#. 
(`PR#2354 <https://github.com/terrapower/armi/pull/2354>`_) Clarifying that ARMI materials are of testing quality\n\n\n\nARMI v0.6.0\n-----------\nRelease Date: 2025-09-25\n\nThis was a big release. A lot of technical debt has been cleaned up (XML geom files are finally gone), but there was a lot a lot of feature work: multi-pin blocks, axial expansion improvements, more powerful shuffle logic, and the ability to more freely load an ARMI database.\n\nCode Changes, Features\n^^^^^^^^^^^^^^^^^^^^^^\n\n#. (`PR#1995 <https://github.com/terrapower/armi/pull/1995>`_) Improving HexBlock.getFlowArea\n#. (`PR#2031 <https://github.com/terrapower/armi/pull/2031>`_) Providing better composite iteration methods\n#. (`PR#2045 <https://github.com/terrapower/armi/pull/2045>`_) Adding a check on the grid/component consistency in the BPs\n#. (`PR#2092 <https://github.com/terrapower/armi/pull/2092>`_) Allowing ARMI to use tmp dir on Mac/Linux\n#. (`PR#2105 <https://github.com/terrapower/armi/pull/2105>`_) Removing support for XML geom files\n#. (`PR#2106 <https://github.com/terrapower/armi/pull/2106>`_) Add Core.iterBlocks and Assembly.iterBlocks\n#. (`PR#2107 <https://github.com/terrapower/armi/pull/2107>`_) Handing empty string defaults better in copyInterfaceInputs\n#. (`PR#2109 <https://github.com/terrapower/armi/pull/2109>`_) Store number densities in numpy arrays instead of dictionary\n#. (`PR#2114 <https://github.com/terrapower/armi/pull/2114>`_) Allowing component area to be queried at arbitrary temp\n#. (`PR#2118 <https://github.com/terrapower/armi/pull/2118>`_) Adding a FilletedHexagon shape\n#. (`PR#2121 <https://github.com/terrapower/armi/pull/2121>`_) Supporting growing DBto full core on db load\n#. (`PR#2135 <https://github.com/terrapower/armi/pull/2135>`_) Retooling single-warnings report as all warnings report\n#. (`PR#2138 <https://github.com/terrapower/armi/pull/2138>`_) Allowing the BOL orientations to be set in the blueprints\n#. 
(`PR#2162 <https://github.com/terrapower/armi/pull/2162>`_) Make axial linking aware of block grids for axial expansion\n#. (`PR#2173 <https://github.com/terrapower/armi/pull/2173>`_) Improving Core.libs to look for the current cycle and node\n#. (`PR#2175 <https://github.com/terrapower/armi/pull/2175>`_) Blocking duplicate flags from being added\n#. (`PR#2198 <https://github.com/terrapower/armi/pull/2198>`_) Updating Axial Expansion Changer for improved mass redistribution\n#. (`PR#2199 <https://github.com/terrapower/armi/pull/2199>`_) Add 3 nuclides to getDefaultNuclideFlags\n#. (`PR#2202 <https://github.com/terrapower/armi/pull/2202>`_) Provide Component.pinIndices for helping understand where pins are\n#. (`PR#2208 <https://github.com/terrapower/armi/pull/2208>`_) Making ParamMapper symmetry-aware\n#. (`PR#2209 <https://github.com/terrapower/armi/pull/2209>`_) Block collection nuclides\n#. (`PR#2218 <https://github.com/terrapower/armi/pull/2218>`_) Adding a method to get cycle/node combinations for a time interval\n#. (`PR#2219 <https://github.com/terrapower/armi/pull/2219>`_) Refactoring Shuffle Logic Inputs to YAML\n#. (`PR#2221 <https://github.com/terrapower/armi/pull/2221>`_) Update logic for number density arrays and other cleanup\n#. (`PR#2223 <https://github.com/terrapower/armi/pull/2223>`_) Move the zonesFile setting to the framework and add building of zones to the interface stack\n#. (`PR#2225 <https://github.com/terrapower/armi/pull/2225>`_) Advancing r.p.time in the Operator\n#. (`PR#2227 <https://github.com/terrapower/armi/pull/2227>`_) Symmetry testing\n#. (`PR#2233 <https://github.com/terrapower/armi/pull/2233>`_) Remove axialPowerProfile* parameters\n#. (`PR#2235 <https://github.com/terrapower/armi/pull/2235>`_) Adding two geometry parameters\n#. (`PR#2243 <https://github.com/terrapower/armi/pull/2243>`_) Updating wetted perimeter calculation\n#. 
(`PR#2251 <https://github.com/terrapower/armi/pull/2251>`_) Comparing special formatting parameters in DBs\n#. (`PR#2255 <https://github.com/terrapower/armi/pull/2255>`_) Add b10NumFrac attribute to B4C class to allow for flexible setDefaultMassFracs\n#. (`PR#2266 <https://github.com/terrapower/armi/pull/2266>`_) Allow assembly parameters to be symmetry aware during core transformations and move operations\n#. (`PR#2269 <https://github.com/terrapower/armi/pull/2269>`_) Track assemblies if discharged to SFP\n#. (`PR#2272 <https://github.com/terrapower/armi/pull/2272>`_) Adding new Component param enrichmentBOL\n#. (`PR#2275 <https://github.com/terrapower/armi/pull/2275>`_) Cleaning internal state out of some materials\n#. (`PR#2277 <https://github.com/terrapower/armi/pull/2277>`_) Enhance fuel handler logic to support module imports\n#. (`PR#2278 <https://github.com/terrapower/armi/pull/2278>`_) Adding support for moving assemblies from SFP to Core in YAML shuffle input\n#. (`PR#2292 <https://github.com/terrapower/armi/pull/2292>`_) Support mixed Blocks for smear density calculation\n#. (`PR#2305 <https://github.com/terrapower/armi/pull/2305>`_) Raising error if no driverBlock is found by latticePhysicsWriter\n\n\nCode Changes, Bugs and Fixes\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n#. (`PR#1654 <https://github.com/terrapower/armi/pull/1654>`_) Use clearer input syntax for hexagonal lattice pitch\n#. (`PR#1998 <https://github.com/terrapower/armi/pull/1998>`_) Fixing a couple of plots to use initial block height\n#. (`PR#2098 <https://github.com/terrapower/armi/pull/2098>`_) Removing the HTML reports feature\n#. (`PR#2102 <https://github.com/terrapower/armi/pull/2102>`_) Fixing issue in copyInterfaceInputs with one file\n#. (`PR#2111 <https://github.com/terrapower/armi/pull/2111>`_) fix side effects from tests\n#. (`PR#2115 <https://github.com/terrapower/armi/pull/2115>`_) Adding Reactor construction hook to Database.load()\n#. 
(`PR#2120 <https://github.com/terrapower/armi/pull/2120>`_) Cylindrical Cross Section model updates\n#. (`PR#2129 <https://github.com/terrapower/armi/pull/2129>`_) OperatorMPI doesn’t need to bcast quits if there no other workers\n#. (`PR#2153 <https://github.com/terrapower/armi/pull/2153>`_) Hiding duplicate warning messages\n#. (`PR#2160 <https://github.com/terrapower/armi/pull/2160>`_) Fixing bad Return in safeCopy\n#. (`PR#2163 <https://github.com/terrapower/armi/pull/2163>`_) Using gamma groups instead of neutron groups in gamiso.addDummyNuclidesToLibrary\n#. (`PR#2176 <https://github.com/terrapower/armi/pull/2176>`_) Using np.int32 when reading GEODST files\n#. (`PR#2180 <https://github.com/terrapower/armi/pull/2180>`_) Remove assert statements from FilletedHexagon instantiation\n#. (`PR#2186 <https://github.com/terrapower/armi/pull/2186>`_) Ensuring full core BPs aren’t converted like 1/3 core\n#. (`PR#2187 <https://github.com/terrapower/armi/pull/2187>`_) Fixing bug in Uranium.pseudoDensity\n#. (`PR#2189 <https://github.com/terrapower/armi/pull/2189>`_) Fixing bug in finding ISOTXS libraries to merge\n#. (`PR#2191 <https://github.com/terrapower/armi/pull/2191>`_) Fixing issue with full core BP geometry conversion\n#. (`PR#2195 <https://github.com/terrapower/armi/pull/2195>`_) Fixing round trip of hex lattice maps\n#. (`PR#2226 <https://github.com/terrapower/armi/pull/2226>`_) Fix equality of MultiIndexLocator and CoordinateLocation\n#. (`PR#2228 <https://github.com/terrapower/armi/pull/2228>`_) Fixing bug in Air.pseudoDensity when given Celsius T\n#. (`PR#2229 <https://github.com/terrapower/armi/pull/2229>`_) Change initialization of modArea for database load\n#. (`PR#2231 <https://github.com/terrapower/armi/pull/2231>`_) Fixing issue initial time node in previous PR\n#. (`PR#2236 <https://github.com/terrapower/armi/pull/2236>`_) Handle pinIndices for blocks that don’t have fuel\n#. 
(`PR#2245 <https://github.com/terrapower/armi/pull/2245>`_) Fixing invalid any() signature\n#. (`PR#2248 <https://github.com/terrapower/armi/pull/2248>`_) Fixing issue loading from snapshots database\n#. (`PR#2253 <https://github.com/terrapower/armi/pull/2253>`_) Making a unit test thread safe\n#. (`PR#2259 <https://github.com/terrapower/armi/pull/2259>`_) Re-assigning pin indices when sorting a Block\n#. (`PR#2260 <https://github.com/terrapower/armi/pull/2260>`_) Fixing compareLines so that it doesn’t trip on zeros\n#. (`PR#2268 <https://github.com/terrapower/armi/pull/2268>`_) Fixing Uranium enrichment calculations\n#. (`PR#2276 <https://github.com/terrapower/armi/pull/2276>`_) Fixing Composite.extend to correctly set the parent\n#. (`PR#2282 <https://github.com/terrapower/armi/pull/2282>`_) Fixing incorrect variable name in Pitch class\n#. (`PR#2291 <https://github.com/terrapower/armi/pull/2291>`_) Conserve molesHmBOL / massHmBOL when performing axial expansion\n#. (`PR#2294 <https://github.com/terrapower/armi/pull/2294>`_) Ensuring settings file can be found when writing one DB from another\n#. (`PR#2298 <https://github.com/terrapower/armi/pull/2298>`_) Preserve loading of CoordinateLocation in db load\n#. (`PR#2302 <https://github.com/terrapower/armi/pull/2302>`_) Handle pin indices for fuel + non fuel on the same grid\n#. (`PR#2307 <https://github.com/terrapower/armi/pull/2307>`_) Clearing out Component.p.pinIndices prior to assignment\n\n\nCode Changes, Maintenance, or Trivial\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n#. (`PR#1386 <https://github.com/terrapower/armi/pull/1386>`_) Improve “smallRun” settings names\n#. (`PR#2085 <https://github.com/terrapower/armi/pull/2085>`_) Dropping black formatter for ruff\n#. (`PR#2093 <https://github.com/terrapower/armi/pull/2093>`_) Speed up axial expansion unit tests\n#. (`PR#2096 <https://github.com/terrapower/armi/pull/2096>`_) Fixing spelling errors\n#. 
(`PR#2103 <https://github.com/terrapower/armi/pull/2103>`_) Fixing spelling in docs and docstrings\n#. (`PR#2104 <https://github.com/terrapower/armi/pull/2104>`_) Removing defunct references to Cinder\n#. (`PR#2110 <https://github.com/terrapower/armi/pull/2110>`_) Combining three .gitignore files into one\n#. (`PR#2116 <https://github.com/terrapower/armi/pull/2116>`_) Cleaning up the codeTiming reports\n#. (`PR#2117 <https://github.com/terrapower/armi/pull/2117>`_) Reducing the warnings from Block.autoCreateSpatialGrids\n#. (`PR#2123 <https://github.com/terrapower/armi/pull/2123>`_) Removing permanently skipped tests\n#. (`PR#2126 <https://github.com/terrapower/armi/pull/2126>`_) Removing old TODO comments from the codebase\n#. (`PR#2127 <https://github.com/terrapower/armi/pull/2127>`_) Removing 3 unused Settings\n#. (`PR#2128 <https://github.com/terrapower/armi/pull/2128>`_) Created a fast flux energy structure for calculating fast flux\n#. (`PR#2130 <https://github.com/terrapower/armi/pull/2130>`_) Removing unused Parameters\n#. (`PR#2132 <https://github.com/terrapower/armi/pull/2132>`_) Removing unused reactivity coeffs params\n#. (`PR#2133 <https://github.com/terrapower/armi/pull/2133>`_) Moving NeutronicsPlugin to its own file\n#. (`PR#2134 <https://github.com/terrapower/armi/pull/2134>`_) Removing unused Parameters\n#. (`PR#2136 <https://github.com/terrapower/armi/pull/2136>`_) Removing unused TH parameters\n#. (`PR#2139 <https://github.com/terrapower/armi/pull/2139>`_) Removing unnecessary DB load try/except\n#. (`PR#2140 <https://github.com/terrapower/armi/pull/2140>`_) Cleaning up Block constructor\n#. (`PR#2141 <https://github.com/terrapower/armi/pull/2141>`_) Changing format-style strings to f-strings\n#. (`PR#2142 <https://github.com/terrapower/armi/pull/2142>`_) Quieting warnings from Block.getComponent\n#. (`PR#2144 <https://github.com/terrapower/armi/pull/2144>`_) Improving the default value for Assembly.getArea()\n#. 
(`PR#2146 <https://github.com/terrapower/armi/pull/2146>`_) Some more fstring conversions\n#. (`PR#2155 <https://github.com/terrapower/armi/pull/2155>`_) Cleaning up strange counter line\n#. (`PR#2157 <https://github.com/terrapower/armi/pull/2157>`_) Removing overly-specific check from the Component constructor\n#. (`PR#2165 <https://github.com/terrapower/armi/pull/2165>`_) Removing old setting mpiTasksPerNode from ZPPR test file\n#. (`PR#2166 <https://github.com/terrapower/armi/pull/2166>`_) Removing commented out code\n#. (`PR#2167 <https://github.com/terrapower/armi/pull/2167>`_) Removing unused test code\n#. (`PR#2168 <https://github.com/terrapower/armi/pull/2168>`_) Removing Deprecation Warning on sortReactor setting\n#. (`PR#2170 <https://github.com/terrapower/armi/pull/2170>`_) Adding a collar flag\n#. (`PR#2171 <https://github.com/terrapower/armi/pull/2171>`_) Cleaning up Tests to have Fewer Side Effects\n#. (`PR#2183 <https://github.com/terrapower/armi/pull/2183>`_) Renaming old smallRun Setting to rmExternalFilesAtEOL\n#. (`PR#2190 <https://github.com/terrapower/armi/pull/2190>`_) Using iterators instead of getAssemblies where possible\n#. (`PR#2197 <https://github.com/terrapower/armi/pull/2197>`_) Using iterators more in our unit tests\n#. (`PR#2203 <https://github.com/terrapower/armi/pull/2203>`_) Slight refactor on b.getSmearDensity to accommodate downstream work\n#. (`PR#2210 <https://github.com/terrapower/armi/pull/2210>`_) Removing python-dateutil dependency\n#. (`PR#2211 <https://github.com/terrapower/armi/pull/2211>`_) Remove Component.p.puFrac\n#. (`PR#2212 <https://github.com/terrapower/armi/pull/2212>`_) Removing duplicate lines\n#. (`PR#2215 <https://github.com/terrapower/armi/pull/2215>`_) Removing defunct deprecation warning\n#. (`PR#2220 <https://github.com/terrapower/armi/pull/2220>`_) Adding a basic unit test of Block.computeSmearDensity\n#. 
(`PR#2230 <https://github.com/terrapower/armi/pull/2230>`_) Adding Composite.getFirstComponent method\n#. (`PR#2232 <https://github.com/terrapower/armi/pull/2232>`_) Handling BOL times better\n#. (`PR#2240 <https://github.com/terrapower/armi/pull/2240>`_) Cleaning trace and profile out of RunEntryPoint\n#. (`PR#2241 <https://github.com/terrapower/armi/pull/2241>`_) move attributes to __init__\n#. (`PR#2242 <https://github.com/terrapower/armi/pull/2242>`_) ParamLocation for Duct Temp/DPAs\n#. (`PR#2257 <https://github.com/terrapower/armi/pull/2257>`_) Improving Code Coverage for Blocks and MPIAction\n#. (`PR#2263 <https://github.com/terrapower/armi/pull/2263>`_) Adding tests to improve code coverage\n#. (`PR#2265 <https://github.com/terrapower/armi/pull/2265>`_) Removing deprecated settingsValidation file\n#. (`PR#2283 <https://github.com/terrapower/armi/pull/2283>`_) Removing unused debugDB setting\n#. (`PR#2285 <https://github.com/terrapower/armi/pull/2285>`_) Improving the error messages for invalid settings data\n#. (`PR#2289 <https://github.com/terrapower/armi/pull/2289>`_) Improving extensibility of mass redistribution method in axial expansion\n#. (`PR#2297 <https://github.com/terrapower/armi/pull/2297>`_) Reducing log spam when creating a lot of spatial grids\n#. (`PR#2300 <https://github.com/terrapower/armi/pull/2300>`_) Shortening our longest unit test names\n\n\nDocumentation-Only Changes\n^^^^^^^^^^^^^^^^^^^^^^^^^^\n\n#. (`PR#2090 <https://github.com/terrapower/armi/pull/2090>`_) Adding an SCR section to the docs\n#. (`PR#2095 <https://github.com/terrapower/armi/pull/2095>`_) Edits to STR test report\n#. (`PR#2100 <https://github.com/terrapower/armi/pull/2100>`_) Adding more info to STR intro\n#. (`PR#2101 <https://github.com/terrapower/armi/pull/2101>`_) Fixing issue with SCR on main branch\n#. (`PR#2119 <https://github.com/terrapower/armi/pull/2119>`_) Adding basic documentation for axial expansion\n#. 
(`PR#2131 <https://github.com/terrapower/armi/pull/2131>`_) Update docstring for Settings class to reflect mutability\n#. (`PR#2137 <https://github.com/terrapower/armi/pull/2137>`_) Improving description of rateProdNet parameter\n#. (`PR#2143 <https://github.com/terrapower/armi/pull/2143>`_) Improving the docs-build instructions\n#. (`PR#2148 <https://github.com/terrapower/armi/pull/2148>`_) Adding tooling to help people build the docs locally\n#. (`PR#2150 <https://github.com/terrapower/armi/pull/2150>`_) Clarifying setting disableBlockTypeExclusionInXsGeneration\n#. (`PR#2151 <https://github.com/terrapower/armi/pull/2151>`_) Adding SQA for the SFP and cycles setting\n#. (`PR#2174 <https://github.com/terrapower/armi/pull/2174>`_) Remove traces of black\n#. (`PR#2213 <https://github.com/terrapower/armi/pull/2213>`_) Ensuring non-main branch PRs do not yield SCRs\n#. (`PR#2214 <https://github.com/terrapower/armi/pull/2214>`_) Fixing error in recent doc change\n#. (`PR#2217 <https://github.com/terrapower/armi/pull/2217>`_) Improving documentation of axial expansion\n#. (`PR#2222 <https://github.com/terrapower/armi/pull/2222>`_) Make a duplicated test tag unique\n#. (`PR#2238 <https://github.com/terrapower/armi/pull/2238>`_) Trying to speed up docs build\n#. (`PR#2249 <https://github.com/terrapower/armi/pull/2249>`_) Improving docs on entry points creation\n#. (`PR#2264 <https://github.com/terrapower/armi/pull/2264>`_) Update the description of the mcnpLibraryVersion case setting\n#. (`PR#2270 <https://github.com/terrapower/armi/pull/2270>`_) Fixing sphinx warnings in the doc build\n#. (`PR#2274 <https://github.com/terrapower/armi/pull/2274>`_) Adding user documentation of core symmetry\n#. (`PR#2279 <https://github.com/terrapower/armi/pull/2279>`_) Fixing the SCR table in the docs\n#. (`PR#2286 <https://github.com/terrapower/armi/pull/2286>`_) Improving Docs for 0.6.0 Release\n"
  },
  {
    "path": "doc/qa_docs/scr/index.rst",
    "content": "Software Change Requests (SCR)\n==============================\n\nYou can find a Software Change Request (SCR) for each release below.\n\n----------\n\n.. toctree::\n   :maxdepth: 1\n   :glob:\n   :reversed:\n\n   *\n"
  },
  {
    "path": "doc/qa_docs/scr/latest_scr.rst",
    "content": "SCR for ARMI 0.7.0\n==================\n\nThis is a listing of all the Software Change Request (SCR) changes in the ARMI repository, as part of the current release.\n\nBelow, this SCR is organized into the individual changes that comprise the net SCR for this release. Each SCR below explicitly lists its impact on ARMI requirements, if any. It is also important to note ARMI and all its requirements are tested entirely by the automated testing that happens during the ARMI build. None of the SCRs below will be allowed to happen if any single test fails, so it can be guaranteed that all SCRs below have fully passed all testing.\n\n\nSCR Listing\n-----------\n\nThe following lists display all the SCRs in this release of the ARMI framework.\n\n\n.. exec::\n   import os\n   from automateScr import buildScrListing\n\n   thisPrNum = int(os.environ.get('PR_NUMBER', -1) or -1)\n   return buildScrListing(\"7b741a19\", thisPrNum)\n"
  },
  {
    "path": "doc/qa_docs/sdid.rst",
    "content": "Software Design and Implementation Document (SDID)\n==================================================\n\n\nPurpose and Scope\n-----------------\n\nThis document is the Software Design and Implementation Document (SDID) for ARMI.\n\nThe purpose of this document is to define how the ARMI requirements are implemented. These are\nimportant user stories for anyone wanting to use ARMI or develop their own ARMI-based application.\nThe implementation of the ARMI requirements is described in detail in an Implementation Traceability\nMatrix (ITM).\n\n\nProcedural Compliance\n^^^^^^^^^^^^^^^^^^^^^\n\nThis document includes information on four topics: the (1) software environment, (2) measures to\nmitigate possible failures, (3) implementation of the computational sequence, and (4) technical\nadequacy.\n\nSoftware Environment\n^^^^^^^^^^^^^^^^^^^^\n\nARMI is built using the Python programming language and runs on Windows and Linux operating systems.\n\nFailure Mitigation\n^^^^^^^^^^^^^^^^^^\n\nARMI provides a suite of unit tests which provide indication of the proper usage of the program.\nThese tests are described in the software test report and are directly traceable to the requirements\nin the software requirements specification document. The purpose of these tests is to provide a way\nfor downstream users to test and measure the utility of the ARMI framework for their own purposes,\nin their own environment. This allows users and developers to perform failure analysis. 
These tests\nallow for a push-button way to measure and mitigate consequences and problems including external and\ninternal abnormal conditions and events that can affect the software.\n\nImplementation of Computational Sequence\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nThe computational sequence and relevant portions of the technical adequacy are specific to the\nimplementation and are described for each implementation in the\n:ref:`Implementation Traceability Matrix <armi_impl_matrix>`.\n\nTechnical Adequacy\n^^^^^^^^^^^^^^^^^^\n\nThe internal completeness for each implementation is shown by providing traceability to the\nrequirements as shown in the :ref:`Implementation Traceability Matrix <armi_impl_matrix>`. The\nconsistency of the implementation is provided by a best practice used by the development team\nincluding, revision control, ensuring that code content is reviewed by non-code originating team\nmembers, and ensuring training for developers. Clarity is provided by the descriptions of the\nimplementations in the :ref:`Implementation Traceability Matrix <armi_impl_matrix>`. Figures are\nadded as needed in the implementation in the\n:ref:`Implementation Traceability Matrix <armi_impl_matrix>`.\n\n\nDesign and Implementation\n-------------------------\n\nTo automate the process of tracking the implementation of all requirements in ARMI, we are using the\n:ref:`Implementation Traceability Matrix <armi_impl_matrix>` below. This will connect\nhigh-quality, in-code documentation with each requirement in a complete way. However, before giving\na complete overview of the requirement implementations, this document will describe the design of\ntwo main features in the ARMI codebase: the plugin system and the reactor data model. 
These are the\ntwo major features which you need to understand to understand what ARMI is, and why it is useful.\nSo, at the risk of duplicating documentation, the design of these two features will be discussed in\nsome detail.\n\n\nImplementation of Plugin System\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nThe first important design idea to understand in ARMI is that ARMI is a framework for nuclear\nreactor modeling. What this means is that the science or engineering calculations for nuclear\nreactor modeling do not happen in ARMI. The point of ARMI is to tie together disparate nuclear\nmodeling software that already exist. Thus, ARMI must be able to wrap external codes, and\norchestrate running them at each time step we want to model.\n\nThe second design idea is that at each time step, there is an ordered list of conceptual reactor\nmodeling steps to be executed. ARMI calls these steps\n:py:class:`Interfaces <armi.interfaces.Interface>` and runs the code in each, in order, at each time\nstep. While ARMI does have a default list of modeling steps, and a default order, none of the steps\nare mandatory, and their order is modifiable. An example interface stack would be:\n\n* preprocessing\n* fuel management\n* depletion\n* fuel performance\n* cross sections\n* critical control\n* flux\n* thermal hydraulics\n* reactivity coefficients\n* transient\n* bookkeeping\n* postprocessing\n\nSo, how do we add Interfaces to the simulation? The third major design idea is that developers can\ncreate an ARMI :py:class:`Plugin <armi.plugins.ArmiPlugin>`, which can add one or more Interfaces to\nthe simulation.\n\nLastly, at the highest level of the design, a developer can create an ARMI\n:py:class:`Application <armi.apps.App>`. This is a flexible container that allows developers to\nregister multiple Plugins, which register multiple Interfaces, which fully define all the code that\nwill be run at each time step of the simulation.\n\nBelow is a diagram from an example ARMI Application. 
Following this design, in the real world you\nwould expect an ARMI Application to be made by various teams of scientists and engineers that define\none Plugin and a small number of Interfaces. Then a simulation of the reactor would be carried out\nover some number of cycles / time nodes, where each of the Interfaces would be run in a specified\norder at each time node.\n\n.. figure:: /.static/armi_application_structure.png\n    :align: center\n\n    An example ARMI Application.\n\nIf this high-level design seems abstract, that is by design. ARMI is not concerned with implementing\nscientific codes, or enforcing nuclear modelers do things a certain way. ARMI is a tool that aims to\nsupport a wide audience of nuclear reactor modelers.\n\n\nImplementation of Reactor Data Model\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nIn the previous section, we described how an ARMI Application is put together. But that Application\nis only useful if it can pass information about the reactor between all the external codes that are\nbeing wrapped by each Interface. Thus, an important part of the ARMI design is that it has a robust\nand detailed software data model to represent the current state of the reactor. This data model can\nbe queried and manipulated by each Interface to get data that is needed to run the external reactor\nmodeling codes.\n\nThe structure of the ARMI reactor data model is designed to be quite flexible, and heavily\nmodifiable in code. But most of the practical work done with ARMI so far has been on pin-type\nreactor cores, so we will focus on such an example.\n\nAt the largest scale, the :py:class:`Reactor <armi.reactor.reactors.Reactor>` contains a\n:py:class:`Core <armi.reactor.reactors.Core>` and a\n:py:class:`Spent Fuel Pool <armi.reactor.assemblyLists.SpentFuelPool>`. The Core is made primarily\nof a collection of :py:class:`Assemblies <armi.reactor.assemblies.Assembly>`, which are vertical\ncollections of :py:class:`Blocks <armi.reactor.blocks.Block>`. 
Each Block, and every other physical\npiece of the Reactor is a :py:class:`Composite <armi.reactor.composites.Composite>`. Composites have\na defined shape, material(s), location in space, and parent. Composites have parents because ARMI\ndefines all Reactors as a hierarchical model, where outer objects contain inner children, and the\nReactor is the outermost object. The important thing about this model is that it is in code, so\ndevelopers of ARMI Interfaces can query and modify the reactor data model in any way they need.\n\n.. figure:: /.static/armi_reactor_objects.png\n    :align: center\n\n    Structure of the ARMI reactor data model.\n\n\n.. _armi_hardware:\n\nHardware/OS Compatibility\n^^^^^^^^^^^^^^^^^^^^^^^^^\n\nARMI is a Python-based framework, designed to help tie together various nuclear models, all written\nin a variety of languages. ARMI officially supports Python versions 3.9 and higher. ARMI is also\ndesigned to work on modern versions of both Windows and Linux.\n\nThe memory, CPU, and hardware needs of an ARMI simulation depend on the Reactor. Simulations run\nwith lumped fission products will require more memory than those run without. Simulations with much\nlarger, more detailed reactor core blueprints, or containing more components, will take up more\nmemory than simpler blueprints. ARMI can also be run with only one process, but most users choose to\nrun ARMI in parallel on a computing cluster of some kind. In practice, users tend to find that\ndozens or hundreds of parallel processes are helpful for speeding up ARMI runs, and each process\nwill ideally have 1 or 2 GB of RAM.\n\n\nError/Input Handling\n^^^^^^^^^^^^^^^^^^^^\n\nARMI's internal error-handling library is the :py:mod:`runLog <armi.runLog>`. This tool handles the\nwarnings and errors for internal ARMI code and all the plugins. The ``runLog`` system will handle\nboth print-to-screen and log file messages. 
At the end of the run, all log messages from every\nplugin and from all parallel processes are tabulated into centralized log files.\n\nThe ``runLog`` system will also tabulate a list of all warnings that occurred during a simulation.\nAnd it should be noted that most full \"errors\" will cause the ARMI simulation to fail and stop hard,\nending the run early. This is the ideal solution, so people know the run results are invalid. To\nthat effect, ARMI makes use of Python's robust `Exception` system.\n\n\n.. _armi_impl_matrix:\n\nImplementation Traceability Matrix\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nThe requirements and associated tests which demonstrate acceptance of the codebase with the\nrequirements are in the Software Requirements Specification Document :ref:`(SRSD) <armi_srsd>`. This\nsection contains a list of all requirement implementations.\n\nHere are some quick metrics for the requirement implementations in ARMI:\n\n* :need_count:`type=='req' and status=='accepted'` Accepted Requirements in ARMI\n\n  * :need_count:`type=='req' and status=='accepted' and len(implements_back)>0` Accepted Requirements with implementations\n  * :need_count:`type=='impl'` implementations linked to Requirements\n\nAnd here is a full listing of all the requirement implementations in ARMI, that are tied to requirements:\n\n.. needextract::\n  :filter: id.startswith('I_ARMI_')\n"
  },
  {
    "path": "doc/qa_docs/srsd/bookkeeping_reqs.rst",
    "content": ".. _armi_bookkeeping:\n\nBookkeeping Package\n-------------------\n\nThis section provides requirements for the :py:mod:`armi.bookkeeping` package within the framework, which\nhandles data persistence, including storage and recovery, report generation, data visualization, \nand debugging.\n\nFunctional Requirements\n+++++++++++++++++++++++\n\n.. req:: The database package shall save a copy of the user settings associated with the run.\n    :id: R_ARMI_DB_CS\n    :subtype: functional\n    :basis: This supports traceability and restart ability.\n    :acceptance_criteria: Save and retrieve the user settings from the database.\n    :status: accepted\n\n.. req:: The database package shall save a copy of the reactor blueprints associated with the run.\n    :id: R_ARMI_DB_BP\n    :subtype: functional\n    :basis: This supports traceability and restart ability.\n    :acceptance_criteria: Save and retrieve the blueprints from the database.\n    :status: accepted\n\n.. req:: The database shall store reactor state data at specified points in time.\n    :id: R_ARMI_DB_TIME\n    :subtype: functional\n    :basis: Loading a reactor from a database is needed for follow-on analysis.\n    :acceptance_criteria: Save and load a reactor from a database at specified point in time and show parameters are appropriate.\n    :status: accepted\n\n.. req:: ARMI shall allow runs at a particular time node to be re-instantiated from a snapshot.\n    :id: R_ARMI_SNAPSHOT_RESTART\n    :subtype: functional\n    :basis: Analysts need to do follow-on analysis on detailed treatments of particular time nodes.\n    :acceptance_criteria: After restarting a run, the reactor time node and power has been correctly reset.\n    :status: accepted\n\n.. 
req:: The database shall store system attributes during a simulation.\n    :id: R_ARMI_DB_QA\n    :subtype: functional\n    :basis: Storing system attributes provides QA traceability.\n    :acceptance_criteria: Demonstrate that system attributes are stored in a database after it is initialized.\n    :status: accepted\n\n.. req:: ARMI shall allow for previously calculated reactor state data to be retrieved within a run.\n    :id: R_ARMI_HIST_TRACK\n    :subtype: functional\n    :basis: Retrieval of calculated run data from a previous time node within a run supports time-based data integration.\n    :acceptance_criteria: Demonstrate that a set of parameters stored at differing time nodes can be recovered.\n    :status: accepted\n\n    .. ## Note: ARMI strongly suggests you use the Database for this purpose instead.\n\nSoftware Attributes\n+++++++++++++++++++\n\n.. req:: The database produced shall be agnostic to programming language.\n    :id: R_ARMI_DB_H5\n    :subtype: attribute\n    :basis: Analysts should be free to use the data in any programming language they choose.\n    :acceptance_criteria: Open an output file in the h5 format.\n    :status: accepted\n\nI/O Requirements\n++++++++++++++++\n\n.. req:: ARMI shall allow extra data to be saved from a run, at specified time nodes.\n    :id: R_ARMI_SNAPSHOT\n    :subtype: io\n    :basis: Analysts need to do follow-on analysis on detailed treatments of particular time nodes.\n    :acceptance_criteria: Snapshot logic can be called for a given set of time nodes.\n    :status: accepted\n"
  },
  {
    "path": "doc/qa_docs/srsd/cases_reqs.rst",
    "content": ".. _armi_cases:\n\nCases Package\n-------------\n\nThis section provides requirements for the :py:mod:`armi.cases` package within the framework, which\nis responsible for running and analyzing ARMI-based cases and case suites for an application. This\nincludes functionalities to serialize and deserialize case inputs for input modification, tracking\nthe status of a case, and running simulations.\n\nFunctional Requirements\n+++++++++++++++++++++++\n\n.. req:: The case package shall provide a generic mechanism that will allow a user to run a simulation.\n    :id: R_ARMI_CASE\n    :subtype: functional\n    :basis: Most workflows rely on this capability.\n    :acceptance_criteria: Build a case and initialize a simulation.\n    :status: accepted\n\n.. req:: The case package shall provide a tool to run multiple cases at the same time or with dependence on other cases.\n    :id: R_ARMI_CASE_SUITE\n    :subtype: functional\n    :basis: Many workflows rely on this capability.\n    :acceptance_criteria: Build a suite of cases with dependence and run them.\n    :status: accepted\n\n.. req:: The case package shall provide a generic mechanism to allow users to modify user inputs in a collection of cases.\n    :id: R_ARMI_CASE_MOD\n    :subtype: functional\n    :basis: This capability is needed by analysis workflows such as parameter studies and uncertainty quantification.\n    :acceptance_criteria: Load user inputs and build a collection of cases that contain programmatically-perturbed inputs.\n    :status: accepted\n\nI/O Requirements\n++++++++++++++++\n\n.. req:: The case package shall have the ability to load user inputs and perform input validation checks.\n    :id: R_ARMI_CASE_CHECK\n    :subtype: io\n    :basis: Most workflows rely on this capability.\n    :acceptance_criteria: Load user inputs and perform validation checks.\n    :status: accepted\n"
  },
  {
    "path": "doc/qa_docs/srsd/cli_reqs.rst",
    "content": ".. _armi_cli:\n\nCommand Line Interface Package\n------------------------------\n\nThis section provides requirements for the :py:mod:`armi.cli` package. This package is\nresponsible for providing user entry points to an ARMI-based application as a Command Line Interface (CLI). This package allows for developers to create their own automated work flows including: case submission, user setting validation, data migrations, and more.\n\nFunctional Requirements\n+++++++++++++++++++++++\n\n.. req:: The cli package shall provide a generic CLI for developers to build their own CLI.\n    :id: R_ARMI_CLI_GEN\n    :basis: Provides extensibility of the system behavior for an application to implement analysis workflows.\n    :subtype: functional\n    :status: accepted\n    :acceptance_criteria: Create an entry point, pass it arguments, and invoke it.\n\nI/O Requirements\n++++++++++++++++\n\n.. req:: The cli package shall provide a basic CLI which allows users to start an ARMI simulation.\n    :id: R_ARMI_CLI_CS\n    :basis: This is relied upon for most users to submit jobs to a cluster.\n    :subtype: io\n    :status: accepted\n    :acceptance_criteria: Invoke an ARMI CLI.\n"
  },
  {
    "path": "doc/qa_docs/srsd/framework_reqs.rst",
    "content": ".. _armi_framework:\n\nFramework-Related Concepts\n--------------------------\n\nThis section provides the highest-level requirements for the ARMI framework. These requirements are\nspecific to the idea that ARMI is a framework, that allows for the connection of disparate scientific and\nnuclear engineer models. The four major pieces of the codebase covered by these requirements are:\n\n    - :py:mod:`armi.apps` - An ARMI simulation is controlled by an ARMI :py:class:`Application <armi.apps.App>`.\n    - :py:mod:`armi.plugins` - Each :py:class:`Application <armi.apps.App>` registers a list of :py:class:`Plugins <armi.plugins.ArmiPlugin>`.\n    - :py:mod:`armi.interfaces` - Each :py:class:`Plugin <armi.plugins.ArmiPlugin>` registers a list of :py:class:`Interface <armi.interfaces.Interface>`.\n    - :py:mod:`armi.operators` - The :py:class:`Operator <armi.operators.Operator>` contains a list of :py:class:`Interfaces <armi.interfaces.Interface>`, which are run in order at each time node.\n\nFunctional Requirements\n+++++++++++++++++++++++\n\n.. ## Note: These 12 requirements define ARMI at a high level. They will rarely change.\n\n.. req:: The operator package shall provide a means by which to communicate inputs and results between analysis plugins.\n    :id: R_ARMI_OPERATOR_COMM\n    :subtype: functional\n    :basis: This is a foundational design concept in ARMI and is what makes it a framework.\n    :acceptance_criteria: A plugin can access run input data and results from other plugins.\n    :status: accepted\n\n.. req:: The operator package shall allow tight coupling between analysis plugins.\n    :id: R_ARMI_OPERATOR_PHYSICS\n    :subtype: functional\n    :basis: Tight coupling is a mechanism that allows for simultaneous convergence of analysis results.\n    :acceptance_criteria: An operator can call each interface multiple times at a given time node, subject to some convergence criteria.\n    :status: accepted\n\n.. 
req:: The operator package shall provide a means to perform parallel computations.\n    :id: R_ARMI_OPERATOR_MPI\n    :subtype: functional\n    :basis: Parallel computations provide scalable solutions to computational performance.\n    :acceptance_criteria: An operator can execute logic dependent on its MPI rank.\n    :status: accepted\n\n.. req:: ARMI shall allow users to customize how time is discretized for modeling.\n    :id: R_ARMI_FW_HISTORY\n    :subtype: functional\n    :basis: Analysts will want to model the time evolution of reactors. And discretizing time is a common need in nearly all scientific modeling.\n    :acceptance_criteria: Specify number of cycles and burn steps and observe the interfaces are run at those time nodes.\n    :status: accepted\n\n.. req:: An application shall consist of a collection of plugins.\n    :id: R_ARMI_APP_PLUGINS\n    :subtype: functional\n    :basis: Plugins are the major mechanism for adding code to a simulation.\n    :acceptance_criteria: Construct an ARMI application from a collection of plugins.\n    :status: accepted\n\n.. req:: An operator shall be built from user settings.\n    :id: R_ARMI_OPERATOR_SETTINGS\n    :subtype: functional\n    :basis: Configuring an operator allows users to customize a simulation.\n    :acceptance_criteria: Construct an operator that depends on user settings.\n    :status: accepted\n\n.. req:: The operator package shall expose an ordered list of interfaces that is looped over at each time step.\n    :id: R_ARMI_OPERATOR_INTERFACES\n    :subtype: functional\n    :basis: Reactor modeling is controlled by looping over an ordered list of interfaces at each time node.\n    :acceptance_criteria: Show that interfaces are executed in order at each time step.\n    :status: accepted\n\n.. 
req:: The interface package shall allow code execution at important operational points in time.\n    :id: R_ARMI_INTERFACE\n    :subtype: functional\n    :basis: Defining code to be run at specific times allows users to control the reactor simulation and analysis.\n    :acceptance_criteria: Show that interfaces allow code to be executed at BOL, EOL, BOC, and EOC.\n    :status: accepted\n\n.. req:: The plugin module shall allow the creation of a plugin, which adds code to the application.\n    :id: R_ARMI_PLUGIN\n    :subtype: functional\n    :basis: The primary way developers will add code to the simulation is by writing an ARMI plugin.\n    :acceptance_criteria: Load a plugin into an application.\n    :status: accepted\n\n.. req:: Plugins shall add interfaces to the operator.\n    :id: R_ARMI_PLUGIN_INTERFACES\n    :subtype: functional\n    :basis: The mechanism by which plugins add code to the simulation is that plugins can register interfaces on the operator.\n    :acceptance_criteria: Register multiple interfaces from a given plugin.\n    :status: accepted\n\n.. req:: Plugins shall have the ability to add parameters to the reactor data model.\n    :id: R_ARMI_PLUGIN_PARAMS\n    :subtype: functional\n    :basis: An important feature of plugins is that they can add parameters to the reactor model, thus increasing the variety of physical values the simulations can track.\n    :acceptance_criteria: Register multiple parameters from a given plugin.\n    :status: accepted\n\n.. req:: Plugins shall have the ability to add custom settings to the simulation.\n    :id: R_ARMI_PLUGIN_SETTINGS\n    :subtype: functional\n    :basis: An important feature of plugins is that they can add settings that can be used to configure a simulation.\n    :acceptance_criteria: Add multiple settings from a given plugin.\n    :status: accepted\n\n.. ## Note: These 12 requirements define ARMI at a high level. They will rarely change.\n"
  },
  {
    "path": "doc/qa_docs/srsd/materials_reqs.rst",
    "content": ".. _armi_mats:\n\nMaterials Package\n-----------------\n\nThis section provides requirements for the :py:mod:`armi.materials` package within the framework, which contains ARMI's system for defining materials. The materials system in ARMI allows for an extreme amount of flexibility in defining materials with temperature-dependent properties like density, linear expansion factor, and the like.\n\nARMI also comes packaged with a small set of basic materials, though these are meant only as example materials and (because ARMI is open source) these materials can not include proprietary or classified information. As such, we explicitly forbid the use of the example ARMI materials in safety-related modeling and will not be writing requirements on those materials.\n\n\nFunctional Requirements\n+++++++++++++++++++++++\n\n.. req:: The materials package shall allow for material classes to be searched across packages in a defined namespace.\n    :id: R_ARMI_MAT_NAMESPACE\n    :subtype: functional\n    :basis: This is just a design choice in ARMI, to define how new material definitions are added to a simulation.\n    :acceptance_criteria: Import a material class from a package in the ARMI default namespace.\n    :status: accepted\n\n.. req:: The materials package shall allow for multiple material collections to be defined with an order of precedence in the case of duplicates.\n    :id: R_ARMI_MAT_ORDER\n    :subtype: functional\n    :basis: The ability to represent physical material properties is a basic need for nuclear modeling.\n    :acceptance_criteria: Only the preferred material class is returned when multiple material classes with the same name are defined.\n    :status: accepted\n\n.. 
req:: The materials package shall provide the capability to retrieve material properties at different temperatures.\n    :id: R_ARMI_MAT_PROPERTIES\n    :subtype: functional\n    :basis: The ability to represent physical material properties is a basic need for nuclear modeling.\n    :acceptance_criteria: Instantiate a Material instance and show that the instance has the appropriate method names defined and examine the methods signatures to ensure they allow for temperature inputs.\n    :status: accepted\n\n.. req:: The materials package shall allow for user-input to impact the materials in a component.\n    :id: R_ARMI_MAT_USER_INPUT\n    :subtype: functional\n    :basis: The ability to represent physical material properties is a basic need for nuclear modeling.\n    :acceptance_criteria: Instantiate a reactor from blueprints that uses the material modifications and show that the modifications are used.\n    :status: accepted\n\n.. req:: Materials shall generate nuclide mass fractions at instantiation.\n    :id: R_ARMI_MAT_FRACS\n    :subtype: functional\n    :basis: The ability to represent physical material properties is a basic need for nuclear modeling.\n    :acceptance_criteria: Show that the material mass fractions are populated when the object is created.\n    :status: accepted\n\n.. req:: The materials package shall provide a class for fluids that defines the thermal expansion coefficient as identically zero.\n    :id: R_ARMI_MAT_FLUID\n    :subtype: functional\n    :basis: Thermal expansion coefficients need to be zero for fluids so that fluid components cannot drive thermal expansion of their own or linked component dimensions.\n    :acceptance_criteria: Instantiate a Fluid material and show that its linear expansion is identically zero.\n    :status: accepted\n"
  },
  {
    "path": "doc/qa_docs/srsd/nucDirectory_reqs.rst",
    "content": ".. _armi_nuc_dirs:\n\nNuclide Directory Package\n-------------------------\n\nThis section provides requirements for the :py:mod:`armi.nucDirectory` package within the framework, which\nis responsible for defining elemental and isotopic information that is used for reactor physics evaluations.\n\nFunctional Requirements\n+++++++++++++++++++++++\n\n.. req:: The nucDirectory package shall provide an interface for querying basic data for elements of the periodic table.\n    :id: R_ARMI_ND_ELEMENTS\n    :subtype: functional\n    :basis: Element data is needed for converting between mass and number fractions, expanding elements into isotopes, and other tasks.\n    :acceptance_criteria: Query elements by Z, name, and symbol.\n    :status: accepted\n\n.. req:: The nucDirectory package shall provide an interface for querying basic data for important isotopes and isomers.\n    :id: R_ARMI_ND_ISOTOPES\n    :subtype: functional\n    :basis: Isotope data is used to aid in construction of cross-section generation models, to convert between mass and number fractions, and other tasks.\n    :acceptance_criteria: Query isotopes and isomers by name, label, MC2-3 ID, MCNP ID, and AAAZZZS ID.\n    :status: accepted\n\n.. req:: The nucDirectory package shall store data separately from code.\n    :id: R_ARMI_ND_DATA\n    :subtype: functional\n    :basis: Storing data separately from code is good practice in scientific programs.\n    :acceptance_criteria: The nucDirectory element, isotope, and isomer data is stored in plain text files in a folder next to the code.\n    :status: accepted\n"
  },
  {
    "path": "doc/qa_docs/srsd/nuclearDataIO_reqs.rst",
    "content": ".. _armi_nuc_data:\n\nNuclear Data I/O Package\n------------------------\n\nThis section provides requirements for the :py:mod:`armi.nuclearDataIO` package within the framework, which\nhandles reading and writing of standard interface files for reactor physics software (e.g., cross section data).\n\nFunctional Requirements\n+++++++++++++++++++++++\n\n.. req:: The nuclearDataIO package shall be capable of reading and writing ISOTXS files into and out of mutable data structures.\n    :id: R_ARMI_NUCDATA_ISOTXS\n    :subtype: functional\n    :basis: These files are the MC2 output format.\n    :acceptance_criteria: Read one or more ISOTXS files and its basic input data correctly, and correctly write that data back out to a single file.\n    :status: accepted\n\n.. req:: The nuclearDataIO package shall be capable of reading and writing GAMISO files into and out of mutable data structures.\n    :id: R_ARMI_NUCDATA_GAMISO\n    :subtype: functional\n    :basis: These files are generated by MCC-v3.\n    :acceptance_criteria: Read a GAMISO file and its basic input data correctly, and correctly write that data back out.\n    :status: accepted\n\n.. req:: The nuclearDataIO package shall be capable of reading and writing GEODST files into and out of mutable data structures.\n    :id: R_ARMI_NUCDATA_GEODST\n    :subtype: functional\n    :basis: These files are generated by DIF3D.\n    :acceptance_criteria: Read a GEODST file and its basic input data correctly, and correctly write that data back out.\n    :status: accepted\n\n.. req:: The nuclearDataIO package shall be capable of reading and writing DIF3D files into and out of mutable data structures.\n    :id: R_ARMI_NUCDATA_DIF3D\n    :subtype: functional\n    :basis: These files are used in DIF3D.\n    :acceptance_criteria: Read a DIF3D file and its basic input data correctly, and correctly write that data back out.\n    :status: accepted\n\n.. 
req:: The nuclearDataIO package shall be capable of reading and writing PMATRX files into and out of mutable data structures.\n    :id: R_ARMI_NUCDATA_PMATRX\n    :subtype: functional\n    :basis: These files are generated by MCC-v3 and used in GAMSOR.\n    :acceptance_criteria: Read a PMATRX file and its basic input data correctly, and correctly write that data back out.\n    :status: accepted\n\n.. req:: The nuclearDataIO package shall be capable of reading and writing DLAYXS files into and out of mutable data structures.\n    :id: R_ARMI_NUCDATA_DLAYXS\n    :subtype: functional\n    :basis: These files are used to generate kinetics parameters.\n    :acceptance_criteria: Read a DLAYXS file and its basic input data correctly, and correctly write that data back out.\n    :status: accepted\n\n.. req:: The nuclearDataIO package shall be able to compute macroscopic cross sections from microscopic cross sections and number densities.\n    :id: R_ARMI_NUCDATA_MACRO\n    :subtype: functional\n    :basis: Macroscopic cross sections are needed by many analysts.\n    :acceptance_criteria: Compute macroscopic cross sections from microscopic cross sections and number densities.\n    :status: accepted\n"
  },
  {
    "path": "doc/qa_docs/srsd/physics_reqs.rst",
    "content": ".. _armi_physics:\n\nPhysics Package\n---------------\n\nThis section provides requirements for the :py:mod:`armi.physics` package within the framework, which contains interfaces for important physics modeling and analysis in nuclear reactors. It is important to note that ARMI is a framework, and as such does not generally include the actual science or engineering calculations for these topics. For instance, ARMI has an interface for \"safety analysis\", but this interface is just a *place* for developers to implement their own safety analysis code. It would be inappropriate to include the actual science or engineering calculations for a detailed safety analysis of a particular reactor in ARMI because ARMI is meant only to house the code to let nuclear modeling and analysis work, not the analysis itself.\n\n\n\nFunctional Requirements\n+++++++++++++++++++++++\n\n.. ## globalFlux ######################\n\n.. req:: ARMI shall ensure that the computed block-wise power is consistent with the power specified in the reactor data model.\n    :id: R_ARMI_FLUX_CHECK_POWER\n    :subtype: functional\n    :status: accepted\n    :basis: This requirement ensures that neutronics solver scales the neutron flux appropriately such that the computed block-wise power captures the specified global power.\n    :acceptance_criteria: Test that throws an error when the summed block-wise powers does not match the specified total power.\n\n.. req:: ARMI shall provide an interface for querying options relevant to neutronics solvers.\n    :id: R_ARMI_FLUX_OPTIONS\n    :subtype: functional\n    :status: accepted\n    :basis: Reactor analysts will want to use popular neutronics solvers, e.g. DIF3D-Variant.\n    :acceptance_criteria: The interface correctly returns specified neutronics solver options.\n\n.. 
req:: ARMI shall allow modification of the reactor geometry when needed for neutronics solver execution.\n    :id: R_ARMI_FLUX_GEOM_TRANSFORM\n    :subtype: functional\n    :status: accepted\n    :basis: Axial expansion can cause a disjointed mesh which cannot be resolved by deterministic neutronics solvers.\n    :acceptance_criteria: Geometry transformations are performed before executing a neutronics solve.\n\n.. req:: ARMI shall calculate neutron reaction rates for a given block.\n    :id: R_ARMI_FLUX_RX_RATES\n    :subtype: functional\n    :status: accepted\n    :basis: This is a generic ARMI feature implemented to aid in calculating dose, converting results calculated on one mesh to another, and for comparing reaction rates against experiments.\n    :acceptance_criteria: Calculate accurate reaction rates for a given multigroup flux and cross section library for a wide collection of Blocks.\n\n.. req:: ARMI shall be able to calculate DPA and DPA rates from a multigroup neutron flux and DPA cross sections.\n    :id: R_ARMI_FLUX_DPA\n    :subtype: functional\n    :status: accepted\n    :basis: DPA rates are necessary for fuel performance calculations.\n    :acceptance_criteria: The DPA rate is calculated for a composite with an associated multi-group neutron flux.\n\n.. ## isotopicDepletion ######################\n\n.. req:: The isotopicDepletion package shall have the ability to generate cross-section tables from a CCCC-based library in a user-specified format.\n    :id: R_ARMI_DEPL_TABLES\n    :subtype: functional\n    :status: accepted\n    :basis: Depletion solvers require cross-sections to be supplied from external sources if not using built-in cross sections.\n    :acceptance_criteria: Produce a table with the specified formatting containing the appropriate cross sections.\n\n.. 
req:: The isotopicDepletion package shall provide a base class to track depletable composites.\n    :id: R_ARMI_DEPL_ABC\n    :subtype: functional\n    :status: accepted\n    :basis: Depletion analysis may want a way to track depletable composites.\n    :acceptance_criteria: Store and retrieve depletable objects.\n\n.. ## energyGroups ######################\n\n.. req:: The neutronics package shall provide the neutron energy group bounds for a given group structure.\n    :id: R_ARMI_EG_NE\n    :subtype: functional\n    :basis: The bounds define the energy groupings.\n    :acceptance_criteria: Return the correct energy bounds.\n    :status: accepted\n\n.. req:: The neutronics package shall return the energy group index which contains the fast energy threshold.\n    :id: R_ARMI_EG_FE\n    :subtype: functional\n    :basis: The energy groups are only useful if a developer can find the correct one easily.\n    :acceptance_criteria: Identify the correct energy group for a given energy threshold.\n    :status: accepted\n\n.. ## macroXSGenerationInterface ######################\n\n.. req:: The neutronics package shall be able to build macroscopic cross sections for all blocks.\n    :id: R_ARMI_MACRO_XS\n    :subtype: functional\n    :basis: Most steady-state neutronics workflows will rely on this capability.\n    :acceptance_criteria: Calculate the macroscopic cross sections for a block.\n    :status: accepted\n\n.. ## executers ######################\n\n.. req:: The executers module shall provide the ability to run external calculations on an ARMI reactor with configurable options.\n    :id: R_ARMI_EX\n    :subtype: functional\n    :basis: An ARMI plugin needs to be able to wrap an external executable.\n    :acceptance_criteria: Execute a mock external calculation based on an ARMI reactor.\n    :status: accepted\n\n\n.. ## fuelCycle ######################\n\n.. 
req:: The fuel cycle package shall allow for user-defined assembly shuffling logic to update the reactor model based on reactor state.\n    :id: R_ARMI_SHUFFLE\n    :subtype: functional\n    :basis: Shuffle operations can be based on assemblies' burnup state, which may not be known at the start of a run.\n    :acceptance_criteria: Execute user-defined shuffle operations based on a reactor model.\n    :status: accepted\n\n.. req:: The fuel cycle package shall be capable of leaving user-specified blocks in place during shuffling operations.\n    :id: R_ARMI_SHUFFLE_STATIONARY\n    :subtype: functional\n    :basis: It may be desirable to leave certain blocks, such as grid plates, in place.\n    :acceptance_criteria: Shuffle an assembly while leaving a specified block in place.\n    :status: accepted\n\n.. req:: A hexagonal assembly shall support rotating around the z-axis in 60 degree increments.\n    :id: R_ARMI_ROTATE_HEX\n    :subtype: functional\n    :basis: Rotation of assemblies is common during operation, and requires updating the location of physics data assigned on the assembly.\n    :acceptance_criteria: After rotating a hexagonal assembly, spatial data corresponds to rotating the original assembly data.\n    :status: accepted\n\n.. req:: The framework shall provide an algorithm for rotating hexagonal assemblies to equalize burnup.\n    :id: R_ARMI_ROTATE_HEX_BURNUP\n    :subtype: functional\n    :basis: Rotating of assemblies to minimize burnup helps maximize fuel utilization and reduces power peaking.\n    :acceptance_criteria: After rotating a hexagonal assembly, confirm the pin with the highest burnup is in the same sector as pin with the lowest power in the high burnup pin's ring.\n    :status: accepted\n\n.. ## crossSectionGroupManager ######################\n\n.. 
req:: The cross-section group manager package shall run before cross sections are calculated.\n    :id: R_ARMI_XSGM_FREQ\n    :subtype: functional\n    :basis: The cross section groups need to be up to date with the core state at the time that the Lattice Physics Interface is called.\n    :acceptance_criteria: Initiate the cross-section group manager by the same setting that initiates calculating cross sections. And ensure the cross-section group manager always runs before cross sections are calculated.\n    :status: accepted\n\n.. req:: The cross-section group manager package shall create separate collections of blocks for each combination of user-specified XS type and burnup and/or temperature group.\n    :id: R_ARMI_XSGM_CREATE_XS_GROUPS\n    :subtype: functional\n    :basis: This helps improve the performance of downstream cross section calculations.\n    :acceptance_criteria: Create cross section groups and their representative blocks.\n    :status: accepted\n\n.. req:: The cross-section group manager package shall provide routines to create representative blocks for each collection based on user-specified XS type and burnup and/or temperature group.\n    :id: R_ARMI_XSGM_CREATE_REPR_BLOCKS\n    :subtype: functional\n    :basis: The Lattice Physics Interface needs a representative block from which to generate a lattice physics input file.\n    :acceptance_criteria: Create representative blocks using volume-weighted averaging and custom cylindrical averaging.\n    :status: accepted\n"
  },
  {
    "path": "doc/qa_docs/srsd/reactors_reqs.rst",
    "content": ".. _armi_reactors:\n\nReactors Package\n----------------\n\nThis section provides requirements for the :py:mod:`armi.reactors` package within the framework, unsurprisingly this is the largest package in ARMI. In this package are sub-packages for fully defining a nuclear reactor, starting from blueprints and all the way through defining the full reactor data model. It is this reactor data object that is critical to the framework; this is how different reactor modeling tools share information.\n\n\n\nFunctional Requirements\n+++++++++++++++++++++++\n\n.. ## reactors ######################\n\n.. req:: The reactor data model shall contain one core and a collection of ex-core objects, all composites.\n    :id: R_ARMI_R\n    :status: accepted\n    :basis: A shared reactor data model is a fundamental concept in ARMI.\n    :acceptance_criteria: Build a reactor data model from a blueprint file, and show it has a core and a spent fuel pool.\n    :subtype: functional\n\n.. req:: Assemblies shall be retrievable from the core object by name and location.\n    :id: R_ARMI_R_GET_ASSEM\n    :status: accepted\n    :basis: Useful for analysis, particularly mechanical and control rod analysis.\n    :acceptance_criteria: Retrieve assemblies from the core by name and location.\n    :subtype: functional\n\n.. req:: The core shall be able to construct a mesh based on its blocks.\n    :id: R_ARMI_R_MESH\n    :status: accepted\n    :basis: Preservation of material and geometry boundaries is needed for accurate physics calculations.\n    :acceptance_criteria: Construct a mesh from a core object.\n    :subtype: functional\n\n.. req:: ARMI shall support third-core symmetry for hexagonal cores.\n    :id: R_ARMI_R_SYMM\n    :status: accepted\n    :basis: Symmetric model definitions allow for easier user setup and reduced computational expense.\n    :acceptance_criteria: Construct a core of full or 1/3-core symmetry.\n    :subtype: functional\n\n.. 
req:: The core shall be able to provide assemblies that are neighbors of a given assembly.\n    :id: R_ARMI_R_FIND_NEIGHBORS\n    :status: accepted\n    :basis: Useful for analysis, particularly mechanical and control rod analysis.\n    :acceptance_criteria: Return neighboring assemblies from a given assembly in a core.\n    :subtype: functional\n\n.. req:: ARMI shall provide an ex-core composite to represent spent fuel pools (SFP) for spent fuel assemblies.\n    :id: R_ARMI_SFP\n    :status: accepted\n    :basis: A SFP data model is a fundamental concept in modeling solid fuel reactors.\n    :acceptance_criteria: Build a reactor data model with a SFP, then move an assembly from the reactor core to the SFP and back.\n    :subtype: functional\n\n\n.. ## parameters ######################\n\n.. req:: The parameters package shall provide the capability to define parameters that store values of interest on any Composite.\n    :id: R_ARMI_PARAM\n    :status: accepted\n    :basis: The capability to define new parameters is a common need for downstream analysis or plugins.\n    :acceptance_criteria: Ensure that new parameters can be defined and accessed on a Reactor, Core, Assembly, Block, and Component.\n    :subtype: functional\n\n.. req:: The parameters package shall allow for some parameters to be defined such that they are not written to the database.\n    :id: R_ARMI_PARAM_DB\n    :status: accepted\n    :basis: Users will require some parameters to remain unwritten to the database file.\n    :acceptance_criteria: A parameter can be filtered from inclusion into the list of parameters written to the database.\n    :subtype: functional\n\n.. 
req:: The parameters package shall provide a way to signal if a parameter needs updating across multiple processes.\n    :id: R_ARMI_PARAM_PARALLEL\n    :status: accepted\n    :basis: Parameters updated on compute nodes must be propagated to the head node.\n    :acceptance_criteria: A parameter has an attribute which signals its last updated status among the processors.\n    :subtype: functional\n\n.. req:: The parameters package shall allow for a parameter to be serialized for reading and writing to database files.\n    :id: R_ARMI_PARAM_SERIALIZE\n    :status: accepted\n    :basis: Users need to be able to understand what parameters were involved during a given run after it is completed, both for QA purposes and to begin a new analysis using data from previous analyses.\n    :acceptance_criteria: The Serializer construct can pack and unpack parameter data.\n    :subtype: functional\n\n.. ## zones ######################\n\n.. req:: The zones module shall allow for a collection of reactor core locations (a Zone).\n    :id: R_ARMI_ZONE\n    :status: accepted\n    :basis: This is a basic feature of ARMI and is useful for reactivity coefficients analysis.\n    :acceptance_criteria: Store and retrieve locations from a zone that corresponds to a reactor. Also, store and retrieve multiple Zone objects from a Zones object.\n    :subtype: functional\n\n.. ## blocks ######################\n\n.. req:: The blocks module shall be able to homogenize the components of a hexagonal block.\n    :id: R_ARMI_BLOCK_HOMOG\n    :status: accepted\n    :basis: Homogenizing blocks can improve performance of the uniform mesh converter.\n    :acceptance_criteria: A homogenized hexagonal block has the same mass, dimensions, and pin locations as the block from which it is derived.\n    :subtype: functional\n\n.. 
req:: Blocks shall include information on their location.\n    :id: R_ARMI_BLOCK_POSI\n    :status: accepted\n    :basis: Simulations and post-simulation analysis both require block-level physical quantities.\n    :acceptance_criteria: Any block can be queried to get absolute location and position.\n    :subtype: functional\n\n.. req:: The blocks module shall define a hex-shaped block.\n    :id: R_ARMI_BLOCK_HEX\n    :status: accepted\n    :basis: Hexagonal blocks are used in some pin-based reactors.\n    :acceptance_criteria: Verify a block can be created that declares a hexagonal shape.\n    :subtype: functional\n\n.. req:: The blocks module shall return the number of pins in a block, when applicable.\n    :id: R_ARMI_BLOCK_NPINS\n    :status: accepted\n    :basis: This is a common need for analysis of pin-based reactors.\n    :acceptance_criteria: Return the number of pins in a valid block.\n    :subtype: functional\n\n.. ## assemblies ######################\n\n.. req:: The assemblies module shall define an assembly as a composite type that contains a collection of blocks.\n    :id: R_ARMI_ASSEM_BLOCKS\n    :status: accepted\n    :basis: ARMI must be able to represent assembly-based reactors.\n    :acceptance_criteria: Validate an assembly's type and the types of its children.\n    :subtype: functional\n\n.. req:: Assemblies shall include information on their location.\n    :id: R_ARMI_ASSEM_POSI\n    :status: accepted\n    :basis: Assemblies are an important part of pin-type reactor cores, and almost any analysis that uses assemblies will want to know the location of the assemblies.\n    :acceptance_criteria: Any assembly can be queried to get absolute location and position.\n    :subtype: functional\n\n.. ## flags ######################\n\n.. 
req:: The flags module shall provide unique identifiers (flags) to enable disambiguating composites.\n    :id: R_ARMI_FLAG_DEFINE\n    :subtype: functional\n    :basis: Flags are used to determine how objects should be handled.\n    :acceptance_criteria: No two existing flags have equivalence.\n    :status: accepted\n\n.. req:: The set of unique flags in a run shall be extensible without user knowledge of existing flags' values.\n    :id: R_ARMI_FLAG_EXTEND\n    :subtype: functional\n    :basis: Plugins are able to define their own flags.\n    :acceptance_criteria: After adding a new flag, no two flags have equivalence.\n    :status: accepted\n\n.. req:: Valid flags shall be convertible to and from strings.\n    :id: R_ARMI_FLAG_TO_STR\n    :subtype: functional\n    :basis: Flags need to be converted to strings for serialization.\n    :acceptance_criteria: A string corresponding to a defined flag is correctly converted to that flag, and show that the flag can be converted back to a string.\n    :status: accepted\n\n.. ## geometryConverters ######################\n\n.. req:: ARMI shall be able to convert a hexagonal one-third-core geometry to a full-core geometry, and back again.\n    :id: R_ARMI_THIRD_TO_FULL_CORE\n    :subtype: functional\n    :basis: Useful to improve modeling performance, if the analysis can accept the approximation.\n    :acceptance_criteria: Convert a hexagonal 1/3 core reactor to full, and back again.\n    :status: accepted\n\n.. req:: ARMI shall be able to add and remove assemblies along the 120 degree line in a 1/3 core reactor.\n    :id: R_ARMI_ADD_EDGE_ASSEMS\n    :subtype: functional\n    :basis: Helpful for analysis that are using 1/3 core hex reactors\n    :acceptance_criteria: Add and then remove assemblies in a 1/3 core reactor.\n    :status: accepted\n\n.. 
req:: ARMI shall be able to convert a hex core to a representative RZ core.\n    :id: R_ARMI_CONV_3DHEX_TO_2DRZ\n    :subtype: functional\n    :basis: Some downstream analysis requires a 2D R-Z geometry.\n    :acceptance_criteria: Convert a hex core into an RZ core.\n    :status: accepted\n\n.. ## axialExpansionChanger ######################\n\n.. req:: The axial expansion changer shall perform axial thermal expansion and contraction on solid components within a compatible ARMI assembly according to a given axial temperature distribution.\n    :id: R_ARMI_AXIAL_EXP_THERM\n    :subtype: functional\n    :basis: Axial expansion is used to conserve mass and appropriately capture the reactor state under temperature changes.\n    :acceptance_criteria: Perform thermal expansion due to an applied axial temperature distribution.\n    :status: accepted\n\n.. req:: The axial expansion changer shall perform axial expansion/contraction given a list of components and corresponding expansion coefficients.\n    :id: R_ARMI_AXIAL_EXP_PRESC\n    :subtype: functional\n    :basis: Axial expansion is used to conserve mass and appropriately capture the reactor state under temperature changes.\n    :acceptance_criteria: Perform axial expansion given a list of components from an assembly and corresponding expansion coefficients.\n    :status: accepted\n\n.. req:: The axial expansion changer shall perform expansion during core construction based on block heights at a user-specified temperature.\n    :id: R_ARMI_INP_COLD_HEIGHT\n    :subtype: functional\n    :basis: The typical workflow in ARMI applications is to transcribe component dimensions, which are generally given at room temperatures.\n    :acceptance_criteria: Perform axial expansion during core construction based on block heights at user-specified temperature.\n    :status: accepted\n\n.. 
req:: The axial expansion changer shall allow user-specified target axial expansion components on a given block.\n    :id: R_ARMI_MANUAL_TARG_COMP\n    :subtype: functional\n    :basis: The target axial expansion component influences the conservation of mass in a block.\n    :acceptance_criteria: Set a target component and verify it was set correctly.\n    :status: accepted\n\n.. req:: The axial expansion changer shall preserve the total height of a compatible ARMI assembly.\n    :id: R_ARMI_ASSEM_HEIGHT_PRES\n    :subtype: functional\n    :basis: Many physics solvers require that the total height of each assembly in the core is consistent.\n    :acceptance_criteria: Perform axial expansion and confirm that the height of the compatible ARMI assembly is preserved.\n    :status: accepted\n\n.. ## uniformMesh ######################\n\n.. req:: The uniform mesh converter shall make a copy of the reactor where the new reactor core has a uniform axial mesh.\n    :id: R_ARMI_UMC\n    :subtype: functional\n    :basis: This is used in the global flux calculations.\n    :acceptance_criteria: Convert a reactor to one where the core has a uniform axial mesh.\n    :status: accepted\n\n.. req:: The uniform mesh converter shall map select parameters from composites on the original mesh to composites on the new mesh.\n    :id: R_ARMI_UMC_PARAM_FORWARD\n    :subtype: functional\n    :basis: This is used in the global flux calculations.\n    :acceptance_criteria: Create a new reactor with the uniform mesh converter and ensure that the flux and power density block-level parameters are mapped appropriately to the new reactor.\n    :status: accepted\n\n.. 
req:: The uniform mesh converter shall map select parameters from composites on the new mesh to composites on the original mesh.\n    :id: R_ARMI_UMC_PARAM_BACKWARD\n    :subtype: functional\n    :basis: This is used in the global flux calculations.\n    :acceptance_criteria: Create a new reactor with the uniform mesh converter and ensure that the flux and power density block-level parameters are mapped appropriately back to the original reactor.\n    :status: accepted\n\n.. req:: The uniform mesh converter shall try to preserve the boundaries of fuel and control material.\n    :id: R_ARMI_UMC_NON_UNIFORM\n    :subtype: functional\n    :basis: Regions with extremely small axial size can cause difficulties for the deterministic neutronics solvers.\n    :acceptance_criteria: Create a reactor with slightly non-uniform mesh and verify after the uniform mesh converter the mesh is still non-uniform.\n    :status: accepted\n\n.. req:: The uniform mesh converter shall produce a uniform axial mesh with a size no smaller than a user-specified value.\n    :id: R_ARMI_UMC_MIN_MESH\n    :subtype: functional\n    :basis: Regions with extremely small axial size can cause difficulties for the deterministic neutronics solvers.\n    :acceptance_criteria: Create a reactor with a mesh that is smaller than the minimum size. After the uniform mesh conversion the new mesh conforms to the user-specified value.\n    :status: accepted\n\n.. ## blockConverters ######################\n\n.. 
req:: The block converter module shall be able to convert one or more given hexagonal blocks into a single user-configurable representative cylindrical block.\n    :id: R_ARMI_BLOCKCONV_HEX_TO_CYL\n    :subtype: functional\n    :basis: Needed, for example, for generating 1D cross sections for control rods.\n    :acceptance_criteria: Create a cylindrical block from one or more given hexagonal blocks and confirm that the cylindrical block has the appropriate volume fractions and temperatures.\n    :status: accepted\n\n.. req:: The block converter module shall be able to homogenize one component into another on a block.\n    :id: R_ARMI_BLOCKCONV\n    :subtype: functional\n    :basis: Needed, for example, for merging wire into coolant or gap into clad to simplify the model.\n    :acceptance_criteria: Homogenize one component into another from a given block and confirm the new components are appropriate.\n    :status: accepted\n\n.. ## components ######################\n\n.. req:: The components package shall define a composite corresponding to a physical piece of a reactor.\n    :id: R_ARMI_COMP_DEF\n    :subtype: functional\n    :basis: This is a fundamental design choice in ARMI, to describe a physical reactor.\n    :acceptance_criteria: Create components, and verify their attributes and parameters.\n    :status: accepted\n\n.. req:: A component's dimensions shall be calculable for any temperature.\n    :id: R_ARMI_COMP_DIMS\n    :subtype: functional\n    :basis: Users require access to dimensions at perturbed temperatures.\n    :acceptance_criteria: Calculate a components dimensions at a variety of temperatures.\n    :status: accepted\n\n.. 
req:: Components shall be able to compute dimensions, areas, and volumes that reflect its current state.\n    :id: R_ARMI_COMP_VOL\n    :subtype: functional\n    :basis: It is necessary to be able to compute areas and volumes when state changes.\n    :acceptance_criteria: Calculate volumes/areas, clear the cache, change the temperature, and recalculate volumes/areas.\n    :status: accepted\n\n.. req:: Components shall allow for constituent nuclide fractions to be modified.\n    :id: R_ARMI_COMP_NUCLIDE_FRACS\n    :subtype: functional\n    :basis: The ability to modify nuclide fractions is a common need in reactor analysis.\n    :acceptance_criteria: Modify nuclide fractions on a component.\n    :status: accepted\n\n.. req:: Components shall be made of one-and-only-one material or homogenized material.\n    :id: R_ARMI_COMP_1MAT\n    :subtype: functional\n    :basis: This is an ARMI design choice.\n    :acceptance_criteria: Create a component with a given material, and retrieve that material.\n    :status: accepted\n\n.. req:: Components shall be associated with material properties.\n    :id: R_ARMI_COMP_MAT\n    :subtype: functional\n    :basis: Users require access to material properties for a given component.\n    :acceptance_criteria: Get material properties from a component material.\n    :status: accepted\n\n.. req:: Components shall enable an ordering based on their outermost component dimensions.\n    :id: R_ARMI_COMP_ORDER\n    :subtype: functional\n    :basis: It is desirable to know which components are located physically inside of others.\n    :acceptance_criteria: Order a collection of components, based on their dimensions.\n    :status: accepted\n\n.. 
req:: The components package shall define components with several basic interrogable shapes.\n    :id: R_ARMI_COMP_SHAPES\n    :subtype: functional\n    :basis: Modeling real-world reactor geometries requires a variety of shapes.\n    :acceptance_criteria: Create a variety of components with different shapes and query their shape information.\n    :status: accepted\n\n.. req:: The components package shall handle radial thermal expansion of individual components.\n    :id: R_ARMI_COMP_EXPANSION\n    :subtype: functional\n    :basis: Users need the ability to model thermal expansion of a reactor core.\n    :acceptance_criteria: Calculate radial thermal expansion for a variety components.\n    :status: accepted\n\n.. req:: The components package shall allow the dimensions of fluid components to change based on the solid components adjacent to them.\n    :id: R_ARMI_COMP_FLUID\n    :subtype: functional\n    :basis: The shapes of fluid components are defined externally.\n    :acceptance_criteria: Determine the dimensions of a fluid component, bounded by solids.\n    :status: accepted\n\n.. ## composites ######################\n\n.. req:: The composites module shall define an arbitrary physical piece of a reactor with retrievable children in a hierarchical data model.\n    :id: R_ARMI_CMP\n    :subtype: functional\n    :basis: This is a fundamental aspect of the ARMI framework.\n    :acceptance_criteria: Create a composite with children.\n    :status: accepted\n\n.. req:: Composites shall be able to be associated with flags.\n    :id: R_ARMI_CMP_FLAG\n    :subtype: functional\n    :basis: Flags are used to provide context as to what a composite object represents.\n    :acceptance_criteria: Give a composite one or more flags.\n    :status: accepted\n\n.. 
req:: Composites shall have their own parameter collections.\n    :id: R_ARMI_CMP_PARAMS\n    :subtype: functional\n    :basis: Parameters should live on the part of the model which they describe.\n    :acceptance_criteria: Query a composite's parameter collection.\n    :status: accepted\n\n.. req:: The total mass of specified nuclides in a composite shall be retrievable.\n    :id: R_ARMI_CMP_GET_MASS\n    :subtype: functional\n    :basis: Downstream analysis will want to get masses.\n    :acceptance_criteria: Return the mass of specified nuclides in a composite.\n    :status: accepted\n\n.. req:: Composites shall allow synchronization of state across compute nodes.\n    :id: R_ARMI_CMP_MPI\n    :subtype: functional\n    :basis: Parallel executions of ARMI require synchronization of reactors on different nodes.\n    :acceptance_criteria: Synchronize a reactor's state across compute processes.\n    :status: accepted\n\n.. req:: The homogenized number densities of specified nuclides in a composite shall be retrievable.\n    :id: R_ARMI_CMP_GET_NDENS\n    :subtype: functional\n    :basis: The ability to retrieve homogenized number densities is a common need in reactor analysis.\n    :acceptance_criteria: Retrieve homogenized number densities of specified nuclides from a composite.\n    :status: accepted\n\n.. req:: Composites shall be able to return number densities for all their nuclides.\n    :id: R_ARMI_CMP_NUC\n    :subtype: functional\n    :basis: Analysts not using lumped fission products need this capability.\n    :acceptance_criteria: Return the number densities for all nuclides for a variety of composites.\n    :status: accepted\n\n.. ## grids ######################\n\n.. 
req:: The grids package shall allow for pieces of the reactor to be organized into regular-pitch hexagonal lattices (grids).\n    :id: R_ARMI_GRID_HEX\n    :subtype: functional\n    :basis: This is necessary for representing reactor geometry.\n    :acceptance_criteria: Construct a hex grid from pitch and number of rings, and return both.\n    :status: accepted\n\n.. req:: The grids package shall be able to represent 1/3-symmetry or full hexagonal grids.\n    :id: R_ARMI_GRID_SYMMETRY\n    :subtype: functional\n    :basis: Analysts frequently want symmetrical representations of a reactor for efficiency reasons.\n    :acceptance_criteria: Construct a 1/3 symmetry and full grid and show they have the correct number of constituents.\n    :status: accepted\n\n.. req:: A hexagonal grid with 1/3 symmetry shall be able to determine if a constituent object is in the first third.\n    :id: R_ARMI_GRID_SYMMETRY_LOC\n    :subtype: functional\n    :basis: Helpful for analysts doing analysis on third-core hex grids.\n    :acceptance_criteria: Correctly identify an object that is in the first 1/3 and one that is not.\n    :status: accepted\n\n.. req:: A hexagonal grid with 1/3 symmetry shall be capable of retrieving equivalent contents based on 1/3 symmetry.\n    :id: R_ARMI_GRID_EQUIVALENTS\n    :subtype: functional\n    :basis: This is necessary for shuffle of 1/3-core symmetry reactor models.\n    :acceptance_criteria: Return the zero or 2 elements which are in symmetric positions to a given element.\n    :status: accepted\n\n.. req:: Grids shall be able to nest.\n    :id: R_ARMI_GRID_NEST\n    :subtype: functional\n    :basis: This is typical of reactor geometries, for instance pin grids are nested inside of assembly grids.\n    :acceptance_criteria: Nest one grid within another.\n    :status: accepted\n\n.. 
req:: Hexagonal grids shall be either x-type or y-type.\n    :id: R_ARMI_GRID_HEX_TYPE\n    :subtype: functional\n    :basis: This is typical of reactor geometries, for instance pin grids inside of assembly grids.\n    :acceptance_criteria: Construct a \"points-up\" and a \"flats-up\" grid.\n    :status: accepted\n\n.. req:: The grids package shall be able to store components with multiplicity greater than 1.\n    :id: R_ARMI_GRID_MULT\n    :subtype: functional\n    :basis: The blueprints system allows for components with multiplicity greater than 1, when there are components that are compositionally identical.\n    :acceptance_criteria: Build a grid with components with multiplicity greater than 1.\n    :status: accepted\n\n.. req:: The grids package shall be able to return the coordinate location of any grid element in a global coordinate system.\n    :id: R_ARMI_GRID_GLOBAL_POS\n    :subtype: functional\n    :basis: This is a common need of a reactor analysis system.\n    :acceptance_criteria: Return a hexagonal grid element's location.\n    :status: accepted\n\n.. req:: The grids package shall be able to return the location of all instances of grid components with multiplicity greater than 1.\n    :id: R_ARMI_GRID_ELEM_LOC\n    :subtype: functional\n    :basis: This is a necessary result of having component multiplicity.\n    :acceptance_criteria: Return a hexagonal grid element's locations when its multiplicity is greater than 1.\n    :status: accepted\n\n\nI/O Requirements\n++++++++++++++++\n\n.. req:: The blueprints package shall allow the user to define a component using a custom text file.\n    :id: R_ARMI_BP_COMP\n    :subtype: io\n    :basis: This is a basic ARMI feature, that we have custom text blueprint files.\n    :acceptance_criteria: Read a blueprint file and verify a component was correctly created.\n    :status: accepted\n\n.. 
req:: The blueprints package shall allow the user to define a block using a custom text file.\n    :id: R_ARMI_BP_BLOCK\n    :subtype: io\n    :basis: This is a basic ARMI feature, that we have custom text blueprint files.\n    :acceptance_criteria: Read a blueprint file and verify a block was correctly created with shape, material, and input temperature.\n    :status: accepted\n\n.. req:: The blueprints package shall allow the user to define an assembly using a custom text file.\n    :id: R_ARMI_BP_ASSEM\n    :subtype: io\n    :basis: This is a basic ARMI feature, that we have custom text blueprint files.\n    :acceptance_criteria: Read a blueprint file and verify a assembly was correctly created.\n    :status: accepted\n\n.. req:: The blueprints package shall allow the user to define a core using a custom text file.\n    :id: R_ARMI_BP_CORE\n    :subtype: io\n    :basis: This is a basic ARMI feature, that we have custom text blueprint files.\n    :acceptance_criteria: Read a blueprint file and verify a core was correctly created.\n    :status: accepted\n\n.. req:: The blueprints package shall allow the user to define a lattice map in a reactor core using a custom text file.\n    :id: R_ARMI_BP_GRID\n    :subtype: io\n    :basis: This is a basic ARMI feature, that we have custom text blueprint files.\n    :acceptance_criteria: Read a blueprint file and verify a lattice grid was correctly created at the assembly and pin levels.\n    :status: accepted\n\n.. req:: The blueprints package shall allow the user to define a reactor, including both a core and a spent fuel pool using a custom text file.\n    :id: R_ARMI_BP_SYSTEMS\n    :subtype: io\n    :basis: This is a basic ARMI feature, that we have custom text blueprint files.\n    :acceptance_criteria: Read a blueprint file and verify a reactor was correctly created.\n    :status: accepted\n\n.. 
req:: The blueprints package shall allow the user to define isotopes which should be depleted.\n    :id: R_ARMI_BP_NUC_FLAGS\n    :subtype: io\n    :basis: This is a basic ARMI feature, that we have custom text blueprint files.\n    :acceptance_criteria: Read a blueprint file and verify the collection of depleted nuclide flags.\n    :status: accepted\n\n.. req:: The blueprints package shall allow the user to produce a valid blueprints file from an in-memory blueprint object.\n    :id: R_ARMI_BP_TO_DB\n    :subtype: io\n    :basis: The capability to export custom blueprints input files from an in-memory blueprints object is a fundamental ARMI feature.\n    :acceptance_criteria: Write a blueprint file from an in-memory blueprint object.\n    :status: accepted\n"
  },
  {
    "path": "doc/qa_docs/srsd/runLog_reqs.rst",
    "content": ".. _armi_log:\n\nRunLog Module\n-------------\n\nThis section provides requirements for the simulation logging module, :py:mod:`armi.runLog`, which manages \nthe reporting of messages to the user.\n\nFunctional Requirements\n+++++++++++++++++++++++\n\n.. req:: The runLog module shall allow for a simulation-wide log with user-specified verbosity.\n    :id: R_ARMI_LOG\n    :subtype: functional\n    :status: accepted\n    :basis: Logging simulation information is required for analysts to document and verify simulation results.\n    :acceptance_criteria: Messages are written to the log with specified verbosity.\n\nI/O Requirements\n++++++++++++++++\n\n.. req:: The runLog module shall allow logging to the screen, to a file, or both.\n    :id: R_ARMI_LOG_IO\n    :subtype: io\n    :status: accepted\n    :basis: Logging simulation information is required for analysts to document and verify simulation results.\n    :acceptance_criteria: Messages can be written to log files and log streams.\n\n.. req:: The runLog module shall allow log files to be combined from different processes.\n    :id: R_ARMI_LOG_MPI\n    :subtype: io\n    :status: accepted\n    :basis: Logging simulation information is required for analysts to document and verify simulation results.\n    :acceptance_criteria: Messages in different log files can be concatenated.\n"
  },
  {
    "path": "doc/qa_docs/srsd/settings_reqs.rst",
    "content": ".. _armi_settings:\n\nSettings Package\n----------------\n\nThis section provides requirements for the :py:mod:`armi.settings` package, which is responsible for providing a centralized means for users to configure an application. This package can serialize and deserialize user settings from a human-readable text file. When a simulation is being initialized, settings validation is performed to enforce things like type consistency, and to find incompatible settings. To make settings easier to understand and use, once a simulation has been initialized, settings become immutable.\n\nFunctional Requirements\n+++++++++++++++++++++++\n\n.. req:: The settings package shall allow the configuration of a simulation through user settings.\n    :id: R_ARMI_SETTING\n    :status: accepted\n    :basis: Settings are how the user configures their run.\n    :acceptance_criteria: Create and edit a set of settings that can be used to initialize a run.\n    :subtype: functional\n\n.. req:: All settings must have default values.\n    :id: R_ARMI_SETTINGS_DEFAULTS\n    :status: accepted\n    :basis: Enforcing a default recommendation for a setting allows for ease-of-use of the system\n    :acceptance_criteria: A setting cannot be created without providing a default value.\n    :subtype: functional\n\n.. req:: Settings shall support rules to validate and customize each setting's behavior.\n    :id: R_ARMI_SETTINGS_RULES\n    :status: accepted\n    :basis: Validation of user settings adds quality assurance pedigree and reduces user errors.\n    :acceptance_criteria: Query a setting and make decisions based on its value.\n    :subtype: functional\n\n.. 
req:: The settings package shall supply the total reactor power at each time step of a simulation.\n    :id: R_ARMI_SETTINGS_POWER\n    :status: accepted\n    :basis: Power history is needed by many downstream plugins and methodologies for normalization.\n    :acceptance_criteria: Retrieve the power fractions series from the operator and access the value at a given time step.\n    :subtype: functional\n\n.. req:: The settings package shall allow users to define basic metadata for the run.\n    :id: R_ARMI_SETTINGS_META\n    :status: accepted\n    :basis: Storing metadata in the settings file makes it easier for analysts to differentiate many settings files, and describe the simulations they configure.\n    :acceptance_criteria: Set and retrieve the basic metadata settings.\n    :subtype: functional\n\nI/O Requirements\n++++++++++++++++\n\n.. req:: The settings package shall use human-readable, plain-text files as input and output.\n    :id: R_ARMI_SETTINGS_IO_TXT\n    :status: accepted\n    :basis: Settings are how the user configures their run.\n    :acceptance_criteria: Show a settings object can be created from a text file with a well-specific format, and written back out to a text file.\n    :subtype: io\n"
  },
  {
    "path": "doc/qa_docs/srsd/utils_reqs.rst",
    "content": ".. _armi_utils:\n\nUtilities Package\n-----------------\n\nThis section provides requirements for the :py:mod:`armi.utils` package within the framework, which is one of the smaller high-level packages in ARMI. This package contains a small set of basic utilities which are meant to be generally useful in ARMI and in the wider ARMI ecosystem. While most of the code in this section does not rise to the level of a \"requirement\", some does.\n\n\n\nFunctional Requirements\n+++++++++++++++++++++++\n\n.. req:: ARMI shall provide a utility to convert mass densities and fractions to number densities.\n    :id: R_ARMI_UTIL_MASS2N_DENS\n    :subtype: functional\n    :basis: This is a widely used utility.\n    :acceptance_criteria: Provide a series of mass densities and fractions and verify the returned number densities.\n    :status: accepted\n\n.. req:: ARMI shall provide a utility to expand elemental mass fractions to natural nuclides.\n    :id: R_ARMI_UTIL_EXP_MASS_FRACS\n    :subtype: functional\n    :basis: This is a widely used utility.\n    :acceptance_criteria: Expand an element's mass into a list of it's naturally occurring nuclides and their corresponding mass fractions.\n    :status: accepted\n\n.. req:: ARMI shall provide a utility to format nuclides and densities into an MCNP material card.\n    :id: R_ARMI_UTIL_MCNP_MAT_CARD\n    :subtype: functional\n    :basis: This will be useful for downstream MCNP plugins.\n    :acceptance_criteria: Create an MCNP material card from a collection of densities.\n    :status: accepted\n"
  },
  {
    "path": "doc/qa_docs/srsd.rst",
    "content": "Software Requirements Specification Document (SRSD)\n===================================================\n\n\nPurpose\n-------\n\nThis Software Requirements Specification Document (SRSD) is prepared for the Advanced Reactor\nModeling Interface (ARMI) framework. The purpose of thisdocument is to define the functional\nrequirements, I/O requirements, relevant attributes, and applicable design constraints for ARMI.\n\nThis SRSD will be accompanied by a Software Design and Implementation Document (SDID), that\ndescribes how the requirements are implemented within the software and a Software Test Report (STR),\nthat documents the test plan and reporting of test results.\n\n.. _armi_srsd:\n\nIntroduction\n------------\n\nThe Advanced Reactor Modeling Interface (ARMI®) is an open-source framework for nuclear reactor\ndesign and analysis. Based on Python, ARMI provides a richly-featured toolset for connecting\ndisparate nuclear reactor modeling tools. ARMI is not meant to directly implement the science or\nengineering aspects of nuclear reactor modeling, but to help the wealth of existing models work\ntogether. It does this by providing easy-to-use tools for coordinating reactor simulation and\nanalysis workflows. 
A large part of the power of ARMI is that it provides a flexible in-memory data\nmodel of a reactor, which is used to pass information between different external tools.\n\nARMI:\n\n* Provides a hub-and-spoke mechanism to standardize communication and coupling between physics\n  kernels and the specialist analysts who use them,\n* Facilitates the creation and execution of detailed models and complex analysis methodologies,\n* Provides an ecosystem within which to rapidly and collaboratively build new analysis and physics\n  simulation capabilities, and\n* Provides useful utilities to assist in reactor development.\n\nBecause the ARMI software is just a framework for other, much larger nuclear models, ARMI does not\ncontain any proprietary or classified information. This allows ARMI to be open-source software. It\nalso greatly simplifies the software design and maintenance. For instance, ARMI does not have any\nperformance requirements. ARMI has been used to model nuclear reactors for over a decade, and in\nthat time the practical reality is that ARMI is quite light weight and >99% of the run time of a\nsimulation occurs in running other nuclear models.\n\nHere are some quick metrics for ARMI's requirements:\n\n* :need_count:`type=='req'` Requirements in ARMI\n\n  * :need_count:`type=='req' and status=='preliminary'` Preliminary Requirements\n  * :need_count:`type=='req' and status=='accepted'` Accepted Requirements\n  * :need_count:`type=='req' and len(implements_back)>0` Requirements with implementations\n  * :need_count:`type=='req' and len(tests_back)>0` Requirements with tests\n  * :need_count:`type=='test'` tests linked to Requirements\n  * :need_count:`type=='impl'` implementations linked to Requirements\n\n.. Note each of these docs has their own section header\n\n.. include:: srsd/framework_reqs.rst\n.. include:: srsd/bookkeeping_reqs.rst\n.. include:: srsd/cases_reqs.rst\n.. include:: srsd/cli_reqs.rst\n.. include:: srsd/materials_reqs.rst\n.. 
include:: srsd/nucDirectory_reqs.rst\n.. include:: srsd/nuclearDataIO_reqs.rst\n.. include:: srsd/physics_reqs.rst\n.. include:: srsd/reactors_reqs.rst\n.. include:: srsd/runLog_reqs.rst\n.. include:: srsd/settings_reqs.rst\n.. include:: srsd/utils_reqs.rst\n\n\nSoftware Attributes\n-------------------\n\nARMI is a Python-based framework, designed to help tie together various nuclear models, written in a\nvariety of languages. ARMI officially supports Python versions 3.9 and up.\n\nARMI is heavily tested and used in both Windows and Linux. More specifically, ARMI is always\ndesigned to work in the most modern Windows operating system (Windows 10 and Windows 11 currently).\nSimilarly, ARMI is designed to work with fairly modern versions of Ubuntu (22.04 and 24.04 at the\ntime of writing) and Red Hat (RHEL 7 and 8 currently).\n\nVersion control for ARMI is achieved using Git and is publicly hosted as open-source software on\nGitHub. To ensure ARMI remains portable and open-source, it only uses third-party libraries that are\nsimilarly fully open-source and that make no onerous demands on ARMI's distribution or legal status.\n\nARMI makes use of a huge suite of unit tests to cover the codebase. The tests are run via Continuous\nIntegration (CI) both internally and publicly. Every unit test must pass on every commit to the ARMI\nmain branch. Also, as part of our rigorous quality system, ARMI enforces tight controls on code\nstyle using Ruff as our code formatter and linter.\n"
  },
  {
    "path": "doc/qa_docs/str.rst",
    "content": "Software Test Report (STR)\n==========================\n\nPurpose and Scope\n-----------------\n\nThis document is the software test report (STR) for the ARMI framework.\n\n.. _ref_armi_default_test_criteria:\n\nDefault Test Criteria\n---------------------\n\nThe acceptance tests for ARMI requirements are very uniform. They are all unit tests. Unless the test states otherwise, all of the following test criteria apply to each ARMI requirement test. Any deviation from these standard conditions will be documented in  :numref:`Section %s <ref_armi_test_trace_matrix>` on a test-by-test basis.\n\nThis section defines some test attributes that all tests here have in common.\n\nTesting Approach\n^^^^^^^^^^^^^^^^\n\nSoftware verification testing shall be a part of the software development process and leverage continuous integration (CI) testing to demonstrate the correctness of the software during the development process. CI testing shall occur for each Pull Request (PR) and shall consist of unit testing. No PR will be merged into the main branch until all CI passes successfully.\n\nThe ARMI framework provides requirements with unit tests meeting acceptance criteria. Specifically, as the ARMI codebase cannot be run as a stand-alone application without external physics kernels or sensor data, any ARMI system tests will necessarily be limited. Thus, software projects leveraging ARMI capabilities are responsible for qualification of their end-use applications under their respective quality assurance commitments.\n\n\nPlanned Test Cases, Sequence, and Identification of Stages Required\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\nThe test cases are described in the test traceability matrix in\n:numref:`Section %s <ref_armi_test_trace_matrix>`. 
All  tests must be run, and the sequence can be\nin any order unless otherwise  specified for the test in\n:numref:`Section %s <ref_armi_test_trace_matrix>`.\n\nRequirements for Testing Logic Branches\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\nTests are written such that each test has only one primary logic path. For tests that do not conform\nto only one logic path, more information will be defined in  the test traceability section of the\nSTR  (:numref:`Section %s <ref_armi_test_trace_matrix>`) defining the logic flow in  more detail.\n\n.. _ref_armi_hardware_integration:\n\nRequirements for Hardware Integration\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\nThe ``ARMI`` software test will be run in modern versions Linux, Windows, and MacOS. Though for\ndocumentation brevity, we will only attach the verbose logging to this document for Linux.\n\nCriteria for Accepting the Software\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\nThe acceptance testing must pass with satisfactory results for all tests associated with\nrequirements in the :ref:`Software Requirements Specification  Document (SRSD) <armi_srsd>`\nfor the ``ARMI`` software.\n\n.. _ref_armi_input_data_requirements:\n\nNecessary Inputs to Run Test Cases\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\nIf inputs are necessary to run test cases or to return the system and data back to its original\nstate, the processes will be documented in the test  traceability matrix (TTM) in\n:numref:`Section %s <ref_armi_test_trace_matrix>`  (The TTM provides traceability for each test to\nthe required criteria). 
Otherwise, there are no special inputs necessary to run test cases or steps\nto  restore the system.\n\nRequired Ranges of Input Parameters for the Test Case(s)\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\nIf a test uses a range of inputs, then it will be documented in the TTM in\n:numref:`Section %s <ref_armi_test_trace_matrix>`. Otherwise, there are no required ranges of inputs\nfor the test case.\n\nExpected Results for the Test Case(s)\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\nIf a test expects a specific result, it will be documented in the TTM in\n:numref:`Section %s <ref_armi_test_trace_matrix>`. Otherwise, the expected test result is that no\nerror is raised, which constitutes a passing test.\n\nAcceptance Criteria for the Test Case(s)\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\nThe acceptance criteria for the test cases will be described. In cases where the SRSD requirement\nacceptance criteria is acceptable for the test case  acceptance criteria, the SRSD requirement\nacceptance criteria can be referenced  by default.\n\n.. _ref_armi_record_criteria:\n\nTest Record Criteria\n^^^^^^^^^^^^^^^^^^^^\n\nThe default values for the remaining 12 criteria pertaining to the test record are given in this\nsection below. A test record will be produced after the test  is run which contains pertinent\ninformation about the execution of the test. 
This test record will be saved as part of the software\ntest report (STR).\n\nSoftware Tested, Including System Software Used and All Versions\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\nThe ARMI version will be shown in the test record via standard output logs.\n\nCompute Platform and Hardware Used\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\nThe test record will reference the environment upon which the test is run. See\n:numref:`Section %s <ref_armi_hardware_integration>` for acceptable test environments.\n\nTest Equipment and Calibrations\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\nNot applicable for the ``ARMI`` software.\n\n.. _ref_armi_run_env:\n\nRuntime Environment Including System Software, and Language-Specific Environments\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\nThe runtime environment including the operating system, hardware, and software configuration will be\nspecified in the test report. If necessary, more detail will be provided for individual tests which\nutilize custom runtime environments or have dependencies such as custom compiler options.\n\nDate of Test\n\"\"\"\"\"\"\"\"\"\"\"\"\n\nThe date of the test execution is recorded in the output of the test.\n\nTester or Data Recorder\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\nAcceptance tests will be run via automation.\n\nSimulation Models Used\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\nIf simulation models beyond what is described elsewhere in the documentation (SRSD, SDID, or STR)\nare used the simulation models will be  documented in the test record. 
Otherwise, this test record\ncriterion is not  applicable to the test.\n\nTest Problems Identified During Test Planning\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\nIf specific problems such as textbooks or benchmarks are utilized for the test, then the test record\nwill reference those problems. Otherwise, test problems  are not applicable to the test record.\n\nAll Input Data and Output Results and Applicability\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\nThe input data will be recorded per :numref:`Section %s <ref_armi_input_data_requirements>`. Output\ndata will be provided as a pass or fail of the test as part of the test  record.\n\nAction Taken in connection with Any Deviations Noted\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\nNo actions will have been assumed to be taken based on the test other than pass or fail for the\ntest. If there are exceptions, to this statement, they will be noted in the TTM in\n:numref:`Section %s <ref_armi_test_trace_matrix>`.\n\nPerson Evaluating Test Result\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\nThe reviewer of the document will evaluate the test results. Any failing unit test should result in\na release failure.\n\nAcceptability\n\"\"\"\"\"\"\"\"\"\"\"\"\"\n\nThe test record states whether the tests pass or fail.\n\n\n.. _ref_armi_test_trace_matrix:\n\nTest Traceability Matrix\n------------------------\n\nThe requirements and associated tests which demonstrate acceptance of the codebase with the\nrequirements are in the :ref:`SRSD <armi_srsd>`. 
This section contains a list of all tests and will\nprovide information for  any non-default  criteria (see\n:numref:`Section %s <ref_armi_default_test_criteria>` for default criteria).\n\nHere are some quick metrics for the requirement tests in ARMI:\n\n* :need_count:`type=='req' and status=='accepted'` Accepted Requirements in ARMI\n\n  * :need_count:`type=='req' and status=='accepted' and len(tests_back)>0` Accepted Requirements with tests\n  * :need_count:`type=='test' and id.startswith('T_ARMI')` tests linked to Requirements\n\nAnd here is a full listing of all the tests in ARMI, that are tied to requirements:\n\n.. needextract::\n  :types: test\n  :filter: id.startswith('T_ARMI_')\n\n\nTest Results Report\n-------------------\n\nThis section provides the results of the test case runs for this release of ARMI software.\n\n.. _ref_armi_test_env:\n\nTesting Environment\n^^^^^^^^^^^^^^^^^^^\n\nThis section describes the relevant environment under which the tests were run as required by\n:numref:`Section %s <ref_armi_run_env>`. Note that individual test  records have the option to\ndefine additional environment information.\n\nSystem Information\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\nThe logged operating system and processor information proves what environment the software was\ntested on:\n\n.. exec::\n    from armi.bookkeeping.report.reportingUtils import getSystemInfo\n\n    return getSystemInfo().replace(\"\\n\", \"\\n\\n\")\n\n\nPython Version and Packages\n+++++++++++++++++++++++++++\n\n.. exec::\n    from pip._internal.operations.freeze import freeze\n\n    return \"\\n\\n\".join(list(freeze()))\n\n\n.. _ref_armi_software_date:\n\nSoftware Tested and Date\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\nThe software tested and date of testing are below:\n\n.. 
exec::\n    import os\n    import sys\n    from datetime import datetime\n    from armi import __version__ as armiVersion\n\n    armiCommit = str(os.environ[\"GIT_COMMIT\"]).strip()\n\n    txt = [f\"Date: {datetime.now().strftime('%Y-%m-%d')}\"]\n    txt.append(f\"Python version: {sys.version}\")\n    txt.append(f\"ARMI version: {armiVersion}\")\n    if armiCommit:\n        txt.append(f\"ARMI commit: {armiCommit}\")\n\n    return \"\\n\\n\".join(txt)\n\n\nRecord of Test Cases\n^^^^^^^^^^^^^^^^^^^^\n\nThis section includes the resulting test record for each test which together with\n:numref:`Section %s <ref_armi_test_env>` satisfies the criteria necessary for the creation of the\ntest record defined in :numref:`Section %s <ref_armi_record_criteria>`.\n\n.. needtable:: Acceptance test results\n   :types: test\n   :columns: id, title, result\n   :filter: id.startswith('T_ARMI_')\n   :style_row: needs_[[copy('result')]]\n   :colwidths: 30,50,10\n   :class: longtable\n\nAppendix A Pytest Verbose Output\n--------------------------------\n\nShown here is the verbose output from pytest.\n\nNote that if a test says \"skipped\" in the first table below (serial unit tests), then it will appear in the \"MPI-enabled unit tests\" sections below. Some tests can be run in serial and parallel, but some can only be run in parallel. The preference in ARMI is to be explicit about which are which, as long as all the tests are run at least once.\n\nSerial unit tests:\n\n.. test-results:: ../test_results.xml\n\nMPI-enabled unit tests:\n\n.. test-results:: ../test_results_mpi1.xml\n.. test-results:: ../test_results_mpi2.xml\n.. test-results:: ../test_results_mpi3.xml"
  },
  {
    "path": "doc/readme.rst",
    "content": ".. include:: ../README.rst"
  },
  {
    "path": "doc/release/index.rst",
    "content": "#############\nRelease Notes\n#############\n\nYou can find a simplified version of the ARMI `Release Notes here <https://github.com/terrapower/armi/releases>`_. However, to meet our rigorous quality processes, you can look in the :doc:`/qa_docs/scr/index` section for the fully detailed software change log.\n"
  },
  {
    "path": "doc/skip_str.py",
    "content": "# Copyright 2025 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nA simple helper script to create dummy data files for the STR.\n\nIf the user wants to build the docs without going through the hassle of running the testing, they\ncan run this simple script which will create some placeholder files for the STR:\n\n* pytest_verbose.log\n* test_results.xml\n* test_results_mpi1.xml\n* test_results_mpi2.xml\n* test_results_mpi3.xml\n\n\"\"\"\n\n\ndef main():\n    # skip build the STR, if you are running locally\n    with open(\"pytest_verbose.log\", \"w\") as f:\n        f.write(\"skipping STR\")\n\n    fileNames = [f\"test_results_mpi{i}.xml\" for i in range(1, 4)]\n    fileNames.append(\"test_results.xml\")\n    for fileName in fileNames:\n        with open(fileName, \"w\") as f:\n            f.write(\"<metadata></metadata>\")\n\n\nif __name__ == \"__main__\":\n    main()\n"
  },
  {
    "path": "doc/tutorials/data_model.nblink",
    "content": "{\n\t\"path\": \"../../armi/tests/tutorials/data_model.ipynb\"\n}"
  },
  {
    "path": "doc/tutorials/index.rst",
    "content": ".. _armi-tutorials:\n\n#########\nTutorials\n#########\n\nYou should have ARMI installed and operational by this point. The following tutorials demonstrate in more detail how to\ninteract with ARMI.\n\n--------------\n\n.. toctree::\n    :maxdepth: 2\n    :numbered:\n\n    nuclide_demo.ipynb\n    materials_demo.ipynb\n    walkthrough_inputs.rst\n    walkthrough_lwr_inputs.rst\n    data_model.ipynb\n    making_your_first_app.rst\n    param_sweep.ipynb\n    pin-rotations.ipynb\n"
  },
  {
    "path": "doc/tutorials/making_your_first_app.rst",
    "content": "..\n    Note that this file makes use of Python files in a ``armi-example-app`` folder\n    so that they can be put under testing.\n\n.. _armi-make-first-app:\n\n********************************\nMaking your first ARMI-based App\n********************************\n\nIn this tutorial we will build a nuclear analysis application that runs (dummy) neutron\nflux and thermal/hydraulics calculations. Applications that do real analysis can be\nmodeled after this starting point. A complete, working version of this application can\nbe found `here <https://github.com/terrapower/armi-example-app>`_.\n\nWe'll assume you have the :doc:`ARMI Framework installed </user/user_install>` already.\nYou can make sure it is ready by running the following command in a shell prompt::\n\n    (armi) $ python -c \"import armi;armi.configure()\"\n\nYou should see an ARMI splash-screen and an ARMI version print out. If you do, you are ready\nto proceed.\n\n.. tip:: If you are having trouble getting it installed, see :ref:`getting-help`. You may\n    need to ensure your ``PYTHONPATH`` variable includes the armi installation directory.\n\n.. note:: This tutorial is a companion for the :doc:`/developer/making_armi_based_apps`\n    developer documentation.\n\nStarting a new app\n==================\nARMI-based applications can take on many forms, depending on your workflow. Examples may include:\n\n* Application and plugins together under one folder\n* Application in one folder, plugins in separate ones\n\nWe will build an application that contains one plugin that runs\nneutronics and thermal hydraulics in one folder. This architecture will be a good starting\npoint for many projects, and can always be separated if needed.\n\nFrom the command line, ``cd`` into a new directory where you'd like to store your\napplication code. 
Make a folder structure that works as a `normal Python package\n<https://packaging.python.org/tutorials/packaging-projects/>`_, and create some empty\nfiles for us to fill in, like this::\n\n    my_armi_project/\n        myapp/\n            __init__.py\n            __main__.py\n            app.py\n            plugin.py\n            fluxSolver.py\n            materials.py\n            thermalSolver.py\n        doc/\n        pyproject.toml\n        README.md\n        LICENSE.md\n\n\nThese files are:\n\n* The outer :file:`my_armi_project` root directory is a container for your app. The name\n  does not matter to ARMI; you can rename it to anything.\n\n* The inner :file:`myapp` directory is the actual Python package for your app. Its name is\n  the Python package name you will use to import anything inside (e.g. ``myapp.plugin``).\n\n* :file:`myapp/__init__.py` tells Python that this directory is a Python package. Code\n  in here runs whenever anything in the package is imported.\n\n* :file:`myapp/__main__.py` registers the application with the ARMI framework\n  and provides one or more entry points for users of your app (including you!)\n  to start running it. Since code here runs when the package is used as a\n  main, it generally performs any app-specific configuration.\n\n* :file:`myapp/app.py` contains the actual app registration code that will be called by\n  :file:`__main__.py`. 
This can be named anything as long as it is consistent with the\n  registration code.\n\n* :file:`myapp/plugin.py` contains the code that defines the physics plugins we will create\n\n* :file:`myapp/fluxSolver.py` contains the flux solver\n\n* :file:`myapp/thermalSolver.py` contains the thermal/hydraulics solver\n\n* :file:`pyproject.toml` the `python package installation file\n  <https://packaging.python.org/en/latest/flow/>`_ to help users install your\n  application.\n\n* :file:`README.md` and :file:`LICENSE.md` are an optional description and license of your\n  application that would be prominently featured, e.g. in a GitHub repo, if you were to\n  put it there.\n\n* :file:`doc/` is an optional folder where your application documentation source may go.\n  If you choose to use Sphinx you can run ``sphinx-quickstart`` in that folder to begin\n  documentation.\n\nRegistering the app with ARMI\n=============================\nThe ARMI Framework contains features to run the \"main loop\" of typical applications. In\norder to get access to these, we must register our new app with the ARMI framework. To do\nthis, we put the following code in the top-level :file:`__main__.py` module:\n\n.. literalinclude:: armi-example-app/myapp/__main__.py\n    :language: python\n    :caption: ``myapp/__main__.py``\n    :start-after: tutorial-configure-start\n    :end-before: tutorial-configure-end\n\nSimilar code will be needed in scripts or other code where you would like your app to be used.\n\n.. tip:: You may find it appropriate to use the plugin registration mechanism in some cases\n    rather than the app registration. More info on plugins vs. apps coming soon.\n\nDefining the app class\n======================\nWe define our app in the :file:`myapp/app.py` module. For this example, the app class is\nrelatively small: it will just register our one custom plugin. We will actually create\nthe plugin shortly.\n\n.. admonition:: Apps vs. plugins vs. 
interfaces\n\n    ARMI-based methodologies are broken down into three layers of abstraction. Apps are\n    collections of plugins intended to perform analysis on a certain type of reactor.\n    Plugins are independent and mixable collections of relatively arbitrary code that\n    might bring in special materials, contain certain engineering methodologies, and/or\n    Interfaces with one or more physics kernels. See :doc:`/developer/guide` for more\n    info on architecture.\n\n.. literalinclude:: armi-example-app/myapp/app.py\n    :language: python\n    :caption: ``myapp/app.py``\n\n\nDefining the physics plugin\n===========================\nNow we will create the plugin that will coordinate our dummy physics modules.\n\n.. admonition:: What are plugins again?\n\n    Plugins are the basic modular building block of ARMI-based apps. In some cases, one\n    plugin will be associated with one physics kernel (like COBRA or MCNP). This is a\n    reasonable practice when you expect to be mixing and matching various combinations of\n    plugins between related teams. It is also possible to have a plugin that performs a\n    whole cacophony of analyses using multiple codes, which some smaller research teams\n    may find preferable. The flexibility is very broad.\n\n    See :py:mod:`armi.plugins` more for info.\n\nPlugin code can exist in any directory structure in an app. In this app we\nput it in the :file:`myapp/plugin.py` file.\n\n.. note:: For \"serious\" plugins, we recommend mirroring the ``armi/physics/[subphysics]``\n    structure of the ARMI Framework :py:mod:`physics plugin subpackage <armi.physics>`.\n\nWe will start the plugin by pointing to the two physics kernels we wish to register. We\nhook them in and tell ARMI the ``ORDER`` they should be run in based on the built-in\n``STACK_ORDER`` attribute (defined and discussed :py:class:`here\n<armi.interfaces.STACK_ORDER>`).  
We will come back to this plugin definition later on to\nadd a little more to the plugin.\n\n\n.. literalinclude:: armi-example-app/myapp/plugin.py\n    :caption: ``myapp/plugin.py``\n    :language: python\n\n\nDefining custom settings\n========================\nAn important facet of the above plugin is that it takes custom Settings, and has some\nvalidation built in for those ``Setting`` values. That is, the plugin registers new\nsettings that can go in the settings file, and help the user define how the simulation\nruns.\n\nThe following example boiler plate code defines three settings. We define two simple\nnumber settings (inlet and outlet temperatures), and we use :py:class:`Query \n<armi.settings.settingsValidation .Query>` to define validation on those settings. Here,\nthe validation isn't very exciting, we just make sure the temperatures are above zero.\nThat's not particularly physically meaningful, but serves as a simple example. The next\nsetting is a little more complicated, we define a setting ``myAppVersion`` that defines\na specific version of our app that this setting file is valid for. And if you try to run\na different version you get a nasty warning printed to the screen.\n\n.. literalinclude:: armi-example-app/myapp/settings.py\n    :caption: ``myapp/settings.py``\n    :language: python\n\n\nCreating the physics kernels\n============================\nSo far we have basically been weaving an administrative thread to tell ARMI about the code\nwe want to run. Now we finally get to write the guts of the code that actually does\nsomething. In your real app, this code will run your own industrial or research code, or\nperform your own methodology.  Here we just have it make up dummy values representing flux\nand temperatures.\n\nMaking the (dummy) flux kernel\n------------------------------\nIn a previous tutorial, we made a function that sets a dummy flux to all parts of the core\nbased on a radial distance from the origin. 
Here we will reuse that code but package it\nmore formally so that ARMI can actually run it for us from a user perspective.\n\nThe interface is responsible largely for scheduling activities to run at various time\npoints. For a flux calculation, we want it to compute at every single time node, so we use\nthe :py:meth:`armi.interfaces.Interface.interactEveryNode` hook.\n\nThese interaction hooks can call arbitrarily complex code. The code could, for example:\n\n* Run an external executable locally\n* Submit an external code to a cloud HPC and wait for it to complete\n* Run an internal physics tool\n\nHere it just does a tiny bit of math locally.\n\n.. literalinclude:: armi-example-app/myapp/fluxSolver.py\n    :caption: ``myapp/fluxSolver.py``\n    :language: python\n\n\n\nMaking the thermal/hydraulics kernel\n------------------------------------------\nSince we told the ARMI plugin to schedule the flux solver before thermal/hydraulics solver\nvia the ``ORDER`` attribute, we can depend on there being up-to-date block-level ``power``\nstate data loaded onto the ARMI reactor by the time this thermal/hydraulics solver gets\ncalled by the ARMI main loop.\n\nWe'll make a somewhat meaningful (but still totally academic) flow solver here that uses\nenergy conservation to determine an idealized coolant flow rate. To do this it will\ncompute the total power produced by each assembly to get the required mass flow rate and\nthen apply that mass flow rate from the bottom of the assembly to the top, computing a\nblock-level temperature (and flow velocity) distribution as we go.\n\n.. math::\n\n    \\dot{Q} = \\dot{m} C_p \\Delta T\n\n.. literalinclude:: armi-example-app/myapp/thermalSolver.py\n    :caption: ``myapp/thermalSolver.py``\n    :language: python\n\n\n\nAdding entry points\n===================\nIn order to call our application directly, we need to add the :file:`__main__.py` file to\nthe package. 
We could add all manner of :py:mod:`entry points <armi.cli.entryPoint>` here\nfor different operations we want our application to perform. If you want to add\n:doc:`your own entry points </developer/entrypoints>`, you have to register them with the\n:py:meth:`armi.plugins.ArmiPlugin.defineEntryPoints` hook. For now, we can just inherit\nfrom the default ARMI entry points (including ``run``) by adding the following code\nto what we already have in :file:`myapp/__main__.py`:\n\n.. literalinclude:: armi-example-app/myapp/__main__.py\n    :language: python\n    :caption: ``myapp/__main__.py``\n    :start-after: tutorial-entry-point-start\n    :end-before: tutorial-entry-point-end\n\n.. tip:: Entry points are phenomenal places to put useful analysis scripts\n    that are limited in scope to the scope of the application.\n\nRunning the app and debugging\n=============================\nWe are now ready to execute our application. Even though it still contains an issue, we\nwill run it now to get a feel for the iterative debugging process (sometimes lovingly\ncalled ARMI whack-a-mole).\n\nWe must make sure our ``PYTHONPATH`` contains both the armi framework itself as well as\nthe directory that contains our app. For testing, an example value for this might be::\n\n    $ export PYTHONPATH=/path/to/armi:/path/to/my_armi_project\n\n.. admonition:: Windows tip\n\n    If you're using Windows, the slashes will be the other way, you use ``set`` instead of\n    ``export``, and you use ``;`` to separate entries (or just use the GUI).\n\n.. admonition:: Submodule tip\n\n    In development, we have found it convenient to use git submodules to contain the armi\n    framework and pointers to other plugins you may need. If you do this, you can set\n    the ``sys.path`` directly in the ``__main__`` file and not have to worry about\n    ``PYTHONPATH`` nearly as much.\n\n\nMake a run directory with some input files in it. 
You can use the same SFR input files\nwe've used in previous tutorials for starters (but quickly transition to your own inputs\nfor your own interests!).\n\nHere are the files you can download into the run directory.\n\n* :download:`Blueprints <../../armi/testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml>`\n* :download:`Settings <../../armi/testing/reactors/anl-afci-177/anl-afci-177.yaml>`\n* :download:`Core map <../../armi/testing/reactors/anl-afci-177/anl-afci-177-coreMap.yaml>`\n* :download:`Fuel management <../../armi/testing/reactors/anl-afci-177/anl-afci-177-fuelManagement.py>`\n\n\nThen, run your app!::\n\n    (armi) $ python -m myapp run anl-afci-177.yaml\n\nThe code will run for a while and you will see your physics plugins in the interface\nstack, but will run into an error::\n\n    NotImplementedError: Material Sodium does not implement heatCapacity\n\nThe included academic Sodium material in the ARMI material library doesn't have any heat\ncapacity! Here we can either add heat capacity to the material and submit a pull request\nto include it in the ARMI Framework (preferred for generic things), or make our own\nmaterial and register it through the plugin.\n\n.. admonition:: Yet another way\n\n    You could alternatively make a separate plugin that only has your team's special\n    material properties.\n\nAdding a new material\n---------------------\nLet's just add a subclass of sodium in our plugin that has a heat capacity defined. Make\nyour new material in a new module called :file:`myapp/materials.py`:\n\n.. literalinclude:: armi-example-app/myapp/materials.py\n    :caption: ``myapp/materials.py``\n    :language: python\n\nBut wait! Now there are **two** materials with the name *Sodium* in ARMI. Which will be\nchosen? ARMI uses a namespace order controlled by\n:py:func:`armi.materials.setMaterialNamespaceOrder` which can be set either\nprogrammatically (in an app) or at runtime (via the ``materialNamespaceOrder`` user\nsetting). 
In our case, we want to set it at the app level, so we will yet again add\nmore to the :file:`myapp/__main__.py` file:\n\n.. literalinclude:: armi-example-app/myapp/__main__.py\n    :language: python\n    :caption: ``myapp/__main__.py``\n    :start-after: tutorial-material-start\n    :end-before: tutorial-material-end\n\n\n.. admonition:: Why ``__main__.py``?\n\n    We put this line in ``__main__.py`` rather than ``__init__.py`` so it only activates\n    when we're explicitly running our app. If we put it in ``__init__`` it would\n    change the order even in situations where code from anywhere within our app\n    was imported, possibly conflicting with another app's needs.\n\n\nNow ARMI should find our new updated Sodium material and get past that error.  Run it once\nagain::\n\n    (armi) $ python -m myapp run anl-afci-177.yaml\n\n.. tip:: You may want to pipe the output to a log file for convenient viewing with\n    a command like ``python -m myapp run anl-afci-177.yaml > run.stdout``\n\nChecking the output\n===================\nSeveral output files should have been created in the run directory from that past command.\nMost important is the ``anl-afci-177.h5`` HDF5 binary database file. You can use this file\nto bring the ARMI state back to any state point from the run for analysis.\n\nTo visualize the output in a 3D graphics program like `ParaView <https://www.paraview.org/Wiki/ParaView>`_\nor `VisIT <https://wci.llnl.gov/simulation/computer-codes/visit>`_,\nyou can run the ARMI ``vis-file`` entry point, like this::\n\n    (armi) $ python -m myapp vis-file -f vtk anl-afci-177.h5\n\nThis creates several ``VTK`` files covering different time steps and levels of abstraction\n(assembly vs. block params). If you load up the block file and plot one of the output\nparams (such as ``THcoolantOutletT`` you can see the outlet temperature going nicely\nfrom 360 |deg|\\ C  to 510 |deg|\\ C (as expected given our simple TH solver).\n\n\n.. 
figure:: /.static/anl-acfi-177-coolant-temperature.jpg\n    :alt: The coolant temperature as seen in ParaView viewing the VTK file.\n    :align: center\n\n    The coolant temperature as seen in ParaView viewing the VTK file.\n\n\n.. admonition:: Fancy XDMF format\n\n    The ``-f xdmf`` produces `XDMF files <http://xdmf.org/index.php/XDMF_Model_and_Format>`_\n    that are lighter-weight than VTK, just pointing the visualization\n    program to the data in the primary ARMI HDF5 file. However it is slightly more\n    finicky and has slightly less support in some tools (looking at VisIT).\n\nA generic description of the outputs is provided in :doc:`/user/outputs`.\n\nYou can add your own outputs from your plugins.\n\nBonus: Ad-hoc UserPlugins\n=========================\nIt will often be the case that you are not building an ARMI application from scratch, but\nyou are using a pre-existing ARMI application. And while working with this (potentially\nquite large) ARMI application, you want to add a one-off change. Maybe you want to make a\nspecial plot during the run, or do a quick \"what-if\" modification of the \n:py:class:`Reactor <armi.reactor.reactors.Reactor>`. These things come up for scientific\nor engineering work: a quick one-off idea you want to test out and probably only use once.\n\nThis is where a :py:class:`UserPlugin <armi.plugins.UserPlugin>` comes in.\n\nThere are two parts to defining a :py:class:`UserPlugin <armi.plugins.UserPlugin>`:\n\nDefine the UserPlugin in Python\n-------------------------------\nThis can be done by subclassing :py:class:`armi.plugins.UserPlugin`:\n\n.. 
code-block:: python\n\n    from armi import plugins\n    from armi.reactor.flags import Flags\n\n    class UserPluginExample(plugins.UserPlugin):\n        \"\"\"\n        This plugin flex-tests the onProcessCoreLoading() hook, and\n        arbitrarily adds \"1\" to the power ever each fuel block.\n        \"\"\"\n\n        @staticmethod\n        @plugins.HOOKIMPL\n        def onProcessCoreLoading(core, cs, dbLoad):\n        for b in core.getBlocks(Flags.FUEL):\n            b.p.power += 1.0\n\nIn most ways, ``UserPluginExample`` above is just a normal\n:py:class:`ArmiPlugin <armi.plugins.ArmiPlugin>`. You can implement any of the normal\n:py:class:`ArmiPlugin <armi.plugins.ArmiPlugin>` hooks, like: \n:py:meth:`exposeInterfaces() <armi.plugins.ArmiPlugin.exposeInterfaces>`,\n:py:meth:`defineParameters() <armi.plugins.ArmiPlugin.defineParameters>`, and so on. The\n:py:class:`UserPlugin <armi.plugins.UserPlugin>` class is more limited than a\nregular plugin though, you cannot implement:\n\n* :py:meth:`armi.plugins.ArmiPlugin.defineParameters`\n* :py:meth:`armi.plugins.ArmiPlugin.defineParameterRenames`\n* :py:meth:`armi.plugins.ArmiPlugin.defineSettings`\n* :py:meth:`armi.plugins.ArmiPlugin.defineSettingsValidators`\n\nDefine a list of UserPlugins in the Settings File\n-------------------------------------------------\nIn order for your simulation to know about your custom\n:py:class:`UserPlugin <armi.plugins.UserPlugin>` you need to add a line to your Settings\nfile:\n\n.. 
code-block::\n\n  userPlugins:\n    - armi.tests.test_user_plugins.UserPlugin0\n    - //path/to/my/pluginz.py:UserPlugin1\n    - C:\\\\path\\to\\my\\pluginZ.py:UserPlugin2\n\nWhat we have above is actually an example of including three different plugins via your\nsettings YAML file:\n\n* By providing a ``.``-separated ARMI import path (if you included your :py:class:`UserPlugin <armi.plugins.UserPlugin>` in your commit.\n* By providing a full Linux/Unix/MacOS file path, then a colon (``:``), followed by the class name.\n* By providing a full Windows file path, then a colon (``:``), followed by the class name.\n\n.. |deg| unicode:: U+00B0\n"
  },
  {
    "path": "doc/tutorials/materials_demo.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"# The ARMI Material Library\\n\",\n    \"\\n\",\n    \"While *nuclides* are the microscopic building blocks of nature, their collection into *materials* is what we interact with at the engineering scale. The ARMI Framework provides a `Material` class, which has a composition (how many of each nuclide are in the material), and a variety of thermomechanical properties (many of which are temperature dependent), such as:\\n\",\n    \"\\n\",\n    \"* Mass density \\n\",\n    \"* Heat capacity\\n\",\n    \"* Linear or volumetric thermal expansion\\n\",\n    \"* Thermal conductivity\\n\",\n    \"* Solidus/liquidus temperature\\n\",\n    \"\\n\",\n    \"and so on. \\n\",\n    \"\\n\",\n    \"Many of these properties are widely available in the literature for fresh materials. As materials are irradiated, the properties tend to change in complex ways. Material objects can be extended to account for such changes. \\n\",\n    \"\\n\",\n    \"The ARMI Framework comes with a small set of example material definitions. These are generally quite incomplete (often missing temperature dependence), and are of academic quality at best. To do engineering design calculations, users of ARMI are expected to make or otherwise prepare materials. As the ecosystem grows, we hope the material library will mature.\\n\",\n    \"\\n\",\n    \"In any case, here we will explore the use of `Material`s. 
Let's get an instance of the Uranium Oxide material.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"from armi.materials import uraniumOxide\\n\",\n    \"\\n\",\n    \"uo2 = uraniumOxide.UO2()\\n\",\n    \"density500 = uo2.density(Tc=500)\\n\",\n    \"print(f\\\"The density of UO2 @ T = 500C is {density500:.2f} g/cc\\\")\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"Taking a look at the composition\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"print(uo2.massFrac)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"The mass fractions of a material, plus its mass density, fully define the composition. Conversions between number density/fraction and mass density/fraction are handled on the next level up (on `Component`s), which we will explore soon.\\n\",\n    \"\\n\",\n    \"ARMI automatically thermally-expands materials based on their coefficients of linear expansion. For instance, a piece of Uranium Oxide that's 10 cm at room temperature would be longer at 500 C according to the formula:\\n\",\n    \"\\n\",\n    \"\\\\begin{equation}\\n\",\n    \"\\\\frac{\\\\Delta L}{L_0} = \\\\alpha \\\\Delta T\\n\",\n    \"\\\\end{equation}\\n\",\n    \"\\n\",\n    \"On the reactor model, this all happens behind the scenes. But here at the material library level, we can see it in detail. 
\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"L0 = 10.0\\n\",\n    \"dLL = uo2.linearExpansionFactor(500, 25)\\n\",\n    \"L = L0 * (1 + dLL)\\n\",\n    \"print(f\\\"Hot length is {L:.4f} cm\\\")\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"Let's plot the heat capacity as a function of temperature in K.\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": null,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"import matplotlib.pyplot as plt\\n\",\n    \"import numpy as np\\n\",\n    \"\\n\",\n    \"Tk = np.linspace(300, 2000)\\n\",\n    \"heatCapacity = [uo2.heatCapacity(Tk=ti) for ti in Tk]\\n\",\n    \"plt.plot(Tk, heatCapacity)\\n\",\n    \"plt.title(\\\"$UO_2$ heat capacity vs. temperature\\\")\\n\",\n    \"plt.xlabel(\\\"Temperature (K)\\\")\\n\",\n    \"plt.ylabel(\\\"Heat capacity (J/kg-K)\\\")\\n\",\n    \"plt.grid(ls=\\\"--\\\", alpha=0.3)\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"Different physics plugins require different properties to be defined. For pure neutronics runs, mass density and composition is enough. But for thermal/hydraulics runs, heat capacity and thermal conductivity is needed for solids, and more is needed for coolants. As irradiation models are investigated, creep, corrosion, porosity, swelling, and other factors will be necessary. 
\"\n   ]\n  }\n ],\n \"metadata\": {\n  \"kernelspec\": {\n   \"display_name\": \"Python 3 (ipykernel)\",\n   \"language\": \"python\",\n   \"name\": \"python3\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 3\n   },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython3\",\n   \"version\": \"3.9.13\"\n  },\n  \"varInspector\": {\n   \"cols\": {\n    \"lenName\": 16,\n    \"lenType\": 16,\n    \"lenVar\": 40\n   },\n   \"kernels_config\": {\n    \"python\": {\n     \"delete_cmd_postfix\": \"\",\n     \"delete_cmd_prefix\": \"del \",\n     \"library\": \"var_list.py\",\n     \"varRefreshCmd\": \"print(var_dic_list())\"\n    },\n    \"r\": {\n     \"delete_cmd_postfix\": \") \",\n     \"delete_cmd_prefix\": \"rm(\",\n     \"library\": \"var_list.r\",\n     \"varRefreshCmd\": \"cat(var_dic_list()) \"\n    }\n   },\n   \"types_to_exclude\": [\n    \"module\",\n    \"function\",\n    \"builtin_function_or_method\",\n    \"instance\",\n    \"_Feature\"\n   ],\n   \"window_display\": false\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 2\n}\n"
  },
  {
    "path": "doc/tutorials/nuclide_demo.ipynb",
    "content": "{\n \"cells\": [\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"# The ARMI Nuclide and Element Packages\\n\",\n    \"\\n\",\n    \"One of the key features that the ARMI framework offers is access to nuclide data across an application (recall: a *nuclide* is a particular isotope of an element. Iron-56, Uranium-238, and Boron-10 are all nuclides). This is specifically useful for nuclear engineers so that manual look-ups of nuclide attributes on sources like Wikipedia, Chart of the Nuclides, etc. are not needed. \\n\",\n    \"\\n\",\n    \"The available attributes for each nuclide are:\\n\",\n    \"\\n\",\n    \"- Atomic weight/mass, in amu\\n\",\n    \"- Natural abundance\\n\",\n    \"- Atomic number, Z\\n\",\n    \"- Mass number, A\\n\",\n    \"- Half-life\\n\",\n    \"- Neutron yield from spontaneous fission\\n\",\n    \"    \\n\",\n    \"Accessing the nuclide data begins with importing the nuclide bases, and optionally, the elements packages:\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 7,\n   \"metadata\": {},\n   \"outputs\": [],\n   \"source\": [\n    \"from armi.nucDirectory.elements import Elements\\n\",\n    \"from armi.nucDirectory.nuclideBases import NuclideBases\\n\",\n    \"\\n\",\n    \"elements = Elements()\\n\",\n    \"elements.factory()\\n\",\n    \"nuclideBases = NuclideBases()\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"One these packages are imported, there are several module-level global dictionaries that are important to know about, since these are likely what you will be working with when implementing code that requires nuclide data or just when performing data look-ups:\\n\",\n    \"\\n\",\n    \"**Nuclide Bases Global Dictionaries**\\n\",\n    \"\\n\",\n    \"- nuclideBases.byName\\n\",\n    \"- nuclideBases.DBName\\n\",\n    \"- nuclideBases.byLabel\\n\",\n    \"- nuclideBases.byMcc2Id\\n\",\n    \"- 
nuclideBases.byMcc3Id\\n\",\n    \"- nuclideBases.byMcnpId\\n\",\n    \"- nuclideBases.byAAAZZZSId\\n\",\n    \"   \\n\",\n    \"**Elements Global Dictionaries**\\n\",\n    \"\\n\",\n    \"- elements.byZ\\n\",\n    \"- elements.bySymbol\\n\",\n    \"- elements.byName\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Accessing Individual Nuclide Data/Attributes\\n\",\n    \"\\n\",\n    \"Here we will explore retrieving data from a couple nuclides and showing the coupling between nuclide and element definitions. For these examples, let's try to answer the following questions:\\n\",\n    \"\\n\",\n    \"- How many total nuclides and elements are defined in the framework?\\n\",\n    \"- What is atomic weight of a selected nuclide?\\n\",\n    \"- What is the natural abundance a selected nuclide?\\n\",\n    \"- Are there any spontaneous fission neutrons for a selected nuclide?\\n\",\n    \"- What is the half-life in seconds for a selected nuclide?\\n\",\n    \"- How many other nuclides for the same element exist for a selected nuclide?\\n\",\n    \"    \"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"### How many total nuclides and elements are defined in the framework?\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 8,\n   \"metadata\": {},\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"Number of elements defined in the framework: 120\\n\",\n      \"\\n\",\n      \"Number of nuclides defined in the framework: 4706\\n\",\n      \"   - Number of nuclides of type `<class 'armi.nucDirectory.nuclideBases.NuclideBase'>`: 4614\\n\",\n      \"   - Number of nuclides of type `<class 'armi.nucDirectory.nuclideBases.NaturalNuclideBase'>`: 84\\n\",\n      \"   - Number of nuclides of type `<class 'armi.nucDirectory.nuclideBases.DummyNuclideBase'>`: 2\\n\",\n      \"   - Number of nuclides 
of type `<class 'armi.nucDirectory.nuclideBases.LumpNuclideBase'>`: 6\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"import collections\\n\",\n    \"\\n\",\n    \"print(f\\\"Number of elements defined in the framework: {len(elements.byZ.values())}\\\")\\n\",\n    \"print(\\\"\\\")\\n\",\n    \"print(f\\\"Number of nuclides defined in the framework: {len(nuclideBases.instances)}\\\")\\n\",\n    \"nucsByType = collections.defaultdict(list)\\n\",\n    \"for n in nuclideBases.instances:\\n\",\n    \"    nucsByType[type(n)].append(n)\\n\",\n    \"\\n\",\n    \"for typ, nucs in nucsByType.items():\\n\",\n    \"    print(f\\\"   - Number of nuclides of type `{typ}`: {len(nucs)}\\\")\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"### Looking up nuclide and elemental data for U-235\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 9,\n   \"metadata\": {},\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"<NuclideBase U235:  Z:92, A:235, S:0, W:2.350439e+02, Label:U235>, HL:2.22160758861e+16, Abund:7.204000e-03>\\n\",\n      \"Atomic Weight (amu): 235.043929425\\n\",\n      \"Natural Abundance: 0.007204\\n\",\n      \"Spontaneous Fission Neutron Yield: 1.87\\n\",\n      \"Half-life (seconds): 2.22160758861e+16\\n\",\n      \"\\n\",\n      \"Other nuclides for Uranium:\\n\",\n      \"    - <NaturalNuclideBase U:  Z:92, W:2.380289e+02, Label:U>\\n\",\n      \"    - <NuclideBase U215:  Z:92, A:215, S:0, W:2.150262e+02, Label:U215>, HL:7.00000000000e-04, Abund:0.000000e+00>\\n\",\n      \"    - <NuclideBase U216:  Z:92, A:216, S:0, W:2.160240e+02, Label:U216>, HL:4.50000000000e-03, Abund:0.000000e+00>\\n\",\n      \"    - <NuclideBase U217:  Z:92, A:217, S:0, W:2.170244e+02, Label:U217>, HL:1.60000000000e-02, Abund:0.000000e+00>\\n\",\n      \"    - <NuclideBase U218:  Z:92, A:218, S:0, W:2.180235e+02, Label:U218>, 
HL:6.50000000000e-04, Abund:0.000000e+00>\\n\",\n      \"    - <NuclideBase U219:  Z:92, A:219, S:0, W:2.190249e+02, Label:U219>, HL:6.00000000000e-05, Abund:0.000000e+00>\\n\",\n      \"    - <NuclideBase U220:  Z:92, A:220, S:0, W:2.200247e+02, Label:U220>, HL:1.11110000000e+01, Abund:0.000000e+00>\\n\",\n      \"    - <NuclideBase U221:  Z:92, A:221, S:0, W:2.210264e+02, Label:U221>, HL:6.60000000000e-07, Abund:0.000000e+00>\\n\",\n      \"    - <NuclideBase U222:  Z:92, A:222, S:0, W:2.220261e+02, Label:U222>, HL:4.70000000000e-06, Abund:0.000000e+00>\\n\",\n      \"    - <NuclideBase U223:  Z:92, A:223, S:0, W:2.230277e+02, Label:U223>, HL:1.80000000000e-05, Abund:0.000000e+00>\\n\",\n      \"    - <NuclideBase U224:  Z:92, A:224, S:0, W:2.240276e+02, Label:U224>, HL:8.40000000000e-04, Abund:0.000000e+00>\\n\",\n      \"    - <NuclideBase U225:  Z:92, A:225, S:0, W:2.250294e+02, Label:U225>, HL:6.90000000000e-02, Abund:0.000000e+00>\\n\",\n      \"    - <NuclideBase U226:  Z:92, A:226, S:0, W:2.260293e+02, Label:U226>, HL:2.68000000000e-01, Abund:0.000000e+00>\\n\",\n      \"    - <NuclideBase U227:  Z:92, A:227, S:0, W:2.270312e+02, Label:U227>, HL:6.60000000000e+01, Abund:0.000000e+00>\\n\",\n      \"    - <NuclideBase U228:  Z:92, A:228, S:0, W:2.280314e+02, Label:U228>, HL:5.46000000000e+02, Abund:0.000000e+00>\\n\",\n      \"    - <NuclideBase U229:  Z:92, A:229, S:0, W:2.290335e+02, Label:U229>, HL:3.48000000000e+03, Abund:0.000000e+00>\\n\",\n      \"    - <NuclideBase U230:  Z:92, A:230, S:0, W:2.300339e+02, Label:U230>, HL:1.74787200000e+06, Abund:0.000000e+00>\\n\",\n      \"    - <NuclideBase U231:  Z:92, A:231, S:0, W:2.310363e+02, Label:U231>, HL:3.62880000000e+05, Abund:0.000000e+00>\\n\",\n      \"    - <NuclideBase U232:  Z:92, A:232, S:0, W:2.320372e+02, Label:U232>, HL:2.17427219965e+09, Abund:0.000000e+00>\\n\",\n      \"    - <NuclideBase U233:  Z:92, A:233, S:0, W:2.330396e+02, Label:U233>, HL:5.02354704590e+12, Abund:0.000000e+00>\\n\",\n 
     \"    - <NuclideBase U234:  Z:92, A:234, S:0, W:2.340410e+02, Label:U234>, HL:7.74722532676e+12, Abund:5.400000e-05>\\n\",\n      \"    - <NuclideBase U235:  Z:92, A:235, S:0, W:2.350439e+02, Label:U235>, HL:2.22160758861e+16, Abund:7.204000e-03>\\n\",\n      \"    - <NuclideBase U235M:  Z:92, A:235, S:1, W:2.350439e+02, Label:U23F>, HL:1.56000000000e+03, Abund:0.000000e+00>\\n\",\n      \"    - <NuclideBase U236:  Z:92, A:236, S:0, W:2.360456e+02, Label:U236>, HL:7.39063206325e+14, Abund:0.000000e+00>\\n\",\n      \"    - <NuclideBase U237:  Z:92, A:237, S:0, W:2.370487e+02, Label:U237>, HL:5.83372800000e+05, Abund:0.000000e+00>\\n\",\n      \"    - <NuclideBase U238:  Z:92, A:238, S:0, W:2.380508e+02, Label:U238>, HL:1.40996345254e+17, Abund:9.927420e-01>\\n\",\n      \"    - <NuclideBase U239:  Z:92, A:239, S:0, W:2.390543e+02, Label:U239>, HL:1.40700000000e+03, Abund:0.000000e+00>\\n\",\n      \"    - <NuclideBase U240:  Z:92, A:240, S:0, W:2.400566e+02, Label:U240>, HL:5.07600000000e+04, Abund:0.000000e+00>\\n\",\n      \"    - <NuclideBase U241:  Z:92, A:241, S:0, W:2.410603e+02, Label:U241>, HL:inf            , Abund:0.000000e+00>\\n\",\n      \"    - <NuclideBase U242:  Z:92, A:242, S:0, W:2.420629e+02, Label:U242>, HL:1.00800000000e+03, Abund:0.000000e+00>\\n\",\n      \"    - <NuclideBase U243:  Z:92, A:243, S:0, W:2.430674e+02, Label:U243>, HL:inf            , Abund:0.000000e+00>\\n\",\n      \"    - <NuclideBase U244:  Z:92, A:244, S:0, W:2.440679e+02, Label:U244>, HL:inf            , Abund:0.000000e+00>\\n\",\n      \"    - <NuclideBase U245:  Z:92, A:245, S:0, W:2.450708e+02, Label:U245>, HL:inf            , Abund:0.000000e+00>\\n\",\n      \"    - <NuclideBase U246:  Z:92, A:246, S:0, W:2.460702e+02, Label:U246>, HL:inf            , Abund:0.000000e+00>\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"u235 = nuclideBases.byName[\\\"U235\\\"]\\n\",\n    \"\\n\",\n    \"print(u235)\\n\",\n    \"print(f\\\"Atomic Weight (amu): 
{u235.weight}\\\")\\n\",\n    \"print(f\\\"Natural Abundance: {u235.abundance}\\\")\\n\",\n    \"print(f\\\"Spontaneous Fission Neutron Yield: {u235.nuSF}\\\")\\n\",\n    \"print(f\\\"Half-life (seconds): {u235.halflife}\\\")\\n\",\n    \"print(\\\"\\\")\\n\",\n    \"print(f\\\"Other nuclides for {elements.byZ[u235.z].name}:\\\")\\n\",\n    \"for n in elements.byZ[u235.z].nuclides:\\n\",\n    \"    print(f\\\"    - {n}\\\")\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"### Looking up nuclide and elemental data for Li-7\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 10,\n   \"metadata\": {},\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"<NuclideBase LI7:  Z:3, A:7, S:0, W:7.016004e+00, Label:LI07>, HL:inf            , Abund:9.241000e-01>\\n\",\n      \"Atomic Weight (amu): 7.01600439548\\n\",\n      \"Natural Abundance: 0.92410004\\n\",\n      \"Spontaneous Fission Neutron Yield: 0.0\\n\",\n      \"Half-life (seconds): inf\\n\",\n      \"\\n\",\n      \"Other nuclides for Lithium:\\n\",\n      \"    - <NaturalNuclideBase LI:  Z:3, W:6.940038e+00, Label:LI>\\n\",\n      \"    - <NuclideBase LI3:  Z:3, A:3, S:0, W:3.030775e+00, Label:LI03>, HL:1.11110000000e+01, Abund:0.000000e+00>\\n\",\n      \"    - <NuclideBase LI4:  Z:3, A:4, S:0, W:4.027185e+00, Label:LI04>, HL:inf            , Abund:0.000000e+00>\\n\",\n      \"    - <NuclideBase LI5:  Z:3, A:5, S:0, W:5.012538e+00, Label:LI05>, HL:3.70924971603e-22, Abund:0.000000e+00>\\n\",\n      \"    - <NuclideBase LI6:  Z:3, A:6, S:0, W:6.015123e+00, Label:LI06>, HL:inf            , Abund:7.590000e-02>\\n\",\n      \"    - <NuclideBase LI7:  Z:3, A:7, S:0, W:7.016004e+00, Label:LI07>, HL:inf            , Abund:9.241000e-01>\\n\",\n      \"    - <NuclideBase LI8:  Z:3, A:8, S:0, W:8.022488e+00, Label:LI08>, HL:8.39900000000e-01, Abund:0.000000e+00>\\n\",\n      \"    - 
<NuclideBase LI9:  Z:3, A:9, S:0, W:9.026789e+00, Label:LI09>, HL:1.78300000000e-01, Abund:0.000000e+00>\\n\",\n      \"    - <NuclideBase LI10:  Z:3, A:10, S:0, W:1.003548e+01, Label:LI10>, HL:inf            , Abund:0.000000e+00>\\n\",\n      \"    - <NuclideBase LI11:  Z:3, A:11, S:0, W:1.104380e+01, Label:LI11>, HL:8.75000000000e-03, Abund:0.000000e+00>\\n\",\n      \"    - <NuclideBase LI12:  Z:3, A:12, S:0, W:1.205378e+01, Label:LI12>, HL:inf            , Abund:0.000000e+00>\\n\",\n      \"    - <NuclideBase LI13:  Z:3, A:13, S:0, W:1.306117e+01, Label:LI13>, HL:1.11110000000e+01, Abund:0.000000e+00>\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"li7 = nuclideBases.byName[\\\"LI7\\\"]\\n\",\n    \"\\n\",\n    \"print(li7)\\n\",\n    \"print(f\\\"Atomic Weight (amu): {li7.weight}\\\")\\n\",\n    \"print(f\\\"Natural Abundance: {li7.abundance}\\\")\\n\",\n    \"print(f\\\"Spontaneous Fission Neutron Yield: {li7.nuSF}\\\")\\n\",\n    \"print(f\\\"Half-life (seconds): {li7.halflife}\\\")\\n\",\n    \"print(\\\"\\\")\\n\",\n    \"print(f\\\"Other nuclides for {elements.byZ[li7.z].name}:\\\")\\n\",\n    \"for n in elements.byZ[li7.z].nuclides:\\n\",\n    \"    print(f\\\"    - {n}\\\")\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"### Exploring elemental Lithium data\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 11,\n   \"metadata\": {},\n   \"outputs\": [\n    {\n     \"name\": \"stdout\",\n     \"output_type\": \"stream\",\n     \"text\": [\n      \"<Element  LI (Z=3), Lithium, ChemicalGroup.ALKALI_METAL, ChemicalPhase.SOLID>\\n\",\n      \"\\n\",\n      \"Average Atomic weight: 6.940037501798687\\n\",\n      \"Is Naturally Occurring?: True\\n\",\n      \"Is a Heavy Metal Atom?: False\\n\"\n     ]\n    }\n   ],\n   \"source\": [\n    \"liElement = elements.bySymbol[\\\"LI\\\"]\\n\",\n    \"\\n\",\n    \"print(liElement)\\n\",\n    \"print(\\\"\\\")\\n\",\n    \"print(f\\\"Average 
Atomic weight: {liElement.standardWeight}\\\")\\n\",\n    \"print(f\\\"Is Naturally Occurring?: {liElement.isNaturallyOccurring()}\\\")\\n\",\n    \"print(f\\\"Is a Heavy Metal Atom?: {liElement.isHeavyMetal()}\\\")\"\n   ]\n  },\n  {\n   \"cell_type\": \"markdown\",\n   \"metadata\": {},\n   \"source\": [\n    \"## Plotting the Chart of the Nuclides\"\n   ]\n  },\n  {\n   \"cell_type\": \"code\",\n   \"execution_count\": 12,\n   \"metadata\": {},\n   \"outputs\": [\n    {\n     \"data\": {\n      \"image/png\": \"iVBORw0KGgoAAAANSUhEUgAAAtQAAAHwCAYAAACG+PhNAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8rg+JYAAAACXBIWXMAAAsTAAALEwEAmpwYAABeRklEQVR4nO3deZwsV13///enZ+3puRCWEAJJSEgCGEMWcokgosgmQiSIrPILkiAxggiKSAIYVgm7ol9BUaKISEDDEhEEZHOFcENCVkM2IAnZIBDu7emZ6Zn+/P6o6p7qmurq6u6q3ub1fDxCV/epOnWq7iScW/PuzzF3FwAAAID+lEY9AAAAAGCSMaEGAAAABsCEGgAAABgAE2oAAABgAEyoAQAAgAEwoQYAAAAGwIQaADows9eb2T+MehxNFvhbM/uRmV2Y8Zi/M7M3Fz22QZiZm9kR4fZfmtkfZdkXAMYFE2oAO5qZ/bqZ7TGzfWZ2i5l91sx+rqBzvcDM/muALn5O0hMkHeTuJxbQ/8i5+xnu/qZRjwMAesGEGsCOZWa/L+lPJb1F0gGSDpH0XkknF3Cu2Ry6eYCk77h7NYe+AAA5YUINYEcys7tLeqOkl7j7x9296u51d/8Xd39lZNd5M/t7M9trZleY2e5IH2ea2XVh25Vm9quRtheY2X+b2Z+Y2Q8lfVTSX0p6ZPg0/McdxnU/M7vAzO40s2vN7EXh5y+U9DeR498QO+6nUvq/h5n9azjOr5vZ4ZHjHmJmXwjPd7WZPSvlnn3FzN4UXtdeM/u8md07bHuMmd0U2/87Zvb4cHvGzF4duV8XmdnBCedoi6iY2SvD3xx838xOi+27YGbvNLPvmdltYVykHLbd28w+bWY/Dq/tP82M/88DUAj+4wJgp3qkpEVJn+iy31MlnSdpP0kXSPp/kbbrJD1a0t0lvUHSP5jZgZH2n5F0vYKn3/+fpDMk/a+7L7v7fh3Od56kmyTdT9IzJL3FzB7r7h+IHf+66EHuflVK/88Jx3cPSddK+mNJMrOKpC9I+kdJ9wn3e6+ZHZVyP35d0qnh/vOS/iBl36jfl/RcSU+WdDdJp0laSTvAzJ4U9v8ESUdKenxsl7dKepCk4yQdIen+ks4O216h4D7ur+D+v1qSZxwrAPSECTWAnepekn7g7htd9vsvd/+Mu29K+pCkY5sN7v5P7v59d2+4+0clXSMpmm3+vrv/ubtvuHut24DCJ7aPkvQqd19190sUPJV+fm+Xts0n3P3C8Fo/rGACKkknKYiQ/G04xoslnS/pmSl9/a27fzu8no9F+urmNyW91t2v9sC33P2HXY55Vni+y8OYy+ubDWZmkk6X9Hvufqe771UQ3XlOuEtd0oGSHhD+5uE/3Z0JNYBCMKEGsFP9UNK9M2Sbb41sr0habB5jZs83s0vCWMGPJR0t6d6R/W/scUz3k9ScHDZ9V8GT10HEr2E53H6ApJ9pjj+8hudJ
um8ffXVzsIIn+r24n9rv4Xcj2/tLWpJ0UWTs/xZ+LknvUPA0/vNmdr2ZndnjuQEgMybUAHaq/5W0Julp/RxsZg+Q9NeSfkfSvcKIxeWSLLJb/Ilotyek35d0TzPbFfnsEEk3ZxxWr09gb5T0VXffL/LPsrv/do/9SFJVwQRXUpCZ1tbktnmuw+MHdXGLgol40yGR7R9Iqkn66cjY7+7uy5Lk7nvd/RXu/kAFsZ3fN7PH9Xh+AMiECTWAHcnd71KQt/0LM3uamS2Z2ZyZ/bKZvT1DFxUFE9g7JMnMTlXwhDrNbZIOMrP5DmO6UdL/SDrHzBbN7BhJL5SUtRZ2av8JPi3pQWZ2Snjtc2b28PALjr36toKn908xszlJr5W0EGn/G0lvMrMjLXCMmd2rS58fk/QCMzvKzJYktXLj7t5Q8BeaPzGz+0iSmd3fzH4p3D7JzI4IoyF3SdqU1OjjugCgKybUAHYsd3+Xgi/LvVbBxPhGBU+cP5nh2CslvUvBk+7bJD1U0n93OexLkq6QdKuZ/aDDPs+VdKiCp9WfkPQ6d//3buPpof+WMFryRAW54+8riHO8Te0T4UzCv6C8WMHE+WYFT6yjVT/erWCC/HlJP5H0AUnlLn1+VkFZwy8piG98KbbLq8LPv2ZmP5H075IeHLYdGb7fp+DP6L3u/uVerwsAsjC+owEAAAD0jyfUAAAAwACYUAMAAAADYEINAAAADIAJNQAAADAAJtQAAADAALqtEDbW7n3ve/uhhx466mEAAABgyl100UU/cPf9k9omekJ96KGHas+ePaMeBgAAAKacmX23UxuRDwAAAGAATKgBAACAATChBgAAAAbAhBoAAAAYABNqAAAAYABMqAEAAIABMKEGAAAABlDYhNrMzjWz283s8shn7zCz/zOzS83sE2a2X6TtLDO71syuNrNfKmpcAAAAQJ6KfEL9d5KeFPvsC5KOdvdjJH1b0lmSZGZHSXqOpJ8Oj3mvmc0UODYAAAAgF4VNqN39PyTdGfvs8+6+Eb79mqSDwu2TJZ3n7mvufoOkayWdWNTYAAAAgLyMMkN9mqTPhtv3l3RjpO2m8DMAAABgrI1kQm1mr5G0IenDfRx7upntMbM9d9xxR/6DAwAAAHow9Am1mb1A0kmSnufuHn58s6SDI7sdFH62jbu/3913u/vu/fffv9CxAgAAAN0MdUJtZk+S9IeSnuruK5GmCyQ9x8wWzOwwSUdKunCYYwMAAAD6MVtUx2b2EUmPkXRvM7tJ0usUVPVYkPQFM5Okr7n7Ge5+hZl9TNKVCqIgL3H3zaLGBgAAAOTFtlIXk2f37t2+Z8+eUQ8DAAAAU87MLnL33UlthT2hBgAAAHrx0Pf+uar1uiSpMjcnSarW623bzbbLXvzS0QwyARNqAAAAjIXmhDltO+n9qI2yDjUAAAAw8ZhQAwAAYCw0ox3N7eb76HZ8v3FA5AMAAAAj08xNj9skuRdMqAEAADAyzTx0Wk6aDDUAAADQQVqsg8gHAAAAEJNUGm/SMaEGAADA0PRTGo/IBwAAADDFmFADAABgaHrJSZOhBgAAADQdpfHSMKEGAABAoQYtjUeGGgAAADtav7EOIh8AAADYMZLK4U1zzCOKCTUAAAAGlqUcHpEPAAAAoIMscQ0iHwAAAJhK8SocadGNnRrrSMOEGgAAYIcrsgoHkQ8AAAAAqZhQAwAA7HBFZJzJUAMAAGBqJZW4G3eXvfilox5CR0yoAQAAdphhZpyL6H/cEPkAAADYYYqMdaS1DdLHOOMJNQAAwJTotlrhpJW4G+eYRxQTagAAgCkxqtUKi458jDsiHwAAAMAAeEINAAAwZrJEN5ptk/Qkt5vo9UxSNIUJNQAAwJjpJzIxDSYlMx1H5AMAAAAYABNqAACAMZPXioFFr1aYd9m8SUXkAwAAYAw0c9O9TiybueOkfHUemv0VVXJvUmMeUUyoAQAAxkB04pr0eV5tg5SuG7TPac2AE/kAAAAYA+MS
u0iLlBRx7mnAE2oAAIARSCqN15T2hHfYk9B4pCSqnzjINEQ84phQAwAAjEDWWES/xw1zpcSs55uWiEcckQ8AAABgAEyoAQAARqDf8nHDzlDnma+elsx0HJEPAACAAsXL4TW307LRSUtwjyI/3U1avjpqGnPTUUyoAQAACpRUDi8pSzzuZfPyaptGRD4AAAAKlDX6MK5l8/Jom3Y8oQYAAMhRWjm8qKRYRxajLqPXybTHOtIwoQYAAMhRP+XjBolP9BoVIdaRPyIfAAAAOSo6dtHpfKOOfOxkPKEGAADIIKlah7S9CkfRqxz2GxUpwk6OeUQxoQYAAMignyocefQxCVU+djoiHwAAAMAAmFADAABkkEeWeJSl64roHwEiHwAAAAni5e/S4hppqxz2E42gNN5kYUINAACQIO9sdNb9il5FkdJ4+SPyAQAAkKCXeEPWWMSwSuOltVEaL388oQYAADtaUjm8TpPapNJ4cUlxjV5jG5TGmyxMqAEAwI6WFKfIK5KRR//jUjYPnRH5AAAAAAbAhBoAAOxo/eSM0/rIo/9xKpuH7oh8AAAAJMgamZg2ZKZ7x4QaAADsaFkyzv300Uv/45qhRjZEPgAAwI7WKRbRbx9Z+x/XyAd6xxNqAAAwdeKrHErtJeySdCttFy9/16msXaeVEnspnTfslRKJeQymsAm1mZ0r6SRJt7v70eFn95T0UUmHSvqOpGe5+4/MzCS9R9KTJa1IeoG7f7OosQEAgOnWb/651+hGt/4nbaVE9KfIyMffSXpS7LMzJX3R3Y+U9MXwvST9sqQjw39Ol/S+AscFAACmXL+RhiJWK+ynj7zOnTXygcEU9oTa3f/DzA6NfXyypMeE2x+U9BVJrwo//3t3d0lfM7P9zOxAd7+lqPEBAIDJlhbrSJO0CmE8DhJ/H++zl0hG0gqLWY9JOnceiHjka9gZ6gMik+RbJR0Qbt9f0o2R/W4KP2NCDQAAEuW9EmC/kYnoZ6NcyXCQ8WMwI6vyET6N9l6PM7PTzWyPme254447ChgZAAAAkN2wJ9S3mdmBkhS+3h5+frOkgyP7HRR+to27v9/dd7v77v3337/QwQIAgPFVRGm5rP13Gkte/RddNg/5Gnbk4wJJvyHpreHrpyKf/46ZnSfpZyTdRX4aAAD0m5PuV1LeOemc/ZbGy9p/EWXzyE0Xp8iyeR9R8AXEe5vZTZJep2Ai/TEze6Gk70p6Vrj7ZxSUzLtWQdm8U4saFwAAmBzjlDPOkpPO2mcv/Wc9Nysgjk6RVT6e26HpcQn7uqSXFDUWAAAwmdIqcjS3i2hLWgAmbWGX6L5Zxtyt/17O3cu1oRislAgAAMZKM+YxSZPA+IS3l2Oa21nbekHMYziYUAMAgLGSd7xhJ5bG63QNKMbIyuYBAAAkGVa1i7S2QSp5ZO1/GNeG4eAJNQAAGLphV+/oRy+xi6xVPopGxGM0mFADAICh66cSxigjH71EK8bl2jA8RD4AAACAATChBgAAQzdOOeM8zj0u14bRIPIBAACGYtLK4fWyWmE/ZfPyQm569JhQAwCAoei17Nw4Zaj7GX8v/VMab7IR+QAAAEMxrqXlJr1sHkaPJ9QAAEDS9khG0tPPpBJ3vSyDnae0SMagbfElwKPbSdGOYZbNI+IxfphQAwAASZ0jDUn7JO03ytJ13cbfT1uR10NpvOlC5AMAAAAYABNqAAAgqXNGOL7PuJSuSxt/Hm29lMZLa8s7Q43xQ+QDAIAdKmn573GQNY8czzhnNehxnSblReanyU2PNybUAADsUL2UjMuy3yTmq0dV8o7SeNOFyAcAADtU1shB/JiiIx/9jDGvknd5nLuIyAfGG0+oAQDYQSZhtcKi4xP9Kro0HrGOycWEGgCAHaSf6EPW/cYlFjEupfH63Q+Th8gHAAA7SD+Rg/jx41LlY9iRj7S2PK4Nk4sn1AAATLikah3DWq1wnBx21oWq7V1VedeirnzNMWoszEiSSmubUsq1X/xr
fyN5VbKKjj//N4dWyYOIx/RgQg0AwIQb5mp84xz5qO1dlSTV9q62JtOS1FiYSY98eDV449WRRUMw2Yh8AAAAAANgQg0AwIQrMsecdK5xzVCXdy1Kksq7FoOYR6i0tpmeXbZK8MYqQ702TA8iHwAAjKks2eheJ2ZpkYZJmOR969fObeWdjz3/tLa2G845sXUdjcj1ReMfSdJy03kjNz2dmFADADCmsuRv81xxr9Nku5/+C8tQJ+Sdux0X/WzYJfvITe8MRD4AABhTRcQispxvnCMfSfGMpOOyXlte9zVrG6YTT6gBABihbrEOtDv2/NM63p94HGRcygUS85h+TKgBABihIlfcm8rIR1pbH3GQYUY+ML2IfAAAAAAD4Ak1AAAFi8c6eGpZjGp9TpW5uqr10UY8on/Go46bYDiYUAMAUDAqPQzHsR9/4aiHIInM9E5E5AMAAAAYABNqAAAKlla6rpe2vMu7dRrnIGXzeh1/Udc2ynNj5yHyAQBAn5rZ6Gi5O2l7TnqUk6y0Khb9Zn2j1UCSJtE7sewfMY+djQk1AAB9yrL6Xvx93uXkBlmZr5eVBgcZY79tw1itcBjlCDH9iHwAANCnfqMP/bb1E03IMv5ufXY6ppc+pj3ygZ2NJ9QAAMREoxzxp4+dIg1FPD3NQ1KsI+nadmJMo1fEOtAJE2oAAGLSVgxMaivqV/55Rz66jbnXlRKLHte4Rj6AOCIfAAAAwACYUAMAENNrabmiohLDLu9GhpqcNPpD5AMAsCPEl/+WkrPE0fZu4uXj0srmZW2L9tftfINKOncvxzHJBAJMqAEAO0IvJeJ6zVB367OIDO+wxhE3LjnmUd9/IIrIBwBgR+g3+pC1LS0e0Etb1sjBMGMR8fOOMnZB5APjiCfUAICp9cD3vDvx816eUHeKaxRd2SMpUpF31KLfsnl5R0/GCaXx0A8m1AAApBj3KARl84prA7Ii8gEAQIqsv/bPK/KR1pZ3LKLTNXSLfPQ6xn7bRhH5APrBE2oAwNhLq9DR3E5qi0Ykkip5dGqLSopFZDmuCJ2iFv1GN7L2H5d39GTUiHlgUEyoAQBjb9BIQL/xhrSxjDqOkHclkk59pH0+bVU+gH4R+QAAAAAGwIQaADD28sjiduqz19hCp+OGkaHOMo54W7954bQM9ShL1xXRBgyKyAcAYCxFS97FJz29RhomKfLRKaPdyzjTxnjtcz4keVWyio4475S29n7y4Wn3fFwnq2SmkTcm1ACAsbeTSptlnej3+xcCebX9tcc++81hj2uGGsgDkQ8AwNjbSb+i7zWS0fP9sEr7a4999hsbGafIB5A3nlADAHLXLHPXS4m7pLYsovGEeFSh37J5/bRlGUcvbd2epNb2rUoLM63XKNuUfCZ4NZca4f/blzakh/7LqarW11WZm1dlzjre86yxj/j4m2Mft4krMQ8UiQk1ACB3SZPCQUqZDbO0XL9tw44jNMJJdCM2mZaCyXTz1aPHzErV+np4rnWV6lu/qM46jkkvmwcUgcgHAAAAMACeUAMABpa0kiGKZaub8sWZ1mtb24bLZ022ETyf9llrfa45Sab2R9cTqlvchJ9JDMtIJtRm9nuSflPBv86XSTpV0oGSzpN0L0kXSTrF3ddHMT4AQG+oojB897qyOYme0Q8e1t52xO4bW9vXXHJIa9tnTaW6DWF0w0EuGuNi6JEPM7u/pN+VtNvdj5Y0I+k5kt4m6U/c/QhJP5L0wmGPDQAAAOjVqDLUs5LKZjYraUnSLZIeK+mfw/YPSnraaIYGAOhVHiv65dW2UywtzrW9RpVnFtpeO8ladi5+zLislAiMi6FHPtz9ZjN7p6TvSapJ+ryCiMeP3X0j3O0mSfcf9tgAYCdKKnEnZcukdiqPFi0tl/R5tI+828axbN7RZ1+s2t5VSVJ516Iuf+Pxrbbj/vZHqlXXgrbKgi49ZT81Zk2lDdcRF85qpRakH5fK87r24RtqzJlKdQ9y0M1/YkqnbapU
3VSpsim9pL2t05/NuCLWgUkw9Am1md1D0smSDpP0Y0n/JOlJPRx/uqTTJemQQw7psjcAoJthlYEbRpm5cS2b15xMx7cltSbTze1G+AXCxqy1JtOStFJbV2MuLJU3Z1pZDc7RfE3qM9r3IOPPul/RPzPAuBpF5OPxkm5w9zvcvS7p45IeJWm/MAIiSQdJujnpYHd/v7vvdvfd+++//3BGDABTbFx+fZ9XW6drS7vuPNrSxlHetZi4LQVPpaPbpbAyR2nDtVSeb7UtleeDJ9OSSnVPj3yEfUb7Thr/JEQ+gEkwiiof35P0CDNbUhD5eJykPZK+LOkZCip9/IakT41gbAAw9dJK3PX7dLlTP936KHrCFB1Hp0hGt7a0PpM+T2qLRjzirnnRsqob9wjOPVtXY2PrCfW3f3ZTwXf3JWlTzXxHY870w2Okal2qzEkVtY//mpc/oHVdlch4kq57XBDtwCQbRYb662b2z5K+KWlD0sWS3i/pXyWdZ2ZvDj/7wLDHBgA7QRG/Xu+3nyLjJr2McZQxg+rGXOJ21+MGvHdp+44y8gFMopHUoXb310l6Xezj6yWdOILhAAAAAH1j6XEA2GGKKEOWlh/OclxRGeqsY+x3/HmozNYTt7sel2PGvNtx8WOG+ecGTAKWHgeAKdWpHF6aeJa4eVy8j6S2pH6y9DEsSbnvLOOP37dmCbx4+buHvuHSttJ4l73umFbb/a6f18pa0M/SwpxuOXRdXpKsIR34v+VIabyyrn34Ruu44957S1tJvUtefGBP15p0fWnXPUxkpjFNmFADwJRK+2JdfJ+k/aahbF6afsvHNSfN28rfpZTGa06mm9se/n7YS9pWGi/6y+N4Sb1ex592PeNUNg+YdEQ+AGBKjWMJtCL7j15zFln7j2uWvdtW/i6lNN7SwlzbtjWCbWtoW2m8tj5jJfWyjj9pv6RrG2XkA5gmPKEGgCkRL4c3jk+ohzGR6hQ36ec+JJXU61QC77Kzj9latdDb277/wDU1G3+srW0vKYx4NJ9vbbQdF494ZI1rDDPWQXQDYEINAFNjEn6lPqo4SC/1pLOOeRvrsN29MbOkMQ8jStPPfsBOQuQDAKbEJFRNGEbko1PcpNNYsjztTdrexjtsd2/MbFRRmvgYiG4A7XhCDQATLF7JIyop+pC0T7Mtvl/ebcOafGV5iitJh511Yataxw3nnNg2/sPPvkS1fasqLwdZ6Nq+sHrH8qLsQYeptrKu8tK8Lnt6WY05U6nu8pLJw0UNraHWtiTJLRIHMVXmk/9sstzXYSHKAWTHhBoAJli/lSqy7DftVT6i1TrifTYn0M3X1jH7VmUrQVWO2sq6GnNLkoKlwKPaJtPStsRHP39uo7yvANIR+QAAAAAG0HVCbWb3MbNfNbOXmNlpZnaimTERB4AxMC4l0LLmmIeRoc4qWv4u3kcz6lFeXmxtt94vBaXtykvzKtWDPHSp7rLNrb6j25K2Ragn4b4CyK5j5MPMflHSmZLuKeliSbdLWpT0NEmHm9k/S3qXu/9kCOMEAEh64Hve3fY+OvHppcJFp/3iE6loNKGXtn7261enMnfxPHL8PrSVv4v1ceuLjld1ra7Kwpxuv+9623H3fOCaqhszqszOqHF9kOXoFvloZqbj24Pc1zyQkwbykZahfrKkF7n79+INZjYr6SRJT5B0fkFjAwB0kbX0W6/9JfVZZL43z6xv1i8lpqmGKxtW1xL631iPvC5sa0/sr8D7Sk4aGL2O0Q13f2XSZDps23D3T7o7k2kAGKG8f0WfFp/opa3TuIYRTegUi+hFJVzZsLKQ0P/sfNtrpv4KuK/EOoDxkRb5+F9Jr3H3LyW0fdHdH1foyAAAktJL40VFYw1p5dfix8T3S+qz17Z+9utX/LqjT6hLa5vSwoxq+1ZVWV5s2++Ar0krq+taWpzXd45Zb8U2SnXXwl2uzVXXwqJL920/390+tazZ1bqWFue09rBspQO7xVKSrqeo+0XMA8hf
WuTjEEn/z8w+I+ksd4/+l/iexQ4LANCUFuugbF562bzGwkzrNX7cymrwTcGV1fW2DHRjzrSyWg/btt+7aFu13vsYo5+N8r4CyE9atY7bJO2WtJ+kr5vZgyNt/S/zBAAAAEyR1PJ37r7i7r8p6c2SvmBmZ4RNlnIYACBHveaF4+/jbfHjh1XWruj+k+5RaW2z9Rrfb2kxyEAvLW6Vv5OCyMfS4lzYtv2+RdvyzHn3e92D3B8A+ci0UqK7f9zMvi7p78zsyZKWix0WAOxczcy0tD0THDdo5GPaRSMf93nFf7cm2I2FGd32zuNV3ZhTZXZFjY2tiWZjznTn0VK1Lq3OSRW1Z7TveFjQVulhbpqUbx8UWWhgfKRNqG+PvnH3myU9wcxeKemJhY4KAHawYU6Gpz1DHdWcTDe3q+EkurqxfYLb619Uhn1tO/kvScA4Siub96QOn7/D3ReT2gAAgxvmr+inPfIR1Xxa3dyuzIZPjGc7Vz8Zt2tLi5EAGJ20snmvlfRed7+zQ/tjJS25+6eLGhwA7BRppfGacYFobCCpTepcti3aNk6iT4LTJomHnXWhantXJQVLhd9wzomttiPfca1q+9aCtuUFXfKyQyQzyV2SBd/6cemGcx4efC5J7rJ1k0rSyvrctsdLaePKOuYiEPMAxlNa5OMySf9iZquSvinpDgVLjx8p6ThJ/y7pLUUPEAB2gn6z0NNSNq/bcc3JtCTV9q62XXdzMi0p2G5Omi3y/Xlr/U+rzZtz69Sv52e7hlHHWQCMVlrk41Pu/ihJZ0i6QtKMpJ9I+gdJJ7r777n7HcMZJgBMt2FWi4gfP8rIR9K1JB1X3rWVNCzvWmxvW95a/ru8vBA+mVbw2ize4dr6PGyzRrDZfO0ky5hHHWcBMFpdq3y4+zWSrhnCWABgqj3wPe/u2BadLGV9whv9LOlpZpYJWLcnolkncVnGPMgT6hvOObFj5ZNz/+dzkleDna2i3R96oRoLMyqtN7Zy012eUEf7TIrLRMVjNkUi4gFMhkxl8wAAxUorjTfKPselLF/q+ZqT6XA7Wipv0P7HqcoHgPHVJTkGAAAAIA0TagAYA0WUQ8uSoe63j2GXcEvNElulbTu6OmK//Y9L2TwAk6Fr5MPM3q5g6fGapH+TdIyk33P3fyh4bAAw8aLl8NJyulnjGd2yvvG2bvLoI+u4KnNz28rfXf7G41t9HHPO1artC9uWF3X5HzxIjTlTqe46+KubWqltSJKWyiXN3VVXbWVd5aV5ndA4vVWpwxrS0nIwlvLyomor660+GnORDLWGm4VOQ04amHxZMtRPdPc/NLNflfQdSU+X9B8Kqn0AAFLknUHOI8Obdb8iyubFy99FNSfTze3mBLgxZ1qprbfaVmrrml8J3tdW1uWlrQmxl2L3PNJHXJ4l78hJAztblshHc9L9FEn/5O53FTgeAJgqef9qP49YQby/YZbNi5e/iyovL7Ztl+pBmbtS3bVUnm+1LZXnVV4K3peX5tvK3lmj/Z5H+0i69jyvjVgHsHNleUL9aTP7PwWRj982s/0lrXY5BgCmVjPGIXWPTESfguYxeerlCWnWsnlpJfvybEsqf1eJ7P/xb13YVv7u2POPDqIbS/O68fFStT4bHjejan1O0taktNnn0kL7NZeX5lt9RMdSxJ9NHFEOYOfIUof6zDBHfZe7b5pZVdLJxQ8NAMZTEXWWi5A1l522X95tqfcgVv4uSx9d+8xwTL/H9dI/gOmWtcrHQyQ928yeL+kZkp5Y3JAAYLz18qv9rMcVOc5uVT7SxpF3W+o9iFXryOO+5tnHIP0DmG5Zqnx8SNLhki6R1KxB5JL+vrhhAcB4iVbrSJMUfYhX8kjaLypeMSPanqWaRrStV0n9d2prVuwo71rcFuU4/OxLVNu3qvLyoq5743GtMd7926a5teALgksLplsPXpdmpVqtrieceZpWVoMvGy4tzksP3zr34R/dp1r4RcTy0ryue/bywFGN6J9N
v4h1AJCyZah3SzrK3bd/owMAdoh+q3VkjV1k7TPvKh9pY+zWf7NKR23v6vZqHmHFjtq+1bb+m5NpSVpZq6sR/r9QY1atybQUbFfrttXfylZbbWW9v0hJD9fWTxuAnStL5ONySfcteiAAAADAJMoyob63pCvN7HNmdkHzn6IHBgDjpN8SaP3kpIvM+qadr9fSb82yd+Vdi9vbwhJ45eXFtj6iVTiWFuZUCtZqUWkjjHk02xbn245rVumQgshHHhnnPNoAQJKsW5LDzH4h6XN3/2ohI+rB7t27fc+ePaMeBoAp1EtpvDzaRhkf6JTzLsK1z/lQW2m8I847Zds44tu9tknb73mef27kpoGdycwucvfdSW1ZyuZ91cwO0NbXQy5099vzHCAAjJudlMXtJ+fdt1hpvKRxJI1l0Fx5EX9uANDUNfJhZs+SdKGkZ0p6lqSvm9kzih4YAIxSEfGAcY0ODHUcsdJ4SeNIGsugMZg8/9wAIC5LlY/XSHp486l0uFLiv0v65yIHBgDDlrU0Xpqksnmd2pK2pXzL5iW1xc8VLX93+RuPb2s/+uyLO7Yd8+arWqXxNk58SFtZu7V7zGlldV1Li/Myl1Zq61oqz+vxl52mldVgHEuLc9JDOz/xTStrl0fJu6yIeADoJsuEuhSLePxQ2ReEAYCJ0W/5uE5taX1mGUcv5+s3UhIvfxeX2hYpjbcRK2u3Wg6+n7Oyui5rBJ+v1Na1YVv/99GcWOd5bUVHPgAgSZYJ9b+Z2eckfSR8/2xJny1uSAAAAMDk6Pqk2d1fKemvJB0T/vN+d//DogcGAMNWRBa6n3zyoHnhbm3x/qLl7+JS2yKl8eJl7Zol8JYW57VUDrfL80HMIxTdzuvaishQA0A3WZYef5u7v0rSxxM+A4CJ0q0cXj/SYh1pbZ3K1Q078tFcNrwyNyfF9k1r+/jle4JKHVbRsecf01rZcOu+llSZs/D8JUkb4bU2r1+qaHhZ6F6QmwbQiyyRjydIik+efznhMwAYe0WWWOt3LKPO6fb7F4JW2TuvDvUvBMPsHwCy6Bj5MLPfNrPLJD3YzC4N/7nMzG6QdOnwhggA+SkiHjDoWEb9dLbvyEqz7J1VMkcmiox1pLVRGg9AkdKeUP+jgi8fniPpzMjne939zkJHBQA5yqMcXlRSabyk6IbU3yqEeZTGi7c1S+NJ0nXvfoRk1jrf4a+7tFX+7vJXPUSNuaCtVHcd8qUNrdTqWiqbrn60t7U96fdPbZXD06Par+Hgf9/USm1DS+WSbnz8eMY6ooh4ABhExwm1u98l6S5JzzWzYyU9Omz6T0lMqAFMjF7L4eVZGq+ftiJiEW1l7yKTaam9/F1zwixJjTnTSi0oh7dSW1cjMiGOt1XrW7/wrNbrWqltRNpmBx7/oG2UxgNQpCwrJf6upA9Luk/4zz+YGX+VBzAx8o4HpPWfR1sR0Ye2Kh3ubeeLVuso1bfaSnVvq9CR1hY/d1Jb0bGOtDYqeQAoUpYvJf6mpJ9xD759YmZvk/S/kv68yIEBQC/isY549KFXSbGOpngko+8v9UVEoyFpffRyPdHj4qscRl3zloeqWl9XZW5ejZWN1ueNOdP3fmlW1bqrMjercqTP8tK8bnnUViWPuBsfP6NqfVaVuZm+xz9M8eovREAA9CLLhNokbUbeb4afAcDY6DU+kdbWbzyjV0l9duu/n0hD93Gst15LsV9c5nlf+x0/VT4AjLssE+q/lfR1M/tE+P5pkj5Q2IgAAACACZKaoTazkqSvSTpVwRcR75R0qrv/afFDA4Ds8s7b9tJ/nmPu1n8Rpd8qc/Ntr93GmOXcw8yH55GhzuvPFMDOlPqE2t0bZvYX7n68pG8OaUwA0FXSiof96JTvjeek00Tzz/2WzYuO45hzrm5V3igvL+q6s4/umD8+/KP7VFtZV3lpXqXqmmrVteC4yoIaC3NbbT/4cavtmpcdosZ8qTW+A/5LWlk1LS2arn9Ye/8HfaWhldqm
lsozuukxXb/H3vX+TIpoJp88NYBuskQ+vmhmvybp4+6xr4YDwIgUkZXNOy+c1mdaW3My3dxOG1dtJcg/11bWVQonzJJUq66psemJbc3JdLOPldVgv5XVdcW/ItNeGm8m03WPKic9jHMDQJIsjxt+S9I/SVo3s73hPz8peFwAkCqveEBSn3nFG+J9Zm1rlrFrbqedu7wUxDTKS/MqVxa2jqssdGwrrTfa+lhaDEvcLW6PfKSVxis6djHKyEfanxsAxHV9Qu3uu4YxEADoJu8VD6XkSEavTzPTxpO1bF5UM+LRHF/qvs9ejoxhOaGcXzl8f7eOY7jtZ6Rq3VSZkxTb5abHlFStz6gyV0otJZjWNgmIdQAYRJbIh8zs6ZJ+TpJL+k93/+QgJzWz/ST9jaSjwz5Pk3S1pI9KOlTSdyQ9y91/NMh5AEyXYZVwy2tc/R5XZJyl0xg67dNPZKVT26REPgCgV1lWSnyvpDMkXSbpcklnmNlfDHje90j6N3d/iKRjJV0l6UxJX3T3IyV9MXwPAAAAjLUsT6gfK+mnml9INLMPSrqi3xOa2d0l/bykF0iSu68ryGefLOkx4W4flPQVSa/q9zwApkMRMY9xZ5uSz2xtW8PVmDOV6i43k4f/5bYNyRoNNeZLQS66ZGrMBl8qLG14a1sKctOtLyO6S7a9rW2fHWASq48AGE9ZJtTXSjpE0nfD9weHn/XrMEl3SPpbMztW0kWSXibpAHe/JdznVkkHDHAOAFOi3zjFJLvPN9oLKt398jtb2ysP3K+trfKt77e2Nw66d1vbdc8st7aPfOd1re2rX31k235pbdOM3DSAvGR5FLFL0lVm9hUz+7KkKyXdzcwuMLML+jjnrKSHSXpfWN+6qli8I3wanliiz8xON7M9Zrbnjjvu6OP0AAAAQH6yPKE+O+dz3iTpJnf/evj+nxVMqG8zswPd/RYzO1DS7UkHu/v7Jb1fknbv3k1dbGDKxX8tH99u7pNnW7dFWJIWfRl0YZeopcX5sCZ0sF2uzKtWXVe5Mi+V51u1oZfKQTm8WnVN5cqCNpbmW3Wpy0vzbecuLy+oti+oRR2NdkTbyssLqdeWx30dtI+8zw0AechSNu+reZ7Q3W81sxvN7MHufrWkxyl46n2lpN+Q9Nbw9VN5nhfA+IrnpEc16YnGS+ITyyzVI5KOS9PpLwtfeOu5kleDnayiI847JXJMqbXASmWupOoj7xPrb6a1HXXNKw7vWI2j2TaNk0xiHQCGIVPZvAK8VNKHzWxe0vWSTlUQP/mYmb1QQV77WSMaG4AhG1b5uGGU1Ms65uhn2/ZrTqal9u0+x9xt/MMqRzjOf24AMIiRTKjd/RJJuxOaHjfkoQAYA0lPapufN98PIx4QnYClPaHOci3d+u903ZW5OckqbU+o0/pPG3On6+405lHFLopuA4CidZxQm9kX3f1xZvY2d6d8HYDcNCMe0nBiHfEoRyed8s7dJtNpxx199sWq7V1VedeiLn/j8W3H1fatSgszqu1bVWNhpnXMv60saNM3JUkzttDW/5Hn/lC1apCFLlcWdM1p9xpo8jiNk05iHgCGLe0J9YFm9rOSnmpm50myaKO7f7PQkQGYWoNGAvqNAGTts9eoQKfIhCTV9q62vUY1J9HN16ZNX2nbjvbfnExLUq261ldcI+t+kx75AIBhSZtQny3pjyQdJOndsTZXsOALAAAAsKN1rEPt7v/s7r8s6e3u/ouxf5hMA+hbNGZQmZtrvY9up7Vl3S9+rqzn7jUGkXZceddi22tUaW2z7bVpxpbatqP9lysLW31XFvq6J/Gx93tfi/5zy+PPGwCGIUvZvDeZ2VMVLBcuSV9x908XOywAky6ek45/eS5vWaIP8fdp2/1+KTHuhnNO3Mo4x/ooLy+qWq+rvLyow866sJW1ftJz1iQPIyI2o+iXWJqZ6ea4BjWpX94jJw1gnHSdUJvZOZJOlPTh8KOXmdnPuvurCx0ZgIlWZMZ2GLnZ
XjLVaRnqrG1tWetY2bx+ctJ5jH8SMtQAMA6yLD3+FElPcPdz3f1cSU+SdFKxwwIw6QaNXaS1DePX/L30nxb5yNrWFg2JlsqzSu6xiLRxTErkAwDGSdY61PtJujPcvnsxQwEwaZLK33WKEDRjE0VNhuL9Z61ZnFTHOf55tV5XaW1TjYWZ1mtUNK4RL413+BsuU23fqsrLi7r8Dx7UWvK7tN5QbX1Nmi+pVl3TlX90nBpzplLd9ZzrDlBtMyyNN7OgIo3z5JRYB4BJkWVCfY6ki83sywpK5/28pDMLHRWAiTCq8mhFRQfS4hmdStxJ6aXxavtWW6/NybSkzttz1ppMS1Jts7/SeNNUNg8Axl3XyIe7f0TSIyR9XNL5kh7p7h8temAAxt8wKjYMMzqQFs/oVJFDSq/kUV5ebL2W1htb/a03Wu9L6w2V6h5s173tqXR5pr9KHlkjH+P85wYAkyJT5MPdb5F0QcFjATABmjGPQSY8RX4psde2pJhHUpUPhe+blTmiopU8KpHzVObm9PFLvxF8ydAqOuK8h7SOSXtCfdUlD0iMpRQRmRmnKh9EPABMqqwZagCQlH9FiFHr9Xp6qeRRrde3KnZEK3dkHNMg45r0Kh8AMEmyVPkAAAAA0EHqhNrMZszs/4Y1GADjL49M7TjpNK5exp+aA26WwIuWwss4pl7H1U+Gutv4h5mhBoBJlRr5cPdNM7vazA5x9+8Na1AARquXcnj92JZPjvXfrS3rfkltzRJ3ktrK3FXrdR199sWJ5e+q9bqOedMVrfJ3l736p+VhsQ/blI5459VbbS9/YOu/rLVaXUd++BT5TLCfYgVCSuuuxry1XpuOfO/3VauGZfMqC7rmxfcbq6zzIMhJA5hGWTLU95B0hZldKKkVAnT3pxY2KgAjlSUDW3TetqhzR0vbxcvcZS1/55GJsc/ESuNF/qsa3fbt1fZak+joZFpSazLd3C46tz6qDDUATIssE+o/KnwUAMZKlifIgzwlHrRtkD7KuxbbnlBHNds6lb9rPoW2TbU9oY62lTa2JtKlDclNrSfU8Ul1pyfU5cpC2xPqaCWSPO5rXN79d/tzA4Bp03VC7e5fNbMHSDrS3f/dzJa07ReXACZdHuXwsoo/tcx6zrTycVlLyzVL3ElqK3NXmZtLL393xTdb5e+OPf+4VtvS4pyuPevBreMakWtrzG5NVpcW57ZNaDs9oW5GPJrnLrJs3jD+vIl5AJh2XSfUZvYiSadLuqekwyXdX9JfSnpcsUMDMEyDlo8bJH4wbmXbEveLlL/rJdLQ6drSDLMk3bBjPAAwjbKUzXuJpEdJ+okkufs1ku5T5KAAAACASZElQ73m7utmwa8kzWxWkhc6KgCFS6rkMS1Ka5tqLMxs205smy9JZpK7SnVXY76k0npDjTkLPpckd63U5rRUrmultv1eWUPy0tZr1rZp0i13DwDTLMuE+qtm9mpJZTN7gqQXS/qXYocFoGjT/Gv5w868sO39dX/yyNb24W+4rK3tmrccF2yY6Yi3Xd36/Nt/tLVMuMx08mn/39b7p7Wfb78rtzLQPzq6/XnD3a7darvrQdP7LIKcNICdLMvzkjMl3SHpMkm/Jekzkl5b5KAAAACASdF1Qu3uDUkflPQmSW+Q9EF3n97HLMAOUfSqd3mtoph1JcOoaNm7baXxlhfbtktrm5KC+Eezrby8qNJ6o7Vfab2h8tJ80Ba+Ri0tzrW9trUtzLW9ZlHkaoVJ5yrizw0AdpIsVT6eoqCqx3WSTNJhZvZb7v7ZogcHIF/DLI1XtLTISnSVw7hLX/OQrW+BmFrbjYUZfevlh259Fqlk15gv6btPmVO1bsG9i53vxz/lqtZd63MuxdIztzxgXZJ0l9a3jSVa/zlLJY4i/uwG6ZOYBwAEsmSo3yXpF939Wkkys8Ml/askJtTAhMmzPN2wSqz1U3YulXfYlrYm0aZt0sbR7xhH9eeR9zgAYKfLkqHe25xMh66XtLeg8QAo
UJ7RgVFHPvpmHbalrQl2QqitiDH2E2fJO/IRH0cv/QMAAh2fUJvZ08PNPWb2GUkfU/B/M8+U9I0hjA1AH+KxjnEsjRd/8vnQN1zaWvL7stcd07bvYa/e02q78tVHq7Ewo9Lapo7+dF21lSBGUV6a1+W/Mt8qeXf4xfNaqQVtS+V5XXf8eqvtvjcuamUtXOVwYU7fP3wrilFab6ixUGq9RtWqa9J8qfWa533o9rQ3Gg3JW7c+iXUAQHdpkY9fiWzfJukXwu07JJULGxGAgYzbqoPd2iSptne17bVTW7N+dGNhRrWV6tY+K+tqzAdfKGzMl1qTaUlaqQWT6Vbb2tb5o9tBv6W217a2SB9xecdShnHPe+0DANBZxwm1u586zIEAAAAAk6jr7y3N7DAze7eZfdzMLmj+M4zBAehd0RnnInLSzdJ28RJ38ba2EneR8nXlpflWmbvSekNL5a22pXKsLVK+Ll7KrrTWaHtta4v0EZd3znucyuYBALqzbiWlzexbkj6gYGGX1v+TuPtXix1ad7t37/Y9e/aMehjA0EWXDY+Kl1+Lvo9mqjvlq7OUcBu20lqYa15rzzUvza5rZWN+2/Y0SiuvF89XZ/nzjh8f3Y/MNAAkM7OL3H13UluWsnmr7v5nOY8JwAA6TXiHUWJt2DrlmqMT6GmeTEv9lezLI0MNAMgmy1fV32NmrzOzR5rZw5r/FD4yAB2l/fq+0/tBIhmj1CmGsTS7nrg9jbKW7Msj8gEA6F2WJ9QPlXSKpMdqK/Lh4XsAQ5J1lcM8SqxliYokRQwe+EcXt0rcXf+m49vbXntRx9J4x35kr2rVdZUr87rsGctqzAUFokt11+EXz2mlth6UvzthQ405U6nu2v/yilZWw/J3ixV99+itp6vHve/WoLydpHJlQZf89n07XltSRKZbtKLftvj9it7LblGdLG1ZEe0AgHxlmVA/U9ID3X26HwEBYy5LabZhrmSYtF+0xF1839TSeNX11mtzMi1JjTlrlcBbqa2rMTez9flqpPzdavtYmpPp+HbWa+11NcS8IxlFl80j2gEA+coS+bhc0n4FjwNAF3lUbCi6yke0IkdaW1y5Mt96LdW3vihdqnurYsdSeautVHctLUaqdSy2349yZSFxuzmWpO3o+27Rin7bssZsiq7yQbQDAPKVpcrHVyQdo2B1xNajHnd/aqEjy4AqH5hm0Uoe/VbhSItuTOJTyrRoxSjPnWccpJdqHd3aiHUAQH4GrfLxupzHAyCDrL/2L7qPcZL3ioR5nTvPNmIdADB5uk6ox6HeNAAAADCusqyUuNfMfhL+s2pmm2b2k2EMDtjJ8ihrNyml8bIa5fiHla8mJw0AkyfLE+pdzW0zM0knS3pEkYMCdqpeS+Nl/bxbH73kdKPl765/8wlt+x157g9Vq66pXFnQpc+9mxrhf2FKG9IDLp/Tyuq6lhbndcNx7WN84EVbbdef0N527Ie3Sup963m72sbSrfxdv22d7kHavevWf9qYBy1z2ERmGgBGI0uGusWDbzB+0sxeJ+nMYoYE7Fx5rnqXdtwgOd3U0nhhibpada01mZakxqy0shqWv1tdl7RVGm/rs+S2aEm9fsfca1tROeZhnhsAMDxZIh9Pj/zzDDN7q6TthWQBDKyfX+0nHZ/0Pq9YQWppvLBEXbmyoNLG1jhKG9LSYlj+bnH7MuFpbdGSemnXlmdbUbGLos8NABiNLE+ofyWyvSHpOwpiHwAGlFQar2mYVT56aWtb5TDWds2pZVU37qbKbF2NyIS6Masw5mGSto//tkdI1bqpMre9uRnzmFTxGEnWtk6IdQDA+MmSoT51GAMBdqK8y9qN+tf+1Y25ttfMx/VRDm/SIh9FxHEAAOOh44TazM5OOc7d/U0FjAcAAACYKGkZ6mrCP5L0QkmvKnhcwI6Qdx521Dnaymy97TXzcTmUBMyzLc8Mda9t5KQBYPJ0fELt7u9qbpvZLkkvk3SqpPMkvavTcQDapS0h
niZrWbteRDO7R77rOtX2BVU5yssLuuYVh7faDj/7EtX2rYZti7IjHqDayrrKS8EXA5vblz2zosZsUJWjtOG6xzVLWlita2lxSdUj28d31OcareO+e9JCTyXp4iXnul1bv/dkXJCTBoDJkpqhNrN7Svp9Sc+T9EFJD3P3Hw1jYMC0KOKLgVnOldZerddbk2lJqu1bi7WtRtpWZSth6bqVrdJ1tZV1NWaXW+8bs6aV1aCP5mtULdJHtb71y7FhZpCzHjdOGWoAwPjrGPkws3dI+oakvZIe6u6vZzIN9K7o6ECnc6W1V+bmVF5eaH1eXl6ItS1G2hZbT6bLS/Nt26UNb+1X2nAtLQZ9NF+josflURYur9J1/cQuio58AAAmS9oT6ldIWpP0WkmvCRZJlBTUvXJ3v1vBYwMmVtYVD9P08wQzGo1IikhEn0Jf8ruHtB8cabv0j3460xij5yiX53VTGPO4M6E0XhDzKA10T4qIdUTvybAns0Q7AGA6pGWouy76AiDZKFe966cE3aDnynK+PO9JEbGLvMfY67kBAJOLSTNQgDwiAHmcu2hp0YpO+w6jEsagcZBhRT4AANMhy0qJADLII+YR1U+1i2hbtV7XMeevtr4MWF6a16W/tpWNPu5vfqhaNazyUVnQ5v67WlU4vFRqO272jp+oVl1TubKg6lH7a6W2rqXyvL7/6PbzH3funa39rvmtAzpW8hi2tFhH0VU+iHUAwPRjQg3kJCkuMIzoQKf9pO1VOaStCXVzMt3c3qwstPbzUqntuNlw31p1TSu1oM+V2rqq9Zm2c9ci+xUZ18gj1lFE/8Q6AGBnIvIBAAAADIAJNZCTYZZf6zaGpmapuvi2FMQ8ottJpfFa78N9y5UFLZWDtqXy9vJ30f2KLi03aE46z/7JSQPAzkbkA+hT0gqIeeq3bF5UkJleTNz3mjPupupGOBGcrYfb0Un31n8eKnMHxPLbM5I2pfpm27kvOe2eW4fHxhy9R/HrydrWL3LSAIAijWxCbWYzkvZIutndTzKzwxQsa34vSRdJOsXd19P6AEZpmFncIjQn0/HtxH1zKMWX1kevbcNerZCcNAAgzSgjHy+TdFXk/dsk/Ym7HyHpR5JeOJJRARkVHR0ofPyz9cTtxH1zGFdaH1nbhlXWjlgHAKAXI3lCbWYHSXqKpD+W9PsWLMP4WEm/Hu7yQUmvl/S+UYwP6CTv0ni9iMYWspTNS3Lch7bK323cY0m1lRVJQU76hl/d6v+wT7aX27vhaVvXe8w5V6u2b1Xl5UVdd/bRHUv7NbejbUnj7KVtXBDxAABEjSry8aeS/lDSrvD9vST92N03wvc3Sbp/0oFmdrqk0yXpkEMOSdoFKEyvK+nlGfnIErvoFj+IlrXbWNj617+2sq5qfbbVR7zcXrM8XrVeV23favD5vtWhlp0b1/4BABh65MPMTpJ0u7tf1M/x7v5+d9/t7rv333//nEcHpCs6OtBvLCK+TyfRKhzxSh7R/lPbloMvOZaXFwuNXaS1jbp/AACiRvGE+lGSnmpmT1ZQfuBukt4jaT8zmw2fUh8k6eYRjA1o00slj06r8fVStaLfL+4lnUuKVu8Iti855W6xIyKVPCKfXv6UGUnlxLZmzCMeL+n2hHeUVT76QawDAJDV0CfU7n6WpLMkycweI+kP3P15ZvZPkp6hoNLHb0j61LDHBsT1EwmIvu9lYlxElCCvSh5ZKm10HcsEV/kAACDNOC3s8ioFX1C8VkGm+gMjHg8AAADQ1Ugn1O7+FXc/Kdy+3t1PdPcj3P2Z7r42yrEBUrbSePH9ou+zloHrtO+g8iqN1yln3NNYJqxsHgAAWbFSIhDTb2m8aFm7pM+b21n7iMcOjn7dt1Tbu6ryrkVd/oZjW58f8Ve3aabukoIvG176vLurMV9Sab2hFz3pf7XeqEmS5ktl/fUXHzdwWbtu+01y2Txy0wCAfjChBmJ6LY2X1pZn2bza3tW216bmZFoKyuE15oNf
PDXmS63JtCStN2pjeW3jem4AALIapww1MBbGpbxbXHnXYttr0+acbe1TWVBpvSFJKq03NF/aqtQxXyqPzbWN67kBAOgHT6ix4/VSGq8feZXNu+Etu7eOj7TPv29FMzPBU+r5mU01LryHpOAJ9Sff/jitrAaLtCwtzksndB/nMErVjbI0HrEOAEDemFBjxysyVpBn2bxOxzUn05JU22z/Lm9zMt3crtYt07mzXE+e0YpeoyjEOgAA44TIBwAAADAAJtTY8YZdfi2tjyzjjO+3uRnJUM8stLUtLc63bWc997Dz4aPMpgMAMCgiH5ha8fJ3aeXdihQvA3fYWRdulb974/Gt/ar1uo5581Wq7VtVeXlRl772p9r6OfJv71StuhaUxnv2shrhlxEvv+Gg1l+NK7PzkrYiIMs3rmpmJYh9lJcauuOE9i80Jo1zFPckb+SkAQDDxIQaUyvPEnH9tiXt16n8nSTV9q22vba1Vddar425XVsNkd8zVTfWVdLWBLW2st62Xa3PFHptw2ojJw0AGCdEPjC1xrW8W6fyd5JUXl5se21rqyy0XkuR2tNqRK55dr79mKX5tu1JKF2Xx7kBABgmnlBjqvS7ymFUWvm4ftriT0+jMY+4eMwj6poX3LPVd2Njq8/SXVvnKs3NStpqu+Fpi62n0tGYxbBL1RWBWAcAYFwwocZUSYp59BsxyKOfPOMHbX1a+j69jLHo/YpoI9YBABgnRD4wVfKIJiT1129bnk+Bs/Q5yPgnLfIBAMC44Ak1JloRqxymVbvIWgnjga+9qFXJ4/o3n9A2xiPfd+tWtY7n30ON+eDvtaX1ho64aE4rtXUtlef17UdutndqkX8ijviLmzUTZqrLlQVd8qL9+7rucUW0AwAw7phQY6INc0W/XqIi0Uoe8ePaqnXMb/2SqDFf0kotqMoRvM4knmPbSomRLyg2+x7kesY18gEAwLgi8gEAAAAMgAk1Jto4ZX2joqXxtpXNi5a/W9+qeVdab2ipHJS5a74mXWv8XJtzkZUSKwuJxyQdNykZagAAxh2RD4y9pJz0OJZ9i+arm7npaKk6KRj3NWfcTdWNOVVm6yrbQqu9XFnQtx9ZVxD12Gw7Nmky3Gy79iX3b/tcExiTICcNAJhkTKgx9iYt69utvFt1Yy7ymm/JuEkvmwcAwCQi8oGxNynRhKzl3Sqz9dZrHpGMTsck9dFP/8O4rwAATDKeUGMs5bHiYdEO+dyGVmrB5HipbLrqsUG1jWq9rsMumdPKqmtpUfruUY220niVWyuytbqWFiq6/b7rrf6q9bqO+8vbWiX1LjnjgLa26L3o96lu1rJ/RSPiAQCYJkyoMZY6lYgbl2hCtV5vTaalZpm7rUnqyup663Vbaby14Ljma1S0pF5cltUXh3lP8rqvAABMOiIfAAAAwAB4Qo2xUMSKh0VrzEml+tZ2lJcka4Sv6w15+JTa1htym5G55LEVDyWpMW8qrbsa8wmNEyapSsk4xE0AAMgbE2qMhUmMBNz0/I32D27dmijuPWTrlz8+75HtkqqHuDq55qUH5zfAESMnDQDYKYh8AAAAAANgQo2xUHTpukL6n91azbAyO9/ethBuLySUzUsrqdfHuJKOT3o/7PsKAMBOQeQDI1N0abxoVYy0c9hm8I8kWUnyma22/S+SKqtBRGNpUbr7569Wbd+qysuLqj/0UNVWgqxzecl05S9tne+wP/2WantXVd61qB+84djCri3JKEvjEfMAAOxETKgxMnmWxuu3rVqvtybTzffe2NpuTqaloATe/L5VSVJt36rqK1s1pGsr65IWt97vXW17TRpL4iqKfdyTrPsNs2weAAA7CZEPjEzRkYykc/Xax9LiVqxjaXFe5eVg0lxeXlR5aastui1J5V2Lba+drrvfe9LJUKIurHgIAEAbnlBjqIa5AmLW6MPBx9yilc1gIZWlmQV974oDW22bC9JmWN9uc0G69qwHt/VZrZda51LkCe0Nb9m9tV+G
eEarjwlArAMAgHZMqDFUSZGGUUYTqvV6azItSSuba21jXIwkNlZW66rWu/cZfZ9XrGMcIx8AACBA5AMAAAAYABNqDNVQy9plzUnPLLTalmYWYhnqreOWFrPnh/MujTdOGWoAANCOyAcGlrRseHvOeHgZ4QN/6natbK5paWZBt1x1n9bnB3xNWomUv6t8+qJWWbtSeUmbYfWO+eVF6TVbGer9zrtIC2GljvKuRd355hM6njua2U6LfETF4xRZ71H0HMNAbhoAgM6YUGNg/WaXe90vS1szDx3PQq/Eyt+VImXtSptbv6ip7VttOy5a9q62d7XjRLmXnPEkl80DAADbEfnAwIqOZPTS1oxvbI9uxMrfRcraNUvhSUE5vOhx0bJ35V2LmceV5X7lFQeJH1/EfQUAAJ3xhBpjKe1JbdoE75ar7pMYu/j8Oz4keTXYySo64oRTWsfES9dFj7v+zSf0FVlJin+k7ddr/0Ug1gEAQH+YUGNgRUY+0s7XUyyiOZmObxcw5m7xjHEvmwcAAHpD5AMDKyLykeV8PcUurJK8nTD+vK+t3/EPO/IBAAD6wxNq9GWYKx7GHfmu61Tbt6by8oKuecXhbW3nnvRXanhVJavoj094pGr7VlVeXtTTzj669QXD8q5FVc7pvIpiNK7Ri+iT50mYoBLxAAAgH0yo0Zc8Ywu9xg9q+9Zar/F+GmGco+FV1cJSeLV9q2qsDV6to5cxT9pKiQAAoH9EPgAAAIABMKFGX/LMAfea5y0vL7Re48eVwnx0ySqtcnjl5cW+y9/1m0GehAw1AADIB5EPZJK0GmKeklYM7FR27ppXHN4xp3zCwpzkc5LN6bo3HNNxxca0vHO/bZ2MsjQeOWkAAIrHhBqZFLnKYS8547S2ar2+VRLPq5myykVcT9cx5nyufvYDAAD5IfKBTPKKRfRTWi6tbVvsolkSzyo9xzOKaMszUtJP5AMAABSPJ9ToaJil8eKxiCPe+u1Wybtrz3xQW9vvPP7fVG/UNFcq63PPPF61alD1o1xZ0NPf8PDWcXpd9/P1em1px/XbZ96IeQAAMFxMqNHRsGILSftFS97F9603apKkeqPWmkxLUq26pkbCcXmUv8vz2vJuSxs/AAAoHpEPAAAAYAA8oUZL0ZU8etGYL6m03ghe1zbVWJiRJJXWNjUnV12mObkac6ZS3YNj5kyNhZJKaw01Fqb774pJ1U+anwMAgOFiQo2WcaoQce1ZD25tH/57/9vW9qLn39zaftsZvxI78r5FDmtskJMGAGB8TPdjPAAAAKBgTKjRUuSKgWlt3fqPr3KYVBovbVxFr4Y4rHNTDg8AgPFE5GOHK6I0XtbSckf+xc2qVddUrixIm4228neXveQ+kkpa3VzT7D3vqdn5oG12eUHPeMTPq7ZvLViC/JW5DTuTYZbGI9YBAMBkYEK9ww1zRb94W3MCXauuSZuNVlutuqbNRvDLk81GSbV9kdJ4se0s5epGcW159w8AAMYXkY8dbpgr+m2LclQWJAVPpJvbzfczpWCCPVNqBE+im23LC6335eWFkcYuij43AACYDDyh3oGKXgEx61Pva158D1U35lSZrau6Ecsnz8yp2qhrcWZB17zyiG3l/Jrjj/eZxzVFx1/0xJZYBwAAk2/oE2ozO1jS30s6QJJLer+7v8fM7inpo5IOlfQdSc9y9x8Ne3w7QdKEt4joQ9dxhJPo+GS60xi7nbvTcZOyUiIAAJhMo4h8bEh6hbsfJekRkl5iZkdJOlPSF939SElfDN8DAAAAY23oE2p3v8Xdvxlu75V0laT7SzpZ0gfD3T4o6WnDHttOMczScqnjmK23vXYbY9K5i7i2pP767ZOcNAAA02+kGWozO1TS8ZK+LukAd78lbLpVQSQEOSh6SfGv/Or/k7wavLGKHvOJ32nljw979R7V9q5KCmtIe0O1vasq71pUaX5RtX2rKi8v6ro3HNM2xsPOurC13w3nnNh2vnhb3tdWdGk8ctMA
AEyXkU2ozWxZ0vmSXu7uPzGzVpu7u5l5h+NOl3S6JB1yyCHDGOrEK7y0XHMyLUlebcsxNyfTkoJtb7S2S2Hxjtq+1e0l9cLjant7a8v92gZsGyRjDgAAJsNIyuaZ2ZyCyfSH3f3j4ce3mdmBYfuBkm5POtbd3+/uu9199/777z+cAU+4wsu7NVculNpWL6zMbV/lsPm+vGtR5eVwe3lxW//R/Xppm4SyeQAAYLqMosqHSfqApKvc/d2Rpgsk/Yakt4avnxr22KbJMEvj3bopNcLfJ5SsPZIxe8ABmq2EqxxWFnTNyx/QNq5OY2xGOXptG0dEPAAAmG6jiHw8StIpki4zs0vCz16tYCL9MTN7oaTvSnrWCMY2NXotOzdIablGJPLR8GpbJGN2ObKyYXUtcVyTslphHucGAADTZ+gTanf/L0nWoflxwxzLNIt/sa7TwihJ22n7RftuKlmlNakuWUXlXYtbT6grC60lxsuVhcRx9XruXtuK7j/LuQEAwPRipcQpUXQlj2aMQ1JbdY3K3JzO+ukTO7Zd/Gt/01YB5PjzfzP3sQ0T8Q0AABDHhHpKFL2iX7xaR1olj7ZYR0oFkF7HWNS19dMGAADQNJIqHwAAAMC0YEI9JYpe0S9e/i5rW1pJvVGWruu3DQAAII7IBzqKxjPu/6VlrTaCH5fF0qJu+PrWfmtPebhWa+uSpFJ5vu2448//zUKz3UUjMw0AALphQj0lisxQS9JqY7VtOzppXqk1Wm0rtXVFf/ExDisZ5tU/AABAEiIfU6LoFf0WS4tt29HjlsrzrbbodtZxjXPkAwAAoBueUE+wvFdD/PTJfyb3fZIks2W99Kd/tlVP+tZdu1TbNyNJKi/PSGdtHbfwr99QI6z0sbBrUZWfYyVDAACwczChnmC9roaY1lat11uTaUly39e24uGMbU2Oa/t6KJuX8dyDjj+v/gEAAHpF5AMAAAAYABPqCZZ3zthsubWf2XKrHF5516LKy5HSeMvZy+ZNQoYaAABgEEQ+JkjRy4sfPLssuQVvrKIb3rK7LQvd6dzNpcaLGlcRyE0DAIC8MKGeIEVmkLMuE17YuUfUPwAAwKCIfEyQomMRWVY1LOrcw+4fAAAgLzyhHnN5l8aTpA/9ynvV8KpKVtEbHvV41fatqry8qKe/8UTV9gUVO8rLi9LZuZ1y5Ih4AACAojChHnNFxC4aYbSj4dXWBLq2b1W+vt7aL14aL69zF9VHL/0DAADkicjHmCsiFlEKox0lq7Sqd5SX0yt5THrkAwAAoCg8oZ4S8Sey0UlkvO2Y+UXJNyVb1LVnPihTJY9JRMwDAAAMAxPqMddP7CLtOElb1TyGWMkjrW0Y5wYAACgKkQ8AAABgAEyox1y/q/2l5oeb5fGGWBovra2o/gEAAIaByMeY6Xc1xMrcXNtxT3v0hVprrGqhtKgrTjlUteqaJKlcWdAzPnSyatU1lSsL0mn5X8MokZsGAADDxoR6zOSVM15rBOXw1hqrrcm0pGB7dqa1Pa0ZagAAgGEh8jFm8oo+LJSCEngLpcXgSXSoXFlovS9XFqYu8gEAADBsPKEeA0WshviG+1weVPOwio449fi2tmY8ZBomoEQ8AADAqDGhHgOFxC4ipfGynG8aIh8AAACjQORjDBQSfYhU8shyvkmNfAAAAIwaT6hHJI+Yx/uecq42vaoZq+jtxxyt2t7gi4jlXYt6xvue3KrkUfnt9gog44joBgAAmFRMqEckj9jFZhjn2PRqazItSbW9q5oNK3tEK3mk9TlOkQ8AAIBJQuQDAAAAGAAT6hHJI0s8E+ajZ6yi8q7FVlt512Jiaby0PkedoQYAAJhURD6GpN8VENP87KIkl2TSDeecuK3/QTPa0VhKXmMmKw0AAKYNE+ohKSTHHCmNN2if3VYdzDt7DQAAMC2IfAxJEbGLaGm8IkrX9Tr+XvsEAACYBjyhLlAepfG+9WvntlY8fNqD2kvjPe2s4H1516J0Tl6j3tLv
iorEOgAAwE7ChLpAva6A2C3WES+NF90uMvJBrAMAAKAzIh8AAADAAJhQFyiXHHMkJ72tNF74vrxrcaTLf5OTBgAAOxmRjzHTykxLbbnp8q7FQkrj9YucNAAAQIAJdYH6ylA3J9NSW246j5x013P30QcAAMBOR+SjQH2tGNiMeEhtMY88Yh3dzk2sAwAAoHc8oc5Rv6sh7nn6+1ul8X71p05oK413wzknjCzWEUXEAwAAIBkT6hz1HadIKY3Xa2yk37Ze+gAAAMAWIh856jd2kVbJo4gqHP1EPgAAAJCMJ9QD6nc1xEM+U1ettq5yWXr7LzxU642a5ktlXf/mE/qKjRSBmAcAAEB3TKgH1G8ko1ZblyTVautab9QkSeuN2lBXMqSSBwAAwOCIfAAAAAADYEI9oH5zzOXyvCSpXJ7XfKksSZovlQvNSae1URoPAACgP0Q+RuRfPvChVqm8Y88/rdDSeGShAQAAisOEekB9l7WLlMobVmk8AAAA5I/Ix4D6XmkwUiqv6FgHAAAAisMT6h71shrikX92o2rVNZUrCzr7C/+iRvhUumQVHXv+i3ONeRDrAAAAGA0m1D3qpexcrbomSapV11qTaUlqdIh5EOsAAACYPEQ+etRTJY/KgiSpXFlQqRnxUPCEup/VCol1AAAAjB+eUPcoHq146t1PUW3vqsq7FvXJb1++9WVDq+hYbVXvOOVfXjzwCojEOgAAAMYPE+oB1faubr1GYh39VO8g1gEAADB5iHwAAAAAA2BCPaDyrsWt10hOup9yeOSkAQAAJg+Rjx41bju+LSctHd1qa654KJGTBgAA2CnG7gm1mT3JzK42s2vN7MxRj2ebWE46mqGOZ56jGeosbeSkAQAAJs9YTajNbEbSX0j6ZUlHSXqumR012lHFxGId0cjHoKscEusAAACYPOMW+ThR0rXufr0kmdl5kk6WdOVIRxVROuDitvcX3DWigQAAAGAsjNUTakn3l3Rj5P1N4WctZna6me0xsz133HHHUAcHAAAAxI3bhLord3+/u+92993777//qIcDAACAHW7cJtQ3Szo48v6g8DMAAABgLI3bhPobko40s8PMbF7ScyRdMOIxAQAAAB2N1ZcS3X3DzH5H0uckzUg6192vGPGwAAAAgI7GakItSe7+GUmfGfU4AAAAgCzGLfIBAAAATBQm1AAAAMAAmFADAAAAA2BCDQAAAAyACTUAAAAwACbUAAAAwACYUAMAAAADYEINAAAADIAJNQAAADAAJtQAAADAAJhQAwAAAAMwdx/1GPpmZndI+u6ITn9vST8Y0bmnGfe1GNzXYnBfi8F9LQb3tRjc1/yN6z19gLvvn9Qw0RPqUTKzPe6+e9TjmDbc12JwX4vBfS0G97UY3NdicF/zN4n3lMgHAAAAMAAm1AAAAMAAmFD37/2jHsCU4r4Wg/taDO5rMbivxeC+FoP7mr+Ju6dkqAEAAIAB8IQaAAAAGAAT6h6Z2ZPM7Gozu9bMzhz1eCaVmR1sZl82syvN7Aoze1n4+evN7GYzuyT858mjHuukMbPvmNll4f3bE352TzP7gpldE77eY9TjnCRm9uDIz+QlZvYTM3s5P6+9M7Nzzex2M7s88lniz6cF/iz87+2lZvaw0Y18vHW4r+8ws/8L790nzGy/8PNDzawW+bn9y5ENfMx1uK8d/703s7PCn9erzeyXRjPq8dfhvn40ck+/Y2aXhJ9PxM8rkY8emNmMpG9LeoKkmyR9Q9Jz3f3KkQ5sApnZgZIOdPdvmtkuSRdJepqkZ0na5+7vHOX4JpmZfUfSbnf/QeSzt0u6093fGv5F8B7u/qpRjXGShf8duFnSz0g6Vfy89sTMfl7SPkl/7+5Hh58l/nyGE5WXSnqygvv9Hnf/mVGNfZx1uK9PlPQld98ws7dJUnhfD5X06eZ+6KzDfX29Ev69N7OjJH1E0omS7ifp3yU9yN03hzroCZB0X2Pt75J0l7u/cVJ+XnlC3ZsTJV3r
7te7+7qk8ySdPOIxTSR3v8Xdvxlu75V0laT7j3ZUU+1kSR8Mtz+o4C8v6M/jJF3n7qNaVGqiuft/SLoz9nGnn8+TFfwfrrv71yTtF/5lHDFJ99XdP+/uG+Hbr0k6aOgDm3Adfl47OVnSee6+5u43SLpWwbwBMWn31cxMwcO1jwx1UANiQt2b+0u6MfL+JjEJHFj4t8/jJX09/Oh3wl9Rnks0oS8u6fNmdpGZnR5+doC73xJu3yrpgNEMbSo8R+3/oefndXCdfj75b25+TpP02cj7w8zsYjP7qpk9elSDmmBJ/97z85qPR0u6zd2viXw29j+vTKgxUma2LOl8SS93959Iep+kwyUdJ+kWSe8a3egm1s+5+8Mk/bKkl4S/WmvxIOdF1qsPZjYv6amS/in8iJ/XnPHzmT8ze42kDUkfDj+6RdIh7n68pN+X9I9mdrdRjW8C8e99sZ6r9ocWE/HzyoS6NzdLOjjy/qDwM/TBzOYUTKY/7O4flyR3v83dN929Iemvxa/LeubuN4evt0v6hIJ7eFvzV+Xh6+2jG+FE+2VJ33T32yR+XnPU6eeT/+YOyMxeIOkkSc8L/7KiMJLww3D7IknXSXrQyAY5YVL+vefndUBmNivp6ZI+2vxsUn5emVD35huSjjSzw8InVc+RdMGIxzSRwozUByRd5e7vjnwezUf+qqTL48eiMzOrhF/ylJlVJD1RwT28QNJvhLv9hqRPjWaEE6/tyQk/r7np9PN5gaTnh9U+HqHgS0q3JHWA7czsSZL+UNJT3X0l8vn+4ZdrZWYPlHSkpOtHM8rJk/Lv/QWSnmNmC2Z2mIL7euGwxzfhHi/p/9z9puYHk/LzOjvqAUyS8JvSvyPpc5JmJJ3r7leMeFiT6lGSTpF0WbM0jqRXS3qumR2n4Fe+35H0W6MY3AQ7QNIngr+vaFbSP7r7v5nZNyR9zMxeKOm7Cr7wgR6Ef0F5gtp/Jt/Oz2tvzOwjkh4j6d5mdpOk10l6q5J/Pj+joMLHtZJWFFRVQYIO9/UsSQuSvhD+N+Fr7n6GpJ+X9EYzq0tqSDrD3bN+8W5H6XBfH5P07727X2FmH5N0pYKIzUuo8JEs6b66+we0/Tsq0oT8vFI2DwAAABgAkQ8AAABgAEyoAQAAgAEwoQYAAAAGwIQaAAAAGAATagAAAGAATKgBAACAATChBoCQmbmZvSvy/g/M7PU59f13ZvaMPPrqcp5nmtlVZvblAs9xnJk9uaj+U877z+HCDjKz75jZ+ZG2Z5jZ34XbJ5nZG4c9PgA7FxNqANiyJunpZnbvUQ8kKlyON6sXSnqRu/9iUeORdJyCBVe26XGsmZnZT0uacffoCmknmNlRCbv/q6RfMbOlIsYCAHFMqAFgy4ak90v6vXhD/Amzme0LXx9jZl81s0+Z2fVm9lYze56ZXWhml5nZ4ZFuHm9me8zs22Z2Unj8jJm9w8y+YWaXmtlvRfr9TzO7QMHKa/HxPDfs/3Ize1v42dmSfk7SB8zsHbH9H2NmXwmf8v6fmX3YwuXzzOyE8BouMrPPNZdWDvffHW7fO3wqPC/pjZKebWaXmNmzzez1ZvYhM/tvSR8ys0PN7Evh9XzRzA6J3MM/M7P/Ce/VM8LPDzSz/wj7u9zMHp3wZ/M8bS1J3vQuSa+J7+jBimVfkXRSQj8AkDsm1ADQ7i8kPc/M7t7DMcdKOkPST0k6RdKD3P1ESX8j6aWR/Q6VdKKkp0j6SzNbVPBE+S53f7ikh0t6kZkdFu7/MEkvc/cHRU9mZveT9DZJj1XwtPjhZvY0d3+jpD2Snufur0wY5/GSXi7pKEkPlPQoM5uT9OeSnuHuJ0g6V9Ifd7pQd1+XdLakj7r7ce7+0bDpKEmPd/fnhv190N2PkfRhSX8W6eJABZP+kxQsOS5Jvy7pc+5+XHgvL0k49aMkXRT77GOSHmZmRyTsv0dS0sQcAHJXyK/mAGBSuftPzOzvJf2upFrGw77h7rdIkpldJ+nz4eeX
SYpGLz7m7g1J15jZ9ZIeIumJko6JPP2+u6QjJa1LutDdb0g438MlfcXd7wjP+WFJPy/pk13GeaG73xQec4mCCf6PJR0t6QvhA+sZSbdkueiYC9y9eb8eKenp4faHJL09st8nw3twpZkdEH72DUnnhpP7T7r7JQn9Hyjpjthnm5LeIeksSZ+Ntd0u6X59XAcA9Iwn1ACw3Z8qeHJciXy2ofC/mWZWkjQfaVuLbDci7xtqf3DhsfO4JJP00vBp73Hufpi7Nyfk1UEuIkF0nJvh2EzSFZHzP9Tdnxju07pmSYtd+s461ugYTJLc/T8U/IXgZkl/Z2bPTziu1mEMHwqPPTj2+aKy/4UIAAbChBoAYtz9TgVxghdGPv6OpBPC7adKmuuj62eaWSnMVT9Q0tWSPifpt8OnszKzB5lZJa0TSRdK+oUw1zwj6bmSvtrHeBSOYX8ze2R4/rnwC4BS+zVHK5TslbQrpc//kfSccPt5kv4zbQBm9gBJt7n7XyuIyTwsYberJG2Ldrh7XdKfaHvu/UGSLk87LwDkhQk1ACR7l6RotY+/VjCJ/ZaCSEM/T4+/p2Ay/FlJZ7j7qoIJ5JWSvmlml0v6K3WJ44XxkjMlfVnStyRd5O7xL+xlEmainyHpbeG1XSLpZ8PmdyqY7F+s9nvxZUlHNb+UmNDtSyWdamaXKsiUv6zLMB4j6VvheZ4t6T0J+/xruF+SD2j7PfvF8BgAKJwFX4YGAGB8mVlZwUT+Ue6+2WXfAyT9o7s/biiDA7DjMaEGAEwEM/slSVe5+/e67PdwSfUOX24EgNwxoQYAAAAGQIYaAAAAGAATagAAAGAATKgBAACAATChBgAAAAbAhBoAAAAYwP8PhhLrqnAMLTwAAAAASUVORK5CYII=\\n\",\n      \"text/plain\": [\n       \"<Figure size 864x576 with 1 Axes>\"\n      ]\n     },\n     \"metadata\": {\n      \"needs_background\": \"light\"\n     },\n     \"output_type\": \"display_data\"\n    }\n   ],\n   \"source\": [\n    \"import matplotlib.pyplot as plt\\n\",\n    \"\\n\",\n    \"xyc = []\\n\",\n    \"for name, base in nuclideBases.byName.items():\\n\",\n    \"    if not base.a:\\n\",\n    \"        continue\\n\",\n    \"    xyc.append((base.a - base.z, base.z, base.abundance or 0.5))\\n\",\n    \"x, y, c = zip(*xyc)\\n\",\n    \"plt.figure(figsize=(12, 8))\\n\",\n    \"plt.scatter(x, y, c=c, marker=\\\"s\\\", s=6)\\n\",\n    \"plt.title(\\\"Chart of the nuclides\\\")\\n\",\n    \"plt.xlabel(\\\"Number of neutrons (N)\\\")\\n\",\n    \"plt.ylabel(\\\"Number of protons (Z)\\\")\\n\",\n    \"plt.show()\"\n   ]\n  }\n ],\n \"metadata\": {\n  \"kernelspec\": {\n   \"display_name\": \"Python 3 (ipykernel)\",\n   \"language\": \"python\",\n   \"name\": \"python3\"\n  },\n  \"language_info\": {\n   \"codemirror_mode\": {\n    \"name\": \"ipython\",\n    \"version\": 3\n  
 },\n   \"file_extension\": \".py\",\n   \"mimetype\": \"text/x-python\",\n   \"name\": \"python\",\n   \"nbconvert_exporter\": \"python\",\n   \"pygments_lexer\": \"ipython3\",\n   \"version\": \"3.9.13\"\n  },\n  \"varInspector\": {\n   \"cols\": {\n    \"lenName\": 16,\n    \"lenType\": 16,\n    \"lenVar\": 40\n   },\n   \"kernels_config\": {\n    \"python\": {\n     \"delete_cmd_postfix\": \"\",\n     \"delete_cmd_prefix\": \"del \",\n     \"library\": \"var_list.py\",\n     \"varRefreshCmd\": \"print(var_dic_list())\"\n    },\n    \"r\": {\n     \"delete_cmd_postfix\": \") \",\n     \"delete_cmd_prefix\": \"rm(\",\n     \"library\": \"var_list.r\",\n     \"varRefreshCmd\": \"cat(var_dic_list()) \"\n    }\n   },\n   \"types_to_exclude\": [\n    \"module\",\n    \"function\",\n    \"builtin_function_or_method\",\n    \"instance\",\n    \"_Feature\"\n   ],\n   \"window_display\": false\n  }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 2\n}\n"
  },
  {
    "path": "doc/tutorials/param_sweep.nblink",
    "content": "{\n\t\"path\": \"../../armi/tests/tutorials/param_sweep.ipynb\",\n\t  \"extra-media\": [\n        \"../armi/testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml\",\n        \"../armi/testing/reactors/anl-afci-177/anl-afci-177.yaml\",\n        \"../armi/testing/reactors/anl-afci-177/anl-afci-177-coreMap.yaml\",\n        \"../armi/testing/reactors/anl-afci-177/anl-afci-177-fuelManagement.py\"\n      ]\n}\n"
  },
  {
    "path": "doc/tutorials/pin-rotations.nblink",
    "content": "{\n    \"path\": \"../../armi/tests/tutorials/pin-rotations.ipynb\",\n    \"extra-media\": [\n        \"../../armi/testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml\",\n        \"../../armi/testing/reactors/anl-afci-177/anl-afci-177.yaml\",\n        \"../../armi/testing/reactors/anl-afci-177/anl-afci-177-coreMap.yaml\",\n        \"../../armi/testing/reactors/anl-afci-177/anl-afci-177-fuelManagement.py\"\n      ]\n}"
  },
  {
    "path": "doc/tutorials/walkthrough_inputs.rst",
    "content": ".. _walkthrough-inputs:\n\n***************************************\nBuilding input files for a fast reactor\n***************************************\n\nThe true power of ARMI comes when you have a reactor at your fingertips. To get this,\nyou must describe the reactor via input files.\nThis tutorial will walk you through building input files from scratch for a reactor.\nWe will model the CR=1.0 sodium-cooled fast reactor documented in `ANL-AFCI-177\n<https://publications.anl.gov/anlpubs/2008/05/61507.pdf>`_. The full :doc:`documentation\nfor input files is available here </user/inputs>`.\n\n.. tip:: The full inputs created in this tutorial are available for download at the bottom of\n\tthis page.\n\nSetting up the blueprints\n=========================\nFirst we'll set up the fuel assembly design in the blueprints input. Make a new\nfile called ``anl-afci-177-blueprints.yaml``. We'll be entering information\nbased on Table 4.4 of the reference. To define the pin cell we need dimensions\nof the fuel pin, cladding, ducts, wire wrap, and so on.\n\nThe cladding dimensions are clear from the table. The outer diameter is given\nas the pin diameter, and the inner diameter is simply that minus twice the\ncladding thickness. We will use the ``Circle`` shape, and make the material\n``HT9`` steel. Since we're inputting cold dimensions, we'll set ``Tinput`` to\nroom temperature and let ARMI thermally expand the clad up to an average\noperating temperature of 450 °C. Lastly, since there are 271 pins in the\nassembly, we'll set the ``mult`` (short for *multiplicity*) to 271:\n\n\n.. literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml\n    :language: yaml\n    :start-after: start-block-clad\n    :end-before: end-block-clad\n\n\n.. 
note:: In fast reactors, neutrons aren't as affected by spatial details at a\n    pin level, and compositions are often quite spatially flat across an assembly.\n    Thus we can often just copy a component using the ``mult`` input equal to the\n    number of pins, and neutronic modeling is sufficient. For subchannel T/H, the\n    spatial details are of course much more important.\n\n.. note:: The ``&block_fuel`` is a YAML anchor which will be discussed more below.\n\nNext, let's enter the wire wrap. This is a helical wire used in fast reactors\nto mix coolant and keep pins separate (used in lieu of a grid spacer). ARMI has\na special shape for this, called a ``Helix``. Helices are defined by their\naxial pitch (how much linear distance between two wrappings axially), the wire\ndiameter, and the diameter of the pin they're wrapping around (called\n``helixDiameter``).  Thus, we input the wire wrap into the blueprints as\nfollows.\n\n.. note:: The wire axial pitch isn't specified in the table so we just use a typical value of 30 cm.\n\n.. literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml\n    :language: yaml\n    :start-after: end-block-clad\n    :end-before: end-block-wire\n\n\nWe set the wire inner diameter to 0 to make it a solid wire. If we set it to\nsomething non-zero, the wire itself would be hollow on the inside, which would\nbe crazy.\n\nNow, it's time to do the fuel. This example reactor uses UZr metal fuel with a\nliquid sodium thermal bond in the large gap between the fuel and the cladding.\nThe fraction of space inside the clad that is fuel is called the \"smeared\ndensity\", so we can figure out the actual fuel slug dimensions from the\ninformation in the table.\n\nSpecifically, the smeared density is 75%, which means that 75% of the area\ninside the circle made by the inner diameter of the cladding (0.6962 cm) is\nfuel. Thus, the fuel outer diameter is given by solving:\n\n.. 
math::\n       0.75 = \\frac{\\pi d^2}{\\pi 0.6962^2}\n\nwhich gives :math:`d = 0.6029`, our fuel outer diameter. Now we can enter our\nfuel slug component into blueprints:\n\n.. literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml\n    :language: yaml\n    :start-after: end-block-wire\n    :end-before: end-block-fuel\n\n\n.. note:: We upped the hot temperature to 500 °C, indicative of the fact that fuel will be running a bit hotter than\n        cladding.\n\nLet's enter a description of the thermal bond next. This is an annulus of\nsodium between the fuel and the cladding.  Since those dimensions are already\nset, we will use **linked dimensions**. Thus, no numbers (beyond temperatures)\nare needed!\n\n\n.. literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml\n    :language: yaml\n    :start-after: end-block-fuel\n    :end-before: end-block-bond\n\n\nThe next physical component we need to model is the hexagonal assembly duct.\nThis information is provided in Table 4.3 of ANL-AFCI-177. For the ``Hexagon``\nshape, we enter inner and outer flat-to-flat distances (\"pitch\") instead of\ndiameters. The outer pitch is given as ``15.710``, and we can calculate the\ninner pitch from that and the duct thickness. It ends up looking like this:\n\n\n.. literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml\n    :language: yaml\n    :start-after: end-block-bond\n    :end-before: end-block-duct\n\n\nIt's essential to capture the spacing between adjacent ducts too (the assembly\npitch, also defined in Table 4.3), and we define this by defining a special\n``Hexagon`` full of interstitial coolant outside the duct:\n\n\n.. literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml\n    :language: yaml\n    :start-after: end-block-duct\n    :end-before: end-block-intercoolant\n\n\nThat defines everything in our assembly except for the coolant. 
The shape of\nthe coolant is geometrically complex: it's a hexagon with holes punched\nthrough it (one for each cladding tube/wire wrap). Rather than explicitly\ndefining this shape, ARMI allows you to input a ``DerivedShape`` in certain\nconditions (e.g. when the rest of the assembly is filled and only one\n``DerivedComponents`` is defined). It can simply back-calculate the area of this\nshape automatically. And that's just what we'll do with the coolant:\n\n\n.. literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml\n    :language: yaml\n    :start-after: end-block-intercoolant\n    :end-before: end-block-coolant\n\n\nAnd that completes our generic fuel block description.\n\nDefining non-fuel blocks\n------------------------\nFor this core model, we will need some reflectors, shields, and control blocks\nas well. In detailed models, it can often be important to model these in\ndetail. For this example, we'll keep it simple. Control blocks will simply be\nfilled with sodium (representing an all-rods-out condition), reflectors will\njust be full pins of HT9 steel with coolant around them, and shields will be\nunclad B4C in sodium. Normally the pin sizes would be different, but again for\nsimplicity, we're just duplicating the pin dimensions.\n\nFor brevity, we will simply provide the definitions as described.\n\nRadial Shields\n^^^^^^^^^^^^^^\nHere is a very simplified radial shield:\n\n\n.. literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml\n    :language: yaml\n    :start-after: end-block-coolant\n    :end-before: end-block-radialshield\n\n\nReflectors\n^^^^^^^^^^\nHere is a reflector block definition. We can use this for radial reflectors and\naxial reflectors. We include wire wrap so the axial reflector will work with\nour basic thermal hydraulic solver:\n\n.. 
literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml\n    :language: yaml\n    :start-after: end-block-radialshield\n    :end-before: end-block-reflector\n\n\nControl\n^^^^^^^\nHere is a big empty sodium duct (what you'd find below a control absorber bundle):\n\n\n.. literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml\n    :language: yaml\n    :start-after: end-block-reflector\n    :end-before: end-block-control\n\n\nPlenum\n^^^^^^\nWe also need to define empty cladding tubes above the fuel for the fission\ngasses to accumulate in. This just has a ``gap`` component made of the ``Void``\nmaterial, which is just empty space:\n\n.. literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml\n    :language: yaml\n    :start-after: end-block-control\n    :end-before: end-block-plenum\n\n\nThat should be enough to define the whole core.\n\nDefining how the blocks are arranged into assemblies\n----------------------------------------------------\nWith block cross-sections defined, we now set their heights and stack them up\ninto assemblies. While we're at it, we can conveniently adjust some\nfrequently-modified material parameters, such as the uranium enrichment.\n\nDefining the fuel assemblies\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nThere are three fuel assemblies defined in ANL-AFCI-177, each with different\nenrichments. We can specify some assembly data to be shared across all\nassemblies and just overlay the differences. We define the ``assemblies``\nsection of the blueprints input file. We get core and plenum height from table\n4.4, and split the core into 5 equally-sized sections at 20.32 cm tall each.\nThis defines the depletion mesh. Each of these 5 blocks will deplete and\naccumulate state independently. 
In the ``axial mesh points`` section, we\nspecify a roughly even neutronic/transport mesh, with slightly larger neutronic\nmesh points in the very tall single-block plenum:\n\n\n.. literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml\n    :language: yaml\n    :start-after: end-block-plenum\n    :end-before: end-assemblies-common\n\n\nNow that the common heights and neutronic mesh are specified, we start applying\nthem to the various individual assemblies. We start with the inner core and\nrefer to the heights and mesh with YAML anchors. As described in Section 2.0 of\nthe reference, an enrichment splitting of 1.0, 1.25, and 1.5 was used for\ninner, middle, and outer core in order to help minimize radial power peaking.\nThe specific enrichments of each zone are shown in Table 4.8. For simplicity,\nlet's just use these as uranium enrichments rather than the detailed material\nfrom the paper. Specifying more details is possible via the **custom\nisotopics** input fields.:\n\n\n.. literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml\n    :language: yaml\n    :start-after: end-assemblies-common\n    :end-before: end-assemblies-ic\n\n\n.. warning:: The weirdest thing about this input section is the use of YAML\n    anchors for the blocks. Under the hood, this copies the entire block definition\n    into each entry of that list. This is a bit strange, and we plan to switch this\n    to a string-based block name rather than a full YAML anchor in the ``blocks``\n    list.\n\n.. note:: Notice the blank strings in the ``U235_wt_frac`` section? Those are\n    placeholders indicating that the material in those blocks does not have uranium\n    in it, and thus adjusting uranium enrichment doesn't make sense. These are the\n    axial reflectors, plena, grid plates, etc.\n\nFor the middle core, we can use the same stack of blocks (using an anchor), but\nwe need different enrichments. 
We can choose whether or not to use the same\n``xs types``. When composition is different, one often uses independent cross\nsection types so you get cross sections specific to different enrichments. This\nis a trade-off, since more cross section types means more lattice physics\ncalculations, which can require either more time or more processors:\n\n\n.. literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml\n    :language: yaml\n    :start-after: end-assemblies-ic\n    :end-before: end-assemblies-mc\n\n\nSame deal for the outer core.\n\n.. note:: The columnar form of YAML lists is very convenient when using text editors with column-edit capabilities. It\n        is highly recommended to make sure you know how to column edit.\n\n\n.. literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml\n    :language: yaml\n    :start-after: end-assemblies-mc\n    :end-before: end-assemblies-oc\n\n\nDefining the non-fuel assemblies\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nLet's make some shield, reflector, and control assemblies. It's fine for these\nto have different numbers of blocks. Some physics kernels (like DIF3D) have\nsome requirements of axial mesh boundaries at least lining up between\nassemblies, but there are some ARMI features that can automatically adjust the\nmesh if you have very complicated assemblies:\n\n.. literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml\n    :language: yaml\n    :start-after: end-assemblies-oc\n    :end-before: end-assemblies-rr\n\n\n.. note:: Here we just reuse the fuel block cross sections. In more precise models, a different approach\n\tmay be used.\n\nHere is the radial shield:\n\n.. literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml\n    :language: yaml\n    :start-after: end-assemblies-rr\n    :end-before: end-assemblies-sh\n\n\nHere are the control blocks:\n\n.. 
literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml\n    :language: yaml\n    :start-after: end-assemblies-sh\n    :end-before: end-assemblies-section\n\n\nAnd that's it! All blueprints are now defined.\n\nSpecifying the core map\n=======================\nWith blueprints defined we can now arrange assemblies into the core. This is with the geometry input file.\n\n.. note:: There are GUI tools to help making the core map easy to set up.\n\n.. note:: We plan to converge on consistent input between pin maps and core maps for the physics kernels\n        and analyses that require finer detail of how the pins are arranged within blocks.\n\nGeometry can be input various ways. The most straightforward is to provide a\nsimple ASCII-based map of the core.  For this problem, a 1/3 hexagonal model\ncan be input as follows (see Figure 4.3 in the reference). First, we refer to a\ngeometry file from the ``systems`` section of the ``blueprints`` file:\n\n\n.. literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml\n    :language: yaml\n    :start-after: end-assemblies-section\n    :end-before: end-systems-section\n\n\nAnd then, in the core map file (``anl-afci-177-coreMap.yaml``):\n\n.. literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177-coreMap.yaml\n    :language: yaml\n\n\n.. note:: The two-letter values here can be any contiguous strings, and\n    correspond with the ``specifier`` field in the blueprints input.\n\n.. note:: GUI utilities are also useful for building core maps like this.\n\n\nSpecifying settings\n===================\nNow we need to specify some **settings** that define fundamental reactor\nparameters, as well as modeling approximation options. For this, we make a\n**settings file**, called ``anl-afci-177.yaml``.\n\nThe thermal power in this reference is 1000 MWt. The thermal efficiency isn't\nspecified, so let's assume 0.38. 
From Table 4.8, the cycle length is 370 EFPD.\nLet's also assume a 0.90 capacity factor which will gives full cycles of 411.1\ndays.\n\n.. literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177.yaml\n    :language: yaml\n    :start-after: begin-settings\n    :end-before: end-section-1\n\nWe need to tell the system which other input files to load by bringing in the\nblueprints and geometry (the shuffling and fuel handler info will be described\nmomentarily):\n\n.. literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177.yaml\n    :language: yaml\n    :start-after: end-section-1\n    :end-before: end-section-2\n\nIn terms of our simulation parameters, let's run it for 10 cycles, with 2 depletion time steps per cycle:\n\n.. literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177.yaml\n    :language: yaml\n    :start-after: end-section-2\n    :end-before: end-section-3\n\nSet some physics kernel and environment options:\n\n.. literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177.yaml\n    :language: yaml\n    :start-after: end-section-3\n\n\n.. note:: The :ref:`ARMI GUI <armi-gui>` is simply an optional frontend to this settings file. Behind the scenes it just reads and writes\n        this. It is quite convenient for discovering important settings and describing what they do, however.\n\nDefining fuel management\n========================\nFinally, let's specify the fuel management file that we referred to above by\ncreating the file ``anl-afci-177-fuelManagement.py``. Fuel management is very\nwide-open, so we use Python scripts to drive it. It's generally overly\nconstraining to require any higher-level input for such a general problem.\n\nIn ANL-AFCI-177, section 2 says no shuffling was modeled, and that the core is\nin a batch shuffling mode, limited by a cladding fast fluence of 4.0e23 n/cm\\\n:sup:`2`. Often, SFR studies use the REBUS code's implicit equilibrium fuel\ncycle mode. 
There is an ARMI equilibrium module at TerraPower that performs\nthis useful calculation (with different inputs), but for this sample problem,\nwe will simply model 10 cycles with explicit fuel management.\n\nThe shuffling algorithm we'll write will simply predict whether or not the\nstated fluence limit will be violated in the next cycle. If it will be, the\nfuel assembly will be replaced with a fresh one of the same kind.\n\n\n.. literalinclude:: ../../armi/testing/reactors/anl-afci-177/anl-afci-177-fuelManagement.py\n    :language: python\n\n\nThere! You have now created all the ARMI inputs, from scratch, needed to\nperform a simplified reactor analysis of one of the SFRs in the ANL-AFCI-177\ndocument. The possibilities from here are only limited by your creativity, (and\na few code limitations ;).\n\nAs you load the inputs in ARMI it will provide some consistency checks and\nerrors to help identify common mistakes.\n\nHere are the full files used in this example:\n\n* :download:`Blueprints <../../armi/testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml>`\n* :download:`Settings <../../armi/testing/reactors/anl-afci-177/anl-afci-177.yaml>`\n* :download:`Core map <../../armi/testing/reactors/anl-afci-177/anl-afci-177-coreMap.yaml>`\n* :download:`Fuel management <../../armi/testing/reactors/anl-afci-177/anl-afci-177-fuelManagement.py>`\n\nThe next tutorial will guide you through inputs for a classic LWR benchmark problem (C5G7).\n"
  },
  {
    "path": "doc/tutorials/walkthrough_lwr_inputs.rst",
    "content": ".. _walkthrough-lwr:\n\n******************************************\nBuilding input files for a thermal reactor\n******************************************\n\nIn the :doc:`previous tutorial </tutorials/walkthrough_inputs>`,\nwe introduced the basic input files and made a full\ninput for a sodium-cooled fast reactor. In this tutorial, we will build simple\ninputs for the light-water reactor (LWR) benchmark problem called C5G7 as defined\nin `NEA/NSC/DOC(2003)16 <https://www.oecd-nea.org/upload/docs/application/pdf/2019-12/nsc-doc2003-16.pdf>`_.\nThe compositions are documented in\n`NEA/NSC/DOC(96)2 <https://www.oecd-nea.org/upload/docs/application/pdf/2020-01/nsc-doc96-02-rev2.pdf>`_.\n\n.. tip:: The full inputs created in this tutorial are available for download at the bottom of\n    this page.\n\n.. warning:: C5G7 is a problem with defined 7-group macroscopic cross sections. Rather than\n    Using those cross sections directly, this input is meant to regenerate them rather\n    than to using the provided macros directly.\n\n.. warning:: ARMI was historically developed in support of fast reactors and most\n    features have been used and tested in fast reactor contexts. This\n    tutorial shows that simple LWR cases can be defined in input,\n    but there is still a lot of work to make sure all ARMI capabilities\n    are operational in this context. Thus, be warned that\n    as of 2020, doing LWR analysis with ARMI will certainly require\n    new developments. 
We are excited to expand ARMI scope fully\n    into LWR relevant analysis.\n\n    In particular, the handling of detailed locations within a block is\n    relatively experimental (fast reactors usually just smear it out).\n\nSetting up the blueprints\n=========================\nThis tutorial is shorter than the previous, focusing mostly on the new information.\n\nCustom isotopic vectors\n-----------------------\nWhen using materials that differ in properties or composition from the\nmaterials in the ARMI material library, you can use custom isotopics\nto specify their composition. The composition details below are documented in Table 2 of\n`NEA/NSC/DOC(96)2 <https://www.oecd-nea.org/upload/docs/application/pdf/2020-01/nsc-doc96-02-rev2.pdf>`_.\n\n\n.. literalinclude:: ../../armi/testing/reactors/c5g7/c5g7-blueprints.yaml\n    :language: yaml\n    :start-after: start-custom-isotopics\n    :end-before: end-custom-isotopics\n\n.. tip:: Scripts that load the prescribed cross sections from the benchmark\n    into the ARMI cross section model could be written fairly easily, allowing\n    users to quickly evaluate this full benchmark problem with various global\n    solvers.\n\nThe UO2 block\n-------------\nNow we define the pins and other components of the UO2 block.\nWhat's new here is that we're pointing to custom isotopics\nin many cases, and we're using the ``latticeIDs`` input to add\ntextual specifiers, which will be used in the ``grids`` input section\nbelow to count and place the pins into a square-pitch lattice. Note that\nthe ``latticeIDs`` section is a list. The component will fill every\nposition in the grid that has any of the specifiers in this list.\n\nYou will see the `<<: *guide_tube` notation below. This means use the\nspecifications of guide_tube, but make the modifications that appear below.\n\n.. 
literalinclude:: ../../armi/testing/reactors/c5g7/c5g7-blueprints.yaml\n    :language: yaml\n    :start-after: end-custom-isotopics\n    :end-before: end-block-uo2\n\n.. note:: The dummy pitch component has no material and is simply used to\n    define the assembly pitch. In a future upgrade, this information will\n    be taken directly from the ``lattice pitch`` grid definition below.\n\nThe MOX block\n-------------\nThe next assembly is very similar. We define three separate fuel pins,\neach with different ``latticeIDs``, and then use YAML anchors to just\ncopy the moderator, guide tube, and fission chamber from the previous assembly.\n\n.. literalinclude:: ../../armi/testing/reactors/c5g7/c5g7-blueprints.yaml\n    :language: yaml\n    :start-after: end-block-uo2\n    :end-before: end-block-mox\n\nThe moderator block\n-------------------\nThe moderator block for the radial and axial reflectors is very simple:\n\n.. literalinclude:: ../../armi/testing/reactors/c5g7/c5g7-blueprints.yaml\n    :language: yaml\n    :start-after: end-block-mox\n    :end-before: end-block-mod\n\nThe 3-D Assembly definitions\n----------------------------\nNow that the pins are defined, we stack them into assemblies, very similar\nto what we did in the SFR tutorial. There are three distinct assembly definitions.\n\n.. literalinclude:: ../../armi/testing/reactors/c5g7/c5g7-blueprints.yaml\n    :language: yaml\n    :start-after: end-block-mod\n    :end-before: end-assemblies\n\nThe Systems definition\n----------------------\nThis problem only considers a core, so we will only have a core system in this\nproblem. If pumps, heat exchangers, spent fuel pools, etc were to be modeled,\nthey would be here alongside the core. We also anchor the core at the global\ncoordinates (0,0,0). If we wanted the core at some other elevation, we could\nadjust that here.\n\n.. 
literalinclude:: ../../armi/testing/reactors/c5g7/c5g7-blueprints.yaml\n    :language: yaml\n    :start-after: end-assemblies\n    :end-before: end-systems\n\nThe Grids definitions\n---------------------\nNow we define the core map and the assembly pin maps using the\ngeneric grid input section. In the previous tutorial, we loaded the grid definition\nfrom an XML file. In this tutorial, we define the grid directly with an\ntextual ``lattice map`` input section. The core map is particularly simple; it\nonly has 9 assemblies.\n\n.. literalinclude:: ../../armi/testing/reactors/c5g7/c5g7-blueprints.yaml\n    :language: yaml\n    :start-after: end-systems\n    :end-before: end-grid-core\n\nThe pin map for the UO2 assembly is larger, but still relatively straightforward.\nRecall that on the ``uo2`` block above we said that we want to apply the grid\nwith the name ``UO2 grid``, and wanted to fill any ``U`` position with\nthe ``fuel`` component defined up there. Here's where we define that grid.\n\n.. literalinclude:: ../../armi/testing/reactors/c5g7/c5g7-blueprints.yaml\n    :language: yaml\n    :start-after: end-grid-core\n    :end-before: end-grid-UO2\n\nSimilarly, we define the ``MOX grid`` as follows:\n\n.. literalinclude:: ../../armi/testing/reactors/c5g7/c5g7-blueprints.yaml\n    :language: yaml\n    :start-after: end-grid-UO2\n    :end-before: end-grid-MOX\n\nThis grid is more complex in that it has different enrichment zones throughout\nthe assembly.\n\nNuclide Flags\n-------------\n.. literalinclude:: ../../armi/testing/reactors/c5g7/c5g7-blueprints.yaml\n    :language: yaml\n    :start-after: end-grid-MOX\n    :end-before: end-nucflags\n\nThe default nuclide flags provided do not contain oxygen or hydrogen, but\nthese elements are present in the ``SaturatedWater`` material. 
Thus,\nwe list them in this input section, and specifically leave out\nthe trace isotope, ``O18``.\n\nThe settings file\n=================\nReally, the only thing the settings file does in this case is point to the blueprints\nfile. As we turn this case into an actual run, we may add various cross section\nand neutrons options to evaluate the benchmark.\n\n.. literalinclude:: ../../armi/testing/reactors/c5g7/c5g7-settings.yaml\n    :language: yaml\n\nDefining fuel management\n========================\nBy not defining any fuel management settings, we skip fuel management for\nthis benchmark problem entirely.\n\nThere! You have now created all the ARMI inputs, from scratch, needed to\nrepresent the C5G7 benchmark problem.\n\nOk, so now what?\n================\nYou can run the default ARMI app on these inputs, which will run a \nfew cycles and make an output database::\n\n    $ python -m armi run c5g7-settings.yaml\n\nBut since the baseline\napp doesn't do any real calculations, it won't have a lot in it.\nYou have to add plugins to do calculations (see the \n`plugin directory <https://github.com/terrapower/armi-plugin-directory>`_). \n\nOf course, you can fiddle around with the reactor in memory. For example, in an\nipython session, you can plot one of the assembly's pin locations.\n\n.. 
code-block:: python\n\n    import matplotlib.pyplot as plt\n    import armi\n    from armi.reactor.flags import Flags\n\n    armi.configure()\n\n    o = armi.init(fName = \"c5g7-settings.yaml\")\n    b = o.r.core.getFirstBlock(Flags.MOX)\n\n    flags = [Flags.LOW, Flags.MEDIUM, Flags.HIGH]\n    colors = [\"green\", \"yellow\", \"red\"]\n\n    for f, c in zip(flags, colors): \n        x, y=[], []\n        pin = b.getComponent(Flags.FUEL| f)\n        for loc in pin.spatialLocator:\n            xi, yi, zi = loc.getGlobalCoordinates()\n            x.append(xi)\n            y.append(yi)\n        plt.scatter(x, y, color=c)\n    plt.show()\n\n\nThis should show a simple representation of the block. \n\n.. figure:: https://terrapower.github.io/armi/_static/c5g7-mox.png\n   :figclass: align-center\n\n   A representation of a C5G7 fuel assembly.\n\n\nHere are the full files used in this example:\n\n* :download:`Blueprints <../../armi/testing/reactors/c5g7/c5g7-blueprints.yaml>`\n* :download:`Settings <../../armi/testing/reactors/c5g7/c5g7-settings.yaml>`\n"
  },
  {
    "path": "doc/user/_gallery/index.rst",
    "content": ".. This is just here as a placeholder to avoid 404s after we moved gallery up one. \n   A variety of sphinx extensions may support redirects but none seem mature yet.\n\n.. include:: /gallery/index.rst"
  },
  {
    "path": "doc/user/accessingEntryPoints.rst",
    "content": "**********************\nAccessing Entry Points \n**********************\n\nReports Entry Point\n===================\n\nThere are two ways to access the reports entry point in ARMI.\n\nThe first way is through a yaml settings file.\nHere, the call is as follows::\n\n    (venv) $ armi report anl-afci-177.yaml\n\nIt is also possible to call this on an h5 file::\n\n    (venv) $ armi report -h5db refTestBase.h5\n\n.. note:: When working with a h5 file, -h5db must be included\n\nOnce these are called, a report is generated and outputted as an html file in reportsOutputFiles.\n"
  },
  {
    "path": "doc/user/index.rst",
    "content": "#########\nUser Docs\n#########\n\nHere you will learn how to use :term:`ARMI`. It will cover getting and installing ARMI and its prerequisites, making\nARMI input files, running various kinds of ARMI runs, analyzing ARMI output files, etc.\n\n--------------\n\n.. toctree::\n   :maxdepth: 2\n   :numbered:\n\n   inputs\n   outputs\n   settings_report\n   params_report\n   manual_data_access\n   spatial_block_data\n   physics_coupling\n   accessingEntryPoints\n   radial_and_axial_expansion\n   symmetry_handling\n"
  },
  {
    "path": "doc/user/inputs.rst",
    "content": "******\nInputs\n******\n\nARMI input files define the initial state of the reactor model and tell ARMI what kind of analysis should be performed on it.\n\n.. note:: We have a :ref:`walkthrough-inputs` tutorial for a quick overview of the inputs.\n\nThere are several input files:\n\nSettings file\n    Contains simulation parameters (like full power, cycle length, and which physics modules to activate) and all kind of modeling approximation settings (e.g. convergence criteria)\n\nBlueprints file\n    Contains dimensions and composition of the components/blocks/assemblies in your reactor systems, from fuel pins to heat exchangers\n\nFuel management file\n    Describes how fuel moves around during a simulation\n\n\nDepending on the type of analysis, developers may create other input files for things like: control logic, ex-core models for transients and shielding, etc.\n\n\nYAML Files\n==========\nARMI's input files all use the `YAML <https://en.wikipedia.org/wiki/YAML>`_ format. This is a well-known file format, chosen because it is human-readable and easy to hand-write. That being said, there are two details about the YAML format that are important to know:\n\nOrdering\n    YAML is not order specific; however, one of the techniques used to limit the size of the input includes using YAML anchors to reuse block and component definitions. YAML anchors (e.g. ``&block_name``) must be defined before their corresponding alias (e.g. ``*block_name``) used.\n\nDuplicate Keys\n    YAML allows for duplicate keys. However, in ARMI, duplicates might be erroneous. Unfortunately, because the international YAML specification allows for duplicates, none of the YAML-parsing libraries see it as an error. 
You will have to hand-verify your inputs are correct.\n\n\nThe Settings Input File\n=======================\nThe **settings** input file defines a series of key/value pairs the define various information about the system you are modeling as well as which modules to run and various modeling/approximation settings. For example, it includes:\n\n* The case title\n* The reactor power\n* The number of cycles to run\n* Which physics solvers to activate\n* Whether or not to perform a critical control search\n* Whether or not to do tight coupling iterations\n* What neutronics approximations specific to the chosen physics solver to apply\n* Environment settings (paths to external codes)\n* How many CPUs to use on a computer cluster\n\nThis file is a YAML file that you can edit manually with a text editor or with the ARMI GUI.\n\nHere is an excerpt from a settings file:\n\n.. literalinclude:: ../../armi/tests/armiRun.yaml\n    :language: yaml\n    :lines: 1-14\n\nA full listing of settings available in the framework may be found in the\n:ref:`Table of all global settings <settings-report>` .\n\nMany settings are provided by the ARMI Framework, and others are defined by various plugins.\n\n.. _armi-gui:\n\nThe ARMI GUI\n------------\nThe ARMI GUI may be used to manipulate many common settings (though the GUI can't change all of the settings). The GUI also enables the graphical manipulation of a reactor core map, and convenient automation of commands required to submit to a cluster. The GUI is a front-end to these files. You can choose to use the GUI or not, ARMI doesn't know or care -- it just reads these files and runs them.\n\nNote that one settings input file is required for each ARMI case, though many ARMI cases can refer to the same Blueprints, Core Map, and Fuel Management inputs.\n\n.. 
tip:: The ARMI GUI is not yet included in the open-source ARMI framework, but a simple grid editor GUI is, as described in :ref:`grids`\n\nThe assembly clicker\n^^^^^^^^^^^^^^^^^^^^\nThe assembly clicker (aka the :py:mod:`Grid Editor <armi.utils.gridEditor>`) allows users to define the 2-D layout of the assemblies defined in the :ref:`bp-input-file`. This can be done in hexagon or cartesian. The results of this arrangement get written to grids in blueprints. Click on the assembly palette on the right and click on the locations where you want to put the assembly. By default, the input assumes a 1/3 core model, but you can create a full core model through the menu.\n\nIf you want one assembly type to fill all positions in a ring, right click it once it is placed and choose ``Make ring like this hex``. Once you submit the job or save the settings file (File -> Save), you will be prompted for a new name of the geometry file before the settings file is saved. The geometry setting in the main tab will also be updated.\n\nThe ARMI Environment Tab\n^^^^^^^^^^^^^^^^^^^^^^^^\nThe environment tab contains important settings about which version of ARMI you will run and with which version of Python, etc. Most important is the ``ARMI location`` setting. This points to the codebase that will run. If you want to run the released version of ARMI, ensure that it is set in this setting. If you want to run a developer version, then be sure to update this setting.\n\nOther settings on this tab may need to be updated depending on your computational environment. Talk to your system admins to determine which settings are best.\n\nSome special settings\n---------------------\nA few settings warrant additional discussion.\n\n.. _detail-assems:\n\nDetail assemblies\n^^^^^^^^^^^^^^^^^\nMany plugins perform more detailed analysis on certain regions of the reactor. Since the analyses often take longer, ARMI has a feature, called *detail assemblies* to help. 
Different plugins may treat detail assemblies differently, so it's important to read the plugin documentation as well. For example, a depletion plugin may perform pin-level depletion and rotation analysis only on the detail assemblies. Or perhaps CFD thermal/hydraulics will be run on detail assemblies, while subchannel T/H is run on the others.\n\nDetail assemblies are specified by the user in a variety of ways, through the GUI or the settings system.\n\n.. warning:: The Detail Assemblies mechanism has begun to be too broad of a brush for serious multiphysics calculations with each plugin treating them differently. It is likely that this feature will be extended to be more flexible and less surprising in the future.\n\nDetail Assembly Locations BOL\n    The ``detailAssemLocationsBOL`` setting is a list of assembly location strings\n    (e.g. ``004-003`` for ring 4, position 3). Assemblies that are in these locations at the\n    beginning-of-life will be activated as detail assemblies.\n\nDetail assembly numbers\n    The ``detailAssemNums`` setting is a list of ``assemNum``\\ s that can be inferred from a previous\n    case and specified, regardless of when the assemblies enter the core. This is useful for\n    activating detailed treatment of assemblies that enter the core at a later cycle.\n\nDetail all assemblies\n    The ``detailAllAssems`` setting makes all assemblies in the problem detail assemblies\n\n.. _kinetics-settings:\n\nKinetics settings\n^^^^^^^^^^^^^^^^^\nIn reactor physics analyses it is standard practice to represent reactivity in either absolute units (i.e., dk/kk' or pcm) or in dollars or cents. To support this functionality, the framework supplies the ``beta`` and ``decayConstants`` settings to apply the delayed neutron fraction and precursor decay constants to the Core parameters during initialization.\n\nThese settings come with a few caveats:\n\n    1. 
The ``beta`` setting supports two different meanings depending on\n       the type that is provided. If a single value is given, then this setting\n       is interpreted as the effective delayed neutron fraction for the\n       system. If a list of values is provided, then this setting is interpreted\n       as the group-wise (precursor family) delayed neutron fractions (useful for\n       reactor kinetics simulations).\n\n    2. The ``decayConstants`` setting is used to define the precursor\n       decay constants for each group. When set, it must be\n       provided with a corresponding ``beta`` setting that has the\n       same number of groups. For example, if six-group delayed neutron\n       fractions are provided, the decay constants must also be provided\n       in the same six-group structure.\n\n    3. If ``beta`` is interpreted as the effective delayed neutron fraction for\n       the system, then the ``decayConstants`` setting will not be utilized.\n\n    4. If both the group-wise ``beta`` and ``decayConstants`` are provided\n       and their number of groups are consistent, then the effective delayed\n       neutron fraction for the system is calculated as the summation of the\n       group-wise delayed neutron fractions.\n\n.. _cycle-history:\n\nCycle history\n^^^^^^^^^^^^^\nFor all cases, ``nCycles`` and ``power`` must be specified by the user. In the case that only a single state is to be examined (i.e. no burnup), the user need only additionally specify ``nCycles = 1``.\n\nIn the case of burnup, the reactor cycle history may be specified using either the simple or detailed option. The simple cycle history consists of the following case settings:\n\n    * ``power``\n    * ``nCycles`` (default = 1)\n    * ``burnSteps`` (default = 4)\n    * ``availabilityFactor(s)`` (default = 1.0)\n    * ``cycleLength(s)`` (default = 365.2425)\n\nIn addition, one may optionally use the ``powerFractions`` setting to change the reactor power between each cycle. 
With these settings, a user can define a history in which each cycle may vary in power, length, and uptime. The history is restricted, however, to each cycle having a constant power, to each cycle having the same number of burnup nodes, and to those burnup nodes being evenly spaced within each cycle. An example simple cycle history might look like\n\n.. code-block:: yaml\n\n    settings:\n        power: 1000000\n        nCycles: 3\n        burnSteps: 2\n        cycleLengths: [100, R2]\n        powerFractions: [1.0, 0.5, 1.0]\n        availabilityFactors: [0.9, 0.3, 0.93]\n\nNote the use of the special shorthand list notation, where repeated values in a list can be specified using an \"R\" followed by the number of times the value is to be repeated.\n\nThe above scheme would represent 3 cycles of operation:\n\n    1. 100% power for 90 days, split into two segments of 45 days each, followed by 10 days shutdown (i.e. 90% capacity)\n\n    2. 50% power for 30 days, split into two segments of 15 days each, followed by 70 days shutdown (i.e. 15% capacity)\n\n    3. 100% power for 93 days, split into two segments of 46.5 days each, followed by 7 days shutdown (i.e. 93% capacity)\n\nIn each cycle, criticality calculations will be performed at 3 nodes evenly-spaced through the uptime portion of the cycle (i.e. 
``availabilityFactor``*``powerFraction``), without option for changing node spacing or frequency.\nThis input format can be useful for quick scoping and certain types of real analyses, but clearly has its limitations.\n\nTo overcome these limitations, the detailed cycle history, consisting of the ``cycles`` setting may be specified instead.\nFor each cycle, an entry to the ``cycles`` list is made with the following optional fields:\n\n    * ``name``\n    * ``power fractions``\n    * ``cumulative days``, ``step days``, or ``burn steps`` + ``cycle length``\n    * ``availability factor``\n\nAn example detailed cycle history employing all of these fields could look like\n\n.. code-block:: yaml\n\n    settings:\n        power: 1000000\n        nCycles: 4\n        cycles:\n          - name: A\n            step days: [1, 1, 98]\n            power fractions: [0.1, 0.2, 1]\n            availability factor: 0.1\n          - name: B\n            cumulative days: [2, 72, 78, 86]\n            power fractions: [0.2, 1.0, 0.95, 0.93]\n          - name: C\n            step days: [5, R5]\n            power fractions: [1, R5]\n          - cycle length: 100\n            burn steps: 2\n            availability factor: 0.9\n\nNote that repeated values in a list may be again be entered using the shorthand notation for ``step days``, ``power fractions``, and ``availability factors`` (though not ``cumulative days`` because entries must be monotonically increasing).\n\nSuch a scheme would define the following cycles:\n\n    1. A 2 day power ramp followed by full power operations for 98 days, with three nodes clustered during the ramp and another at the end of the cycle, followed by 900 days of shutdown\n\n    2. A 2 day power ramp followed by a prolonged period at full power and then a slight power reduction for the last 14 days in the cycle\n\n    3. Constant full-power operation for 30 days split into six even increments\n\n    4. 
Constant full-power operation for 90 days, split into two equal-length 45 day segments, followed by 10 days of downtime\n\nAs can be seen, the detailed cycle history option provides much flexibility for simulating realistic operations, particularly power ramps or scenarios that call for unevenly spaced burnup nodes, such as xenon buildup in the early period of thermal reactor operations.\n\n.. note:: Although the detailed cycle history option allows for powers to change within each cycle, it should be noted that the power over each step is still considered to be constant.\n\n.. note:: The ``name`` field of the detailed cycle history is not yet used for anything, but this information will still be accessible on the operator during runtime.\n\n.. note:: Cycles without names will be given the name ``None``\n\n.. warning:: When a detailed cycle history is combined with tight coupling, a subclass of :py:meth:`LatticePhysicsInterface.interactCoupled <armi.physics.neutronics.latticePhysics.latticePhysicsInterface.LatticePhysicsInterface.interactCoupled>` should be used.\n\n.. _restart-cases:\n\nRestart cases\n^^^^^^^^^^^^^\nOftentimes the user is interested in re-examining just a specific set of time nodes from an existing run.\nIn these cases, it is sometimes not necessary to rerun an entire reactor history, and one may instead use one of the following options:\n\n    1. Snapshot, where the reactor state is loaded from a database and just a single time node is run.\n\n    2. Restart, where the cycle history is loaded from a database and the calculation continues through the remaining specified time history.\n\nFor either of these options, it is possible to alter the specific settings applied to the run by simply adjusting the case settings for the run.\nFor instance, a run that originally had only neutronics may incorporate thermal hydraulics during a snapshot run by adding in the relevant TH settings.\n\n.. 
note:: For either of these options, it is advisable to first create a new case settings file with a name different than the one from which you will be restarting off of, so as to not overwrite those results.\n\nTo run a snapshot, the following settings must be added to your case settings:\n\n    * Set ``runType`` to ``Snapshots``\n    * Add a list of cycle/node pairs corresponding to the desired snapshots to ``dumpSnapshot`` formatted as ``'CCCNNN'``\n    * Set ``reloadDBName`` to the existing database file that you would like to load the reactor state from\n\nAn example of a snapshot run input:\n\n.. code-block:: yaml\n\n       runType: Snapshots\n       reloadDBName: my-old-results.h5\n       dumpSnapshot: ['000000', '001002'] # 2 snapshots at BOL and cycle 1-node 2\n\nTo run a restart, the following settings must be added to your case settings:\n\n    * Set ``runType`` to ``Standard``\n    * Set ``loadStyle`` to ``fromDB``\n    * Set ``startCycle`` and ``startNode`` to the cycle/node that you would like to continue the calculation from (inclusive). ``startNode`` may use negative indexing.\n    * Set ``reloadDBName`` to the existing database file from which you would like to load the reactor history up to the restart point\n    * If you would like to change the specified reactor history (see :ref:`restart-cases`), keep the history up to the restarting cycle/node unchanged, and just alter the history after that point. This means that the cycle history specified in your restart run should include all cycles/nodes up to the end of the simulation. For complicated restarts, it may be necessary to use the detailed ``cycles`` setting, even if the original case only used the simple history option.\n\nA few examples of restart cases:\n\n    - Restarting a calculation at a specific cycle/node and continuing for the remainder of the originally-specified cycle history:\n        .. 
code-block:: yaml\n\n            # old settings\n            settings:\n                nCycles: 2\n                burnSteps: 2\n                cycleLengths: [100, 100]\n                runType: Standard\n                loadStyle: fromInput\n                loadingFile: my-blueprints.yaml\n\n        .. code-block:: yaml\n\n            # restart settings\n            settings:\n                nCycles: 2\n                burnSteps: 2\n                cycleLengths: [100, 100]\n                runType: Standard\n                loadStyle: fromDB\n                startCycle: 1\n                startNode: 0\n                reloadDBName: my-original-results.h5\n\n    - Add an additional cycle to the end of a case:\n        .. code-block:: yaml\n\n            # old settings\n            settings:\n                nCycles: 1\n                burnSteps: 2\n                cycleLengths: [100]\n                runType: Standard\n                loadStyle: fromInput\n                loadingFile: my-blueprints.yaml\n\n        .. code-block:: yaml\n\n            # restart settings\n            settings:\n                nCycles: 2\n                burnSteps: 2\n                cycleLengths: [100, 100]\n                runType: Standard\n                loadStyle: fromDB\n                startCycle: 0\n                startNode: -1\n                reloadDBName: my-original-results.h5\n\n    - Restart but cut the reactor history short:\n        .. code-block:: yaml\n\n            # old settings\n            settings:\n                nCycles: 3\n                burnSteps: 2\n                cycleLengths: [100, 100, 100]\n                runType: Standard\n                loadStyle: fromInput\n                loadingFile: my-blueprints.yaml\n\n        .. 
code-block:: yaml\n\n            # restart settings\n            settings:\n                nCycles: 2\n                burnSteps: 2\n                cycleLengths: [100, 100]\n                runType: Standard\n                loadStyle: fromDB\n                startCycle: 1\n                startNode: 0\n                reloadDBName: my-original-results.h5\n\n    - Restart with a different number of steps in the third cycle using the detailed ``cycles`` setting:\n        .. code-block:: yaml\n\n            # old settings\n            settings:\n                nCycles: 3\n                burnSteps: 2\n                cycleLengths: [100, 100, 100]\n                runType: Standard\n                loadStyle: fromInput\n                loadingFile: my-blueprints.yaml\n\n        .. code-block:: yaml\n\n            # restart settings\n            settings:\n                nCycles: 3\n                cycles:\n                  - cycle length: 100\n                    burn steps: 2\n                  - cycle length: 100\n                    burn steps: 2\n                  - cycle length: 100\n                    burn steps: 4\n                runType: Standard\n                loadStyle: fromDB\n                startCycle: 2\n                startNode: 0\n                reloadDBName: my-original-results.h5\n\n.. note:: The ``skipCycles`` setting is related to skipping the lattice physics calculation specifically, it is not required to do a restart run.\n\n.. note:: The ISO binary cross section libraries are required to run cases that skip the lattice physics calculation (e.g. MC^2)\n\n.. note:: Restarting a calculation with an different version of ARMI than what was used to produce the restarting database may result in undefined behavior.\n\nShuffling\n^^^^^^^^^\n\n.. 
note:: The ``explicitRepeatShuffles`` setting points to a ``*-SHUFFLES.txt`` file that records moves from a previous\n          run for exact repetition.\n\nUsers may also define a custom shuffle plan in a YAML file referenced by the ``shuffleSequenceFile`` setting. The YAML\nformat organizes data by cycle in a ``sequence`` mapping. Keys are the cycle where the shuffling should occur during\nthe beginning-of-cycle step. The first available cycle where shuffling will occur is cycle 1. Each cycle contains a\nlist of high-level actions. An action is a  mapping containing one of the keys ``cascade``, ``swap``, or\n``extraRotations``. ``cascade`` chains describe a sequence of assembly displacements beginning with a fresh fuel\nassembly and ending with the final location's assembly being discharged. Optional ``fuelEnrichment`` lists specify the\nU235 weight fraction enrichment for each axial block in the fresh assembly, from bottom to top, including zeroes for\nnon-fuel blocks. ``swap`` swaps the assemblies at two locations after all cascades are processed.\n``extraRotations`` map final location labels to relative counterclockwise angles in degrees and are applied after all\ncascades, swaps, and any algorithmic rotation routines defined with the ``assemblyRotationAlgorithm`` setting.\nThe angle is relative to the assembly's current orientation and whatever assembly ends up at the given location is\nrotated. Valid angles depend on the assembly's geometry.\n\nExtra rotations therefore:\n\n* apply to whatever assembly resides at the specified location once all cascades and swaps are complete;\n* rotate the assembly relative to its current orientation; and\n* execute after any algorithmic rotation routines.\n\nA cascade with no final destination defaults to deleting the assembly. Assemblies can be retained in the model by\nending the cascade with ``SFP``. 
When ``SFP`` is specified, the discharged assembly is stored in the spent fuel pool\neven if the ``trackAssems`` setting is ``False``; ``Delete`` always removes the assembly from the model.\n\nAssemblies may also be re-inserted from the spent fuel pool by starting a cascade with ``SFP`` and providing a\n``ringPosCycle`` to identify the spent fuel pool assembly returning to the core. ``ringPosCycle`` is a list conatining\nring, pos, and cycle used to specify that the assembly which resided at (ring, pos) during the specified cycle number\nis to be re-introduced into the reactor in the associated shuffle cascade. No assembly type is required in this case.\nThe cascade then proceeds as normal from the destination location. For example\n\n..  code:: yaml\n\n       sequence:\n         1:\n           - cascade: [\"outer fuel\", \"009-045\", \"008-004\", \"SFP\"]\n             fuelEnrichment: [0, 0.12, 0.14, 0.15, 0]  # wt fraction U235 by block\n           - swap: [\"009-045\", \"008-004\"]\n           - extraRotations: {\"009-045\": 60}\n         2:\n           - cascade: [\"outer fuel\", \"010-046\", \"009-045\", \"Delete\"]\n             fuelEnrichment: [0, 0.12, 0.14, 0.15, 0]\n\nA cascade that loads an assembly from the SFP may look like::\n\n..  code:: yaml\n\n       sequence:\n         1:\n           - cascade: [\"SFP\", \"005-003\", \"SFP\"]\n             ringPosCycle: [3, 5, 4]\n\nThis example retrieves the assembly that resided at ring 3, position 5 during cycle 4 from the spent fuel pool and\nplaces it in location ``005-003`` (ring 5, position 3) while sending the previous occupant of ``005-003`` to the\nspent fuel pool.\n\n.. note:: Consider using yaml anchors ``&`` and aliases ``*`` to reduce repetition.\n\nFor cycle 1 above, the actions execute in the following order:\n\n   1. The assembly originally at ``008-004`` is discharged to the spent fuel pool ``SFP``.\n   2. The assembly originally at ``009-045`` moves to ``008-004``.\n   3. 
A fresh ``outer fuel`` assembly is created with the specified axial enrichment profile and inserted at ``009-045``.\n   4. The fresh assembly and the moved assembly at ``008-004`` are swapped, leaving the fresh assembly at ``008-004`` and the moved assembly back at ``009-045``.\n   5. The assembly now at ``009-045`` is rotated an additional 60 degrees counterclockwise.\n\n.. note:: The restart.dat file is required to repeat the exact fuel management methods during a branch search. These can potentially modify the reactor state in ways that cannot be captures with the SHUFFLES.txt file.\n\nZones\n^^^^^\n\nZones are a collection of assemblies that share some similar characteristics. A zone might be those assemblies with\na similar orrificing pattern or a some subset of fuel assemblies. Some codes may wish to study behavior by lumping the\nreactor into a few channels with bulk or aggregated properties. Users can collect assemblies in each of these channels\nthrough the :attr:`~armi.reactor.cores.Core.zones` attribute on the core. See also the\n:class:`~armi.reactor.zones.Zones` class.\n\nUsers can define these zones with the ``zonesFile`` setting. It must point to YAML file that contains the high-level key\n``customZonesMap`` containing a map of ``location: zone`` maps.\n\n.. code:: yaml\n\n    customZonesMap:\n      001-001: primary control\n      002-001: fuel z0\n      003-001: fuel z0\n      004-001: fuel z1\n      004-002: secondary control\n\nThe ``location`` keys are the ARMI ring-position assembly identifier. It is not required to have every assembly\nbe inside a zone. But assemblies not listed will not be added to any zone, i.e., there is no default zone.\n\nThis example would produce four zones:\n\n1. ``primary control`` containing the center assembly at ``001-001``,\n2. ``fuel z0`` containing two fuel assemblies: ``002-001`` and ``003-001``,\n3. ``fuel z1`` containing one fuel assembly: ``004-001``, and\n4. 
``secondary control`` containing the assembly at ``004-002``.\n\nAn alternative method is with the ``zoneDefinitions`` setting in the primary settings file. This contains a list of\nzone names and the assemblies that make up that zone. The following would create an identical zone structure as above.\n\n.. code:: yaml\n\n    settings:\n      zoneDefinitions:\n        - \"primary control: 001-001\"\n        - \"fuel z0: 002-001, 003-001\"\n        - \"fuel z1: 004-001\"\n        - \"secondary control: 004-002\"\n\n.. note::\n\n    These are list of strings, not additional maps. Wrapping in quotations is required to process the zone definitions.\n\nThese zones will be populated according to the :meth:`~armi.reactor.cores.Core.buildManualZones` core method.\n\n.. _bp-input-file:\n\nThe Blueprints Input File\n=========================\n\nThe **blueprints** input defines the dimensions of structures in the reactor, as well as their material makeup. In a typical case, pin dimensions, isotopic composition, control definitions, coolant type, etc. are defined here. The specifics of each assembly type are then overlaid, possibly including enrichment distributions and other material modifications.\n\n.. note:: See the :py:mod:`~armi.reactor.blueprints` module for implementation and more detail.\n\nThis input file is formatted using `YAML <https://en.wikipedia.org/wiki/YAML>`_, which allows text-based change tracking for design control. ARMI does not have a blueprints-editing GUI yet, but may in the future.\n\n.. note:: You can point many ARMI runs to the same Blueprints input file using full paths in ``loadingFile`` setting.\n\nARMI adds an ``!include`` YAML tag, which can be used to include the contents of an external YAML file in any part of a blueprints file. The can be useful for sharing core or assembly pin layouts amongst multiple cases. 
For example::\n\n   grids:\n       core: !include path/to/core_grid.yaml\n\nWould have the effect of copy-pasting the contents of ``path/to/core_grid.yaml`` into the main blueprints file. The rules that ARMI uses to handle things like indentation of the included text are usually rather intuitive, but sometimes it can be useful to witness the behavior first-hand. The ``expand-bp`` command can be used to do a dry run for testing inputs with !includes.\n\nARMI models are built hierarchically, first by defining components, and then by larger and larger collections of the levels of the reactor.\n\nBlueprint sections\n------------------\nThe **blueprints** input file has several sections that corresponds to different levels of the reactor\nhierarchy. You will generally build inputs \"bottoms up\", first by defining elementary pieces (like pins)\nand then collecting them into the core and reactor.\n\nThe ARMI data model is represented schematically below, and the blueprints are defined accordingly:\n\n.. figure:: /.static/armi_reactor_objects.png\n    :align: center\n\n    The primary data containers in ARMI\n\n:ref:`blocks <blocks-and-components>`:\n    Defines :py:class:`~armi.reactor.components.component.Component` inputs for a\n    :py:class:`~armi.reactor.blocks.Block`.\n\n:ref:`assemblies <assemblies>`:\n    Defines vertical stacks of blocks used to define the axial profile of an\n    :py:class:`~armi.reactor.assemblies.Assembly`.\n\n:ref:`systems <systems>`:\n    Reactor-level structures like the core, the spent fuel pool, pumps, the head, etc.\n\n:ref:`grids <grids>`:\n    Lattice definitions for the core map or pin maps\n\n:ref:`nuclide flags <nuclide-flags>`:\n    Special setting: Specifies nuclide modeling options, whether a nuclide is being modeled for cross sections and/or\n    depletion. For instance, it allows you to ignore nuclides above Curium for depletion speed.\n    This also allows you to expand elements to a subset of nuclides. 
For example, you can\n    choose to expand Oxygen to just Oxygen-16 and neglect Oxygen-17 and 18.\n\n:ref:`custom isotopics <custom-isotopics>`:\n    Special setting: defines user-specified isotopic compositions.\n\nThe core map input files can be graphically manipulated with the\n:py:mod:`Grid editor <armi.utils.gridEditor>`.\n\n\n.. _blocks-and-components:\n\nBlocks and Components\n---------------------\nBlocks and components are defined together in the **blueprints** input.\n\nWe will start with a component, and then define the whole ``blocks:`` input. The structure will be something like::\n\n    blocks:\n        block name 1:\n            component name 1:\n                ...\n            component name 2:\n        block name 2:\n            component name 1:\n                ...\n            component name 2:\n                ...\n\n.. note:: You can also define components at the top level of the blueprints file under the ``components:`` top level\n    section, but bringing anything defined there into the reactor model must currently be done programmatically. We are\n    currently developing additional input capabilities to use these more flexibly.\n\n    Associated with this is a ``component groups:`` section which can collect different free components with different\n    volume fractions. This also is not fully implemented yet.\n\nDefining a Component\n^^^^^^^^^^^^^^^^^^^^\nThe **Components** section defines the pin (if modeling a pin-type reactor) and assembly in-plane dimensions (axial\ndimensions are defined in the :ref:`assemblies` input) and the material makeups of each\n:py:mod:`Component <armi.reactor.components>`. 
:py:mod:`Blocks <armi.reactor.blocks>` are defined here as collections\nof geometric components that have specific temperatures, dimensions, material properties, and isotopic compositions.\n\nAn component may be defined as::\n\n    fuel:\n        shape: Circle\n        material: UZr\n        Tinput: 20.0\n        Thot: 450.0\n        mult: 169\n        id: 0.0\n        od: 0.757\n\nHere we have provided the following information:\n\nComponent name\n    The component name (``fuel``) is specified at the top. Some physics kernels interpret names specially, so\n    pay attention to any naming conventions. As a general rule, you can expect that people will be doing regex\n    on your name, so you should not use any of these characters in your component names:\n    ``. ^ $ * + ? { } [ ] \\ | ( ) :``.\n\nshape\n    The shape will be extruded to the length specified in the ``assemblies`` input section below. ARMI contains\n    a variety of built-in simple shapes, and plugins can define their own design-specific/proprietary shapes.\n\nmaterial\n    The material links the component to a certain set of thermo-physical properties (e.g. temperature-dependent thermal\n    expansion coefficients, density, thermal conductivity, etc., which are used in the various physics kernels.\n    Natural isotopic composition is determined from this material specification as well (unless custom isotopics are\n    supplied). The entry here should either be a class name of a valid material (``UZr``) or a ``module:className`` pair\n    for specifying specific material (e.g. ``armi.materials.uZr:UZr``).\n    Materials are handled through the :py:mod:`material library <armi.materials>`.\n\n|Tinput|\n    The temperature (in C) that corresponds to the input dimensions given here. This facilitates automatic thermal\n    expansion.\n\n|Thot|\n    The temperature (in C) that the component dimensions will be thermal expanded to (using material properties based on\n    the ``material`` input). 
To disable automatic thermal expansion, set |Tinput| and |Thot| both to the same value\n\nmult\n    Multiplicity specifies how many duplicates of this component exist in this block. If you want 169 pins per assembly,\n    this would be 169. This does not explicitly describe the location of the pins. Note that many fast-neutron systems\n    only need volume fractions, not precise spatial locations, at least for pre-conceptual/simple studies.\n\nid\n    Inner diameter (in cm). Each shape has different required input dimension keys. For annulus, set id to non-zero.\n\nod\n    Outer diameter (in cm).\n\n.. _componentTypes:\n\nComponent Types\n^^^^^^^^^^^^^^^\nEach component has a variety of dimensions to define the shape and composition. All dimensions are in cm. The following\nis a list of included component shapes and their dimension inputs. Again, additional/custom components with arbitrary\ndimensions may be provided by the user via plugins.\n\n.. exec::\n    from armi.reactor.components import ComponentType\n    from dochelpers import createListTable\n\n    rows = [['Component Name', 'Dimensions']]\n    for c in ComponentType.TYPES.values():\n        rows.append([c.__name__, ', '.join(c.DIMENSION_NAMES)])\n\n    return createListTable(rows, widths=[25, 65], klass=\"longtable\")\n\nWhen a ``DerivedShape`` is specified as the final component in a block, its area is inferred from the difference\nbetween the area of the block and the sum of the areas comprised by the other components in the block. This is useful\nfor complex shapes like coolant surrounding a lattice of pins.\n\n.. _componentLinks:\n\nComponent Links\n^^^^^^^^^^^^^^^\nDimensions of a component may depend on the dimensions of a previously-defined component in the same block. For\ninstance, the sodium bond between fuel and cladding. The format is simply ``<componentName>.<dimensionName>``. 
The\ndimension names are available in the table above.\n\n::\n\n    blocks:\n        fuel:                       # block name\n            fuel:                   # component name\n                shape: Circle\n                material: UZr\n                Tinput: 25.0\n                Thot: 600.0\n                id: 0.0\n                isotopics: LABEL1\n                mult: 169.0\n                od: 0.757\n            bond:\n                shape: Circle\n                material: Sodium\n                Tinput: 450.0\n                Thot: 450.0\n                mult: fuel.mult\n                id: fuel.od         # bond is connected to the outside of fuel\n                od: clad.id         # and the inside of the clad\n            clad:\n                shape: Circle\n                material: HT9\n                Tinput: 25.0\n                Thot: 450.0\n                id: 0.905\n                mult: fuel.mult\n                od: 1.045\n\nLinked component dimensions (such as ``bond.id`` being linked to ``fuel.od``) remain linked\nas dimensions change. For example when the above defined fuel is expanded from cold temperature of\n25 to the hot temperature of 600 the ``bond.id`` will still be whatever the ``fuel.od`` is. This can\nresult in the displacement of material. For example, in the above case, if the fuel expansion\nremoves more cross sectional area than the clad expansion creates, the amount of thermal bond will be\nreduced. This is physical since, in reality, the fluid would be displaced as dimensions\nchange.\n\nPin lattices\n^^^^^^^^^^^^\nPin lattices may be explicitly defined in the block/component input in conjunction with the ``grids`` input section. 
A\nblock may assigned a grid name, and then each component may be assigned one or more grid specifiers.\n\nFor example, the following input section specifies that fuel pins will occupy all grid positions\nmarked with a ``1`` and cladding components will occupy all grid positions marked with either\na ``1`` or a ``2``. This situation may be desirable when some burnable poison pins use the same\ncladding as the fuel pins. ::\n\n    blocks:\n        fuel: &block_fuel\n            grid name: fuelgrid\n            fuel:\n                flags: fuel test\n                shape: Circle\n                material: UZr\n                Tinput: 25.0\n                Thot: 600.0\n                id: 0.0\n                mult: 169.0\n                od: 0.86602\n                latticeIDs: [1]\n            clad:\n                shape: Circle\n                material: HT9\n                Tinput: 25.0\n                Thot: 470.0\n                id: 1.0\n                mult: fuel.mult\n                od: 1.09\n                latticeIDs: [1,2]\n\n.. note:: A ``grid`` with the name ``fuelgrid`` must be defined as well in the grid input section.\n\n\n.. _naming-flags:\n\nFlags and naming\n----------------\n\nAll objects in the ARMI Reactor Model possess a set of\n:py:class:`armi.reactor.flags.Flags`, which can be used to affect the way that the\nvarious physics kernels treat each object. Most flags are named after common reactor\ncomponents, like ``FUEL``, or ``CLAD``, and are used to declare `what something is` in the\nreactor model. Various physics or other framework operations can then be\nparameterized to target specific types of things. For instance, the fuel handling code\ncan infer that blocks with the ``GRID_PLATE`` flag should be considered stationary and\nnot move them with the rest of the block stack in an assembly.\n\nHistorically, flags have also been used to describe directly `what should be done` with\nan object in the reactor model. 
For instance, an object with the ``DEPLETABLE`` flag set\nwill participate in isotopic depletion analysis, whereas objects without the\n``DEPLETION`` flag set will not. This has led to a lot of confusion, as the meaning of\nvarious flags is buried deep within the code, and can conflict from place to place. We\nare trying to align around a `what something is` interpretation, and bind those to\nspecific behaviors with settings. For more details, see :py:mod:`armi.reactor.flags`.\n\nThe set of specific flags that should be set on an object can be specified in one of two\nways for each object defined in the blueprints. The most precise way is to use include a\n``flags:`` entry for the object blueprint in question. In the example above, the\n``fuel`` component sets the ``FUEL`` and ``TEST`` flags. When specifying flags in this\nway, the value specified must be completely and unambiguously convertible into valid\nFlags. If it cannot, it will lead to an error when constructing the object.\n\nIf ``flags:`` is empty, or not specified, then the name of the object blueprint will be\nused to infer as many flags as possible. In the above example, the ``clad`` component\nwill get the ``CLAD`` flag from its name.\n\n.. note::\n    Additional flags may be specified from plugins, but this should be done with care; see the\n    :py:mod:`armi.reactor.flags` module and :py:meth:`armi.plugins.ArmiPlugin.defineFlags` plugin hook for more details.\n\n.. _assemblies:\n\nAssemblies\n----------\nOnce components and blocks are defined, Assemblies can be created as extruded stacks of blocks from bottom to top. The\nassemblies use YAML anchors to refer to the blocks defined in the previous section.\n\n.. note:: We aren't happy with the use of anchors to refer to blocks, and plan to change it (back) to just using the\n   block names directly. However, the use of anchors for input to be applied to multiple assemblies (e.g. 
heights) is\n   quite nice.\n\nA complete definition of an inner-core assembly may be seen below::\n\n        assemblies:\n            heights: &standard_heights [10.05, 20.10, 30.15, 20.10, 20.10, 30.15]\n            axial mesh points: &standard_axial_mesh_points [1, 2, 3, 4, 5, 6]\n            inner core:\n                specifier: IC\n                blocks: &inner_core_blocks [*block_shield, *block_fuel, *block_fuel, *block_fuel, *block_fuel, *block_plenum]\n                height: *standard_heights\n                axial mesh points: *standard_axial_mesh_points\n                hotChannelFactors: TWRPclad\n                material modifications:\n                    U235_wt_frac: ['', '', 0.001, 0.002, 0.03, '']\n                    ZR_wt_frac: ['', '', 0.1, 0.1, 0.1, 0.1]\n                nozzleType: Inner\n                xs types: [A, B, C, D, E, F]\n\n.. note::\n        While component dimensions are entered as cold dimensions, axial heights may be entered as\n        either cold or hot dimensions. In older versions of ARMI, it was required to enter heights\n        in the hot dimension (this behavior is preserved by setting `inputHeightsConsideredHot: True`).\n        However, with the\n        :py:class:`axial expansion changer <armi.reactor.converters.axialExpansionChanger.AxialExpansionChanger>`,\n        heights may be entered at cold temperatures (`inputHeightsConsideredHot: False`). Each Assembly will then\n        be expanded to its hot dimensions upon construction.\n\nFor many cases, a shared height and axial mesh point definition is sufficient. These can be included globally as shown\nabove and linked with anchors, or specified explicitly.\n\n\nspecifier\n   The Geometry Assembly Specifier, which is a two-letter ID, such as \"IC\" (for inner core), \"SH\"\n   (for shield), etc. 
These correspond with labels in the geometry input file
Provisions for multiple meshes for different physics are being planned.\n\nhotChannelFactors\n  A label to define which set of hot channel factors (HCFs) get applied to\n  this block in the thermal/hydraulic calculations. There are various valid sets included with ARMI.\n\nnozzleType\n  This is a string that identifies what type of inlet nozzle an assembly has. This parameter could\n  be used in an implementation of a thermal-hydraulics solver with flow orificing to apply\n  different pressure loss coefficients and/or flow rates to different types of assemblies.\n\nmaterial modifications\n  These are a variety of modifications that are made to the\n  materials in blocks in these locations. It may include the fuel enrichment (mass frac.), poison\n  enrichment (mass frac.), zirconium mass frac, and any additional options required to fully define\n  the material loaded in the component. The material definitions in the material library define\n  valid modifications for them.\n\n  .. exec::\n      from armi.materials import Material\n      from armi.utils.tabulate import tabulate\n\n      data = []\n      for m in Material.__subclasses__():\n          numArgs = m.applyInputParams.__code__.co_argcount\n          if numArgs > 1:\n              modNames = m.applyInputParams.__code__.co_varnames[1:numArgs]\n              data.append((m.__name__, \", \".join(modNames)))\n\n          for subM in m.__subclasses__():\n              num = subM.applyInputParams.__code__.co_argcount\n              if num > 1:\n                  mods = subM.applyInputParams.__code__.co_varnames[1:num]\n                  if numArgs > 1:\n                      mods += modNames\n                  data.append((subM.__name__, \", \".join(mods)))\n\n      d = {}\n      for k, v in data:\n          if k not in d:\n              d[k] = v\n          else:\n              d[k] = d[k].split(\",\") + v.split(\",\")\n              d[k] = sorted(set([vv.strip() for vv in d[k]]))\n              d[k] = \", 
\".join(d[k])\n      data = [(k, v) for k, v in d.items()]\n      data.sort(key=lambda t: t[0])\n      return tabulate(\n          headers=(\"Material Name\", \"Available Modifications\"),\n          data=data,\n          tableFmt=\"rst\",\n      )\n\n  The class 1/class 2 modifications in fuel materials are used to identify mixtures of\n  custom isotopics labels for input scenarios where a varying blend of a high-reactivity\n  feed with a low-reactivity feed. This is often useful for closed fuel cycles. For example,\n  you can define any fuel material as being made of LWR-derived TRU plus depleted uranium\n  at various weight fractions. Note that this input style only adjusts the heavy metal.\n\n  To enable the application of different values for the same material modification type\n  on different components within a block, the user may specify material modifications\n  by component. This is useful, for instance, when two pins within an assembly\n  made of the same base material have different fuel enrichments. 
This is done\n  using the ``by component`` attribute to the material modifications as in::\n\n        blocks:\n            fuel: &block_fuel\n                fuel1: &component_fuel_fuel1\n                    shape: Hexagon\n                    material: UZr\n                    Tinput: 600.0\n                    Thot: 600.0\n                    ip: 0.0\n                    mult: 1\n                    op: 10.0\n                fuel2: &component_fuel_fuel2\n                    shape: Hexagon\n                    material: UZr\n                    Tinput: 600.0\n                    Thot: 600.0\n                    ip: 0.0\n                    mult: 1\n                    op: 10.0\n        assemblies:\n            fuel a: &assembly_a\n                specifier: IC\n                blocks: [*block_fuel]\n                height: [1.0]\n                axial mesh points: [1]\n                xs types: [A]\n                material modifications:\n                    by component:\n                        fuel1:\n                            U235_wt_frac: [0.20]\n                        fuel2:\n                            Zr_wt_frac: [0.02]\n                    U235_wt_frac: [0.30]\n\n  Material modifications specified on the ``material modifications`` level are referred to as \"block default\" values\n  and apply to all components on the block not associated with a by-component value. This example would apply an\n  enrichment of 20% to the ``fuel1`` component and an enrichment of 30% to all other components in the block that\n  accept the ``U235_wt_frac`` material modification.\n\n  All by-component material modifications override any block default material modifications of the same type. 
In\n  addition, any by-component entries omitted for a given axial block will revert to the block default (or material\n  class default, if no block default value is provided and a material class default exists) value::\n\n        blocks:\n            fuel: &block_fuel\n                fuel1: &component_fuel_fuel1\n                    shape: Hexagon\n                    material: UZr\n                    Tinput: 600.0\n                    Thot: 600.0\n                    ip: 0.0\n                    mult: 1\n                    op: 10.0\n                fuel2: &component_fuel_fuel2\n                    shape: Hexagon\n                    material: UZr\n                    Tinput: 600.0\n                    Thot: 600.0\n                    ip: 0.0\n                    mult: 1\n                    op: 10.0\n        assemblies:\n            fuel a: &assembly_a\n                specifier: IC\n                blocks: [*block_fuel, *block_fuel]\n                height: [0.5, 0.5]\n                axial mesh points: [1, 1]\n                xs types: [A, A]\n                material modifications:\n                    by component:\n                        fuel1:\n                            U235_wt_frac: [0.20, ''] # <-- the U235_wt_frac for the second block will go to the block default value\n                        fuel2: # the U235_wt_frac for fuel2 component in both axial blocks will go to the block default values\n                            Zr_wt_frac: [0.02, ''] # <-- the Zr_wt_frac for the second block will go to the material class default because there is no block default value\n                    U235_wt_frac: [0.30, 0.30]\n\nThe first block listed is defined at the bottom of the core. This is typically a grid plate or some other structure.\n\n.. 
_systems:\n\nSystems\n-------\nOnce assemblies are defined they can be grouped together into the Core, the spent fuel pool (SFP), etc.\n\nA complete reactor structure with a core and a SFP may be seen below::\n\n    systems:\n        core:\n            grid name: core\n            origin:\n                x: 0.0\n                y: 10.1\n                z: 1.1\n        Spent Fuel Pool:\n            type: sfp\n            grid name: sfp\n            origin:\n                x: 1000.0\n                y: 12.1\n                z: 1.1\n\nThe ``origin`` defines the point of origin in global space in units of cm. This allows you to define the relative\nposition of the various structures. The ``grid name`` inputs are string mappings to the grid definitions described\nbelow.\n\nPlugin Behavior\n^^^^^^^^^^^^^^^\n\nThe :meth:`armi.plugins.ArmiPlugin.defineSystemBuilders` method can be provided by plugins to control how ARMI converts\nthe ``systems`` section into ``Composite``\\ s to be modeled. By default, the ``type`` field is used to determine what\nobject is created. The default :class:`armi.reactor.ReactorPlugin` provides the following mapping:\n\n==================  ======================================================\n``type`` Value      Builds\n==================  ======================================================\n``core`` (default)  :class:`~armi.reactor.reactors.Core`\n``excore``          :class:`~armi.reactor.excoreStructure.ExcoreStructure`\n``sfp``             :class:`~armi.reactor.spentFuelPool.SpentFuelPool`\n==================  ======================================================\n\nPlugins are able to provide a superset (e.g., ``core``, ``excore``, and ``sfp``) and new mappings of values to builders.\n\n.. _grids:\n\nGrids\n-----\nGrids are described inside a blueprint file using ``lattice map`` or ``grid contents`` fields to\ndefine arrangements in Hex, Cartesian, or R-Z-Theta. 
The optional ``lattice pitch`` entry allows\nyou to specify spacing between objects that is different from tight packing. This input is required\nin mixed geometry cases, for example if Hexagonal assemblies are to be loaded into a Cartesian\narrangement. The contents of a grid may be defined using one of the following:\n\n``lattice map:``\n    An ASCII map representing the grid contents\n``grid contents:``\n    a direct YAML representation of the contents\n\nExample grid definitions are shown below::\n\n    grids:\n        control:\n            geom: hex\n            symmetry: full\n            lattice map: |\n               - - - - - - - - - 1 1 1 1 1 1 1 1 1 4\n                - - - - - - - - 1 1 1 1 1 1 1 1 1 1 1\n                 - - - - - - - 1 8 1 1 1 1 1 1 1 1 1 1\n                  - - - - - - 1 1 1 1 1 1 1 1 1 1 1 1 1\n                   - - - - - 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n                    - - - - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n                     - - - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n                      - - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n                       - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n                        7 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1\n                         1 1 1 1 1 1 1 1 2 1 1 1 1 1 1 1 1 1\n                          1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n                           1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n                            1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n                             1 1 1 1 1 1 1 1 1 1 1 1 1 1\n                              1 1 1 1 1 1 1 1 1 3 1 1 1\n                               1 1 1 1 1 1 1 1 1 1 1 1\n                                1 6 1 1 1 1 1 1 1 1 1\n                                 1 1 1 1 1 1 1 1 1 1\n        sfp:\n            symmetry: full\n            geom: cartesian\n            lattice pitch:\n                x: 50.0\n                y: 50.0\n            grid contents:\n                [0,0]: MC\n                [1,0]: MC\n                [0,1]: MC\n                [1,1]: MC\n\n.. 
tip:: We have gone through some effort to allow both pin and core grid definitions to share this\n    input and it may improve in the future.\n\nYou may set up some kinds of grids (e.g. 1/3 and full core hex or Cartesian core\nloadings) using our interactive graphical grid editor described more in\n:py:mod:`armi.utils.gridEditor`.\n\n.. figure:: /.static/gridEditor.png\n    :align: center\n\n    An example of the Grid Editor being used on a FFTF input file\n\n.. _custom-isotopics:\n\nCustom Isotopics\n----------------\nIn some cases (such as benchmarking a previous reactor), the default mass fractions from the\nmaterial library are not what you want to model. In these cases, you may override the isotopic\ncomposition provided by the material library in this section. There are three ways to specify\nthe isotopics: ``mass fractions`` (sum to 1.0), ``number densities`` (in atoms/barn-cm), or\n``number fractions`` (sum to 1.0). For example::\n\n    custom isotopics:\n        LABEL1:\n            input format: mass fractions\n            density: 7.79213903298633\n            C: 0.000664847887388523\n            CR: 0.182466356404319\n            CU: 0.00323253628006144\n            FE: 0.705266053783901\n            MN: 0.0171714161260001\n            MO: 0.00233843050046998\n            NI: 0.0831976890804466\n            SI: 0.00566266993741259\n\nSee the :py:mod:`List of Nuclides <armi.nucDirectory.nuclideBases>` for all valid entries. Note that\nARMI will expand elemental nuclides to their natural isotopics in most cases (to correspond with the\nnuclear data library).\n\nThe (mass) ``density`` input is invalid when specifying ``number densities``; the code will present an error message.\n\nMaterial density may be specified in custom isotopics either explicitly in a ``mass fractions`` input\nformat (shown above) or implicitly with ``number densities``. This is fairly straightforward for the\n``Custom`` material, as it has no baseline density. 
Density may also be specified for components using\nmaterials which have entries in the materials library. Users should be aware of the following interactions\nwhen specifying a custom density for components using a library material:\n\n    1. The library material density will not be changed. Only the component(s) with the custom isotopics\n    entry will have the density modification.\n\n    2. Density specified by custom isotopics will override all other density modifications in the component\n    construction phase (e.g. ``TD_frac`` entries).\n\n    3. Only the component density is changed; no other material properties are altered to account for the\n    change in composition/density.\n\n    4. Density can only be specified using custom isotopics for non-``Custom`` materials that have some\n    initial density. Don't try to make ``Void`` have mass!\n\nDensities specified using ``Custom Isotopics`` are applied in component construction, and should be specified\nat the input temperature for the component. Note that when overriding the density of a library material, all\nother properties of that material (e.g. expansion coefficients) will continue to be used as if the component\nconsisted of the library material. In other words, ARMI will still think the component is made out of the\noriginal material!\n\nAdvanced topics\n---------------\n\nOverlapping shapes\n^^^^^^^^^^^^^^^^^^\nSolids of different compositions in contact with each other present complications during thermal\nexpansion. The ARMI Framework does not perform calculations to see exactly how such\nscenarios will behave mechanically; it instead focuses on conserving mass. 
To do this, users should\ninput a zero-dimension component linking the 2 solid components made of the special ``Void`` material.\nThis gap will allow the 2 components to thermally expand\nindependently while keeping track of the overlapping area.\n\nIt is important to keep track of the areas\nwhen a DerivedShape is included in a block design because ARMI calculates the\nderived area by taking the full area of the block and subtracting the total area of\nthe non-DerivedShapes. If area between thermally-expanding solids was not accounted for, this\nwould non-physically add or subtract coolant into these gaps. To model overlapping components\nheterogeneously, it is suggested to use a :py:mod:`block converter\n<armi.reactor.converters.blockConverters>`.\n\nAdditionally, it should be noted that assigning ``mult: fuel.mult`` will be ever-so-slightly slower\nthan just defining the actual value. This is because ARMI needs to find the sibling\ncomponent and get the siblings ``mult``. If you are concerned about performance at that level and don't expect\n``mult`` to change much in your case, you can replace the constant link (i.e. it does not change over time)\nwith a YAML anchor and alias.\n\nComponent area modifications\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nIn some scenarios, it is desired to have one component's area be subtracted or added to another. For\nexample, the area of the skids in a skid duct design needs to be subtracted from the interstitial\ncoolant. The mechanism to handle this involves adding a parameter to the component to be\nmodified after all the required ones in the form of ``<componentName>.add`` or\n``<componentName>.sub``. The component to be added or subtracted must be defined before the\ncomponent that is being modified. 
This allows fairly complicated configurations to be modeled\nwithout explicitly defining new components.\n\n::\n\n    blocks:\n        rect with 100 holes:\n            holes:\n                shape: Circle\n                material: Sodium\n                Tinput: 600\n                Thot: 600\n                mult: 100\n                od: 0.05\n            square of steel:\n                shape: Square\n                material: Iron\n                Tinput: 25.0\n                Thot: 600.0\n                widthOuter: 3.0\n                modArea: holes.sub      # \"holes\" is the name of the other component\n\nPutting it all together to make a Block\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nHere is a complete fuel block definition::\n\n        blocks:\n            fuel: &block_fuel\n                bond:\n                    shape: Circle\n                    material: Sodium\n                    Tinput: 450.0\n                    Thot: 450.0\n                    id: fuel.od\n                    mult: fuel.mult\n                    od: cladding.id\n                clad:\n                    shape: Circle\n                    material: HT9\n                    Tinput: 25.0\n                    Thot: 450.0\n                    id: 0.905\n                    mult: fuel.mult\n                    od: 1.045\n                coolant:\n                    shape: DerivedShape\n                    material: Sodium\n                    Tinput: 450.0\n                    Thot: 450.0\n                duct:\n                    shape: Hexagon\n                    material: HT9\n                    Tinput: 25.0\n                    Thot: 450.0\n                    ip: 15.2\n                    mult: 1.0\n                    op: 16.2\n                fuel:\n                    shape: Circle\n                    material: UZr\n                    Tinput: 25.0\n                    Thot: 600.0\n                    id: 0.0\n                    isotopics: LABEL1\n             
       mult: 169.0\n                    od: 0.757\n                intercoolant:\n                    shape: Hexagon\n                    material: Sodium\n                    Tinput: 450.0\n                    Thot: 450.0\n                    ip: duct.op\n                    mult: 1.0\n                    op: 16.79\n                wire:\n                    shape: Helix\n                    material: HT9\n                    Tinput: 25.0\n                    Thot: 450.0\n                    axialPitch: 30.0\n                    helixDiameter: 1.145\n                    id: 0.0\n                    mult: fuel.mult\n                    od: 0.1\n\n\nMaking blocks with unshaped components\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nSometimes you will want to make a homogeneous block,  which is a mixture of multiple\nmaterials, and will not want to define an exact shape for each of the components in\nthe block. In this case unshaped components can be used, but ARMI still requires there\nto be at least one component with shape to define the pitch of the block.\n\nIn the example below, the block is a rectangular pitch so one of the\ncomponents is defined as a rectangle to indicate this. Its outer dimensions determine\nthe pitch of the block. The inner dimensions can be whatever is necessary to\npreserve the area fraction. Note that rectangular blocks have pitch defined by two\nnumbers, since they may not be a square. 
In this case the rectangle component is half\nthe area fraction and the other two components are one quarter::\n\n        blocks:\n            fuel:\n                clad:\n                    shape: Rectangle\n                    material: HT9\n                    Tinput: 25.0\n                    Thot: 25.0\n                    lengthOuter: 3.0\n                    lengthInner: 2.4\n                    widthOuter: 2.0\n                    widthInner: 1.25\n                    mult: 1.0\n                fuel:\n                    shape: UnshapedComponent\n                    material: UZr\n                    Tinput: 25.0\n                    Thot: 25.0\n                    area: 1.5\n                coolant:\n                    shape: UnshapedComponent\n                    material: Sodium\n                    Tinput: 25.0\n                    Thot: 25.0\n                    area: 1.5\n\n.. warning:: When using this method, avoid thermal expansion by setting Tinput=Thot, or\n   your pitch component dimensions might change, thus changing your pitch.\n\n\nAlternatively, a void (empty) component with zero area can be added for defining the\npitch, and then all three components can be defined as unshaped. 
The downside is that there\nare now four components, but only three that have actual area and composition::\n\n        blocks:\n            fuel:\n                clad:\n                    shape: UnshapedComponent\n                    material: HT9\n                    Tinput: 25.0\n                    Thot: 25.0\n                    area: 3.0\n                fuel:\n                    shape: UnshapedComponent\n                    material: UZr\n                    Tinput: 25.0\n                    Thot: 25.0\n                    area: 1.5\n                coolant:\n                    shape: UnshapedComponent\n                    material: Sodium\n                    Tinput: 25.0\n                    Thot: 25.0\n                    area: 1.5\n                PitchDefiningComponent:\n                    shape: Rectangle\n                    material: Void\n                    lengthOuter: 3.0\n                    lengthInner: 3.0\n                    widthOuter: 2.0\n                    widthInner: 2.0\n                    mult: 1.0\n\n\nThis can similarly be done for hex geometry and a hexagon with Outer Pitch (``op``).\n\n.. warning:: The rest of the inputs described below are scheduled to be moved into the settings input file, since their nature is that of a setting.\n\n.. _nuclide-flags:\n\nNuclide Flags\n-------------\nThe ``nuclide flags`` setting allows the user to choose which nuclides they\nwould like to consider in the problem, and whether or not each nuclide should\ntransmute and decay. For example, sometimes you may not want to deplete trace\nelements in structural materials, but in other analyses you might. If the\nnuclide should deplete, it must have ``burn: true``. If it is to be included\nin the problem at all, it must have ``xs: true``. All nuclides that will be\nproduced via transmutation/decay must also have ``burn: true``, so if you add\nThorium, make sure to add all other actinides in its chain. 
You can use the\n``expandTo:`` section to list a subset of natural nuclides to expand\ninto. If you leave this section out, a default set of nuclide flags will be\napplied to your problem. Remember this\nsection when you start changing which nuclides are modeled and which ones\ndeplete.::\n\n    # this is a YAML comment\n    nuclide flags:\n        AL: {burn: false, xs: true}\n        AM241: {burn: true, xs: true}\n        C: &carbon_flags {burn: false, xs: true}    # an anchor to \"carbon_flags\"\n        CA: *carbon_flags\n        CL: *carbon_flags\n        CO: *carbon_flags                           # the alias back to \"carbon_flags\"\n        CR: *carbon_flags\n        CU: *carbon_flags\n        FE: *carbon_flags\n        H: {burn: false, xs: true}\n        MN: {burn: false, xs: true}\n        MO: {burn: false, xs: true}\n        N: {burn: false, xs: true}\n        NA: {burn: false, xs: true}\n        NI: {burn: false, xs: true}\n        O: {burn: false, xs: true, expandTo: [\"O16\", \"O17\"]}\n        P: {burn: false, xs: true}\n        PU238: {burn: true, xs: true}\n        PU239: {burn: true, xs: true}\n        PU240: {burn: true, xs: true}\n        PU241: {burn: true, xs: true}\n        PU242: {burn: true, xs: true}\n        S: {burn: false, xs: true}\n        SI: {burn: false, xs: true}\n        U234: {burn: false, xs: true}\n        U235: {burn: true, xs: true}\n        U236: {burn: true, xs: true}\n        U238: {burn: true, xs: true}\n\nThe code will crash if materials used in :ref:`blocks-and-components` contain nuclides not defined in\n``nuclide flags``. A failure can also occur if the burn chain is missing a nuclide.\n\n.. tip::\n    We plan to upgrade the default behavior of this to inherit from all defined materials\n    in a problem to reduce the user-input burden.\n\n.. These following are rst substitutions. They're useful for keeping the plaintext readable\n   while getting subscripted text.\n\n.. |Tinput| replace:: T\\ :sub:`input`\n.. 
|Thot| replace:: T\\ :sub:`hot`\n\n.. _fuel-management-input:\n\nFuel Management Input\n=====================\n\nFuel management in ARMI is specified through custom Python scripts or YAML files that often reside\nin the working directory of a run (but can be anywhere if you use full paths). During a normal run,\nARMI checks for several fuel management settings:\n\n``shuffleLogic``\n   The path to the Python source file or dotted import path to a module that contains the user's custom fuel\n   management logic\n\n``shuffleSequenceFile``\n   The path to a yaml file containing the user's custom fuel management logic.\n\n``fuelHandlerName``\n   The name of a FuelHandler class that ARMI will look for in the Fuel Management Input module or file\n   specified by ``shuffleLogic``. Since it's input, it's the user's responsibility\n   to design and place that object in that module or file.\n\n.. note:: We consider the limited syntax needed to express fuel management in Python\n   code itself to be sufficiently expressive and simple for non-programmers to\n   actually use. Also, fuel management options are available through YAML input files.\n\nThe ARMI Operator will call its fuel handler's ``outage`` method before each cycle (and, if requested, during branch\nsearch calculations). The :py:meth:`~armi.physics.fuelCycle.fuelHandlers.FuelHandler.outage` method\nwill perform bookkeeping operations, and eventually\ncall the user-defined ``chooseSwaps`` method (located in Fuel Management Input). 
``chooseSwaps`` will\ngenerally contain calls to :py:meth:`~armi.physics.fuelCycle.fuelHandlers.FuelHandler.findAssembly`,\n:py:meth:`~armi.physics.fuelCycle.fuelHandlers.FuelHandler.swapAssemblies` ,\n:py:meth:`~armi.physics.fuelCycle.fuelHandlers.FuelHandler.swapCascade`, and\n:py:meth:`~armi.physics.fuelCycle.fuelHandlers.FuelHandler.dischargeSwap`, which are the primary\nfuel management operations and can be found in the fuel management module.\n\nAlso found in the user-defined Fuel Management Input module is a ``getFactors`` method, which is used to control which\nshuffling routines get called and at which time.\n\n.. note::\n\n    See the :py:mod:`fuelHandlers module <armi.physics.fuelCycle.fuelHandlers>` for more details.\n\nFuel Management Operations\n--------------------------\nIn the ARMI, the assemblies can be moved as units around the reactor with swapAssemblies,\ndischargeSwap, and swapCascade of a ``FuelHandler`` interface.\n\nswapAssemblies\n^^^^^^^^^^^^^^\nswapAssemblies is the simplest fuel management operation. Given two assembly objects, this method will switch\ntheir locations. ::\n\n    self.swapAssemblies(a1,a2)\n\ndischargeSwap\n^^^^^^^^^^^^^\nA discharge swap is a simple operation that puts a new assembly into the reactor while discharging\nan outgoing one. ::\n\n    self.dischargeSwap(newIncoming,oldOutgoing)\n\nThis operation keeps track of the outgoing assembly in a SpentFuelPool object that the Reactor\nobject has access to so you can see how much of what you discharged.\n\nswapCascade\n^^^^^^^^^^^\nSwapCascade is a more powerful swapping function that can swap a list of assemblies in a \"daisy-chain\" type\nof operation. These are useful for doing the main overtone shuffling operations such as convergent shuffling\nand/or convergent-divergent shuffling. 
If we load up the list of assemblies, the first one will be put in the\nlast one's position, and all others will shift accordingly.\n\nAs an example, consider assemblies 1 through 5 in core positions A through E.::\n\n    self.swapCascade([a1,a2,a3,a4,a5])\n\nThis table shows the positions of the assemblies before and after the swap cascade.\n\n\n========    ============================    ===========================\nAssembly    Position Before Swap Cascade    Position After Swap Cascade\n========    ============================    ===========================\n1           A                                   E\n2           B                                   A\n3           C                                   B\n4           D                                   C\n5           E                                   D\n========    ============================    ===========================\n\nArbitrarily complex cascades can thusly be assembled by choosing the order of the assemblies passed into swapCascade.\n\nChoosing Assemblies to Move\n---------------------------\nThe methods described in the previous section require known assemblies to shuffle. Choosing these assemblies is\nthe essence of fuel shuffling design. The single method used for these purposes is the FuelHandler's ``findAssembly``\nmethod. 
This method is very general purpose, and ranks in the top 3 most important\nmethods of ARMI altogether.\n\nTo use it, just say::\n\n    a = self.findAssembly(param='maxPercentBu',compareTo=20)\n\nThis will return the assembly in the reactor that has a maximum burnup closest to 20%.\nOther inputs to findAssembly are summarized in the API docs of\n:py:meth:`~armi.physics.fuelCycle.fuelHandlers.FuelHandler.findAssembly`.\n\n\nFuel Management Examples\n------------------------\n\nConvergent-Divergent\n^^^^^^^^^^^^^^^^^^^^\nConvergent-divergent shuffling is when fresh assemblies march in from the outside until\nthey approach the jump ring, at which point they jump to the center and diverge until\nthey reach the jump ring again, where they now jump to the outer periphery of the core,\nor become discharged.\n\nIf the jump ring is 6, the order of target rings is::\n\n    [6, 5, 4, 3, 2, 1, 6, 7, 8, 9, 10, 11, 12, 13]\n\nIn this case, assemblies converge from ring 13 to 12, to 11, to 10, ..., to 6, and then\njump to 1 and diverge until they get back to 6. In a discharging equilibrium case, the\nhighest burned assembly in the jumpRing should get discharged and the lowest should\njump by calling a dischargeSwap on cascade[0] and a fresh feed after this cascade is\nrun.\n\nThe convergent rings in this case are 7 through 13 and the divergent ones are 1\nthrough 5.\n\n\nFuel Management Tips\n--------------------\nSome mistakes are common. Follow these tips.\n\n    * Always make sure your assembly-level types in the settings file are up to date\n      with the grids in your blueprints file. Otherwise you'll be moving feeds when you\n      want to move igniters, or something.\n    * Use the exclusions list! If you move a cascade and then the next cascade tries\n      to run, it will choose your newly-moved assemblies if they fit your criteria in\n      ``findAssembly``. This leads to very confusing results. 
Therefore, once you move\n      assemblies, you should default to adding them to the exclusions list.\n    * Print cascades during debugging. After you've built a cascade to swap, print it\n      out and check the locations and types of each assembly in it. Is it what you want?\n    * Watch ``typeNum`` in the database. You can get good intuition about what is\n      getting moved by viewing this parameter.\n\nRunning a branch search\n-----------------------\nARMI can perform a branch search where a number of fuel management operations\nare performed in parallel and the preferred one is chosen and proceeded with.\nThe key to any branch search is writing a fuel handler that can interpret\n**fuel management factors**, defined as keyed values between 0 and 1.\n\nAs an example, a fuel handler may be written to interpret two factors, ``numDischarges``\nand ``chargeEnrich``. One method in the fuel handler would then take\nthe value of ``factors['numDischarges']`` and multiply it by the maximum\nnumber of discharges (often set by another user setting) and then discharge\nthis many assemblies. Similarly, another method would take the ``factors['chargeEnrich']``\nvalue (between 0 and 1) and multiply it by the maximum allowable enrichment\n(again, usually controlled by a user setting) to determine which enrichment\nshould be used to fabricate new assemblies.\n\nGiven a fuel handler that can thusly interpret factors between 0 and 1, the\nconcept of branch searches is simple. They simply build uniformly distributed\nlists between 0 and 1 across however many CPUs are available and cases on all\nof them, passing one of each of the factors to each CPU in parallel. When the cases\nfinish, the branch search determines the optimal result and selects the corresponding\nvalue of the factor to proceed.\n\nBranch searches are controlled by custom ``getFactorList`` methods specified in the\n``shuffleLogic`` input modules or files. 
This method should return two things:\n\n    * A ``defaultFactors``; a dictionary with user-defined keys and values between\n      0 and 1 for each key. These factors will be passed to the ``chooseSwaps``\n      method, which is typically overridden by the user in custom fuel handling code.\n      The fuel handling code should interpret the values and move the fuel\n      according to what is sent.\n\n    * A ``factorSearchFlags`` list, which lists the keys to be branch searched.\n      The search will optimize the first key first, and then do a second pass\n      on the second key, holding the optimal first value constant, and so on.\n\nSuch a method may look like this::\n\n    def getFactorList(cycle,cs=None):\n\n        # init default shuffling factors\n        defaultFactors = {'chargeEnrich':0,'numDischarges':1}\n        factorSearchFlags=[] # init factors to run branch searches on\n\n        # determine when to activate various factors / searches\n        if cycle not in [0,5,6]:\n            # shuffling happens before neutronics so skip the first cycle.\n            defaultFactors['chargeEnrich']=1\n        else:\n            defaultFactors['numDischarges']=0\n            factorSearchFlags = ['chargeEnrich']\n\n        return defaultFactors,factorSearchFlags\n\nOnce a proper ``getFactorList`` method exists and a fuel handler object exists that can interpret the factors, activate a branch search during a regular run by selecting the **Branch Search** option on the GUI.\n\nThe **best** result from the branch search is determined by comparing the *keff* values with the ``targetK`` setting, which is available for setting in the GUI. The branch with *keff* closest to the setting, while still being above 1.0 is chosen.\n"
  },
  {
    "path": "doc/user/manual_data_access.rst",
    "content": "**********************\nAccessing Data in ARMI\n**********************\n\nA basic user only needs to know the CLI or GUI and can perform basic analysis and design with just\nthat. But a power user will be more interested in programmatically building and manipulating inputs\nand gathering detailed information out of ARMI results. Let's now go into a bit more detail for the\npower user.\n\nSettings and State Variables\n============================\nThe following links contain large tables describing the various global settings and state parameters\nin use across ARMI.\n\n* :ref:`settings-report`\n* :ref:`reactor-parameters-report`\n* :ref:`core-parameters-report`\n* :ref:`component-parameters-report`\n* :ref:`assembly-parameters-report`\n* :ref:`block-parameters-report`\n\n\nAccessing Some Interesting Info\n===============================\nOften times, you may be interested in the geometric dimensions of various blocks. These are stored\non the :py:mod:`components <armi.reactor.components>`, and may be accessed as follows::\n\n    # This may need to be ``o.r``.\n    b = r.core.getFirstBlock(Flags.FUEL)\n    fuel = b.getComponent(Flags.FUEL)\n\n    # fuel outer diameter in cm\n    od = fuel.getDimension('od',cold=True)\n    odHot = fuel.getDimension('od')  # hot dimension\n\n    # hot inner diameter at a specific temperature\n    id600 = fuel.getDimension('id',Tc=600)\n    clad = b.getComponent(Flags.CLAD)\n\n    # number of cladding pins (multiplicity)\n    numClad = clad.getDimension('mult')\n\n    cladMat = clad.getProperties()  # get the cladding material\n    # get the thermal conductivity of the clad material at 500C\n    k = cladMat.thermalConductivity(Tc=500)\n\nThe dimensions available depend on the shape of the component. Hexagons have `op` and `ip` for outer\nand inner pitch. Other options are seen at the source at :py:mod:`armi.reactor.components`.\n"
  },
  {
    "path": "doc/user/outputs.rst",
    "content": "*******\nOutputs\n*******\n\nARMI output files are described in this section. Many outputs may be generated during an ARMI run.\nThey fall into various categories:\n\nFramework outputs\n    Files like the **stdout** and the **database** are produced in nearly all runs.\n\nInterface outputs\n    Certain plugins/interfaces produce intermediate output files.\n\nPhysics kernel outputs\n    If ARMI executes an external physics kernel during a run, its associated output files are often available in the\n    working directory. These files are typically read by ARMI during the run, and relevant data is transferred onto the\n    reactor model (and ends up in the ARMI **database**). If the user desires to retain all of the inputs and outputs\n    associated with the physics kernel runs for a given time step, this can be specified with the ``savePhysicsIO``\n    setting. For any time step specified in the list under ``savePhysicsIO``, a ``cXnY/`` folder will be created, and\n    ARMI will store all inputs and outputs associated with each physics kernel executed at this time step in a folder\n    inside of ``cXnY/``. The format for specifying a state point is 00X00Y for cycle X, step Y.\n\nTogether the output fully define the analyzed ARMI case.\n\n\nThe Standard Output\n===================\nThe Standard Output (or **stdout**) is a running log of things an ARMI run prints out as it executes\na case. It shows what happened during a run, which inputs were used, which warnings were issued, and\nin some cases, what the summary results are. 
Here is an excerpt::\n\n        =========== Completed BOL Event ===========\n\n        =========== Triggering BOC - cycle 0 Event ===========\n        =========== 01 - main                 BOC - cycle 0 ===========\n        [impt] Beginning of Cycle 0\n        =========== 02 - fissionProducts      BOC - cycle 0 ===========\n        =========== 03 - xsGroups             BOC - cycle 0 ===========\n        [xtra] Generating representative blocks for XS\n        [xtra] Cross section group manager summary\n\nIn a standard run, the various interfaces will loop through and print out messages according to the `verbosity`\nsetting. In multi-processing runs, the **stdout** shows messages from the primary node first and then shows information\nfrom all other nodes below (with verbosity set by the `branchVerbosity` setting). Sometimes a user will want to set the\nverbosity of just one module (.py file) in the code higher than the rest of ARMI, to do so they can set up a custom\nlogger by placing this line at the top of the file::\n\n    runLog = logging.getLogger(__name__)\n\nThese single-module (file) loggers can be controlled using the `moduleVerbosity` setting. All of\nthese logger verbosities can be controlled from the settings file, for example::\n\n    branchVerbosity: debug\n    moduleVerbosity:\n        armi.reactor.reactors: info\n    verbosity: extra\n\nIf there is an error, a useful message may be printed in the **stdout**, and a full traceback will\nbe provided in the associated **stderr** file.\n\nSome Linux users tend to use the **tail** command to monitor the progress of an ARMI run::\n\n    tail -f myRun.stdout\n\nThis provides live information on the progress.\n\n.. _database-file:\n\nThe Database File\n=================\nThe **database** file is a self-contained, binary representation of the state of the ARMI composite\nmodel during a simulation. The database contains full, plain-text of the input files that were\nused to create the case. 
And for each time node, the values of all composite parameters as well as\nlayout information to help fully reconstruct the reactor data model.\n\nLoading Reactor State\n---------------------\nAmong other things, the database file can be used to recover an ARMI reactor model from any of the\ntime nodes that it contains. This can be useful for performing restart runs, or for doing custom\npost-processing analysis. To load a reactor state, you will need to open the database file into a\n``Database`` object. From there, you can call the :py:meth:`armi.bookkeeping.db.Database.load()`\nmethod to get a recovered ``Reactor`` object. For instance, given a database file called\n``myDatabase.h5``, we could load the reactor state at cycle 5, time node 2 with the following::\n\n   from armi.bookkeeping.db import databaseFactory\n\n   db = databaseFactory(\"myDatabase.h5\", \"r\")\n\n   # The underlying file is not left open unless necessary. Use the\n   # handy context manager to temporarily open the file and\n   # interact with the data:\n   with db:\n       r = db.load(5, 2)\n\n.. note:: The cycles are 0-indexed, but the time nodes, in practice, are not. Therefore, cycle 5 above is actually the 6th cycle in the simulation. 
For cycle 5 with two time nodes, there will be three time steps saved to the database: c5n0 (BOC), c5n1 (time node 1), and c5n2 (time node 2).\n\nExtracting Reactor History\n--------------------------\nNot only can the database reproduce reactor state for a given time node, it can also\nextract a history of specific parameters for specific objects through the\n:py:meth:`armi.bookkeeping.db.Database.getHistory()` and\n:py:meth:`armi.bookkeeping.db.Database.getHistories()` methods.\nFor example, given the reactor object, ``r`` from the example above, we could get the\nentire history of an assembly's ring, position and areal power density with the\nfollowing::\n\n   from armi.reactor.flags import Flags\n\n   # grab a fuel assembly from the reactor\n   a = r.core.getAssemblies(Flags.FUEL)[0]\n\n   # Don't forget to open the database!\n   with db:\n       aHist = db.getHistory(a, [\"ring\", \"pos\", \"arealPd\"])\n\n\nExtracting Settings and Blueprints\n----------------------------------\nAs well as the reactor states for each time node, the database file also stores the\ninput files (blueprints and settings files) used to run the case that generated it.\nThese can be recovered using the `extract-inputs` ARMI entry point. Use `python -m armi\nextract-inputs --help` for more information.\n\nFile format\n-----------\nThe database file format is built on top of the HDF5 format. There are many tools\navailable for viewing, editing, and scripting HDF5 files. The ARMI database uses the\n`h5py` package for interacting with the underlying data and metadata.\nAt a high level there are 3 things to know about HDF5:\n\n1. **Groups** - Groups are named collections of datasets. Think of a group as a filesystem folder.\n2. **Datasets** - Datasets are named values. If a group is a folder, a dataset is a file. Values are\n   strongly typed (think `int`, `float`, `double`, but also whether it is big endian, little endian\n   so that the file is portable across different systems). 
Values can be scalar, vector, or\nN-dimensional arrays.\n3. **Attributes** - Attributes can exist on a dataset or a group to provide supplemental\n   information about the group or dataset. We use attributes to indicate the ARMI database version\n   that was used to create the database, the time the case was executed, and whether or not the\n   case completed successfully. We also sometimes apply attributes to datasets to indicate if any\n   special formatting or layout was used to store Parameter values or the like.\n\nThere are many other features of HDF5, but this is enough information to get started.\n\nDatabase Structure\n------------------\nThe broad strokes of the database structure are outlined below.\n\n.. list-table:: Database structure\n   :header-rows: 1\n   :class: longtable\n\n   * - Name\n     - Type\n     - Description\n   * - ``/``\n     - H5Group\n     - root node\n   * - ``/inputs/``\n     - H5Group\n     - A group that contains all inputs\n   * - ``/inputs/settings``\n     - string\n     - A representation of the settings file that was used to create the case\n   * - ``/inputs/blueprints``\n     - string\n     - A representation of the blueprints file that was used to create the case\n   * -\n     -\n     -\n   * - ``/c{CC}n{NN}/``\n     - H5Group\n     - A group that contains the ARMI model for a specific cycle {CC} and time node\n       {NN}. For the following, there may be a bit of pseudo-code to explain the origin\n       of data. ``comp`` is any old component within the ARMI model hierarchy.\n\n       Also, it is important to note that all components are flattened and then grouped\n       by type.\n   * - ``/c{CC}n{NN}EOL/``\n     - H5Group\n     - A special time node, like the one above, where {CC} is the last cycle and {NN} is the last\n       node. 
If this exists, it is meant to represent the EOL, which is perhaps a few days after the\n       end of the last cycle, where fuel is decaying non-operationally.\n   * - ``/c{CC}n{NN}/layout/``\n     - H5Group\n     - A group that contains  a description of the ARMI model within this timenode\n   * - ``/c{CC}n{NN}/layout/name``\n     - list of strings\n     - ``comp.name``\n   * - ``/c{CC}n{NN}/layout/type``\n     - list of strings\n     - ``type(comp).__name__`` -- The name of the component type. We can use this to\n       construct a new object when reading. You could also use it to filter down to data\n       that you care about using hdf5 directly.\n   * - ``/c{CC}n{NN}/layout/serialNum``\n     - list of int\n     - ``comp.p.serialNum`` -- Serial number of the component. This number is unique\n       within a component type.\n   * - ``/c{CC}n{NN}/layout/location``\n     - list of 3-tuple floats\n     - ``tuple(comp.spatialLocator) or (0, 0, 0)`` -- Gives the location indices for a\n       given component. Note these are relative, so there are duplicates.\n   * - ``/c{CC}n{NN}/layout/locationType``\n     - list of strings\n     - ``type(comp.spatialLocator).__name__ or \"None\"`` -- The type name of the\n       location.\n   * - ``/c{CC}n{NN}/layout/indexInData``\n     - list of int\n     - The components are grouped by ``type(comp).__name__``. 
The integers are a mapping\n       between the component and its index in the ``/c{CC}n{NN}/{COMP_TYPE}/`` group.\n   * - ``/c{CC}n{NN}/layout/numChildren``\n     - list of int\n     - ``len(comp)`` -- The number of direct child composites this composite has.\n       Notably, this is not a summation of all the children.\n   * - ``/c{CC}n{NN}/layout/temperatures``\n     - list of 2-tuple floats\n     - ``(comp.InputTemperatureInC, comp.TemperatureInC) or (-900, -900)`` --\n       Temperatures in for Component objects.\n   * - ``/c{CC}n{NN}/layout/material``\n     - list of string\n     - ``type(comp.material).__name__ or \"\"`` -- Name of the associated material for an\n       Component.\n   * -\n     -\n     -\n   * - ``/c{CC}n{NN}/{COMP_TYPE}/``\n     - H5Group\n     - ``{COMP_TYPE}`` corresponds to the ``type(comp).__name__``.\n   * - ``/c{CC}n{NN}/{COMP_TYPE}/{PARAMETER}``\n     - list of inferred data\n     - Values for all parameters for a specific component type, in the order defined by\n       the ``/c{CC}n{NN}/layout/``. See the next table to see a description of the\n       attributes.\n\nPython supports a rich and dynamic type system, which is sometimes difficult to\nrepresent with the HDF5 format. Namely, HDF5 only supports dense, homogeneous\nN-dimensional collections of data in any given dataset. Some parameter values do not fit\ninto this mold. Examples of tricky cases are:\n\n* Representing ``None`` values interspersed among a bunch of ``floats``\n* Jagged arrays, where each \"row\" of a matrix has a different number of entries (or\n  higher-dimensional analogs)\n* Dictionaries\n\nNone of these have a direct representation in HDF5. Therefore, the parameter values on\nthe composite model sometimes need to be manipulated to fit into the HDF5 format, while\nstill being able to faithfully reconstruct the original data. To accomplish this, we use\nHDF5 dataset attributes to indicate when some manipulation is necessary. 
Writing\nsuch special data to the HDF5 file and reading it back again is accomplished with the\n:py:func:`armi.bookkeeping.db.database.packSpecialData` and\n:py:func:`armi.bookkeeping.db.database.unpackSpecialData`. Refer to their implementations\nand documentation for more details.\n\nLoading Reactor State as Read-Only\n----------------------------------\nAnother option you have, though it will probably come up less often, is to load a ``Reactor`` object\nfrom a database file in read-only mode. Mostly what this does is set all the parameters loaded into\nthe reactor data model to a read-only mode. This can be useful to ensure that downstream analysts\ndo not modify the data they are reading. It looks much like the usual database load::\n\n   from armi.bookkeeping.db import databaseFactory\n\n   db = databaseFactory(\"myDatabase.h5\", \"r\")\n\n   with db:\n       r = db.loadReadOnly(5, 2)\n\nAnother common use for ``Database.loadReadOnly()`` is when you want to build a tool for analysts\nthat can open an ARMI database file without the ``App`` that created it. Solving such a problem\ngenerically is hard-or-impossible, but assuming you probably know a lot about the ``App`` that\ncreated an ARMI output file, this is usually doable in practice. To do so, you will want to look at\nthe :py:class:`PassiveDBLoadPlugin <armi.bookkeeping.db.passiveDBLoadPlugin.PassiveDBLoadPlugin>`.\nThis tool allows you to passively load an output database even if there are parameters or blueprint\nsections that are unknown.\n"
  },
  {
    "path": "doc/user/params_report.rst",
    "content": ".. _params-report:\n\n=================\nParameters Report\n=================\n\n.. exec::\n   from armi.reactor import assemblies\n   from armi.reactor import assemblyParameters\n   from armi.reactor import blockParameters\n   from armi.reactor import blocks\n   from armi.reactor import reactorParameters\n   from armi.reactor import reactors\n   from armi.reactor.components import Component\n   from armi.reactor.components.componentParameters import getComponentParameterDefinitions\n   from dochelpers import generateParamTable\n\n   s = generateParamTable(reactors.Reactor, reactorParameters.defineReactorParameters())\n   numR = s.count(\"  * - \") - 1\n\n   s = generateParamTable(reactors.Core, reactorParameters.defineCoreParameters())\n   numC = s.count(\"  * - \") - 1\n\n   s = generateParamTable(assemblies.Assembly, assemblyParameters.getAssemblyParameterDefinitions())\n   numA = s.count(\"  * - \") - 1\n\n   s = generateParamTable(blocks.Block, blockParameters.getBlockParameterDefinitions())\n   numB = s.count(\"  * - \") - 1\n\n   s = generateParamTable(Component, getComponentParameterDefinitions())\n   numComp = s.count(\"  * - \") - 1\n\n   numParams = numR + numC + numA + numB + numComp\n\n   txt = f\"This document lists all {numParams} Parameters in ARMI:\\n\\n\"\n   txt += f\"* {numR} Reactor Parameters.\\n\"\n   txt += f\"* {numC} Core Parameters.\\n\"\n   txt += f\"* {numA} Assembly Parameters.\\n\"\n   txt += f\"* {numB} Block Parameters.\\n\"\n   txt += f\"* {numComp} Component Parameters.\\n\\n\"\n\n   return txt\n\nUsers of the ARMI Framework are not required to use all of these parameters. And the system is easy to extend to add\nnew Parameters for your use-cases. These are simply the default Parameters that come with ARMI. See\n:py:mod:`armi.reactor.parameters` for use.\n\n\n.. 
_reactor-parameters-report:\n\n******************\nReactor Parameters\n******************\n\nThis is a list of all of the Reactor Parameters that are provided by the ARMI Framework.\n\n.. exec::\n   from armi.reactor import reactors\n   from armi.reactor import reactorParameters\n   from dochelpers import generateParamTable\n\n   return generateParamTable(reactors.Reactor, reactorParameters.defineReactorParameters())\n\n.. _core-parameters-report:\n\n\n***************\nCore Parameters\n***************\n\nThis is a list of all of the Core Parameters that are provided by the ARMI Framework.\n\n.. exec::\n   from armi.reactor import reactors\n   from armi.reactor import reactorParameters\n   from dochelpers import generateParamTable\n\n   return generateParamTable(reactors.Core, reactorParameters.defineCoreParameters())\n\n\n.. _assembly-parameters-report:\n\n*******************\nAssembly Parameters\n*******************\n\nThis is a list of all of the Assembly Parameters that are provided by the ARMI Framework.\n\n.. exec::\n   from armi.reactor import assemblies\n   from armi.reactor import assemblyParameters\n   from dochelpers import generateParamTable\n\n   return generateParamTable(assemblies.Assembly, assemblyParameters.getAssemblyParameterDefinitions())\n\n\n.. _block-parameters-report:\n\n****************\nBlock Parameters\n****************\n\nThis is a list of all of the Block Parameters that are provided by the ARMI Framework.\n\n.. exec::\n   from armi.reactor import blocks\n   from armi.reactor import blockParameters\n   from dochelpers import generateParamTable\n\n   return generateParamTable(blocks.Block, blockParameters.getBlockParameterDefinitions())\n\n\n.. _component-parameters-report:\n\n********************\nComponent Parameters\n********************\n\nThis is a list of all of the Component Parameters that are provided by the ARMI Framework.\n\n.. 
exec::\n   from armi.reactor.components import Component\n   from armi.reactor.components.componentParameters import getComponentParameterDefinitions\n   from dochelpers import generateParamTable\n\n   return generateParamTable(Component, getComponentParameterDefinitions())\n"
  },
  {
    "path": "doc/user/physics_coupling.rst",
    "content": "****************\nPhysics Coupling\n****************\n\nLoose Coupling\n==============\nARMI supports loose and tight coupling. Loose coupling is interpreted as one-way coupling between physics for a single time node. For example, a power distribution in cycle 0 node 0 is used to calculate a temperature distribution in cycle 0 node 0. This temperature is then used in cycle 0 node 1 to compute new cross sections and a new power distribution. This process repeats itself for the lifetime of the simulation. \n\n.. graphviz:: /.static/looseCouplingIllustration.dot\n\nLoose coupling is enabled by default in ARMI simulations.\n\nTight Coupling\n==============\nTight coupling is interpreted as two-way communication between physics within a given time node. Revisiting our previous example, enabling tight coupling results in the temperature distribution being used to generate updated cross sections (new temperatures induce changes such as Doppler broadening feedback) and ultimately an updated power distribution. This process is repeated iteratively until a numerical convergence criteria is met.\n\n.. graphviz:: /.static/tightCouplingIllustration.dot\n\nThe following settings are involved with enabling tight coupling in ARMI:\n\n1. ``tightCoupling``: When ``True``, tight coupling is enabled.\n2. ``tightCouplingSettings``: Used to specify which parameters and convergence criteria will be used to measure the convergence of a given interface.\n\n.. code-block:: yaml\n\n       tightCoupling: true\n       tightCouplingSettings:\n         globalFlux:\n           parameter: power\n           convergence: 1.0e-4\n         thermalHydraulics:\n           parameter: THmassFlowRate\n           convergence: 1.0e-2\n\n\nThe ``tightCouplingSettings`` settings interact with the interfaces available in ARMI (or an ARMI app). 
The interface headers (i.e., \"globalFlux\" and \"thermalHydraulics\") must match the value prescribed for :py:attr:`Interface.purpose <armi.interfaces.interface.purpose>`. The option, ``parameter``, can be a registered parameter. The ``convergence`` option is expected to be any float value. In the current implementation, different interfaces may have different developer intended restrictions. For example, the global flux interface currently only allows the eigenvalue (i.e. :math:`k_{\\text{eff}}`) or block-wise power to be valid ``parameter`` values.\n\n.. warning::\n    The inherent limitations of the above interface-based tight coupling settings have been documented and a new and improved user-interface is currently being developed.\n\nIn the global flux interface, the following norms are used to compute the convergence of :math:`k_{\\text{eff}}` and block-wise power.\n\nEigenvalue\n----------\nThe convergence of the eigenvalue is measured through an L2-norm.\n\n.. math::\n    \\epsilon = \\| k_\\text{eff} \\|_2 = \\left( \\left( k_\\text{eff,old} - k_\\text{eff,new} \\right)^2 \\right) ^ \\frac{1}{2}\n\nBlock-wise Power\n----------------\nThe block-wise power can be used as a convergence mechanism to avoid the integral effects of :math:`k_{\\text{eff}}` (i.e., over and under predictions cancelling each other out) and in turn, can have a different convergence rate. To measure the convergence of the power distribution with the prescribed tolerances (e.g., 1e-4), the power is scaled in the following manner (otherwise the calculation struggles to converge). \n\nFor an assembly, :math:`a`, we compute the total power of the assembly,\n\n.. math::\n    a_{\\text{power},i} = \\sum_{j}b_{\\text{power},(i,j)},\n\nwhere :math:`i` is the :math:`i^{\\text{th}}` assembly and :math:`j` is the :math:`j^{\\text{th}}` block within assembly, :math:`i`. 
With the assembly power, we scale the block power and obtain an array of scaled block powers for a given assembly, :math:`\\mathbf{b}_{i}`,\n\n.. math::\n    \\mathbf{b}_{i} = \\left\\lbrace \\frac{b_{\\text{power},(i,j)}}{a_{\\text{power},i}} \\right\\rbrace, \\quad \\forall j \\in a_i.\n\nWe can now calculate a convergence parameter for each assembly,\n\n.. math::\n    \\epsilon_i &= \\| \\textbf{b}_{i,\\text{old}} - \\textbf{b}_{i,\\text{new}} \\|_2 \\\\\n               &=\\sqrt{\\sum_{i}\\left( \\textbf{b}_{i,\\text{old}} - \\textbf{b}_{i,\\text{new}} \\right)^2}.\n\nThese assembly-wise convergence parameters are then stored in an array of convergence values,\n\n.. math::\n    \\xi = \\left\\lbrace \\epsilon_i \\right\\rbrace,\\quad \\forall i \\in \\text{Core}.\n\nThe total convergence of the power distribution is finally measured through the infinity norm (i.e, the max) of :math:`\\xi`,\n\n.. math::\n    \\epsilon = \\| \\xi \\|_{\\inf} = \\max \\xi.\n\n\nThe Global Flux Interface\n-------------------------\nThe :py:class:`Global Flux Interface <armi.physics.neutronics.globalFlux.globalFluxInterface.GlobalFluxInterface>`\nclass will attempt to set its own ``TightCoupler`` based on ``keff``. To see the specifics, see:\n:py:meth:`_setTightCouplingDefaults <armi.physics.neutronics.globalFlux.globalFluxInterface.GlobalFluxInterface._setTightCouplingDefaults>`.\nIf you want to change the tight coupling performance of the ``GlobalFluxInterface``, it would be\neasiest to just subclass the interface and over-write the `_setTightCouplingDefaults` method.\n"
  },
  {
    "path": "doc/user/radial_and_axial_expansion.rst",
    "content": "******************************************\nRadial and Axial Expansion and Contraction\n******************************************\n\nARMI natively supports linear expansion in both the radial and axial dimensions for pin-type reactors. These expansion\ntypes function independently of one another and each have their own set of underlying assumptions and use-cases. Radial\nexpansion happens by default but there are several settings that control axial expansion:\n\n* ``inputHeightsConsideredHot`` - Indicates whether blueprints heights have already been thermally expanded. If ``False``, ARMI will expand components at BOL consistent with provided temperatures.\n* ``assemFlagsToSkipAxialExpansion`` - Assemblies with a flag in this list will not be axially expanded.\n* ``detailedAxialExpansion`` - Allow each assembly to expand independently. This will result in a non-uniform mesh.\n\nIf they happen, ARMI runs radial and axial expansion when objects are created from blueprints. That is, when the reactor is\ncreated from blueprints at BOL, these calculations are performed. But also at BOC if new assemblies are added to the\ncore, then expansion will happen again when the assembly object is created from blueprints.\n\n\nThermal Expansion\n=================\nARMI treats thermal expansion as a linear phenomena using a standard linear expansion relationship,\n\n.. math::\n    \\frac{\\Delta L}{L_0} = \\alpha(T) \\Delta T,\n    :label: linearExp\n\nwhere, :math:`\\Delta L` and :math:`\\Delta T` are the change in length and temperature from the\nreference state, respectively, and :math:`\\alpha` is the thermal expansion coefficient relative to\n:math:`T_0`. Expanding and rearranging Equation :eq:`linearExp`, we can obtain an expression for the\nnew length, :math:`L_1`,\n\n.. 
math::\n    L_1 = L_0\\left[1 + \\alpha(T_1)\\left(T_1 - T_0\\right) \\right].\n    :label: newLength\n\nGiven Equation :eq:`linearExp`, we can create expressions for the change in length between our \"hot\"\ntemperature (Equation :eq:`hotExp`)\n\n.. math::\n    \\begin{aligned}\n        \\frac{L_h - L_0}{L_0} &= \\alpha(T_h)\\left(T_h - T_0\\right),\\\\\n        \\frac{L_h}{L_0} &= 1 + \\alpha(T_h)\\left(T_h - T_0\\right).\n    \\end{aligned}\n    :label: hotExp\n\nand \"non-reference\" temperature, :math:`T_c` (Equation :eq:`nonRefExp`),\n\n.. math::\n    \\begin{aligned}\n        \\frac{L_c - L_0}{L_0} &= \\alpha(T_c)\\left(T_c - T_0\\right),\\\\\n        \\frac{L_c}{L_0} &= 1 + \\alpha(T_c)\\left(T_c - T_0\\right).\n    \\end{aligned}\n    :label: nonRefExp\n\nThese are used within ARMI to enable thermal expansion and contraction with a temperature not equal\nto the reference temperature, :math:`T_0`. By taking the difference between Equation :eq:`hotExp`\nand :eq:`nonRefExp`, we can obtain an expression relating the change in length, :math:`L_h - L_c`,\nto the reference length, :math:`L_0`,\n\n.. math::\n    \\begin{aligned}\n        \\frac{L_h - L_0}{L_0} - \\frac{L_c - L_0}{L_0} &= \\frac{L_h}{L_0} - 1 - \\frac{L_c}{L_0} + 1, \\\\\n        &= \\frac{L_h - L_c}{L_0}.\n    \\end{aligned}\n    :label: diffHotNonRef\n\nUsing Equations :eq:`diffHotNonRef` and :eq:`nonRefExp`, we can obtain an expression for the change\nin length, :math:`L_h - L_c`, relative to the non-reference temperature,\n\n.. math::\n    \\frac{L_h - L_c}{L_c} &= \\frac{L_h - L_c}{L_0} \\frac{L_0}{L_c}\\\\\n    &= \\left( \\frac{L_h}{L_0} - \\frac{L_c}{L_0} \\right) \\left( 1 + \\alpha(T_c)\\left(T_c - T_0\\right) \\right)^{-1}.\n    :label: expNewRelative\n\nUsing Equations :eq:`hotExp` and :eq:`nonRefExp`, we can simplify Equation :eq:`expNewRelative` to find,\n\n.. 
math::\n    \\frac{L_h - L_c}{L_c} = \\frac{\\alpha(T_h) \\left(T_h - T_0\\right) - \\alpha(T_c)\\left(T_c - T_0\\right)}{1 + \\alpha(T_c)\\left(T_c - T_0\\right)}.\n    :label: linearExpansionFactor\n\nEquation :eq:`linearExpansionFactor` is the expression used by ARMI in\n:py:meth:`linearExpansionFactor <armi.materials.material.Material.linearExpansionFactor>`.\n\n.. note::\n    :py:meth:`linearExpansionPercent\n    <armi.materials.material.Material.linearExpansionPercent>` returns\n    :math:`\\frac{L - L_0}{L_0}` in %.\n\nGiven that thermal expansion (or contraction) of solid components must conserve mass throughout the\nsystem, the density of the component is adjusted as a function of temperature based on Equation\n:eq:`hot_density_general`, assuming isotropic thermal expansion.\n\n.. math::\n    \\rho(T_h) = \\frac{\\rho(T_0)}{\\left(1 + \\frac{\\Delta L}{L_0}\\right)^3} = \\frac{\\rho(T_0)}{\\left(1 + \\alpha_m (T_h) (T_h - T_0)\\right)^3}\n    :label: hot_density_general\n\nwhere, :math:`\\rho(T_h)` is the component density in :math:`\\frac{kg}{m^3}` at the given temperature\n:math:`T_h`, :math:`\\rho(T_0)` is the component density in :math:`\\frac{kg}{m^3}` at the reference\ntemperature :math:`T_0`, and :math:`\\alpha(T_h)` is the mean coefficient of thermal expansion at the\nspecified temperature :math:`T_h` relative to the material's reference temperature.\n\nAn update to mass densities is applied for all solid components given the assumption of isotropic\nthermal expansion. Here we assume the masses of non-solid components (e.g., fluids or gases) are\nallowed to change within the reactor core model based on changes to solid volume changes. 
For\ninstance, if solids change volume due to temperature changes, there is a change in the amount of\nvolume left for fluid components.\n\nImplementation Discussion and Example of Radial and Axial Thermal Expansion\n===========================================================================\nThis section provides an example thermal expansion calculation for a simple cylindrical component\nfrom a reference temperature of 20°C to 1000°C with example material properties and dimensions as\nshown in the table below.\n\n.. list-table:: Example Component Properties for Thermal Expansion\n   :widths: 50 50\n   :header-rows: 1\n   :name: thermal_exp_comp_properties\n\n   * - Property\n     - Example\n   * - Material\n     - Steel\n   * - Radius\n     - 0.25 cm\n   * - Height\n     - 5.0 cm\n   * - Reference Temperature\n     - 20°C\n   * - Density\n     - 1.0 g/cc\n   * - Mean Coefficient Thermal Expansion\n     - :math:`2\\times 10^{-6}` 1/°C\n\nThe figure below illustrates the thermal expansion phenomena in both the radial and axial\ndirections.\n\n.. figure:: /.static/axial_expansion_simple.png\n\n    Illustration of radial and axial thermal expansion for a cylinder in ARMI.\n\nThermal expansion calculations are performed for each component in the ARMI reactor data model as\ncomponent temperatures change. Since components are constrained within blocks, the height of\ncomponents are determined by the height of their parent block. Equations :eq:`hot_radius` through\n:eq:`hot_density` illustrate how the radius, height, volume, density, and mass are updated for\na Component during thermal expansion, respectively.\n\n.. 
list-table:: Example Calculation of Radial and Axial Thermal Expansion for a Cylindrical Component\n   :widths: 33 33 33\n   :header-rows: 1\n\n   * - Component Temperature\n     - 20°C\n     - 1000°C\n   * - Radius\n     - 0.25 cm\n     - 0.251 cm\n   * - Height\n     - 5.0 cm\n     - 5.01 cm\n   * - Volume\n     - 0.982 cc\n     - 0.988 cc\n   * - Density\n     - 1.0 g/cc\n     - 0.994 g/cc\n   * - Mass\n     - 0.982 g\n     - 0.982 g\n\n.. math::\n   :name: hot_radius\n\n    r(T_h) = 0.25 \\left(1 + \\left(2\\times 10^{-6}(1000 − 20)\\right)\\right) = 0.251 cm\n\n.. math::\n   :name: hot_height\n\n    h(T_h) = 5.0 \\left(1 + \\left(2\\times 10^{-6}(1000 − 20)\\right)\\right) = 5.01 cm\n\n.. math::\n   :name: hot_volume\n\n    V(T_h) = \\pi (0.251)^2 5.01 = 0.988 cm^3\n\n.. math::\n   :name: hot_density\n\n    \\rho(T_h) = \\frac{1.0}{\\left(1 + 2\\times 10^{-6}(1000 − 20)\\right)^3} = 0.994 \\frac{g}{cc}\n\n.. math::\n   :name: hot_mass\n\n    m(T_h) = 0.994 \\times 0.988 = 0.982 g\n\nRadial thermal expansion occurs for each Component in a given Block. Mechanical contact between\ncomponents is not accounted for, meaning that the radial expansion of one Component is independent\nfrom the radial expansion of the others. Solid components may be radially linked to gas/fluid components\n(i.e., sodium bond, helium) and the gas/fluid area is allowed to radially expand and contract with changes in\nComponent temperature. It is worth noting that void components are allowed to have negative areas\nin cases where the expansion of two solid components overlap each other.\n\nAxial thermal expansion occurs for each solid Component with a given Block. Axial mechanical contact between components\nis accounted for as the expansion or contraction of a Component affects the positions of components in mechanical\ncontact in axially neighboring blocks. The logic for determining Component-to-Component mechanical contact is\ndescribed in Section :ref:`axialLink`. 
When two or more solid components exist within the Block, the change in Block\nheight is driven by an axial expansion \"target Component\" (e.g., fuel). The logic for determining the axial\nexpansion \"target Component\" is provided in Section :ref:`axialExpTargetComp`.\n\nFigures :ref:`components_for_exp_illustration` and :ref:`axial_exp_illustration` provide illustrations of the axial\nthermal expansion process for an example core assembly. In this example there are four main block types defined: Shield,\nFuel, Plenum, and Dummy.\n\n.. note::\n\n  The \"dummy\" Block is necessary to maintain a consistent core-wide assembly height as this is a common necessity for\n  physics solvers utilizing discrete-ordinates discretization methods.\n\n.. figure:: /.static/axial_expansion_components.png\n  :name: components_for_exp_illustration\n\n  Illustration of Components for Axial Thermal Expansion Process\n\n.. figure:: /.static/axial_expansion_process.png\n  :name: axial_exp_illustration\n\n  Simplified Illustration of Axial Thermal Expansion Process for a Core Assembly\n\nThe target components for each Block type are provided in the following table:\n\n.. list-table:: Example Assignment of Target Components within Blocks\n   :widths: 50 50\n   :header-rows: 1\n\n   * - Block\n     - Target Component\n   * - Shield\n     - Shield\n   * - Fuel\n     - Fuel\n   * - Plenum\n     - Clad\n   * - Dummy\n     - N/A\n\nThe axial thermal expansion algorithm is applied in four steps:\n\n#. Expand the axial dimensions of each solid Component within each block independently.\n#. Align blocks axially such that axially-linked components have consistent alignments (e.g.,\n   overlapping radial dimensions).\n#. Assign the Block lower and upper elevations to account for the thermal expansion of blocks\n   below each Block.\n\n   * Create new mesh lines (i.e., Block bounds) that track the target component.\n\n#. 
Adjust the \"dummy\" Block located at the top of the assembly to maintain a consistent\n   core-wide assembly height before and after axial thermal expansion is applied.\n\n.. _axialLink:\n\nComponent-to-Component Axial Linking\n------------------------------------\nFor components to be in mechanical contact, and therefore axially linked, they need to meet the following criteria:\n\n#. The same Component class. E.g., both are :py:class:`basicShapes.Circle`.\n#. Both solid materials.\n\nIf those are met, then geometric overlap may be checked if the following are met:\n\n#. The components are not :py:class:`components.UnshapedComponent`\n#. The components have the same multiplicity\n#. Or, they share the same grid indices, as specified by a Block :py:class:`<grid> grids.locations.MultiIndexLocation`.\n\nFinally, geometric overlap is established if the biggest inner bounding diameter of the components is less than the\nsmallest outer bounding diameter of the components.\n\nLimitations\n^^^^^^^^^^^\n\nA current limitation of the axial linking logic is that multiple Components may not be linked to a single Component.\nE.g., consider the following:\n\n#. A solid cylinder with an outer diameter of 1.0 cm.\n#. Above, a solid cylinder wrapped with an annular cylinder (separate ARMI components) each with the following dimensions:\n\n  * Solid cylinder with an outer diameter of 0.5 cm.\n  * Annulus with inner diameter of 0.5 cm and outer diameter of 0.75 cm.\n\nFor the above example, in reality, the annulus wrapped pin (two separate ARMI components) would be affected by any\nchanges in height from the solid cylinder. However, this set up is not allowed by the current implementation and will\nraise a ``RuntimeError``.\n\nA second limitation of the component linking implementation involves the Block grid based approach. 
When Block grids are\nused to specify a pin lattice, the Block-grid should be used throughout the Assembly definition; i.e., a mixture of\nthe Block-grid and multiplicity assignment should not be used (and will likely produce unexpected results and may even\nfail). For example, in the following partial blueprint definition, in reality, each shield pin should be in mechanical\ncontact with the fuel pins. However, since there is a mixture of mulitiplicity and Block-grid approaches, they are\nassumed to be not-linked. In order to ensure properly linking, ``block_fuel_axial_shield`` needs to be redefined with\nthe Block-grid based approach.\n\n.. code-block:: yaml\n\n  axial shield: &block_fuel_axial_shield\n    shield:\n      shape: Circle\n      material: HT9\n      Tinput: 25.0\n      Thot: 600.0\n      id: 0.0\n      mult: 169.0\n      od: 0.86602\n\n  fuel multiPin: &block_fuel_multiPin\n    grid name: twoPin\n    fuel 1: &component_fuelmultiPin\n      shape: Circle\n      material: UZr\n      Tinput: 25.0\n      Thot: 600.0\n      id: 0.0\n      od: 0.86602\n      latticeIDs: [1]\n    fuel 2:\n      <<: *component_fuelmultiPin\n      latticeIDs: [2]\n\nThe following incorporates the fix for ``block_fuel_axial_shield`` and illustrates another potentially undesirable\nsituation where unexpected results or runtime failure may occur. Here a plenum block is added above the fuel and while\nit does utilize a Block-grid, ``clad`` will not be axially linked to either the ``fuel 1`` or ``fuel 2`` components\nbelow it. This is because the ``clad`` and ``fuel*`` components have different grids via their ``grid.spatialLocator``\nvalues. As in the previous example, similar unexpected behavior would also occur if a multiplicity-based definition\nwere used for ``clad``.\n\n.. 
code-block:: yaml\n\n  axial shield multiPin: &block_fuel_multiPin_axial_shield\n    grid name: twoPin\n    shield 1: &component_shield_shield1\n      shape: Circle\n      material: HT9\n      Tinput: 25.0\n      Thot: 600.0\n      id: 0.0\n      od: 0.8\n      latticeIDs: [1]\n    shield 2:\n      <<: *component_shield_shield1\n      latticeIDs: [2]\n\n  fuel multiPin: &block_fuel_multiPin\n    grid name: twoPin\n    fuel 1: &component_fuelmultiPin\n      shape: Circle\n      material: UZr\n      Tinput: 25.0\n      Thot: 600.0\n      id: 0.0\n      od: 0.8\n      latticeIDs: [1]\n    fuel 2:\n      <<: *component_fuelmultiPin\n      latticeIDs: [2]\n\n  plenum 2pin: &block_plenum_multiPin\n    grid name: twoPin\n    clad:\n      shape: Circle\n      material: Void\n      Tinput: 25.0\n      Thot: 600.0\n      id: 0.9\n      od: 1.0\n      latticeIDs: [1,2]\n\nTo resolve this potential issue, ``block_plenum_multiPin`` should be replaced with the following definition. See the\n``multi pin fuel`` assembly definition within ``armi/tests/detailedAxialExpansion/refSmallReactorBase.yaml`` for a\ncomplete example.\n\n.. code-block:: yaml\n\n    plenum 2pin: &block_plenum_multiPin\n    grid name: twoPin\n    clad 1: &component_plenummultiPin_clad1\n        shape: Circle\n        material: Void\n        Tinput: 25.0\n        Thot: 600.0\n        id: 0.9\n        od: 1.0\n        latticeIDs: [1]\n    clad 2:\n      <<: *component_plenummultiPin_clad1\n        latticeIDs: [2]\n\n\n.. _axialExpTargetComp:\n\nTarget Component Logic\n----------------------\nWhen two or more solid components exist within a Block, the overall height change of the Block is\ndriven by an \"axial expansion target component\" (e.g., fuel). This Component may either be inferred\nfrom the flags prescribed in the blueprints or manually set using the ``axial expansion target\ncomponent`` block blueprint attribute. The following logic is used to infer the target component:\n\n#. 
Search Component flags for neutronically important components. These are defined in\n   :py:data:`expansionData.TARGET_FLAGS_IN_PREFERRED_ORDER`.\n#. Compare the Block and Component flags. If a Block and Component contain the same flags, that\n   Component is selected as the axial expansion target Component.\n#. If a Block has :py:data:`flags.flags.PLENUM` or :py:data:`flags.flags.ACLP`, the\n   :py:data:`flags.flags.CLAD` Component is hard-coded to be the axial expansion target component.\n   If one does not exist, an error is raised.\n#. \"Dummy Blocks\" are intended to only contain fluid (generally coolant fluid), and do not contain\n   solid components, and therefore do not have an axial expansion target component.\n\n.. _mass_conservation:\n\nMass Conservation\n-----------------\nDue to the fact that all components within a Block are the same height, the conservation of\nmass post-axial expansion is not trivial. The ``axial expansion target component`` plays a critical role in the\nconservation of mass. For pinned-blocks, this is typically chosen to be the most neutronically important Component;\ne.g., in a fuel Block this is typically the fuel Component. Generally speaking, components which are not the axial\nexpansion target will exhibit non-conservation on the Block-level as mass is redistributed across the axially-\nneighboring blocks; this is discussed in more detail in :numref:`mass_redistribution`. However, the mass of all\nsolid components are designed to be conserved at the assembly-level if the following are met for a given assembly\ndesign.\n\n#. Axial continuity of like-objects. E.g., pins, clad, etc.\n#. Components that may expand at different rates axially terminate in unique blocks\n\n   * E.g., the clad extends above the termination of the fuel and the radial duct encasing an\n     assembly extends past the termination of the clad.\n\n#. 
The top-most Block must be a \"dummy Block\" containing fluid (typically coolant).\n\nSee `armi.tests.detailedAxialExpansion\n<https://github.com/terrapower/armi/tree/main/armi/tests/detailedAxialExpansion>`_ for an example\nblueprint which satisfy the above requirements.\n\n.. _mass_redistribution:\n\nBlock-Level Mass Redistribution\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nFigure :ref:`mass_redistribution_illustration` illustrates the mass redistribution process for axial expansion in ARMI given\na uniform axial expansion of 10% for fuel components.\n\n.. figure:: /.static/mass_redistribution_illustration.png\n  :name: mass_redistribution_illustration\n\n  Illustration of mass redistribution for axial expansion in ARMI.\n\nThe redistribution process can be written mathematically. In Figure :ref:`mass_redistribution_illustration`, consider the\nexchange of mass between the clad in Block 0 and Block 1,\n\n.. math::\n\t:name: cMass0\n\n\t\\hat{c}_{0,m} = c_{0,m} + 0.1c_{1,m}\n\n.. math::\n  :name: cMass1\n\n\t\\hat{c}_{1,m} = 0.9c_{1,m},\n\nwhere :math:`c_{0/1,m}` represents the clad mass in Block 0/1 prior to redistribution and :math:`\\hat{c}_{0/1,m}`\nrepresents the clad mass in Block 0/1 after redistribution, respectively. To compute the post-redistribution mass\non-the-fly, the post-mass redistribution number densities, :math:`\\hat{N}_{i,0/1}`, where the subscript\n:math:`i,0/1` represents isotope :math:`i` for Block 0/1, need to be computed.\n\nComputing :math:`\\hat{N}_{i,1}` satisfying :math:`\\hat{c}_{1,m}` can be found by scaling the pre-redistribution number\ndensities by the expansion factor. In practice however, the number densities are not changed and the mass is decreased\nthrough the reduction in the height of the parent Block.\n\n.. note::\n  Recall, component mass in ARMI is calculated as the product of the mass density of the component, the area of the\n  component, and the height of the block. 
The mass of components can be tuned through either of these three parameters.\n\nComputing :math:`\\hat{N}_{i,0}` is non-trivial as, in general, :math:`c_0` and :math:`c_1` are at different\ntemperatures. Consider,\n\n.. math::\n  :name: newCMass\n\n  \\hat{c}_{0,m} &= c_{0,m} + 0.1c_{1,m},\\\\\n  &= \\sum_{x=0}^M N_{x,0} A_0(T_0) h_0 + 0.1 \\sum_{j=0}^K N_{j,1} A_1(T_1) h_1,\\\\\n  \\sum_{i=0}^P \\hat{N}_{i,0} A_0(\\hat{T}_0) \\hat{h}_0 &= \\sum_{x=0}^N N_{x,0} A_0(T_0) h_0 + \\sum_{j=0}^K 0.1 N_{j,1} A_1(T_1) h_1 \\big),\n\nwhere,\n\n* :math:`A_{0/1}(T_{0/1})` is the area of Component 0/1 at temperature 0/1,\n* :math:`h_{0/1}` is the height of Component 0/1,\n* :math:`N`, :math:`K` are the total number of isotopes in Component 0/1, respectively,\n* :math:`P` is the union of the isotopes in Component 0/1,\n* and :math:`\\hat{\\square}` represents post-redistribution values.\n\nThe post-redistribution height, :math:`\\hat{h}_0` is found to be the sum of the pre-expansion height, :math:`h_0`, and\nthe different in z-elevation between it and the ``axial expansion target component`` for the Block, :math:`b`,\n\n.. math::\n\n  \\hat{h}_0 &= h_0 + \\delta,\\\\\n  &= h_0 + \\left(b_{\\text{ztop}} - c_{\\text{ztop}}\\right).\n\n.. note::\n\n  #. Recall, axial block bounds are determined by the ``axial expansion target component`` so the top z-elevation ``ztop``\n     for the block is the same as the top of the ``axial expansion target component``.\n  #. In the axial expansion module, components are given z-elevation attributes. This information is not serialized to\n     the database.\n\nWith :math:`\\hat{h}_0` known, the two remaining unknowns in Equation :eq:`newCMass` are the post-redistribution\ntemperature, :math:`\\hat{T}_0`, and number densities, :math:`\\hat{N}_{i,0}`. The latter are solved by using the\nexpected post-redistribution per-isotope mass and component volume. The mass of isotope, :math:`i`, for Block 0/1\nis calculated as follows,\n\n.. 
math::\n\n  m_{i,0/1} = N_{i,0/1} V_{0/1} \\alpha_i \\chi,\n\nwhere :math:`\\alpha_i` is the atomic weight for isotope, :math:`i`, and :math:`\\chi` is a constant scaling from moles per\ncc to atoms per barn per cm. Given :math:`m_i`, the post redistribution number density is calculated as follows,\n\n.. math::\n\n  \\hat{N}_{i,0} = \\frac{\\left( m_{i,0} + m_{i,1} \\right) \\chi}{ \\big(A_1(T_1) h_1 + A_2(T_2)\\delta\\big) \\alpha_i}.\n\nThe post redistribution temperature, :math:`\\hat{T}_0`, is computed by minimizing the residual of the difference between\nthe actual post-redistribution area of the Component and its expected area,\n\n.. math::\n  :name: newTemp\n\n  A_0(\\hat{T}_0) \\left( h_1 + \\delta \\right) &= A_1(T_1) h_1 + A_2(T_2)\\delta,\\\\\n  A_0(\\hat{T}_0) &= \\frac{A_1(T_1) h_1 + A_2(T_2)\\delta}{h_1 + \\delta}.\n\nThe minimization of Equation :eq:`newTemp` is solved using Brent's method within ``scipy`` where the bounds of the solve\nare the temperatures of the two components exchanging mass, :math:`T_0` and :math:`T_1`. In some instances, the\nminimization may fail. In this case, a mass weighted temperature is used instead,\n\n.. math::\n  :name: consolationPrize\n\n  \\hat{T}_0 = \\frac{m_{i,0}T_0 + m_{i,1}T_1}{m_{i,0} + m_{i,1}}.\n\n\nWarnings and Runtime Error Messages\n-----------------------------------\n\nMass Redistribution Between Like Materials\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\nMass redistribution is currently only possible between components that are the same material. This restriction is to\nensure that material properties post-redistribution are known (e.g., mixing different alloys of metal may result in a\nmaterial with unknown properties). If components of different materials are attempted to have their mass redistributed,\nthe following warning is populated to the stdout:\n\n.. 
code-block::\n\n  Cannot redistribute mass between components that are different materials!\n    Trying to redistribute mass between the following components in <Assembly>:\n        from --> {<Block 0>} : {<Component 0>} : {<Material 0>}\n          to --> {<Block 1>} : {<Component 1>} : {<Material 1>}\n\n    Instead, mass will be removed from (<Component 0> | <Material 0>) and\n    (<Component 1> | <Material 1> will be artificially expanded. The consequence is that mass\n    conservation is no longer guaranteed for the <Component 1> component type on this assembly!\n\nPost-Redistribution Temperature Search Failure\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nAs described in :numref:`mass_redistribution`, the minimization of Equation :eq:`newTemp` may fail. The two mechanisms\nin which Brent's method may fail are if Equation :eq:`newTemp` does not have opposite signs at each prescribed\ntemperature bound or if Equation :eq:`newTemp` is discontinuous. If the minimization routine fails, the following\nwarning is printed to the stdout:\n\n.. code-block::\n\n  Temperature search algorithm in axial expansion has failed in <Assembly>\n  Trying to search for new temp between\n      from --> <Block 0> : <Component 0> : <Material 0> at <Temperature 0> C\n        to --> <Block 1> : <Component 1> : <Material 1> at <Temperature 1> C\n\n  f(<Temperature 0>) = {Area 0(Tc=<Temperature 0>) - targetArea}\n  f(<Temperature 1>) = {Area 0(Tc=<Temperature 1>) - targetArea}\n\n  Instead, a mass weighted average temperature of {Component 0} will be used. The consequence is that\n  mass conservation is no longer guaranteed for this component type on this assembly!\n\n.. note::\n\n  The above warning has been limited to only components which have the ``FUEL`` or ``CONTROL`` flag. These are\n  determined to be the most neutronically important components where the impact of this warning is the most relevant.\n\nAn example of where this warning may be raised is in the following:\n\n#. If two axially linked components have the same ``Thot`` values and different ``Tinput`` values, they will be the same\n   temperature and have different areas. The range for the temperature search is null and it will be impossible to find\n   a temperature satisfying Equation :eq:`newTemp`.\n#. If the coefficient of thermal expansion for a material is sufficiently small relative to the difference in\n   temperature between two components, the bounds of Equation :eq:`newTemp` may not generate opposite signs and Brent's\n   method will fail.\n\nNegative Block or Component Heights\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nIf a Block or Component height becomes negative, an ``ArithmeticError`` is raised indicating which Block and/or\nComponent has a negative height. Both signal a non-physical condition that is unresolvable in the current\nimplementation. This is often caused by thermal expansion of a solid component being drastically different from that\nof the other components nearby.\n\nInconsistent Component and Block Heights\n^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n\nThe current implementation is designed such that the heights of each Component and their parent block remain consistent.\nHowever, these can go out of sync and have been found to be due to incompatible blueprint definitions. As stated in\n:numref:`mass_conservation`, in order for mass to be conserved, each component must axially terminate in unique blocks.\nIf a given blueprint does not meet this condition, the following warning may be raised for non-isothermal conditions:\n\n.. code-block::\n\n  The height of <Component> has gone out of sync with its parent block!\n     Assembly: <Assembly>\n        Block: <Block>\n    Component: <Component>\n\n        Block Height = <Block Height>\n    Component Height = <Component Height>\n\n  The difference in height is <height difference> cm. This difference will result in an artificial\n  <\"increase\" or \"decrease\"> in the mass of <Component>. This is indicative that there are multiple axial component\n  terminations in <Block>. Per the ARMI User Manual, to preserve mass there can only be one axial component termination\n  per block.\n\nIf the difference in height is positive, then the Component in question extends above the bounds of its parent Block and\nits mass will be artificially chopped proportional to the difference in height. If the difference in height is negative,\nthen the Component in question stops below the bounds of the parent Block and its mass will artificially increase\nproportional to the difference in height.\n\n.. note::\n\n  The above warning has been limited to only components which have the ``FUEL`` or ``CONTROL`` flag. These are\n  determined to be the most neutronically important components where the impact of this warning is the most relevant.\n"
  },
  {
    "path": "doc/user/settings_report.rst",
    "content": ".. _settings-report:\n\n===============\nSettings Report\n===============\n\n.. exec::\n    from armi import settings\n    cs = settings.Settings()\n    numSettings = len(cs.values())\n\n    return f\"This document lists all {numSettings} `settings <#the-settings-input-file>`_ in ARMI.\\n\"\n\nThey are all accessible to developers through the :py:class:`armi.settings.caseSettings.Settings` object, which is typically stored in a variable named ``cs``. Interfaces have access to a simulation's settings through ``self.cs``.\n\n\n.. exec::\n    import textwrap\n    from dochelpers import escapeSpecialCharacters\n    from armi import settings\n\n    def looks_like_path(s):\n        \"\"\"Super quick, not robust, check if a string looks like a file path.\"\"\"\n        if s.startswith(\"\\\\\\\\\") or s.startswith(\"//\") or s[1:].startswith(\":\\\\\"):\n            return True\n        return False\n\n    subclassTables = {}\n    cs = settings.Settings()\n\n    # User textwrap to split up long words that mess up the table.\n    ws = \"    \"\n    ws2 = ws + \"    \"\n    ws3 = ws2 + \"  \"\n    wrapper = textwrap.TextWrapper(width=25, subsequent_indent='')\n    wrapper2 = textwrap.TextWrapper(width=10, subsequent_indent='')\n    content = '\\n.. container:: break_before ssp-landscape\\n\\n'\n    content += ws + '.. 
list-table:: ARMI Settings\\n'\n    content += ws2 + ':widths: 30 40 15 15\\n'\n    content += ws2 + ':class: ssp-tiny\\n'\n    content += ws2 + ':header-rows: 1\\n\\n'\n    content += ws2 + '* - Name\\n' + ws3 + '- Description\\n' + ws3 + '- Default\\n' + ws3 + '- Options\\n'\n\n    for setting in sorted(cs.values(), key=lambda s: s.name):\n        content += ws2 + '* - {}\\n'.format(' '.join(wrapper.wrap(setting.name)))\n        description = escapeSpecialCharacters(str(setting.description) or \"\")\n        content += ws3 + \"- {}\\n\".format(\" \".join(wrapper.wrap(description)))\n        default = str(getattr(setting, 'default', None)).split(\"/\")[-1]\n        options = str(getattr(setting,'options','') or '')\n        if looks_like_path(default):\n            # We don't want to display default file paths in this table.\n            default = \"\"\n            options = \"\"\n        content += ws3 + '- {}\\n'.format(' '.join(['``{}``'.format(wrapped) for wrapped in wrapper2.wrap(default)]))\n        content += ws3 + '- {}\\n'.format(' '.join(['``{}``'.format(wrapped) for wrapped in wrapper2.wrap(options)]))\n\n    content += '\\n'\n\n    return content"
  },
  {
    "path": "doc/user/spatial_block_data.rst",
    "content": "******************\nSpatial block data\n******************\n\nMany parameters assigned on a ``Block`` are scalar quantities that are useful for visualization and\nsimple queries (e.g., block with the maximum burnup in an assembly). Spatial parameters in a block,\nsuch as power produced by each pin, is also of interest. Especially when communicating data to\nphysics codes that support sub-block geometric modeling. This page will talk about how spatial\ninformation is assigned to components on a block, how spatial data can be assigned and accessed, and\nhow those data may or may not be updated by the framework.\n\nSub-block spatial grid\n======================\n\nThere are two ways to create the block grid: explicitly via blueprints or via an automated builder.\nThe former is recommended, but the later can work in some specific circumstances.\n\nBlueprints\n----------\n\nIn your blueprints file, you likely have a core grid that defines where assemblies reside in the reactor. Assemblies\nare assigned to locations on that grid according to their ``specifier`` blueprint attribute. Below is an example\nof a \"flats up\" hexagonal core grid of fuel assemblies with 1/3 symmetry.\n\n.. code:: yaml\n\n    grids:\n      core:\n        geom: hex\n        symmetry: third periodic\n        lattice map: |\n          F\n           F\n          F F\n           F\n          F F\n\nWe can similarly define a grid for the block with a similar entry in the ``grids`` portion of the blueprints.\n\n.. 
code:: yaml\n\n    pins:\n      geom: hex_corners_up\n      symmetry: full\n      lattice map: |\n         - - - - - - - - - 1 1 1 1 1 1 1 1 1 1\n          - - - - - - - - 1 1 1 1 1 1 1 1 1 1 1\n           - - - - - - - 1 1 1 1 1 1 1 1 1 1 1 1\n            - - - - - - 1 1 1 1 1 1 1 1 1 1 1 1 1\n             - - - - - 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n              - - - - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n               - - - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n                - - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n                 - 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n                  1 1 1 1 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1\n                   1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n                    1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n                     1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n                      1 1 1 1 1 1 1 1 1 1 1 1 1 1 1\n                       1 1 1 1 1 1 1 1 1 1 1 1 1 1\n                        1 1 1 1 1 1 1 1 1 1 1 1 1\n                         1 1 1 1 1 1 1 1 1 1 1 1\n                          1 1 1 1 1 1 1 1 1 1 1\n                           1 1 1 1 1 1 1 1 1 1\n\nThis creates a ten-ring hexagonal lattice in a \"corners up\" orientation. While the resulting geometry\nmay look like a flats up lattice, the individual hexagons that make up a lattice site are corners up.\n\n.. note::\n\n    The sub-block grid does not need to be of a different orientation of the parent block. A flats up\n    hex block can have a flats up pin lattice. In most cases, an assembly full of pins will have a pin\n    lattice that is off a different type to maximally load pins into the block.\n\nSay we wanted to have a guide tube at the center lattice site with cladding surrounding void and every other lattice\nsite to contain a fuel pin. We need to add the following items to our block definition to link the grid, and to\nassign components to sites on the grid.\n\n1. The block needs a ``grid name`` entry that points to the grid we want to use for this block.\n2. 
Each component that wants to be placed on a lattice site needs a ``latticeIDs`` entry that contains\n   the IDs, like assembly specifiers in the core grid, for that component.\n\nIn the example above, we have two lattice IDs: ``0`` for the center site and ``1`` for the other pins. These\nare chosen for brevity but we could have also done ``fuel`` and ``guide`` or ``F`` and ``G``. Do what makes sense\nfor you.\n\n.. note::\n\n    Like with assembly specifiers, keeping the lattice IDs to have the same number of characters\n    will help the grid render nicer in text editors. This is not a requirement, but it may make life\n    easier for you and your team.\n\nOur complete block definition would start like::\n\n    blocks: &block_fuel\n        grid name: pins\n        fuel:\n            shape: Circle\n            material: UO2\n            Tinput: 20\n            Thot: 20\n            od: 0.819\n            latticeIDs: [1]\n        clad:\n            shape: Circle\n            material: UO2\n            Tinput: 20\n            Thot: 20\n            id: 0.819\n            od: 0.9\n            latticeIDs: [0, 1]\n        void:\n            shape: Circle\n            material: Void\n            Tinput: 20\n            Thot: 20\n            od: 0.819\n            latticeIDs: [0]\n\nNote that we can assign the same component to multiple lattice sites with multiple entries in the\n``latticeIDs`` list. Also note that we do not need to assign a ``mult`` entry to these components.\nTheir multiplicity will be determined based on the number of lattice sites they occupy!\n\n.. seealso::\n\n    The :ref:`LWR tutorial <walkthrough-lwr>` contains additional examples for working with sub-block grids.\n\nAuto grid\n---------\n\nIn some cases, you may have an assembly that contains one pin type. The framework provides a\nmechanism for automatically constructing a spatial grid for the block based only on the multiplicity\nof pin-like components. 
When constructing a block from blueprints, a grid may be added to the block\ndepending on:\n\n1. The existence of an explicitly defined block grid, like in the previously discussed section, and\n2. If the ``autoGenerateBlockGrids`` setting is active.\n\nShould either of these conditions be met, the framework will attempt to add a grid by calling\n:meth:`armi.reactor.blocks.Block.autoCreateSpatialGrids`. However, this behavior is not generalized\nand only implemented on :class:`armi.reactor.blocks.HexBlock`, which makes the following assumptions:\n\n1. You want a corners up hexagonal lattice grid.\n2. The pitch of your hexagonal lattice is determined by :meth:`armi.reactor.blocks.HexBlock.getPinPitch`\n   which may place restrictions on what constitutes a pin.\n3. The number of pins is determined by :meth:`armi.reactor.blocks.HexBlock.getNumPins` which may\n   place similar restrictions on what constitutes a pin.\n\nIf the auto grid creation is successful, components with a multiplicity equal to the number of pins\nwill be assigned locations on the lattice grid.\n\n.. warning::\n\n    Consider subclassing :class:`~armi.reactor.blocks.HexBlock` with specific pin-like methods and\n    overriding the :meth:`~armi.reactor.blocks.HexBlock.autoCreateSpatialGrids` if you want complete\n    control over this process. Alternatively, use an explicit grid in blueprints.\n\n\nInteracting with spatial data\n=============================\n\nThis section will focus on accessing locations of components in the block, locations of specifically\npins, and examples of some pin data that may be assigned to a block's parameter set.\n\nComponent locations\n-------------------\n\nComponents that live on a spatial grid have a ``spatialLocator`` attribute to help indicate where\nthat component exists in space. 
If we grab the fuel component from the UO2 block in the\n:ref:`ANL AFCI 177 example <walkthrough-inputs>` we can see where it exists in the block::\n\n    >>> import armi\n    >>> armi.configure()\n    >>> from armi.reactor.flags import Flags\n    >>> r = armi.init(fName=\"anl-afci-177.yaml\").r\n    >>> fuelAssem = r.core[5]\n    >>> fuelBlock = fuelAssem[1]\n    >>> fuelBlock.spatialGrid\n    <HexGrid -- 2046645914880\n    Bounds:\n    None\n    None\n    None\n    Steps:\n    [ 0.4444 -0.4444  0.    ]\n    [0.76972338 0.76972338 0.        ]\n    [0. 0. 0.]\n    Anchor: <fuel B0009-001 at 008-040-001 XS: C ENV GP: A>\n    Offset: [0. 0. 0.]\n    Num Locations: 400>\n    >>> fuel = fuelBlock.getChildrenWithFlags(Flags.FUEL)[0]\n    >>> fuel.getDimension(\"mult\")\n    271\n    >>> fuel.spatialLocator\n    <MultiIndexLocation with 271 locations>\n\nThis :class:`~armi.reactor.grids.MultiIndexLocation` is a way to indicate this Component exists at multiple\nsites. Each item in this locator is one location on the underlying grid where we could find this component::\n\n    >>> fuel.spatialLocator[0]\n    <IndexLocation @ (0,0,0)>\n    >>> fuel.spatialLocator[0].getLocalCoordinates()\n    array([0., 0., 0.])\n    >>> coordsFromFuel = fuel.spatialLocator.getLocalCoordinates()\n    >>> coordsFromFuel.shape\n    (271, 3)\n\nWe get a ``(271, 3)`` array because we have 271 of these fuel components in the block, and each row contains one\n(x, y, z) location for that component. We can do this for every component, though some may only exist at a single\nsite on the grid and be assigned a :class:`~armi.reactor.grids.CoordinateLocation` spatial locator instead. 
The API\nis mostly the same, but attempts to signify such an object does not live on the grid e.g., duct or derived shape\nobjects::\n\n    >>> duct = fuelBlock.getChildrenWithFlags(Flags.DUCT)[0]\n    >>> duct.spatialLocator\n    <CoordinateLocation @ (0.0,0.0,0.0)>\n\nPin locations\n-------------\n\nEverything in the previous section works for finding center points of pins in your assembly. But often\ntimes you have multiple components that may exist at the same lattice site (e.g., fuel, gap, clad,\nmaybe a wire?). Or you may have multiple cladded-things that count as pins but exist in multiple\ncomponents. In some circumstances, :meth:`armi.reactor.blocks.HexBlock.getPinCoordinates` may be\nuseful to find the unique centroids of pins in a block. Using our example above, we get a very\nsimilar set of coordinates when comparing to the coordinates of the fuel pin::\n\n    >>> coordsFromPin = fuel.spatialLocator.getLocalCoordinates()\n    >>> coordsFromBlock = fuelBlock.getPinCoordinates()\n    >>> (coordsFromPin == coordsFromBlock).all()\n    True\n\nIn this specific case :meth:`~armi.reactor.blocks.HexBlock.getPinCoordinates` looks at components\nwith ``Flags.CLAD`` and obtains their locations, and we have one cladding component and it exists at\neach of the 271 sites we care about. However, if you have multiple cladding components per lattice\nsite, such as in the :ref:`C5G7 example <walkthrough-lwr>`, you may see an incorrect number of\nlocations returned.\n\n.. 
note::\n\n    Consider making application-specific subclasses of ``Block``, ``HexBlock``, and/or ``CartesianBlock``\n    with more targeted implementations of :meth:`~armi.reactor.blocks.Block.getNumPins`,\n    :meth:`~armi.reactor.blocks.Block.getPinPitch`, :meth:`~armi.reactor.blocks.Block.getPinLocations`\n    and other pin-specific methods.\n\n\nPin parameter data\n------------------\n\nThe ARMI framework defines a few parameters that live on the block, but define data for each of the\nchild pin components. Two examples are ``Block.p.linPowByPin`` and ``Block.p.pinMgFluxes``. These\nparameters are structured and related to the output of ``getPinCoordinates`` such that\n\n1. Pin ``i`` can be found at ``Block.getPinCoordinates()[i]``.\n2. Parameter data for pin ``i`` can be found at location ``i`` in the parameter array, e.g.,\n   ``Block.p.linPowByPin[i]``.\n\nParameters like ``Block.p.pinMgFluxes`` may be higher dimensional, storing multi-group flux for each\npin. In this case, the parameter data array has shape ``(nPins, nGroups)`` such that\n``Block.p.pinMgFluxes[i, g]`` has the group ``g`` flux in pin ``i``, found at\n``Block.getPinCoordinates()[i]``.\n\nBlock rotation\n==============\n\n.. warning:: \n    \n    Rotation is currently only supported for hexagonal blocks\n\nUsing the logic from the previous section on pin parameter data, it may be useful to know how\nrotating a block changes the data stored on that block.\n\nSpatial locators\n----------------\n\nFirst, rotating a block will update the ``spatialLocator`` attribute on every child of the block.\nFor objects defined at the center of the block, they will still be located at the center. 
Objects\nwith a ``MultiIndexLocation`` will have new locations such that ``spatialLocator[i]`` will be\nconsistent before and after rotation::\n\n    >>> import math\n    >>> # zeroth location is the origin so pick a location that\n    >>> # changes through rotation\n    >>> fuel.spatialLocator[1]\n    <IndexLocation @ (1,0,0)>\n    >>> fuel.spatialLocator[1].getLocalCoordinates()\n    array([0.4444    , 0.76972338, 0.        ])\n    >>> fuelBlock.rotate(math.radians(60))\n    >>> fuel.spatialLocator[1]\n    <IndexLocation @ (0,1,0)>\n    >>> fuel.spatialLocator[1].getLocalCoordinates()\n    array([-0.4444    ,  0.76972338,  0.        ])\n\nBecause this sub-block grid is a corners up hex grid, to tightly fit inside the flats up hex block,\none rotation from the north east location, ``(1,0,0)``, reflects this pin across the y-axis.\n\nPin parameters\n--------------\n\nParameter data that are defined on children of the block are not updated. Therefore data for pin\n``i`` will be found in e.g., ``Block.p.pinMgFluxes[i]`` before and after rotation.\n\nCorners and edges\n-----------------\n\nParameters defined on the edges and corners of the block, i.e., those with\n:attr:`armi.reactor.parameters.ParamLocation.CORNERS` and\n:attr:`~armi.reactor.parameters.ParamLocation.EDGES` will be shuffled in place to reflect the new\nrotation. For hexagonal blocks, these parameters should have six entries, e.g., one value for each\ncorner, starting at the upper right and moving counter clockwise. 
Let's assign some fake data to our\nfuel block from above and see what happens::\n\n    >>> import numpy as np\n    >>> fuelBlock.p.cornerFastFlux = np.arange(6, dtype=float)\n    >>> fuelBlock.p.cornerFastFlux\n    array([0., 1., 2., 3., 4., 5.])\n    >>> # Two clockwise rotations of 60 degrees\n    >>> fuelBlock.rotate(math.radians(-120))\n    >>> fuelBlock.p.cornerFastFlux\n    array([2., 3., 4., 5., 0., 1.])\n\nVisually, the upper right corner, number ``0``, has been rotated to the lower right corner, number ``4``.\nAnd the corner ``2``, the leftmost corner, has been moved to corner ``0``, the upper right corner.\n\nOther rotated parameters\n------------------------\n\nOther parameters may be updated to reflect some geometric state. The second position of\n``Block.p.orientation`` reflects the cumulative rotation around the z-axis and is updated through\nrotation. Displacement parameters like ``Block.p.displacementX`` are updated as the displacement\nvector rotates through space.\n"
  },
  {
    "path": "doc/user/symmetry_handling.rst",
    "content": "*****************\nSymmetry Handling\n*****************\n\nThis section will describe how partial core symmetry is handled in ARMI.\n\nIntroduction\n============\n\nA partial core may be specified in the blueprints file using the ``symmetry`` attribute, as shown below.\n\n.. code:: yaml\n\n    grids:\n      core:\n        geom: hex\n        symmetry: third periodic\n        lattice map: |\n          F\n           F\n          F F\n           F\n          C F\n\nSpecifying a core this way is useful for saving computation time, so long as the core state being modeled is truly symmetric.\n\nBecause of this, assemblies and blocks have a ``symmetryFactor`` attribute that is used to track how much of the object\nis present in the currently modeled core. For example, the central assembly (labeled \"C\") in the core lattice definition above would \nhave a symmetry factor of 3, representing that only 1/3rd of the assembly is in the core model. The blocks within that\nassembly would have the same symmetry factor.\n\nReactors do not have symmetry factors, Cores have symmetry factors but no core parameters are adjusted due to symmetry currently,\nand Components always have a symmetry factor of 1. That is, only parameters for Assembly and Block objects are adjusted for \nsymmetry.\n\n\nSymmetry-Aware Operations\n=========================\n\nBecause some assemblies may be partially in a partial core (e.g. the central assembly in a 1/3rd hex core) certain \ncore and assembly operations must adjust parameters to maintain accurate bookkeeping.\n\nThe third core hex converter methods :py:meth:`convert <armi.reactor.converters.geometryConverters.ThirdCoreHexToFullCoreChanger.convert>` and \n:py:meth:`restorePreviousGeometry <armi.reactor.converters.geometryConverters.ThirdCoreHexToFullCoreChanger.restorePreviousGeometry>` are both \ncore-level operations that account for symmetry when calculating the values of parameters on assemblies and blocks in the converted core. 
\n\nOn an assembly level, the assembly method :py:meth:`moveTo <armi.reactor.assemblies.Assembly.moveTo>` adjusts parameters as necessary when\nmoving an assembly between locations with differing symmetry factors.\n\n\nParameters Adjusted With Symmetry\n=================================\n\nOnly some parameters need to be adjusted with symmetry. A parameter must be on either an Assembly or Block object and \nhave the flag ``VOLUME_INTEGRATED`` to be adjusted in the operations listed in the previous section. \n"
  },
  {
    "path": "doc/user/user_install.rst",
    "content": "************\nInstallation\n************\n\nThis section will guide you through installing the ARMI Framework on your machine.\n\nPrerequisites\n=============\nThese instructions target users with some software development knowledge. In particular, we assume familiarity with\n`Python <https://www.python.org/>`__, `virtual environments <https://docs.python.org/3/tutorial/venv.html>`_, and\n`Git <https://git-scm.com/>`_.\n\nYou must have the following installed before proceeding:\n\n* `Python <https://www.python.org/downloads/>`__ version 3.9 or newer.\n\n  .. admonition:: The right Python command\n\n     Python 2 and Python 3 often co-exist on the same system. Whether the ``python`` command refers to Python 2 or 3\n     depends on operating system and configuration. Under some circumstances ``python3`` or ``pip3`` will need to be\n     used in place of ``python`` or ``pip`` to target the correct version. You can verify your version by running\n     ``python -VV``. You can also refer to the Python executable with a full path.\n\nYou also likely need the following for interacting with the source code repository:\n\n* `Git <https://git-scm.com/>`_\n\nPreparing a Virtual Environment\n===============================\nWhile not *technically* required, we highly recommend installing ARMI into a\n`virtual environment <https://docs.python.org/3/library/venv.html>`_  to assist in dependency management. In short,\nvirtual environments are a mechanism by which a Python user can maintain separate sets of Python packages for various\napplications on the same machine. This prevents dependencies from various tools conflicting with one another. ARMI has a\nlot of requirements and may conflict with other libraries on your system unless you do this step.\n\nStart a terminal and navigate to the directory you'd like to install ARMI into. 
To create a new virtual environment, use\na command like::\n\n    $ python -m venv armi-venv\n\nThe result is a folder named ``armi-venv``, which contains a minimal set of Python packages, and a set of scripts for\nactivating and deactivating that environment. To activate the environment, invoke the appropriate script. On Windows::\n\n    $ armi-venv\\Scripts\\activate.bat\n\nOr on Linux::\n\n    $ source armi-venv/bin/activate\n\n.. note:: You'll have to activate the venv every time you open a new command line.\n\tMany people set up scripts to activate this automatically.\n\nIf you will be running ARMI in parallel over MPI, you must also install the ``mpi4py`` Python library. On Linux, doing\nso will require some MPI development libraries (e.g. ``sudo apt install libopenmpi-dev``).\n\nGetting the code\n================\nChoose one of the following two installation methods depending on your needs.\n\nStep 0: Update PIP\n------------------\nIf you are using an older version of Python, say 3.9 or older, you should ensure that you are using a version of PIP that is at least 22.1::\n\n    (armi-venv) $ pip install pip>=22.1\n\nor it will be enough to just do::\n\n    (armi-venv) $ pip install -U pip\n\n\nOption 1: Install as a library\n------------------------------\nIf you plan on running ARMI without viewing or modifying source code, you may install it with ``pip``, which will\nautomatically discover and install the dependencies. This is useful for quick evaluations or to use it as a dependency\nin another project::\n\n   \t(armi-venv) $ pip install https://github.com/terrapower/armi/archive/main.zip\n\n\nOption 2: Install as a repository (for developers)\n--------------------------------------------------\nIf you'd like to view or change the ARMI source code (common!), you need to clone the ARMI source and then install its\ndependencies. Clone the ARMI source code from the git repository with::\n\n   (armi-venv) $ git clone https://github.com/terrapower/armi\n\n.. 
tip:: If you plan to contribute to ARMI (please do!), you may want to use\n\tSSH keys and use ``git clone git@github.com:terrapower/armi.git``.\n\nNow install ARMI with all its dependencies::\n\n    (armi-venv) $ cd armi\n    (armi-venv) $ pip install -e \".[test]\"\n\n.. tip:: If you don't want to install ARMI into your venv, you will need to add the ARMI source\n\tlocation to your system's ``PYTHONPATH`` environment variable so that\n\tPython will be able to find the code when you import it from other directories.\n\n\tIn Windows, click *Start* and type ``Edit Environmental Variable`` to adjust ``PYTHONPATH``.\n\tIn Linux, add ``export PYTHONPATH=/path/to/armi/source`` in a  user profile script (like ``.bashrc``).\n\n\nVerifying installation\n----------------------\nCheck the installation status by running::\n\n    (armi-venv) $ armi\n\nor, equivalently::\n\n    (armi-venv) $ python -m armi\n\nIf it worked, you should see the ARMI splash screen and no errors::\n\n                       ---------------------------------------------------\n                      |             _      ____     __  __    ___         |\n                      |            / \\    |  _ \\   |  \\/  |  |_ _|        |\n                      |           / _ \\   | |_) |  | |\\/| |   | |         |\n                      |          / ___ \\  |  _ <   | |  | |   | |         |\n                      |         /_/   \\_\\ |_| \\_\\  |_|  |_|  |___|        |\n                      |         Advanced  Reactor  Modeling Interface     |\n                       ---------------------------------------------------\n\n\nIf it works, congrats! So far so good.\n\n\nOptional Setup\n==============\nThis subsection provides setup for optional items.\n\nGUI input\n---------\nTo use the :py:mod:`graphical core-map editor <armi.utils.gridEditor>` you will need to also install\n`wxPython <https://wxpython.org/pages/downloads/index.html>`_. 
This is not installed by default during armi installation\nbecause it can cause installation complexities on some platforms.\n\nIn any case, all GUI dependencies can be installed by::\n\n    (armi-venv) $ pip install armi[grids]\n\nGUI output\n----------\nARMI can write VTK and XDMF output files which can be viewed in tools such as `ParaView <https://www.paraview.org/>`_\nand `VisIT <https://wci.llnl.gov/simulation/computer-codes/visit>`_. Download and install those tools from their\nwebsites.\n"
  },
  {
    "path": "pyproject.toml",
    "content": "# Copyright 2023 TerraPower, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n#######################################################################\n#                        GENERAL PYTHON CONFIG                        #\n#######################################################################\n[build-system]\nrequires = [\"setuptools>=61.2\"]\nbuild-backend = \"setuptools.build_meta\"\n\n[project]\nname = \"armi\"\nversion = \"0.7.0\"\ndescription = \"An open-source nuclear reactor analysis automation framework that helps design teams increase efficiency and quality.\"\nlicense-files = [\"LICENSE.md\", \"AUTHORS\"]\nrequires-python = \">3.8\"\nreadme = \"README.rst\"\nauthors = [\n    { name=\"TerraPower, LLC\", email=\"armi-devs@terrapower.com\" },\n]\ndependencies = [\n    \"coverage>=7.2.0\", # Code coverage tool. 
Sadly baked into every Case.\n    \"h5py>=3.0,<=3.9 ; python_version < '3.11.0'\",\n    \"h5py>=3.9 ; python_version >= '3.11.0'\", # Needed because our database files are H5 format\n    \"matplotlib>=3.5.3,<3.8.0 ; python_version < '3.11.0'\",\n    \"matplotlib>=3.5.3 ; python_version >= '3.11.0'\", # Important plotting library\n    \"numpy>=1.21\", # Important math library\n    \"ordered-set>=3.1.1\", # A useful data structure\n    \"pluggy>=1.2.0\", # Central tool behind the ARMI Plugin system\n    \"pyevtk>=1.2.0\", # Handles binary VTK visualization files\n    \"ruamel.yaml>=0.19.1 ; python_version >= '3.11.0'\", # Our foundational YAML library\n    \"ruamel.yaml<=0.17.21 ; python_version < '3.11.0'\", # Our foundational YAML library\n    \"scipy>=1.7.0\", # Used for curve-fitting and matrix math\n    \"sympy>=1.14\", # Used to represent mathematical curves for material properties\n    \"toml>0.9.5\", # Needed to parse the pyproject.toml file\n    \"voluptuous>=0.12.1\", # Used to validate YAML data files\n    \"yamlize==0.7.1\", # Custom YAML-to-object library\n]\nclassifiers = [\n    \"Development Status :: 4 - Beta\",\n    \"Intended Audience :: Science/Research\",\n    \"License :: OSI Approved :: Apache Software License\",\n    \"Operating System :: OS Independent\",\n    \"Programming Language :: Python\",\n    \"Programming Language :: Python :: 3\",\n    \"Programming Language :: Python :: 3 :: Only\",\n    \"Programming Language :: Python :: 3.9\",\n    \"Programming Language :: Python :: 3.10\",\n    \"Programming Language :: Python :: 3.11\",\n    \"Programming Language :: Python :: 3.12\",\n    \"Programming Language :: Python :: 3.13\",\n    \"Programming Language :: Python :: 3.14\",\n    \"Topic :: Scientific/Engineering :: Information Analysis\",\n]\n\n[project.urls]\nHomepage = \"https://terrapower.github.io/armi/\"\nDocumentation = \"https://terrapower.github.io/armi\"\nChangelog = \"https://github.com/terrapower/armi/releases\"\nRepository = 
\"https://github.com/terrapower/armi\"\n\"Bug Tracker\" = \"https://github.com/terrapower/armi/issues\"\n\n[project.optional-dependencies]\ngrids = [\"wxpython==4.2.1\"]\nmemprof = [\"psutil\"]\nmpi = [\"mpi4py\"]\ntest = [\n    \"ipykernel>=6.0.0\", # IPython Kernel (We run test notebooks from the doc tutorials.)\n    \"jupyter_client>=7.0.0\", # Reference implementation of the Jupyter protocol\n    \"nbconvert>=7.0.0\", # Converting Jupyter Notebooks to other formats\n    \"nbformat>=5.5.0\", # Jupyter Notebook reader\n    \"pytest-cov>=4.0.0\", # coverage plugin\n    \"pytest-xdist>=3.0.0\", # To spread our tests over multiple CPUs\n    \"pytest>=7.0.0\", # Our primary test tooling\n    \"ruff==0.9.7\", # Linting and code formatting (version-pinned)\n]\ndocs = [\n######################################################################################\n# These are more specified than usual, because Sphinx docs seem to be quite fragile. #\n#                                                                                    #\n# Officially, we build our docs with Python 3.13.                                    
#\n######################################################################################\n    \"docutils==0.21.2\", # Needed by sphinx-rtd-theme\n    \"ipykernel>=6.0.0\", # IPython kernel to run Jupyter notebooks\n    \"Jinja2==3.1.5\", # Used in numpydoc and nbconvert\n    \"nbsphinx-link==1.3.1\", # Adds Jupyter NBs to Sphinx source root\n    \"nbsphinx==0.9.6\", # Parses Jupyter notebooks\n    \"pandoc\", # Must be in the path (to convert file formats)\n    \"pylint\", # Generates UML diagrams\n    \"pypdf==5.3.1\", # Generating a single PDF file for the Sphinx documentation\n    \"setuptools\",  # needed for conf.py tooling\n    \"sphinx-data-viewer==0.1.5\",\n    \"sphinx-gallery==0.13.0\", # Builds an HTML version of a Python script and puts it into a gallery\n    \"sphinx-needs==4.2.0\", # Requirements traceability matrices for QA\n    \"sphinx-rtd-theme==3.0.2\", # Read-The-Docs theme for Sphinx\n    \"sphinx-test-reports==1.1.0\", # sphinx-needs test reports in the STR\n    \"Sphinx==7.4.7\", # central library used to build our docs\n    \"sphinxcontrib-apidoc==0.5.0\", # More easily document our API\n    \"sphinxcontrib-applehelp==2.0.0\",\n    \"sphinxcontrib-devhelp==2.0.0\",\n    \"sphinxcontrib-htmlhelp==2.1.0\",\n    \"sphinxcontrib-jquery==4.1\", # Handle missing jquery errors\n    \"sphinxcontrib-jsmath==1.0.1\",\n    \"sphinxcontrib-plantuml==0.30\", # UML support in sphinx-needs\n    \"sphinxcontrib-qthelp==2.0.0\",\n    \"sphinxcontrib-serializinghtml==2.0.0\",\n    \"sphinxext-opengraph==0.9.1\", # Generates OpenGraph metadata to make cards for social media\n    \"unittest-xml-reporting==3.2.0\", # Allows us to generate junit XML test reports\n]\n\n[project.scripts]\narmi = \"armi.__main__:main\"\n\n[tool.setuptools.packages]\nfind = {}\n\n\n#######################################################################\n#                             RUFF CONFIG                             
#\n#######################################################################\n[tool.ruff]\n# This is the exact version of Ruff we use.\nrequired-version = \"0.9.7\"\n\n# Assume Python 3.13\ntarget-version = \"py313\"\n\n# Setting line-length to 120\nline-length = 120\n\n# Exclude a variety of commonly ignored directories.\nexclude = [\n    \".bzr\",\n    \".direnv\",\n    \".eggs\",\n    \".git\",\n    \".git-rewrite\",\n    \".hg\",\n    \".mypy_cache\",\n    \".nox\",\n    \".pants.d\",\n    \".pytype\",\n    \".ruff_cache\",\n    \".svn\",\n    \".tox\",\n    \".venv\",\n    \"__pycache__\",\n    \"__pypackages__\",\n    \"_build\",\n    \"buck-out\",\n    \"build\",\n    \"dist\",\n    \"doc/tutorials/armi-example-app\",\n    \"node_modules\",\n    \"venv\",\n]\n\n[tool.ruff.lint]\n# Enable pycodestyle (E) and Pyflakes (F) codes by default.\n# D - NumPy docstring rules\n# I - Sorting imports\n# N801 - Class name should use CapWords convention\n# SIM - code simplification rules\n# TID - tidy imports\nselect = [\"D\", \"E\", \"F\", \"I\", \"N801\", \"SIM\", \"TID\"]\n\n# Ruff rules we ignore (for now) because they are not 100% automatable\n#\n# D100 - Missing docstring in public module\n# D101 - Missing docstring in public class\n# D102 - Missing docstring in public method\n# D103 - Missing docstring in public function\n# D106 - Missing docstring in public nested class\n# D401 - First line of docstring should be in imperative mood\n# D404 - First word of the docstring should not be \"This\"\n# SIM102 - Use a single if statement instead of nested if statements\n# SIM105 - Use contextlib.suppress({exception}) instead of try-except-pass\n# SIM108 - Use ternary operator {contents} instead of if-else-block\n# SIM114 - Combine if branches using logical or operator\n# SIM115 - Use context handler for opening files\n# SIM117 - Use a single with statement with multiple contexts instead of nested with statements\n\n# Ruff rules we ignore because we don't want them\n#\n# D105 
- we don't need to document well-known magic methods\n# D205 - 1 blank line required between summary line and description\n# E731 - we can use lambdas however we want\n# RUF100 - no unused noqa statements (not consistent enough yet)\n# SIM118 - this does not work where we overload the .keys() method\n#\nignore = [\"D100\", \"D101\", \"D102\", \"D103\", \"D105\", \"D106\", \"D205\", \"D401\", \"D404\", \"E731\", \"RUF100\", \"SIM102\", \"SIM105\", \"SIM108\", \"SIM114\", \"SIM115\", \"SIM117\", \"SIM118\"]\n\n[tool.ruff.lint.per-file-ignores]\n# D1XX - enforces writing docstrings\n# E741 - ambiguous variable name\n# N - We have our own naming conventions for unit tests.\n# SLF001 - private member access\n\"*/tests/*\" = [\"D1\", \"E741\", \"N\", \"SLF001\"]\n\"doc/gallery-src/*\" = [\"D400\"]\n\n[tool.ruff.lint.flake8-tidy-imports]\nban-relative-imports = \"all\"\n\n[tool.ruff.lint.pydocstyle]\nconvention = \"numpy\"\n\n[tool.ruff.format]\ndocstring-code-format = true\ndocstring-code-line-length = 120\n\n\n#######################################################################\n#                            PYTEST CONFIG                            #\n#######################################################################\n[tool.pytest.ini_options]\npython_files = \"test_*.py\"\npython_functions = \"nothing matches this pattern\"\naddopts = \"--durations=30 --tb=native\"\nfilterwarnings = [\n    \"ignore: the matrix subclass is not the recommended way:PendingDeprecationWarning\",\n]\n\n[tool.coverage.run]\nexclude_also = [\n    \"armi/cli/gridGui.py\",\n    \"armi/utils/gridEditor.py\",\n    \"armi/utils/tests/test_gridGui.py\",\n    \"venv/\",\n    ]\nsource = [\"armi\"]\nparallel = true\n# Change default .coverage file to something that doesn't have a dot because some Windows services can't handle dots.\ndata_file = \"coverage_results.cov\"\n\n[tool.coverage.report]\n# Regexes for lines to exclude from consideration\nomit = [\n    \"*/tests/*\",\n    
\"armi/cli/gridGui.py\",\n    \"armi/utils/gridEditor.py\",\n    ]\n\nexclude_also = [\n    # Don't complain about missing debug-only code:\n    \"def __repr__\",\n    \"if self\\\\.debug\",\n\n    # Don't complain if non-runnable code isn't run:\n    \"if __name__ == .__main__.:\",\n\n    # Don't complain about missing type checking-only code:\n    \"if TYPE_CHECKING\",\n\n    # Don't complain if tests don't hit defensive assertion code:\n    \"except ImportError\",\n    \"pass\",\n    \"raise AssertionError\",\n    \"raise KeyboardInterrupt\",\n    \"raise NotImplementedError\",\n    ]\n\nignore_errors = true\n\n\n#######################################################################\n#              DATA FILES TO BE INCLUDED WITH THE PROJECT             #\n#######################################################################\n[tool.setuptools.package-data]\narmi = [\n    \"bookkeeping/tests/armiRunSmallest-A0000-aHist-ref.txt\",\n    \"matProps/tests/invalidTestFiles/*\",\n    \"matProps/tests/testDir1/*\",\n    \"matProps/tests/testDir2/*\",\n    \"matProps/tests/testDir3/*\",\n    \"matProps/tests/testDir4/*\",\n    \"matProps/tests/testMaterialsData/*\",\n    \"nuclearDataIO/cccc/tests/fixtures/labels.ascii\",\n    \"nuclearDataIO/cccc/tests/fixtures/labels.binary\",\n    \"nuclearDataIO/cccc/tests/fixtures/mc2v3.dlayxs\",\n    \"nuclearDataIO/cccc/tests/fixtures/simple_cartesian.pwdint\",\n    \"nuclearDataIO/cccc/tests/fixtures/simple_cartesian.rtflux\",\n    \"nuclearDataIO/cccc/tests/fixtures/simple_cartesian.rzflux\",\n    \"nuclearDataIO/cccc/tests/fixtures/simple_hexz.dif3d\",\n    \"nuclearDataIO/cccc/tests/fixtures/simple_hexz.geodst\",\n    \"nuclearDataIO/cccc/tests/fixtures/simple_hexz.nhflux\",\n    \"nuclearDataIO/tests/fixtures/AA.gamiso\",\n    \"nuclearDataIO/tests/fixtures/AA.pmatrx\",\n    \"nuclearDataIO/tests/fixtures/AB.gamiso\",\n    \"nuclearDataIO/tests/fixtures/AB.pmatrx\",\n    
\"nuclearDataIO/tests/fixtures/combined-AA-AB.gamiso\",\n    \"nuclearDataIO/tests/fixtures/combined-AA-AB.isotxs\",\n    \"nuclearDataIO/tests/fixtures/combined-AA-AB.pmatrx\",\n    \"nuclearDataIO/tests/fixtures/combined-and-lumped-AA-AB.gamiso\",\n    \"nuclearDataIO/tests/fixtures/combined-and-lumped-AA-AB.isotxs\",\n    \"nuclearDataIO/tests/fixtures/combined-and-lumped-AA-AB.pmatrx\",\n    \"nuclearDataIO/tests/fixtures/ISOAA\",\n    \"nuclearDataIO/tests/fixtures/ISOAB\",\n    \"nuclearDataIO/tests/fixtures/mc2v3-AA.flux_ufg\",\n    \"nuclearDataIO/tests/fixtures/mc2v3-AA.gamiso\",\n    \"nuclearDataIO/tests/fixtures/mc2v3-AA.isotxs\",\n    \"nuclearDataIO/tests/fixtures/mc2v3-AA.pmatrx\",\n    \"nuclearDataIO/tests/fixtures/mc2v3-AB.gamiso\",\n    \"nuclearDataIO/tests/fixtures/mc2v3-AB.isotxs\",\n    \"nuclearDataIO/tests/fixtures/mc2v3-AB.pmatrx\",\n    \"nuclearDataIO/tests/library-file-generation\",\n    \"nuclearDataIO/tests/library-file-generation/combine-AA-AB.inp\",\n    \"nuclearDataIO/tests/library-file-generation/combine-and-lump-AA-AB.inp\",\n    \"nuclearDataIO/tests/library-file-generation/mc2v2-dlayxs.inp\",\n    \"nuclearDataIO/tests/library-file-generation/mc2v3-AA.inp\",\n    \"nuclearDataIO/tests/library-file-generation/mc2v3-AB.inp\",\n    \"nuclearDataIO/tests/library-file-generation/mc2v3-dlayxs.inp\",\n    \"nuclearDataIO/tests/simple_hexz.inp\",\n    \"physics/neutronics/tests/ISOXA\",\n    \"physics/neutronics/tests/rzmflxYA\",\n    \"resources/burn-chain.yaml\",\n    \"resources/elements.dat\",\n    \"resources/mcc-nuclides.yaml\",\n    \"resources/nuclides.dat\",\n    \"resources/referenceFissionProducts.dat\",\n    \"testing/reactors/anl-afci-177/anl-afci-177-blueprints.yaml\",\n    \"testing/reactors/anl-afci-177/anl-afci-177-coreMap.yaml\",\n    \"testing/reactors/anl-afci-177/anl-afci-177-fuelManagement.py\",\n    \"testing/reactors/anl-afci-177/anl-afci-177.yaml\",\n    \"testing/reactors/c5g7/c5g7-blueprints.yaml\",\n    
\"testing/reactors/c5g7/c5g7-settings.yaml\",\n    \"testing/reactors/godiva/godiva-blueprints.yaml\",\n    \"testing/reactors/godiva/godiva.armi.unittest.yaml\",\n    \"testing/reactors/smallHexReactor/smallHexReactor-bp.yaml\",\n    \"testing/reactors/smallHexReactor/smallHexReactor.yaml\",\n    \"testing/resources/armiRun-SHUFFLES.txt\",\n    \"testing/resources/armiRun-SHUFFLES.yaml\",\n    \"testing/resources/COMPXS.ascii\",\n    \"tests/1DslabXSByCompTest.yaml\",\n    \"tests/armiRun.yaml\",\n    \"tests/detailedAxialExpansion/armiRun.yaml\",\n    \"tests/detailedAxialExpansion/refSmallCoreGrid.yaml\",\n    \"tests/detailedAxialExpansion/refSmallReactor.yaml\",\n    \"tests/detailedAxialExpansion/refSmallReactorBase.yaml\",\n    \"tests/ISOAA\",\n    \"tests/refSmallCartesian.yaml\",\n    \"tests/refSmallCoreGrid.yaml\",\n    \"tests/refSmallReactor.yaml\",\n    \"tests/refSmallReactorBase.yaml\",\n    \"tests/refSmallSfpGrid.yaml\",\n    \"tests/refTestCartesian.yaml\",\n    \"tests/smallestTestReactor/armiRunSmallest.yaml\",\n    \"tests/smallestTestReactor/refOneBlockReactor.yaml\",\n    \"tests/smallestTestReactor/refSmallestReactor.yaml\",\n    \"tests/tutorials\",\n    \"tests/tutorials/data_model.ipynb\",\n    \"tests/zpprTest.yaml\",\n    \"tests/zpprTestGeom.yaml\",\n]\n"
  }
]